diff -Nru fonttools-3.0/.appveyor.yml fonttools-3.21.2/.appveyor.yml --- fonttools-3.0/.appveyor.yml 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/.appveyor.yml 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,72 @@ +environment: + matrix: + - JOB: "2.7.13 32-bit" + PYTHON_HOME: "C:\\Python27" + TOXENV: "py27-cov" + TOXPYTHON: "C:\\Python27\\python.exe" + + - JOB: "3.5.2 32-bit" + PYTHON_HOME: "C:\\Python35" + TOXENV: "py35-cov" + TOXPYTHON: "C:\\Python35\\python.exe" + + - JOB: "3.6.0 32-bit" + PYTHON_HOME: "C:\\Python36" + TOXENV: "py36-cov" + TOXPYTHON: "C:\\Python36\\python.exe" + + - JOB: "2.7.13 64-bit" + PYTHON_HOME: "C:\\Python27-x64" + TOXENV: "py27-cov" + TOXPYTHON: "C:\\Python27-x64\\python.exe" + + - JOB: "3.5.2 64-bit" + PYTHON_HOME: "C:\\Python35-x64" + TOXENV: "py35-cov" + TOXPYTHON: "C:\\Python35-x64\\python.exe" + + - JOB: "3.6.0 64-bit" + PYTHON_HOME: "C:\\Python36-x64" + TOXENV: "py36-cov" + TOXPYTHON: "C:\\Python36-x64\\python.exe" + +install: + # If there is a newer build queued for the same PR, cancel this one. + # The AppVeyor 'rollout builds' option is supposed to serve the same + # purpose but it is problematic because it tends to cancel builds pushed + # directly to master instead of just PR builds (or the converse). + # credits: JuliaLang developers. + - ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod ` + https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | ` + Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { ` + throw "There are newer queued builds for this pull request, failing early." 
} + + # Prepend Python to the PATH of this build + - "SET PATH=%PYTHON_HOME%;%PYTHON_HOME%\\Scripts;%PATH%" + + # check that we have the expected version and architecture for Python + - "python --version" + - "python -c \"import struct; print(struct.calcsize('P') * 8)\"" + + # upgrade pip and setuptools to avoid out-of-date warnings + - "python -m pip install --disable-pip-version-check --user --upgrade pip setuptools" + + # install the dependencies to run the tests + - "python -m pip install tox" + + +build: false + +test_script: + - "tox" + +after_test: + - "tox -e codecov" + +notifications: + - provider: Email + to: + - fonttools-dev@googlegroups.com + on_build_success: false + on_build_failure: true + on_build_status_changed: true diff -Nru fonttools-3.0/.codecov.yml fonttools-3.21.2/.codecov.yml --- fonttools-3.0/.codecov.yml 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/.codecov.yml 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +comment: false +coverage: + status: + project: off + patch: off diff -Nru fonttools-3.0/.coveragerc fonttools-3.21.2/.coveragerc --- fonttools-3.0/.coveragerc 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/.coveragerc 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,34 @@ +[run] +# measure 'branch' coverage in addition to 'statement' coverage +# See: http://coverage.readthedocs.org/en/coverage-4.0.3/branch.html#branch +branch = True + +# list of directories or packages to measure +source = fontTools + +# these are treated as equivalent when combining data +[paths] +source = + Lib/fontTools + .tox/*/lib/python*/site-packages/fontTools + .tox/pypy*/site-packages/fontTools + +[report] +# Regexes for lines to exclude from consideration +exclude_lines = + # keywords to use in inline comments to skip coverage + pragma: no cover + + # don't complain if tests don't hit defensive assertion code + raise AssertionError + raise NotImplementedError + + # don't complain if non-runnable code isn't run + if 0: + if __name__ 
== .__main__.: + +# ignore source code that can’t be found +ignore_errors = True + +# when running a summary report, show missing lines +show_missing = True diff -Nru fonttools-3.0/debian/changelog fonttools-3.21.2/debian/changelog --- fonttools-3.0/debian/changelog 2015-10-30 04:57:39.000000000 +0000 +++ fonttools-3.21.2/debian/changelog 2020-01-02 20:02:21.000000000 +0000 @@ -1,3 +1,113 @@ +fonttools (3.21.2-1~16.04.sav0) xenial; urgency=medium + + * Backport to Xenial + + -- Rob Savoury Thu, 02 Jan 2020 12:02:21 -0800 + +fonttools (3.21.2-1) unstable; urgency=medium + + * Team upload. + * New upstream release 3.21.2. + * debian/copyright: + - Upstream changed license from old MIT to Expat + - Properly document copyright of generated files + * debian/rules: + - Bump Standards-Version to 4.1.3. + - Change Vcs fields to salsa.debian.org. + * Add lintian overrides for documenting licenses of certain files. + + -- Yao Wei (魏銘廷) Wed, 17 Jan 2018 23:48:30 +0800 + +fonttools (3.20.1-1) unstable; urgency=medium + + * Team upload + * New upstream release + * debian/copyright: + - Use copyright-format 1.0 + - Add several known copyright information of test fonts (Closes: #878643) + * debian/control: replace python-sphinx to python3-sphinx + * debian/rules: add override_dh_auto_test + * Build unicodeData library from source + + -- Yao Wei (魏銘廷) Fri, 15 Dec 2017 12:28:17 +0800 + +fonttools (3.19.0-1) unstable; urgency=medium + + * Team upload + * New upstream release + * Add several Build-Depends for build tests + * Depend on python-brotli, python-gi, python-reportlab, and python-sympy + + -- Jeremy Bicha Sat, 18 Nov 2017 16:34:48 -0500 + +fonttools (3.16.0-2) unstable; urgency=medium + + * Team upload + * debian/control: + - Have fonttools depend on python3-pkg-resources + + -- Jeremy Bicha Sun, 29 Oct 2017 21:11:41 -0400 + +fonttools (3.16.0-1) unstable; urgency=medium + + * Team upload + * New upstream version 3.16.0 + + * add Replaces:/Breaks: also for python3-fonttools 
(closes: #878670) + + -- Rene Engelhard Sun, 15 Oct 2017 18:13:56 +0200 + +fonttools (3.15.1-3) unstable; urgency=medium + + * Team upload + * Separate the package to multiple packages (Closes: #877165) + * Provide python2 version of fonttools (Closes: #876439) + * Change the section from fonts to devel + * Set Standards-Version to 4.1.1 + + -- Yao Wei (魏銘廷) Tue, 03 Oct 2017 11:52:42 +0800 + +fonttools (3.15.1-2) unstable; urgency=medium + + * Team upload. + * Upload to unstable + + -- Hideki Yamane Sun, 03 Sep 2017 18:09:16 +0900 + +fonttools (3.15.1-1) experimental; urgency=medium + + * Team upload. + * New upstream release (Closes: #872448) + * debian/control + - set debhelper (>= 10) + - adjust python build dependency + - add python-sphinx since upstream use rst files. + - set Debian Fonts Task Force as Maintainer + - set Standards-Version to 4.1.0 + - update Homepage: (Closes: #838374) + - add Vcs-* + - add X-Python3-Version: >= 3.4 + * debian/compat + - set 10 + * debian/rules + - simplify it + - deal with upstream's change + - specify sphinx doc directory + * debian/docs + - update it to include rst files + * debian/stripinstall + - unnecessary with upstream's change, drop it + * debian/watch + - update to version4 + * debian/doc-base + - once drop it + * debian/patches + - add 0001-add-module-path-for-automodule-directive.patch + * debian/clean + - clean sphinx build files + + -- Hideki Yamane Mon, 28 Aug 2017 20:38:26 +0900 + fonttools (3.0-1) unstable; urgency=medium * New upstream release diff -Nru fonttools-3.0/debian/clean fonttools-3.21.2/debian/clean --- fonttools-3.0/debian/clean 2013-06-12 04:27:41.000000000 +0000 +++ fonttools-3.21.2/debian/clean 2018-01-17 15:48:30.000000000 +0000 @@ -1,2 +1 @@ -build/NEWS -build/README +Doc/_build diff -Nru fonttools-3.0/debian/compat fonttools-3.21.2/debian/compat --- fonttools-3.0/debian/compat 2013-06-12 04:26:40.000000000 +0000 +++ fonttools-3.21.2/debian/compat 2018-01-17 15:48:30.000000000 +0000 @@ -1 +1 
@@ -9 +10 diff -Nru fonttools-3.0/debian/control fonttools-3.21.2/debian/control --- fonttools-3.0/debian/control 2015-10-30 04:54:39.000000000 +0000 +++ fonttools-3.21.2/debian/control 2018-01-17 15:48:30.000000000 +0000 @@ -1,25 +1,114 @@ Source: fonttools -Section: fonts +Section: devel Priority: optional -Maintainer: Luke Faraone +Maintainer: Debian Fonts Task Force +Uploaders: Luke Faraone Build-Depends: - debhelper (>= 9), - python (>= 2.6.6-3~), - python-all (>= 2.6.6-3~), + debhelper (>= 10), + dh-python, + gir1.2-atk-1.0 , + gir1.2-gtk-3.0 , + python-all, + python-brotli (>= 0.6.0) , + python-gi , python-numpy, - dh-python -Standards-Version: 3.9.6 -Homepage: http://sourceforge.net/projects/fonttools/ + python-pytest , + python-reportlab , + python-setuptools, + python-sympy , + python3-all, + python3-brotli (>= 0.6.0) , + python3-gi , + python3-numpy, + python3-pytest , + python3-reportlab , + python3-setuptools, + python3-sympy , + python3-sphinx, + unicode-data +Standards-Version: 4.1.3 +Homepage: https://github.com/fonttools/fonttools +Vcs-Git: https://salsa.debian.org/fonts-team/fonttools.git +Vcs-Browser: https://salsa.debian.org/fonts-team/fonttools +X-Python-Version: >= 2.7 +X-Python3-Version: >= 3.4 -Package: fonttools +Package: python3-fonttools +Section: python Architecture: all Depends: + gir1.2-atk-1.0, + gir1.2-gtk-3.0, + python3-brotli (>= 0.6.0), + python3-gi, + python3-numpy, + python3-pkg-resources, + python3-reportlab, + python3-sympy, + ${misc:Depends}, + ${sphinxdoc:Depends}, + ${python3:Depends} +Replaces: fonttools (<< 3.15.1-3) +Breaks: fonttools (<< 3.15.1-3) +Description: Converts OpenType and TrueType fonts to and from XML + FontTools/TTX is a library to manipulate font files from Python. + It supports reading and writing of TrueType/OpenType fonts, reading + and writing of AFM files, reading (and partially writing) of PS Type 1 + fonts. 
It also contains a tool called "TTX" which converts + TrueType/OpenType fonts to and from an XML-based format. + . + This is the Python 3 version of the fontTools package. + +Package: python-fonttools +Section: python +Architecture: all +Depends: + gir1.2-atk-1.0, + gir1.2-gtk-3.0, + python-brotli (>= 0.6.0), + python-gi, python-numpy, + python-pkg-resources, + python-reportlab, + python-sympy, ${misc:Depends}, ${python:Depends} +Replaces: fonttools (<< 3.15.1-1) +Breaks: fonttools (<< 3.15.1-1) +Description: Converts OpenType and TrueType fonts to and from XML + FontTools/TTX is a library to manipulate font files from Python. + It supports reading and writing of TrueType/OpenType fonts, reading + and writing of AFM files, reading (and partially writing) of PS Type 1 + fonts. It also contains a tool called "TTX" which converts + TrueType/OpenType fonts to and from an XML-based format. + . + This is the Python 2 version of the fontTools package. + +Package: python-fonttools-doc +Section: doc +Architecture: all +Depends: + ${sphinxdoc:Depends}, + ${misc:Depends} +Replaces: fonttools (<< 3.15.1-1) +Breaks: fonttools (<< 3.15.1-1) +Description: Converts OpenType and TrueType fonts to and from XML + FontTools/TTX is a library to manipulate font files from Python. + It supports reading and writing of TrueType/OpenType fonts, reading + and writing of AFM files, reading (and partially writing) of PS Type 1 + fonts. It also contains a tool called "TTX" which converts + TrueType/OpenType fonts to and from an XML-based format. + . + This is the sphinx documentation of the fontTools package. + +Package: fonttools +Depends: python3, python3-fonttools, ${misc:Depends} +Architecture: all Description: Converts OpenType and TrueType fonts to and from XML FontTools/TTX is a library to manipulate font files from Python. It supports reading and writing of TrueType/OpenType fonts, reading and writing of AFM files, reading (and partially writing) of PS Type 1 fonts. 
It also contains a tool called "TTX" which converts TrueType/OpenType fonts to and from an XML-based format. + . + This provides the executables of fontTools package. diff -Nru fonttools-3.0/debian/copyright fonttools-3.21.2/debian/copyright --- fonttools-3.0/debian/copyright 2015-10-30 04:58:57.000000000 +0000 +++ fonttools-3.21.2/debian/copyright 2018-01-17 15:48:30.000000000 +0000 @@ -1,41 +1,214 @@ -This package was debianized by Anthony Fok on -Mon, 16 Sep 2002 01:26:10 +0800. -This package was adopted by Paul Wise on -Fri, 07 Oct 2005 12:27:52 +0800 -This package was adopted by Luke Faraone on -Fri, 30 Oct 2015 04:57:37 +0000 - -Downloaded from: - - https://github.com/behdad/fonttools/ - -Author: - - Just van Rossum et al - -Copyright: - - Copyright 1996-2004 Just van Rossum - Copyright (c) 2000 BeOpen.com. All Rights Reserved. - Copyright (c) 1995-2001 Corporation for National Research Initiatives. All Rights Reserved. - Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. All Rights Reserved. - -Licence: - -Permission to use, copy, modify, and distribute this software and -its documentation for any purpose and without fee is hereby granted, -provided that the above copyright notice appear in all copies and -that both that copyright notice and this permission notice appear -in supporting documentation, and that the names of Just van Rossum -or Letterror not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior -permission. 
- -JUST VAN ROSSUM AND LETTERROR DISCLAIM ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL JUST VAN ROSSUM OR -LETTERROR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL -DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR -PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: fontTools +Upstream-Contact: Just van Rossum +Source: https://github.com/fonttools/fonttools/ +Files-Excluded: Lib/fontTools/unicodedata/Blocks.py + Lib/fontTools/unicodedata/ScriptExtensions.py + Lib/fontTools/unicodedata/Scripts.py + Lib/fontTools/ttLib/tables/__init__.py + +Files: * +Copyright: 1996- Just van Rossum + 2000 BeOpen.com + 1995-2001 Corporation for National Research Initiatives + 1991-1995 Stichting Mathematisch Centrum, Amsterdam +License: Expat + +Files: Tests/subset/data/Lobster.subset.ttx +Copyright: 2010 Pablo Impallari +Comment: With Reserved Font Name Lobster +License: OFL-1.1 + +Files: Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-* + Tests/varLib/data/master_ufo/TestFamily3-* +Copyright: 2015 Google Inc. +License: OFL-1.1 + +Files: Tests/subset/data/expect_keep_math.ttx + Tests/subset/data/TestMATH-Regular.ttx +Copyright: 2009-2012 Khaled Hosny + 2001-2011 STI Pub Companies + 1998-2003 MicroPress, Inc. + 1990 Elsevier, Inc. +Comment: With Reserved Font Name STIX Fonts, TM Math +License: OFL-1.1 + +Files: Lib/fontTools/agl.py +Copyright: 2003, 2005-2008, 2010 Adobe Systems Incorporated +Comment: File includes Adobe AGLFN +License: BSD-3-Clause-Adobe + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + . 
+ Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + . + Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + . + Neither the name of Adobe Systems Incorporated nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + . + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Files: Lib/fontTools/unicodedata/Blocks.py + Lib/fontTools/unicodedata/ScriptExtensions.py + Lib/fontTools/unicodedata/Scripts.py +Copyright: 1991-2017 Unicode, Inc. 
+License: X11-Unicode + Permission is hereby granted, free of charge, to any person obtaining + a copy of the Unicode data files and any associated documentation + (the "Data Files") or Unicode software and any associated documentation + (the "Software") to deal in the Data Files or Software + without restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, and/or sell copies of + the Data Files or Software, and to permit persons to whom the Data Files + or Software are furnished to do so, provided that either + (a) this copyright and permission notice appear with all copies + of the Data Files or Software, or + (b) this copyright and permission notice appear in associated + Documentation. + . + THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS + NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL + DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + PERFORMANCE OF THE DATA FILES OR SOFTWARE. + . + Except as contained in this notice, the name of a copyright holder + shall not be used in advertising or otherwise to promote the sale, + use or other dealings in these Data Files or Software without prior + written authorization of the copyright holder. 
+ +Files: debian/* +Copyright: 2002- Anthony Fok + 2005- Paul Wise + 2015- Luke Faraone + 2017- Hideki Yamane + 2017- Rene Engelhard + 2017- Jeremy Bicha + 2017- Yao Wei (魏銘廷) +License: Expat + +License: Expat + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + . + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + . + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +License: OFL-1.1 + ----------------------------------------------------------- + SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 + ----------------------------------------------------------- + . + PREAMBLE + The goals of the Open Font License (OFL) are to stimulate worldwide + development of collaborative font projects, to support the font creation + efforts of academic and linguistic communities, and to provide a free and + open framework in which fonts may be shared and improved in partnership + with others. + . + The OFL allows the licensed fonts to be used, studied, modified and + redistributed freely as long as they are not sold by themselves. 
The + fonts, including any derivative works, can be bundled, embedded, + redistributed and/or sold with any software provided that any reserved + names are not used by derivative works. The fonts and derivatives, + however, cannot be released under any other type of license. The + requirement for fonts to remain under this license does not apply + to any document created using the fonts or their derivatives. + . + DEFINITIONS + "Font Software" refers to the set of files released by the Copyright + Holder(s) under this license and clearly marked as such. This may + include source files, build scripts and documentation. + . + "Reserved Font Name" refers to any names specified as such after the + copyright statement(s). + . + "Original Version" refers to the collection of Font Software components as + distributed by the Copyright Holder(s). + . + "Modified Version" refers to any derivative made by adding to, deleting, + or substituting -- in part or in whole -- any of the components of the + Original Version, by changing formats or by porting the Font Software to a + new environment. + . + "Author" refers to any designer, engineer, programmer, technical + writer or other person who contributed to the Font Software. + . + PERMISSION & CONDITIONS + Permission is hereby granted, free of charge, to any person obtaining + a copy of the Font Software, to use, study, copy, merge, embed, modify, + redistribute, and sell modified and unmodified copies of the Font + Software, subject to the following conditions: + . + 1) Neither the Font Software nor any of its individual components, + in Original or Modified Versions, may be sold by itself. + . + 2) Original or Modified Versions of the Font Software may be bundled, + redistributed and/or sold with any software, provided that each copy + contains the above copyright notice and this license. 
These can be + included either as stand-alone text files, human-readable headers or + in the appropriate machine-readable metadata fields within text or + binary files as long as those fields can be easily viewed by the user. + . + 3) No Modified Version of the Font Software may use the Reserved Font + Name(s) unless explicit written permission is granted by the corresponding + Copyright Holder. This restriction only applies to the primary font name as + presented to the users. + . + 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font + Software shall not be used to promote, endorse or advertise any + Modified Version, except to acknowledge the contribution(s) of the + Copyright Holder(s) and the Author(s) or with their explicit written + permission. + . + 5) The Font Software, modified or unmodified, in part or in whole, + must be distributed entirely under this license, and must not be + distributed under any other license. The requirement for fonts to + remain under this license does not apply to any document created + using the Font Software. + . + TERMINATION + This license becomes null and void if any of the above conditions are + not met. + . + DISCLAIMER + THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT + OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE + COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL + DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM + OTHER DEALINGS IN THE FONT SOFTWARE. 
diff -Nru fonttools-3.0/debian/doc-base fonttools-3.21.2/debian/doc-base --- fonttools-3.0/debian/doc-base 2009-11-08 13:14:31.000000000 +0000 +++ fonttools-3.21.2/debian/doc-base 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -Document: fonttools -Title: fonttools/ttx Manual -Author: Just van Rossum -Abstract: This manual describes fonttools and ttx. -Section: Programming - -Format: HTML -Index: /usr/share/doc/fonttools/documentation.html -Files: /usr/share/doc/fonttools/documentation.html diff -Nru fonttools-3.0/debian/docs fonttools-3.21.2/debian/docs --- fonttools-3.0/debian/docs 2015-10-30 04:53:00.000000000 +0000 +++ fonttools-3.21.2/debian/docs 2018-01-17 15:48:30.000000000 +0000 @@ -1,2 +1,2 @@ -build/README -Doc/documentation.html +NEWS.rst +README.rst diff -Nru fonttools-3.0/debian/fonttools.install fonttools-3.21.2/debian/fonttools.install --- fonttools-3.0/debian/fonttools.install 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/debian/fonttools.install 2018-01-17 15:48:30.000000000 +0000 @@ -0,0 +1,2 @@ +debian/tmp/usr/bin/* usr/bin/ +debian/tmp/usr/share/man/* usr/share/man/ diff -Nru fonttools-3.0/debian/patches/0001-add-module-path-for-automodule-directive.patch fonttools-3.21.2/debian/patches/0001-add-module-path-for-automodule-directive.patch --- fonttools-3.0/debian/patches/0001-add-module-path-for-automodule-directive.patch 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/debian/patches/0001-add-module-path-for-automodule-directive.patch 2018-01-17 15:48:30.000000000 +0000 @@ -0,0 +1,21 @@ +From: Hideki Yamane +Date: Mon, 28 Aug 2017 20:10:24 +0900 +Subject: add module path for automodule directive + +--- + Doc/source/conf.py | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/Doc/source/conf.py b/Doc/source/conf.py +index a3b2be2..eb046bd 100644 +--- a/Doc/source/conf.py ++++ b/Doc/source/conf.py +@@ -20,6 +20,8 @@ + # import sys + # sys.path.insert(0, os.path.abspath('.')) + ++import sys,os 
++sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../Lib') + + # -- General configuration ------------------------------------------------ + diff -Nru fonttools-3.0/debian/patches/series fonttools-3.21.2/debian/patches/series --- fonttools-3.0/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/debian/patches/series 2018-01-17 15:48:30.000000000 +0000 @@ -0,0 +1 @@ +0001-add-module-path-for-automodule-directive.patch diff -Nru fonttools-3.0/debian/python3-fonttools.install fonttools-3.21.2/debian/python3-fonttools.install --- fonttools-3.0/debian/python3-fonttools.install 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/debian/python3-fonttools.install 2018-01-17 15:48:30.000000000 +0000 @@ -0,0 +1 @@ +debian/tmp/usr/lib/python3* usr/lib/ diff -Nru fonttools-3.0/debian/python-fonttools.install fonttools-3.21.2/debian/python-fonttools.install --- fonttools-3.0/debian/python-fonttools.install 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/debian/python-fonttools.install 2018-01-17 15:48:30.000000000 +0000 @@ -0,0 +1 @@ +debian/tmp/usr/lib/python2* usr/lib/ diff -Nru fonttools-3.0/debian/rules fonttools-3.21.2/debian/rules --- fonttools-3.0/debian/rules 2015-10-30 04:54:27.000000000 +0000 +++ fonttools-3.21.2/debian/rules 2018-01-17 15:48:30.000000000 +0000 @@ -1,14 +1,21 @@ #!/usr/bin/make -f + +export LC_ALL=C.UTF-8 + %: - +dh $@ --with python2 --buildsystem=pybuild + dh $@ --with python2,python3,sphinxdoc --buildsystem=pybuild override_dh_auto_build: - dh_auto_build --buildsystem=pybuild - cp Doc/changes.txt build/NEWS - sed -f debian/stripinstall < Doc/install.txt > build/README + python3 MetaTools/buildUCD.py --ucd-path=/usr/share/unicode + PYTHONPATH="./Lib" python3 MetaTools/buildTableList.py + dh_auto_build + +override_dh_sphinxdoc: + sphinx-build $(CURDIR)/Doc/source $(CURDIR)/debian/python-fonttools-doc/usr/share/doc/fonttools/html/ + dh_sphinxdoc -override_dh_install: - dh_install 
--package=fonttools +override_dh_auto_test: +ifeq ($(filter nocheck,$(DEB_BUILD_PROFILES)),) + dh_auto_test +endif -override_dh_installchangelogs: - dh_installchangelogs Doc/changes.txt diff -Nru fonttools-3.0/debian/source.lintian-overrides fonttools-3.21.2/debian/source.lintian-overrides --- fonttools-3.0/debian/source.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/debian/source.lintian-overrides 2018-01-17 15:48:30.000000000 +0000 @@ -0,0 +1,7 @@ +# The copyright paragraph is for a file generated during build time. The +# license is from another package but the distribution of the file requires the +# license to be included: +fonttools source: wildcard-matches-nothing-in-dep5-copyright Lib/fontTools/unicodedata/Blocks.py (paragraph at line 65) +fonttools source: wildcard-matches-nothing-in-dep5-copyright Lib/fontTools/unicodedata/ScriptExtensions.py (paragraph at line 65) +fonttools source: wildcard-matches-nothing-in-dep5-copyright Lib/fontTools/unicodedata/Scripts.py (paragraph at line 65) +fonttools source: unused-file-paragraph-in-dep5-copyright paragraph at line 65 diff -Nru fonttools-3.0/debian/stripinstall fonttools-3.21.2/debian/stripinstall --- fonttools-3.0/debian/stripinstall 2009-11-08 13:14:31.000000000 +0000 +++ fonttools-3.21.2/debian/stripinstall 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -/^Installation$/{ - :1 - N - /^\n$/d - s/.*\n// - b1 -} diff -Nru fonttools-3.0/debian/watch fonttools-3.21.2/debian/watch --- fonttools-3.0/debian/watch 2015-10-30 04:56:32.000000000 +0000 +++ fonttools-3.21.2/debian/watch 2018-01-17 15:48:30.000000000 +0000 @@ -1,3 +1,2 @@ -version=3 -opts=filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/fonttools-$1\.tar\.gz/ \ - https://github.com/behdad/fonttools/tags .*/v?(\d\S*)\.tar\.gz +version=4 +https://github.com/fonttools/@PACKAGE@/tags .*/v*@ANY_VERSION@@ARCHIVE_EXT@ diff -Nru fonttools-3.0/dev-requirements.txt fonttools-3.21.2/dev-requirements.txt --- fonttools-3.0/dev-requirements.txt 
1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/dev-requirements.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,4 @@ +pytest>=3.0 +tox>=2.5 +bump2version>=0.5.6 +sphinx>=1.5.5 diff -Nru fonttools-3.0/Doc/changes.txt fonttools-3.21.2/Doc/changes.txt --- fonttools-3.0/Doc/changes.txt 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Doc/changes.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,161 +0,0 @@ -TTX/FontTools Version 2.4 -- Option to write to arbitrary files -- Better dump format for DSIG -- Better detection of OTF XML -- Fix issue with Apple's kern table format -- Fix mangling of TT glyph programs -- Fix issues related to mona.ttf -- Fix Windows Installer instructions -- Fix some modern MacOS issues -- Fix minor issues and typos - -TTX/FontTools Version 2.3 - -- TrueType Collection (TTC) support -- Python 2.6 support -- Update Unicode data to 5.2.0 -- Couple of bug fixes - -TTX/FontTools Version 2.2 - -- ClearType support -- cmap format 1 support -- PFA font support -- Switched from Numeric to numpy -- Update Unicode data to 5.1.0 -- Update AGLFN data to 1.6 -- Many bug fixes - -TTX/FontTools Version 2.1 - -- Many years worth of fixes and features - -TTX/FontTools Version 2.0 beta 2 (released ??? 2002) - -- Be "forgiving" when interpreting the maxp table version field: - interpret any value as 1.0 if it's not 0.5. Fixes dumping of these - GPL fonts: http://www.freebsd.org/cgi/pds.cgi?ports/chinese/wangttf -- Fixed ttx -l: it turned out this part of the code didn't work with - Python 2.2.1 and earlier. My bad to do most of my testing with a - different version than I shipped TTX with :-( -- Fixed bug in ClassDef format 1 subtable (Andreas Seidel bumped into - this one). - -TTX/FontTools Version 2.0 beta 1 (released September 10 2002) - -- Fixed embarrassing bug: the master checksum in the head table is now - calculated correctly even on little-endian platforms (such as Intel). 
-- Made the cmap format 4 compiler smarter: the binary data it creates is - now more or less as compact as possible. TTX now makes more compact - data than in any shipping font I've tested it with. -- Dump glyph names as a separate "GlyphOrder" pseudo table as opposed to - as part of the glyf table (obviously needed for CFF-OTF's). -- Added proper support for the CFF table. -- Don't barf on empty tables (questionable, but "there are font out there...") -- When writing TT glyf data, align glyphs on 4-byte boundaries. This seems - to be the current recommendation by MS. Also: don't barf on fonts which - are already 4-byte aligned. -- Windows installer contributed bu Adam Twardoch! Yay! -- Changed the command line interface again, now by creating one new tool - replacing the old ones: ttx - It dumps and compiles, depending on input file types. The options have - changed somewhat. - - The -d option is back (output dir) - - ttcompile's -i options is now called -m (as in "merge"), to avoid clash - with dump's -i. - - The -s option ("split tables") no longer creates a directory, - but instead outputs a small .ttx file containing references to the - individual table files. This is not a true link, it's a simple file - name, and the referenced file should be in the same directory so - ttcompile can find them. - - compile no longer accepts a directory as input argument. Instead it - can parse the new "mini-ttx" format as output by "ttx -s". - - all arguments are input files -- Renamed the command line programs and moved them to the Tools - subdirectory. They are now installed by the setup.py install script. -- Added OpenType support. BASE, GDEF, GPOS, GSUB and JSTF are (almost) - fully supported. The XML output is not yet final, as I'm still - considering to output certain subtables in a more human-friendly - manner. -- Fixed 'kern' table to correctly accept subtables it doesn't know about, - as well as interpreting Apple's definition of the 'kern' table headers - correctly. 
-- Fixed bug where glyphnames were not calculated from 'cmap' if it was - (one of the) first tables to be decompiled. More specifically: it cmap - was the first to ask for a glyphID -> glyphName mapping. -- Switched XML parsers: use expat instead of xmlproc. Should be faster. -- Removed my UnicodeString object: I now require Python 2.0 or up, which - has unicode support built in. -- Removed assert in glyf table: redundant data at the end of the table - is now ignored instead of raising an error. Should become a warning. -- Fixed bug in hmtx/vmtx code that only occured if all advances were equal. -- Fixed subtle bug in TT instruction disassembler. -- Couple of fixes to the 'post' table. -- Updated OS/2 table to latest spec. - -TTX/FontTools Version 1.0 beta 1 (released August 10 2001) - -- Reorganized the command line interface for ttDump.py and ttCompile.py, - they now behave more like "normal" command line tool, in that they accept - multiple input files for batch processing. -- ttDump.py and ttCompile.py don't silently override files anymore, but ask - before doing so. Can be overridden by -f. -- Added -d option to both ttDump.py and ttCompile.py. -- Installation is now done with distutils. (Needs work for environments without - compilers.) -- Updated installation instructions. -- Added some workarounds so as to handle certain buggy fonts more gracefully. -- Updated Unicode table to Unicode 3.0 (Thanks Antoine!) -- Included a Python script by Adam Twardoch that adds some useful stuff to the - Windows registry. -- Moved the project to SourceForge. - -TTX/FontTools Version 1.0 alpha 6 (released March 15 2000) - -- Big reorganization: made ttLib a subpackage of the new fontTools package, - changed several module names. Called the entire suite "FontTools" -- Added several submodules to fontTools, some new, some older. 
-- Added experimental CFF/GPOS/GSUB support to ttLib, read-only (but XML dumping - of GPOS/GSUB is for now disabled) -- Fixed hdmx endian bug -- Added -b option to ttCompile.py, it disables recalculation of bounding boxes, - as requested by Werner Lemberg. -- Renamed tt2xml.pt to ttDump.py and xml2tt.py to ttCompile.py -- Use ".ttx" as file extension instead of ".xml". -- TTX is now the name of the XML-based *format* for TT fonts, and not just - an application. - -Version 1.0 alpha 5 (never released) - -- More tables supported: hdmx, vhea, vmtx - -Version 1.0 alpha 3 & 4 (never released) - -- fixed most portability issues -- retracted the "Euro_or_currency" change from 1.0a2: it was nonsense! - -Version 1.0 alpha 2 (released as binary for MacOS, 2 May 1999) - -- genenates full FOND resources: including width table, PS - font name info and kern table if applicable. -- added cmap format 4 support. Extra: dumps Unicode char names as XML comments! -- added cmap format 6 support -- now accepts true type files starting with "true" - (instead of just 0x00010000 and "OTTO") -- 'glyf' table support is now complete: I added support for composite scale, - xy-scale and two-by-two for the 'glyf' table. For now, component offset scale - behaviour defaults to Apple-style. This only affects the (re)calculation of - the glyph bounding box. -- changed "Euro" to "Euro_or_currency" in the Standard Apple Glyph order list, - since we cannot tell from the 'post' table which is meant. I should probably - doublecheck with a Unicode encoding if available. (This does not affect the - output!) 
- -Fixed bugs: -- 'hhea' table is now recalculated correctly -- fixed wrong assumption about sfnt resource names - -Version 1.0 alpha 1 (27 Apr 1999) - -- initial binary release for MacOS - diff -Nru fonttools-3.0/Doc/documentation.html fonttools-3.21.2/Doc/documentation.html --- fonttools-3.0/Doc/documentation.html 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Doc/documentation.html 1970-01-01 00:00:00.000000000 +0000 @@ -1,104 +0,0 @@ - - - -TTX Documentation - - - - - -

TTX -- From OpenType and TrueType to XML and Back

- -TTX is a tool for manipulating TrueType and OpenType fonts. It is written in Python and has a BSD-style, open-source licence -- see LICENSE.txt. Among other things this means you can use it free of charge. It's hosted at sourceforge.net. - -

-TTX can dump TrueType and OpenType fonts to an XML-based text format, which is also called TTX. TTX files have a .ttx file extension. - -

How to use TTX

- -The TTX application works can be used in two ways, depending on what platform you run it on: - -
    -
  • As a command line tool (Windows/DOS, Unix, MacOSX)
  • -
  • By dropping files onto the application (Windows, MacOS)
  • -
- -

-TTX detects what kind of files it is fed: it will output a .ttx file when it sees a .ttf or .otf, and it will compile a .ttf or .otf when the input file is a .ttx file. By default, the output file is created in the same folder as the input file, and will have the same name as the input file but with a different extension. TTX will never overwrite existing files, but if necessary will append a unique number to the output filename (before the extension), eg.: "Arial#1.ttf". - -

-When using TTX from the command line there are a bunch of extra options, these are explained in the help text, as displayed when typing "ttx -h" at the command prompt. These additional options include: -

    -
  • specifying the folder where the output files are created
  • -
  • specifying which tables to dump or which tables to exclude
  • -
  • merging partial .ttx files with existing .ttf or .otf files
  • -
  • listing brief table info isntead of dumping to .ttx
  • -
  • splitting tables to separate .ttx files
  • -
  • disabling TT instruction disassembly
  • -
- -

The TTX file format

- -The following tables are currently supported: -
- -BASE, CBDT, CBLC, CFF, COLR, CPAL, DSIG, EBDT, EBLC, FFTM, GDEF, GMAP, GPKG, GPOS, GSUB, JSTF, LTSH, MATH, META, OS/2, SING, SVG, TSI0, TSI1, TSI2, TSI3, TSI5, TSIB, TSID, TSIJ, TSIP, TSIS, TSIV, VDMX, VORG, avar, cmap, cvt, feat, fpgm, fvar, gasp, glyf, gvar, hdmx, head, hhea, hmtx, kern, loca, ltag, maxp, meta, name, post, prep, sbix, vhea and vmtx - -
-Other tables are dumped as hexadecimal data. - -

-TrueType fonts use glyph indices (GlyphID's) to refer to glyphs in most places. -While this is fine in binary form, it is really hard to work with for -humans. Therefore we use names instead. - -

The glyph names are either extracted from the 'CFF ' table or the 'post' table, -or are derived from a Unicode 'cmap' table. In the latter case the Adobe Glyph List -is used to calculate names based on Unicode values. If all of these mthods fail, -names are invented based on GlyphID (eg. "glyph00142"). - -

It is possible that different glyphs use the same name. If this happens, -we force the names to be unique by appending "#n" to the name (n being an -integer number). The original names are being kept, so this has no influence -on a "round tripped" font. - -

Because the order in which glyphs are stored inside the TT font is -important, we maintain an ordered list of glyph names in the font. - - -

Development and feedback

- -TTX/FontTools development is ongoing, but often goes in spurts. Feature requests and bug reports are always welcome. The best place for these is currently the fonttools-discussion mailing list at SourceForge. This list is both for discussion TTX from an end-user perspective as well as TTX/FontTools development. Subscription info can be found if you follow the "Mailing Lists" link at the SourceForge project page. You can also email me directly at just@letterror.com. - -

-Let me take this opportunity to mention that if you have special needs (eg. custom font monipulators, dufferent table formats, etc.): I am available for contracting. - -

Credits

- -Windows setup script: Adam Twardoch -
Icon: Hannes Famira - -

Acknowledgements

- -(in alphabetical order) -Erik van Blokland, Petr van Blokland, Jelle Bosma, Vincent Connare, -Simon Daniels, Hannes Famira, Yannis Haralambous, Greg Hitchcock, John Hudson, -Jack Jansen, Tom Kacvinsky, Antoine Leca, Werner Lemberg, Tal Leming, -Peter Lofting, Dave Opstad, Laurence Penney, Read Roberts, Guido van Rossum, Andreas Seidel, Adam Twardoch. - -

Copyrights

- -FontTools/TTX -
1999-2003 Just van Rossum; LettError (just@letterror.com). See LICENSE.txt for the full license. -

-Python -
Copyright (c) 2001-2003 Python Software Foundation. All Rights Reserved. -
Copyright (c) 2000 BeOpen.com. All Rights Reserved. -
Copyright (c) 1995-2001 Corporation for National Research Initiatives. All Rights Reserved. -
Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. All Rights Reserved. -

-Numeric Python (NumPy) -
Copyright (c) 1996. The Regents of the University of California. All rights reserved. - - - diff -Nru fonttools-3.0/Doc/install.txt fonttools-3.21.2/Doc/install.txt --- fonttools-3.0/Doc/install.txt 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Doc/install.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,110 +0,0 @@ -TTX/FontTools - -TTX/FontTools is a suite of tools for manipulating fonts. It is written in -Python and has a BSD-style, open-source licence -- see LICENSE.txt. -It's hosted at http://sourceforge.net/. - -The flagship is TTX, a tool to convert OpenType and TrueType font files to -an XML-based format (also called TTX), and back. This lets you edit TTF or -OTF files with any text editor. - -The FontTools library currently reads and writes TrueType font files, reads -PostScript Type 1 fonts and more. - - -Scope - -TTX/FontTools' functionality is aimed towards font developers and font tool -developers. It can of course be used to just access fonts (outlines, -metrics, etc.) but it is not optimized for that. It will be further -developed so it can be the core of any font editor. And that's exactly -what it will be for our upcoming major rewrite of RoboFog, our (commercial) -PythonPowered font editor for MacOS. - - -Installation - -For Windows and MacOS there are easy-to-use TTX installers. The rest if this -document is meant for people who want to use TTX/FontTools from the source. - -You need the following software: - -Python - The fresh versions as well as older versions (You need 2.0 or higher) - can be downloaded from - http://www.python.org/download/ - or here - http://sourceforge.net/projects/python/ - - Windows: grab the Windows installer, run the full install. - Un*x: follow the build instructions. - MacOS: grab the installer, run "Easy Install" - -The numpy extension - See http://numpy.scipy.org/ - -Now run the "setup.py" script from the FontTools archive. 
This will install -all the modules in the right places, as well as tries to compile the one -(optional) C extension contained in FontTools. On Unix it also installs the -"ttx" command line tool. This tool can also be used on Windows, but might -need some fiddling. - -For instructions how to build a standalone Windows installer, see -Windows/README.TXT. Thanks a LOT to Adam Twardoch for this essential -contribution. - -For TTX usage instructions, see the file "documentation.html". - - -Feedback - -Please join the fonttools-discussion mailing list at SourceForge. Subscription -info can be found if you follow the "Mailing Lists" link at the SourceForge -project page: - http://sourceforge.net/projects/fonttools/ -You can also email me directly at just@letterror.com. - -If you want to follow the development of FontTools closely, or would like to -contribute, you can also subscribe to the fonttools-checkins mailing list. - - -Anonymous VCS access - -The FontTools sources are also accessible here: - http://sourceforge.net/projects/fonttools/ -Let me know if you'd like to become a co-developer. - - -Developer documentation - -Sorry, documentation beyond doc strings in the source code is still on my to-do list... -Below follows a brief overview of what's there. - - -The library - - Cross-platform - fontTools.t1Lib -- Provides a Type 1 font reader. Writing is a planned feature. - fontTools.ttLib -- Extensive TrueType tools. Reads and writes. This is the flagship - of FontTools, it's by far the most mature component. Contains a completely modular - TTF table converter architecture. See ttLib/tables/table_API_readme.txt. - fontTools.afmLib -- And AFM file reader/writer. - fontTools.cffLib -- Reads CFF fonts. Writing is a planned feature. - fontTools.unicode -- A simple (but large) module that translates - Unicode values to their descriptive names. Still Unicode 2.0. - fontTools.agl -- Interface to the Adobe Glyph List: maps unicode values - to glyph names and back. 
- - -Thank-you's - -(in alphabetical order) -Erik van Blokland, Petr van Blokland, Jelle Bosma, Vincent Connare, -Simon Daniels, Hannes Famira, Greg Hitchcock, John Hudson, Jack Jansen, -Antoine Leca, Werner Lemberg, Peter Lofting, Dave Opstad, Laurence Penney, -Guido van Rossum, Adam Twardoch. - -Copyrights - -FontTools/TTX -- 1999-2002 Just van Rossum; Letterror (just@letterror.com) -See LICENCE.txt for the full license. diff -Nru fonttools-3.0/Doc/make.bat fonttools-3.21.2/Doc/make.bat --- fonttools-3.0/Doc/make.bat 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/make.bat 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,36 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build +set SPHINXPROJ=fontTools + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff -Nru fonttools-3.0/Doc/Makefile fonttools-3.21.2/Doc/Makefile --- fonttools-3.0/Doc/Makefile 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/Makefile 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXPROJ = fontTools +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". 
+help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file diff -Nru fonttools-3.0/Doc/man/man1/ttx.1 fonttools-3.21.2/Doc/man/man1/ttx.1 --- fonttools-3.0/Doc/man/man1/ttx.1 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/man/man1/ttx.1 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,225 @@ +.Dd May 18, 2004 +.\" ttx is not specific to any OS, but contrary to what groff_mdoc(7) +.\" seems to imply, entirely omitting the .Os macro causes 'BSD' to +.\" be used, so I give a zero-width space as its argument. +.Os \& +.\" The "FontTools Manual" argument apparently has no effect in +.\" groff 1.18.1. I think it is a bug in the -mdoc groff package. +.Dt TTX 1 "FontTools Manual" +.Sh NAME +.Nm ttx +.Nd tool for manipulating TrueType and OpenType fonts +.Sh SYNOPSIS +.Nm +.Bk +.Op Ar option ... +.Ek +.Bk +.Ar file ... +.Ek +.Sh DESCRIPTION +.Nm +is a tool for manipulating TrueType and OpenType fonts. It can convert +TrueType and OpenType fonts to and from an +.Tn XML Ns -based format called +.Tn TTX . +.Tn TTX +files have a +.Ql .ttx +extension. +.Pp +For each +.Ar file +argument it is given, +.Nm +detects whether it is a +.Ql .ttf , +.Ql .otf +or +.Ql .ttx +file and acts accordingly: if it is a +.Ql .ttf +or +.Ql .otf +file, it generates a +.Ql .ttx +file; if it is a +.Ql .ttx +file, it generates a +.Ql .ttf +or +.Ql .otf +file. +.Pp +By default, every output file is created in the same directory as the +corresponding input file and with the same name except for the +extension, which is substituted appropriately. +.Nm +never overwrites existing files; if necessary, it appends a suffix to +the output file name before the extension, as in +.Pa Arial#1.ttf . 
+.Ss "General options" +.Bl -tag -width ".Fl t Ar table" +.It Fl h +Display usage information. +.It Fl d Ar dir +Write the output files to directory +.Ar dir +instead of writing every output file to the same directory as the +corresponding input file. +.It Fl o Ar file +Write the output to +.Ar file +instead of writing it to the same directory as the +corresponding input file. +.It Fl v +Be verbose. Write more messages to the standard output describing what +is being done. +.It Fl a +Allow virtual glyphs ID's on compile or decompile. +.El +.Ss "Dump options" +The following options control the process of dumping font files +(TrueType or OpenType) to +.Tn TTX +files. +.Bl -tag -width ".Fl t Ar table" +.It Fl l +List table information. Instead of dumping the font to a +.Tn TTX +file, display minimal information about each table. +.It Fl t Ar table +Dump table +.Ar table . +This option may be given multiple times to dump several tables at +once. When not specified, all tables are dumped. +.It Fl x Ar table +Exclude table +.Ar table +from the list of tables to dump. This option may be given multiple +times to exclude several tables from the dump. The +.Fl t +and +.Fl x +options are mutually exclusive. +.It Fl s +Split tables. Dump each table to a separate +.Tn TTX +file and write (under the name that would have been used for the output +file if the +.Fl s +option had not been given) one small +.Tn TTX +file containing references to the individual table dump files. This +file can be used as input to +.Nm +as long as the referenced files can be found in the same directory. +.It Fl i +.\" XXX: I suppose OpenType programs (exist and) are also affected. +Don't disassemble TrueType instructions. When this option is specified, +all TrueType programs (glyph programs, the font program and the +pre-program) are written to the +.Tn TTX +file as hexadecimal data instead of +assembly. This saves some time and results in smaller +.Tn TTX +files. 
+.It Fl y Ar n +When decompiling a TrueType Collection (TTC) file, +decompile font number +.Ar n , +starting from 0. +.El +.Ss "Compilation options" +The following options control the process of compiling +.Tn TTX +files into font files (TrueType or OpenType): +.Bl -tag -width ".Fl t Ar table" +.It Fl m Ar fontfile +Merge the input +.Tn TTX +file +.Ar file +with +.Ar fontfile . +No more than one +.Ar file +argument can be specified when this option is used. +.It Fl b +Don't recalculate glyph bounding boxes. Use the values in the +.Tn TTX +file as is. +.El +.Sh "THE TTX FILE FORMAT" +You can find some information about the +.Tn TTX +file format in +.Pa documentation.html . +In particular, you will find in that file the list of tables understood by +.Nm +and the relations between TrueType GlyphIDs and the glyph names used in +.Tn TTX +files. +.Sh EXAMPLES +In the following examples, all files are read from and written to the +current directory. Additionally, the name given for the output file +assumes in every case that it did not exist before +.Nm +was invoked. +.Pp +Dump the TrueType font contained in +.Pa FreeSans.ttf +to +.Pa FreeSans.ttx : +.Pp +.Dl ttx FreeSans.ttf +.Pp +Compile +.Pa MyFont.ttx +into a TrueType or OpenType font file: +.Pp +.Dl ttx MyFont.ttx +.Pp +List the tables in +.Pa FreeSans.ttf +along with some information: +.Pp +.Dl ttx -l FreeSans.ttf +.Pp +Dump the +.Sq cmap +table from +.Pa FreeSans.ttf +to +.Pa FreeSans.ttx : +.Pp +.Dl ttx -t cmap FreeSans.ttf +.Sh NOTES +On MS\-Windows and MacOS, +.Nm +is available as a graphical application to which files can be dropped. +.Sh SEE ALSO +.Pa documentation.html +.Pp +.Xr fontforge 1 , +.Xr ftinfo 1 , +.Xr gfontview 1 , +.Xr xmbdfed 1 , +.Xr Font::TTF 3pm +.Sh AUTHORS +.Nm +was written by +.An -nosplit +.An "Just van Rossum" Aq just@letterror.com . 
+.Pp +This manual page was written by +.An "Florent Rougon" Aq f.rougon@free.fr +for the Debian GNU/Linux system based on the existing FontTools +documentation. It may be freely used, modified and distributed without +restrictions. +.\" For Emacs: +.\" Local Variables: +.\" fill-column: 72 +.\" sentence-end: "[.?!][]\"')}]*\\($\\| $\\| \\| \\)[ \n]*" +.\" sentence-end-double-space: t +.\" End: \ No newline at end of file diff -Nru fonttools-3.0/Doc/source/afmLib.rst fonttools-3.21.2/Doc/source/afmLib.rst --- fonttools-3.0/Doc/source/afmLib.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/afmLib.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +###### +afmLib +###### + +.. automodule:: fontTools.afmLib + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/agl.rst fonttools-3.21.2/Doc/source/agl.rst --- fonttools-3.0/Doc/source/agl.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/agl.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +### +agl +### + +.. automodule:: fontTools.agl + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/cffLib.rst fonttools-3.21.2/Doc/source/cffLib.rst --- fonttools-3.0/Doc/source/cffLib.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/cffLib.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +###### +cffLib +###### + +.. automodule:: fontTools.cffLib + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/conf.py fonttools-3.21.2/Doc/source/conf.py --- fonttools-3.0/Doc/source/conf.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/conf.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# +# fontTools documentation build configuration file, created by +# sphinx-quickstart on Thu Apr 20 11:07:39 2017. +# +# This file is execfile()d with the current directory set to its +# containing dir. 
+# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +needs_sphinx = '1.3' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] + +autodoc_mock_imports = ['gtk'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'fontTools' +copyright = u'2017, Just van Rossum, Behdad Esfahbod et al.' +author = u'Just van Rossum, Behdad Esfahbod et al.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = u'3.10' +# The full version, including alpha/beta/rc tags. +release = u'3.10' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. 
+# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'classic' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = 'fontToolsDoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). 
+latex_documents = [ + (master_doc, 'fontTools.tex', u'fontTools Documentation', + u'Just van Rossum, Behdad Esfahbod et al.', 'manual'), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'fonttools', u'fontTools Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'fontTools', u'fontTools Documentation', + author, 'fontTools', 'A library for manipulating fonts, written in Python.', + 'Typography'), +] + diff -Nru fonttools-3.0/Doc/source/encodings.rst fonttools-3.21.2/Doc/source/encodings.rst --- fonttools-3.0/Doc/source/encodings.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/encodings.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ +######### +encodings +######### + +.. automodule:: fontTools.encodings + :members: + :undoc-members: + +codecs +------ + +.. automodule:: fontTools.encodings.codecs + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/feaLib.rst fonttools-3.21.2/Doc/source/feaLib.rst --- fonttools-3.0/Doc/source/feaLib.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/feaLib.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,43 @@ +###### +feaLib +###### + +.. automodule:: fontTools.feaLib + :members: + :undoc-members: + +ast +--- + +.. automodule:: fontTools.feaLib.ast + :members: + :undoc-members: + +builder +------- + +.. automodule:: fontTools.feaLib.builder + :members: + :undoc-members: + +error +----- + +.. automodule:: fontTools.feaLib.parser + :members: + :undoc-members: + +lexer +----- + +.. 
automodule:: fontTools.feaLib.lexer + :members: + :undoc-members: + +parser +------ + +.. automodule:: fontTools.feaLib.parser + :members: + :undoc-members: + diff -Nru fonttools-3.0/Doc/source/index.rst fonttools-3.21.2/Doc/source/index.rst --- fonttools-3.0/Doc/source/index.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/index.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,28 @@ +fontTools Docs +============== + +.. toctree:: + :maxdepth: 1 + + afmLib + agl + cffLib + inspect + encodings + feaLib + merge + misc/index + pens/index + subset + t1Lib + ttLib/index + ttx + varLib/index + voltLib + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff -Nru fonttools-3.0/Doc/source/inspect.rst fonttools-3.21.2/Doc/source/inspect.rst --- fonttools-3.0/Doc/source/inspect.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/inspect.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +####### +inspect +####### + +.. automodule:: fontTools.inspect + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/merge.rst fonttools-3.21.2/Doc/source/merge.rst --- fonttools-3.0/Doc/source/merge.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/merge.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +##### +merge +##### + +.. automodule:: fontTools.merge + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/arrayTools.rst fonttools-3.21.2/Doc/source/misc/arrayTools.rst --- fonttools-3.0/Doc/source/misc/arrayTools.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/arrayTools.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +########## +arrayTools +########## + +.. 
automodule:: fontTools.misc.arrayTools + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/bezierTools.rst fonttools-3.21.2/Doc/source/misc/bezierTools.rst --- fonttools-3.0/Doc/source/misc/bezierTools.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/bezierTools.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +########### +bezierTools +########### + +.. automodule:: fontTools.misc.bezierTools + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/classifyTools.rst fonttools-3.21.2/Doc/source/misc/classifyTools.rst --- fonttools-3.0/Doc/source/misc/classifyTools.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/classifyTools.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############# +classifyTools +############# + +.. automodule:: fontTools.misc.classifyTools + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/eexec.rst fonttools-3.21.2/Doc/source/misc/eexec.rst --- fonttools-3.0/Doc/source/misc/eexec.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/eexec.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +##### +eexec +##### + +.. automodule:: fontTools.misc.eexec + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/encodingTools.rst fonttools-3.21.2/Doc/source/misc/encodingTools.rst --- fonttools-3.0/Doc/source/misc/encodingTools.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/encodingTools.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############# +encodingTools +############# + +.. 
automodule:: fontTools.misc.encodingTools + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/fixedTools.rst fonttools-3.21.2/Doc/source/misc/fixedTools.rst --- fonttools-3.0/Doc/source/misc/fixedTools.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/fixedTools.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +########## +fixedTools +########## + +.. automodule:: fontTools.misc.fixedTools + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/index.rst fonttools-3.21.2/Doc/source/misc/index.rst --- fonttools-3.0/Doc/source/misc/index.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/index.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,23 @@ +#### +misc +#### + +.. toctree:: + :maxdepth: 2 + + arrayTools + bezierTools + classifyTools + eexec + encodingTools + fixedTools + loggingTools + sstruct + psCharStrings + testTools + textTools + timeTools + transform + xmlReader + xmlWriter + diff -Nru fonttools-3.0/Doc/source/misc/loggingTools.rst fonttools-3.21.2/Doc/source/misc/loggingTools.rst --- fonttools-3.0/Doc/source/misc/loggingTools.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/loggingTools.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############ +loggingTools +############ + +.. automodule:: fontTools.misc.loggingTools + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/psCharStrings.rst fonttools-3.21.2/Doc/source/misc/psCharStrings.rst --- fonttools-3.0/Doc/source/misc/psCharStrings.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/psCharStrings.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############# +psCharStrings +############# + +.. 
automodule:: fontTools.misc.psCharStrings + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/sstruct.rst fonttools-3.21.2/Doc/source/misc/sstruct.rst --- fonttools-3.0/Doc/source/misc/sstruct.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/sstruct.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +####### +sstruct +####### + +.. automodule:: fontTools.misc.sstruct + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/testTools.rst fonttools-3.21.2/Doc/source/misc/testTools.rst --- fonttools-3.0/Doc/source/misc/testTools.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/testTools.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +######### +testTools +######### + +.. automodule:: fontTools.misc.testTools + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/textTools.rst fonttools-3.21.2/Doc/source/misc/textTools.rst --- fonttools-3.0/Doc/source/misc/textTools.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/textTools.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +######### +textTools +######### + +.. automodule:: fontTools.misc.textTools + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/timeTools.rst fonttools-3.21.2/Doc/source/misc/timeTools.rst --- fonttools-3.0/Doc/source/misc/timeTools.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/timeTools.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +######### +timeTools +######### + +.. automodule:: fontTools.misc.timeTools + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/transform.rst fonttools-3.21.2/Doc/source/misc/transform.rst --- fonttools-3.0/Doc/source/misc/transform.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/transform.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +######### +transform +######### + +.. 
automodule:: fontTools.misc.transform + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/xmlReader.rst fonttools-3.21.2/Doc/source/misc/xmlReader.rst --- fonttools-3.0/Doc/source/misc/xmlReader.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/xmlReader.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +######### +xmlReader +######### + +.. automodule:: fontTools.misc.xmlReader + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/misc/xmlWriter.rst fonttools-3.21.2/Doc/source/misc/xmlWriter.rst --- fonttools-3.0/Doc/source/misc/xmlWriter.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/misc/xmlWriter.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +######### +xmlWriter +######### + +.. automodule:: fontTools.misc.xmlWriter + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/areaPen.rst fonttools-3.21.2/Doc/source/pens/areaPen.rst --- fonttools-3.0/Doc/source/pens/areaPen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/areaPen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +####### +areaPen +####### + +.. automodule:: fontTools.pens.areaPen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/basePen.rst fonttools-3.21.2/Doc/source/pens/basePen.rst --- fonttools-3.0/Doc/source/pens/basePen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/basePen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +####### +basePen +####### + +.. automodule:: fontTools.pens.basePen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/boundsPen.rst fonttools-3.21.2/Doc/source/pens/boundsPen.rst --- fonttools-3.0/Doc/source/pens/boundsPen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/boundsPen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +######### +boundsPen +######### + +.. 
automodule:: fontTools.pens.boundsPen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/filterPen.rst fonttools-3.21.2/Doc/source/pens/filterPen.rst --- fonttools-3.0/Doc/source/pens/filterPen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/filterPen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +######### +filterPen +######### + +.. automodule:: fontTools.pens.filterPen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/index.rst fonttools-3.21.2/Doc/source/pens/index.rst --- fonttools-3.0/Doc/source/pens/index.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/index.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ +#### +pens +#### + +.. toctree:: + :maxdepth: 1 + + basePen + boundsPen + pointInsidePen + filterPen + transformPen + t2CharStringPen + statisticsPen + recordingPen + teePen + areaPen + perimeterPen diff -Nru fonttools-3.0/Doc/source/pens/perimeterPen.rst fonttools-3.21.2/Doc/source/pens/perimeterPen.rst --- fonttools-3.0/Doc/source/pens/perimeterPen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/perimeterPen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############ +perimeterPen +############ + +.. automodule:: fontTools.pens.perimeterPen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/pointInsidePen.rst fonttools-3.21.2/Doc/source/pens/pointInsidePen.rst --- fonttools-3.0/Doc/source/pens/pointInsidePen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/pointInsidePen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############## +pointInsidePen +############## + +.. 
automodule:: fontTools.pens.pointInsidePen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/recordingPen.rst fonttools-3.21.2/Doc/source/pens/recordingPen.rst --- fonttools-3.0/Doc/source/pens/recordingPen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/recordingPen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############ +recordingPen +############ + +.. automodule:: fontTools.pens.recordingPen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/statisticsPen.rst fonttools-3.21.2/Doc/source/pens/statisticsPen.rst --- fonttools-3.0/Doc/source/pens/statisticsPen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/statisticsPen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############# +statisticsPen +############# + +.. automodule:: fontTools.pens.statisticsPen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/t2CharStringPen.rst fonttools-3.21.2/Doc/source/pens/t2CharStringPen.rst --- fonttools-3.0/Doc/source/pens/t2CharStringPen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/t2CharStringPen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############### +t2CharStringPen +############### + +.. automodule:: fontTools.pens.t2CharStringPen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/teePen.rst fonttools-3.21.2/Doc/source/pens/teePen.rst --- fonttools-3.0/Doc/source/pens/teePen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/teePen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +###### +teePen +###### + +.. 
automodule:: fontTools.pens.teePen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/pens/transformPen.rst fonttools-3.21.2/Doc/source/pens/transformPen.rst --- fonttools-3.0/Doc/source/pens/transformPen.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/pens/transformPen.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############ +transformPen +############ + +.. automodule:: fontTools.pens.transformPen + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/subset.rst fonttools-3.21.2/Doc/source/subset.rst --- fonttools-3.0/Doc/source/subset.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/subset.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +###### +subset +###### + +.. automodule:: fontTools.subset + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/t1Lib.rst fonttools-3.21.2/Doc/source/t1Lib.rst --- fonttools-3.0/Doc/source/t1Lib.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/t1Lib.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +##### +t1Lib +##### + +.. automodule:: fontTools.t1Lib + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/ttLib/index.rst fonttools-3.21.2/Doc/source/ttLib/index.rst --- fonttools-3.0/Doc/source/ttLib/index.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/ttLib/index.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,15 @@ +##### +ttLib +##### + +.. toctree:: + :maxdepth: 1 + + macUtils + sfnt + tables + woff2 + +.. automodule:: fontTools.ttLib + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/ttLib/macUtils.rst fonttools-3.21.2/Doc/source/ttLib/macUtils.rst --- fonttools-3.0/Doc/source/ttLib/macUtils.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/ttLib/macUtils.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +######## +macUtils +######## + +.. 
automodule:: fontTools.ttLib.macUtils + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/ttLib/sfnt.rst fonttools-3.21.2/Doc/source/ttLib/sfnt.rst --- fonttools-3.0/Doc/source/ttLib/sfnt.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/ttLib/sfnt.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +#### +sfnt +#### + +.. automodule:: fontTools.ttLib.sfnt + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/ttLib/tables.rst fonttools-3.21.2/Doc/source/ttLib/tables.rst --- fonttools-3.0/Doc/source/ttLib/tables.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/ttLib/tables.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,506 @@ +###### +tables +###### + +.. automodule:: fontTools.ttLib.tables + :members: + :undoc-members: + +_a_v_a_r +-------- + +.. automodule:: fontTools.ttLib.tables._a_v_a_r + :members: + :undoc-members: + +_c_m_a_p +-------- + +.. automodule:: fontTools.ttLib.tables._c_m_a_p + :members: + :undoc-members: + +_c_v_a_r +-------- + +.. automodule:: fontTools.ttLib.tables._c_v_a_r + :members: + :undoc-members: + +_c_v_t +------ + +.. automodule:: fontTools.ttLib.tables._c_v_t + :members: + :undoc-members: + +_f_e_a_t +-------- + +.. automodule:: fontTools.ttLib.tables._f_e_a_t + :members: + :undoc-members: + +_f_p_g_m +-------- + +.. automodule:: fontTools.ttLib.tables._f_p_g_m + :members: + :undoc-members: + +_f_v_a_r +-------- + +.. automodule:: fontTools.ttLib.tables._f_v_a_r + :members: + :undoc-members: + +_g_a_s_p +-------- + +.. automodule:: fontTools.ttLib.tables._g_a_s_p + :members: + :undoc-members: + +_g_l_y_f +-------- + +.. automodule:: fontTools.ttLib.tables._g_l_y_f + :members: + :undoc-members: + +_g_v_a_r +-------- + +.. automodule:: fontTools.ttLib.tables._g_v_a_r + :members: + :undoc-members: + +_h_d_m_x +-------- + +.. automodule:: fontTools.ttLib.tables._h_d_m_x + :members: + :undoc-members: + +_h_e_a_d +-------- + +.. 
automodule:: fontTools.ttLib.tables._h_e_a_d + :members: + :undoc-members: + +_h_h_e_a +-------- + +.. automodule:: fontTools.ttLib.tables._h_h_e_a + :members: + :undoc-members: + +_h_m_t_x +-------- + +.. automodule:: fontTools.ttLib.tables._h_m_t_x + :members: + :undoc-members: + +_k_e_r_n +-------- + +.. automodule:: fontTools.ttLib.tables._k_e_r_n + :members: + :undoc-members: + +_l_o_c_a +-------- + +.. automodule:: fontTools.ttLib.tables._l_o_c_a + :members: + :undoc-members: + +_l_t_a_g +-------- + +.. automodule:: fontTools.ttLib.tables._l_t_a_g + :members: + :undoc-members: + +_m_a_x_p +-------- + +.. automodule:: fontTools.ttLib.tables._m_a_x_p + :members: + :undoc-members: + +_m_e_t_a +-------- + +.. automodule:: fontTools.ttLib.tables._m_e_t_a + :members: + :undoc-members: + +_n_a_m_e +-------- + +.. automodule:: fontTools.ttLib.tables._n_a_m_e + :members: + :undoc-members: + +_p_o_s_t +-------- + +.. automodule:: fontTools.ttLib.tables._p_o_s_t + :members: + :undoc-members: + +_p_r_e_p +-------- + +.. automodule:: fontTools.ttLib.tables._p_r_e_p + :members: + :undoc-members: + +_s_b_i_x +-------- + +.. automodule:: fontTools.ttLib.tables._s_b_i_x + :members: + :undoc-members: + +_t_r_a_k +-------- + +.. automodule:: fontTools.ttLib.tables._t_r_a_k + :members: + :undoc-members: + +_v_h_e_a +-------- + +.. automodule:: fontTools.ttLib.tables._v_h_e_a + :members: + :undoc-members: + +_v_m_t_x +-------- + +.. automodule:: fontTools.ttLib.tables._v_m_t_x + :members: + :undoc-members: + +asciiTable +---------- + +.. automodule:: fontTools.ttLib.tables.asciiTable + :members: + :undoc-members: + +B_A_S_E_ +-------- + +.. automodule:: fontTools.ttLib.tables.B_A_S_E_ + :members: + :undoc-members: + +BitmapGlyphMetrics +------------------ + +.. automodule:: fontTools.ttLib.tables.BitmapGlyphMetrics + :members: + :undoc-members: + +C_B_D_T_ +-------- + +.. automodule:: fontTools.ttLib.tables.C_B_D_T_ + :members: + :undoc-members: + +C_B_L_C_ +-------- + +.. 
automodule:: fontTools.ttLib.tables.C_B_L_C_ + :members: + :undoc-members: + +C_F_F_ +------ + +.. automodule:: fontTools.ttLib.tables.C_F_F_ + :members: + :undoc-members: + +C_F_F__2 +-------- + +.. automodule:: fontTools.ttLib.tables.C_F_F__2 + :members: + :undoc-members: + +C_O_L_R_ +-------- + +.. automodule:: fontTools.ttLib.tables.C_O_L_R_ + :members: + :undoc-members: + +C_P_A_L_ +-------- + +.. automodule:: fontTools.ttLib.tables.C_P_A_L_ + :members: + :undoc-members: + +D_S_I_G_ +-------- + +.. automodule:: fontTools.ttLib.tables.D_S_I_G_ + :members: + :undoc-members: + +DefaultTable +------------ + +.. automodule:: fontTools.ttLib.tables.DefaultTable + :members: + :undoc-members: + +E_B_D_T_ +-------- + +.. automodule:: fontTools.ttLib.tables.E_B_D_T_ + :members: + :undoc-members: + +E_B_L_C_ +-------- + +.. automodule:: fontTools.ttLib.tables.E_B_L_C_ + :members: + :undoc-members: + +F_F_T_M_ +-------- + +.. automodule:: fontTools.ttLib.tables.F_F_T_M_ + :members: + :undoc-members: + +G_D_E_F_ +-------- + +.. automodule:: fontTools.ttLib.tables.G_D_E_F_ + :members: + :undoc-members: + +G_M_A_P_ +-------- + +.. automodule:: fontTools.ttLib.tables.G_M_A_P_ + :members: + :undoc-members: + +G_P_K_G_ +-------- + +.. automodule:: fontTools.ttLib.tables.G_P_K_G_ + :members: + :undoc-members: + +G_P_O_S_ +-------- + +.. automodule:: fontTools.ttLib.tables.G_P_O_S_ + :members: + :undoc-members: + +G_S_U_B_ +-------- + +.. automodule:: fontTools.ttLib.tables.G_S_U_B_ + :members: + :undoc-members: + +H_V_A_R_ +-------- + +.. automodule:: fontTools.ttLib.tables.H_V_A_R_ + :members: + :undoc-members: + +J_S_T_F_ +-------- + +.. automodule:: fontTools.ttLib.tables.J_S_T_F_ + :members: + :undoc-members: + +L_T_S_H_ +-------- + +.. automodule:: fontTools.ttLib.tables.L_T_S_H_ + :members: + :undoc-members: + +M_A_T_H_ +-------- + +.. automodule:: fontTools.ttLib.tables.M_A_T_H_ + :members: + :undoc-members: + +M_E_T_A_ +-------- + +.. 
automodule:: fontTools.ttLib.tables.M_E_T_A_ + :members: + :undoc-members: + +M_V_A_R_ +-------- + +.. automodule:: fontTools.ttLib.tables.M_V_A_R_ + :members: + :undoc-members: + +O_S_2f_2 +-------- + +.. automodule:: fontTools.ttLib.tables.O_S_2f_2 + :members: + :undoc-members: + +otBase +------ + +.. automodule:: fontTools.ttLib.tables.otBase + :members: + :undoc-members: + +otConverters +------------ + +.. automodule:: fontTools.ttLib.tables.otConverters + :members: + :undoc-members: + +otData +------ + +.. automodule:: fontTools.ttLib.tables.otData + :members: + :undoc-members: + +otTables +-------- + +.. automodule:: fontTools.ttLib.tables.otTables + :members: + :undoc-members: + +S_I_N_G_ +-------- + +.. automodule:: fontTools.ttLib.tables.S_I_N_G_ + :members: + :undoc-members: + +S_T_A_T_ +-------- + +.. automodule:: fontTools.ttLib.tables.S_T_A_T_ + :members: + :undoc-members: + +S_V_G_ +------ + +.. automodule:: fontTools.ttLib.tables.S_V_G_ + :members: + :undoc-members: + +sbixGlyph +--------- + +.. automodule:: fontTools.ttLib.tables.sbixGlyph + :members: + :undoc-members: + +sbixStrike +---------- + +.. automodule:: fontTools.ttLib.tables.sbixStrike + :members: + :undoc-members: + +T_S_I__0 +-------- + +.. automodule:: fontTools.ttLib.tables.T_S_I__0 + :members: + :undoc-members: + +T_S_I__1 +-------- + +.. automodule:: fontTools.ttLib.tables.T_S_I__1 + :members: + :undoc-members: + +T_S_I__2 +-------- + +.. automodule:: fontTools.ttLib.tables.T_S_I__2 + :members: + :undoc-members: + +T_S_I__3 +-------- + +.. automodule:: fontTools.ttLib.tables.T_S_I__3 + :members: + :undoc-members: + +T_S_I__5 +-------- + +.. automodule:: fontTools.ttLib.tables.T_S_I__5 + :members: + :undoc-members: + +ttProgram +--------- + +.. automodule:: fontTools.ttLib.tables.ttProgram + :members: + :undoc-members: + +TupleVariation +-------------- + +.. automodule:: fontTools.ttLib.tables.TupleVariation + :members: + :undoc-members: + +V_D_M_X_ +-------- + +.. 
automodule:: fontTools.ttLib.tables.V_D_M_X_ + :members: + :undoc-members: + +V_O_R_G_ +-------- + +.. automodule:: fontTools.ttLib.tables.V_O_R_G_ + :members: + :undoc-members: + +V_V_A_R_ +-------- + +.. automodule:: fontTools.ttLib.tables.V_V_A_R_ + :members: + :undoc-members: + + diff -Nru fonttools-3.0/Doc/source/ttLib/woff2.rst fonttools-3.21.2/Doc/source/ttLib/woff2.rst --- fonttools-3.0/Doc/source/ttLib/woff2.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/ttLib/woff2.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +##### +woff2 +##### + +.. automodule:: fontTools.ttLib.woff2 + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/ttx.rst fonttools-3.21.2/Doc/source/ttx.rst --- fonttools-3.0/Doc/source/ttx.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/ttx.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +### +ttx +### + +.. automodule:: fontTools.ttx + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/varLib/designspace.rst fonttools-3.21.2/Doc/source/varLib/designspace.rst --- fonttools-3.0/Doc/source/varLib/designspace.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/varLib/designspace.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +########### +designspace +########### + +.. automodule:: fontTools.varLib.designspace + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/varLib/index.rst fonttools-3.21.2/Doc/source/varLib/index.rst --- fonttools-3.0/Doc/source/varLib/index.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/varLib/index.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ +###### +varLib +###### + +.. toctree:: + :maxdepth: 2 + + designspace + interpolatable + interpolate_layout + merger + models + mutator + +.. 
automodule:: fontTools.varLib + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/varLib/interpolatable.rst fonttools-3.21.2/Doc/source/varLib/interpolatable.rst --- fonttools-3.0/Doc/source/varLib/interpolatable.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/varLib/interpolatable.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +############## +interpolatable +############## + +.. automodule:: fontTools.varLib.interpolatable + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/varLib/interpolate_layout.rst fonttools-3.21.2/Doc/source/varLib/interpolate_layout.rst --- fonttools-3.0/Doc/source/varLib/interpolate_layout.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/varLib/interpolate_layout.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +################## +interpolate_layout +################## + +.. automodule:: fontTools.varLib.interpolate_layout + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/varLib/merger.rst fonttools-3.21.2/Doc/source/varLib/merger.rst --- fonttools-3.0/Doc/source/varLib/merger.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/varLib/merger.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +###### +merger +###### + +.. automodule:: fontTools.varLib.merger + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/varLib/models.rst fonttools-3.21.2/Doc/source/varLib/models.rst --- fonttools-3.0/Doc/source/varLib/models.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/varLib/models.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +###### +models +###### + +.. 
automodule:: fontTools.varLib.models + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/varLib/mutator.rst fonttools-3.21.2/Doc/source/varLib/mutator.rst --- fonttools-3.0/Doc/source/varLib/mutator.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/varLib/mutator.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +####### +mutator +####### + +.. automodule:: fontTools.varLib.mutator + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/source/voltLib.rst fonttools-3.21.2/Doc/source/voltLib.rst --- fonttools-3.0/Doc/source/voltLib.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Doc/source/voltLib.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,35 @@ +####### +voltLib +####### + +.. automodule:: fontTools.voltLib + :members: + :undoc-members: + +ast +--- + +.. automodule:: fontTools.voltLib.ast + :members: + :undoc-members: + +error +----- + +.. automodule:: fontTools.voltLib.parser + :members: + :undoc-members: + +lexer +----- + +.. automodule:: fontTools.voltLib.lexer + :members: + :undoc-members: + +parser +------ + +.. automodule:: fontTools.voltLib.parser + :members: + :undoc-members: diff -Nru fonttools-3.0/Doc/ttx.1 fonttools-3.21.2/Doc/ttx.1 --- fonttools-3.0/Doc/ttx.1 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Doc/ttx.1 1970-01-01 00:00:00.000000000 +0000 @@ -1,225 +0,0 @@ -.Dd May 18, 2004 -.\" ttx is not specific to any OS, but contrary to what groff_mdoc(7) -.\" seems to imply, entirely omitting the .Os macro causes 'BSD' to -.\" be used, so I give a zero-width space as its argument. -.Os \& -.\" The "FontTools Manual" argument apparently has no effect in -.\" groff 1.18.1. I think it is a bug in the -mdoc groff package. -.Dt TTX 1 "FontTools Manual" -.Sh NAME -.Nm ttx -.Nd tool for manipulating TrueType and OpenType fonts -.Sh SYNOPSIS -.Nm -.Bk -.Op Ar option ... -.Ek -.Bk -.Ar file ... -.Ek -.Sh DESCRIPTION -.Nm -is a tool for manipulating TrueType and OpenType fonts. 
It can convert -TrueType and OpenType fonts to and from an -.Tn XML Ns -based format called -.Tn TTX . -.Tn TTX -files have a -.Ql .ttx -extension. -.Pp -For each -.Ar file -argument it is given, -.Nm -detects whether it is a -.Ql .ttf , -.Ql .otf -or -.Ql .ttx -file and acts accordingly: if it is a -.Ql .ttf -or -.Ql .otf -file, it generates a -.Ql .ttx -file; if it is a -.Ql .ttx -file, it generates a -.Ql .ttf -or -.Ql .otf -file. -.Pp -By default, every output file is created in the same directory as the -corresponding input file and with the same name except for the -extension, which is substituted appropriately. -.Nm -never overwrites existing files; if necessary, it appends a suffix to -the output file name before the extension, as in -.Pa Arial#1.ttf . -.Ss "General options" -.Bl -tag -width ".Fl t Ar table" -.It Fl h -Display usage information. -.It Fl d Ar dir -Write the output files to directory -.Ar dir -instead of writing every output file to the same directory as the -corresponding input file. -.It Fl o Ar file -Write the output to -.Ar file -instead of writing it to the same directory as the -corresponding input file. -.It Fl v -Be verbose. Write more messages to the standard output describing what -is being done. -.It Fl a -Allow virtual glyphs ID's on compile or decompile. -.El -.Ss "Dump options" -The following options control the process of dumping font files -(TrueType or OpenType) to -.Tn TTX -files. -.Bl -tag -width ".Fl t Ar table" -.It Fl l -List table information. Instead of dumping the font to a -.Tn TTX -file, display minimal information about each table. -.It Fl t Ar table -Dump table -.Ar table . -This option may be given multiple times to dump several tables at -once. When not specified, all tables are dumped. -.It Fl x Ar table -Exclude table -.Ar table -from the list of tables to dump. This option may be given multiple -times to exclude several tables from the dump. The -.Fl t -and -.Fl x -options are mutually exclusive. 
-.It Fl s -Split tables. Dump each table to a separate -.Tn TTX -file and write (under the name that would have been used for the output -file if the -.Fl s -option had not been given) one small -.Tn TTX -file containing references to the individual table dump files. This -file can be used as input to -.Nm -as long as the referenced files can be found in the same directory. -.It Fl i -.\" XXX: I suppose OpenType programs (exist and) are also affected. -Don't disassemble TrueType instructions. When this option is specified, -all TrueType programs (glyph programs, the font program and the -pre-program) are written to the -.Tn TTX -file as hexadecimal data instead of -assembly. This saves some time and results in smaller -.Tn TTX -files. -.It Fl y Ar n -When decompiling a TrueType Collection (TTC) file, -decompile font number -.Ar n , -starting from 0. -.El -.Ss "Compilation options" -The following options control the process of compiling -.Tn TTX -files into font files (TrueType or OpenType): -.Bl -tag -width ".Fl t Ar table" -.It Fl m Ar fontfile -Merge the input -.Tn TTX -file -.Ar file -with -.Ar fontfile . -No more than one -.Ar file -argument can be specified when this option is used. -.It Fl b -Don't recalculate glyph bounding boxes. Use the values in the -.Tn TTX -file as is. -.El -.Sh "THE TTX FILE FORMAT" -You can find some information about the -.Tn TTX -file format in -.Pa documentation.html . -In particular, you will find in that file the list of tables understood by -.Nm -and the relations between TrueType GlyphIDs and the glyph names used in -.Tn TTX -files. -.Sh EXAMPLES -In the following examples, all files are read from and written to the -current directory. Additionally, the name given for the output file -assumes in every case that it did not exist before -.Nm -was invoked. 
-.Pp -Dump the TrueType font contained in -.Pa FreeSans.ttf -to -.Pa FreeSans.ttx : -.Pp -.Dl ttx FreeSans.ttf -.Pp -Compile -.Pa MyFont.ttx -into a TrueType or OpenType font file: -.Pp -.Dl ttx MyFont.ttx -.Pp -List the tables in -.Pa FreeSans.ttf -along with some information: -.Pp -.Dl ttx -l FreeSans.ttf -.Pp -Dump the -.Sq cmap -table from -.Pa FreeSans.ttf -to -.Pa FreeSans.ttx : -.Pp -.Dl ttx -t cmap FreeSans.ttf -.Sh NOTES -On MS\-Windows and MacOS, -.Nm -is available as a graphical application to which files can be dropped. -.Sh SEE ALSO -.Pa documentation.html -.Pp -.Xr fontforge 1 , -.Xr ftinfo 1 , -.Xr gfontview 1 , -.Xr xmbdfed 1 , -.Xr Font::TTF 3pm -.Sh AUTHORS -.Nm -was written by -.An -nosplit -.An "Just van Rossum" Aq just@letterror.com . -.Pp -This manual page was written by -.An "Florent Rougon" Aq f.rougon@free.fr -for the Debian GNU/Linux system based on the existing FontTools -documentation. It may be freely used, modified and distributed without -restrictions. -.\" For Emacs: -.\" Local Variables: -.\" fill-column: 72 -.\" sentence-end: "[.?!][]\"')}]*\\($\\| $\\| \\| \\)[ \n]*" -.\" sentence-end-double-space: t -.\" End: diff -Nru fonttools-3.0/fonttools fonttools-3.21.2/fonttools --- fonttools-3.0/fonttools 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/fonttools 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ +#!/usr/bin/env python +from __future__ import print_function, division, absolute_import +import sys +import os.path + +libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Lib') +sys.path.insert(0, libdir) + +from fontTools.__main__ import main + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/.gitignore fonttools-3.21.2/.gitignore --- fonttools-3.0/.gitignore 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/.gitignore 2018-01-08 12:40:40.000000000 +0000 @@ -1,5 +1,25 @@ +# Byte-compiled / optimized files +__pycache__/ +*.py[co] +*$py.class + +# Distribution / Packaging 
+*.egg +*.egg-info +*.eggs MANIFEST build dist -*.pyc + +# Unit test / coverage files +.tox/* +.cache/ +.coverage +.coverage.* +htmlcov/ + +# emacs backup files *~ + +# OSX Finder +.DS_Store diff -Nru fonttools-3.0/Lib/fontTools/afmLib.py fonttools-3.21.2/Lib/fontTools/afmLib.py --- fonttools-3.0/Lib/fontTools/afmLib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/afmLib.py 2018-01-08 12:40:40.000000000 +0000 @@ -337,22 +337,13 @@ def readlines(path): - f = open(path, 'rb') - data = f.read() - f.close() - # read any text file, regardless whether it's formatted for Mac, Unix or Dos - sep = "" - if '\r' in data: - sep = sep + '\r' # mac or dos - if '\n' in data: - sep = sep + '\n' # unix or dos - return data.split(sep) + with open(path, "r", encoding="ascii") as f: + data = f.read() + return data.splitlines() def writelines(path, lines, sep='\r'): - f = open(path, 'wb') - for line in lines: - f.write(line + sep) - f.close() + with open(path, "w", encoding="ascii", newline=sep) as f: + f.write("\n".join(lines) + "\n") if __name__ == "__main__": diff -Nru fonttools-3.0/Lib/fontTools/agl.py fonttools-3.21.2/Lib/fontTools/agl.py --- fonttools-3.0/Lib/fontTools/agl.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/agl.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,8 +1,12 @@ +# -*- coding: utf-8 -*- # The table below is taken from # http://www.adobe.com/devnet/opentype/archives/aglfn.txt -from __future__ import print_function, division, absolute_import +from __future__ import (print_function, division, absolute_import, + unicode_literals) from fontTools.misc.py23 import * +import re + _aglText = """\ # ----------------------------------------------------------- @@ -727,7 +731,7 @@ unicode = m.group(1) assert len(unicode) == 4 unicode = int(unicode, 16) - glyphName = m.group(2) + glyphName = tostr(m.group(2)) if glyphName in AGL2UV: # the above table contains identical duplicates assert AGL2UV[glyphName] == unicode @@ -736,3 
+740,136 @@ UV2AGL[unicode] = glyphName _builddicts() + + +def toUnicode(glyph, isZapfDingbats=False): + """Convert glyph names to Unicode, such as 'longs_t.oldstyle' --> u'ſt' + + If isZapfDingbats is True, the implementation recognizes additional + glyph names (as required by the AGL specification). + """ + # https://github.com/adobe-type-tools/agl-specification#2-the-mapping + # + # 1. Drop all the characters from the glyph name starting with + # the first occurrence of a period (U+002E; FULL STOP), if any. + glyph = glyph.split(".", 1)[0] + + # 2. Split the remaining string into a sequence of components, + # using underscore (U+005F; LOW LINE) as the delimiter. + components = glyph.split("_") + + # 3. Map each component to a character string according to the + # procedure below, and concatenate those strings; the result + # is the character string to which the glyph name is mapped. + result = [_glyphComponentToUnicode(c, isZapfDingbats) + for c in components] + return "".join(result) + + +def _glyphComponentToUnicode(component, isZapfDingbats): + # If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats), + # and the component is in the ITC Zapf Dingbats Glyph List, then + # map it to the corresponding character in that list. + dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None + if dingbat: + return dingbat + + # Otherwise, if the component is in AGL, then map it + # to the corresponding character in that list. + # + # TODO: We currently use the AGLFN (Adobe glyph list for new fonts), + # although the spec actually mandates the legacy AGL which is + # a superset of the AGLFN. 
+ # https://github.com/fonttools/fonttools/issues/775 + uchar = AGL2UV.get(component) + if uchar: + return unichr(uchar) + + # Otherwise, if the component is of the form "uni" (U+0075, + # U+006E, and U+0069) followed by a sequence of uppercase + # hexadecimal digits (0–9 and A–F, meaning U+0030 through + # U+0039 and U+0041 through U+0046), if the length of that + # sequence is a multiple of four, and if each group of four + # digits represents a value in the ranges 0000 through D7FF + # or E000 through FFFF, then interpret each as a Unicode scalar + # value and map the component to the string made of those + # scalar values. Note that the range and digit-length + # restrictions mean that the "uni" glyph name prefix can be + # used only with UVs in the Basic Multilingual Plane (BMP). + uni = _uniToUnicode(component) + if uni: + return uni + + # Otherwise, if the component is of the form "u" (U+0075) + # followed by a sequence of four to six uppercase hexadecimal + # digits (0–9 and A–F, meaning U+0030 through U+0039 and + # U+0041 through U+0046), and those digits represents a value + # in the ranges 0000 through D7FF or E000 through 10FFFF, then + # interpret it as a Unicode scalar value and map the component + # to the string made of this scalar value. + uni = _uToUnicode(component) + if uni: + return uni + + # Otherwise, map the component to an empty string. 
+ return '' + + +# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt +_AGL_ZAPF_DINGBATS = ( + " ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀" + "❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇" + "①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔" + "↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰") + + +def _zapfDingbatsToUnicode(glyph): + """Helper for toUnicode().""" + if len(glyph) < 2 or glyph[0] != 'a': + return None + try: + gid = int(glyph[1:]) + except ValueError: + return None + if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS): + return None + uchar = _AGL_ZAPF_DINGBATS[gid] + return uchar if uchar != ' ' else None + + +_re_uni = re.compile("^uni([0-9A-F]+)$") + + +def _uniToUnicode(component): + """Helper for toUnicode() to handle "uniABCD" components.""" + match = _re_uni.match(component) + if match is None: + return None + digits = match.group(1) + if len(digits) % 4 != 0: + return None + chars = [int(digits[i : i + 4], 16) + for i in range(0, len(digits), 4)] + if any(c >= 0xD800 and c <= 0xDFFF for c in chars): + # The AGL specification explicitly excluded surrogate pairs. 
+ return None + return ''.join([unichr(c) for c in chars]) + + +_re_u = re.compile("^u([0-9A-F]{4,6})$") + + +def _uToUnicode(component): + """Helper for toUnicode() to handle "u1ABCD" components.""" + match = _re_u.match(component) + if match is None: + return None + digits = match.group(1) + try: + value = int(digits, 16) + except ValueError: + return None + if ((value >= 0x0000 and value <= 0xD7FF) or + (value >= 0xE000 and value <= 0x10FFFF)): + return unichr(value) + return None diff -Nru fonttools-3.0/Lib/fontTools/cffLib/__init__.py fonttools-3.21.2/Lib/fontTools/cffLib/__init__.py --- fonttools-3.0/Lib/fontTools/cffLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/cffLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,2639 @@ +"""cffLib.py -- read/write tools for Adobe CFF fonts.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc import psCharStrings +from fontTools.misc.arrayTools import unionRect, intRect +from fontTools.misc.textTools import safeEval +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables.otBase import OTTableWriter +from fontTools.ttLib.tables.otBase import OTTableReader +from fontTools.ttLib.tables import otTables as ot +import struct +import logging +import re + +# mute cffLib debug messages when running ttx in verbose mode +DEBUG = logging.DEBUG - 1 +log = logging.getLogger(__name__) + +cffHeaderFormat = """ + major: B + minor: B + hdrSize: B +""" + +maxStackLimit = 513 +# maxstack operator has been deprecated. max stack is now always 513. 
+ + +class CFFFontSet(object): + + def decompile(self, file, otFont, isCFF2=None): + self.otFont = otFont + sstruct.unpack(cffHeaderFormat, file.read(3), self) + if isCFF2 is not None: + # called from ttLib: assert 'major' as read from file matches the + # expected version + expected_major = (2 if isCFF2 else 1) + if self.major != expected_major: + raise ValueError( + "Invalid CFF 'major' version: expected %d, found %d" % + (expected_major, self.major)) + else: + # use 'major' version from file to determine if isCFF2 + assert self.major in (1, 2), "Unknown CFF format" + isCFF2 = self.major == 2 + if not isCFF2: + self.offSize = struct.unpack("B", file.read(1))[0] + file.seek(self.hdrSize) + self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2)) + self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2) + self.strings = IndexedStrings(file) + else: # isCFF2 + self.topDictSize = struct.unpack(">H", file.read(2))[0] + file.seek(self.hdrSize) + self.fontNames = ["CFF2Font"] + cff2GetGlyphOrder = otFont.getGlyphOrder + # in CFF2, offsetSize is the size of the TopDict data. + self.topDictIndex = TopDictIndex( + file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2) + self.strings = None + self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2) + self.topDictIndex.strings = self.strings + self.topDictIndex.GlobalSubrs = self.GlobalSubrs + + def __len__(self): + return len(self.fontNames) + + def keys(self): + return list(self.fontNames) + + def values(self): + return self.topDictIndex + + def __getitem__(self, nameOrIndex): + """ Return TopDict instance identified by name (str) or index (int + or any object that implements `__index__`). 
+ """ + if hasattr(nameOrIndex, "__index__"): + index = nameOrIndex.__index__() + elif isinstance(nameOrIndex, basestring): + name = nameOrIndex + try: + index = self.fontNames.index(name) + except ValueError: + raise KeyError(nameOrIndex) + else: + raise TypeError(nameOrIndex) + return self.topDictIndex[index] + + def compile(self, file, otFont, isCFF2=None): + self.otFont = otFont + if isCFF2 is not None: + # called from ttLib: assert 'major' value matches expected version + expected_major = (2 if isCFF2 else 1) + if self.major != expected_major: + raise ValueError( + "Invalid CFF 'major' version: expected %d, found %d" % + (expected_major, self.major)) + else: + # use current 'major' value to determine output format + assert self.major in (1, 2), "Unknown CFF format" + isCFF2 = self.major == 2 + + if otFont.recalcBBoxes and not isCFF2: + for topDict in self.topDictIndex: + topDict.recalcFontBBox() + + if not isCFF2: + strings = IndexedStrings() + else: + strings = None + writer = CFFWriter(isCFF2) + topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2) + if isCFF2: + self.hdrSize = 5 + writer.add(sstruct.pack(cffHeaderFormat, self)) + # Note: topDictSize will most likely change in CFFWriter.toFile(). + self.topDictSize = topCompiler.getDataLength() + writer.add(struct.pack(">H", self.topDictSize)) + else: + self.hdrSize = 4 + self.offSize = 4 # will most likely change in CFFWriter.toFile(). 
+ writer.add(sstruct.pack(cffHeaderFormat, self)) + writer.add(struct.pack("B", self.offSize)) + if not isCFF2: + fontNames = Index() + for name in self.fontNames: + fontNames.append(name) + writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2)) + writer.add(topCompiler) + if not isCFF2: + writer.add(strings.getCompiler()) + writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2)) + + for topDict in self.topDictIndex: + if not hasattr(topDict, "charset") or topDict.charset is None: + charset = otFont.getGlyphOrder() + topDict.charset = charset + children = topCompiler.getChildren(strings) + for child in children: + writer.add(child) + + writer.toFile(file) + + def toXML(self, xmlWriter, progress=None): + xmlWriter.simpletag("major", value=self.major) + xmlWriter.newline() + xmlWriter.simpletag("minor", value=self.minor) + xmlWriter.newline() + for fontName in self.fontNames: + xmlWriter.begintag("CFFFont", name=tostr(fontName)) + xmlWriter.newline() + font = self[fontName] + font.toXML(xmlWriter, progress) + xmlWriter.endtag("CFFFont") + xmlWriter.newline() + xmlWriter.newline() + xmlWriter.begintag("GlobalSubrs") + xmlWriter.newline() + self.GlobalSubrs.toXML(xmlWriter, progress) + xmlWriter.endtag("GlobalSubrs") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, otFont=None): + self.otFont = otFont + + # set defaults. These will be replaced if there are entries for them + # in the XML file. + if not hasattr(self, "major"): + self.major = 1 + if not hasattr(self, "minor"): + self.minor = 0 + + if name == "CFFFont": + if self.major == 1: + if not hasattr(self, "offSize"): + # this will be recalculated when the cff is compiled. 
+ self.offSize = 4 + if not hasattr(self, "hdrSize"): + self.hdrSize = 4 + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + if not hasattr(self, "fontNames"): + self.fontNames = [] + self.topDictIndex = TopDictIndex() + fontName = attrs["name"] + self.fontNames.append(fontName) + topDict = TopDict(GlobalSubrs=self.GlobalSubrs) + topDict.charset = None # gets filled in later + elif self.major == 2: + if not hasattr(self, "hdrSize"): + self.hdrSize = 5 + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + if not hasattr(self, "fontNames"): + self.fontNames = ["CFF2Font"] + cff2GetGlyphOrder = self.otFont.getGlyphOrder + topDict = TopDict( + GlobalSubrs=self.GlobalSubrs, + cff2GetGlyphOrder=cff2GetGlyphOrder) + self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None) + self.topDictIndex.append(topDict) + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + topDict.fromXML(name, attrs, content) + elif name == "GlobalSubrs": + subrCharStringClass = psCharStrings.T2CharString + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + subr = subrCharStringClass() + subr.fromXML(name, attrs, content) + self.GlobalSubrs.append(subr) + elif name == "major": + self.major = int(attrs['value']) + elif name == "minor": + self.minor = int(attrs['value']) + + def convertCFFToCFF2(self, otFont): + # This assumes a decompiled CFF table. 
+ self.major = 2 + cff2GetGlyphOrder = self.otFont.getGlyphOrder + topDictData = TopDictIndex(None, cff2GetGlyphOrder, None) + topDictData.items = self.topDictIndex.items + self.topDictIndex = topDictData + topDict = topDictData[0] + if hasattr(topDict, 'Private'): + privateDict = topDict.Private + else: + privateDict = None + opOrder = buildOrder(topDictOperators2) + topDict.order = opOrder + topDict.cff2GetGlyphOrder = cff2GetGlyphOrder + for entry in topDictOperators: + key = entry[1] + if key not in opOrder: + if key in topDict.rawDict: + del topDict.rawDict[key] + if hasattr(topDict, key): + exec("del topDict.%s" % (key)) + + if not hasattr(topDict, "FDArray"): + fdArray = topDict.FDArray = FDArrayIndex() + fdArray.strings = None + fdArray.GlobalSubrs = topDict.GlobalSubrs + topDict.GlobalSubrs.fdArray = fdArray + charStrings = topDict.CharStrings + if charStrings.charStringsAreIndexed: + charStrings.charStringsIndex.fdArray = fdArray + else: + charStrings.fdArray = fdArray + fontDict = FontDict() + fdArray.append(fontDict) + fontDict.Private = privateDict + privateOpOrder = buildOrder(privateDictOperators2) + for entry in privateDictOperators: + key = entry[1] + if key not in privateOpOrder: + if key in privateDict.rawDict: + # print "Removing private dict", key + del privateDict.rawDict[key] + if hasattr(privateDict, key): + exec("del privateDict.%s" % (key)) + # print "Removing privateDict attr", key + else: + # clean up the PrivateDicts in the fdArray + privateOpOrder = buildOrder(privateDictOperators2) + for fontDict in fdArray: + privateDict = fontDict.Private + for entry in privateDictOperators: + key = entry[1] + if key not in privateOpOrder: + if key in privateDict.rawDict: + # print "Removing private dict", key + del privateDict.rawDict[key] + if hasattr(privateDict, key): + exec("del privateDict.%s" % (key)) + # print "Removing privateDict attr", key + # At this point, the Subrs and Charstrings are all still T2Charstring class + # easiest to fix 
this by compiling, then decompiling again + file = BytesIO() + self.compile(file, otFont, isCFF2=True) + file.seek(0) + self.decompile(file, otFont, isCFF2=True) + + +class CFFWriter(object): + + def __init__(self, isCFF2): + self.data = [] + self.isCFF2 = isCFF2 + + def add(self, table): + self.data.append(table) + + def toFile(self, file): + lastPosList = None + count = 1 + while True: + log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count) + count = count + 1 + pos = 0 + posList = [pos] + for item in self.data: + if hasattr(item, "getDataLength"): + endPos = pos + item.getDataLength() + if isinstance(item, TopDictIndexCompiler) and item.isCFF2: + self.topDictSize = item.getDataLength() + else: + endPos = pos + len(item) + if hasattr(item, "setPos"): + item.setPos(pos, endPos) + pos = endPos + posList.append(pos) + if posList == lastPosList: + break + lastPosList = posList + log.log(DEBUG, "CFFWriter.toFile() writing to file.") + begin = file.tell() + if self.isCFF2: + self.data[1] = struct.pack(">H", self.topDictSize) + else: + self.offSize = calcOffSize(lastPosList[-1]) + self.data[1] = struct.pack("B", self.offSize) + posList = [0] + for item in self.data: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + posList.append(file.tell() - begin) + assert posList == lastPosList + + +def calcOffSize(largestOffset): + if largestOffset < 0x100: + offSize = 1 + elif largestOffset < 0x10000: + offSize = 2 + elif largestOffset < 0x1000000: + offSize = 3 + else: + offSize = 4 + return offSize + + +class IndexCompiler(object): + + def __init__(self, items, strings, parent, isCFF2=None): + if isCFF2 is None and hasattr(parent, "isCFF2"): + isCFF2 = parent.isCFF2 + assert isCFF2 is not None + self.isCFF2 = isCFF2 + self.items = self.getItems(items, strings) + self.parent = parent + + def getItems(self, items, strings): + return items + + def getOffsets(self): + # An empty INDEX contains only the count field. 
+ if self.items: + pos = 1 + offsets = [pos] + for item in self.items: + if hasattr(item, "getDataLength"): + pos = pos + item.getDataLength() + else: + pos = pos + len(item) + offsets.append(pos) + else: + offsets = [] + return offsets + + def getDataLength(self): + if self.isCFF2: + countSize = 4 + else: + countSize = 2 + + if self.items: + lastOffset = self.getOffsets()[-1] + offSize = calcOffSize(lastOffset) + dataLength = ( + countSize + # count + 1 + # offSize + (len(self.items) + 1) * offSize + # the offsets + lastOffset - 1 # size of object data + ) + else: + # count. For empty INDEX tables, this is the only entry. + dataLength = countSize + + return dataLength + + def toFile(self, file): + offsets = self.getOffsets() + if self.isCFF2: + writeCard32(file, len(self.items)) + else: + writeCard16(file, len(self.items)) + # An empty INDEX contains only the count field. + if self.items: + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) == -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + data = tobytes(item, encoding="latin1") + file.write(data) + + +class IndexedStringsCompiler(IndexCompiler): + + def getItems(self, items, strings): + return items.strings + + +class TopDictIndexCompiler(IndexCompiler): + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for topDict in self.items: + children.extend(topDict.getChildren(strings)) + return children + + def getOffsets(self): + if self.isCFF2: + offsets = [0, self.items[0].getDataLength()] + return offsets + else: + return super(TopDictIndexCompiler, self).getOffsets() + + def getDataLength(self): + if self.isCFF2: + dataLength = self.items[0].getDataLength() + return 
dataLength + else: + return super(TopDictIndexCompiler, self).getDataLength() + + def toFile(self, file): + if self.isCFF2: + self.items[0].toFile(file) + else: + super(TopDictIndexCompiler, self).toFile(file) + + +class FDArrayIndexCompiler(IndexCompiler): + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for fontDict in self.items: + children.extend(fontDict.getChildren(strings)) + return children + + def toFile(self, file): + offsets = self.getOffsets() + if self.isCFF2: + writeCard32(file, len(self.items)) + else: + writeCard16(file, len(self.items)) + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) == -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + + def setPos(self, pos, endPos): + self.parent.rawDict["FDArray"] = pos + + +class GlobalSubrsCompiler(IndexCompiler): + + def getItems(self, items, strings): + out = [] + for cs in items: + cs.compile(self.isCFF2) + out.append(cs.bytecode) + return out + + +class SubrsCompiler(GlobalSubrsCompiler): + + def setPos(self, pos, endPos): + offset = pos - self.parent.pos + self.parent.rawDict["Subrs"] = offset + + +class CharStringsCompiler(GlobalSubrsCompiler): + + def getItems(self, items, strings): + out = [] + for cs in items: + cs.compile(self.isCFF2) + out.append(cs.bytecode) + return out + + def setPos(self, pos, endPos): + self.parent.rawDict["CharStrings"] = pos + + +class Index(object): + + """This class represents what the CFF spec calls an INDEX.""" + + compilerClass = IndexCompiler + + def __init__(self, file=None, isCFF2=None): + assert (isCFF2 is None) == (file is None) + self.items = [] + name = self.__class__.__name__ + if file is None: 
+ return + self._isCFF2 = isCFF2 + log.log(DEBUG, "loading %s at %s", name, file.tell()) + self.file = file + if isCFF2: + count = readCard32(file) + else: + count = readCard16(file) + if count == 0: + return + self.items = [None] * count + offSize = readCard8(file) + log.log(DEBUG, " index count: %s offSize: %s", count, offSize) + assert offSize <= 4, "offSize too large: %s" % offSize + self.offsets = offsets = [] + pad = b'\0' * (4 - offSize) + for index in range(count + 1): + chunk = file.read(offSize) + chunk = pad + chunk + offset, = struct.unpack(">L", chunk) + offsets.append(int(offset)) + self.offsetBase = file.tell() - 1 + file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot + log.log(DEBUG, " end of %s at %s", name, file.tell()) + + def __len__(self): + return len(self.items) + + def __getitem__(self, index): + item = self.items[index] + if item is not None: + return item + offset = self.offsets[index] + self.offsetBase + size = self.offsets[index + 1] - self.offsets[index] + file = self.file + file.seek(offset) + data = file.read(size) + assert len(data) == size + item = self.produceItem(index, data, file, offset) + self.items[index] = item + return item + + def __setitem__(self, index, item): + self.items[index] = item + + def produceItem(self, index, data, file, offset): + return data + + def append(self, item): + self.items.append(item) + + def getCompiler(self, strings, parent, isCFF2=None): + return self.compilerClass(self, strings, parent, isCFF2=isCFF2) + + +class GlobalSubrsIndex(Index): + + compilerClass = GlobalSubrsCompiler + subrClass = psCharStrings.T2CharString + charStringClass = psCharStrings.T2CharString + + def __init__(self, file=None, globalSubrs=None, private=None, + fdSelect=None, fdArray=None, isCFF2=None): + super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2) + self.globalSubrs = globalSubrs + self.private = private + if fdSelect: + self.fdSelect = fdSelect + if fdArray: + self.fdArray = fdArray + if 
isCFF2: + # CFF2Subr's can have numeric arguments on the stack after the last operator. + self.subrClass = psCharStrings.CFF2Subr + self.charStringClass = psCharStrings.CFF2Subr + + + def produceItem(self, index, data, file, offset): + if self.private is not None: + private = self.private + elif hasattr(self, 'fdArray') and self.fdArray is not None: + if hasattr(self, 'fdSelect') and self.fdSelect is not None: + fdIndex = self.fdSelect[index] + else: + fdIndex = 0 + private = self.fdArray[fdIndex].Private + else: + private = None + return self.subrClass(data, private=private, globalSubrs=self.globalSubrs) + + def toXML(self, xmlWriter, progress): + xmlWriter.comment( + "The 'index' attribute is only for humans; " + "it is ignored when parsed.") + xmlWriter.newline() + for i in range(len(self)): + subr = self[i] + if subr.needsDecompilation(): + xmlWriter.begintag("CharString", index=i, raw=1) + else: + xmlWriter.begintag("CharString", index=i) + xmlWriter.newline() + subr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + if name != "CharString": + return + subr = self.subrClass() + subr.fromXML(name, attrs, content) + self.append(subr) + + def getItemAndSelector(self, index): + sel = None + if hasattr(self, 'fdSelect'): + sel = self.fdSelect[index] + return self[index], sel + + +class SubrsIndex(GlobalSubrsIndex): + compilerClass = SubrsCompiler + + +class TopDictIndex(Index): + + compilerClass = TopDictIndexCompiler + + def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0, + isCFF2=None): + assert (isCFF2 is None) == (file is None) + self.cff2GetGlyphOrder = cff2GetGlyphOrder + if file is not None and isCFF2: + self._isCFF2 = isCFF2 + self.items = [] + name = self.__class__.__name__ + log.log(DEBUG, "loading %s at %s", name, file.tell()) + self.file = file + count = 1 + self.items = [None] * count + self.offsets = [0, topSize] + self.offsetBase = file.tell() + # pretend we've read the 
whole lot + file.seek(self.offsetBase + topSize) + log.log(DEBUG, " end of %s at %s", name, file.tell()) + else: + super(TopDictIndex, self).__init__(file, isCFF2=isCFF2) + + def produceItem(self, index, data, file, offset): + top = TopDict( + self.strings, file, offset, self.GlobalSubrs, + self.cff2GetGlyphOrder, isCFF2=self._isCFF2) + top.decompile(data) + return top + + def toXML(self, xmlWriter, progress): + for i in range(len(self)): + xmlWriter.begintag("FontDict", index=i) + xmlWriter.newline() + self[i].toXML(xmlWriter, progress) + xmlWriter.endtag("FontDict") + xmlWriter.newline() + + +class FDArrayIndex(Index): + + compilerClass = FDArrayIndexCompiler + + def toXML(self, xmlWriter, progress): + for i in range(len(self)): + xmlWriter.begintag("FontDict", index=i) + xmlWriter.newline() + self[i].toXML(xmlWriter, progress) + xmlWriter.endtag("FontDict") + xmlWriter.newline() + + def produceItem(self, index, data, file, offset): + fontDict = FontDict( + self.strings, file, offset, self.GlobalSubrs, isCFF2=self._isCFF2, + vstore=self.vstore) + fontDict.decompile(data) + return fontDict + + def fromXML(self, name, attrs, content): + if name != "FontDict": + return + fontDict = FontDict() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + fontDict.fromXML(name, attrs, content) + self.append(fontDict) + + +class VarStoreData(object): + + def __init__(self, file=None, otVarStore=None): + self.file = file + self.data = None + self.otVarStore = otVarStore + self.font = TTFont() # dummy font for the decompile function. + + def decompile(self): + if self.file: + class GlobalState(object): + def __init__(self, tableType, cachingStats): + self.tableType = tableType + self.cachingStats = cachingStats + globalState = GlobalState(tableType="VarStore", cachingStats={}) + # read data in from file. Assume position is correct. 
+ length = readCard16(self.file) + self.data = self.file.read(length) + globalState = {} + reader = OTTableReader(self.data, globalState) + self.otVarStore = ot.VarStore() + self.otVarStore.decompile(reader, self.font) + return self + + def compile(self): + writer = OTTableWriter() + self.otVarStore.compile(writer, self.font) + # Note that this omits the initial Card16 length from the CFF2 + # VarStore data block + self.data = writer.getAllData() + + def writeXML(self, xmlWriter, name): + self.otVarStore.toXML(xmlWriter, self.font) + + def xmlRead(self, name, attrs, content, parent): + self.otVarStore = ot.VarStore() + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + self.otVarStore.fromXML(name, attrs, content, self.font) + else: + pass + return None + + def __len__(self): + return len(self.data) + + def getNumRegions(self, vsIndex): + varData = self.otVarStore.VarData[vsIndex] + numRegions = varData.VarRegionCount + return numRegions + + +class FDSelect(object): + + def __init__(self, file=None, numGlyphs=None, format=None): + if file: + # read data in from file + self.format = readCard8(file) + if self.format == 0: + from array import array + self.gidArray = array("B", file.read(numGlyphs)).tolist() + elif self.format == 3: + gidArray = [None] * numGlyphs + nRanges = readCard16(file) + fd = None + prev = None + for i in range(nRanges): + first = readCard16(file) + if prev is not None: + for glyphID in range(prev, first): + gidArray[glyphID] = fd + prev = first + fd = readCard8(file) + if prev is not None: + first = readCard16(file) + for glyphID in range(prev, first): + gidArray[glyphID] = fd + self.gidArray = gidArray + else: + assert False, "unsupported FDSelect format: %s" % format + else: + # reading from XML. Make empty gidArray, and leave format as passed in. + # format is None will result in the smallest representation being used. 
+ self.format = format + self.gidArray = [] + + def __len__(self): + return len(self.gidArray) + + def __getitem__(self, index): + return self.gidArray[index] + + def __setitem__(self, index, fdSelectValue): + self.gidArray[index] = fdSelectValue + + def append(self, fdSelectValue): + self.gidArray.append(fdSelectValue) + + +class CharStrings(object): + + def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray, + isCFF2=None): + self.globalSubrs = globalSubrs + if file is not None: + self.charStringsIndex = SubrsIndex( + file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2) + self.charStrings = charStrings = {} + for i in range(len(charset)): + charStrings[charset[i]] = i + # read from OTF file: charStrings.values() are indices into + # charStringsIndex. + self.charStringsAreIndexed = 1 + else: + self.charStrings = {} + # read from ttx file: charStrings.values() are actual charstrings + self.charStringsAreIndexed = 0 + self.private = private + if fdSelect is not None: + self.fdSelect = fdSelect + if fdArray is not None: + self.fdArray = fdArray + + def keys(self): + return list(self.charStrings.keys()) + + def values(self): + if self.charStringsAreIndexed: + return self.charStringsIndex + else: + return list(self.charStrings.values()) + + def has_key(self, name): + return name in self.charStrings + + __contains__ = has_key + + def __len__(self): + return len(self.charStrings) + + def __getitem__(self, name): + charString = self.charStrings[name] + if self.charStringsAreIndexed: + charString = self.charStringsIndex[charString] + return charString + + def __setitem__(self, name, charString): + if self.charStringsAreIndexed: + index = self.charStrings[name] + self.charStringsIndex[index] = charString + else: + self.charStrings[name] = charString + + def getItemAndSelector(self, name): + if self.charStringsAreIndexed: + index = self.charStrings[name] + return self.charStringsIndex.getItemAndSelector(index) + else: + if hasattr(self, 
'fdArray'): + if hasattr(self, 'fdSelect'): + sel = self.charStrings[name].fdSelectIndex + else: + sel = 0 + else: + sel = None + return self.charStrings[name], sel + + def toXML(self, xmlWriter, progress): + names = sorted(self.keys()) + i = 0 + step = 10 + numGlyphs = len(names) + for name in names: + charStr, fdSelectIndex = self.getItemAndSelector(name) + if charStr.needsDecompilation(): + raw = [("raw", 1)] + else: + raw = [] + if fdSelectIndex is None: + xmlWriter.begintag("CharString", [('name', name)] + raw) + else: + xmlWriter.begintag( + "CharString", + [('name', name), ('fdSelectIndex', fdSelectIndex)] + raw) + xmlWriter.newline() + charStr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + if not i % step and progress is not None: + progress.setLabel("Dumping 'CFF ' table... (%s)" % name) + progress.increment(step / numGlyphs) + i = i + 1 + + def fromXML(self, name, attrs, content): + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + if name != "CharString": + continue + fdID = -1 + if hasattr(self, "fdArray"): + try: + fdID = safeEval(attrs["fdSelectIndex"]) + except KeyError: + fdID = 0 + private = self.fdArray[fdID].Private + else: + private = self.private + + glyphName = attrs["name"] + charStringClass = psCharStrings.T2CharString + charString = charStringClass( + private=private, + globalSubrs=self.globalSubrs) + charString.fromXML(name, attrs, content) + if fdID >= 0: + charString.fdSelectIndex = fdID + self[glyphName] = charString + + +def readCard8(file): + return byteord(file.read(1)) + + +def readCard16(file): + value, = struct.unpack(">H", file.read(2)) + return value + + +def readCard32(file): + value, = struct.unpack(">L", file.read(4)) + return value + + +def writeCard8(file, value): + file.write(bytechr(value)) + + +def writeCard16(file, value): + file.write(struct.pack(">H", value)) + + +def writeCard32(file, value): + file.write(struct.pack(">L", value)) + 
+ +def packCard8(value): + return bytechr(value) + + +def packCard16(value): + return struct.pack(">H", value) + + +def buildOperatorDict(table): + d = {} + for op, name, arg, default, conv in table: + d[op] = (name, arg) + return d + + +def buildOpcodeDict(table): + d = {} + for op, name, arg, default, conv in table: + if isinstance(op, tuple): + op = bytechr(op[0]) + bytechr(op[1]) + else: + op = bytechr(op) + d[name] = (op, arg) + return d + + +def buildOrder(table): + l = [] + for op, name, arg, default, conv in table: + l.append(name) + return l + + +def buildDefaults(table): + d = {} + for op, name, arg, default, conv in table: + if default is not None: + d[name] = default + return d + + +def buildConverters(table): + d = {} + for op, name, arg, default, conv in table: + d[name] = conv + return d + + +class SimpleConverter(object): + + def read(self, parent, value): + return value + + def write(self, parent, value): + return value + + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return attrs["value"] + + +class ASCIIConverter(SimpleConverter): + + def read(self, parent, value): + return tostr(value, encoding='ascii') + + def write(self, parent, value): + return tobytes(value, encoding='ascii') + + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=tounicode(value, encoding="ascii")) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("ascii")) + + +class Latin1Converter(SimpleConverter): + + def read(self, parent, value): + return tostr(value, encoding='latin1') + + def write(self, parent, value): + return tobytes(value, encoding='latin1') + + def xmlWrite(self, xmlWriter, name, value, progress): + value = tounicode(value, encoding="latin1") + if name in ['Notice', 'Copyright']: + value = re.sub(r"[\r\n]\s+", " ", value) + 
xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("latin1")) + + +def parseNum(s): + try: + value = int(s) + except: + value = float(s) + return value + + +def parseBlendList(s): + valueList = [] + for element in s: + if isinstance(element, basestring): + continue + name, attrs, content = element + blendList = attrs["value"].split() + blendList = [eval(val) for val in blendList] + valueList.append(blendList) + if len(valueList) == 1: + valueList = valueList[0] + return valueList + + +class NumberConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value, progress): + if isinstance(value, list): + xmlWriter.begintag(name) + xmlWriter.newline() + xmlWriter.indent() + blendValue = " ".join([str(val) for val in value]) + xmlWriter.simpletag(kBlendDictOpName, value=blendValue) + xmlWriter.newline() + xmlWriter.dedent() + xmlWriter.endtag(name) + xmlWriter.newline() + else: + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + valueString = attrs.get("value", None) + if valueString is None: + value = parseBlendList(content) + else: + value = parseNum(attrs["value"]) + return value + + +class ArrayConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value, progress): + if value and isinstance(value[0], list): + xmlWriter.begintag(name) + xmlWriter.newline() + xmlWriter.indent() + for valueList in value: + blendValue = " ".join([str(val) for val in valueList]) + xmlWriter.simpletag(kBlendDictOpName, value=blendValue) + xmlWriter.newline() + xmlWriter.dedent() + xmlWriter.endtag(name) + xmlWriter.newline() + else: + value = " ".join([str(val) for val in value]) + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + valueString = attrs.get("value", None) + if valueString is None: + valueList = parseBlendList(content) + 
else: + values = valueString.split() + valueList = [parseNum(value) for value in values] + return valueList + + +class TableConverter(SimpleConverter): + + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.begintag(name) + xmlWriter.newline() + value.toXML(xmlWriter, progress) + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + ob = self.getClass()() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + ob.fromXML(name, attrs, content) + return ob + + +class PrivateDictConverter(TableConverter): + + def getClass(self): + return PrivateDict + + def read(self, parent, value): + size, offset = value + file = parent.file + isCFF2 = parent._isCFF2 + try: + vstore = parent.vstore + except AttributeError: + vstore = None + priv = PrivateDict( + parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore) + file.seek(offset) + data = file.read(size) + assert len(data) == size + priv.decompile(data) + return priv + + def write(self, parent, value): + return (0, 0) # dummy value + + +class SubrsConverter(TableConverter): + + def getClass(self): + return SubrsIndex + + def read(self, parent, value): + file = parent.file + isCFF2 = parent._isCFF2 + file.seek(parent.offset + value) # Offset(self) + return SubrsIndex(file, isCFF2=isCFF2) + + def write(self, parent, value): + return 0 # dummy value + + +class CharStringsConverter(TableConverter): + + def read(self, parent, value): + file = parent.file + isCFF2 = parent._isCFF2 + charset = parent.charset + globalSubrs = parent.GlobalSubrs + if hasattr(parent, "FDArray"): + fdArray = parent.FDArray + if hasattr(parent, "FDSelect"): + fdSelect = parent.FDSelect + else: + fdSelect = None + private = None + else: + fdSelect, fdArray = None, None + private = parent.Private + file.seek(value) # Offset(0) + charStrings = CharStrings( + file, charset, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2) + return 
charStrings + + def write(self, parent, value): + return 0 # dummy value + + def xmlRead(self, name, attrs, content, parent): + if hasattr(parent, "FDArray"): + # if it is a CID-keyed font, then the private Dict is extracted from the + # parent.FDArray + fdArray = parent.FDArray + if hasattr(parent, "FDSelect"): + fdSelect = parent.FDSelect + else: + fdSelect = None + private = None + else: + # if it is a name-keyed font, then the private dict is in the top dict, + # and + # there is no fdArray. + private, fdSelect, fdArray = parent.Private, None, None + charStrings = CharStrings( + None, None, parent.GlobalSubrs, private, fdSelect, fdArray) + charStrings.fromXML(name, attrs, content) + return charStrings + + +class CharsetConverter(object): + def read(self, parent, value): + isCID = hasattr(parent, "ROS") + if value > 2: + numGlyphs = parent.numGlyphs + file = parent.file + file.seek(value) + log.log(DEBUG, "loading charset at %s", value) + format = readCard8(file) + if format == 0: + charset = parseCharset0(numGlyphs, file, parent.strings, isCID) + elif format == 1 or format == 2: + charset = parseCharset(numGlyphs, file, parent.strings, isCID, format) + else: + raise NotImplementedError + assert len(charset) == numGlyphs + log.log(DEBUG, " charset end at %s", file.tell()) + else: # offset == 0 -> no charset data. + if isCID or "CharStrings" not in parent.rawDict: + # We get here only when processing fontDicts from the FDArray of + # CFF-CID fonts. Only the real topDict references the chrset. 
+ assert value == 0 + charset = None + elif value == 0: + charset = cffISOAdobeStrings + elif value == 1: + charset = cffIExpertStrings + elif value == 2: + charset = cffExpertSubsetStrings + if charset and (len(charset) != parent.numGlyphs): + charset = charset[:parent.numGlyphs] + return charset + + def write(self, parent, value): + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value, progress): + # XXX only write charset when not in OT/TTX context, where we + # dump charset as a separate "GlyphOrder" table. + # # xmlWriter.simpletag("charset") + xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + pass + + +class CharsetCompiler(object): + + def __init__(self, strings, charset, parent): + assert charset[0] == '.notdef' + isCID = hasattr(parent.dictObj, "ROS") + data0 = packCharset0(charset, isCID, strings) + data = packCharset(charset, isCID, strings) + if len(data) < len(data0): + self.data = data + else: + self.data = data0 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["charset"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +def getStdCharSet(charset): + # check to see if we can use a predefined charset value. 
+ predefinedCharSetVal = None + predefinedCharSets = [ + (cffISOAdobeStringCount, cffISOAdobeStrings, 0), + (cffExpertStringCount, cffIExpertStrings, 1), + (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2)] + lcs = len(charset) + for cnt, pcs, csv in predefinedCharSets: + if predefinedCharSetVal is not None: + break + if lcs > cnt: + continue + predefinedCharSetVal = csv + for i in range(lcs): + if charset[i] != pcs[i]: + predefinedCharSetVal = None + break + return predefinedCharSetVal + + +def getCIDfromName(name, strings): + return int(name[3:]) + + +def getSIDfromName(name, strings): + return strings.getSID(name) + + +def packCharset0(charset, isCID, strings): + fmt = 0 + data = [packCard8(fmt)] + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + data.append(packCard16(getNameID(name, strings))) + return bytesjoin(data) + + +def packCharset(charset, isCID, strings): + fmt = 1 + ranges = [] + first = None + end = 0 + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + SID = getNameID(name, strings) + if first is None: + first = SID + elif end + 1 != SID: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + first = SID + end = SID + if end: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + + data = [packCard8(fmt)] + if fmt == 1: + nLeftFunc = packCard8 + else: + nLeftFunc = packCard16 + for first, nLeft in ranges: + data.append(packCard16(first) + nLeftFunc(nLeft)) + return bytesjoin(data) + + +def parseCharset0(numGlyphs, file, strings, isCID): + charset = [".notdef"] + if isCID: + for i in range(numGlyphs - 1): + CID = readCard16(file) + charset.append("cid" + str(CID).zfill(5)) + else: + for i in range(numGlyphs - 1): + SID = readCard16(file) + charset.append(strings[SID]) + return charset + + +def parseCharset(numGlyphs, file, strings, isCID, fmt): + charset = ['.notdef'] + 
count = 1 + if fmt == 1: + nLeftFunc = readCard8 + else: + nLeftFunc = readCard16 + while count < numGlyphs: + first = readCard16(file) + nLeft = nLeftFunc(file) + if isCID: + for CID in range(first, first + nLeft + 1): + charset.append("cid" + str(CID).zfill(5)) + else: + for SID in range(first, first + nLeft + 1): + charset.append(strings[SID]) + count = count + nLeft + 1 + return charset + + +class EncodingCompiler(object): + + def __init__(self, strings, encoding, parent): + assert not isinstance(encoding, basestring) + data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings) + data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings) + if len(data0) < len(data1): + self.data = data0 + else: + self.data = data1 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["Encoding"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class EncodingConverter(SimpleConverter): + + def read(self, parent, value): + if value == 0: + return "StandardEncoding" + elif value == 1: + return "ExpertEncoding" + else: + assert value > 1 + file = parent.file + file.seek(value) + log.log(DEBUG, "loading Encoding at %s", value) + fmt = readCard8(file) + haveSupplement = fmt & 0x80 + if haveSupplement: + raise NotImplementedError("Encoding supplements are not yet supported") + fmt = fmt & 0x7f + if fmt == 0: + encoding = parseEncoding0(parent.charset, file, haveSupplement, + parent.strings) + elif fmt == 1: + encoding = parseEncoding1(parent.charset, file, haveSupplement, + parent.strings) + return encoding + + def write(self, parent, value): + if value == "StandardEncoding": + return 0 + elif value == "ExpertEncoding": + return 1 + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value, progress): + if value in ("StandardEncoding", "ExpertEncoding"): + xmlWriter.simpletag(name, name=value) + xmlWriter.newline() + return + xmlWriter.begintag(name) + 
xmlWriter.newline() + for code in range(len(value)): + glyphName = value[code] + if glyphName != ".notdef": + xmlWriter.simpletag("map", code=hex(code), name=glyphName) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + if "name" in attrs: + return attrs["name"] + encoding = [".notdef"] * 256 + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + code = safeEval(attrs["code"]) + glyphName = attrs["name"] + encoding[code] = glyphName + return encoding + + +def parseEncoding0(charset, file, haveSupplement, strings): + nCodes = readCard8(file) + encoding = [".notdef"] * 256 + for glyphID in range(1, nCodes + 1): + code = readCard8(file) + if code != 0: + encoding[code] = charset[glyphID] + return encoding + + +def parseEncoding1(charset, file, haveSupplement, strings): + nRanges = readCard8(file) + encoding = [".notdef"] * 256 + glyphID = 1 + for i in range(nRanges): + code = readCard8(file) + nLeft = readCard8(file) + for glyphID in range(glyphID, glyphID + nLeft + 1): + encoding[code] = charset[glyphID] + code = code + 1 + glyphID = glyphID + 1 + return encoding + + +def packEncoding0(charset, encoding, strings): + fmt = 0 + m = {} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + codes = [] + for name in charset[1:]: + code = m.get(name) + codes.append(code) + + while codes and codes[-1] is None: + codes.pop() + + data = [packCard8(fmt), packCard8(len(codes))] + for code in codes: + if code is None: + code = 0 + data.append(packCard8(code)) + return bytesjoin(data) + + +def packEncoding1(charset, encoding, strings): + fmt = 1 + m = {} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + ranges = [] + first = None + end = 0 + for name in charset[1:]: + code = m.get(name, -1) + if first is None: + first = code + elif end + 1 != code: + 
nLeft = end - first + ranges.append((first, nLeft)) + first = code + end = code + nLeft = end - first + ranges.append((first, nLeft)) + + # remove unencoded glyphs at the end. + while ranges and ranges[-1][0] == -1: + ranges.pop() + + data = [packCard8(fmt), packCard8(len(ranges))] + for first, nLeft in ranges: + if first == -1: # unencoded + first = 0 + data.append(packCard8(first) + packCard8(nLeft)) + return bytesjoin(data) + + +class FDArrayConverter(TableConverter): + + def read(self, parent, value): + try: + vstore = parent.VarStore + except AttributeError: + vstore = None + file = parent.file + isCFF2 = parent._isCFF2 + file.seek(value) + fdArray = FDArrayIndex(file, isCFF2=isCFF2) + fdArray.vstore = vstore + fdArray.strings = parent.strings + fdArray.GlobalSubrs = parent.GlobalSubrs + return fdArray + + def write(self, parent, value): + return 0 # dummy value + + def xmlRead(self, name, attrs, content, parent): + fdArray = FDArrayIndex() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + fdArray.fromXML(name, attrs, content) + return fdArray + + +class FDSelectConverter(object): + + def read(self, parent, value): + file = parent.file + file.seek(value) + fdSelect = FDSelect(file, parent.numGlyphs) + return fdSelect + + def write(self, parent, value): + return 0 # dummy value + + # The FDSelect glyph data is written out to XML in the charstring keys, + # so we write out only the format selector + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, [('format', value.format)]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + fmt = safeEval(attrs["format"]) + file = None + numGlyphs = None + fdSelect = FDSelect(file, numGlyphs, fmt) + return fdSelect + + +class VarStoreConverter(SimpleConverter): + + def read(self, parent, value): + file = parent.file + file.seek(value) + varStore = VarStoreData(file) + varStore.decompile() + return varStore + + 
def write(self, parent, value): + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value, progress): + value.writeXML(xmlWriter, name) + + def xmlRead(self, name, attrs, content, parent): + varStore = VarStoreData() + varStore.xmlRead(name, attrs, content, parent) + return varStore + + +def packFDSelect0(fdSelectArray): + fmt = 0 + data = [packCard8(fmt)] + for index in fdSelectArray: + data.append(packCard8(index)) + return bytesjoin(data) + + +def packFDSelect3(fdSelectArray): + fmt = 3 + fdRanges = [] + lenArray = len(fdSelectArray) + lastFDIndex = -1 + for i in range(lenArray): + fdIndex = fdSelectArray[i] + if lastFDIndex != fdIndex: + fdRanges.append([i, fdIndex]) + lastFDIndex = fdIndex + sentinelGID = i + 1 + + data = [packCard8(fmt)] + data.append(packCard16(len(fdRanges))) + for fdRange in fdRanges: + data.append(packCard16(fdRange[0])) + data.append(packCard8(fdRange[1])) + data.append(packCard16(sentinelGID)) + return bytesjoin(data) + + +class FDSelectCompiler(object): + + def __init__(self, fdSelect, parent): + fmt = fdSelect.format + fdSelectArray = fdSelect.gidArray + if fmt == 0: + self.data = packFDSelect0(fdSelectArray) + elif fmt == 3: + self.data = packFDSelect3(fdSelectArray) + else: + # choose smaller of the two formats + data0 = packFDSelect0(fdSelectArray) + data3 = packFDSelect3(fdSelectArray) + if len(data0) < len(data3): + self.data = data0 + fdSelect.format = 0 + else: + self.data = data3 + fdSelect.format = 3 + + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["FDSelect"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class VarStoreCompiler(object): + + def __init__(self, varStoreData, parent): + self.parent = parent + if not varStoreData.data: + varStoreData.compile() + data = [ + packCard16(len(varStoreData.data)), + varStoreData.data + ] + self.data = bytesjoin(data) + + def setPos(self, pos, endPos): + 
self.parent.rawDict["VarStore"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class ROSConverter(SimpleConverter): + + def xmlWrite(self, xmlWriter, name, value, progress): + registry, order, supplement = value + xmlWriter.simpletag( + name, + [ + ('Registry', tostr(registry)), + ('Order', tostr(order)), + ('Supplement', supplement) + ]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement'])) + +topDictOperators = [ +# opcode name argument type default converter + (25, 'maxstack', 'number', None, None), + ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()), + ((12, 20), 'SyntheticBase', 'number', None, None), + (0, 'version', 'SID', None, None), + (1, 'Notice', 'SID', None, Latin1Converter()), + ((12, 0), 'Copyright', 'SID', None, Latin1Converter()), + (2, 'FullName', 'SID', None, None), + ((12, 38), 'FontName', 'SID', None, None), + (3, 'FamilyName', 'SID', None, None), + (4, 'Weight', 'SID', None, None), + ((12, 1), 'isFixedPitch', 'number', 0, None), + ((12, 2), 'ItalicAngle', 'number', 0, None), + ((12, 3), 'UnderlinePosition', 'number', -100, None), + ((12, 4), 'UnderlineThickness', 'number', 50, None), + ((12, 5), 'PaintType', 'number', 0, None), + ((12, 6), 'CharstringType', 'number', 2, None), + ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), + (13, 'UniqueID', 'number', None, None), + (5, 'FontBBox', 'array', [0, 0, 0, 0], None), + ((12, 8), 'StrokeWidth', 'number', 0, None), + (14, 'XUID', 'array', None, None), + ((12, 21), 'PostScript', 'SID', None, None), + ((12, 22), 'BaseFontName', 'SID', None, None), + ((12, 23), 'BaseFontBlend', 'delta', None, None), + ((12, 31), 'CIDFontVersion', 'number', 0, None), + ((12, 32), 'CIDFontRevision', 'number', 0, None), + ((12, 33), 'CIDFontType', 'number', 0, None), + ((12, 34), 'CIDCount', 'number', 8720, None), + (15, 
'charset', 'number', None, CharsetConverter()), + ((12, 35), 'UIDBase', 'number', None, None), + (16, 'Encoding', 'number', 0, EncodingConverter()), + (18, 'Private', ('number', 'number'), None, PrivateDictConverter()), + ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), + ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), + (17, 'CharStrings', 'number', None, CharStringsConverter()), + (24, 'VarStore', 'number', None, VarStoreConverter()), +] + +topDictOperators2 = [ +# opcode name argument type default converter + (25, 'maxstack', 'number', None, None), + ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), + ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), + ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), + (17, 'CharStrings', 'number', None, CharStringsConverter()), + (24, 'VarStore', 'number', None, VarStoreConverter()), +] + +# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, +# in order for the font to compile back from xml. + +kBlendDictOpName = "blend" +blendOp = 23 + +privateDictOperators = [ +# opcode name argument type default converter + (22, "vsindex", 'number', None, None), + (blendOp, kBlendDictOpName, 'blendList', None, None), # This is for reading to/from XML: it not written to CFF. 
+ (6, 'BlueValues', 'delta', None, None), + (7, 'OtherBlues', 'delta', None, None), + (8, 'FamilyBlues', 'delta', None, None), + (9, 'FamilyOtherBlues', 'delta', None, None), + ((12, 9), 'BlueScale', 'number', 0.039625, None), + ((12, 10), 'BlueShift', 'number', 7, None), + ((12, 11), 'BlueFuzz', 'number', 1, None), + (10, 'StdHW', 'number', None, None), + (11, 'StdVW', 'number', None, None), + ((12, 12), 'StemSnapH', 'delta', None, None), + ((12, 13), 'StemSnapV', 'delta', None, None), + ((12, 14), 'ForceBold', 'number', 0, None), + ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated + ((12, 16), 'lenIV', 'number', None, None), # deprecated + ((12, 17), 'LanguageGroup', 'number', 0, None), + ((12, 18), 'ExpansionFactor', 'number', 0.06, None), + ((12, 19), 'initialRandomSeed', 'number', 0, None), + (20, 'defaultWidthX', 'number', 0, None), + (21, 'nominalWidthX', 'number', 0, None), + (19, 'Subrs', 'number', None, SubrsConverter()), +] + +privateDictOperators2 = [ +# opcode name argument type default converter + (22, "vsindex", 'number', None, None), + (blendOp, kBlendDictOpName, 'blendList', None, None), # This is for reading to/from XML: it not written to CFF. 
+ (6, 'BlueValues', 'delta', None, None), + (7, 'OtherBlues', 'delta', None, None), + (8, 'FamilyBlues', 'delta', None, None), + (9, 'FamilyOtherBlues', 'delta', None, None), + ((12, 9), 'BlueScale', 'number', 0.039625, None), + ((12, 10), 'BlueShift', 'number', 7, None), + ((12, 11), 'BlueFuzz', 'number', 1, None), + (10, 'StdHW', 'number', None, None), + (11, 'StdVW', 'number', None, None), + ((12, 12), 'StemSnapH', 'delta', None, None), + ((12, 13), 'StemSnapV', 'delta', None, None), + (19, 'Subrs', 'number', None, SubrsConverter()), +] + + +def addConverters(table): + for i in range(len(table)): + op, name, arg, default, conv = table[i] + if conv is not None: + continue + if arg in ("delta", "array"): + conv = ArrayConverter() + elif arg == "number": + conv = NumberConverter() + elif arg == "SID": + conv = ASCIIConverter() + elif arg == 'blendList': + conv = None + else: + assert False + table[i] = op, name, arg, default, conv + + +addConverters(privateDictOperators) +addConverters(topDictOperators) + + +class TopDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(topDictOperators) + + +class PrivateDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(privateDictOperators) + + +class DictCompiler(object): + maxBlendStack = 0 + + def __init__(self, dictObj, strings, parent, isCFF2=None): + if strings: + assert isinstance(strings, IndexedStrings) + if isCFF2 is None and hasattr(parent, "isCFF2"): + isCFF2 = parent.isCFF2 + assert isCFF2 is not None + self.isCFF2 = isCFF2 + self.dictObj = dictObj + self.strings = strings + self.parent = parent + rawDict = {} + for name in dictObj.order: + value = getattr(dictObj, name, None) + if value is None: + continue + conv = dictObj.converters[name] + value = conv.write(dictObj, value) + if value == dictObj.defaults.get(name): + continue + rawDict[name] = value + self.rawDict = rawDict + + def setPos(self, pos, endPos): + pass + + def getDataLength(self): + return 
len(self.compile("getDataLength")) + + def compile(self, reason): + log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason) + rawDict = self.rawDict + data = [] + for name in self.dictObj.order: + value = rawDict.get(name) + if value is None: + continue + op, argType = self.opcodes[name] + if isinstance(argType, tuple): + l = len(argType) + assert len(value) == l, "value doesn't match arg type" + for i in range(l): + arg = argType[i] + v = value[i] + arghandler = getattr(self, "arg_" + arg) + data.append(arghandler(v)) + else: + arghandler = getattr(self, "arg_" + argType) + data.append(arghandler(value)) + data.append(op) + data = bytesjoin(data) + return data + + def toFile(self, file): + data = self.compile("toFile") + file.write(data) + + def arg_number(self, num): + if isinstance(num, list): + data = [encodeNumber(val) for val in num] + data.append(encodeNumber(1)) + data.append(bytechr(blendOp)) + datum = bytesjoin(data) + else: + datum = encodeNumber(num) + return datum + + def arg_SID(self, s): + return psCharStrings.encodeIntCFF(self.strings.getSID(s)) + + def arg_array(self, value): + data = [] + for num in value: + data.append(self.arg_number(num)) + return bytesjoin(data) + + def arg_delta(self, value): + if not value: + return b"" + val0 = value[0] + if isinstance(val0, list): + data = self.arg_delta_blend(value) + else: + out = [] + last = 0 + for v in value: + out.append(v - last) + last = v + data = [] + for num in out: + data.append(encodeNumber(num)) + return bytesjoin(data) + + + def arg_delta_blend(self, value): + """ A delta list with blend lists has to be *all* blend lists. + The value is a list is arranged as follows. + [ + [V0, d0..dn] + [V1, d0..dn] + ... + [Vm, d0..dn] + ] + V is the absolute coordinate value from the default font, and d0-dn are + the delta values from the n regions. Each V is an absolute coordinate + from the default font. + We want to return a list: + [ + [v0, v1..vm] + [d0..dn] + ... 
+ [d0..dn] + numBlends + blendOp + ] + where each v is relative to the previous default font value. + """ + numMasters = len(value[0]) + numBlends = len(value) + numStack = (numBlends * numMasters) + 1 + if numStack > self.maxBlendStack: + # Figure out the max number of value we can blend + # and divide this list up into chunks of that size. + + numBlendValues = int((self.maxBlendStack - 1) / numMasters) + out = [] + while True: + numVal = min(len(value), numBlendValues) + if numVal == 0: + break + valList = value[0:numVal] + out1 = self.arg_delta_blend(valList) + out.extend(out1) + value = value[numVal:] + else: + firstList = [0] * numBlends + deltaList = [None] * numBlends + i = 0 + prevVal = 0 + while i < numBlends: + # For PrivateDict BlueValues, the default font + # values are absolute, not relative. + # Must convert these back to relative coordinates + # befor writing to CFF2. + defaultValue = value[i][0] + firstList[i] = defaultValue - prevVal + prevVal = defaultValue + deltaList[i] = value[i][1:] + i += 1 + + relValueList = firstList + for blendList in deltaList: + relValueList.extend(blendList) + out = [encodeNumber(val) for val in relValueList] + out.append(encodeNumber(numBlends)) + out.append(bytechr(blendOp)) + return out + + +def encodeNumber(num): + if isinstance(num, float): + return psCharStrings.encodeFloat(num) + else: + return psCharStrings.encodeIntCFF(num) + + +class TopDictCompiler(DictCompiler): + + opcodes = buildOpcodeDict(topDictOperators) + + def getChildren(self, strings): + isCFF2 = self.isCFF2 + children = [] + if self.dictObj.cff2GetGlyphOrder is None: + if hasattr(self.dictObj, "charset") and self.dictObj.charset: + if hasattr(self.dictObj, "ROS"): # aka isCID + charsetCode = None + else: + charsetCode = getStdCharSet(self.dictObj.charset) + if charsetCode is None: + children.append(CharsetCompiler(strings, self.dictObj.charset, self)) + else: + self.rawDict["charset"] = charsetCode + if hasattr(self.dictObj, "Encoding") and 
self.dictObj.Encoding: + encoding = self.dictObj.Encoding + if not isinstance(encoding, basestring): + children.append(EncodingCompiler(strings, encoding, self)) + else: + if hasattr(self.dictObj, "VarStore"): + varStoreData = self.dictObj.VarStore + varStoreComp = VarStoreCompiler(varStoreData, self) + children.append(varStoreComp) + if hasattr(self.dictObj, "FDSelect"): + # I have not yet supported merging a ttx CFF-CID font, as there are + # interesting issues about merging the FDArrays. Here I assume that + # either the font was read from XML, and the FDSelect indices are all + # in the charstring data, or the FDSelect array is already fully defined. + fdSelect = self.dictObj.FDSelect + # probably read in from XML; assume fdIndex in CharString data + if len(fdSelect) == 0: + charStrings = self.dictObj.CharStrings + for name in self.dictObj.charset: + fdSelect.append(charStrings[name].fdSelectIndex) + fdSelectComp = FDSelectCompiler(fdSelect, self) + children.append(fdSelectComp) + if hasattr(self.dictObj, "CharStrings"): + items = [] + charStrings = self.dictObj.CharStrings + for name in self.dictObj.charset: + items.append(charStrings[name]) + charStringsComp = CharStringsCompiler( + items, strings, self, isCFF2=isCFF2) + children.append(charStringsComp) + if hasattr(self.dictObj, "FDArray"): + # I have not yet supported merging a ttx CFF-CID font, as there are + # interesting issues about merging the FDArrays. Here I assume that the + # FDArray info is correct and complete. 
+ fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self) + children.append(fdArrayIndexComp) + children.extend(fdArrayIndexComp.getChildren(strings)) + if hasattr(self.dictObj, "Private"): + privComp = self.dictObj.Private.getCompiler(strings, self) + children.append(privComp) + children.extend(privComp.getChildren(strings)) + return children + + +class FontDictCompiler(DictCompiler): + opcodes = buildOpcodeDict(topDictOperators) + + def __init__(self, dictObj, strings, parent, isCFF2=None): + super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2) + # + # We now take some effort to detect if there were any key/value pairs + # supplied that were ignored in the FontDict context, and issue a warning + # for those cases. + # + ignoredNames = [] + dictObj = self.dictObj + for name in sorted(set(dictObj.converters) - set(dictObj.order)): + if name in dictObj.rawDict: + # The font was directly read from binary. In this + # case, we want to report *all* "useless" key/value + # pairs that are in the font, not just the ones that + # are different from the default. + ignoredNames.append(name) + else: + # The font was probably read from a TTX file. We only + # warn about keys whos value is not the default. The + # ones that have the default value will not be written + # to binary anyway. 
+ default = dictObj.defaults.get(name) + if default is not None: + conv = dictObj.converters[name] + default = conv.read(dictObj, default) + if getattr(dictObj, name, None) != default: + ignoredNames.append(name) + if ignoredNames: + log.warning( + "Some CFF FDArray/FontDict keys were ignored upon compile: " + + " ".join(sorted(ignoredNames))) + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "Private"): + privComp = self.dictObj.Private.getCompiler(strings, self) + children.append(privComp) + children.extend(privComp.getChildren(strings)) + return children + + +class PrivateDictCompiler(DictCompiler): + + maxBlendStack = maxStackLimit + opcodes = buildOpcodeDict(privateDictOperators) + + def setPos(self, pos, endPos): + size = endPos - pos + self.parent.rawDict["Private"] = size, pos + self.pos = pos + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "Subrs"): + children.append(self.dictObj.Subrs.getCompiler(strings, self)) + return children + + +class BaseDict(object): + + def __init__(self, strings=None, file=None, offset=None, isCFF2=None): + assert (isCFF2 is None) == (file is None) + self.rawDict = {} + self.skipNames = [] + self.strings = strings + if file is None: + return + self._isCFF2 = isCFF2 + self.file = file + if offset is not None: + log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset) + self.offset = offset + + def decompile(self, data): + log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data)) + dec = self.decompilerClass(self.strings, self) + dec.decompile(data) + self.rawDict = dec.getDict() + self.postDecompile() + + def postDecompile(self): + pass + + def getCompiler(self, strings, parent, isCFF2=None): + return self.compilerClass(self, strings, parent, isCFF2=isCFF2) + + def __getattr__(self, name): + value = self.rawDict.get(name, None) + if value is None: + value = self.defaults.get(name) + if value is None: + raise AttributeError(name) + conv = 
self.converters[name] + value = conv.read(self, value) + setattr(self, name, value) + return value + + def toXML(self, xmlWriter, progress): + for name in self.order: + if name in self.skipNames: + continue + value = getattr(self, name, None) + # XXX For "charset" we never skip calling xmlWrite even if the + # value is None, so we always write the following XML comment: + # + # + # + # Charset is None when 'CFF ' table is imported from XML into an + # empty TTFont(). By writing this comment all the time, we obtain + # the same XML output whether roundtripping XML-to-XML or + # dumping binary-to-XML + if value is None and name != "charset": + continue + conv = self.converters[name] + conv.xmlWrite(xmlWriter, name, value, progress) + ignoredNames = set(self.rawDict) - set(self.order) + if ignoredNames: + xmlWriter.comment( + "some keys were ignored: %s" % " ".join(sorted(ignoredNames))) + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + conv = self.converters[name] + value = conv.xmlRead(name, attrs, content, self) + setattr(self, name, value) + + +class TopDict(BaseDict): + + defaults = buildDefaults(topDictOperators) + converters = buildConverters(topDictOperators) + compilerClass = TopDictCompiler + order = buildOrder(topDictOperators) + decompilerClass = TopDictDecompiler + + def __init__(self, strings=None, file=None, offset=None, + GlobalSubrs=None, cff2GetGlyphOrder=None, isCFF2=None): + super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.cff2GetGlyphOrder = cff2GetGlyphOrder + self.GlobalSubrs = GlobalSubrs + if isCFF2: + self.defaults = buildDefaults(topDictOperators2) + self.charset = cff2GetGlyphOrder() + self.order = buildOrder(topDictOperators2) + else: + self.defaults = buildDefaults(topDictOperators) + self.order = buildOrder(topDictOperators) + + def getGlyphOrder(self): + return self.charset + + def postDecompile(self): + offset = self.rawDict.get("CharStrings") + if offset is None: + return + # get the number 
of glyphs beforehand. + self.file.seek(offset) + if self._isCFF2: + self.numGlyphs = readCard32(self.file) + else: + self.numGlyphs = readCard16(self.file) + + def toXML(self, xmlWriter, progress): + if hasattr(self, "CharStrings"): + self.decompileAllCharStrings(progress) + if hasattr(self, "ROS"): + self.skipNames = ['Encoding'] + if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"): + # these values have default values, but I only want them to show up + # in CID fonts. + self.skipNames = [ + 'CIDFontVersion', 'CIDFontRevision', 'CIDFontType', 'CIDCount'] + BaseDict.toXML(self, xmlWriter, progress) + + def decompileAllCharStrings(self, progress): + # Make sure that all the Private Dicts have been instantiated. + i = 0 + for charString in self.CharStrings.values(): + try: + charString.decompile() + except: + log.error("Error in charstring %s", i) + raise + if not i % 30 and progress: + progress.increment(0) # update + i = i + 1 + + def recalcFontBBox(self): + fontBBox = None + for charString in self.CharStrings.values(): + bounds = charString.calcBounds() + if bounds is not None: + if fontBBox is not None: + fontBBox = unionRect(fontBBox, bounds) + else: + fontBBox = bounds + + if fontBBox is None: + self.FontBBox = self.defaults['FontBBox'][:] + else: + self.FontBBox = list(intRect(fontBBox)) + + +class FontDict(BaseDict): + # + # Since fonttools used to pass a lot of fields that are not relevant in the FDArray + # FontDict, there are 'ttx' files in the wild that contain all these. These got in + # the ttx files because fonttools writes explicit values for all the TopDict default + # values. These are not actually illegal in the context of an FDArray FontDict - you + # can legally, per spec, put any arbitrary key/value pair in a FontDict - but are + # useless since current major company CFF interpreters ignore anything but the set + # listed in this file. So, we just silently skip them. 
An exception is Weight: this + # is not used by any interpreter, but some foundries have asked that this be + # supported in FDArray FontDicts just to preserve information about the design when + # the font is being inspected. + # + # On top of that, there are fonts out there that contain such useless FontDict values. + # + # By subclassing TopDict, we *allow* all key/values from TopDict, both when reading + # from binary or when reading from XML, but by overriding `order` with a limited + # list of names, we ensure that only the useful names ever get exported to XML and + # ever get compiled into the binary font. + # + # We override compilerClass so we can warn about "useless" key/value pairs, either + # from the original binary font or from TTX input. + # + # See: + # - https://github.com/fonttools/fonttools/issues/740 + # - https://github.com/fonttools/fonttools/issues/601 + # - https://github.com/adobe-type-tools/afdko/issues/137 + # + defaults = {} + converters = buildConverters(topDictOperators) + compilerClass = FontDictCompiler + order = ['FontName', 'FontMatrix', 'Weight', 'Private'] + decompilerClass = TopDictDecompiler + + def __init__(self, strings=None, file=None, offset=None, + GlobalSubrs=None, isCFF2=None, vstore=None): + super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.vstore = vstore + + +class PrivateDict(BaseDict): + defaults = buildDefaults(privateDictOperators) + converters = buildConverters(privateDictOperators) + order = buildOrder(privateDictOperators) + decompilerClass = PrivateDictDecompiler + compilerClass = PrivateDictCompiler + + def __init__(self, strings=None, file=None, offset=None, isCFF2=None, + vstore=None): + super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.vstore = vstore + if isCFF2: + self.defaults = buildDefaults(privateDictOperators2) + self.order = buildOrder(privateDictOperators2) + else: + self.defaults = buildDefaults(privateDictOperators) + self.order = 
buildOrder(privateDictOperators) + + def getNumRegions(self, vi=None): # called from misc/psCharStrings.py + # if getNumRegions is being called, we can assume that VarStore exists. + if vi is None: + if hasattr(self, 'vsindex'): + vi = self.vsindex + else: + vi = 0 + numRegions = self.vstore.getNumRegions(vi) + return numRegions + + +class IndexedStrings(object): + + """SID -> string mapping.""" + + def __init__(self, file=None): + if file is None: + strings = [] + else: + strings = [ + tostr(s, encoding="latin1") + for s in Index(file, isCFF2=False) + ] + self.strings = strings + + def getCompiler(self): + return IndexedStringsCompiler(self, None, self, isCFF2=False) + + def __len__(self): + return len(self.strings) + + def __getitem__(self, SID): + if SID < cffStandardStringCount: + return cffStandardStrings[SID] + else: + return self.strings[SID - cffStandardStringCount] + + def getSID(self, s): + if not hasattr(self, "stringMapping"): + self.buildStringMapping() + s = tostr(s, encoding="latin1") + if s in cffStandardStringMapping: + SID = cffStandardStringMapping[s] + elif s in self.stringMapping: + SID = self.stringMapping[s] + else: + SID = len(self.strings) + cffStandardStringCount + self.strings.append(s) + self.stringMapping[s] = SID + return SID + + def getStrings(self): + return self.strings + + def buildStringMapping(self): + self.stringMapping = {} + for index in range(len(self.strings)): + self.stringMapping[self.strings[index]] = index + cffStandardStringCount + + +# The 391 Standard Strings as used in the CFF format. 
+# from Adobe Technical None #5176, version 1.0, 18 March 1998 + +cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', + 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', + 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', + 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', + 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', + 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', + 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', + 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', + 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', + 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', + 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', + 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', + 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', + 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', + 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', + 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', + 'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', + 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', + 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', + 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', + 'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', + 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', + 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', + 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 
'Scaron', 'Uacute', + 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', + 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', + 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', + 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', + 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', + 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', + 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', + 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', + 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle', + 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', + 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', + 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', + 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', + 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', + 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', + 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', + 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', + 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', + 'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', + 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', + 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', + 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', + 'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', + 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', + 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', + 'oneinferior', 'twoinferior', 
'threeinferior', 'fourinferior', 'fiveinferior', + 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', + 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', + 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', + 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', + 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', + 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', + 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', + 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', + 'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002', + '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', + 'Semibold' +] + +cffStandardStringCount = 391 +assert len(cffStandardStrings) == cffStandardStringCount +# build reverse mapping +cffStandardStringMapping = {} +for _i in range(cffStandardStringCount): + cffStandardStringMapping[cffStandardStrings[_i]] = _i + +cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign", +"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", +"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", +"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", +"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", +"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", +"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", +"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", +"k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", +"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", +"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", +"quotedblleft", "guillemotleft", 
"guilsinglleft", "guilsinglright", "fi", "fl", +"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", +"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", +"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", +"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", +"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE", +"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", +"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", +"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn", +"threequarters", "twosuperior", "registered", "minus", "eth", "multiply", +"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", +"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", +"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", +"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", +"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute", +"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", +"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis", +"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde", +"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", +"zcaron"] + +cffISOAdobeStringCount = 229 +assert len(cffISOAdobeStrings) == cffISOAdobeStringCount + +cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall", +"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", +"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", +"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle", +"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", +"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon", 
+"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall", +"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", +"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", +"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", +"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", +"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", +"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", +"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", +"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall", +"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", +"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", +"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", +"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth", +"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", +"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior", +"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", +"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", +"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", +"centinferior", "dollarinferior", "periodinferior", "commainferior", +"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall", +"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", +"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", +"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", +"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", +"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", +"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", +"Ydieresissmall"] + 
+cffExpertStringCount = 166 +assert len(cffIExpertStrings) == cffExpertStringCount + +cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle", +"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader", +"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle", +"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", +"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", +"semicolon", "commasuperior", "threequartersemdash", "periodsuperior", +"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", +"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", +"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", +"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah", +"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf", +"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", +"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior", +"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", +"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", +"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", +"eightinferior", "nineinferior", "centinferior", "dollarinferior", +"periodinferior", "commainferior"] + +cffExpertSubsetStringCount = 87 +assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount diff -Nru fonttools-3.0/Lib/fontTools/cffLib/specializer.py fonttools-3.21.2/Lib/fontTools/cffLib/specializer.py --- fonttools-3.0/Lib/fontTools/cffLib/specializer.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/cffLib/specializer.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,551 @@ +# -*- coding: utf-8 -*- + +"""T2CharString operator specializer and generalizer.""" + +from __future__ import print_function, division, absolute_import +from 
fontTools.misc.py23 import * + + +def stringToProgram(string): + if isinstance(string, basestring): + string = string.split() + program = [] + for token in string: + try: + token = int(token) + except ValueError: + try: + token = float(token) + except ValueError: + pass + program.append(token) + return program + +def programToString(program): + return ' '.join(str(x) for x in program) + + +def programToCommands(program): + """Takes a T2CharString program list and returns list of commands. + Each command is a two-tuple of commandname,arg-list. The commandname might + be empty string if no commandname shall be emitted (used for glyph width, + hintmask/cntrmask argument, as well as stray arguments at the end of the + program (¯\_(ツ)_/¯).""" + + width = None + commands = [] + stack = [] + it = iter(program) + for token in it: + if not isinstance(token, basestring): + stack.append(token) + continue + + if width is None and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm', + 'cntrmask', 'hintmask', + 'hmoveto', 'vmoveto', 'rmoveto', + 'endchar'}: + parity = token in {'hmoveto', 'vmoveto'} + if stack and (len(stack) % 2) ^ parity: + width = stack.pop(0) + commands.append(('', [width])) + + if token in {'hintmask', 'cntrmask'}: + if stack: + commands.append(('', stack)) + commands.append((token, [])) + commands.append(('', [next(it)])) + else: + commands.append((token,stack)) + stack = [] + if stack: + commands.append(('', stack)) + return commands + +def commandsToProgram(commands): + """Takes a commands list as returned by programToCommands() and converts + it back to a T2CharString program list.""" + program = [] + for op,args in commands: + program.extend(args) + if op: + program.append(op) + return program + + +def _everyN(el, n): + """Group the list el into groups of size n""" + if len(el) % n != 0: raise ValueError(el) + for i in range(0, len(el), n): + yield el[i:i+n] + + +class _GeneralizerDecombinerCommandsMap(object): + + @staticmethod + def rmoveto(args): + if 
len(args) != 2: raise ValueError(args) + yield ('rmoveto', args) + @staticmethod + def hmoveto(args): + if len(args) != 1: raise ValueError(args) + yield ('rmoveto', [args[0], 0]) + @staticmethod + def vmoveto(args): + if len(args) != 1: raise ValueError(args) + yield ('rmoveto', [0, args[0]]) + + @staticmethod + def rlineto(args): + if not args: raise ValueError(args) + for args in _everyN(args, 2): + yield ('rlineto', args) + @staticmethod + def hlineto(args): + if not args: raise ValueError(args) + it = iter(args) + try: + while True: + yield ('rlineto', [next(it), 0]) + yield ('rlineto', [0, next(it)]) + except StopIteration: + pass + @staticmethod + def vlineto(args): + if not args: raise ValueError(args) + it = iter(args) + try: + while True: + yield ('rlineto', [0, next(it)]) + yield ('rlineto', [next(it), 0]) + except StopIteration: + pass + @staticmethod + def rrcurveto(args): + if not args: raise ValueError(args) + for args in _everyN(args, 6): + yield ('rrcurveto', args) + @staticmethod + def hhcurveto(args): + if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args) + if len(args) % 2 == 1: + yield ('rrcurveto', [args[1], args[0], args[2], args[3], args[4], 0]) + args = args[5:] + for args in _everyN(args, 4): + yield ('rrcurveto', [args[0], 0, args[1], args[2], args[3], 0]) + @staticmethod + def vvcurveto(args): + if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args) + if len(args) % 2 == 1: + yield ('rrcurveto', [args[0], args[1], args[2], args[3], 0, args[4]]) + args = args[5:] + for args in _everyN(args, 4): + yield ('rrcurveto', [0, args[0], args[1], args[2], 0, args[3]]) + @staticmethod + def hvcurveto(args): + if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args) + last_args = None + if len(args) % 2 == 1: + lastStraight = len(args) % 8 == 5 + args, last_args = args[:-5], args[-5:] + it = _everyN(args, 4) + try: + while True: + args = next(it) + yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]]) + 
args = next(it) + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0]) + except StopIteration: + pass + if last_args: + args = last_args + if lastStraight: + yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]]) + else: + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]]) + @staticmethod + def vhcurveto(args): + if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args) + last_args = None + if len(args) % 2 == 1: + lastStraight = len(args) % 8 == 5 + args, last_args = args[:-5], args[-5:] + it = _everyN(args, 4) + try: + while True: + args = next(it) + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0]) + args = next(it) + yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]]) + except StopIteration: + pass + if last_args: + args = last_args + if lastStraight: + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]]) + else: + yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]]) + + @staticmethod + def rcurveline(args): + if len(args) < 8 or len(args) % 6 != 2: raise ValueError(args) + args, last_args = args[:-2], args[-2:] + for args in _everyN(args, 6): + yield ('rrcurveto', args) + yield ('rlineto', last_args) + @staticmethod + def rlinecurve(args): + if len(args) < 8 or len(args) % 2 != 0: raise ValueError(args) + args, last_args = args[:-6], args[-6:] + for args in _everyN(args, 2): + yield ('rlineto', args) + yield ('rrcurveto', last_args) + + +def generalizeCommands(commands, ignoreErrors=False): + result = [] + mapping = _GeneralizerDecombinerCommandsMap + for op,args in commands: + func = getattr(mapping, op, None) + if not func: + result.append((op,args)) + continue + try: + for command in func(args): + result.append(command) + except ValueError: + if ignoreErrors: + # Store op as data, such that consumers of commands do not have to + # deal with incorrect number of arguments. 
+ result.append(('', args)) + result.append(('', [op])) + else: + raise + return result + +def generalizeProgram(program, **kwargs): + return commandsToProgram(generalizeCommands(programToCommands(program), **kwargs)) + + +def _categorizeVector(v): + """ + Takes X,Y vector v and returns one of r, h, v, or 0 depending on which + of X and/or Y are zero, plus tuple of nonzero ones. If both are zero, + it returns a single zero still. + + >>> _categorizeVector((0,0)) + ('0', (0,)) + >>> _categorizeVector((1,0)) + ('h', (1,)) + >>> _categorizeVector((0,2)) + ('v', (2,)) + >>> _categorizeVector((1,2)) + ('r', (1, 2)) + """ + if not v[0]: + if not v[1]: + return '0', v[:1] + else: + return 'v', v[1:] + else: + if not v[1]: + return 'h', v[:1] + else: + return 'r', v + +def _mergeCategories(a, b): + if a == '0': return b + if b == '0': return a + if a == b: return a + return None + +def _negateCategory(a): + if a == 'h': return 'v' + if a == 'v': return 'h' + assert a in '0r' + return a + +def specializeCommands(commands, + ignoreErrors=False, + generalizeFirst=True, + preserveTopology=False, + maxstack=48): + + # We perform several rounds of optimizations. They are carefully ordered and are: + # + # 0. Generalize commands. + # This ensures that they are in our expected simple form, with each line/curve only + # having arguments for one segment, and using the generic form (rlineto/rrcurveto). + # If caller is sure the input is in this form, they can turn off generalization to + # save time. + # + # 1. Combine successive rmoveto operations. + # + # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. + # We specialize into some, made-up, variants as well, which simplifies following + # passes. + # + # 3. Merge or delete redundant operations, to the extent requested. + # OpenType spec declares point numbers in CFF undefined. As such, we happily + # change topology. 
If client relies on point numbers (in GPOS anchors, or for + # hinting purposes(what?)) they can turn this off. + # + # 4. Peephole optimization to revert back some of the h/v variants back into their + # original "relative" operator (rline/rrcurveto) if that saves a byte. + # + # 5. Combine adjacent operators when possible, minding not to go over max stack size. + # + # 6. Resolve any remaining made-up operators into real operators. + # + # I have convinced myself that this produces optimal bytecode (except for, possibly + # one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-) + # A dynamic-programming approach can do the same but would be significantly slower. + + + # 0. Generalize commands. + if generalizeFirst: + commands = generalizeCommands(commands, ignoreErrors=ignoreErrors) + else: + commands = list(commands) # Make copy since we modify in-place later. + + # 1. Combine successive rmoveto operations. + for i in range(len(commands)-1, 0, -1): + if 'rmoveto' == commands[i][0] == commands[i-1][0]: + v1, v2 = commands[i-1][1], commands[i][1] + commands[i-1] = ('rmoveto', [v1[0]+v2[0], v1[1]+v2[1]]) + del commands[i] + + # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. + # + # We, in fact, specialize into more, made-up, variants that special-case when both + # X and Y components are zero. This simplifies the following optimization passes. + # This case is rare, but OCD does not let me skip it. + # + # After this round, we will have four variants that use the following mnemonics: + # + # - 'r' for relative, ie. non-zero X and non-zero Y, + # - 'h' for horizontal, ie. zero X and non-zero Y, + # - 'v' for vertical, ie. non-zero X and zero Y, + # - '0' for zeros, ie. zero X and zero Y. + # + # The '0' pseudo-operators are not part of the spec, but help simplify the following + # optimization rounds. We resolve them at the end. 
So, after this, we will have four + # moveto and four lineto variants: + # + # - 0moveto, 0lineto + # - hmoveto, hlineto + # - vmoveto, vlineto + # - rmoveto, rlineto + # + # and sixteen curveto variants. For example, a '0hcurveto' operator means a curve + # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3. + # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3. + # + # There are nine different variants of curves without the '0'. Those nine map exactly + # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto, + # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of + # arguments and one without. Eg. an hhcurveto with an extra argument (odd number of + # arguments) is in fact an rhcurveto. The operators in the spec are designed such that + # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve. + # + # Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest + # of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be + # thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always + # encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute + # the '0' with either 'h' or 'v' and it works. + # + # When we get to curve splines however, things become more complicated... XXX finish this. + # There's one more complexity with splines. If one side of the spline is not horizontal or + # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode. + # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and + # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'. + # This limits our merge opportunities later. 
+ # + for i in range(len(commands)): + op,args = commands[i] + + if op in {'rmoveto', 'rlineto'}: + c, args = _categorizeVector(args) + commands[i] = c+op[1:], args + continue + + if op == 'rrcurveto': + c1, args1 = _categorizeVector(args[:2]) + c2, args2 = _categorizeVector(args[-2:]) + commands[i] = c1+c2+'curveto', args1+args[2:4]+args2 + continue + + # 3. Merge or delete redundant operations, to the extent requested. + # + # TODO + # A 0moveto that comes before all other path operations can be removed. + # though I find conflicting evidence for this. + # + # TODO + # "If hstem and vstem hints are both declared at the beginning of a + # CharString, and this sequence is followed directly by the hintmask or + # cntrmask operators, then the vstem hint operator (or, if applicable, + # the vstemhm operator) need not be included." + # + # "The sequence and form of a CFF2 CharString program may be represented as: + # {hs* vs* cm* hm* mt subpath}? {mt subpath}*" + # + # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1 + # + # For Type2 CharStrings the sequence is: + # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar" + + + # Some other redundancies change topology (point numbers). + if not preserveTopology: + for i in range(len(commands)-1, -1, -1): + op, args = commands[i] + + # A 00curveto is demoted to a (specialized) lineto. + if op == '00curveto': + assert len(args) == 4 + c, args = _categorizeVector(args[1:3]) + op = c+'lineto' + commands[i] = op, args + # and then... + + # A 0lineto can be deleted. + if op == '0lineto': + del commands[i] + continue + + # Merge adjacent hlineto's and vlineto's. + if i and op in {'hlineto', 'vlineto'} and op == commands[i-1][0]: + _, other_args = commands[i-1] + assert len(args) == 1 and len(other_args) == 1 + commands[i-1] = (op, [other_args[0]+args[0]]) + del commands[i] + continue + + # 4. 
Peephole optimization to revert back some of the h/v variants back into their + # original "relative" operator (rline/rrcurveto) if that saves a byte. + for i in range(1, len(commands)-1): + op,args = commands[i] + prv,nxt = commands[i-1][0], commands[i+1][0] + + if op in {'0lineto', 'hlineto', 'vlineto'} and prv == nxt == 'rlineto': + assert len(args) == 1 + args = [0, args[0]] if op[0] == 'v' else [args[0], 0] + commands[i] = ('rlineto', args) + continue + + if op[2:] == 'curveto' and len(args) == 5 and prv == nxt == 'rrcurveto': + assert (op[0] == 'r') ^ (op[1] == 'r') + args = list(args) + if op[0] == 'v': + pos = 0 + elif op[0] != 'r': + pos = 1 + elif op[1] == 'v': + pos = 4 + else: + pos = 5 + args.insert(pos, 0) + commands[i] = ('rrcurveto', args) + continue + + # 5. Combine adjacent operators when possible, minding not to go over max stack size. + for i in range(len(commands)-1, 0, -1): + op1,args1 = commands[i-1] + op2,args2 = commands[i] + new_op = None + + # Merge logic... + if {op1, op2} <= {'rlineto', 'rrcurveto'}: + if op1 == op2: + new_op = op1 + else: + if op2 == 'rrcurveto' and len(args2) == 6: + new_op = 'rlinecurve' + elif len(args2) == 2: + new_op = 'rcurveline' + + elif (op1, op2) in {('rlineto', 'rlinecurve'), ('rrcurveto', 'rcurveline')}: + new_op = op2 + + elif {op1, op2} == {'vlineto', 'hlineto'}: + new_op = op1 + + elif 'curveto' == op1[2:] == op2[2:]: + d0, d1 = op1[:2] + d2, d3 = op2[:2] + + if d1 == 'r' or d2 == 'r' or d0 == d3 == 'r': + continue + + d = _mergeCategories(d1, d2) + if d is None: continue + if d0 == 'r': + d = _mergeCategories(d, d3) + if d is None: continue + new_op = 'r'+d+'curveto' + elif d3 == 'r': + d0 = _mergeCategories(d0, _negateCategory(d)) + if d0 is None: continue + new_op = d0+'r'+'curveto' + else: + d0 = _mergeCategories(d0, d3) + if d0 is None: continue + new_op = d0+d+'curveto' + + if new_op and len(args1) + len(args2) <= maxstack: + commands[i-1] = (new_op, args1+args2) + del commands[i] + + # 6. 
Resolve any remaining made-up operators into real operators. + for i in range(len(commands)): + op,args = commands[i] + + if op in {'0moveto', '0lineto'}: + commands[i] = 'h'+op[1:], args + continue + + if op[2:] == 'curveto' and op[:2] not in {'rr', 'hh', 'vv', 'vh', 'hv'}: + op0, op1 = op[:2] + if (op0 == 'r') ^ (op1 == 'r'): + assert len(args) % 2 == 1 + if op0 == '0': op0 = 'h' + if op1 == '0': op1 = 'h' + if op0 == 'r': op0 = op1 + if op1 == 'r': op1 = _negateCategory(op0) + assert {op0,op1} <= {'h','v'}, (op0, op1) + + if len(args) % 2: + if op0 != op1: # vhcurveto / hvcurveto + if (op0 == 'h') ^ (len(args) % 8 == 1): + # Swap last two args order + args = args[:-2]+args[-1:]+args[-2:-1] + else: # hhcurveto / vvcurveto + if op0 == 'h': # hhcurveto + # Swap first two args order + args = args[1:2]+args[:1]+args[2:] + + commands[i] = op0+op1+'curveto', args + continue + + return commands + +def specializeProgram(program, **kwargs): + return commandsToProgram(specializeCommands(programToCommands(program), **kwargs)) + + +if __name__ == '__main__': + import sys + if len(sys.argv) == 1: + import doctest + sys.exit(doctest.testmod().failed) + program = stringToProgram(sys.argv[1:]) + print("Program:"); print(programToString(program)) + commands = programToCommands(program) + print("Commands:"); print(commands) + program2 = commandsToProgram(commands) + print("Program from commands:"); print(programToString(program2)) + assert program == program2 + print("Generalized program:"); print(programToString(generalizeProgram(program))) + print("Specialized program:"); print(programToString(specializeProgram(program))) + diff -Nru fonttools-3.0/Lib/fontTools/cffLib.py fonttools-3.21.2/Lib/fontTools/cffLib.py --- fonttools-3.0/Lib/fontTools/cffLib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/cffLib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1810 +0,0 @@ -"""cffLib.py -- read/write tools for Adobe CFF fonts.""" - -from __future__ import 
print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc import psCharStrings -from fontTools.misc.textTools import safeEval -import struct - -DEBUG = 0 - - -cffHeaderFormat = """ - major: B - minor: B - hdrSize: B - offSize: B -""" - -class CFFFontSet(object): - - def __init__(self): - pass - - def decompile(self, file, otFont): - sstruct.unpack(cffHeaderFormat, file.read(4), self) - assert self.major == 1 and self.minor == 0, \ - "unknown CFF format: %d.%d" % (self.major, self.minor) - - file.seek(self.hdrSize) - self.fontNames = list(Index(file)) - self.topDictIndex = TopDictIndex(file) - self.strings = IndexedStrings(file) - self.GlobalSubrs = GlobalSubrsIndex(file) - self.topDictIndex.strings = self.strings - self.topDictIndex.GlobalSubrs = self.GlobalSubrs - - def __len__(self): - return len(self.fontNames) - - def keys(self): - return list(self.fontNames) - - def values(self): - return self.topDictIndex - - def __getitem__(self, name): - try: - index = self.fontNames.index(name) - except ValueError: - raise KeyError(name) - return self.topDictIndex[index] - - def compile(self, file, otFont): - strings = IndexedStrings() - writer = CFFWriter() - writer.add(sstruct.pack(cffHeaderFormat, self)) - fontNames = Index() - for name in self.fontNames: - fontNames.append(name) - writer.add(fontNames.getCompiler(strings, None)) - topCompiler = self.topDictIndex.getCompiler(strings, None) - writer.add(topCompiler) - writer.add(strings.getCompiler()) - writer.add(self.GlobalSubrs.getCompiler(strings, None)) - - for topDict in self.topDictIndex: - if not hasattr(topDict, "charset") or topDict.charset is None: - charset = otFont.getGlyphOrder() - topDict.charset = charset - - for child in topCompiler.getChildren(strings): - writer.add(child) - - writer.toFile(file) - - def toXML(self, xmlWriter, progress=None): - for fontName in self.fontNames: - xmlWriter.begintag("CFFFont", name=tostr(fontName)) 
- xmlWriter.newline() - font = self[fontName] - font.toXML(xmlWriter, progress) - xmlWriter.endtag("CFFFont") - xmlWriter.newline() - xmlWriter.newline() - xmlWriter.begintag("GlobalSubrs") - xmlWriter.newline() - self.GlobalSubrs.toXML(xmlWriter, progress) - xmlWriter.endtag("GlobalSubrs") - xmlWriter.newline() - - def fromXML(self, name, attrs, content): - if not hasattr(self, "GlobalSubrs"): - self.GlobalSubrs = GlobalSubrsIndex() - self.major = 1 - self.minor = 0 - self.hdrSize = 4 - self.offSize = 4 # XXX ?? - if name == "CFFFont": - if not hasattr(self, "fontNames"): - self.fontNames = [] - self.topDictIndex = TopDictIndex() - fontName = attrs["name"] - topDict = TopDict(GlobalSubrs=self.GlobalSubrs) - topDict.charset = None # gets filled in later - self.fontNames.append(fontName) - self.topDictIndex.append(topDict) - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - topDict.fromXML(name, attrs, content) - elif name == "GlobalSubrs": - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - subr = psCharStrings.T2CharString() - subr.fromXML(name, attrs, content) - self.GlobalSubrs.append(subr) - - -class CFFWriter(object): - - def __init__(self): - self.data = [] - - def add(self, table): - self.data.append(table) - - def toFile(self, file): - lastPosList = None - count = 1 - while True: - if DEBUG: - print("CFFWriter.toFile() iteration:", count) - count = count + 1 - pos = 0 - posList = [pos] - for item in self.data: - if hasattr(item, "getDataLength"): - endPos = pos + item.getDataLength() - else: - endPos = pos + len(item) - if hasattr(item, "setPos"): - item.setPos(pos, endPos) - pos = endPos - posList.append(pos) - if posList == lastPosList: - break - lastPosList = posList - if DEBUG: - print("CFFWriter.toFile() writing to file.") - begin = file.tell() - posList = [0] - for item in self.data: - if hasattr(item, "toFile"): - item.toFile(file) - 
else: - file.write(item) - posList.append(file.tell() - begin) - assert posList == lastPosList - - -def calcOffSize(largestOffset): - if largestOffset < 0x100: - offSize = 1 - elif largestOffset < 0x10000: - offSize = 2 - elif largestOffset < 0x1000000: - offSize = 3 - else: - offSize = 4 - return offSize - - -class IndexCompiler(object): - - def __init__(self, items, strings, parent): - self.items = self.getItems(items, strings) - self.parent = parent - - def getItems(self, items, strings): - return items - - def getOffsets(self): - pos = 1 - offsets = [pos] - for item in self.items: - if hasattr(item, "getDataLength"): - pos = pos + item.getDataLength() - else: - pos = pos + len(item) - offsets.append(pos) - return offsets - - def getDataLength(self): - lastOffset = self.getOffsets()[-1] - offSize = calcOffSize(lastOffset) - dataLength = ( - 2 + # count - 1 + # offSize - (len(self.items) + 1) * offSize + # the offsets - lastOffset - 1 # size of object data - ) - return dataLength - - def toFile(self, file): - offsets = self.getOffsets() - writeCard16(file, len(self.items)) - offSize = calcOffSize(offsets[-1]) - writeCard8(file, offSize) - offSize = -offSize - pack = struct.pack - for offset in offsets: - binOffset = pack(">l", offset)[offSize:] - assert len(binOffset) == -offSize - file.write(binOffset) - for item in self.items: - if hasattr(item, "toFile"): - item.toFile(file) - else: - file.write(tobytes(item, encoding="latin1")) - - -class IndexedStringsCompiler(IndexCompiler): - - def getItems(self, items, strings): - return items.strings - - -class TopDictIndexCompiler(IndexCompiler): - - def getItems(self, items, strings): - out = [] - for item in items: - out.append(item.getCompiler(strings, self)) - return out - - def getChildren(self, strings): - children = [] - for topDict in self.items: - children.extend(topDict.getChildren(strings)) - return children - - -class FDArrayIndexCompiler(IndexCompiler): - - def getItems(self, items, strings): - out = [] - 
for item in items: - out.append(item.getCompiler(strings, self)) - return out - - def getChildren(self, strings): - children = [] - for fontDict in self.items: - children.extend(fontDict.getChildren(strings)) - return children - - def toFile(self, file): - offsets = self.getOffsets() - writeCard16(file, len(self.items)) - offSize = calcOffSize(offsets[-1]) - writeCard8(file, offSize) - offSize = -offSize - pack = struct.pack - for offset in offsets: - binOffset = pack(">l", offset)[offSize:] - assert len(binOffset) == -offSize - file.write(binOffset) - for item in self.items: - if hasattr(item, "toFile"): - item.toFile(file) - else: - file.write(item) - - def setPos(self, pos, endPos): - self.parent.rawDict["FDArray"] = pos - - -class GlobalSubrsCompiler(IndexCompiler): - def getItems(self, items, strings): - out = [] - for cs in items: - cs.compile() - out.append(cs.bytecode) - return out - -class SubrsCompiler(GlobalSubrsCompiler): - def setPos(self, pos, endPos): - offset = pos - self.parent.pos - self.parent.rawDict["Subrs"] = offset - -class CharStringsCompiler(GlobalSubrsCompiler): - def setPos(self, pos, endPos): - self.parent.rawDict["CharStrings"] = pos - - -class Index(object): - - """This class represents what the CFF spec calls an INDEX.""" - - compilerClass = IndexCompiler - - def __init__(self, file=None): - self.items = [] - name = self.__class__.__name__ - if file is None: - return - if DEBUG: - print("loading %s at %s" % (name, file.tell())) - self.file = file - count = readCard16(file) - if count == 0: - return - self.items = [None] * count - offSize = readCard8(file) - if DEBUG: - print(" index count: %s offSize: %s" % (count, offSize)) - assert offSize <= 4, "offSize too large: %s" % offSize - self.offsets = offsets = [] - pad = b'\0' * (4 - offSize) - for index in range(count+1): - chunk = file.read(offSize) - chunk = pad + chunk - offset, = struct.unpack(">L", chunk) - offsets.append(int(offset)) - self.offsetBase = file.tell() - 1 - 
file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot - if DEBUG: - print(" end of %s at %s" % (name, file.tell())) - - def __len__(self): - return len(self.items) - - def __getitem__(self, index): - item = self.items[index] - if item is not None: - return item - offset = self.offsets[index] + self.offsetBase - size = self.offsets[index+1] - self.offsets[index] - file = self.file - file.seek(offset) - data = file.read(size) - assert len(data) == size - item = self.produceItem(index, data, file, offset, size) - self.items[index] = item - return item - - def produceItem(self, index, data, file, offset, size): - return data - - def append(self, item): - self.items.append(item) - - def getCompiler(self, strings, parent): - return self.compilerClass(self, strings, parent) - - -class GlobalSubrsIndex(Index): - - compilerClass = GlobalSubrsCompiler - - def __init__(self, file=None, globalSubrs=None, private=None, fdSelect=None, fdArray=None): - Index.__init__(self, file) - self.globalSubrs = globalSubrs - self.private = private - if fdSelect: - self.fdSelect = fdSelect - if fdArray: - self.fdArray = fdArray - - def produceItem(self, index, data, file, offset, size): - if self.private is not None: - private = self.private - elif hasattr(self, 'fdArray') and self.fdArray is not None: - private = self.fdArray[self.fdSelect[index]].Private - else: - private = None - return psCharStrings.T2CharString(data, private=private, globalSubrs=self.globalSubrs) - - def toXML(self, xmlWriter, progress): - xmlWriter.comment("The 'index' attribute is only for humans; it is ignored when parsed.") - xmlWriter.newline() - for i in range(len(self)): - subr = self[i] - if subr.needsDecompilation(): - xmlWriter.begintag("CharString", index=i, raw=1) - else: - xmlWriter.begintag("CharString", index=i) - xmlWriter.newline() - subr.toXML(xmlWriter) - xmlWriter.endtag("CharString") - xmlWriter.newline() - - def fromXML(self, name, attrs, content): - if name != "CharString": - 
return - subr = psCharStrings.T2CharString() - subr.fromXML(name, attrs, content) - self.append(subr) - - def getItemAndSelector(self, index): - sel = None - if hasattr(self, 'fdSelect'): - sel = self.fdSelect[index] - return self[index], sel - - -class SubrsIndex(GlobalSubrsIndex): - compilerClass = SubrsCompiler - - -class TopDictIndex(Index): - - compilerClass = TopDictIndexCompiler - - def produceItem(self, index, data, file, offset, size): - top = TopDict(self.strings, file, offset, self.GlobalSubrs) - top.decompile(data) - return top - - def toXML(self, xmlWriter, progress): - for i in range(len(self)): - xmlWriter.begintag("FontDict", index=i) - xmlWriter.newline() - self[i].toXML(xmlWriter, progress) - xmlWriter.endtag("FontDict") - xmlWriter.newline() - - -class FDArrayIndex(TopDictIndex): - - compilerClass = FDArrayIndexCompiler - - def fromXML(self, name, attrs, content): - if name != "FontDict": - return - fontDict = FontDict() - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - fontDict.fromXML(name, attrs, content) - self.append(fontDict) - - -class FDSelect: - def __init__(self, file=None, numGlyphs=None, format=None): - if file: - # read data in from file - self.format = readCard8(file) - if self.format == 0: - from array import array - self.gidArray = array("B", file.read(numGlyphs)).tolist() - elif self.format == 3: - gidArray = [None] * numGlyphs - nRanges = readCard16(file) - fd = None - prev = None - for i in range(nRanges): - first = readCard16(file) - if prev is not None: - for glyphID in range(prev, first): - gidArray[glyphID] = fd - prev = first - fd = readCard8(file) - if prev is not None: - first = readCard16(file) - for glyphID in range(prev, first): - gidArray[glyphID] = fd - self.gidArray = gidArray - else: - assert False, "unsupported FDSelect format: %s" % format - else: - # reading from XML. Make empty gidArray,, and leave format as passed in. 
- # format is None will result in the smallest representation being used. - self.format = format - self.gidArray = [] - - def __len__(self): - return len(self.gidArray) - - def __getitem__(self, index): - return self.gidArray[index] - - def __setitem__(self, index, fdSelectValue): - self.gidArray[index] = fdSelectValue - - def append(self, fdSelectValue): - self.gidArray.append(fdSelectValue) - - -class CharStrings(object): - - def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray): - if file is not None: - self.charStringsIndex = SubrsIndex(file, globalSubrs, private, fdSelect, fdArray) - self.charStrings = charStrings = {} - for i in range(len(charset)): - charStrings[charset[i]] = i - self.charStringsAreIndexed = 1 - else: - self.charStrings = {} - self.charStringsAreIndexed = 0 - self.globalSubrs = globalSubrs - self.private = private - if fdSelect is not None: - self.fdSelect = fdSelect - if fdArray is not None: - self.fdArray = fdArray - - def keys(self): - return list(self.charStrings.keys()) - - def values(self): - if self.charStringsAreIndexed: - return self.charStringsIndex - else: - return list(self.charStrings.values()) - - def has_key(self, name): - return name in self.charStrings - - __contains__ = has_key - - def __len__(self): - return len(self.charStrings) - - def __getitem__(self, name): - charString = self.charStrings[name] - if self.charStringsAreIndexed: - charString = self.charStringsIndex[charString] - return charString - - def __setitem__(self, name, charString): - if self.charStringsAreIndexed: - index = self.charStrings[name] - self.charStringsIndex[index] = charString - else: - self.charStrings[name] = charString - - def getItemAndSelector(self, name): - if self.charStringsAreIndexed: - index = self.charStrings[name] - return self.charStringsIndex.getItemAndSelector(index) - else: - if hasattr(self, 'fdSelect'): - sel = self.fdSelect[index] # index is not defined at this point. Read R. ? 
- else: - raise KeyError("fdSelect array not yet defined.") - return self.charStrings[name], sel - - def toXML(self, xmlWriter, progress): - names = sorted(self.keys()) - i = 0 - step = 10 - numGlyphs = len(names) - for name in names: - charStr, fdSelectIndex = self.getItemAndSelector(name) - if charStr.needsDecompilation(): - raw = [("raw", 1)] - else: - raw = [] - if fdSelectIndex is None: - xmlWriter.begintag("CharString", [('name', name)] + raw) - else: - xmlWriter.begintag("CharString", - [('name', name), ('fdSelectIndex', fdSelectIndex)] + raw) - xmlWriter.newline() - charStr.toXML(xmlWriter) - xmlWriter.endtag("CharString") - xmlWriter.newline() - if not i % step and progress is not None: - progress.setLabel("Dumping 'CFF ' table... (%s)" % name) - progress.increment(step / numGlyphs) - i = i + 1 - - def fromXML(self, name, attrs, content): - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - if name != "CharString": - continue - fdID = -1 - if hasattr(self, "fdArray"): - fdID = safeEval(attrs["fdSelectIndex"]) - private = self.fdArray[fdID].Private - else: - private = self.private - - glyphName = attrs["name"] - charString = psCharStrings.T2CharString( - private=private, - globalSubrs=self.globalSubrs) - charString.fromXML(name, attrs, content) - if fdID >= 0: - charString.fdSelectIndex = fdID - self[glyphName] = charString - - -def readCard8(file): - return byteord(file.read(1)) - -def readCard16(file): - value, = struct.unpack(">H", file.read(2)) - return value - -def writeCard8(file, value): - file.write(bytechr(value)) - -def writeCard16(file, value): - file.write(struct.pack(">H", value)) - -def packCard8(value): - return bytechr(value) - -def packCard16(value): - return struct.pack(">H", value) - -def buildOperatorDict(table): - d = {} - for op, name, arg, default, conv in table: - d[op] = (name, arg) - return d - -def buildOpcodeDict(table): - d = {} - for op, name, arg, default, conv in table: 
- if isinstance(op, tuple): - op = bytechr(op[0]) + bytechr(op[1]) - else: - op = bytechr(op) - d[name] = (op, arg) - return d - -def buildOrder(table): - l = [] - for op, name, arg, default, conv in table: - l.append(name) - return l - -def buildDefaults(table): - d = {} - for op, name, arg, default, conv in table: - if default is not None: - d[name] = default - return d - -def buildConverters(table): - d = {} - for op, name, arg, default, conv in table: - d[name] = conv - return d - - -class SimpleConverter(object): - def read(self, parent, value): - return value - def write(self, parent, value): - return value - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, value=value) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - return attrs["value"] - -class ASCIIConverter(SimpleConverter): - def read(self, parent, value): - return tostr(value, encoding='ascii') - def write(self, parent, value): - return tobytes(value, encoding='ascii') - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, value=tounicode(value, encoding="ascii")) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - return tobytes(attrs["value"], encoding=("ascii")) - -class Latin1Converter(SimpleConverter): - def read(self, parent, value): - return tostr(value, encoding='latin1') - def write(self, parent, value): - return tobytes(value, encoding='latin1') - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, value=tounicode(value, encoding="latin1")) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - return tobytes(attrs["value"], encoding=("latin1")) - - -def parseNum(s): - try: - value = int(s) - except: - value = float(s) - return value - -class NumberConverter(SimpleConverter): - def xmlRead(self, name, attrs, content, parent): - return parseNum(attrs["value"]) - -class ArrayConverter(SimpleConverter): - def xmlWrite(self, xmlWriter, 
name, value, progress): - value = " ".join(map(str, value)) - xmlWriter.simpletag(name, value=value) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - values = attrs["value"].split() - return [parseNum(value) for value in values] - -class TableConverter(SimpleConverter): - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.begintag(name) - xmlWriter.newline() - value.toXML(xmlWriter, progress) - xmlWriter.endtag(name) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - ob = self.getClass()() - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - ob.fromXML(name, attrs, content) - return ob - -class PrivateDictConverter(TableConverter): - def getClass(self): - return PrivateDict - def read(self, parent, value): - size, offset = value - file = parent.file - priv = PrivateDict(parent.strings, file, offset) - file.seek(offset) - data = file.read(size) - assert len(data) == size - priv.decompile(data) - return priv - def write(self, parent, value): - return (0, 0) # dummy value - -class SubrsConverter(TableConverter): - def getClass(self): - return SubrsIndex - def read(self, parent, value): - file = parent.file - file.seek(parent.offset + value) # Offset(self) - return SubrsIndex(file) - def write(self, parent, value): - return 0 # dummy value - -class CharStringsConverter(TableConverter): - def read(self, parent, value): - file = parent.file - charset = parent.charset - globalSubrs = parent.GlobalSubrs - if hasattr(parent, "ROS"): - fdSelect, fdArray = parent.FDSelect, parent.FDArray - private = None - else: - fdSelect, fdArray = None, None - private = parent.Private - file.seek(value) # Offset(0) - return CharStrings(file, charset, globalSubrs, private, fdSelect, fdArray) - def write(self, parent, value): - return 0 # dummy value - def xmlRead(self, name, attrs, content, parent): - if hasattr(parent, "ROS"): - # if it is a CID-keyed font, then the 
private Dict is extracted from the parent.FDArray - private, fdSelect, fdArray = None, parent.FDSelect, parent.FDArray - else: - # if it is a name-keyed font, then the private dict is in the top dict, and there is no fdArray. - private, fdSelect, fdArray = parent.Private, None, None - charStrings = CharStrings(None, None, parent.GlobalSubrs, private, fdSelect, fdArray) - charStrings.fromXML(name, attrs, content) - return charStrings - -class CharsetConverter(object): - def read(self, parent, value): - isCID = hasattr(parent, "ROS") - if value > 2: - numGlyphs = parent.numGlyphs - file = parent.file - file.seek(value) - if DEBUG: - print("loading charset at %s" % value) - format = readCard8(file) - if format == 0: - charset = parseCharset0(numGlyphs, file, parent.strings, isCID) - elif format == 1 or format == 2: - charset = parseCharset(numGlyphs, file, parent.strings, isCID, format) - else: - raise NotImplementedError - assert len(charset) == numGlyphs - if DEBUG: - print(" charset end at %s" % file.tell()) - else: # offset == 0 -> no charset data. - if isCID or "CharStrings" not in parent.rawDict: - assert value == 0 # We get here only when processing fontDicts from the FDArray of CFF-CID fonts. Only the real topDict references the chrset. - charset = None - elif value == 0: - charset = cffISOAdobeStrings - elif value == 1: - charset = cffIExpertStrings - elif value == 2: - charset = cffExpertSubsetStrings - return charset - - def write(self, parent, value): - return 0 # dummy value - def xmlWrite(self, xmlWriter, name, value, progress): - # XXX only write charset when not in OT/TTX context, where we - # dump charset as a separate "GlyphOrder" table. 
- ##xmlWriter.simpletag("charset") - xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - if 0: - return safeEval(attrs["value"]) - - -class CharsetCompiler(object): - - def __init__(self, strings, charset, parent): - assert charset[0] == '.notdef' - isCID = hasattr(parent.dictObj, "ROS") - data0 = packCharset0(charset, isCID, strings) - data = packCharset(charset, isCID, strings) - if len(data) < len(data0): - self.data = data - else: - self.data = data0 - self.parent = parent - - def setPos(self, pos, endPos): - self.parent.rawDict["charset"] = pos - - def getDataLength(self): - return len(self.data) - - def toFile(self, file): - file.write(self.data) - - -def getCIDfromName(name, strings): - return int(name[3:]) - -def getSIDfromName(name, strings): - return strings.getSID(name) - -def packCharset0(charset, isCID, strings): - fmt = 0 - data = [packCard8(fmt)] - if isCID: - getNameID = getCIDfromName - else: - getNameID = getSIDfromName - - for name in charset[1:]: - data.append(packCard16(getNameID(name,strings))) - return bytesjoin(data) - - -def packCharset(charset, isCID, strings): - fmt = 1 - ranges = [] - first = None - end = 0 - if isCID: - getNameID = getCIDfromName - else: - getNameID = getSIDfromName - - for name in charset[1:]: - SID = getNameID(name, strings) - if first is None: - first = SID - elif end + 1 != SID: - nLeft = end - first - if nLeft > 255: - fmt = 2 - ranges.append((first, nLeft)) - first = SID - end = SID - if end: - nLeft = end - first - if nLeft > 255: - fmt = 2 - ranges.append((first, nLeft)) - - data = [packCard8(fmt)] - if fmt == 1: - nLeftFunc = packCard8 - else: - nLeftFunc = packCard16 - for first, nLeft in ranges: - data.append(packCard16(first) + nLeftFunc(nLeft)) - return bytesjoin(data) - -def parseCharset0(numGlyphs, file, strings, isCID): - charset = [".notdef"] - if isCID: - for i in range(numGlyphs - 1): - CID = 
readCard16(file) - charset.append("cid" + str(CID).zfill(5)) - else: - for i in range(numGlyphs - 1): - SID = readCard16(file) - charset.append(strings[SID]) - return charset - -def parseCharset(numGlyphs, file, strings, isCID, fmt): - charset = ['.notdef'] - count = 1 - if fmt == 1: - nLeftFunc = readCard8 - else: - nLeftFunc = readCard16 - while count < numGlyphs: - first = readCard16(file) - nLeft = nLeftFunc(file) - if isCID: - for CID in range(first, first+nLeft+1): - charset.append("cid" + str(CID).zfill(5)) - else: - for SID in range(first, first+nLeft+1): - charset.append(strings[SID]) - count = count + nLeft + 1 - return charset - - -class EncodingCompiler(object): - - def __init__(self, strings, encoding, parent): - assert not isinstance(encoding, basestring) - data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings) - data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings) - if len(data0) < len(data1): - self.data = data0 - else: - self.data = data1 - self.parent = parent - - def setPos(self, pos, endPos): - self.parent.rawDict["Encoding"] = pos - - def getDataLength(self): - return len(self.data) - - def toFile(self, file): - file.write(self.data) - - -class EncodingConverter(SimpleConverter): - - def read(self, parent, value): - if value == 0: - return "StandardEncoding" - elif value == 1: - return "ExpertEncoding" - else: - assert value > 1 - file = parent.file - file.seek(value) - if DEBUG: - print("loading Encoding at %s" % value) - fmt = readCard8(file) - haveSupplement = fmt & 0x80 - if haveSupplement: - raise NotImplementedError("Encoding supplements are not yet supported") - fmt = fmt & 0x7f - if fmt == 0: - encoding = parseEncoding0(parent.charset, file, haveSupplement, - parent.strings) - elif fmt == 1: - encoding = parseEncoding1(parent.charset, file, haveSupplement, - parent.strings) - return encoding - - def write(self, parent, value): - if value == "StandardEncoding": - return 0 - elif value == 
"ExpertEncoding": - return 1 - return 0 # dummy value - - def xmlWrite(self, xmlWriter, name, value, progress): - if value in ("StandardEncoding", "ExpertEncoding"): - xmlWriter.simpletag(name, name=value) - xmlWriter.newline() - return - xmlWriter.begintag(name) - xmlWriter.newline() - for code in range(len(value)): - glyphName = value[code] - if glyphName != ".notdef": - xmlWriter.simpletag("map", code=hex(code), name=glyphName) - xmlWriter.newline() - xmlWriter.endtag(name) - xmlWriter.newline() - - def xmlRead(self, name, attrs, content, parent): - if "name" in attrs: - return attrs["name"] - encoding = [".notdef"] * 256 - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - code = safeEval(attrs["code"]) - glyphName = attrs["name"] - encoding[code] = glyphName - return encoding - - -def parseEncoding0(charset, file, haveSupplement, strings): - nCodes = readCard8(file) - encoding = [".notdef"] * 256 - for glyphID in range(1, nCodes + 1): - code = readCard8(file) - if code != 0: - encoding[code] = charset[glyphID] - return encoding - -def parseEncoding1(charset, file, haveSupplement, strings): - nRanges = readCard8(file) - encoding = [".notdef"] * 256 - glyphID = 1 - for i in range(nRanges): - code = readCard8(file) - nLeft = readCard8(file) - for glyphID in range(glyphID, glyphID + nLeft + 1): - encoding[code] = charset[glyphID] - code = code + 1 - glyphID = glyphID + 1 - return encoding - -def packEncoding0(charset, encoding, strings): - fmt = 0 - m = {} - for code in range(len(encoding)): - name = encoding[code] - if name != ".notdef": - m[name] = code - codes = [] - for name in charset[1:]: - code = m.get(name) - codes.append(code) - - while codes and codes[-1] is None: - codes.pop() - - data = [packCard8(fmt), packCard8(len(codes))] - for code in codes: - if code is None: - code = 0 - data.append(packCard8(code)) - return bytesjoin(data) - -def packEncoding1(charset, encoding, strings): - fmt = 1 - m = 
{} - for code in range(len(encoding)): - name = encoding[code] - if name != ".notdef": - m[name] = code - ranges = [] - first = None - end = 0 - for name in charset[1:]: - code = m.get(name, -1) - if first is None: - first = code - elif end + 1 != code: - nLeft = end - first - ranges.append((first, nLeft)) - first = code - end = code - nLeft = end - first - ranges.append((first, nLeft)) - - # remove unencoded glyphs at the end. - while ranges and ranges[-1][0] == -1: - ranges.pop() - - data = [packCard8(fmt), packCard8(len(ranges))] - for first, nLeft in ranges: - if first == -1: # unencoded - first = 0 - data.append(packCard8(first) + packCard8(nLeft)) - return bytesjoin(data) - - -class FDArrayConverter(TableConverter): - - def read(self, parent, value): - file = parent.file - file.seek(value) - fdArray = FDArrayIndex(file) - fdArray.strings = parent.strings - fdArray.GlobalSubrs = parent.GlobalSubrs - return fdArray - - def write(self, parent, value): - return 0 # dummy value - - def xmlRead(self, name, attrs, content, parent): - fdArray = FDArrayIndex() - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - fdArray.fromXML(name, attrs, content) - return fdArray - - -class FDSelectConverter(object): - - def read(self, parent, value): - file = parent.file - file.seek(value) - fdSelect = FDSelect(file, parent.numGlyphs) - return fdSelect - - def write(self, parent, value): - return 0 # dummy value - - # The FDSelect glyph data is written out to XML in the charstring keys, - # so we write out only the format selector - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, [('format', value.format)]) - xmlWriter.newline() - - def xmlRead(self, name, attrs, content, parent): - fmt = safeEval(attrs["format"]) - file = None - numGlyphs = None - fdSelect = FDSelect(file, numGlyphs, fmt) - return fdSelect - - -def packFDSelect0(fdSelectArray): - fmt = 0 - data = [packCard8(fmt)] - for 
index in fdSelectArray: - data.append(packCard8(index)) - return bytesjoin(data) - - -def packFDSelect3(fdSelectArray): - fmt = 3 - fdRanges = [] - first = None - end = 0 - lenArray = len(fdSelectArray) - lastFDIndex = -1 - for i in range(lenArray): - fdIndex = fdSelectArray[i] - if lastFDIndex != fdIndex: - fdRanges.append([i, fdIndex]) - lastFDIndex = fdIndex - sentinelGID = i + 1 - - data = [packCard8(fmt)] - data.append(packCard16( len(fdRanges) )) - for fdRange in fdRanges: - data.append(packCard16(fdRange[0])) - data.append(packCard8(fdRange[1])) - data.append(packCard16(sentinelGID)) - return bytesjoin(data) - - -class FDSelectCompiler(object): - - def __init__(self, fdSelect, parent): - fmt = fdSelect.format - fdSelectArray = fdSelect.gidArray - if fmt == 0: - self.data = packFDSelect0(fdSelectArray) - elif fmt == 3: - self.data = packFDSelect3(fdSelectArray) - else: - # choose smaller of the two formats - data0 = packFDSelect0(fdSelectArray) - data3 = packFDSelect3(fdSelectArray) - if len(data0) < len(data3): - self.data = data0 - fdSelect.format = 0 - else: - self.data = data3 - fdSelect.format = 3 - - self.parent = parent - - def setPos(self, pos, endPos): - self.parent.rawDict["FDSelect"] = pos - - def getDataLength(self): - return len(self.data) - - def toFile(self, file): - file.write(self.data) - - -class ROSConverter(SimpleConverter): - - def xmlWrite(self, xmlWriter, name, value, progress): - registry, order, supplement = value - xmlWriter.simpletag(name, [('Registry', tostr(registry)), ('Order', tostr(order)), - ('Supplement', supplement)]) - xmlWriter.newline() - - def xmlRead(self, name, attrs, content, parent): - return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement'])) - - -topDictOperators = [ -# opcode name argument type default converter - ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()), - ((12, 20), 'SyntheticBase', 'number', None, None), - (0, 'version', 'SID', None, None), - (1, 'Notice', 'SID', None, 
Latin1Converter()), - ((12, 0), 'Copyright', 'SID', None, Latin1Converter()), - (2, 'FullName', 'SID', None, None), - ((12, 38), 'FontName', 'SID', None, None), - (3, 'FamilyName', 'SID', None, None), - (4, 'Weight', 'SID', None, None), - ((12, 1), 'isFixedPitch', 'number', 0, None), - ((12, 2), 'ItalicAngle', 'number', 0, None), - ((12, 3), 'UnderlinePosition', 'number', None, None), - ((12, 4), 'UnderlineThickness', 'number', 50, None), - ((12, 5), 'PaintType', 'number', 0, None), - ((12, 6), 'CharstringType', 'number', 2, None), - ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), - (13, 'UniqueID', 'number', None, None), - (5, 'FontBBox', 'array', [0, 0, 0, 0], None), - ((12, 8), 'StrokeWidth', 'number', 0, None), - (14, 'XUID', 'array', None, None), - ((12, 21), 'PostScript', 'SID', None, None), - ((12, 22), 'BaseFontName', 'SID', None, None), - ((12, 23), 'BaseFontBlend', 'delta', None, None), - ((12, 31), 'CIDFontVersion', 'number', 0, None), - ((12, 32), 'CIDFontRevision', 'number', 0, None), - ((12, 33), 'CIDFontType', 'number', 0, None), - ((12, 34), 'CIDCount', 'number', 8720, None), - (15, 'charset', 'number', 0, CharsetConverter()), - ((12, 35), 'UIDBase', 'number', None, None), - (16, 'Encoding', 'number', 0, EncodingConverter()), - (18, 'Private', ('number', 'number'), None, PrivateDictConverter()), - ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), - ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), - (17, 'CharStrings', 'number', None, CharStringsConverter()), -] - -# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, -# in order for the font to compile back from xml. 
- - -privateDictOperators = [ -# opcode name argument type default converter - (6, 'BlueValues', 'delta', None, None), - (7, 'OtherBlues', 'delta', None, None), - (8, 'FamilyBlues', 'delta', None, None), - (9, 'FamilyOtherBlues', 'delta', None, None), - ((12, 9), 'BlueScale', 'number', 0.039625, None), - ((12, 10), 'BlueShift', 'number', 7, None), - ((12, 11), 'BlueFuzz', 'number', 1, None), - (10, 'StdHW', 'number', None, None), - (11, 'StdVW', 'number', None, None), - ((12, 12), 'StemSnapH', 'delta', None, None), - ((12, 13), 'StemSnapV', 'delta', None, None), - ((12, 14), 'ForceBold', 'number', 0, None), - ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated - ((12, 16), 'lenIV', 'number', None, None), # deprecated - ((12, 17), 'LanguageGroup', 'number', 0, None), - ((12, 18), 'ExpansionFactor', 'number', 0.06, None), - ((12, 19), 'initialRandomSeed', 'number', 0, None), - (20, 'defaultWidthX', 'number', 0, None), - (21, 'nominalWidthX', 'number', 0, None), - (19, 'Subrs', 'number', None, SubrsConverter()), -] - -def addConverters(table): - for i in range(len(table)): - op, name, arg, default, conv = table[i] - if conv is not None: - continue - if arg in ("delta", "array"): - conv = ArrayConverter() - elif arg == "number": - conv = NumberConverter() - elif arg == "SID": - conv = ASCIIConverter() - else: - assert False - table[i] = op, name, arg, default, conv - -addConverters(privateDictOperators) -addConverters(topDictOperators) - - -class TopDictDecompiler(psCharStrings.DictDecompiler): - operators = buildOperatorDict(topDictOperators) - - -class PrivateDictDecompiler(psCharStrings.DictDecompiler): - operators = buildOperatorDict(privateDictOperators) - - -class DictCompiler(object): - - def __init__(self, dictObj, strings, parent): - assert isinstance(strings, IndexedStrings) - self.dictObj = dictObj - self.strings = strings - self.parent = parent - rawDict = {} - for name in dictObj.order: - value = getattr(dictObj, name, None) - if value is 
None: - continue - conv = dictObj.converters[name] - value = conv.write(dictObj, value) - if value == dictObj.defaults.get(name): - continue - rawDict[name] = value - self.rawDict = rawDict - - def setPos(self, pos, endPos): - pass - - def getDataLength(self): - return len(self.compile("getDataLength")) - - def compile(self, reason): - if DEBUG: - print("-- compiling %s for %s" % (self.__class__.__name__, reason)) - print("in baseDict: ", self) - rawDict = self.rawDict - data = [] - for name in self.dictObj.order: - value = rawDict.get(name) - if value is None: - continue - op, argType = self.opcodes[name] - if isinstance(argType, tuple): - l = len(argType) - assert len(value) == l, "value doesn't match arg type" - for i in range(l): - arg = argType[i] - v = value[i] - arghandler = getattr(self, "arg_" + arg) - data.append(arghandler(v)) - else: - arghandler = getattr(self, "arg_" + argType) - data.append(arghandler(value)) - data.append(op) - return bytesjoin(data) - - def toFile(self, file): - file.write(self.compile("toFile")) - - def arg_number(self, num): - return encodeNumber(num) - def arg_SID(self, s): - return psCharStrings.encodeIntCFF(self.strings.getSID(s)) - def arg_array(self, value): - data = [] - for num in value: - data.append(encodeNumber(num)) - return bytesjoin(data) - def arg_delta(self, value): - out = [] - last = 0 - for v in value: - out.append(v - last) - last = v - data = [] - for num in out: - data.append(encodeNumber(num)) - return bytesjoin(data) - - -def encodeNumber(num): - if isinstance(num, float): - return psCharStrings.encodeFloat(num) - else: - return psCharStrings.encodeIntCFF(num) - - -class TopDictCompiler(DictCompiler): - - opcodes = buildOpcodeDict(topDictOperators) - - def getChildren(self, strings): - children = [] - if hasattr(self.dictObj, "charset") and self.dictObj.charset: - children.append(CharsetCompiler(strings, self.dictObj.charset, self)) - if hasattr(self.dictObj, "Encoding"): - encoding = self.dictObj.Encoding 
- if not isinstance(encoding, basestring): - children.append(EncodingCompiler(strings, encoding, self)) - if hasattr(self.dictObj, "FDSelect"): - # I have not yet supported merging a ttx CFF-CID font, as there are interesting - # issues about merging the FDArrays. Here I assume that - # either the font was read from XML, and teh FDSelect indices are all - # in the charstring data, or the FDSelect array is already fully defined. - fdSelect = self.dictObj.FDSelect - if len(fdSelect) == 0: # probably read in from XML; assume fdIndex in CharString data - charStrings = self.dictObj.CharStrings - for name in self.dictObj.charset: - fdSelect.append(charStrings[name].fdSelectIndex) - fdSelectComp = FDSelectCompiler(fdSelect, self) - children.append(fdSelectComp) - if hasattr(self.dictObj, "CharStrings"): - items = [] - charStrings = self.dictObj.CharStrings - for name in self.dictObj.charset: - items.append(charStrings[name]) - charStringsComp = CharStringsCompiler(items, strings, self) - children.append(charStringsComp) - if hasattr(self.dictObj, "FDArray"): - # I have not yet supported merging a ttx CFF-CID font, as there are interesting - # issues about merging the FDArrays. Here I assume that the FDArray info is correct - # and complete. 
- fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self) - children.append(fdArrayIndexComp) - children.extend(fdArrayIndexComp.getChildren(strings)) - if hasattr(self.dictObj, "Private"): - privComp = self.dictObj.Private.getCompiler(strings, self) - children.append(privComp) - children.extend(privComp.getChildren(strings)) - return children - - -class FontDictCompiler(DictCompiler): - - opcodes = buildOpcodeDict(topDictOperators) - - def getChildren(self, strings): - children = [] - if hasattr(self.dictObj, "Private"): - privComp = self.dictObj.Private.getCompiler(strings, self) - children.append(privComp) - children.extend(privComp.getChildren(strings)) - return children - - -class PrivateDictCompiler(DictCompiler): - - opcodes = buildOpcodeDict(privateDictOperators) - - def setPos(self, pos, endPos): - size = endPos - pos - self.parent.rawDict["Private"] = size, pos - self.pos = pos - - def getChildren(self, strings): - children = [] - if hasattr(self.dictObj, "Subrs"): - children.append(self.dictObj.Subrs.getCompiler(strings, self)) - return children - - -class BaseDict(object): - - def __init__(self, strings=None, file=None, offset=None): - self.rawDict = {} - if DEBUG: - print("loading %s at %s" % (self.__class__.__name__, offset)) - self.file = file - self.offset = offset - self.strings = strings - self.skipNames = [] - - def decompile(self, data): - if DEBUG: - print(" length %s is %s" % (self.__class__.__name__, len(data))) - dec = self.decompilerClass(self.strings) - dec.decompile(data) - self.rawDict = dec.getDict() - self.postDecompile() - - def postDecompile(self): - pass - - def getCompiler(self, strings, parent): - return self.compilerClass(self, strings, parent) - - def __getattr__(self, name): - value = self.rawDict.get(name) - if value is None: - value = self.defaults.get(name) - if value is None: - raise AttributeError(name) - conv = self.converters[name] - value = conv.read(self, value) - setattr(self, name, value) - return value - 
- def toXML(self, xmlWriter, progress): - for name in self.order: - if name in self.skipNames: - continue - value = getattr(self, name, None) - if value is None: - continue - conv = self.converters[name] - conv.xmlWrite(xmlWriter, name, value, progress) - - def fromXML(self, name, attrs, content): - conv = self.converters[name] - value = conv.xmlRead(name, attrs, content, self) - setattr(self, name, value) - - -class TopDict(BaseDict): - - defaults = buildDefaults(topDictOperators) - converters = buildConverters(topDictOperators) - order = buildOrder(topDictOperators) - decompilerClass = TopDictDecompiler - compilerClass = TopDictCompiler - - def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): - BaseDict.__init__(self, strings, file, offset) - self.GlobalSubrs = GlobalSubrs - - def getGlyphOrder(self): - return self.charset - - def postDecompile(self): - offset = self.rawDict.get("CharStrings") - if offset is None: - return - # get the number of glyphs beforehand. - self.file.seek(offset) - self.numGlyphs = readCard16(self.file) - - def toXML(self, xmlWriter, progress): - if hasattr(self, "CharStrings"): - self.decompileAllCharStrings(progress) - if hasattr(self, "ROS"): - self.skipNames = ['Encoding'] - if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"): - # these values have default values, but I only want them to show up - # in CID fonts. - self.skipNames = ['CIDFontVersion', 'CIDFontRevision', 'CIDFontType', - 'CIDCount'] - BaseDict.toXML(self, xmlWriter, progress) - - def decompileAllCharStrings(self, progress): - # XXX only when doing ttdump -i? 
- i = 0 - for charString in self.CharStrings.values(): - try: - charString.decompile() - except: - print("Error in charstring ", i) - import sys - typ, value = sys.exc_info()[0:2] - raise typ(value) - if not i % 30 and progress: - progress.increment(0) # update - i = i + 1 - - -class FontDict(BaseDict): - - defaults = buildDefaults(topDictOperators) - converters = buildConverters(topDictOperators) - order = buildOrder(topDictOperators) - decompilerClass = None - compilerClass = FontDictCompiler - - def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): - BaseDict.__init__(self, strings, file, offset) - self.GlobalSubrs = GlobalSubrs - - def getGlyphOrder(self): - return self.charset - - def toXML(self, xmlWriter, progress): - self.skipNames = ['Encoding'] - BaseDict.toXML(self, xmlWriter, progress) - - -class PrivateDict(BaseDict): - defaults = buildDefaults(privateDictOperators) - converters = buildConverters(privateDictOperators) - order = buildOrder(privateDictOperators) - decompilerClass = PrivateDictDecompiler - compilerClass = PrivateDictCompiler - - -class IndexedStrings(object): - - """SID -> string mapping.""" - - def __init__(self, file=None): - if file is None: - strings = [] - else: - strings = [tostr(s, encoding="latin1") for s in Index(file)] - self.strings = strings - - def getCompiler(self): - return IndexedStringsCompiler(self, None, None) - - def __len__(self): - return len(self.strings) - - def __getitem__(self, SID): - if SID < cffStandardStringCount: - return cffStandardStrings[SID] - else: - return self.strings[SID - cffStandardStringCount] - - def getSID(self, s): - if not hasattr(self, "stringMapping"): - self.buildStringMapping() - if s in cffStandardStringMapping: - SID = cffStandardStringMapping[s] - elif s in self.stringMapping: - SID = self.stringMapping[s] - else: - SID = len(self.strings) + cffStandardStringCount - self.strings.append(s) - self.stringMapping[s] = SID - return SID - - def getStrings(self): - return 
self.strings - - def buildStringMapping(self): - self.stringMapping = {} - for index in range(len(self.strings)): - self.stringMapping[self.strings[index]] = index + cffStandardStringCount - - -# The 391 Standard Strings as used in the CFF format. -# from Adobe Technical None #5176, version 1.0, 18 March 1998 - -cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', - 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', - 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', - 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', - 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', - 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', - 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', - 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', - 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', - 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', - 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', - 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', - 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', - 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', - 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', - 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', - 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', - 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', - 'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', - 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', - 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', - 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', - 'copyright', 
'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', - 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', - 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', - 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute', - 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', - 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', - 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', - 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', - 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', - 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', - 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', - 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', - 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle', - 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', - 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', - 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', - 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', - 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', - 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', - 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', - 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', - 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', - 'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', - 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', - 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', - 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', - 'questiondownsmall', 
'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', - 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', - 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', - 'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior', - 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', - 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', - 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', - 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', - 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', - 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', - 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', - 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', - 'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002', - '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', - 'Semibold' -] - -cffStandardStringCount = 391 -assert len(cffStandardStrings) == cffStandardStringCount -# build reverse mapping -cffStandardStringMapping = {} -for _i in range(cffStandardStringCount): - cffStandardStringMapping[cffStandardStrings[_i]] = _i - -cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign", -"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", -"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", -"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", -"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", -"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", -"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", -"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", -"k", "l", "m", "n", "o", "p", 
"q", "r", "s", "t", "u", "v", "w", "x", "y", "z", -"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", -"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", -"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl", -"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", -"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", -"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", -"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", -"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE", -"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", -"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", -"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn", -"threequarters", "twosuperior", "registered", "minus", "eth", "multiply", -"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", -"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", -"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", -"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", -"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute", -"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", -"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis", -"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde", -"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", -"zcaron"] - -cffISOAdobeStringCount = 229 -assert len(cffISOAdobeStrings) == cffISOAdobeStringCount - -cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall", -"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", -"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", 
-"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle", -"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", -"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon", -"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall", -"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", -"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", -"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", -"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", -"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", -"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", -"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", -"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall", -"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", -"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", -"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", -"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth", -"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", -"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior", -"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", -"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", -"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", -"centinferior", "dollarinferior", "periodinferior", "commainferior", -"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall", -"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", -"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", -"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", 
-"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", -"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", -"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", -"Ydieresissmall"] - -cffExpertStringCount = 166 -assert len(cffIExpertStrings) == cffExpertStringCount - -cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle", -"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader", -"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle", -"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", -"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", -"semicolon", "commasuperior", "threequartersemdash", "periodsuperior", -"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", -"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", -"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", -"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah", -"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf", -"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", -"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior", -"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", -"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", -"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", -"eightinferior", "nineinferior", "centinferior", "dollarinferior", -"periodinferior", "commainferior"] - -cffExpertSubsetStringCount = 87 -assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount diff -Nru fonttools-3.0/Lib/fontTools/encodings/codecs_test.py fonttools-3.21.2/Lib/fontTools/encodings/codecs_test.py --- fonttools-3.0/Lib/fontTools/encodings/codecs_test.py 2015-08-31 17:57:15.000000000 +0000 +++ 
fonttools-3.21.2/Lib/fontTools/encodings/codecs_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -import unittest -import fontTools.encodings.codecs # Not to be confused with "import codecs" - -class ExtendedCodecsTest(unittest.TestCase): - - def test_decode_mac_japanese(self): - self.assertEqual(b'x\xfe\xfdy'.decode("x_mac_japanese_ttx"), - unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)) - - def test_encode_mac_japanese(self): - self.assertEqual(b'x\xfe\xfdy', - (unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)).encode("x_mac_japanese_ttx")) - - def test_decode_mac_trad_chinese(self): - self.assertEqual(b'\x80'.decode("x_mac_trad_chinese_ttx"), - unichr(0x5C)) - - def test_decode_mac_romanian(self): - self.assertEqual(b'x\xfb'.decode("mac_romanian"), - unichr(0x78)+unichr(0x02DA)) - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/feaLib/ast.py fonttools-3.21.2/Lib/fontTools/feaLib/ast.py --- fonttools-3.0/Lib/fontTools/feaLib/ast.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/ast.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,98 +1,1198 @@ from __future__ import print_function, division, absolute_import from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.feaLib.error import FeatureLibError +from fontTools.misc.encodingTools import getEncoding +from collections import OrderedDict +import itertools + +SHIFT = " " * 4 + + +def deviceToString(device): + if device is None: + return "" + else: + return "" % ", ".join("%d %d" % t for t in device) + + +fea_keywords = set([ + "anchor", "anchordef", "anon", "anonymous", + "by", + "contour", "cursive", + "device", + "enum", "enumerate", "excludedflt", "exclude_dflt", + "feature", "from", + "ignore", "ignorebaseglyphs", "ignoreligatures", "ignoremarks", + "include", 
"includedflt", "include_dflt", + "language", "languagesystem", "lookup", "lookupflag", + "mark", "markattachmenttype", "markclass", + "nameid", "null", + "parameters", "pos", "position", + "required", "righttoleft", "reversesub", "rsub", + "script", "sub", "substitute", "subtable", + "table", + "usemarkfilteringset", "useextension", "valuerecorddef"] +) + + +def asFea(g): + if hasattr(g, 'asFea'): + return g.asFea() + elif isinstance(g, tuple) and len(g) == 2: + return asFea(g[0]) + "-" + asFea(g[1]) # a range + elif g.lower() in fea_keywords: + return "\\" + g + else: + return g -class FeatureFile(object): - def __init__(self): +class Element(object): + + def __init__(self, location): + self.location = location + + def build(self, builder): + pass + + def asFea(self, indent=""): + raise NotImplementedError + + def __str__(self): + return self.asFea() + + +class Statement(Element): + pass + + +class Expression(Element): + pass + + +class Comment(Element): + def __init__(self, location, text): + super(Comment, self).__init__(location) + self.text = text + + def asFea(self, indent=""): + return self.text + + +class GlyphName(Expression): + """A single glyph name, such as cedilla.""" + def __init__(self, location, glyph): + Expression.__init__(self, location) + self.glyph = glyph + + def glyphSet(self): + return (self.glyph,) + + def asFea(self, indent=""): + return str(self.glyph) + + +class GlyphClass(Expression): + """A glyph class, such as [acute cedilla grave].""" + def __init__(self, location, glyphs=None): + Expression.__init__(self, location) + self.glyphs = glyphs if glyphs is not None else [] + self.original = [] + self.curr = 0 + + def glyphSet(self): + return tuple(self.glyphs) + + def asFea(self, indent=""): + if len(self.original): + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr:]) + self.curr = len(self.glyphs) + return "[" + " ".join(map(asFea, self.original)) + "]" + else: + return "[" + " ".join(map(asFea, 
self.glyphs)) + "]" + + def extend(self, glyphs): + self.glyphs.extend(glyphs) + + def append(self, glyph): + self.glyphs.append(glyph) + + def add_range(self, start, end, glyphs): + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr:]) + self.original.append((start, end)) + self.glyphs.extend(glyphs) + self.curr = len(self.glyphs) + + def add_cid_range(self, start, end, glyphs): + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr:]) + self.original.append(("cid{:05d}".format(start), "cid{:05d}".format(end))) + self.glyphs.extend(glyphs) + self.curr = len(self.glyphs) + + def add_class(self, gc): + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr:]) + self.original.append(gc) + self.glyphs.extend(gc.glyphSet()) + self.curr = len(self.glyphs) + + +class GlyphClassName(Expression): + """A glyph class name, such as @FRENCH_MARKS.""" + def __init__(self, location, glyphclass): + Expression.__init__(self, location) + assert isinstance(glyphclass, GlyphClassDefinition) + self.glyphclass = glyphclass + + def glyphSet(self): + return tuple(self.glyphclass.glyphSet()) + + def asFea(self, indent=""): + return "@" + self.glyphclass.name + + +class MarkClassName(Expression): + """A mark class name, such as @FRENCH_MARKS defined with markClass.""" + def __init__(self, location, markClass): + Expression.__init__(self, location) + assert isinstance(markClass, MarkClass) + self.markClass = markClass + + def glyphSet(self): + return self.markClass.glyphSet() + + def asFea(self, indent=""): + return "@" + self.markClass.name + + +class AnonymousBlock(Statement): + def __init__(self, tag, content, location): + Statement.__init__(self, location) + self.tag, self.content = tag, content + + def asFea(self, indent=""): + res = "anon {} {{\n".format(self.tag) + res += self.content + res += "}} {};\n\n".format(self.tag) + return res + + +class Block(Statement): + def __init__(self, location): + 
Statement.__init__(self, location) self.statements = [] + def build(self, builder): + for s in self.statements: + s.build(builder) + + def asFea(self, indent=""): + indent += SHIFT + return indent + ("\n" + indent).join( + [s.asFea(indent=indent) for s in self.statements]) + "\n" + + +class FeatureFile(Block): + def __init__(self): + Block.__init__(self, location=None) + self.markClasses = {} # name --> ast.MarkClass + + def asFea(self, indent=""): + return "\n".join(s.asFea(indent=indent) for s in self.statements) + -class FeatureBlock(object): +class FeatureBlock(Block): def __init__(self, location, name, use_extension): - self.location = location + Block.__init__(self, location) self.name, self.use_extension = name, use_extension - self.statements = [] + def build(self, builder): + # TODO(sascha): Handle use_extension. + builder.start_feature(self.location, self.name) + # language exclude_dflt statements modify builder.features_ + # limit them to this block with temporary builder.features_ + features = builder.features_ + builder.features_ = {} + Block.build(self, builder) + for key, value in builder.features_.items(): + features.setdefault(key, []).extend(value) + builder.features_ = features + builder.end_feature() + + def asFea(self, indent=""): + res = indent + "feature %s {\n" % self.name.strip() + res += Block.asFea(self, indent=indent) + res += indent + "} %s;\n" % self.name.strip() + return res + + +class FeatureNamesBlock(Block): + def __init__(self, location): + Block.__init__(self, location) + + def asFea(self, indent=""): + res = indent + "featureNames {\n" + res += Block.asFea(self, indent=indent) + res += indent + "};\n" + return res -class LookupBlock(object): + +class LookupBlock(Block): def __init__(self, location, name, use_extension): - self.location = location + Block.__init__(self, location) self.name, self.use_extension = name, use_extension - self.statements = [] + def build(self, builder): + # TODO(sascha): Handle use_extension. 
+ builder.start_lookup_block(self.location, self.name) + Block.build(self, builder) + builder.end_lookup_block() + + def asFea(self, indent=""): + res = "lookup {} {{\n".format(self.name) + res += Block.asFea(self, indent=indent) + res += "{}}} {};\n".format(indent, self.name) + return res + + +class TableBlock(Block): + def __init__(self, location, name): + Block.__init__(self, location) + self.name = name -class GlyphClassDefinition(object): + def asFea(self, indent=""): + res = "table {} {{\n".format(self.name.strip()) + res += super(TableBlock, self).asFea(indent=indent) + res += "}} {};\n".format(self.name.strip()) + return res + + +class GlyphClassDefinition(Statement): + """Example: @UPPERCASE = [A-Z];""" def __init__(self, location, name, glyphs): - self.location = location + Statement.__init__(self, location) self.name = name self.glyphs = glyphs + def glyphSet(self): + return tuple(self.glyphs.glyphSet()) -class AlternateSubstitution(object): - def __init__(self, location, glyph, from_class): - self.location = location - self.glyph, self.from_class = (glyph, from_class) + def asFea(self, indent=""): + return "@" + self.name + " = " + self.glyphs.asFea() + ";" + + +class GlyphClassDefStatement(Statement): + """Example: GlyphClassDef @UPPERCASE, [B], [C], [D];""" + def __init__(self, location, baseGlyphs, markGlyphs, + ligatureGlyphs, componentGlyphs): + Statement.__init__(self, location) + self.baseGlyphs, self.markGlyphs = (baseGlyphs, markGlyphs) + self.ligatureGlyphs = ligatureGlyphs + self.componentGlyphs = componentGlyphs + + def build(self, builder): + base = self.baseGlyphs.glyphSet() if self.baseGlyphs else tuple() + liga = self.ligatureGlyphs.glyphSet() \ + if self.ligatureGlyphs else tuple() + mark = self.markGlyphs.glyphSet() if self.markGlyphs else tuple() + comp = (self.componentGlyphs.glyphSet() + if self.componentGlyphs else tuple()) + builder.add_glyphClassDef(self.location, base, liga, mark, comp) + + def asFea(self, indent=""): + return 
"GlyphClassDef {}, {}, {}, {};".format( + self.baseGlyphs.asFea() if self.baseGlyphs else "", + self.ligatureGlyphs.asFea() if self.ligatureGlyphs else "", + self.markGlyphs.asFea() if self.markGlyphs else "", + self.componentGlyphs.asFea() if self.componentGlyphs else "") + + +# While glyph classes can be defined only once, the feature file format +# allows expanding mark classes with multiple definitions, each using +# different glyphs and anchors. The following are two MarkClassDefinitions +# for the same MarkClass: +# markClass [acute grave] @FRENCH_ACCENTS; +# markClass [cedilla] @FRENCH_ACCENTS; +class MarkClass(object): + def __init__(self, name): + self.name = name + self.definitions = [] + self.glyphs = OrderedDict() # glyph --> ast.MarkClassDefinitions + def addDefinition(self, definition): + assert isinstance(definition, MarkClassDefinition) + self.definitions.append(definition) + for glyph in definition.glyphSet(): + if glyph in self.glyphs: + otherLoc = self.glyphs[glyph].location + raise FeatureLibError( + "Glyph %s already defined at %s:%d:%d" % ( + glyph, otherLoc[0], otherLoc[1], otherLoc[2]), + definition.location) + self.glyphs[glyph] = definition + + def glyphSet(self): + return tuple(self.glyphs.keys()) + + def asFea(self, indent=""): + res = "\n".join(d.asFea(indent=indent) for d in self.definitions) + return res + + +class MarkClassDefinition(Statement): + def __init__(self, location, markClass, anchor, glyphs): + Statement.__init__(self, location) + assert isinstance(markClass, MarkClass) + assert isinstance(anchor, Anchor) and isinstance(glyphs, Expression) + self.markClass, self.anchor, self.glyphs = markClass, anchor, glyphs + + def glyphSet(self): + return self.glyphs.glyphSet() + + def asFea(self, indent=""): + return "{}markClass {} {} @{};".format( + indent, self.glyphs.asFea(), self.anchor.asFea(), + self.markClass.name) + + +class AlternateSubstStatement(Statement): + def __init__(self, location, prefix, glyph, suffix, replacement): 
+ Statement.__init__(self, location) + self.prefix, self.glyph, self.suffix = (prefix, glyph, suffix) + self.replacement = replacement + + def build(self, builder): + glyph = self.glyph.glyphSet() + assert len(glyph) == 1, glyph + glyph = list(glyph)[0] + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + replacement = self.replacement.glyphSet() + builder.add_alternate_subst(self.location, prefix, glyph, suffix, + replacement) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix): + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += asFea(self.glyph) + "'" # even though we really only use 1 + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += asFea(self.glyph) + res += " from " + res += asFea(self.replacement) + res += ";" + return res + + +class Anchor(Expression): + def __init__(self, location, name, x, y, contourpoint, + xDeviceTable, yDeviceTable): + Expression.__init__(self, location) + self.name = name + self.x, self.y, self.contourpoint = x, y, contourpoint + self.xDeviceTable, self.yDeviceTable = xDeviceTable, yDeviceTable + + def asFea(self, indent=""): + if self.name is not None: + return "".format(self.name) + res = "" + exit = self.exitAnchor.asFea() if self.exitAnchor else "" + return "pos cursive {} {} {};".format(self.glyphclass.asFea(), entry, exit) + + +class FeatureReferenceStatement(Statement): + """Example: feature salt;""" + def __init__(self, location, featureName): + Statement.__init__(self, location) + self.location, self.featureName = (location, featureName) + + def build(self, builder): + builder.add_feature_reference(self.location, self.featureName) + + def asFea(self, indent=""): + return "feature {};".format(self.featureName) + + +class IgnorePosStatement(Statement): + def __init__(self, location, chainContexts): + Statement.__init__(self, location) + self.chainContexts = chainContexts + + def 
build(self, builder): + for prefix, glyphs, suffix in self.chainContexts: + prefix = [p.glyphSet() for p in prefix] + glyphs = [g.glyphSet() for g in glyphs] + suffix = [s.glyphSet() for s in suffix] + builder.add_chain_context_pos( + self.location, prefix, glyphs, suffix, []) + + def asFea(self, indent=""): + contexts = [] + for prefix, glyphs, suffix in self.chainContexts: + res = "" + if len(prefix) or len(suffix): + if len(prefix): + res += " ".join(map(asFea, prefix)) + " " + res += " ".join(g.asFea() + "'" for g in glyphs) + if len(suffix): + res += " " + " ".join(map(asFea, suffix)) + else: + res += " ".join(map(asFea, glyphs)) + contexts.append(res) + return "ignore pos " + ", ".join(contexts) + ";" + + +class IgnoreSubstStatement(Statement): + def __init__(self, location, chainContexts): + Statement.__init__(self, location) + self.chainContexts = chainContexts + + def build(self, builder): + for prefix, glyphs, suffix in self.chainContexts: + prefix = [p.glyphSet() for p in prefix] + glyphs = [g.glyphSet() for g in glyphs] + suffix = [s.glyphSet() for s in suffix] + builder.add_chain_context_subst( + self.location, prefix, glyphs, suffix, []) + + def asFea(self, indent=""): + contexts = [] + for prefix, glyphs, suffix in self.chainContexts: + res = "" + if len(prefix) or len(suffix): + if len(prefix): + res += " ".join(map(asFea, prefix)) + " " + res += " ".join(g.asFea() + "'" for g in glyphs) + if len(suffix): + res += " " + " ".join(map(asFea, suffix)) + else: + res += " ".join(map(asFea, glyphs)) + contexts.append(res) + return "ignore sub " + ", ".join(contexts) + ";" -class LanguageStatement(object): + +class LanguageStatement(Statement): def __init__(self, location, language, include_default, required): - self.location = location + Statement.__init__(self, location) + assert(len(language) == 4) self.language = language self.include_default = include_default self.required = required + def build(self, builder): + 
builder.set_language(location=self.location, language=self.language, + include_default=self.include_default, + required=self.required) + + def asFea(self, indent=""): + res = "language {}".format(self.language.strip()) + if not self.include_default: + res += " exclude_dflt" + if self.required: + res += " required" + res += ";" + return res -class LanguageSystemStatement(object): + +class LanguageSystemStatement(Statement): def __init__(self, location, script, language): - self.location = location + Statement.__init__(self, location) self.script, self.language = (script, language) + def build(self, builder): + builder.add_language_system(self.location, self.script, self.language) -class IgnoreSubstitutionRule(object): - def __init__(self, location, prefix, glyphs, suffix): - self.location = location + def asFea(self, indent=""): + return "languagesystem {} {};".format(self.script, self.language.strip()) + + +class FontRevisionStatement(Statement): + def __init__(self, location, revision): + Statement.__init__(self, location) + self.revision = revision + + def build(self, builder): + builder.set_font_revision(self.location, self.revision) + + def asFea(self, indent=""): + return "FontRevision {:.3f};".format(self.revision) + + +class LigatureCaretByIndexStatement(Statement): + def __init__(self, location, glyphs, carets): + Statement.__init__(self, location) + self.glyphs, self.carets = (glyphs, carets) + + def build(self, builder): + glyphs = self.glyphs.glyphSet() + builder.add_ligatureCaretByIndex_(self.location, glyphs, set(self.carets)) + + def asFea(self, indent=""): + return "LigatureCaretByIndex {} {};".format( + self.glyphs.asFea(), " ".join(str(x) for x in self.carets)) + + +class LigatureCaretByPosStatement(Statement): + def __init__(self, location, glyphs, carets): + Statement.__init__(self, location) + self.glyphs, self.carets = (glyphs, carets) + + def build(self, builder): + glyphs = self.glyphs.glyphSet() + 
builder.add_ligatureCaretByPos_(self.location, glyphs, set(self.carets)) + + def asFea(self, indent=""): + return "LigatureCaretByPos {} {};".format( + self.glyphs.asFea(), " ".join(str(x) for x in self.carets)) + + +class LigatureSubstStatement(Statement): + def __init__(self, location, prefix, glyphs, suffix, replacement, + forceChain): + Statement.__init__(self, location) self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix) + self.replacement, self.forceChain = replacement, forceChain + + def build(self, builder): + prefix = [p.glyphSet() for p in self.prefix] + glyphs = [g.glyphSet() for g in self.glyphs] + suffix = [s.glyphSet() for s in self.suffix] + builder.add_ligature_subst( + self.location, prefix, glyphs, suffix, self.replacement, + self.forceChain) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(g.asFea() for g in self.prefix) + " " + res += " ".join(g.asFea() + "'" for g in self.glyphs) + if len(self.suffix): + res += " " + " ".join(g.asFea() for g in self.suffix) + else: + res += " ".join(g.asFea() for g in self.glyphs) + res += " by " + res += asFea(self.replacement) + res += ";" + return res + + +class LookupFlagStatement(Statement): + def __init__(self, location, value, markAttachment, markFilteringSet): + Statement.__init__(self, location) + self.value = value + self.markAttachment = markAttachment + self.markFilteringSet = markFilteringSet + + def build(self, builder): + markAttach = None + if self.markAttachment is not None: + markAttach = self.markAttachment.glyphSet() + markFilter = None + if self.markFilteringSet is not None: + markFilter = self.markFilteringSet.glyphSet() + builder.set_lookup_flag(self.location, self.value, + markAttach, markFilter) + + def asFea(self, indent=""): + res = "lookupflag" + flags = ["RightToLeft", "IgnoreBaseGlyphs", "IgnoreLigatures", "IgnoreMarks"] + curr = 1 + for i in range(len(flags)): + if 
self.value & curr != 0: + res += " " + flags[i] + curr = curr << 1 + if self.markAttachment is not None: + res += " MarkAttachmentType {}".format(self.markAttachment.asFea()) + if self.markFilteringSet is not None: + res += " UseMarkFilteringSet {}".format(self.markFilteringSet.asFea()) + res += ";" + return res -class LookupReferenceStatement(object): +class LookupReferenceStatement(Statement): def __init__(self, location, lookup): + Statement.__init__(self, location) self.location, self.lookup = (location, lookup) + def build(self, builder): + builder.add_lookup_call(self.lookup.name) -class ScriptStatement(object): + def asFea(self, indent=""): + return "lookup {};".format(self.lookup.name) + + +class MarkBasePosStatement(Statement): + def __init__(self, location, base, marks): + Statement.__init__(self, location) + self.base, self.marks = base, marks + + def build(self, builder): + builder.add_mark_base_pos(self.location, self.base.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos base {}".format(self.base.asFea()) + for a, m in self.marks: + res += " {} mark @{}".format(a.asFea(), m.name) + res += ";" + return res + + +class MarkLigPosStatement(Statement): + def __init__(self, location, ligatures, marks): + Statement.__init__(self, location) + self.ligatures, self.marks = ligatures, marks + + def build(self, builder): + builder.add_mark_lig_pos(self.location, self.ligatures.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos ligature {}".format(self.ligatures.asFea()) + ligs = [] + for l in self.marks: + temp = "" + if l is None or not len(l): + temp = " " + else: + for a, m in l: + temp += " {} mark @{}".format(a.asFea(), m.name) + ligs.append(temp) + res += ("\n" + indent + SHIFT + "ligComponent").join(ligs) + res += ";" + return res + + +class MarkMarkPosStatement(Statement): + def __init__(self, location, baseMarks, marks): + Statement.__init__(self, location) + self.baseMarks, self.marks = baseMarks, marks + + def 
build(self, builder): + builder.add_mark_mark_pos(self.location, self.baseMarks.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos mark {}".format(self.baseMarks.asFea()) + for a, m in self.marks: + res += " {} mark @{}".format(a.asFea(), m.name) + res += ";" + return res + + +class MultipleSubstStatement(Statement): + def __init__(self, location, prefix, glyph, suffix, replacement): + Statement.__init__(self, location) + self.prefix, self.glyph, self.suffix = prefix, glyph, suffix + self.replacement = replacement + + def build(self, builder): + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + builder.add_multiple_subst( + self.location, prefix, self.glyph, suffix, self.replacement) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix): + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += asFea(self.glyph) + "'" + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += asFea(self.glyph) + res += " by " + res += " ".join(map(asFea, self.replacement)) + res += ";" + return res + + +class PairPosStatement(Statement): + def __init__(self, location, enumerated, + glyphs1, valuerecord1, glyphs2, valuerecord2): + Statement.__init__(self, location) + self.enumerated = enumerated + self.glyphs1, self.valuerecord1 = glyphs1, valuerecord1 + self.glyphs2, self.valuerecord2 = glyphs2, valuerecord2 + + def build(self, builder): + if self.enumerated: + g = [self.glyphs1.glyphSet(), self.glyphs2.glyphSet()] + for glyph1, glyph2 in itertools.product(*g): + builder.add_specific_pair_pos( + self.location, glyph1, self.valuerecord1, + glyph2, self.valuerecord2) + return + + is_specific = (isinstance(self.glyphs1, GlyphName) and + isinstance(self.glyphs2, GlyphName)) + if is_specific: + builder.add_specific_pair_pos( + self.location, self.glyphs1.glyph, self.valuerecord1, + self.glyphs2.glyph, self.valuerecord2) + else: + 
builder.add_class_pair_pos( + self.location, self.glyphs1.glyphSet(), self.valuerecord1, + self.glyphs2.glyphSet(), self.valuerecord2) + + def asFea(self, indent=""): + res = "enum " if self.enumerated else "" + if self.valuerecord2: + res += "pos {} {} {} {};".format( + self.glyphs1.asFea(), self.valuerecord1.makeString(), + self.glyphs2.asFea(), self.valuerecord2.makeString()) + else: + res += "pos {} {} {};".format( + self.glyphs1.asFea(), self.glyphs2.asFea(), + self.valuerecord1.makeString()) + return res + + +class ReverseChainSingleSubstStatement(Statement): + def __init__(self, location, old_prefix, old_suffix, glyphs, replacements): + Statement.__init__(self, location) + self.old_prefix, self.old_suffix = old_prefix, old_suffix + self.glyphs = glyphs + self.replacements = replacements + + def build(self, builder): + prefix = [p.glyphSet() for p in self.old_prefix] + suffix = [s.glyphSet() for s in self.old_suffix] + originals = self.glyphs[0].glyphSet() + replaces = self.replacements[0].glyphSet() + if len(replaces) == 1: + replaces = replaces * len(originals) + builder.add_reverse_chain_single_subst( + self.location, prefix, suffix, dict(zip(originals, replaces))) + + def asFea(self, indent=""): + res = "rsub " + if len(self.old_prefix) or len(self.old_suffix): + if len(self.old_prefix): + res += " ".join(asFea(g) for g in self.old_prefix) + " " + res += " ".join(asFea(g) + "'" for g in self.glyphs) + if len(self.old_suffix): + res += " " + " ".join(asFea(g) for g in self.old_suffix) + else: + res += " ".join(map(asFea, self.glyphs)) + res += " by {};".format(" ".join(asFea(g) for g in self.replacements)) + return res + + +class SingleSubstStatement(Statement): + def __init__(self, location, glyphs, replace, prefix, suffix, forceChain): + Statement.__init__(self, location) + self.prefix, self.suffix = prefix, suffix + self.forceChain = forceChain + self.glyphs = glyphs + self.replacements = replace + + def build(self, builder): + prefix = [p.glyphSet() 
for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + originals = self.glyphs[0].glyphSet() + replaces = self.replacements[0].glyphSet() + if len(replaces) == 1: + replaces = replaces * len(originals) + builder.add_single_subst(self.location, prefix, suffix, + OrderedDict(zip(originals, replaces)), + self.forceChain) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(asFea(g) for g in self.prefix) + " " + res += " ".join(asFea(g) + "'" for g in self.glyphs) + if len(self.suffix): + res += " " + " ".join(asFea(g) for g in self.suffix) + else: + res += " ".join(asFea(g) for g in self.glyphs) + res += " by {};".format(" ".join(asFea(g) for g in self.replacements)) + return res + + +class ScriptStatement(Statement): def __init__(self, location, script): - self.location = location + Statement.__init__(self, location) self.script = script + def build(self, builder): + builder.set_script(self.location, self.script) -class SubtableStatement(object): - def __init__(self, location): - self.location = location + def asFea(self, indent=""): + return "script {};".format(self.script.strip()) -class SubstitutionRule(object): - def __init__(self, location, old, new): - self.location, self.old, self.new = (location, old, new) - self.old_prefix = [] - self.old_suffix = [] - self.lookups = [None] * len(old) +class SinglePosStatement(Statement): + def __init__(self, location, pos, prefix, suffix, forceChain): + Statement.__init__(self, location) + self.pos, self.prefix, self.suffix = pos, prefix, suffix + self.forceChain = forceChain + + def build(self, builder): + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + pos = [(g.glyphSet(), value) for g, value in self.pos] + builder.add_single_pos(self.location, prefix, suffix, + pos, self.forceChain) + + def asFea(self, indent=""): + res = "pos " + if len(self.prefix) or 
len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += " ".join([asFea(x[0]) + "'" + ( + (" " + x[1].makeString()) if x[1] else "") for x in self.pos]) + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += " ".join([asFea(x[0]) + " " + + (x[1].makeString() if x[1] else "") for x in self.pos]) + res += ";" + return res -class ValueRecord(object): - def __init__(self, location, xPlacement, yPlacement, xAdvance, yAdvance): - self.location = location +class SubtableStatement(Statement): + def __init__(self, location): + Statement.__init__(self, location) + + +class ValueRecord(Expression): + def __init__(self, location, vertical, + xPlacement, yPlacement, xAdvance, yAdvance, + xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice): + Expression.__init__(self, location) self.xPlacement, self.yPlacement = (xPlacement, yPlacement) self.xAdvance, self.yAdvance = (xAdvance, yAdvance) + self.xPlaDevice, self.yPlaDevice = (xPlaDevice, yPlaDevice) + self.xAdvDevice, self.yAdvDevice = (xAdvDevice, yAdvDevice) + self.vertical = vertical + + def __eq__(self, other): + return (self.xPlacement == other.xPlacement and + self.yPlacement == other.yPlacement and + self.xAdvance == other.xAdvance and + self.yAdvance == other.yAdvance and + self.xPlaDevice == other.xPlaDevice and + self.xAdvDevice == other.xAdvDevice) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return (hash(self.xPlacement) ^ hash(self.yPlacement) ^ + hash(self.xAdvance) ^ hash(self.yAdvance) ^ + hash(self.xPlaDevice) ^ hash(self.yPlaDevice) ^ + hash(self.xAdvDevice) ^ hash(self.yAdvDevice)) + + def makeString(self, vertical=None): + x, y = self.xPlacement, self.yPlacement + xAdvance, yAdvance = self.xAdvance, self.yAdvance + xPlaDevice, yPlaDevice = self.xPlaDevice, self.yPlaDevice + xAdvDevice, yAdvDevice = self.xAdvDevice, self.yAdvDevice + if vertical is None: + vertical = 
self.vertical + + # Try format A, if possible. + if x is None and y is None: + if xAdvance is None and vertical: + return str(yAdvance) + elif yAdvance is None and not vertical: + return str(xAdvance) + + # Try format B, if possible. + if (xPlaDevice is None and yPlaDevice is None and + xAdvDevice is None and yAdvDevice is None): + return "<%s %s %s %s>" % (x, y, xAdvance, yAdvance) + + # Last resort is format C. + return "<%s %s %s %s %s %s %s %s>" % ( + x, y, xAdvance, yAdvance, + deviceToString(xPlaDevice), deviceToString(yPlaDevice), + deviceToString(xAdvDevice), deviceToString(yAdvDevice)) -class ValueRecordDefinition(object): +class ValueRecordDefinition(Statement): def __init__(self, location, name, value): - self.location = location + Statement.__init__(self, location) self.name = name self.value = value + + def asFea(self, indent=""): + return "valueRecordDef {} {};".format(self.value.asFea(), self.name) + + +def simplify_name_attributes(pid, eid, lid): + if pid == 3 and eid == 1 and lid == 1033: + return "" + elif pid == 1 and eid == 0 and lid == 0: + return "1" + else: + return "{} {} {}".format(pid, eid, lid) + + +class NameRecord(Statement): + def __init__(self, location, nameID, platformID, + platEncID, langID, string): + Statement.__init__(self, location) + self.nameID = nameID + self.platformID = platformID + self.platEncID = platEncID + self.langID = langID + self.string = string + + def build(self, builder): + builder.add_name_record( + self.location, self.nameID, self.platformID, + self.platEncID, self.langID, self.string) + + def asFea(self, indent=""): + def escape(c, escape_pattern): + # Also escape U+0022 QUOTATION MARK and U+005C REVERSE SOLIDUS + if c >= 0x20 and c <= 0x7E and c not in (0x22, 0x5C): + return unichr(c) + else: + return escape_pattern % c + encoding = getEncoding(self.platformID, self.platEncID, self.langID) + if encoding is None: + raise FeatureLibError("Unsupported encoding", self.location) + s = tobytes(self.string, 
encoding=encoding) + if encoding == "utf_16_be": + escaped_string = "".join([ + escape(byteord(s[i]) * 256 + byteord(s[i + 1]), r"\%04x") + for i in range(0, len(s), 2)]) + else: + escaped_string = "".join([escape(byteord(b), r"\%02x") for b in s]) + plat = simplify_name_attributes( + self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return "nameid {} {}\"{}\";".format(self.nameID, plat, escaped_string) + + +class FeatureNameStatement(NameRecord): + def build(self, builder): + NameRecord.build(self, builder) + builder.add_featureName(self.location, self.nameID) + + def asFea(self, indent=""): + if self.nameID == "size": + tag = "sizemenuname" + else: + tag = "name" + plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return "{} {}\"{}\";".format(tag, plat, self.string) + + +class SizeParameters(Statement): + def __init__(self, location, DesignSize, SubfamilyID, RangeStart, + RangeEnd): + Statement.__init__(self, location) + self.DesignSize = DesignSize + self.SubfamilyID = SubfamilyID + self.RangeStart = RangeStart + self.RangeEnd = RangeEnd + + def build(self, builder): + builder.set_size_parameters(self.location, self.DesignSize, + self.SubfamilyID, self.RangeStart, self.RangeEnd) + + def asFea(self, indent=""): + res = "parameters {:.1f} {}".format(self.DesignSize, self.SubfamilyID) + if self.RangeStart != 0 or self.RangeEnd != 0: + res += " {} {}".format(int(self.RangeStart * 10), int(self.RangeEnd * 10)) + return res + ";" + + +class BaseAxis(Statement): + def __init__(self, location, bases, scripts, vertical): + Statement.__init__(self, location) + self.bases = bases + self.scripts = scripts + self.vertical = vertical + + def build(self, builder): + builder.set_base_axis(self.bases, self.scripts, self.vertical) + + def asFea(self, indent=""): + direction = "Vert" if self.vertical else "Horiz" + scripts = ["{} {} {}".format(a[0], a[1], " ".join(map(str, a[2]))) for a in 
self.scripts] + return "{}Axis.BaseTagList {};\n{}{}Axis.BaseScriptList {};".format( + direction, " ".join(self.bases), indent, direction, ", ".join(scripts)) + + +class OS2Field(Statement): + def __init__(self, location, key, value): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + builder.add_os2_field(self.key, self.value) + + def asFea(self, indent=""): + def intarr2str(x): + return " ".join(map(str, x)) + numbers = ("FSType", "TypoAscender", "TypoDescender", "TypoLineGap", + "winAscent", "winDescent", "XHeight", "CapHeight", + "WeightClass", "WidthClass", "LowerOpSize", "UpperOpSize") + ranges = ("UnicodeRange", "CodePageRange") + keywords = dict([(x.lower(), [x, str]) for x in numbers]) + keywords.update([(x.lower(), [x, intarr2str]) for x in ranges]) + keywords["panose"] = ["Panose", intarr2str] + keywords["vendor"] = ["Vendor", lambda y: '"{}"'.format(y)] + if self.key in keywords: + return "{} {};".format(keywords[self.key][0], keywords[self.key][1](self.value)) + return "" # should raise exception + + +class HheaField(Statement): + def __init__(self, location, key, value): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + builder.add_hhea_field(self.key, self.value) + + def asFea(self, indent=""): + fields = ("CaretOffset", "Ascender", "Descender", "LineGap") + keywords = dict([(x.lower(), x) for x in fields]) + return "{} {};".format(keywords[self.key], self.value) + + +class VheaField(Statement): + def __init__(self, location, key, value): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + builder.add_vhea_field(self.key, self.value) + + def asFea(self, indent=""): + fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap") + keywords = dict([(x.lower(), x) for x in fields]) + return "{} {};".format(keywords[self.key], self.value) diff -Nru 
fonttools-3.0/Lib/fontTools/feaLib/builder.py fonttools-3.21.2/Lib/fontTools/feaLib/builder.py --- fonttools-3.0/Lib/fontTools/feaLib/builder.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/builder.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1503 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import binary2num, safeEval +from fontTools.feaLib.error import FeatureLibError +from fontTools.feaLib.parser import Parser +from fontTools.otlLib import builder as otl +from fontTools.ttLib import newTable, getTableModule +from fontTools.ttLib.tables import otBase, otTables +import itertools + + +def addOpenTypeFeatures(font, featurefile): + builder = Builder(font, featurefile) + builder.build() + + +def addOpenTypeFeaturesFromString(font, features, filename=None): + featurefile = UnicodeIO(tounicode(features)) + if filename: + # the directory containing 'filename' is used as the root of relative + # include paths; if None is provided, the current directory is assumed + featurefile.name = filename + addOpenTypeFeatures(font, featurefile) + + +class Builder(object): + def __init__(self, font, featurefile): + self.font = font + self.file = featurefile + self.glyphMap = font.getReverseGlyphMap() + self.default_language_systems_ = set() + self.script_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + self.language_systems = set() + self.named_lookups_ = {} + self.cur_lookup_ = None + self.cur_lookup_name_ = None + self.cur_feature_name_ = None + self.lookups_ = [] + self.features_ = {} # ('latn', 'DEU ', 'smcp') --> [LookupBuilder*] + self.parseTree = None + self.required_features_ = {} # ('latn', 'DEU ') --> 'scmp' + # for feature 'aalt' + self.aalt_features_ = [] # [(location, featureName)*], for 'aalt' + self.aalt_location_ = None + self.aalt_alternates_ 
= {} + # for 'featureNames' + self.featureNames_ = [] + self.featureNames_ids_ = {} + # for feature 'size' + self.size_parameters_ = None + # for table 'head' + self.fontRevision_ = None # 2.71 + # for table 'name' + self.names_ = [] + # for table 'BASE' + self.base_horiz_axis_ = None + self.base_vert_axis_ = None + # for table 'GDEF' + self.attachPoints_ = {} # "a" --> {3, 7} + self.ligCaretCoords_ = {} # "f_f_i" --> {300, 600} + self.ligCaretPoints_ = {} # "f_f_i" --> {3, 7} + self.glyphClassDefs_ = {} # "fi" --> (2, (file, line, column)) + self.markAttach_ = {} # "acute" --> (4, (file, line, column)) + self.markAttachClassID_ = {} # frozenset({"acute", "grave"}) --> 4 + self.markFilterSets_ = {} # frozenset({"acute", "grave"}) --> 4 + # for table 'OS/2' + self.os2_ = {} + # for table 'hhea' + self.hhea_ = {} + # for table 'vhea' + self.vhea_ = {} + + def build(self): + self.parseTree = Parser(self.file, self.glyphMap).parse() + self.parseTree.build(self) + self.build_feature_aalt_() + self.build_head() + self.build_hhea() + self.build_vhea() + self.build_name() + self.build_OS_2() + for tag in ('GPOS', 'GSUB'): + table = self.makeTable(tag) + if (table.ScriptList.ScriptCount > 0 or + table.FeatureList.FeatureCount > 0 or + table.LookupList.LookupCount > 0): + fontTable = self.font[tag] = newTable(tag) + fontTable.table = table + elif tag in self.font: + del self.font[tag] + gdef = self.buildGDEF() + if gdef: + self.font["GDEF"] = gdef + elif "GDEF" in self.font: + del self.font["GDEF"] + base = self.buildBASE() + if base: + self.font["BASE"] = base + elif "BASE" in self.font: + del self.font["BASE"] + + def get_chained_lookup_(self, location, builder_class): + result = builder_class(self.font, location) + result.lookupflag = self.lookupflag_ + result.markFilterSet = self.lookupflag_markFilterSet_ + self.lookups_.append(result) + return result + + def add_lookup_to_feature_(self, lookup, feature_name): + for script, lang in self.language_systems: + key = (script, 
lang, feature_name) + self.features_.setdefault(key, []).append(lookup) + + def get_lookup_(self, location, builder_class): + if (self.cur_lookup_ and + type(self.cur_lookup_) == builder_class and + self.cur_lookup_.lookupflag == self.lookupflag_ and + self.cur_lookup_.markFilterSet == + self.lookupflag_markFilterSet_): + return self.cur_lookup_ + if self.cur_lookup_name_ and self.cur_lookup_: + raise FeatureLibError( + "Within a named lookup block, all rules must be of " + "the same lookup type and flag", location) + self.cur_lookup_ = builder_class(self.font, location) + self.cur_lookup_.lookupflag = self.lookupflag_ + self.cur_lookup_.markFilterSet = self.lookupflag_markFilterSet_ + self.lookups_.append(self.cur_lookup_) + if self.cur_lookup_name_: + # We are starting a lookup rule inside a named lookup block. + self.named_lookups_[self.cur_lookup_name_] = self.cur_lookup_ + if self.cur_feature_name_: + # We are starting a lookup rule inside a feature. This includes + # lookup rules inside named lookups inside features. + self.add_lookup_to_feature_(self.cur_lookup_, + self.cur_feature_name_) + return self.cur_lookup_ + + def build_feature_aalt_(self): + if not self.aalt_features_ and not self.aalt_alternates_: + return + alternates = {g: set(a) for g, a in self.aalt_alternates_.items()} + for location, name in self.aalt_features_ + [(None, "aalt")]: + feature = [(script, lang, feature, lookups) + for (script, lang, feature), lookups + in self.features_.items() + if feature == name] + # "aalt" does not have to specify its own lookups, but it might. 
+ if not feature and name != "aalt": + raise FeatureLibError("Feature %s has not been defined" % name, + location) + for script, lang, feature, lookups in feature: + for lookup in lookups: + for glyph, alts in lookup.getAlternateGlyphs().items(): + alternates.setdefault(glyph, set()).update(alts) + single = {glyph: list(repl)[0] for glyph, repl in alternates.items() + if len(repl) == 1} + # TODO: Figure out the glyph alternate ordering used by makeotf. + # https://github.com/fonttools/fonttools/issues/836 + multi = {glyph: sorted(repl, key=self.font.getGlyphID) + for glyph, repl in alternates.items() + if len(repl) > 1} + if not single and not multi: + return + self.features_ = {(script, lang, feature): lookups + for (script, lang, feature), lookups + in self.features_.items() + if feature != "aalt"} + old_lookups = self.lookups_ + self.lookups_ = [] + self.start_feature(self.aalt_location_, "aalt") + if single: + single_lookup = self.get_lookup_(location, SingleSubstBuilder) + single_lookup.mapping = single + if multi: + multi_lookup = self.get_lookup_(location, AlternateSubstBuilder) + multi_lookup.alternates = multi + self.end_feature() + self.lookups_.extend(old_lookups) + + def build_head(self): + if not self.fontRevision_: + return + table = self.font.get("head") + if not table: # this only happens for unit tests + table = self.font["head"] = newTable("head") + table.decompile(b"\0" * 54, self.font) + table.tableVersion = 1.0 + table.created = table.modified = 3406620153 # 2011-12-13 11:22:33 + table.fontRevision = self.fontRevision_ + + def build_hhea(self): + if not self.hhea_: + return + table = self.font.get("hhea") + if not table: # this only happens for unit tests + table = self.font["hhea"] = newTable("hhea") + table.decompile(b"\0" * 36, self.font) + table.tableVersion = 0x00010000 + if "caretoffset" in self.hhea_: + table.caretOffset = self.hhea_["caretoffset"] + if "ascender" in self.hhea_: + table.ascent = self.hhea_["ascender"] + if "descender" in 
self.hhea_: + table.descent = self.hhea_["descender"] + if "linegap" in self.hhea_: + table.lineGap = self.hhea_["linegap"] + + def build_vhea(self): + if not self.vhea_: + return + table = self.font.get("vhea") + if not table: # this only happens for unit tests + table = self.font["vhea"] = newTable("vhea") + table.decompile(b"\0" * 36, self.font) + table.tableVersion = 0x00011000 + if "verttypoascender" in self.vhea_: + table.ascent = self.vhea_["verttypoascender"] + if "verttypodescender" in self.vhea_: + table.descent = self.vhea_["verttypodescender"] + if "verttypolinegap" in self.vhea_: + table.lineGap = self.vhea_["verttypolinegap"] + + def get_user_name_id(self, table): + # Try to find first unused font-specific name id + nameIDs = [name.nameID for name in table.names] + for user_name_id in range(256, 32767): + if user_name_id not in nameIDs: + return user_name_id + + def buildFeatureParams(self, tag): + params = None + if tag == "size": + params = otTables.FeatureParamsSize() + params.DesignSize, params.SubfamilyID, params.RangeStart, \ + params.RangeEnd = self.size_parameters_ + if tag in self.featureNames_ids_: + params.SubfamilyNameID = self.featureNames_ids_[tag] + else: + params.SubfamilyNameID = 0 + elif tag in self.featureNames_: + assert tag in self.featureNames_ids_ + params = otTables.FeatureParamsStylisticSet() + params.Version = 0 + params.UINameID = self.featureNames_ids_[tag] + return params + + def build_name(self): + if not self.names_: + return + table = self.font.get("name") + if not table: # this only happens for unit tests + table = self.font["name"] = newTable("name") + table.names = [] + for name in self.names_: + nameID, platformID, platEncID, langID, string = name + if not isinstance(nameID, int): + # A featureNames name and nameID is actually the tag + tag = nameID + if tag not in self.featureNames_ids_: + self.featureNames_ids_[tag] = self.get_user_name_id(table) + assert self.featureNames_ids_[tag] is not None + nameID = 
self.featureNames_ids_[tag] + table.setName(string, nameID, platformID, platEncID, langID) + + def build_OS_2(self): + if not self.os2_: + return + table = self.font.get("OS/2") + if not table: # this only happens for unit tests + table = self.font["OS/2"] = newTable("OS/2") + data = b"\0" * sstruct.calcsize(getTableModule("OS/2").OS2_format_0) + table.decompile(data, self.font) + version = 0 + if "fstype" in self.os2_: + table.fsType = self.os2_["fstype"] + if "panose" in self.os2_: + panose = getTableModule("OS/2").Panose() + panose.bFamilyType, panose.bSerifStyle, panose.bWeight,\ + panose.bProportion, panose.bContrast, panose.bStrokeVariation,\ + panose.bArmStyle, panose.bLetterForm, panose.bMidline, \ + panose.bXHeight = self.os2_["panose"] + table.panose = panose + if "typoascender" in self.os2_: + table.sTypoAscender = self.os2_["typoascender"] + if "typodescender" in self.os2_: + table.sTypoDescender = self.os2_["typodescender"] + if "typolinegap" in self.os2_: + table.sTypoLineGap = self.os2_["typolinegap"] + if "winascent" in self.os2_: + table.usWinAscent = self.os2_["winascent"] + if "windescent" in self.os2_: + table.usWinDescent = self.os2_["windescent"] + if "vendor" in self.os2_: + table.achVendID = safeEval("'''" + self.os2_["vendor"] + "'''") + if "weightclass" in self.os2_: + table.usWeightClass = self.os2_["weightclass"] + if "widthclass" in self.os2_: + table.usWidthClass = self.os2_["widthclass"] + if "unicoderange" in self.os2_: + table.setUnicodeRanges(self.os2_["unicoderange"]) + if "codepagerange" in self.os2_: + pages = self.build_codepages_(self.os2_["codepagerange"]) + table.ulCodePageRange1, table.ulCodePageRange2 = pages + version = 1 + if "xheight" in self.os2_: + table.sxHeight = self.os2_["xheight"] + version = 2 + if "capheight" in self.os2_: + table.sCapHeight = self.os2_["capheight"] + version = 2 + if "loweropsize" in self.os2_: + table.usLowerOpticalPointSize = self.os2_["loweropsize"] + version = 5 + if "upperopsize" in 
self.os2_: + table.usUpperOpticalPointSize = self.os2_["upperopsize"] + version = 5 + def checkattr(table, attrs): + for attr in attrs: + if not hasattr(table, attr): + setattr(table, attr, 0) + table.version = max(version, table.version) + # this only happens for unit tests + if version >= 1: + checkattr(table, ("ulCodePageRange1", "ulCodePageRange2")) + if version >= 2: + checkattr(table, ("sxHeight", "sCapHeight", "usDefaultChar", + "usBreakChar", "usMaxContext")) + if version >= 5: + checkattr(table, ("usLowerOpticalPointSize", + "usUpperOpticalPointSize")) + + def build_codepages_(self, pages): + pages2bits = { + 1252: 0, 1250: 1, 1251: 2, 1253: 3, 1254: 4, 1255: 5, 1256: 6, + 1257: 7, 1258: 8, 874: 16, 932: 17, 936: 18, 949: 19, 950: 20, + 1361: 21, 869: 48, 866: 49, 865: 50, 864: 51, 863: 52, 862: 53, + 861: 54, 860: 55, 857: 56, 855: 57, 852: 58, 775: 59, 737: 60, + 708: 61, 850: 62, 437: 63, + } + bits = [pages2bits[p] for p in pages if p in pages2bits] + pages = [] + for i in range(2): + pages.append("") + for j in range(i * 32, (i + 1) * 32): + if j in bits: + pages[i] += "1" + else: + pages[i] += "0" + return [binary2num(p[::-1]) for p in pages] + + def buildBASE(self): + if not self.base_horiz_axis_ and not self.base_vert_axis_: + return None + base = otTables.BASE() + base.Version = 0x00010000 + base.HorizAxis = self.buildBASEAxis(self.base_horiz_axis_) + base.VertAxis = self.buildBASEAxis(self.base_vert_axis_) + + result = newTable("BASE") + result.table = base + return result + + def buildBASEAxis(self, axis): + if not axis: + return + bases, scripts = axis + axis = otTables.Axis() + axis.BaseTagList = otTables.BaseTagList() + axis.BaseTagList.BaselineTag = bases + axis.BaseTagList.BaseTagCount = len(bases) + axis.BaseScriptList = otTables.BaseScriptList() + axis.BaseScriptList.BaseScriptRecord = [] + axis.BaseScriptList.BaseScriptCount = len(scripts) + for script in sorted(scripts): + record = otTables.BaseScriptRecord() + record.BaseScriptTag = 
script[0] + record.BaseScript = otTables.BaseScript() + record.BaseScript.BaseLangSysCount = 0 + record.BaseScript.BaseValues = otTables.BaseValues() + record.BaseScript.BaseValues.DefaultIndex = bases.index(script[1]) + record.BaseScript.BaseValues.BaseCoord = [] + record.BaseScript.BaseValues.BaseCoordCount = len(script[2]) + for c in script[2]: + coord = otTables.BaseCoord() + coord.Format = 1 + coord.Coordinate = c + record.BaseScript.BaseValues.BaseCoord.append(coord) + axis.BaseScriptList.BaseScriptRecord.append(record) + return axis + + def buildGDEF(self): + gdef = otTables.GDEF() + gdef.GlyphClassDef = self.buildGDEFGlyphClassDef_() + gdef.AttachList = \ + otl.buildAttachList(self.attachPoints_, self.glyphMap) + gdef.LigCaretList = \ + otl.buildLigCaretList(self.ligCaretCoords_, self.ligCaretPoints_, + self.glyphMap) + gdef.MarkAttachClassDef = self.buildGDEFMarkAttachClassDef_() + gdef.MarkGlyphSetsDef = self.buildGDEFMarkGlyphSetsDef_() + gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef else 0x00010000 + if any((gdef.GlyphClassDef, gdef.AttachList, gdef.LigCaretList, + gdef.MarkAttachClassDef, gdef.MarkGlyphSetsDef)): + result = newTable("GDEF") + result.table = gdef + return result + else: + return None + + def buildGDEFGlyphClassDef_(self): + if self.glyphClassDefs_: + classes = {g: c for (g, (c, _)) in self.glyphClassDefs_.items()} + else: + classes = {} + for lookup in self.lookups_: + classes.update(lookup.inferGlyphClasses()) + for markClass in self.parseTree.markClasses.values(): + for markClassDef in markClass.definitions: + for glyph in markClassDef.glyphSet(): + classes[glyph] = 3 + if classes: + result = otTables.GlyphClassDef() + result.classDefs = classes + return result + else: + return None + + def buildGDEFMarkAttachClassDef_(self): + classDefs = {g: c for g, (c, _) in self.markAttach_.items()} + if not classDefs: + return None + result = otTables.MarkAttachClassDef() + result.classDefs = classDefs + return result + + def 
buildGDEFMarkGlyphSetsDef_(self): + sets = [] + for glyphs, id_ in sorted(self.markFilterSets_.items(), + key=lambda item: item[1]): + sets.append(glyphs) + return otl.buildMarkGlyphSetsDef(sets, self.glyphMap) + + def buildLookups_(self, tag): + assert tag in ('GPOS', 'GSUB'), tag + for lookup in self.lookups_: + lookup.lookup_index = None + lookups = [] + for lookup in self.lookups_: + if lookup.table != tag: + continue + lookup.lookup_index = len(lookups) + lookups.append(lookup) + return [l.build() for l in lookups] + + def makeTable(self, tag): + table = getattr(otTables, tag, None)() + table.Version = 0x00010000 + table.ScriptList = otTables.ScriptList() + table.ScriptList.ScriptRecord = [] + table.FeatureList = otTables.FeatureList() + table.FeatureList.FeatureRecord = [] + table.LookupList = otTables.LookupList() + table.LookupList.Lookup = self.buildLookups_(tag) + + # Build a table for mapping (tag, lookup_indices) to feature_index. + # For example, ('liga', (2,3,7)) --> 23. + feature_indices = {} + required_feature_indices = {} # ('latn', 'DEU') --> 23 + scripts = {} # 'latn' --> {'DEU': [23, 24]} for feature #23,24 + # Sort the feature table by feature tag: + # https://github.com/behdad/fonttools/issues/568 + sortFeatureTag = lambda f: (f[0][2], f[0][1], f[0][0], f[1]) + for key, lookups in sorted(self.features_.items(), key=sortFeatureTag): + script, lang, feature_tag = key + # l.lookup_index will be None when a lookup is not needed + # for the table under construction. For example, substitution + # rules will have no lookup_index while building GPOS tables. 
+ lookup_indices = tuple([l.lookup_index for l in lookups + if l.lookup_index is not None]) + + size_feature = (tag == "GPOS" and feature_tag == "size") + if len(lookup_indices) == 0 and not size_feature: + continue + + feature_key = (feature_tag, lookup_indices) + feature_index = feature_indices.get(feature_key) + if feature_index is None: + feature_index = len(table.FeatureList.FeatureRecord) + frec = otTables.FeatureRecord() + frec.FeatureTag = feature_tag + frec.Feature = otTables.Feature() + frec.Feature.FeatureParams = self.buildFeatureParams( + feature_tag) + frec.Feature.LookupListIndex = lookup_indices + frec.Feature.LookupCount = len(lookup_indices) + table.FeatureList.FeatureRecord.append(frec) + feature_indices[feature_key] = feature_index + scripts.setdefault(script, {}).setdefault(lang, []).append( + feature_index) + if self.required_features_.get((script, lang)) == feature_tag: + required_feature_indices[(script, lang)] = feature_index + + # Build ScriptList. + for script, lang_features in sorted(scripts.items()): + srec = otTables.ScriptRecord() + srec.ScriptTag = script + srec.Script = otTables.Script() + srec.Script.DefaultLangSys = None + srec.Script.LangSysRecord = [] + for lang, feature_indices in sorted(lang_features.items()): + langrec = otTables.LangSysRecord() + langrec.LangSys = otTables.LangSys() + langrec.LangSys.LookupOrder = None + + req_feature_index = \ + required_feature_indices.get((script, lang)) + if req_feature_index is None: + langrec.LangSys.ReqFeatureIndex = 0xFFFF + else: + langrec.LangSys.ReqFeatureIndex = req_feature_index + + langrec.LangSys.FeatureIndex = [i for i in feature_indices + if i != req_feature_index] + langrec.LangSys.FeatureCount = \ + len(langrec.LangSys.FeatureIndex) + + if lang == "dflt": + srec.Script.DefaultLangSys = langrec.LangSys + else: + langrec.LangSysTag = lang + srec.Script.LangSysRecord.append(langrec) + srec.Script.LangSysCount = len(srec.Script.LangSysRecord) + 
table.ScriptList.ScriptRecord.append(srec) + + table.ScriptList.ScriptCount = len(table.ScriptList.ScriptRecord) + table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord) + table.LookupList.LookupCount = len(table.LookupList.Lookup) + return table + + def add_language_system(self, location, script, language): + # OpenType Feature File Specification, section 4.b.i + if (script == "DFLT" and language == "dflt" and + self.default_language_systems_): + raise FeatureLibError( + 'If "languagesystem DFLT dflt" is present, it must be ' + 'the first of the languagesystem statements', location) + if (script, language) in self.default_language_systems_: + raise FeatureLibError( + '"languagesystem %s %s" has already been specified' % + (script.strip(), language.strip()), location) + self.default_language_systems_.add((script, language)) + + def get_default_language_systems_(self): + # OpenType Feature File specification, 4.b.i. languagesystem: + # If no "languagesystem" statement is present, then the + # implementation must behave exactly as though the following + # statement were present at the beginning of the feature file: + # languagesystem DFLT dflt; + if self.default_language_systems_: + return frozenset(self.default_language_systems_) + else: + return frozenset({('DFLT', 'dflt')}) + + def start_feature(self, location, name): + self.language_systems = self.get_default_language_systems_() + self.script_ = 'DFLT' + self.cur_lookup_ = None + self.cur_feature_name_ = name + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + if name == "aalt": + self.aalt_location_ = location + + def end_feature(self): + assert self.cur_feature_name_ is not None + self.cur_feature_name_ = None + self.language_systems = None + self.cur_lookup_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def start_lookup_block(self, location, name): + if name in self.named_lookups_: + raise FeatureLibError( + 'Lookup "%s" has already been defined' % name, 
location) + if self.cur_feature_name_ == "aalt": + raise FeatureLibError( + "Lookup blocks cannot be placed inside 'aalt' features; " + "move it out, and then refer to it with a lookup statement", + location) + self.cur_lookup_name_ = name + self.named_lookups_[name] = None + self.cur_lookup_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def end_lookup_block(self): + assert self.cur_lookup_name_ is not None + self.cur_lookup_name_ = None + self.cur_lookup_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def add_lookup_call(self, lookup_name): + assert lookup_name in self.named_lookups_, lookup_name + self.cur_lookup_ = None + lookup = self.named_lookups_[lookup_name] + self.add_lookup_to_feature_(lookup, self.cur_feature_name_) + + def set_font_revision(self, location, revision): + self.fontRevision_ = revision + + def set_language(self, location, language, include_default, required): + assert(len(language) == 4) + if self.cur_feature_name_ in ('aalt', 'size'): + raise FeatureLibError( + "Language statements are not allowed " + "within \"feature %s\"" % self.cur_feature_name_, location) + if language != 'dflt' and self.script_ == 'DFLT': + raise FeatureLibError("Need non-DFLT script when using non-dflt " + "language (was: \"%s\")" % language, location) + self.cur_lookup_ = None + + key = (self.script_, language, self.cur_feature_name_) + if not include_default: + # don't include any lookups added by script DFLT in this feature + self.features_[key] = [] + elif language != 'dflt': + # add rules defined between script statement and its first following + # language statement to each of its explicitly specified languages: + # http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html#4.b.ii + lookups = self.features_.get((key[0], 'dflt', key[2])) + dflt_lookups = self.features_.get(('DFLT', 'dflt', key[2]), []) + if lookups: + if key[:2] in self.get_default_language_systems_(): + lookups = [l for l in 
lookups if l not in dflt_lookups] + self.features_.setdefault(key, []).extend(lookups) + if self.script_ == 'DFLT': + langsys = set(self.get_default_language_systems_()) + else: + langsys = set() + langsys.add((self.script_, language)) + self.language_systems = frozenset(langsys) + + if required: + key = (self.script_, language) + if key in self.required_features_: + raise FeatureLibError( + "Language %s (script %s) has already " + "specified feature %s as its required feature" % ( + language.strip(), self.script_.strip(), + self.required_features_[key].strip()), + location) + self.required_features_[key] = self.cur_feature_name_ + + def getMarkAttachClass_(self, location, glyphs): + glyphs = frozenset(glyphs) + id_ = self.markAttachClassID_.get(glyphs) + if id_ is not None: + return id_ + id_ = len(self.markAttachClassID_) + 1 + self.markAttachClassID_[glyphs] = id_ + for glyph in glyphs: + if glyph in self.markAttach_: + _, loc = self.markAttach_[glyph] + raise FeatureLibError( + "Glyph %s already has been assigned " + "a MarkAttachmentType at %s:%d:%d" % ( + glyph, loc[0], loc[1], loc[2]), + location) + self.markAttach_[glyph] = (id_, location) + return id_ + + def getMarkFilterSet_(self, location, glyphs): + glyphs = frozenset(glyphs) + id_ = self.markFilterSets_.get(glyphs) + if id_ is not None: + return id_ + id_ = len(self.markFilterSets_) + self.markFilterSets_[glyphs] = id_ + return id_ + + def set_lookup_flag(self, location, value, markAttach, markFilter): + value = value & 0xFF + if markAttach: + markAttachClass = self.getMarkAttachClass_(location, markAttach) + value = value | (markAttachClass << 8) + if markFilter: + markFilterSet = self.getMarkFilterSet_(location, markFilter) + value = value | 0x10 + self.lookupflag_markFilterSet_ = markFilterSet + else: + self.lookupflag_markFilterSet_ = None + self.lookupflag_ = value + + def set_script(self, location, script): + if self.cur_feature_name_ in ('aalt', 'size'): + raise FeatureLibError( + "Script 
statements are not allowed " + "within \"feature %s\"" % self.cur_feature_name_, location) + self.cur_lookup_ = None + self.script_ = script + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + self.set_language(location, "dflt", + include_default=True, required=False) + + def find_lookup_builders_(self, lookups): + """Helper for building chain contextual substitutions + + Given a list of lookup names, finds the LookupBuilder for each name. + If an input name is None, it gets mapped to a None LookupBuilder. + """ + lookup_builders = [] + for lookup in lookups: + if lookup is not None: + lookup_builders.append(self.named_lookups_.get(lookup.name)) + else: + lookup_builders.append(None) + return lookup_builders + + def add_attach_points(self, location, glyphs, contourPoints): + for glyph in glyphs: + self.attachPoints_.setdefault(glyph, set()).update(contourPoints) + + def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups): + lookup = self.get_lookup_(location, ChainContextPosBuilder) + lookup.rules.append((prefix, glyphs, suffix, + self.find_lookup_builders_(lookups))) + + def add_chain_context_subst(self, location, + prefix, glyphs, suffix, lookups): + lookup = self.get_lookup_(location, ChainContextSubstBuilder) + lookup.substitutions.append((prefix, glyphs, suffix, + self.find_lookup_builders_(lookups))) + + def add_alternate_subst(self, location, + prefix, glyph, suffix, replacement): + if self.cur_feature_name_ == "aalt": + alts = self.aalt_alternates_.setdefault(glyph, set()) + alts.update(replacement) + return + if prefix or suffix: + chain = self.get_lookup_(location, ChainContextSubstBuilder) + lookup = self.get_chained_lookup_(location, AlternateSubstBuilder) + chain.substitutions.append((prefix, [glyph], suffix, [lookup])) + else: + lookup = self.get_lookup_(location, AlternateSubstBuilder) + if glyph in lookup.alternates: + raise FeatureLibError( + 'Already defined alternates for glyph "%s"' % glyph, + location) + 
lookup.alternates[glyph] = replacement + + def add_feature_reference(self, location, featureName): + if self.cur_feature_name_ != "aalt": + raise FeatureLibError( + 'Feature references are only allowed inside "feature aalt"', + location) + self.aalt_features_.append((location, featureName)) + + def add_featureName(self, location, tag): + self.featureNames_.append(tag) + + def set_base_axis(self, bases, scripts, vertical): + if vertical: + self.base_vert_axis_ = (bases, scripts) + else: + self.base_horiz_axis_ = (bases, scripts) + + def set_size_parameters(self, location, DesignSize, SubfamilyID, + RangeStart, RangeEnd): + if self.cur_feature_name_ != 'size': + raise FeatureLibError( + "Parameters statements are not allowed " + "within \"feature %s\"" % self.cur_feature_name_, location) + self.size_parameters_ = [DesignSize, SubfamilyID, RangeStart, RangeEnd] + for script, lang in self.language_systems: + key = (script, lang, self.cur_feature_name_) + self.features_.setdefault(key, []) + + def add_ligature_subst(self, location, + prefix, glyphs, suffix, replacement, forceChain): + if prefix or suffix or forceChain: + chain = self.get_lookup_(location, ChainContextSubstBuilder) + lookup = self.get_chained_lookup_(location, LigatureSubstBuilder) + chain.substitutions.append((prefix, glyphs, suffix, [lookup])) + else: + lookup = self.get_lookup_(location, LigatureSubstBuilder) + + # OpenType feature file syntax, section 5.d, "Ligature substitution": + # "Since the OpenType specification does not allow ligature + # substitutions to be specified on target sequences that contain + # glyph classes, the implementation software will enumerate + # all specific glyph sequences if glyph classes are detected" + for g in sorted(itertools.product(*glyphs)): + lookup.ligatures[g] = replacement + + def add_multiple_subst(self, location, + prefix, glyph, suffix, replacements): + if prefix or suffix: + chain = self.get_lookup_(location, ChainContextSubstBuilder) + sub = 
self.get_chained_lookup_(location, MultipleSubstBuilder) + sub.mapping[glyph] = replacements + chain.substitutions.append((prefix, [{glyph}], suffix, [sub])) + return + lookup = self.get_lookup_(location, MultipleSubstBuilder) + if glyph in lookup.mapping: + raise FeatureLibError( + 'Already defined substitution for glyph "%s"' % glyph, + location) + lookup.mapping[glyph] = replacements + + def add_reverse_chain_single_subst(self, location, old_prefix, + old_suffix, mapping): + lookup = self.get_lookup_(location, ReverseChainSingleSubstBuilder) + lookup.substitutions.append((old_prefix, old_suffix, mapping)) + + def add_single_subst(self, location, prefix, suffix, mapping, forceChain): + if self.cur_feature_name_ == "aalt": + for (from_glyph, to_glyph) in mapping.items(): + alts = self.aalt_alternates_.setdefault(from_glyph, set()) + alts.add(to_glyph) + return + if prefix or suffix or forceChain: + self.add_single_subst_chained_(location, prefix, suffix, mapping) + return + lookup = self.get_lookup_(location, SingleSubstBuilder) + for (from_glyph, to_glyph) in mapping.items(): + if from_glyph in lookup.mapping: + raise FeatureLibError( + 'Already defined rule for replacing glyph "%s" by "%s"' % + (from_glyph, lookup.mapping[from_glyph]), + location) + lookup.mapping[from_glyph] = to_glyph + + def find_chainable_SingleSubst_(self, chain, glyphs): + """Helper for add_single_subst_chained_()""" + for _, _, _, substitutions in chain.substitutions: + for sub in substitutions: + if (isinstance(sub, SingleSubstBuilder) and + not any(g in glyphs for g in sub.mapping.keys())): + return sub + return None + + def add_single_subst_chained_(self, location, prefix, suffix, mapping): + # https://github.com/behdad/fonttools/issues/512 + chain = self.get_lookup_(location, ChainContextSubstBuilder) + sub = self.find_chainable_SingleSubst_(chain, set(mapping.keys())) + if sub is None: + sub = self.get_chained_lookup_(location, SingleSubstBuilder) + sub.mapping.update(mapping) + 
chain.substitutions.append((prefix, [mapping.keys()], suffix, [sub])) + + def add_cursive_pos(self, location, glyphclass, entryAnchor, exitAnchor): + lookup = self.get_lookup_(location, CursivePosBuilder) + lookup.add_attachment( + location, glyphclass, + makeOpenTypeAnchor(entryAnchor), + makeOpenTypeAnchor(exitAnchor)) + + def add_marks_(self, location, lookupBuilder, marks): + """Helper for add_mark_{base,liga,mark}_pos.""" + for _, markClass in marks: + for markClassDef in markClass.definitions: + for mark in markClassDef.glyphs.glyphSet(): + if mark not in lookupBuilder.marks: + otMarkAnchor = makeOpenTypeAnchor(markClassDef.anchor) + lookupBuilder.marks[mark] = ( + markClass.name, otMarkAnchor) + else: + existingMarkClass = lookupBuilder.marks[mark][0] + if markClass.name != existingMarkClass: + raise FeatureLibError( + "Glyph %s cannot be in both @%s and @%s" % ( + mark, existingMarkClass, markClass.name), + location) + + def add_mark_base_pos(self, location, bases, marks): + builder = self.get_lookup_(location, MarkBasePosBuilder) + self.add_marks_(location, builder, marks) + for baseAnchor, markClass in marks: + otBaseAnchor = makeOpenTypeAnchor(baseAnchor) + for base in bases: + builder.bases.setdefault(base, {})[markClass.name] = ( + otBaseAnchor) + + def add_mark_lig_pos(self, location, ligatures, components): + builder = self.get_lookup_(location, MarkLigPosBuilder) + componentAnchors = [] + for marks in components: + anchors = {} + self.add_marks_(location, builder, marks) + for ligAnchor, markClass in marks: + anchors[markClass.name] = makeOpenTypeAnchor(ligAnchor) + componentAnchors.append(anchors) + for glyph in ligatures: + builder.ligatures[glyph] = componentAnchors + + def add_mark_mark_pos(self, location, baseMarks, marks): + builder = self.get_lookup_(location, MarkMarkPosBuilder) + self.add_marks_(location, builder, marks) + for baseAnchor, markClass in marks: + otBaseAnchor = makeOpenTypeAnchor(baseAnchor) + for baseMark in baseMarks: + 
builder.baseMarks.setdefault(baseMark, {})[markClass.name] = ( + otBaseAnchor) + + def add_class_pair_pos(self, location, glyphclass1, value1, + glyphclass2, value2): + lookup = self.get_lookup_(location, PairPosBuilder) + lookup.addClassPair(location, glyphclass1, value1, glyphclass2, value2) + + def add_specific_pair_pos(self, location, glyph1, value1, glyph2, value2): + lookup = self.get_lookup_(location, PairPosBuilder) + lookup.addGlyphPair(location, glyph1, value1, glyph2, value2) + + def add_single_pos(self, location, prefix, suffix, pos, forceChain): + if prefix or suffix or forceChain: + self.add_single_pos_chained_(location, prefix, suffix, pos) + else: + lookup = self.get_lookup_(location, SinglePosBuilder) + for glyphs, value in pos: + for glyph in glyphs: + lookup.add_pos(location, glyph, value) + + def find_chainable_SinglePos_(self, lookups, glyphs, value): + """Helper for add_single_pos_chained_()""" + for look in lookups: + if all(look.can_add(glyph, value) for glyph in glyphs): + return look + return None + + def add_single_pos_chained_(self, location, prefix, suffix, pos): + # https://github.com/fonttools/fonttools/issues/514 + chain = self.get_lookup_(location, ChainContextPosBuilder) + targets = [] + for _, _, _, lookups in chain.rules: + for lookup in lookups: + if isinstance(lookup, SinglePosBuilder): + targets.append(lookup) + subs = [] + for glyphs, value in pos: + if value is None: + subs.append(None) + continue + otValue, _ = makeOpenTypeValueRecord(value, pairPosContext=False) + sub = self.find_chainable_SinglePos_(targets, glyphs, otValue) + if sub is None: + sub = self.get_chained_lookup_(location, SinglePosBuilder) + targets.append(sub) + for glyph in glyphs: + sub.add_pos(location, glyph, value) + subs.append(sub) + assert len(pos) == len(subs), (pos, subs) + chain.rules.append( + (prefix, [g for g, v in pos], suffix, subs)) + + def setGlyphClass_(self, location, glyph, glyphClass): + oldClass, oldLocation = 
self.glyphClassDefs_.get(glyph, (None, None)) + if oldClass and oldClass != glyphClass: + raise FeatureLibError( + "Glyph %s was assigned to a different class at %s:%s:%s" % + (glyph, oldLocation[0], oldLocation[1], oldLocation[2]), + location) + self.glyphClassDefs_[glyph] = (glyphClass, location) + + def add_glyphClassDef(self, location, baseGlyphs, ligatureGlyphs, + markGlyphs, componentGlyphs): + for glyph in baseGlyphs: + self.setGlyphClass_(location, glyph, 1) + for glyph in ligatureGlyphs: + self.setGlyphClass_(location, glyph, 2) + for glyph in markGlyphs: + self.setGlyphClass_(location, glyph, 3) + for glyph in componentGlyphs: + self.setGlyphClass_(location, glyph, 4) + + def add_ligatureCaretByIndex_(self, location, glyphs, carets): + for glyph in glyphs: + self.ligCaretPoints_.setdefault(glyph, set()).update(carets) + + def add_ligatureCaretByPos_(self, location, glyphs, carets): + for glyph in glyphs: + self.ligCaretCoords_.setdefault(glyph, set()).update(carets) + + def add_name_record(self, location, nameID, platformID, platEncID, + langID, string): + self.names_.append([nameID, platformID, platEncID, langID, string]) + + def add_os2_field(self, key, value): + self.os2_[key] = value + + def add_hhea_field(self, key, value): + self.hhea_[key] = value + + def add_vhea_field(self, key, value): + self.vhea_[key] = value + + +def makeOpenTypeAnchor(anchor): + """ast.Anchor --> otTables.Anchor""" + if anchor is None: + return None + deviceX, deviceY = None, None + if anchor.xDeviceTable is not None: + deviceX = otl.buildDevice(dict(anchor.xDeviceTable)) + if anchor.yDeviceTable is not None: + deviceY = otl.buildDevice(dict(anchor.yDeviceTable)) + return otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, + deviceX, deviceY) + + +_VALUEREC_ATTRS = { + name[0].lower() + name[1:]: (name, isDevice) + for _, name, isDevice, _ in otBase.valueRecordFormat + if not name.startswith("Reserved") +} + + +def makeOpenTypeValueRecord(v, pairPosContext): + 
"""ast.ValueRecord --> (otBase.ValueRecord, int ValueFormat)""" + if v is None: + return None, 0 + + vr = {} + for astName, (otName, isDevice) in _VALUEREC_ATTRS.items(): + val = getattr(v, astName, None) + if val: + vr[otName] = otl.buildDevice(dict(val)) if isDevice else val + if pairPosContext and not vr: + vr = {"YAdvance": 0} if v.vertical else {"XAdvance": 0} + valRec = otl.buildValue(vr) + return valRec, valRec.getFormat() + + +class LookupBuilder(object): + def __init__(self, font, location, table, lookup_type): + self.font = font + self.glyphMap = font.getReverseGlyphMap() + self.location = location + self.table, self.lookup_type = table, lookup_type + self.lookupflag = 0 + self.markFilterSet = None + self.lookup_index = None # assigned when making final tables + assert table in ('GPOS', 'GSUB') + + def equals(self, other): + return (isinstance(other, self.__class__) and + self.table == other.table and + self.lookupflag == other.lookupflag and + self.markFilterSet == other.markFilterSet) + + def inferGlyphClasses(self): + """Infers glyph glasses for the GDEF table, such as {"cedilla":3}.""" + return {} + + def getAlternateGlyphs(self): + """Helper for building 'aalt' features.""" + return {} + + def buildLookup_(self, subtables): + return otl.buildLookup(subtables, self.lookupflag, self.markFilterSet) + + def buildMarkClasses_(self, marks): + """{"cedilla": ("BOTTOM", ast.Anchor), ...} --> {"BOTTOM":0, "TOP":1} + + Helper for MarkBasePostBuilder, MarkLigPosBuilder, and + MarkMarkPosBuilder. Seems to return the same numeric IDs + for mark classes as the AFDKO makeotf tool. 
+ """ + ids = {} + for mark in sorted(marks.keys(), key=self.font.getGlyphID): + markClassName, _markAnchor = marks[mark] + if markClassName not in ids: + ids[markClassName] = len(ids) + return ids + + def setBacktrackCoverage_(self, prefix, subtable): + subtable.BacktrackGlyphCount = len(prefix) + subtable.BacktrackCoverage = [] + for p in reversed(prefix): + coverage = otl.buildCoverage(p, self.glyphMap) + subtable.BacktrackCoverage.append(coverage) + + def setLookAheadCoverage_(self, suffix, subtable): + subtable.LookAheadGlyphCount = len(suffix) + subtable.LookAheadCoverage = [] + for s in suffix: + coverage = otl.buildCoverage(s, self.glyphMap) + subtable.LookAheadCoverage.append(coverage) + + def setInputCoverage_(self, glyphs, subtable): + subtable.InputGlyphCount = len(glyphs) + subtable.InputCoverage = [] + for g in glyphs: + coverage = otl.buildCoverage(g, self.glyphMap) + subtable.InputCoverage.append(coverage) + + +class AlternateSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 3) + self.alternates = {} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.alternates == other.alternates) + + def build(self): + subtable = otl.buildAlternateSubstSubtable(self.alternates) + return self.buildLookup_([subtable]) + + def getAlternateGlyphs(self): + return self.alternates + + +class ChainContextPosBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 8) + self.rules = [] # (prefix, input, suffix, lookups) + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.rules == other.rules) + + def build(self): + subtables = [] + for (prefix, glyphs, suffix, lookups) in self.rules: + st = otTables.ChainContextPos() + subtables.append(st) + st.Format = 3 + self.setBacktrackCoverage_(prefix, st) + self.setLookAheadCoverage_(suffix, st) + self.setInputCoverage_(glyphs, st) + + 
st.PosCount = len([l for l in lookups if l is not None]) + st.PosLookupRecord = [] + for sequenceIndex, l in enumerate(lookups): + if l is not None: + rec = otTables.PosLookupRecord() + rec.SequenceIndex = sequenceIndex + rec.LookupListIndex = l.lookup_index + st.PosLookupRecord.append(rec) + return self.buildLookup_(subtables) + + +class ChainContextSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 6) + self.substitutions = [] # (prefix, input, suffix, lookups) + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.substitutions == other.substitutions) + + def build(self): + subtables = [] + for (prefix, input, suffix, lookups) in self.substitutions: + st = otTables.ChainContextSubst() + subtables.append(st) + st.Format = 3 + self.setBacktrackCoverage_(prefix, st) + self.setLookAheadCoverage_(suffix, st) + self.setInputCoverage_(input, st) + + st.SubstCount = len([l for l in lookups if l is not None]) + st.SubstLookupRecord = [] + for sequenceIndex, l in enumerate(lookups): + if l is not None: + rec = otTables.SubstLookupRecord() + rec.SequenceIndex = sequenceIndex + rec.LookupListIndex = l.lookup_index + st.SubstLookupRecord.append(rec) + return self.buildLookup_(subtables) + + def getAlternateGlyphs(self): + result = {} + for (_prefix, _input, _suffix, lookups) in self.substitutions: + for lookup in lookups: + alts = lookup.getAlternateGlyphs() + for glyph, replacements in alts.items(): + result.setdefault(glyph, set()).update(replacements) + return result + + +class LigatureSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 4) + self.ligatures = {} # {('f','f','i'): 'f_f_i'} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.ligatures == other.ligatures) + + def build(self): + subtable = otl.buildLigatureSubstSubtable(self.ligatures) + return 
self.buildLookup_([subtable]) + + +class MultipleSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 2) + self.mapping = {} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.mapping == other.mapping) + + def build(self): + subtable = otl.buildMultipleSubstSubtable(self.mapping) + return self.buildLookup_([subtable]) + + +class CursivePosBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 3) + self.attachments = {} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.attachments == other.attachments) + + def add_attachment(self, location, glyphs, entryAnchor, exitAnchor): + for glyph in glyphs: + self.attachments[glyph] = (entryAnchor, exitAnchor) + + def build(self): + st = otl.buildCursivePosSubtable(self.attachments, self.glyphMap) + return self.buildLookup_([st]) + + +class MarkBasePosBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 4) + self.marks = {} # glyphName -> (markClassName, anchor) + self.bases = {} # glyphName -> {markClassName: anchor} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.marks == other.marks and + self.bases == other.bases) + + def inferGlyphClasses(self): + result = {glyph: 1 for glyph in self.bases} + result.update({glyph: 3 for glyph in self.marks}) + return result + + def build(self): + markClasses = self.buildMarkClasses_(self.marks) + marks = {mark: (markClasses[mc], anchor) + for mark, (mc, anchor) in self.marks.items()} + bases = {} + for glyph, anchors in self.bases.items(): + bases[glyph] = {markClasses[mc]: anchor + for (mc, anchor) in anchors.items()} + subtables = otl.buildMarkBasePos(marks, bases, self.glyphMap) + return self.buildLookup_(subtables) + + +class MarkLigPosBuilder(LookupBuilder): + def __init__(self, font, 
location): + LookupBuilder.__init__(self, font, location, 'GPOS', 5) + self.marks = {} # glyphName -> (markClassName, anchor) + self.ligatures = {} # glyphName -> [{markClassName: anchor}, ...] + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.marks == other.marks and + self.ligatures == other.ligatures) + + def inferGlyphClasses(self): + result = {glyph: 2 for glyph in self.ligatures} + result.update({glyph: 3 for glyph in self.marks}) + return result + + def build(self): + markClasses = self.buildMarkClasses_(self.marks) + marks = {mark: (markClasses[mc], anchor) + for mark, (mc, anchor) in self.marks.items()} + ligs = {} + for lig, components in self.ligatures.items(): + ligs[lig] = [] + for c in components: + ligs[lig].append({markClasses[mc]: a for mc, a in c.items()}) + subtables = otl.buildMarkLigPos(marks, ligs, self.glyphMap) + return self.buildLookup_(subtables) + + +class MarkMarkPosBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 6) + self.marks = {} # glyphName -> (markClassName, anchor) + self.baseMarks = {} # glyphName -> {markClassName: anchor} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.marks == other.marks and + self.baseMarks == other.baseMarks) + + def inferGlyphClasses(self): + result = {glyph: 3 for glyph in self.baseMarks} + result.update({glyph: 3 for glyph in self.marks}) + return result + + def build(self): + markClasses = self.buildMarkClasses_(self.marks) + markClassList = sorted(markClasses.keys(), key=markClasses.get) + marks = {mark: (markClasses[mc], anchor) + for mark, (mc, anchor) in self.marks.items()} + + st = otTables.MarkMarkPos() + st.Format = 1 + st.ClassCount = len(markClasses) + st.Mark1Coverage = otl.buildCoverage(marks, self.glyphMap) + st.Mark2Coverage = otl.buildCoverage(self.baseMarks, self.glyphMap) + st.Mark1Array = otl.buildMarkArray(marks, self.glyphMap) + st.Mark2Array = 
otTables.Mark2Array() + st.Mark2Array.Mark2Count = len(st.Mark2Coverage.glyphs) + st.Mark2Array.Mark2Record = [] + for base in st.Mark2Coverage.glyphs: + anchors = [self.baseMarks[base].get(mc) for mc in markClassList] + st.Mark2Array.Mark2Record.append(otl.buildMark2Record(anchors)) + return self.buildLookup_([st]) + + +class ReverseChainSingleSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 8) + self.substitutions = [] # (prefix, suffix, mapping) + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.substitutions == other.substitutions) + + def build(self): + subtables = [] + for prefix, suffix, mapping in self.substitutions: + st = otTables.ReverseChainSingleSubst() + st.Format = 1 + self.setBacktrackCoverage_(prefix, st) + self.setLookAheadCoverage_(suffix, st) + st.Coverage = otl.buildCoverage(mapping.keys(), self.glyphMap) + st.GlyphCount = len(mapping) + st.Substitute = [mapping[g] for g in st.Coverage.glyphs] + subtables.append(st) + return self.buildLookup_(subtables) + + +class SingleSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 1) + self.mapping = {} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.mapping == other.mapping) + + def build(self): + subtable = otl.buildSingleSubstSubtable(self.mapping) + return self.buildLookup_([subtable]) + + def getAlternateGlyphs(self): + return {glyph: set([repl]) for glyph, repl in self.mapping.items()} + + +class ClassPairPosSubtableBuilder(object): + def __init__(self, builder, valueFormat1, valueFormat2): + self.builder_ = builder + self.classDef1_, self.classDef2_ = None, None + self.values_ = {} # (glyphclass1, glyphclass2) --> (value1, value2) + self.valueFormat1_, self.valueFormat2_ = valueFormat1, valueFormat2 + self.forceSubtableBreak_ = False + self.subtables_ = [] + + def addPair(self, gc1, 
value1, gc2, value2): + mergeable = (not self.forceSubtableBreak_ and + self.classDef1_ is not None and + self.classDef1_.canAdd(gc1) and + self.classDef2_ is not None and + self.classDef2_.canAdd(gc2)) + if not mergeable: + self.flush_() + self.classDef1_ = otl.ClassDefBuilder(useClass0=True) + self.classDef2_ = otl.ClassDefBuilder(useClass0=False) + self.values_ = {} + self.classDef1_.add(gc1) + self.classDef2_.add(gc2) + self.values_[(gc1, gc2)] = (value1, value2) + + def addSubtableBreak(self): + self.forceSubtableBreak_ = True + + def subtables(self): + self.flush_() + return self.subtables_ + + def flush_(self): + if self.classDef1_ is None or self.classDef2_ is None: + return + st = otl.buildPairPosClassesSubtable(self.values_, + self.builder_.glyphMap) + self.subtables_.append(st) + + +class PairPosBuilder(LookupBuilder): + SUBTABLE_BREAK_ = "SUBTABLE_BREAK" + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 2) + self.pairs = [] # [(gc1, value1, gc2, value2)*] + self.glyphPairs = {} # (glyph1, glyph2) --> (value1, value2) + self.locations = {} # (gc1, gc2) --> (filepath, line, column) + + def addClassPair(self, location, glyphclass1, value1, glyphclass2, value2): + self.pairs.append((glyphclass1, value1, glyphclass2, value2)) + + def addGlyphPair(self, location, glyph1, value1, glyph2, value2): + key = (glyph1, glyph2) + oldValue = self.glyphPairs.get(key, None) + if oldValue is not None: + otherLoc = self.locations[key] + raise FeatureLibError( + 'Already defined position for pair %s %s at %s:%d:%d' + % (glyph1, glyph2, otherLoc[0], otherLoc[1], otherLoc[2]), + location) + val1, _ = makeOpenTypeValueRecord(value1, pairPosContext=True) + val2, _ = makeOpenTypeValueRecord(value2, pairPosContext=True) + self.glyphPairs[key] = (val1, val2) + self.locations[key] = location + + def add_subtable_break(self, location): + self.pairs.append((self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_, + self.SUBTABLE_BREAK_, 
self.SUBTABLE_BREAK_)) + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.glyphPairs == other.glyphPairs and + self.pairs == other.pairs) + + def build(self): + builders = {} + builder = None + for glyphclass1, value1, glyphclass2, value2 in self.pairs: + if glyphclass1 is self.SUBTABLE_BREAK_: + if builder is not None: + builder.addSubtableBreak() + continue + val1, valFormat1 = makeOpenTypeValueRecord( + value1, pairPosContext=True) + val2, valFormat2 = makeOpenTypeValueRecord( + value2, pairPosContext=True) + builder = builders.get((valFormat1, valFormat2)) + if builder is None: + builder = ClassPairPosSubtableBuilder( + self, valFormat1, valFormat2) + builders[(valFormat1, valFormat2)] = builder + builder.addPair(glyphclass1, val1, glyphclass2, val2) + subtables = [] + if self.glyphPairs: + subtables.extend( + otl.buildPairPosGlyphs(self.glyphPairs, self.glyphMap)) + for key in sorted(builders.keys()): + subtables.extend(builders[key].subtables()) + return self.buildLookup_(subtables) + + +class SinglePosBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 1) + self.locations = {} # glyph -> (filename, line, column) + self.mapping = {} # glyph -> otTables.ValueRecord + + def add_pos(self, location, glyph, valueRecord): + otValueRecord, _ = makeOpenTypeValueRecord( + valueRecord, pairPosContext=False) + if not self.can_add(glyph, otValueRecord): + otherLoc = self.locations[glyph] + raise FeatureLibError( + 'Already defined different position for glyph "%s" at %s:%d:%d' + % (glyph, otherLoc[0], otherLoc[1], otherLoc[2]), + location) + if otValueRecord: + self.mapping[glyph] = otValueRecord + self.locations[glyph] = location + + def can_add(self, glyph, value): + assert isinstance(value, otl.ValueRecord) + curValue = self.mapping.get(glyph) + return curValue is None or curValue == value + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + 
self.mapping == other.mapping) + + def build(self): + subtables = otl.buildSinglePos(self.mapping, self.glyphMap) + return self.buildLookup_(subtables) diff -Nru fonttools-3.0/Lib/fontTools/feaLib/error.py fonttools-3.21.2/Lib/fontTools/feaLib/error.py --- fonttools-3.0/Lib/fontTools/feaLib/error.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/error.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals + + +class FeatureLibError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + path, line, column = self.location + return "%s:%d:%d: %s" % (path, line, column, message) + else: + return message diff -Nru fonttools-3.0/Lib/fontTools/feaLib/lexer.py fonttools-3.21.2/Lib/fontTools/feaLib/lexer.py --- fonttools-3.0/Lib/fontTools/feaLib/lexer.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/lexer.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,25 +1,14 @@ from __future__ import print_function, division, absolute_import from __future__ import unicode_literals -import codecs +from fontTools.misc.py23 import * +from fontTools.feaLib.error import FeatureLibError +import re import os -class LexerError(Exception): - def __init__(self, message, location): - Exception.__init__(self, message) - self.location = location - - def __str__(self): - message = Exception.__str__(self) - if self.location: - path, line, column = self.location - return "%s:%d:%d: %s" % (path, line, column, message) - else: - return message - - class Lexer(object): NUMBER = "NUMBER" + FLOAT = "FLOAT" STRING = "STRING" NAME = "NAME" FILENAME = "FILENAME" @@ -28,15 +17,18 @@ SYMBOL = "SYMBOL" COMMENT = "COMMENT" NEWLINE = "NEWLINE" + ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK" CHAR_WHITESPACE_ = " \t" 
CHAR_NEWLINE_ = "\r\n" - CHAR_SYMBOL_ = ";:-+'{}[]<>()=" + CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" CHAR_DIGIT_ = "0123456789" CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - CHAR_NAME_START_ = CHAR_LETTER_ + "_.\\" - CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_." + CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" + CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" + + RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.]+$") MODE_NORMAL_ = "NORMAL" MODE_FILENAME_ = "FILENAME" @@ -59,13 +51,16 @@ def __next__(self): # Python 3 while True: token_type, token, location = self.next_() - if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}: + if token_type != Lexer.NEWLINE: return (token_type, token, location) + def location_(self): + column = self.pos_ - self.line_start_ + 1 + return (self.filename_, self.line_, column) + def next_(self): self.scan_over_(Lexer.CHAR_WHITESPACE_) - column = self.pos_ - self.line_start_ + 1 - location = (self.filename_, self.line_, column) + location = self.location_() start = self.pos_ text = self.text_ limit = len(text) @@ -90,11 +85,13 @@ if self.mode_ is Lexer.MODE_FILENAME_: if cur_char != "(": - raise LexerError("Expected '(' before file name", location) + raise FeatureLibError("Expected '(' before file name", + location) self.scan_until_(")") cur_char = text[self.pos_] if self.pos_ < limit else None if cur_char != ")": - raise LexerError("Expected ')' after file name", location) + raise FeatureLibError("Expected ')' after file name", + location) self.pos_ += 1 self.mode_ = Lexer.MODE_NORMAL_ return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location) @@ -108,11 +105,15 @@ self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) glyphclass = text[start + 1:self.pos_] if len(glyphclass) < 1: - raise LexerError("Expected glyph class name", location) - if len(glyphclass) > 30: - raise LexerError( - "Glyph class names must not be longer than 30 characters", + raise 
FeatureLibError("Expected glyph class name", location) + if len(glyphclass) > 63: + raise FeatureLibError( + "Glyph class names must not be longer than 63 characters", location) + if not Lexer.RE_GLYPHCLASS.match(glyphclass): + raise FeatureLibError( + "Glyph class names must consist of letters, digits, " + "underscore, or period", location) return (Lexer.GLYPHCLASS, glyphclass, location) if cur_char in Lexer.CHAR_NAME_START_: self.pos_ += 1 @@ -127,23 +128,35 @@ return (Lexer.NUMBER, int(text[start:self.pos_], 16), location) if cur_char in Lexer.CHAR_DIGIT_: self.scan_over_(Lexer.CHAR_DIGIT_) - return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if self.pos_ >= limit or text[self.pos_] != ".": + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + self.scan_over_(".") + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.FLOAT, float(text[start:self.pos_]), location) if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: self.pos_ += 1 self.scan_over_(Lexer.CHAR_DIGIT_) - return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if self.pos_ >= limit or text[self.pos_] != ".": + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + self.scan_over_(".") + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.FLOAT, float(text[start:self.pos_]), location) if cur_char in Lexer.CHAR_SYMBOL_: self.pos_ += 1 return (Lexer.SYMBOL, cur_char, location) if cur_char == '"': self.pos_ += 1 - self.scan_until_('"\r\n') + self.scan_until_('"') if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': self.pos_ += 1 - return (Lexer.STRING, text[start + 1:self.pos_ - 1], location) + # strip newlines embedded within a string + string = re.sub("[\r\n]", "", text[start + 1:self.pos_ - 1]) + return (Lexer.STRING, string, location) else: - raise LexerError("Expected '\"' to terminate string", location) - raise LexerError("Unexpected character: '%s'" % cur_char, location) + raise FeatureLibError("Expected '\"' to terminate string", + 
location) + raise FeatureLibError("Unexpected character: %r" % cur_char, + location) def scan_over_(self, valid): p = self.pos_ @@ -157,10 +170,25 @@ p += 1 self.pos_ = p + def scan_anonymous_block(self, tag): + location = self.location_() + tag = tag.strip() + self.scan_until_(Lexer.CHAR_NEWLINE_) + self.scan_over_(Lexer.CHAR_NEWLINE_) + regexp = r'}\s*' + tag + r'\s*;' + split = re.split(regexp, self.text_[self.pos_:], maxsplit=1) + if len(split) != 2: + raise FeatureLibError( + "Expected '} %s;' to terminate anonymous block" % tag, + location) + self.pos_ += len(split[0]) + return (Lexer.ANONYMOUS_BLOCK, split[0], location) + class IncludingLexer(object): - def __init__(self, filename): - self.lexers_ = [self.make_lexer_(filename, (filename, 0, 0))] + def __init__(self, featurefile): + self.lexers_ = [self.make_lexer_(featurefile)] + self.featurefilepath = self.lexers_[0].filename_ def __iter__(self): return self @@ -172,22 +200,22 @@ while self.lexers_: lexer = self.lexers_[-1] try: - token_type, token, location = lexer.next() + token_type, token, location = next(lexer) except StopIteration: self.lexers_.pop() continue if token_type is Lexer.NAME and token == "include": fname_type, fname_token, fname_location = lexer.next() if fname_type is not Lexer.FILENAME: - raise LexerError("Expected file name", fname_location) - semi_type, semi_token, semi_location = lexer.next() - if semi_type is not Lexer.SYMBOL or semi_token != ";": - raise LexerError("Expected ';'", semi_location) - curpath, _ = os.path.split(lexer.filename_) + raise FeatureLibError("Expected file name", fname_location) + #semi_type, semi_token, semi_location = lexer.next() + #if semi_type is not Lexer.SYMBOL or semi_token != ";": + # raise FeatureLibError("Expected ';'", semi_location) + curpath = os.path.dirname(self.featurefilepath) path = os.path.join(curpath, fname_token) if len(self.lexers_) >= 5: - raise LexerError("Too many recursive includes", - fname_location) + raise FeatureLibError("Too 
many recursive includes", + fname_location) self.lexers_.append(self.make_lexer_(path, fname_location)) continue else: @@ -195,9 +223,20 @@ raise StopIteration() @staticmethod - def make_lexer_(filename, location): - try: - with codecs.open(filename, "rb", "utf-8") as f: - return Lexer(f.read(), filename) - except IOError as err: - raise LexerError(str(err), location) + def make_lexer_(file_or_path, location=None): + if hasattr(file_or_path, "read"): + fileobj, closing = file_or_path, False + else: + filename, closing = file_or_path, True + try: + fileobj = open(filename, "r", encoding="utf-8") + except IOError as err: + raise FeatureLibError(str(err), location) + data = fileobj.read() + filename = fileobj.name if hasattr(fileobj, "name") else "" + if closing: + fileobj.close() + return Lexer(data, filename) + + def scan_anonymous_block(self, tag): + return self.lexers_[-1].scan_anonymous_block(tag) diff -Nru fonttools-3.0/Lib/fontTools/feaLib/lexer_test.py fonttools-3.21.2/Lib/fontTools/feaLib/lexer_test.py --- fonttools-3.0/Lib/fontTools/feaLib/lexer_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/lexer_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,160 +0,0 @@ -from __future__ import print_function, division, absolute_import -from __future__ import unicode_literals -from fontTools.feaLib.lexer import IncludingLexer, Lexer, LexerError -import os -import unittest - - -def lex(s): - return [(typ, tok) for (typ, tok, _) in Lexer(s, "test.fea")] - - -class LexerErrorTest(unittest.TestCase): - def test_str(self): - err = LexerError("Squeak!", ("foo.fea", 23, 42)) - self.assertEqual(str(err), "foo.fea:23:42: Squeak!") - - def test_str_nolocation(self): - err = LexerError("Squeak!", None) - self.assertEqual(str(err), "Squeak!") - - -class LexerTest(unittest.TestCase): - def __init__(self, methodName): - unittest.TestCase.__init__(self, methodName) - # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, - # and fires 
deprecation warnings if a program uses the old name. - if not hasattr(self, "assertRaisesRegex"): - self.assertRaisesRegex = self.assertRaisesRegexp - - def test_empty(self): - self.assertEqual(lex(""), []) - self.assertEqual(lex(" \t "), []) - - def test_name(self): - self.assertEqual(lex("a17"), [(Lexer.NAME, "a17")]) - self.assertEqual(lex(".notdef"), [(Lexer.NAME, ".notdef")]) - self.assertEqual(lex("two.oldstyle"), [(Lexer.NAME, "two.oldstyle")]) - self.assertEqual(lex("_"), [(Lexer.NAME, "_")]) - self.assertEqual(lex("\\table"), [(Lexer.NAME, "\\table")]) - - def test_cid(self): - self.assertEqual(lex("\\0 \\987"), [(Lexer.CID, 0), (Lexer.CID, 987)]) - - def test_glyphclass(self): - self.assertEqual(lex("@Vowel.sc"), [(Lexer.GLYPHCLASS, "Vowel.sc")]) - self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@(a)") - self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@ A") - self.assertRaisesRegex(LexerError, "not be longer than 30 characters", - lex, "@a123456789.a123456789.a123456789.x") - - def test_include(self): - self.assertEqual(lex("include (~/foo/bar baz.fea);"), [ - (Lexer.NAME, "include"), - (Lexer.FILENAME, "~/foo/bar baz.fea"), - (Lexer.SYMBOL, ";") - ]) - self.assertEqual(lex("include # Comment\n (foo) \n;"), [ - (Lexer.NAME, "include"), - (Lexer.FILENAME, "foo"), - (Lexer.SYMBOL, ";") - ]) - self.assertRaises(LexerError, lex, "include blah") - self.assertRaises(LexerError, lex, "include (blah") - - def test_number(self): - self.assertEqual(lex("123 -456"), - [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)]) - self.assertEqual(lex("0xCAFED00D"), [(Lexer.NUMBER, 0xCAFED00D)]) - self.assertEqual(lex("0xcafed00d"), [(Lexer.NUMBER, 0xCAFED00D)]) - - def test_symbol(self): - self.assertEqual(lex("a'"), [(Lexer.NAME, "a"), (Lexer.SYMBOL, "'")]) - self.assertEqual( - lex("foo - -2"), - [(Lexer.NAME, "foo"), (Lexer.SYMBOL, "-"), (Lexer.NUMBER, -2)]) - - def test_comment(self): - self.assertEqual(lex("# Comment\n#"), []) - - def 
test_string(self): - self.assertEqual(lex('"foo" "bar"'), - [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")]) - self.assertRaises(LexerError, lambda: lex('"foo\n bar"')) - - def test_bad_character(self): - self.assertRaises(LexerError, lambda: lex("123 \u0001")) - - def test_newline(self): - lines = lambda s: [loc[1] for (_, _, loc) in Lexer(s, "test.fea")] - self.assertEqual(lines("FOO\n\nBAR\nBAZ"), [1, 3, 4]) # Unix - self.assertEqual(lines("FOO\r\rBAR\rBAZ"), [1, 3, 4]) # Macintosh - self.assertEqual(lines("FOO\r\n\r\n BAR\r\nBAZ"), [1, 3, 4]) # Windows - self.assertEqual(lines("FOO\n\rBAR\r\nBAZ"), [1, 3, 4]) # mixed - - def test_location(self): - locs = lambda s: ["%s:%d:%d" % loc - for (_, _, loc) in Lexer(s, "test.fea")] - self.assertEqual(locs("a b # Comment\n12 @x"), [ - "test.fea:1:1", "test.fea:1:3", "test.fea:2:1", - "test.fea:2:4" - ]) - - def test_scan_over_(self): - lexer = Lexer("abbacabba12", "test.fea") - self.assertEqual(lexer.pos_, 0) - lexer.scan_over_("xyz") - self.assertEqual(lexer.pos_, 0) - lexer.scan_over_("abc") - self.assertEqual(lexer.pos_, 9) - lexer.scan_over_("abc") - self.assertEqual(lexer.pos_, 9) - lexer.scan_over_("0123456789") - self.assertEqual(lexer.pos_, 11) - - def test_scan_until_(self): - lexer = Lexer("foo'bar", "test.fea") - self.assertEqual(lexer.pos_, 0) - lexer.scan_until_("'") - self.assertEqual(lexer.pos_, 3) - lexer.scan_until_("'") - self.assertEqual(lexer.pos_, 3) - - -class IncludingLexerTest(unittest.TestCase): - @staticmethod - def getpath(filename): - path, _ = os.path.split(__file__) - return os.path.join(path, "testdata", filename) - - def test_include(self): - lexer = IncludingLexer(self.getpath("include4.fea")) - result = ['%s %s:%d' % (token, os.path.split(loc[0])[1], loc[1]) - for _, token, loc in lexer] - self.assertEqual(result, [ - "I4a include4.fea:1", - "I3a include3.fea:1", - "I2a include2.fea:1", - "I1a include1.fea:1", - "I0 include0.fea:1", - "I1b include1.fea:3", - "I2b include2.fea:3", - "I3b 
include3.fea:3", - "I4b include4.fea:3" - ]) - - def test_include_limit(self): - lexer = IncludingLexer(self.getpath("include6.fea")) - self.assertRaises(LexerError, lambda: list(lexer)) - - def test_include_self(self): - lexer = IncludingLexer(self.getpath("includeself.fea")) - self.assertRaises(LexerError, lambda: list(lexer)) - - def test_include_missing_file(self): - lexer = IncludingLexer(self.getpath("includemissingfile.fea")) - self.assertRaises(LexerError, lambda: list(lexer)) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/feaLib/__main__.py fonttools-3.21.2/Lib/fontTools/feaLib/__main__.py --- fonttools-3.0/Lib/fontTools/feaLib/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/__main__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,42 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.feaLib.builder import addOpenTypeFeatures +from fontTools import configLogger +from fontTools.misc.cliTools import makeOutputFileName +import sys +import argparse +import logging + + +log = logging.getLogger("fontTools.feaLib") + + +def main(args=None): + parser = argparse.ArgumentParser( + description="Use fontTools to compile OpenType feature files (*.fea).") + parser.add_argument( + "input_fea", metavar="FEATURES", help="Path to the feature file") + parser.add_argument( + "input_font", metavar="INPUT_FONT", help="Path to the input font") + parser.add_argument( + "-o", "--output", dest="output_font", metavar="OUTPUT_FONT", + help="Path to the output font.") + parser.add_argument( + "-v", "--verbose", help="increase the logger verbosity. 
Multiple -v " + "options are allowed.", action="count", default=0) + options = parser.parse_args(args) + + levels = ["WARNING", "INFO", "DEBUG"] + configLogger(level=levels[min(len(levels) - 1, options.verbose)]) + + output_font = options.output_font or makeOutputFileName(options.input_font) + log.info("Compiling features to '%s'" % (output_font)) + + font = TTFont(options.input_font) + addOpenTypeFeatures(font, options.input_fea) + font.save(output_font) + + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Lib/fontTools/feaLib/parser.py fonttools-3.21.2/Lib/fontTools/feaLib/parser.py --- fonttools-3.0/Lib/fontTools/feaLib/parser.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/parser.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,64 +1,137 @@ from __future__ import print_function, division, absolute_import from __future__ import unicode_literals +from fontTools.feaLib.error import FeatureLibError from fontTools.feaLib.lexer import Lexer, IncludingLexer +from fontTools.misc.encodingTools import getEncoding +from fontTools.misc.py23 import * import fontTools.feaLib.ast as ast +import logging import os import re -class ParserError(Exception): - def __init__(self, message, location): - Exception.__init__(self, message) - self.location = location - - def __str__(self): - message = Exception.__str__(self) - if self.location: - path, line, column = self.location - return "%s:%d:%d: %s" % (path, line, column, message) - else: - return message +log = logging.getLogger(__name__) class Parser(object): - def __init__(self, path): - self.doc_ = ast.FeatureFile() + extensions = {} + ast = ast + + def __init__(self, featurefile, glyphNames=(), **kwargs): + if "glyphMap" in kwargs: + from fontTools.misc.loggingTools import deprecateArgument + deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead") + if glyphNames: + raise TypeError("'glyphNames' and (deprecated) 'glyphMap' are " + "mutually exclusive") + glyphNames 
= kwargs.pop("glyphMap") + if kwargs: + raise TypeError("unsupported keyword argument%s: %s" + % ("" if len(kwargs) == 1 else "s", + ", ".join(repr(k) for k in kwargs))) + + self.glyphNames_ = set(glyphNames) + self.doc_ = self.ast.FeatureFile() self.anchors_ = SymbolTable() self.glyphclasses_ = SymbolTable() self.lookups_ = SymbolTable() self.valuerecords_ = SymbolTable() self.symbol_tables_ = { - self.anchors_, self.glyphclasses_, - self.lookups_, self.valuerecords_ + self.anchors_, self.valuerecords_ } self.next_token_type_, self.next_token_ = (None, None) + self.cur_comments_ = [] self.next_token_location_ = None - self.lexer_ = IncludingLexer(path) - self.advance_lexer_() + self.lexer_ = IncludingLexer(featurefile) + self.advance_lexer_(comments=True) def parse(self): statements = self.doc_.statements while self.next_token_type_ is not None: - self.advance_lexer_() - if self.cur_token_type_ is Lexer.GLYPHCLASS: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.cur_token_type_ is Lexer.GLYPHCLASS: statements.append(self.parse_glyphclass_definition_()) + elif self.is_cur_keyword_(("anon", "anonymous")): + statements.append(self.parse_anonymous_()) elif self.is_cur_keyword_("anchorDef"): statements.append(self.parse_anchordef_()) elif self.is_cur_keyword_("languagesystem"): statements.append(self.parse_languagesystem_()) elif self.is_cur_keyword_("lookup"): statements.append(self.parse_lookup_(vertical=False)) + elif self.is_cur_keyword_("markClass"): + statements.append(self.parse_markClass_()) elif self.is_cur_keyword_("feature"): statements.append(self.parse_feature_block_()) + elif self.is_cur_keyword_("table"): + statements.append(self.parse_table_()) elif self.is_cur_keyword_("valueRecordDef"): statements.append( self.parse_valuerecord_definition_(vertical=False)) + elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in 
self.extensions: + statements.append(self.extensions[self.cur_token_](self)) + elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";": + continue else: - raise ParserError("Expected feature, languagesystem, " - "lookup, or glyph class definition", - self.cur_token_location_) + raise FeatureLibError( + "Expected feature, languagesystem, lookup, markClass, " + "table, or glyph class definition, got {} \"{}\"".format(self.cur_token_type_, self.cur_token_), + self.cur_token_location_) return self.doc_ + def parse_anchor_(self): + self.expect_symbol_("<") + self.expect_keyword_("anchor") + location = self.cur_token_location_ + + if self.next_token_ == "NULL": + self.expect_keyword_("NULL") + self.expect_symbol_(">") + return None + + if self.next_token_type_ == Lexer.NAME: + name = self.expect_name_() + anchordef = self.anchors_.resolve(name) + if anchordef is None: + raise FeatureLibError( + 'Unknown anchor "%s"' % name, + self.cur_token_location_) + self.expect_symbol_(">") + return self.ast.Anchor(location, name, anchordef.x, anchordef.y, + anchordef.contourpoint, + xDeviceTable=None, yDeviceTable=None) + + x, y = self.expect_number_(), self.expect_number_() + + contourpoint = None + if self.next_token_ == "contourpoint": + self.expect_keyword_("contourpoint") + contourpoint = self.expect_number_() + + if self.next_token_ == "<": + xDeviceTable = self.parse_device_() + yDeviceTable = self.parse_device_() + else: + xDeviceTable, yDeviceTable = None, None + + self.expect_symbol_(">") + return self.ast.Anchor(location, None, x, y, contourpoint, + xDeviceTable, yDeviceTable) + + def parse_anchor_marks_(self): + """Parses a sequence of [ mark @MARKCLASS]*.""" + anchorMarks = [] # [(self.ast.Anchor, markClassName)*] + while self.next_token_ == "<": + anchor = self.parse_anchor_() + if anchor is None and self.next_token_ != "mark": + continue # without mark, eg. 
in GPOS type 5 + self.expect_keyword_("mark") + markClass = self.expect_markClass_reference_() + anchorMarks.append((anchor, markClass)) + return anchorMarks + def parse_anchordef_(self): assert self.is_cur_keyword_("anchorDef") location = self.cur_token_location_ @@ -69,113 +142,288 @@ contourpoint = self.expect_number_() name = self.expect_name_() self.expect_symbol_(";") - anchordef = ast.AnchorDefinition(location, name, x, y, contourpoint) + anchordef = self.ast.AnchorDefinition(location, name, x, y, contourpoint) self.anchors_.define(name, anchordef) return anchordef + def parse_anonymous_(self): + assert self.is_cur_keyword_(("anon", "anonymous")) + tag = self.expect_tag_() + _, content, location = self.lexer_.scan_anonymous_block(tag) + self.advance_lexer_() + self.expect_symbol_('}') + end_tag = self.expect_tag_() + assert tag == end_tag, "bad splitting in Lexer.scan_anonymous_block()" + self.expect_symbol_(';') + return self.ast.AnonymousBlock(tag, content, location) + + def parse_attach_(self): + assert self.is_cur_keyword_("Attach") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + contourPoints = {self.expect_number_()} + while self.next_token_ != ";": + contourPoints.add(self.expect_number_()) + self.expect_symbol_(";") + return self.ast.AttachStatement(location, glyphs, contourPoints) + + def parse_enumerate_(self, vertical): + assert self.cur_token_ in {"enumerate", "enum"} + self.advance_lexer_() + return self.parse_position_(enumerated=True, vertical=vertical) + + def parse_GlyphClassDef_(self): + """Parses 'GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENTS;'""" + assert self.is_cur_keyword_("GlyphClassDef") + location = self.cur_token_location_ + if self.next_token_ != ",": + baseGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + baseGlyphs = None + self.expect_symbol_(",") + if self.next_token_ != ",": + ligatureGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + 
ligatureGlyphs = None + self.expect_symbol_(",") + if self.next_token_ != ",": + markGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + markGlyphs = None + self.expect_symbol_(",") + if self.next_token_ != ";": + componentGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + componentGlyphs = None + self.expect_symbol_(";") + return self.ast.GlyphClassDefStatement(location, baseGlyphs, markGlyphs, + ligatureGlyphs, componentGlyphs) + def parse_glyphclass_definition_(self): + """Parses glyph class definitions such as '@UPPERCASE = [A-Z];'""" location, name = self.cur_token_location_, self.cur_token_ self.expect_symbol_("=") glyphs = self.parse_glyphclass_(accept_glyphname=False) self.expect_symbol_(";") - if self.glyphclasses_.resolve(name) is not None: - raise ParserError("Glyph class @%s already defined" % name, - location) - glyphclass = ast.GlyphClassDefinition(location, name, glyphs) + glyphclass = self.ast.GlyphClassDefinition(location, name, glyphs) self.glyphclasses_.define(name, glyphclass) return glyphclass + def split_glyph_range_(self, name, location): + # Since v1.20, the OpenType Feature File specification allows + # for dashes in glyph names. A sequence like "a-b-c-d" could + # therefore mean a single glyph whose name happens to be + # "a-b-c-d", or it could mean a range from glyph "a" to glyph + # "b-c-d", or a range from glyph "a-b" to glyph "c-d", or a + # range from glyph "a-b-c" to glyph "d".Technically, this + # example could be resolved because the (pretty complex) + # definition of glyph ranges renders most of these splits + # invalid. But the specification does not say that a compiler + # should try to apply such fancy heuristics. To encourage + # unambiguous feature files, we therefore try all possible + # splits and reject the feature file if there are multiple + # splits possible. It is intentional that we don't just emit a + # warning; warnings tend to get ignored. 
To fix the problem, + # font designers can trivially add spaces around the intended + # split point, and we emit a compiler error that suggests + # how exactly the source should be rewritten to make things + # unambiguous. + parts = name.split("-") + solutions = [] + for i in range(len(parts)): + start, limit = "-".join(parts[0:i]), "-".join(parts[i:]) + if start in self.glyphNames_ and limit in self.glyphNames_: + solutions.append((start, limit)) + if len(solutions) == 1: + start, limit = solutions[0] + return start, limit + elif len(solutions) == 0: + raise FeatureLibError( + "\"%s\" is not a glyph in the font, and it can not be split " + "into a range of known glyphs" % name, location) + else: + ranges = " or ".join(["\"%s - %s\"" % (s, l) for s, l in solutions]) + raise FeatureLibError( + "Ambiguous glyph range \"%s\"; " + "please use %s to clarify what you mean" % (name, ranges), + location) + def parse_glyphclass_(self, accept_glyphname): - result = set() - if accept_glyphname and self.next_token_type_ is Lexer.NAME: - result.add(self.expect_name_()) - return result + if (accept_glyphname and + self.next_token_type_ in (Lexer.NAME, Lexer.CID)): + glyph = self.expect_glyph_() + return self.ast.GlyphName(self.cur_token_location_, glyph) if self.next_token_type_ is Lexer.GLYPHCLASS: self.advance_lexer_() gc = self.glyphclasses_.resolve(self.cur_token_) if gc is None: - raise ParserError("Unknown glyph class @%s" % self.cur_token_, - self.cur_token_location_) - result.update(gc.glyphs) - return result + raise FeatureLibError( + "Unknown glyph class @%s" % self.cur_token_, + self.cur_token_location_) + if isinstance(gc, self.ast.MarkClass): + return self.ast.MarkClassName(self.cur_token_location_, gc) + else: + return self.ast.GlyphClassName(self.cur_token_location_, gc) self.expect_symbol_("[") + location = self.cur_token_location_ + glyphs = self.ast.GlyphClass(location) while self.next_token_ != "]": - self.advance_lexer_() - if self.cur_token_type_ is 
Lexer.NAME: + if self.next_token_type_ is Lexer.NAME: + glyph = self.expect_glyph_() + location = self.cur_token_location_ + if '-' in glyph and glyph not in self.glyphNames_: + start, limit = self.split_glyph_range_(glyph, location) + glyphs.add_range( + start, limit, + self.make_glyph_range_(location, start, limit)) + elif self.next_token_ == "-": + start = glyph + self.expect_symbol_("-") + limit = self.expect_glyph_() + glyphs.add_range( + start, limit, + self.make_glyph_range_(location, start, limit)) + else: + glyphs.append(glyph) + elif self.next_token_type_ is Lexer.CID: + glyph = self.expect_glyph_() if self.next_token_ == "-": - range_location_ = self.cur_token_location_ + range_location = self.cur_token_location_ range_start = self.cur_token_ self.expect_symbol_("-") - range_end = self.expect_name_() - result.update(self.make_glyph_range_(range_location_, - range_start, - range_end)) + range_end = self.expect_cid_() + glyphs.add_cid_range(range_start, range_end, + self.make_cid_range_(range_location, + range_start, range_end)) else: - result.add(self.cur_token_) - elif self.cur_token_type_ is Lexer.GLYPHCLASS: + glyphs.append("cid%05d" % self.cur_token_) + elif self.next_token_type_ is Lexer.GLYPHCLASS: + self.advance_lexer_() gc = self.glyphclasses_.resolve(self.cur_token_) if gc is None: - raise ParserError( + raise FeatureLibError( "Unknown glyph class @%s" % self.cur_token_, self.cur_token_location_) - result.update(gc.glyphs) + if isinstance(gc, self.ast.MarkClass): + gc = self.ast.MarkClassName(self.cur_token_location_, gc) + else: + gc = self.ast.GlyphClassName(self.cur_token_location_, gc) + glyphs.add_class(gc) else: - raise ParserError( + raise FeatureLibError( "Expected glyph name, glyph range, " "or glyph class reference", - self.cur_token_location_) + self.next_token_location_) self.expect_symbol_("]") - return result + return glyphs - def parse_glyph_pattern_(self): - prefix, glyphs, lookups, suffix = ([], [], [], []) - while 
self.next_token_ not in {"by", "from", ";"}: + def parse_class_name_(self): + name = self.expect_class_name_() + gc = self.glyphclasses_.resolve(name) + if gc is None: + raise FeatureLibError( + "Unknown glyph class @%s" % name, + self.cur_token_location_) + if isinstance(gc, self.ast.MarkClass): + return self.ast.MarkClassName(self.cur_token_location_, gc) + else: + return self.ast.GlyphClassName(self.cur_token_location_, gc) + + def parse_glyph_pattern_(self, vertical): + prefix, glyphs, lookups, values, suffix = ([], [], [], [], []) + hasMarks = False + while self.next_token_ not in {"by", "from", ";", ","}: gc = self.parse_glyphclass_(accept_glyphname=True) marked = False if self.next_token_ == "'": self.expect_symbol_("'") - marked = True + hasMarks = marked = True if marked: + if suffix: + # makeotf also reports this as an error, while FontForge + # silently inserts ' in all the intervening glyphs. + # https://github.com/fonttools/fonttools/pull/1096 + raise FeatureLibError( + "Unsupported contextual target sequence: at most " + "one run of marked (') glyph/class names allowed", + self.cur_token_location_) glyphs.append(gc) elif glyphs: suffix.append(gc) else: prefix.append(gc) + if self.is_next_value_(): + values.append(self.parse_valuerecord_(vertical)) + else: + values.append(None) + lookup = None if self.next_token_ == "lookup": self.expect_keyword_("lookup") if not marked: - raise ParserError("Lookups can only follow marked glyphs", - self.cur_token_location_) + raise FeatureLibError( + "Lookups can only follow marked glyphs", + self.cur_token_location_) lookup_name = self.expect_name_() lookup = self.lookups_.resolve(lookup_name) if lookup is None: - raise ParserError('Unknown lookup "%s"' % lookup_name, - self.cur_token_location_) + raise FeatureLibError( + 'Unknown lookup "%s"' % lookup_name, + self.cur_token_location_) if marked: lookups.append(lookup) if not glyphs and not suffix: # eg., "sub f f i by" assert lookups == [] - return ([], prefix, 
[None] * len(prefix), []) + return ([], prefix, [None] * len(prefix), values, [], hasMarks) else: - return (prefix, glyphs, lookups, suffix) + assert not any(values[:len(prefix)]), values + values = values[len(prefix):][:len(glyphs)] + return (prefix, glyphs, lookups, values, suffix, hasMarks) + + def parse_chain_context_(self): + location = self.cur_token_location_ + prefix, glyphs, lookups, values, suffix, hasMarks = \ + self.parse_glyph_pattern_(vertical=False) + chainContext = [(prefix, glyphs, suffix)] + hasLookups = any(lookups) + while self.next_token_ == ",": + self.expect_symbol_(",") + prefix, glyphs, lookups, values, suffix, hasMarks = \ + self.parse_glyph_pattern_(vertical=False) + chainContext.append((prefix, glyphs, suffix)) + hasLookups = hasLookups or any(lookups) + self.expect_symbol_(";") + return chainContext, hasLookups def parse_ignore_(self): assert self.is_cur_keyword_("ignore") location = self.cur_token_location_ self.advance_lexer_() if self.cur_token_ in ["substitute", "sub"]: - prefix, glyphs, lookups, suffix = self.parse_glyph_pattern_() - self.expect_symbol_(";") - return ast.IgnoreSubstitutionRule(location, prefix, glyphs, suffix) - raise ParserError("Expected \"substitute\"", self.next_token_location_) + chainContext, hasLookups = self.parse_chain_context_() + if hasLookups: + raise FeatureLibError( + "No lookups can be specified for \"ignore sub\"", + location) + return self.ast.IgnoreSubstStatement(location, chainContext) + if self.cur_token_ in ["position", "pos"]: + chainContext, hasLookups = self.parse_chain_context_() + if hasLookups: + raise FeatureLibError( + "No lookups can be specified for \"ignore pos\"", + location) + return self.ast.IgnorePosStatement(location, chainContext) + raise FeatureLibError( + "Expected \"substitute\" or \"position\"", + self.cur_token_location_) def parse_language_(self): assert self.is_cur_keyword_("language") - location, language = self.cur_token_location_, self.expect_tag_() + location = 
self.cur_token_location_ + language = self.expect_language_tag_() include_default, required = (True, False) if self.next_token_ in {"exclude_dflt", "include_dflt"}: include_default = (self.expect_name_() == "include_dflt") @@ -183,8 +431,28 @@ self.expect_keyword_("required") required = True self.expect_symbol_(";") - return ast.LanguageStatement(location, language.strip(), - include_default, required) + return self.ast.LanguageStatement(location, language, + include_default, required) + + def parse_ligatureCaretByIndex_(self): + assert self.is_cur_keyword_("LigatureCaretByIndex") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + carets = [self.expect_number_()] + while self.next_token_ != ";": + carets.append(self.expect_number_()) + self.expect_symbol_(";") + return self.ast.LigatureCaretByIndexStatement(location, glyphs, carets) + + def parse_ligatureCaretByPos_(self): + assert self.is_cur_keyword_("LigatureCaretByPos") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + carets = [self.expect_number_()] + while self.next_token_ != ";": + carets.append(self.expect_number_()) + self.expect_symbol_(";") + return self.ast.LigatureCaretByPosStatement(location, glyphs, carets) def parse_lookup_(self, vertical): assert self.is_cur_keyword_("lookup") @@ -193,37 +461,194 @@ if self.next_token_ == ";": lookup = self.lookups_.resolve(name) if lookup is None: - raise ParserError("Unknown lookup \"%s\"" % name, - self.cur_token_location_) + raise FeatureLibError("Unknown lookup \"%s\"" % name, + self.cur_token_location_) self.expect_symbol_(";") - return ast.LookupReferenceStatement(location, lookup) + return self.ast.LookupReferenceStatement(location, lookup) use_extension = False if self.next_token_ == "useExtension": self.expect_keyword_("useExtension") use_extension = True - block = ast.LookupBlock(location, name, use_extension) + block = self.ast.LookupBlock(location, name, 
use_extension) self.parse_block_(block, vertical) self.lookups_.define(name, block) return block + def parse_lookupflag_(self): + assert self.is_cur_keyword_("lookupflag") + location = self.cur_token_location_ + + # format B: "lookupflag 6;" + if self.next_token_type_ == Lexer.NUMBER: + value = self.expect_number_() + self.expect_symbol_(";") + return self.ast.LookupFlagStatement(location, value, None, None) + + # format A: "lookupflag RightToLeft MarkAttachmentType @M;" + value, markAttachment, markFilteringSet = 0, None, None + flags = { + "RightToLeft": 1, "IgnoreBaseGlyphs": 2, + "IgnoreLigatures": 4, "IgnoreMarks": 8 + } + seen = set() + while self.next_token_ != ";": + if self.next_token_ in seen: + raise FeatureLibError( + "%s can be specified only once" % self.next_token_, + self.next_token_location_) + seen.add(self.next_token_) + if self.next_token_ == "MarkAttachmentType": + self.expect_keyword_("MarkAttachmentType") + markAttachment = self.parse_class_name_() + elif self.next_token_ == "UseMarkFilteringSet": + self.expect_keyword_("UseMarkFilteringSet") + markFilteringSet = self.parse_class_name_() + elif self.next_token_ in flags: + value = value | flags[self.expect_name_()] + else: + raise FeatureLibError( + '"%s" is not a recognized lookupflag' % self.next_token_, + self.next_token_location_) + self.expect_symbol_(";") + return self.ast.LookupFlagStatement(location, value, + markAttachment, markFilteringSet) + + def parse_markClass_(self): + assert self.is_cur_keyword_("markClass") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + anchor = self.parse_anchor_() + name = self.expect_class_name_() + self.expect_symbol_(";") + markClass = self.doc_.markClasses.get(name) + if markClass is None: + markClass = self.ast.MarkClass(name) + self.doc_.markClasses[name] = markClass + self.glyphclasses_.define(name, markClass) + mcdef = self.ast.MarkClassDefinition(location, markClass, anchor, glyphs) + 
markClass.addDefinition(mcdef) + return mcdef + + def parse_position_(self, enumerated, vertical): + assert self.cur_token_ in {"position", "pos"} + if self.next_token_ == "cursive": # GPOS type 3 + return self.parse_position_cursive_(enumerated, vertical) + elif self.next_token_ == "base": # GPOS type 4 + return self.parse_position_base_(enumerated, vertical) + elif self.next_token_ == "ligature": # GPOS type 5 + return self.parse_position_ligature_(enumerated, vertical) + elif self.next_token_ == "mark": # GPOS type 6 + return self.parse_position_mark_(enumerated, vertical) + + location = self.cur_token_location_ + prefix, glyphs, lookups, values, suffix, hasMarks = \ + self.parse_glyph_pattern_(vertical) + self.expect_symbol_(";") + + if any(lookups): + # GPOS type 8: Chaining contextual positioning; explicit lookups + if any(values): + raise FeatureLibError( + "If \"lookup\" is present, no values must be specified", + location) + return self.ast.ChainContextPosStatement( + location, prefix, glyphs, suffix, lookups) + + # Pair positioning, format A: "pos V 10 A -10;" + # Pair positioning, format B: "pos V A -20;" + if not prefix and not suffix and len(glyphs) == 2 and not hasMarks: + if values[0] is None: # Format B: "pos V A -20;" + values.reverse() + return self.ast.PairPosStatement( + location, enumerated, + glyphs[0], values[0], glyphs[1], values[1]) + + if enumerated: + raise FeatureLibError( + '"enumerate" is only allowed with pair positionings', location) + return self.ast.SinglePosStatement(location, list(zip(glyphs, values)), + prefix, suffix, forceChain=hasMarks) + + def parse_position_cursive_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("cursive") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + 'cursive attachment positioning', + location) + glyphclass = self.parse_glyphclass_(accept_glyphname=True) + entryAnchor = self.parse_anchor_() + exitAnchor = self.parse_anchor_() + 
self.expect_symbol_(";") + return self.ast.CursivePosStatement( + location, glyphclass, entryAnchor, exitAnchor) + + def parse_position_base_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("base") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + 'mark-to-base attachment positioning', + location) + base = self.parse_glyphclass_(accept_glyphname=True) + marks = self.parse_anchor_marks_() + self.expect_symbol_(";") + return self.ast.MarkBasePosStatement(location, base, marks) + + def parse_position_ligature_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("ligature") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + 'mark-to-ligature attachment positioning', + location) + ligatures = self.parse_glyphclass_(accept_glyphname=True) + marks = [self.parse_anchor_marks_()] + while self.next_token_ == "ligComponent": + self.expect_keyword_("ligComponent") + marks.append(self.parse_anchor_marks_()) + self.expect_symbol_(";") + return self.ast.MarkLigPosStatement(location, ligatures, marks) + + def parse_position_mark_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("mark") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + 'mark-to-mark attachment positioning', + location) + baseMarks = self.parse_glyphclass_(accept_glyphname=True) + marks = self.parse_anchor_marks_() + self.expect_symbol_(";") + return self.ast.MarkMarkPosStatement(location, baseMarks, marks) + def parse_script_(self): assert self.is_cur_keyword_("script") - location, script = self.cur_token_location_, self.expect_tag_() + location, script = self.cur_token_location_, self.expect_script_tag_() self.expect_symbol_(";") - return ast.ScriptStatement(location, script) + return self.ast.ScriptStatement(location, script) def parse_substitute_(self): - assert self.cur_token_ in {"substitute", "sub"} 
+ assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"} location = self.cur_token_location_ - old_prefix, old, lookups, old_suffix = self.parse_glyph_pattern_() - + reverse = self.cur_token_ in {"reversesub", "rsub"} + old_prefix, old, lookups, values, old_suffix, hasMarks = \ + self.parse_glyph_pattern_(vertical=False) + if any(values): + raise FeatureLibError( + "Substitution statements cannot contain values", location) new = [] if self.next_token_ == "by": keyword = self.expect_keyword_("by") while self.next_token_ != ";": - new.append(self.parse_glyphclass_(accept_glyphname=True)) + gc = self.parse_glyphclass_(accept_glyphname=True) + new.append(gc) elif self.next_token_ == "from": keyword = self.expect_keyword_("from") new = [self.parse_glyphclass_(accept_glyphname=False)] @@ -231,46 +656,443 @@ keyword = None self.expect_symbol_(";") if len(new) is 0 and not any(lookups): - raise ParserError( + raise FeatureLibError( 'Expected "by", "from" or explicit lookup references', self.cur_token_location_) + # GSUB lookup type 3: Alternate substitution. 
+ # Format: "substitute a from [a.1 a.2 a.3];" if keyword == "from": - if len(old) != 1 or len(old[0]) != 1: - raise ParserError('Expected a single glyph before "from"', - location) + if reverse: + raise FeatureLibError( + 'Reverse chaining substitutions do not support "from"', + location) + if len(old) != 1 or len(old[0].glyphSet()) != 1: + raise FeatureLibError( + 'Expected a single glyph before "from"', + location) if len(new) != 1: - raise ParserError('Expected a single glyphclass after "from"', - location) - return ast.AlternateSubstitution(location, list(old[0])[0], new[0]) - - rule = ast.SubstitutionRule(location, old, new) - rule.old_prefix, rule.old_suffix = old_prefix, old_suffix - rule.lookups = lookups + raise FeatureLibError( + 'Expected a single glyphclass after "from"', + location) + return self.ast.AlternateSubstStatement( + location, old_prefix, old[0], old_suffix, new[0]) + + num_lookups = len([l for l in lookups if l is not None]) + + # GSUB lookup type 1: Single substitution. + # Format A: "substitute a by a.sc;" + # Format B: "substitute [one.fitted one.oldstyle] by one;" + # Format C: "substitute [a-d] by [A.sc-D.sc];" + if (not reverse and len(old) == 1 and len(new) == 1 and + num_lookups == 0): + glyphs = list(old[0].glyphSet()) + replacements = list(new[0].glyphSet()) + if len(replacements) == 1: + replacements = replacements * len(glyphs) + if len(glyphs) != len(replacements): + raise FeatureLibError( + 'Expected a glyph class with %d elements after "by", ' + 'but found a glyph class with %d elements' % + (len(glyphs), len(replacements)), location) + return self.ast.SingleSubstStatement( + location, old, new, + old_prefix, old_suffix, + forceChain=hasMarks + ) + + # GSUB lookup type 2: Multiple substitution. 
+ # Format: "substitute f_f_i by f f i;" + if (not reverse and + len(old) == 1 and len(old[0].glyphSet()) == 1 and + len(new) > 1 and max([len(n.glyphSet()) for n in new]) == 1 and + num_lookups == 0): + return self.ast.MultipleSubstStatement( + location, old_prefix, tuple(old[0].glyphSet())[0], old_suffix, + tuple([list(n.glyphSet())[0] for n in new])) + + # GSUB lookup type 4: Ligature substitution. + # Format: "substitute f f i by f_f_i;" + if (not reverse and + len(old) > 1 and len(new) == 1 and + len(new[0].glyphSet()) == 1 and + num_lookups == 0): + return self.ast.LigatureSubstStatement( + location, old_prefix, old, old_suffix, + list(new[0].glyphSet())[0], forceChain=hasMarks) + + # GSUB lookup type 8: Reverse chaining substitution. + if reverse: + if len(old) != 1: + raise FeatureLibError( + "In reverse chaining single substitutions, " + "only a single glyph or glyph class can be replaced", + location) + if len(new) != 1: + raise FeatureLibError( + 'In reverse chaining single substitutions, ' + 'the replacement (after "by") must be a single glyph ' + 'or glyph class', location) + if num_lookups != 0: + raise FeatureLibError( + "Reverse chaining substitutions cannot call named lookups", + location) + glyphs = sorted(list(old[0].glyphSet())) + replacements = sorted(list(new[0].glyphSet())) + if len(replacements) == 1: + replacements = replacements * len(glyphs) + if len(glyphs) != len(replacements): + raise FeatureLibError( + 'Expected a glyph class with %d elements after "by", ' + 'but found a glyph class with %d elements' % + (len(glyphs), len(replacements)), location) + return self.ast.ReverseChainSingleSubstStatement( + location, old_prefix, old_suffix, old, new) + + # GSUB lookup type 6: Chaining contextual substitution. 
+ assert len(new) == 0, new + rule = self.ast.ChainContextSubstStatement( + location, old_prefix, old, old_suffix, lookups) return rule def parse_subtable_(self): assert self.is_cur_keyword_("subtable") location = self.cur_token_location_ self.expect_symbol_(";") - return ast.SubtableStatement(location) + return self.ast.SubtableStatement(location) + + def parse_size_parameters_(self): + assert self.is_cur_keyword_("parameters") + location = self.cur_token_location_ + DesignSize = self.expect_decipoint_() + SubfamilyID = self.expect_number_() + RangeStart = 0 + RangeEnd = 0 + if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or \ + SubfamilyID != 0: + RangeStart = self.expect_decipoint_() + RangeEnd = self.expect_decipoint_() + + self.expect_symbol_(";") + return self.ast.SizeParameters(location, DesignSize, SubfamilyID, + RangeStart, RangeEnd) + + def parse_size_menuname_(self): + assert self.is_cur_keyword_("sizemenuname") + location = self.cur_token_location_ + platformID, platEncID, langID, string = self.parse_name_() + return self.ast.FeatureNameStatement(location, "size", platformID, + platEncID, langID, string) + + def parse_table_(self): + assert self.is_cur_keyword_("table") + location, name = self.cur_token_location_, self.expect_tag_() + table = self.ast.TableBlock(location, name) + self.expect_symbol_("{") + handler = { + "GDEF": self.parse_table_GDEF_, + "head": self.parse_table_head_, + "hhea": self.parse_table_hhea_, + "vhea": self.parse_table_vhea_, + "name": self.parse_table_name_, + "BASE": self.parse_table_BASE_, + "OS/2": self.parse_table_OS_2_, + }.get(name) + if handler: + handler(table) + else: + raise FeatureLibError('"table %s" is not supported' % name.strip(), + location) + self.expect_symbol_("}") + end_tag = self.expect_tag_() + if end_tag != name: + raise FeatureLibError('Expected "%s"' % name.strip(), + self.cur_token_location_) + self.expect_symbol_(";") + return table + + def parse_table_GDEF_(self, table): + statements = 
table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.is_cur_keyword_("Attach"): + statements.append(self.parse_attach_()) + elif self.is_cur_keyword_("GlyphClassDef"): + statements.append(self.parse_GlyphClassDef_()) + elif self.is_cur_keyword_("LigatureCaretByIndex"): + statements.append(self.parse_ligatureCaretByIndex_()) + elif self.is_cur_keyword_("LigatureCaretByPos"): + statements.append(self.parse_ligatureCaretByPos_()) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError( + "Expected Attach, LigatureCaretByIndex, " + "or LigatureCaretByPos", + self.cur_token_location_) + + def parse_table_head_(self, table): + statements = table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.is_cur_keyword_("FontRevision"): + statements.append(self.parse_FontRevision_()) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError("Expected FontRevision", + self.cur_token_location_) + + def parse_table_hhea_(self, table): + statements = table.statements + fields = ("CaretOffset", "Ascender", "Descender", "LineGap") + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields: + key = self.cur_token_.lower() + value = self.expect_number_() + statements.append( + self.ast.HheaField(self.cur_token_location_, key, value)) + if self.next_token_ != ";": + raise FeatureLibError("Incomplete statement", self.next_token_location_) + elif 
self.cur_token_ == ";": + continue + else: + raise FeatureLibError("Expected CaretOffset, Ascender, " + "Descender or LineGap", + self.cur_token_location_) + + def parse_table_vhea_(self, table): + statements = table.statements + fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap") + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields: + key = self.cur_token_.lower() + value = self.expect_number_() + statements.append( + self.ast.VheaField(self.cur_token_location_, key, value)) + if self.next_token_ != ";": + raise FeatureLibError("Incomplete statement", self.next_token_location_) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError("Expected VertTypoAscender, " + "VertTypoDescender or VertTypoLineGap", + self.cur_token_location_) + + def parse_table_name_(self, table): + statements = table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.is_cur_keyword_("nameid"): + statement = self.parse_nameid_() + if statement: + statements.append(statement) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError("Expected nameid", + self.cur_token_location_) + + def parse_name_(self): + platEncID = None + langID = None + if self.next_token_type_ == Lexer.NUMBER: + platformID = self.expect_number_() + location = self.cur_token_location_ + if platformID not in (1, 3): + raise FeatureLibError("Expected platform id 1 or 3", location) + if self.next_token_type_ == Lexer.NUMBER: + platEncID = self.expect_number_() + langID = self.expect_number_() + else: + platformID = 3 + location = self.cur_token_location_ + 
+ if platformID == 1: # Macintosh + platEncID = platEncID or 0 # Roman + langID = langID or 0 # English + else: # 3, Windows + platEncID = platEncID or 1 # Unicode + langID = langID or 0x0409 # English + + string = self.expect_string_() + self.expect_symbol_(";") + + encoding = getEncoding(platformID, platEncID, langID) + if encoding is None: + raise FeatureLibError("Unsupported encoding", location) + unescaped = self.unescape_string_(string, encoding) + return platformID, platEncID, langID, unescaped + + def parse_nameid_(self): + assert self.cur_token_ == "nameid", self.cur_token_ + location, nameID = self.cur_token_location_, self.expect_number_() + if nameID > 32767: + raise FeatureLibError("Name id value cannot be greater than 32767", + self.cur_token_location_) + if 1 <= nameID <= 6: + log.warning("Name id %d cannot be set from the feature file. " + "Ignoring record" % nameID) + self.parse_name_() # skip to the next record + return None + + platformID, platEncID, langID, string = self.parse_name_() + return self.ast.NameRecord(location, nameID, platformID, platEncID, + langID, string) + + def unescape_string_(self, string, encoding): + if encoding == "utf_16_be": + s = re.sub(r"\\[0-9a-fA-F]{4}", self.unescape_unichr_, string) + else: + unescape = lambda m: self.unescape_byte_(m, encoding) + s = re.sub(r"\\[0-9a-fA-F]{2}", unescape, string) + # We now have a Unicode string, but it might contain surrogate pairs. + # We convert surrogates to actual Unicode by round-tripping through + # Python's UTF-16 codec in a special mode. 
+ utf16 = tobytes(s, "utf_16_be", "surrogatepass") + return tounicode(utf16, "utf_16_be") + + @staticmethod + def unescape_unichr_(match): + n = match.group(0)[1:] + return unichr(int(n, 16)) + + @staticmethod + def unescape_byte_(match, encoding): + n = match.group(0)[1:] + return bytechr(int(n, 16)).decode(encoding) + + def parse_table_BASE_(self, table): + statements = table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.is_cur_keyword_("HorizAxis.BaseTagList"): + horiz_bases = self.parse_base_tag_list_() + elif self.is_cur_keyword_("HorizAxis.BaseScriptList"): + horiz_scripts = self.parse_base_script_list_(len(horiz_bases)) + statements.append( + self.ast.BaseAxis(self.cur_token_location_, horiz_bases, + horiz_scripts, False)) + elif self.is_cur_keyword_("VertAxis.BaseTagList"): + vert_bases = self.parse_base_tag_list_() + elif self.is_cur_keyword_("VertAxis.BaseScriptList"): + vert_scripts = self.parse_base_script_list_(len(vert_bases)) + statements.append( + self.ast.BaseAxis(self.cur_token_location_, vert_bases, + vert_scripts, True)) + elif self.cur_token_ == ";": + continue + + def parse_table_OS_2_(self, table): + statements = table.statements + numbers = ("FSType", "TypoAscender", "TypoDescender", "TypoLineGap", + "winAscent", "winDescent", "XHeight", "CapHeight", + "WeightClass", "WidthClass", "LowerOpSize", "UpperOpSize") + ranges = ("UnicodeRange", "CodePageRange") + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.cur_token_type_ is Lexer.NAME: + key = self.cur_token_.lower() + value = None + if self.cur_token_ in numbers: + value = self.expect_number_() + elif 
self.is_cur_keyword_("Panose"): + value = [] + for i in range(10): + value.append(self.expect_number_()) + elif self.cur_token_ in ranges: + value = [] + while self.next_token_ != ";": + value.append(self.expect_number_()) + elif self.is_cur_keyword_("Vendor"): + value = self.expect_string_() + statements.append( + self.ast.OS2Field(self.cur_token_location_, key, value)) + elif self.cur_token_ == ";": + continue + + def parse_base_tag_list_(self): + assert self.cur_token_ in ("HorizAxis.BaseTagList", + "VertAxis.BaseTagList"), self.cur_token_ + bases = [] + while self.next_token_ != ";": + bases.append(self.expect_script_tag_()) + self.expect_symbol_(";") + return bases + + def parse_base_script_list_(self, count): + assert self.cur_token_ in ("HorizAxis.BaseScriptList", + "VertAxis.BaseScriptList"), self.cur_token_ + scripts = [(self.parse_base_script_record_(count))] + while self.next_token_ == ",": + self.expect_symbol_(",") + scripts.append(self.parse_base_script_record_(count)) + self.expect_symbol_(";") + return scripts + + def parse_base_script_record_(self, count): + script_tag = self.expect_script_tag_() + base_tag = self.expect_script_tag_() + coords = [self.expect_number_() for i in range(count)] + return script_tag, base_tag, coords + + def parse_device_(self): + result = None + self.expect_symbol_("<") + self.expect_keyword_("device") + if self.next_token_ == "NULL": + self.expect_keyword_("NULL") + else: + result = [(self.expect_number_(), self.expect_number_())] + while self.next_token_ == ",": + self.expect_symbol_(",") + result.append((self.expect_number_(), self.expect_number_())) + result = tuple(result) # make it hashable + self.expect_symbol_(">") + return result + + def is_next_value_(self): + return self.next_token_type_ is Lexer.NUMBER or self.next_token_ == "<" def parse_valuerecord_(self, vertical): if self.next_token_type_ is Lexer.NUMBER: number, location = self.expect_number_(), self.cur_token_location_ if vertical: - val = 
ast.ValueRecord(location, 0, 0, 0, number) + val = self.ast.ValueRecord(location, vertical, + None, None, None, number, + None, None, None, None) else: - val = ast.ValueRecord(location, 0, 0, number, 0) + val = self.ast.ValueRecord(location, vertical, + None, None, number, None, + None, None, None, None) return val self.expect_symbol_("<") location = self.cur_token_location_ if self.next_token_type_ is Lexer.NAME: name = self.expect_name_() + if name == "NULL": + self.expect_symbol_(">") + return None vrd = self.valuerecords_.resolve(name) if vrd is None: - raise ParserError("Unknown valueRecordDef \"%s\"" % name, - self.cur_token_location_) + raise FeatureLibError("Unknown valueRecordDef \"%s\"" % name, + self.cur_token_location_) value = vrd.value xPlacement, yPlacement = (value.xPlacement, value.yPlacement) xAdvance, yAdvance = (value.xAdvance, value.yAdvance) @@ -278,9 +1100,30 @@ xPlacement, yPlacement, xAdvance, yAdvance = ( self.expect_number_(), self.expect_number_(), self.expect_number_(), self.expect_number_()) + + if self.next_token_ == "<": + xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = ( + self.parse_device_(), self.parse_device_(), + self.parse_device_(), self.parse_device_()) + allDeltas = sorted([ + delta + for size, delta + in (xPlaDevice if xPlaDevice else ()) + + (yPlaDevice if yPlaDevice else ()) + + (xAdvDevice if xAdvDevice else ()) + + (yAdvDevice if yAdvDevice else ())]) + if allDeltas[0] < -128 or allDeltas[-1] > 127: + raise FeatureLibError( + "Device value out of valid range (-128..127)", + self.cur_token_location_) + else: + xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = ( + None, None, None, None) + self.expect_symbol_(">") - return ast.ValueRecord( - location, xPlacement, yPlacement, xAdvance, yAdvance) + return self.ast.ValueRecord( + location, vertical, xPlacement, yPlacement, xAdvance, yAdvance, + xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice) def parse_valuerecord_definition_(self, vertical): assert 
self.is_cur_keyword_("valueRecordDef") @@ -288,62 +1131,139 @@ value = self.parse_valuerecord_(vertical) name = self.expect_name_() self.expect_symbol_(";") - vrd = ast.ValueRecordDefinition(location, name, value) + vrd = self.ast.ValueRecordDefinition(location, name, value) self.valuerecords_.define(name, vrd) return vrd def parse_languagesystem_(self): assert self.cur_token_ == "languagesystem" location = self.cur_token_location_ - script, language = self.expect_tag_(), self.expect_tag_() + script = self.expect_script_tag_() + language = self.expect_language_tag_() self.expect_symbol_(";") - return ast.LanguageSystemStatement(location, script, language) + if script == "DFLT" and language != "dflt": + raise FeatureLibError( + 'For script "DFLT", the language must be "dflt"', + self.cur_token_location_) + return self.ast.LanguageSystemStatement(location, script, language) def parse_feature_block_(self): assert self.cur_token_ == "feature" location = self.cur_token_location_ tag = self.expect_tag_() - vertical = (tag == "vkrn") + vertical = (tag in {"vkrn", "vpal", "vhal", "valt"}) + stylisticset = None + if tag in ["ss%02d" % i for i in range(1, 20+1)]: + stylisticset = tag + + size_feature = (tag == "size") use_extension = False if self.next_token_ == "useExtension": self.expect_keyword_("useExtension") use_extension = True - block = ast.FeatureBlock(location, tag, use_extension) - self.parse_block_(block, vertical) + block = self.ast.FeatureBlock(location, tag, use_extension) + self.parse_block_(block, vertical, stylisticset, size_feature) + return block + + def parse_feature_reference_(self): + assert self.cur_token_ == "feature", self.cur_token_ + location = self.cur_token_location_ + featureName = self.expect_tag_() + self.expect_symbol_(";") + return self.ast.FeatureReferenceStatement(location, featureName) + + def parse_featureNames_(self, tag): + assert self.cur_token_ == "featureNames", self.cur_token_ + block = 
self.ast.FeatureNamesBlock(self.cur_token_location_) + self.expect_symbol_("{") + for symtab in self.symbol_tables_: + symtab.enter_scope() + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + block.statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.is_cur_keyword_("name"): + location = self.cur_token_location_ + platformID, platEncID, langID, string = self.parse_name_() + block.statements.append( + self.ast.FeatureNameStatement(location, tag, platformID, + platEncID, langID, string)) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError('Expected "name"', + self.cur_token_location_) + self.expect_symbol_("}") + for symtab in self.symbol_tables_: + symtab.exit_scope() + self.expect_symbol_(";") return block - def parse_block_(self, block, vertical): + def parse_FontRevision_(self): + assert self.cur_token_ == "FontRevision", self.cur_token_ + location, version = self.cur_token_location_, self.expect_float_() + self.expect_symbol_(";") + if version <= 0: + raise FeatureLibError("Font revision numbers must be positive", + location) + return self.ast.FontRevisionStatement(location, version) + + def parse_block_(self, block, vertical, stylisticset=None, + size_feature=False): self.expect_symbol_("{") for symtab in self.symbol_tables_: symtab.enter_scope() statements = block.statements - while self.next_token_ != "}": - self.advance_lexer_() - if self.cur_token_type_ is Lexer.GLYPHCLASS: + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.cur_token_type_ is Lexer.GLYPHCLASS: statements.append(self.parse_glyphclass_definition_()) elif self.is_cur_keyword_("anchorDef"): statements.append(self.parse_anchordef_()) + elif self.is_cur_keyword_({"enum", 
"enumerate"}): + statements.append(self.parse_enumerate_(vertical=vertical)) + elif self.is_cur_keyword_("feature"): + statements.append(self.parse_feature_reference_()) elif self.is_cur_keyword_("ignore"): statements.append(self.parse_ignore_()) elif self.is_cur_keyword_("language"): statements.append(self.parse_language_()) elif self.is_cur_keyword_("lookup"): statements.append(self.parse_lookup_(vertical)) + elif self.is_cur_keyword_("lookupflag"): + statements.append(self.parse_lookupflag_()) + elif self.is_cur_keyword_("markClass"): + statements.append(self.parse_markClass_()) + elif self.is_cur_keyword_({"pos", "position"}): + statements.append( + self.parse_position_(enumerated=False, vertical=vertical)) elif self.is_cur_keyword_("script"): statements.append(self.parse_script_()) - elif (self.is_cur_keyword_("substitute") or - self.is_cur_keyword_("sub")): + elif (self.is_cur_keyword_({"sub", "substitute", + "rsub", "reversesub"})): statements.append(self.parse_substitute_()) elif self.is_cur_keyword_("subtable"): statements.append(self.parse_subtable_()) elif self.is_cur_keyword_("valueRecordDef"): statements.append(self.parse_valuerecord_definition_(vertical)) + elif stylisticset and self.is_cur_keyword_("featureNames"): + statements.append(self.parse_featureNames_(stylisticset)) + elif size_feature and self.is_cur_keyword_("parameters"): + statements.append(self.parse_size_parameters_()) + elif size_feature and self.is_cur_keyword_("sizemenuname"): + statements.append(self.parse_size_menuname_()) + elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in self.extensions: + statements.append(self.extensions[self.cur_token_](self)) + elif self.cur_token_ == ";": + continue else: - raise ParserError( - "Expected glyph class definition or statement", + raise FeatureLibError( + "Expected glyph class definition or statement: got {} {}".format(self.cur_token_type_, self.cur_token_), self.cur_token_location_) self.expect_symbol_("}") @@ -352,64 +1272,193 @@ 
name = self.expect_name_() if name != block.name.strip(): - raise ParserError("Expected \"%s\"" % block.name.strip(), - self.cur_token_location_) + raise FeatureLibError("Expected \"%s\"" % block.name.strip(), + self.cur_token_location_) self.expect_symbol_(";") + # A multiple substitution may have a single destination, in which case + # it will look just like a single substitution. So if there are both + # multiple and single substitutions, upgrade all the single ones to + # multiple substitutions. + + # Check if we have a mix of non-contextual singles and multiples. + has_single = False + has_multiple = False + for s in statements: + if isinstance(s, self.ast.SingleSubstStatement): + has_single = not any([s.prefix, s.suffix, s.forceChain]) + elif isinstance(s, self.ast.MultipleSubstStatement): + has_multiple = not any([s.prefix, s.suffix]) + + # Upgrade all single substitutions to multiple substitutions. + if has_single and has_multiple: + for i, s in enumerate(statements): + if isinstance(s, self.ast.SingleSubstStatement): + statements[i] = self.ast.MultipleSubstStatement(s.location, + s.prefix, s.glyphs[0].glyphSet()[0], s.suffix, + [r.glyphSet()[0] for r in s.replacements]) + def is_cur_keyword_(self, k): - return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k) + if self.cur_token_type_ is Lexer.NAME: + if isinstance(k, type("")): # basestring is gone in Python3 + return self.cur_token_ == k + else: + return self.cur_token_ in k + return False + + def expect_class_name_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.GLYPHCLASS: + raise FeatureLibError("Expected @NAME", self.cur_token_location_) + return self.cur_token_ + + def expect_cid_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.CID: + return self.cur_token_ + raise FeatureLibError("Expected a CID", self.cur_token_location_) + + def expect_glyph_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + self.cur_token_ = 
self.cur_token_.lstrip("\\") + if len(self.cur_token_) > 63: + raise FeatureLibError( + "Glyph names must not be longer than 63 characters", + self.cur_token_location_) + return self.cur_token_ + elif self.cur_token_type_ is Lexer.CID: + return "cid%05d" % self.cur_token_ + raise FeatureLibError("Expected a glyph name or CID", + self.cur_token_location_) + + def expect_markClass_reference_(self): + name = self.expect_class_name_() + mc = self.glyphclasses_.resolve(name) + if mc is None: + raise FeatureLibError("Unknown markClass @%s" % name, + self.cur_token_location_) + if not isinstance(mc, self.ast.MarkClass): + raise FeatureLibError("@%s is not a markClass" % name, + self.cur_token_location_) + return mc def expect_tag_(self): self.advance_lexer_() if self.cur_token_type_ is not Lexer.NAME: - raise ParserError("Expected a tag", self.cur_token_location_) + raise FeatureLibError("Expected a tag", self.cur_token_location_) if len(self.cur_token_) > 4: - raise ParserError("Tags can not be longer than 4 characters", - self.cur_token_location_) + raise FeatureLibError("Tags can not be longer than 4 characters", + self.cur_token_location_) return (self.cur_token_ + " ")[:4] + def expect_script_tag_(self): + tag = self.expect_tag_() + if tag == "dflt": + raise FeatureLibError( + '"dflt" is not a valid script tag; use "DFLT" instead', + self.cur_token_location_) + return tag + + def expect_language_tag_(self): + tag = self.expect_tag_() + if tag == "DFLT": + raise FeatureLibError( + '"DFLT" is not a valid language tag; use "dflt" instead', + self.cur_token_location_) + return tag + def expect_symbol_(self, symbol): self.advance_lexer_() if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol: return symbol - raise ParserError("Expected '%s'" % symbol, self.cur_token_location_) + raise FeatureLibError("Expected '%s'" % symbol, + self.cur_token_location_) def expect_keyword_(self, keyword): self.advance_lexer_() if self.cur_token_type_ is Lexer.NAME and 
self.cur_token_ == keyword: return self.cur_token_ - raise ParserError("Expected \"%s\"" % keyword, - self.cur_token_location_) + raise FeatureLibError("Expected \"%s\"" % keyword, + self.cur_token_location_) def expect_name_(self): self.advance_lexer_() if self.cur_token_type_ is Lexer.NAME: return self.cur_token_ - raise ParserError("Expected a name", self.cur_token_location_) + raise FeatureLibError("Expected a name", self.cur_token_location_) def expect_number_(self): self.advance_lexer_() if self.cur_token_type_ is Lexer.NUMBER: return self.cur_token_ - raise ParserError("Expected a number", self.cur_token_location_) + raise FeatureLibError("Expected a number", self.cur_token_location_) - def advance_lexer_(self): - self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( - self.next_token_type_, self.next_token_, self.next_token_location_) - try: - (self.next_token_type_, self.next_token_, - self.next_token_location_) = self.lexer_.next() - except StopIteration: - self.next_token_type_, self.next_token_ = (None, None) + def expect_float_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.FLOAT: + return self.cur_token_ + raise FeatureLibError("Expected a floating-point number", + self.cur_token_location_) + + def expect_decipoint_(self): + if self.next_token_type_ == Lexer.FLOAT: + return self.expect_float_() + elif self.next_token_type_ is Lexer.NUMBER: + return self.expect_number_() / 10 + else: + raise FeatureLibError("Expected an integer or floating-point number", + self.cur_token_location_) + + def expect_string_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.STRING: + return self.cur_token_ + raise FeatureLibError("Expected a string", self.cur_token_location_) + + def advance_lexer_(self, comments=False): + if comments and self.cur_comments_: + self.cur_token_type_ = Lexer.COMMENT + self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0) + return + else: + self.cur_token_type_, self.cur_token_, 
self.cur_token_location_ = ( + self.next_token_type_, self.next_token_, self.next_token_location_) + self.cur_comments_ = [] + while True: + try: + (self.next_token_type_, self.next_token_, + self.next_token_location_) = next(self.lexer_) + except StopIteration: + self.next_token_type_, self.next_token_ = (None, None) + if self.next_token_type_ != Lexer.COMMENT: + break + self.cur_comments_.append((self.next_token_, self.next_token_location_)) + + @staticmethod + def reverse_string_(s): + """'abc' --> 'cba'""" + return ''.join(reversed(list(s))) + + def make_cid_range_(self, location, start, limit): + """(location, 999, 1001) --> ["cid00999", "cid01000", "cid01001"]""" + result = list() + if start > limit: + raise FeatureLibError( + "Bad range: start should be less than limit", location) + for cid in range(start, limit + 1): + result.append("cid%05d" % cid) + return result def make_glyph_range_(self, location, start, limit): - """("a.sc", "d.sc") --> {"a.sc", "b.sc", "c.sc", "d.sc"}""" - result = set() + """(location, "a.sc", "d.sc") --> ["a.sc", "b.sc", "c.sc", "d.sc"]""" + result = list() if len(start) != len(limit): - raise ParserError( + raise FeatureLibError( "Bad range: \"%s\" and \"%s\" should have the same length" % (start, limit), location) - rev = lambda s: ''.join(reversed(list(s))) # string reversal + + rev = self.reverse_string_ prefix = os.path.commonprefix([start, limit]) suffix = rev(os.path.commonprefix([rev(start), rev(limit)])) if len(suffix) > 0: @@ -420,29 +1469,31 @@ limit_range = limit[len(prefix):] if start_range >= limit_range: - raise ParserError("Start of range must be smaller than its end", - location) + raise FeatureLibError( + "Start of range must be smaller than its end", + location) uppercase = re.compile(r'^[A-Z]$') if uppercase.match(start_range) and uppercase.match(limit_range): for c in range(ord(start_range), ord(limit_range) + 1): - result.add("%s%c%s" % (prefix, c, suffix)) + result.append("%s%c%s" % (prefix, c, suffix)) 
return result lowercase = re.compile(r'^[a-z]$') if lowercase.match(start_range) and lowercase.match(limit_range): for c in range(ord(start_range), ord(limit_range) + 1): - result.add("%s%c%s" % (prefix, c, suffix)) + result.append("%s%c%s" % (prefix, c, suffix)) return result digits = re.compile(r'^[0-9]{1,3}$') if digits.match(start_range) and digits.match(limit_range): for i in range(int(start_range, 10), int(limit_range, 10) + 1): number = ("000" + str(i))[-len(start_range):] - result.add("%s%s%s" % (prefix, number, suffix)) + result.append("%s%s%s" % (prefix, number, suffix)) return result - raise ParserError("Bad range: \"%s-%s\"" % (start, limit), location) + raise FeatureLibError("Bad range: \"%s-%s\"" % (start, limit), + location) class SymbolTable(object): diff -Nru fonttools-3.0/Lib/fontTools/feaLib/parser_test.py fonttools-3.21.2/Lib/fontTools/feaLib/parser_test.py --- fonttools-3.0/Lib/fontTools/feaLib/parser_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/parser_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,448 +0,0 @@ -from __future__ import print_function, division, absolute_import -from __future__ import unicode_literals -from fontTools.feaLib.lexer import LexerError -from fontTools.feaLib.parser import Parser, ParserError, SymbolTable -from fontTools.misc.py23 import * -import fontTools.feaLib.ast as ast -import codecs -import os -import shutil -import sys -import tempfile -import unittest - - -class ParserTest(unittest.TestCase): - def __init__(self, methodName): - unittest.TestCase.__init__(self, methodName) - # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, - # and fires deprecation warnings if a program uses the old name. 
- if not hasattr(self, "assertRaisesRegex"): - self.assertRaisesRegex = self.assertRaisesRegexp - - def test_anchordef(self): - [foo] = self.parse("anchorDef 123 456 foo;").statements - self.assertEqual(type(foo), ast.AnchorDefinition) - self.assertEqual(foo.name, "foo") - self.assertEqual(foo.x, 123) - self.assertEqual(foo.y, 456) - self.assertEqual(foo.contourpoint, None) - - def test_anchordef_contourpoint(self): - [foo] = self.parse("anchorDef 123 456 contourpoint 5 foo;").statements - self.assertEqual(type(foo), ast.AnchorDefinition) - self.assertEqual(foo.name, "foo") - self.assertEqual(foo.x, 123) - self.assertEqual(foo.y, 456) - self.assertEqual(foo.contourpoint, 5) - - def test_feature_block(self): - [liga] = self.parse("feature liga {} liga;").statements - self.assertEqual(liga.name, "liga") - self.assertFalse(liga.use_extension) - - def test_feature_block_useExtension(self): - [liga] = self.parse("feature liga useExtension {} liga;").statements - self.assertEqual(liga.name, "liga") - self.assertTrue(liga.use_extension) - - def test_glyphclass(self): - [gc] = self.parse("@dash = [endash emdash figuredash];").statements - self.assertEqual(gc.name, "dash") - self.assertEqual(gc.glyphs, {"endash", "emdash", "figuredash"}) - - def test_glyphclass_bad(self): - self.assertRaisesRegex( - ParserError, - "Expected glyph name, glyph range, or glyph class reference", - self.parse, "@bad = [a 123];") - - def test_glyphclass_duplicate(self): - self.assertRaisesRegex( - ParserError, "Glyph class @dup already defined", - self.parse, "@dup = [a b]; @dup = [x];") - - def test_glyphclass_empty(self): - [gc] = self.parse("@empty_set = [];").statements - self.assertEqual(gc.name, "empty_set") - self.assertEqual(gc.glyphs, set()) - - def test_glyphclass_equality(self): - [foo, bar] = self.parse("@foo = [a b]; @bar = @foo;").statements - self.assertEqual(foo.glyphs, {"a", "b"}) - self.assertEqual(bar.glyphs, {"a", "b"}) - - def test_glyphclass_range_uppercase(self): - [gc] = 
self.parse("@swashes = [X.swash-Z.swash];").statements - self.assertEqual(gc.name, "swashes") - self.assertEqual(gc.glyphs, {"X.swash", "Y.swash", "Z.swash"}) - - def test_glyphclass_range_lowercase(self): - [gc] = self.parse("@defg.sc = [d.sc-g.sc];").statements - self.assertEqual(gc.name, "defg.sc") - self.assertEqual(gc.glyphs, {"d.sc", "e.sc", "f.sc", "g.sc"}) - - def test_glyphclass_range_digit1(self): - [gc] = self.parse("@range = [foo.2-foo.5];").statements - self.assertEqual(gc.glyphs, {"foo.2", "foo.3", "foo.4", "foo.5"}) - - def test_glyphclass_range_digit2(self): - [gc] = self.parse("@range = [foo.09-foo.11];").statements - self.assertEqual(gc.glyphs, {"foo.09", "foo.10", "foo.11"}) - - def test_glyphclass_range_digit3(self): - [gc] = self.parse("@range = [foo.123-foo.125];").statements - self.assertEqual(gc.glyphs, {"foo.123", "foo.124", "foo.125"}) - - def test_glyphclass_range_bad(self): - self.assertRaisesRegex( - ParserError, - "Bad range: \"a\" and \"foobar\" should have the same length", - self.parse, "@bad = [a-foobar];") - self.assertRaisesRegex( - ParserError, "Bad range: \"A.swash-z.swash\"", - self.parse, "@bad = [A.swash-z.swash];") - self.assertRaisesRegex( - ParserError, "Start of range must be smaller than its end", - self.parse, "@bad = [B.swash-A.swash];") - self.assertRaisesRegex( - ParserError, "Bad range: \"foo.1234-foo.9876\"", - self.parse, "@bad = [foo.1234-foo.9876];") - - def test_glyphclass_range_mixed(self): - [gc] = self.parse("@range = [a foo.09-foo.11 X.sc-Z.sc];").statements - self.assertEqual(gc.glyphs, { - "a", "foo.09", "foo.10", "foo.11", "X.sc", "Y.sc", "Z.sc" - }) - - def test_glyphclass_reference(self): - [vowels_lc, vowels_uc, vowels] = self.parse( - "@Vowels.lc = [a e i o u]; @Vowels.uc = [A E I O U];" - "@Vowels = [@Vowels.lc @Vowels.uc y Y];").statements - self.assertEqual(vowels_lc.glyphs, set(list("aeiou"))) - self.assertEqual(vowels_uc.glyphs, set(list("AEIOU"))) - self.assertEqual(vowels.glyphs, 
set(list("aeiouyAEIOUY"))) - self.assertRaisesRegex( - ParserError, "Unknown glyph class @unknown", - self.parse, "@bad = [@unknown];") - - def test_glyphclass_scoping(self): - [foo, liga, smcp] = self.parse( - "@foo = [a b];" - "feature liga { @bar = [@foo l]; } liga;" - "feature smcp { @bar = [@foo s]; } smcp;" - ).statements - self.assertEqual(foo.glyphs, {"a", "b"}) - self.assertEqual(liga.statements[0].glyphs, {"a", "b", "l"}) - self.assertEqual(smcp.statements[0].glyphs, {"a", "b", "s"}) - - def test_ignore_sub(self): - doc = self.parse("feature test {ignore sub e t' c;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.IgnoreSubstitutionRule) - self.assertEqual(s.prefix, [{"e"}]) - self.assertEqual(s.glyphs, [{"t"}]) - self.assertEqual(s.suffix, [{"c"}]) - - def test_ignore_substitute(self): - doc = self.parse( - "feature test {" - " ignore substitute f [a e] d' [a u]' [e y];" - "} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.IgnoreSubstitutionRule) - self.assertEqual(s.prefix, [{"f"}, {"a", "e"}]) - self.assertEqual(s.glyphs, [{"d"}, {"a", "u"}]) - self.assertEqual(s.suffix, [{"e", "y"}]) - - def test_language(self): - doc = self.parse("feature test {language DEU;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertTrue(s.include_default) - self.assertFalse(s.required) - - def test_language_exclude_dflt(self): - doc = self.parse("feature test {language DEU exclude_dflt;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertFalse(s.include_default) - self.assertFalse(s.required) - - def test_language_exclude_dflt_required(self): - doc = self.parse("feature test {" - " language DEU exclude_dflt required;" - "} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - 
self.assertEqual(s.language, "DEU") - self.assertFalse(s.include_default) - self.assertTrue(s.required) - - def test_language_include_dflt(self): - doc = self.parse("feature test {language DEU include_dflt;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertTrue(s.include_default) - self.assertFalse(s.required) - - def test_language_include_dflt_required(self): - doc = self.parse("feature test {" - " language DEU include_dflt required;" - "} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertTrue(s.include_default) - self.assertTrue(s.required) - - def test_lookup_block(self): - [lookup] = self.parse("lookup Ligatures {} Ligatures;").statements - self.assertEqual(lookup.name, "Ligatures") - self.assertFalse(lookup.use_extension) - - def test_lookup_block_useExtension(self): - [lookup] = self.parse("lookup Foo useExtension {} Foo;").statements - self.assertEqual(lookup.name, "Foo") - self.assertTrue(lookup.use_extension) - - def test_lookup_block_name_mismatch(self): - self.assertRaisesRegex( - ParserError, 'Expected "Foo"', - self.parse, "lookup Foo {} Bar;") - - def test_lookup_block_with_horizontal_valueRecordDef(self): - doc = self.parse("feature liga {" - " lookup look {" - " valueRecordDef 123 foo;" - " } look;" - "} liga;") - [liga] = doc.statements - [look] = liga.statements - [foo] = look.statements - self.assertEqual(foo.value.xAdvance, 123) - self.assertEqual(foo.value.yAdvance, 0) - - def test_lookup_block_with_vertical_valueRecordDef(self): - doc = self.parse("feature vkrn {" - " lookup look {" - " valueRecordDef 123 foo;" - " } look;" - "} vkrn;") - [vkrn] = doc.statements - [look] = vkrn.statements - [foo] = look.statements - self.assertEqual(foo.value.xAdvance, 0) - self.assertEqual(foo.value.yAdvance, 123) - - def test_lookup_reference(self): - [foo, bar] = 
self.parse("lookup Foo {} Foo;" - "feature Bar {lookup Foo;} Bar;").statements - [ref] = bar.statements - self.assertEqual(type(ref), ast.LookupReferenceStatement) - self.assertEqual(ref.lookup, foo) - - def test_lookup_reference_unknown(self): - self.assertRaisesRegex( - ParserError, 'Unknown lookup "Huh"', - self.parse, "feature liga {lookup Huh;} liga;") - - def test_script(self): - doc = self.parse("feature test {script cyrl;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.ScriptStatement) - self.assertEqual(s.script, "cyrl") - - def test_substitute_single_format_a(self): # GSUB LookupType 1 - doc = self.parse("feature smcp {substitute a by a.sc;} smcp;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"a"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"a.sc"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_single_format_b(self): # GSUB LookupType 1 - doc = self.parse( - "feature smcp {" - " substitute [one.fitted one.oldstyle] by one;" - "} smcp;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"one.fitted", "one.oldstyle"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"one"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_single_format_c(self): # GSUB LookupType 1 - doc = self.parse( - "feature smcp {" - " substitute [a-d] by [A.sc-D.sc];" - "} smcp;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"a", "b", "c", "d"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"A.sc", "B.sc", "C.sc", "D.sc"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_multiple(self): # GSUB LookupType 2 - doc = self.parse("lookup Look {substitute f_f_i by f f i;} Look;") - sub = doc.statements[0].statements[0] - self.assertEqual(type(sub), 
ast.SubstitutionRule) - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"f_f_i"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"f"}, {"f"}, {"i"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_from(self): # GSUB LookupType 3 - doc = self.parse("feature test {" - " substitute a from [a.1 a.2 a.3];" - "} test;") - sub = doc.statements[0].statements[0] - self.assertEqual(type(sub), ast.AlternateSubstitution) - self.assertEqual(sub.glyph, "a") - self.assertEqual(sub.from_class, {"a.1", "a.2", "a.3"}) - - def test_substitute_from_glyphclass(self): # GSUB LookupType 3 - doc = self.parse("feature test {" - " @Ampersands = [ampersand.1 ampersand.2];" - " substitute ampersand from @Ampersands;" - "} test;") - [glyphclass, sub] = doc.statements[0].statements - self.assertEqual(type(sub), ast.AlternateSubstitution) - self.assertEqual(sub.glyph, "ampersand") - self.assertEqual(sub.from_class, {"ampersand.1", "ampersand.2"}) - - def test_substitute_ligature(self): # GSUB LookupType 4 - doc = self.parse("feature liga {substitute f f i by f_f_i;} liga;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"f"}, {"f"}, {"i"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"f_f_i"}]) - self.assertEqual(sub.lookups, [None, None, None]) - - def test_substitute_lookups(self): - doc = Parser(self.getpath("spec5fi.fea")).parse() - [ligs, sub, feature] = doc.statements - self.assertEqual(feature.statements[0].lookups, [ligs, None, sub]) - self.assertEqual(feature.statements[1].lookups, [ligs, None, sub]) - - def test_substitute_missing_by(self): - self.assertRaisesRegex( - ParserError, 'Expected "by", "from" or explicit lookup references', - self.parse, "feature liga {substitute f f i;} liga;") - - def test_subtable(self): - doc = self.parse("feature test {subtable;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), 
ast.SubtableStatement) - - def test_valuerecord_format_a_horizontal(self): - doc = self.parse("feature liga {valueRecordDef 123 foo;} liga;") - value = doc.statements[0].statements[0].value - self.assertEqual(value.xPlacement, 0) - self.assertEqual(value.yPlacement, 0) - self.assertEqual(value.xAdvance, 123) - self.assertEqual(value.yAdvance, 0) - - def test_valuerecord_format_a_vertical(self): - doc = self.parse("feature vkrn {valueRecordDef 123 foo;} vkrn;") - value = doc.statements[0].statements[0].value - self.assertEqual(value.xPlacement, 0) - self.assertEqual(value.yPlacement, 0) - self.assertEqual(value.xAdvance, 0) - self.assertEqual(value.yAdvance, 123) - - def test_valuerecord_format_b(self): - doc = self.parse("feature liga {valueRecordDef <1 2 3 4> foo;} liga;") - value = doc.statements[0].statements[0].value - self.assertEqual(value.xPlacement, 1) - self.assertEqual(value.yPlacement, 2) - self.assertEqual(value.xAdvance, 3) - self.assertEqual(value.yAdvance, 4) - - def test_valuerecord_named(self): - doc = self.parse("valueRecordDef <1 2 3 4> foo;" - "feature liga {valueRecordDef bar;} liga;") - value = doc.statements[1].statements[0].value - self.assertEqual(value.xPlacement, 1) - self.assertEqual(value.yPlacement, 2) - self.assertEqual(value.xAdvance, 3) - self.assertEqual(value.yAdvance, 4) - - def test_valuerecord_named_unknown(self): - self.assertRaisesRegex( - ParserError, "Unknown valueRecordDef \"unknown\"", - self.parse, "valueRecordDef foo;") - - def test_valuerecord_scoping(self): - [foo, liga, smcp] = self.parse( - "valueRecordDef 789 foo;" - "feature liga {valueRecordDef bar;} liga;" - "feature smcp {valueRecordDef bar;} smcp;" - ).statements - self.assertEqual(foo.value.xAdvance, 789) - self.assertEqual(liga.statements[0].value.xAdvance, 789) - self.assertEqual(smcp.statements[0].value.xAdvance, 789) - - def test_languagesystem(self): - [langsys] = self.parse("languagesystem latn DEU;").statements - self.assertEqual(langsys.script, 
"latn") - self.assertEqual(langsys.language, "DEU ") - self.assertRaisesRegex( - ParserError, "Expected ';'", - self.parse, "languagesystem latn DEU") - self.assertRaisesRegex( - ParserError, "longer than 4 characters", - self.parse, "languagesystem foobar DEU") - self.assertRaisesRegex( - ParserError, "longer than 4 characters", - self.parse, "languagesystem latn FOOBAR") - - def setUp(self): - self.tempdir = None - self.num_tempfiles = 0 - - def tearDown(self): - if self.tempdir: - shutil.rmtree(self.tempdir) - - def parse(self, text): - if not self.tempdir: - self.tempdir = tempfile.mkdtemp() - self.num_tempfiles += 1 - path = os.path.join(self.tempdir, "tmp%d.fea" % self.num_tempfiles) - with codecs.open(path, "wb", "utf-8") as outfile: - outfile.write(text) - return Parser(path).parse() - - @staticmethod - def getpath(testfile): - path, _ = os.path.split(__file__) - return os.path.join(path, "testdata", testfile) - - -class SymbolTableTest(unittest.TestCase): - def test_scopes(self): - symtab = SymbolTable() - symtab.define("foo", 23) - self.assertEqual(symtab.resolve("foo"), 23) - symtab.enter_scope() - self.assertEqual(symtab.resolve("foo"), 23) - symtab.define("foo", 42) - self.assertEqual(symtab.resolve("foo"), 42) - symtab.exit_scope() - self.assertEqual(symtab.resolve("foo"), 23) - - def test_resolve_undefined(self): - self.assertEqual(SymbolTable().resolve("abc"), None) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/include0.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include0.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/include0.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include0.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -I0 diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/include1.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include1.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/include1.fea 2015-08-31 
17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include1.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I1a -include(include0.fea); -I1b diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/include2.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include2.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/include2.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include2.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I2a -include(include1.fea); -I2b diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/include3.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include3.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/include3.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include3.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -I3a -include(include2.fea); -I3b - diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/include4.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include4.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/include4.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include4.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -I4a -include(include3.fea); -I4b - diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/include5.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include5.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/include5.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include5.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I5a -include(include4.fea); -I5b diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/include6.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include6.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/include6.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/include6.fea 1970-01-01 00:00:00.000000000 +0000 
@@ -1,3 +0,0 @@ -I6a -include(include5.fea); -I6b diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/includemissingfile.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/includemissingfile.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/includemissingfile.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/includemissingfile.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -include(missingfile.fea); diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/includeself.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/includeself.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/includeself.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/includeself.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -include(includeself.fea); diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/mini.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/mini.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/mini.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/mini.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Example file from OpenType Feature File specification, section 1. 
-# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html - -# Script and language coverage -languagesystem DFLT dflt; -languagesystem latn dflt; - -# Ligature formation -feature liga { - substitute f i by f_i; - substitute f l by f_l; -} liga; - -# Kerning -feature kern { - position A Y -100; - position a y -80; - position s f' <0 0 10 0> t; -} kern; diff -Nru fonttools-3.0/Lib/fontTools/feaLib/testdata/spec5fi.fea fonttools-3.21.2/Lib/fontTools/feaLib/testdata/spec5fi.fea --- fonttools-3.0/Lib/fontTools/feaLib/testdata/spec5fi.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/feaLib/testdata/spec5fi.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -# OpenType Feature File specification, section 5.f.i, example 1 -# "Specifying a Chain Sub rule and marking sub-runs" -# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html - -lookup CNTXT_LIGS { - substitute f i by f_i; - substitute c t by c_t; - } CNTXT_LIGS; - -lookup CNTXT_SUB { - substitute n by n.end; - substitute s by s.end; - } CNTXT_SUB; - -feature test { - substitute [a e i o u] f' lookup CNTXT_LIGS i' n' lookup CNTXT_SUB; - substitute [a e i o u] c' lookup CNTXT_LIGS t' s' lookup CNTXT_SUB; -} test; diff -Nru fonttools-3.0/Lib/fontTools/__init__.py fonttools-3.21.2/Lib/fontTools/__init__.py --- fonttools-3.0/Lib/fontTools/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,4 +1,10 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +import logging +from fontTools.misc.loggingTools import configLogger -version = "3.0" +log = logging.getLogger(__name__) + +version = __version__ = "3.21.2" + +__all__ = ["version", "log", "configLogger"] diff -Nru fonttools-3.0/Lib/fontTools/inspect.py fonttools-3.21.2/Lib/fontTools/inspect.py --- fonttools-3.0/Lib/fontTools/inspect.py 2015-08-31 17:57:15.000000000 +0000 
+++ fonttools-3.21.2/Lib/fontTools/inspect.py 2018-01-08 12:40:40.000000000 +0000 @@ -8,8 +8,14 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools import misc, ttLib, cffLib -import pygtk -pygtk.require('2.0') +try: + from gi import pygtkcompat +except ImportError: + pygtkcompat = None + +if pygtkcompat is not None: + pygtkcompat.enable() + pygtkcompat.enable_gtk(version='3.0') import gtk import sys @@ -73,7 +79,7 @@ def _add_object(self, key, value): # Make sure item is decompiled try: - value["asdf"] + value.asdf # Any better way?! except (AttributeError, KeyError, TypeError, ttLib.TTLibError): pass if isinstance(value, ttLib.getTableModule('glyf').Glyph): @@ -256,10 +262,10 @@ args = sys.argv[1:] if len(args) < 1: print("usage: pyftinspect font...", file=sys.stderr) - sys.exit(1) + return 1 for arg in args: Inspect(arg) gtk.main() if __name__ == "__main__": - main() + sys.exit(main()) diff -Nru fonttools-3.0/Lib/fontTools/__main__.py fonttools-3.21.2/Lib/fontTools/__main__.py --- fonttools-3.0/Lib/fontTools/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/__main__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ +from __future__ import print_function, division, absolute_import +import sys + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + # TODO Add help output, --help, etc. + + # TODO Handle library-wide options. Eg.: + # --unicodedata + # --verbose / other logging stuff + + # TODO Allow a way to run arbitrary modules? Useful for setting + # library-wide options and calling another library. Eg.: + # + # $ fonttools --unicodedata=... fontmake ... + # + # This allows for a git-like command where thirdparty commands + # can be added. Should we just try importing the fonttools + # module first and try without if it fails? 
+ + mod = 'fontTools.'+sys.argv[1] + sys.argv[1] = sys.argv[0] + ' ' + sys.argv[1] + del sys.argv[0] + + import runpy + runpy.run_module(mod, run_name='__main__') + + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Lib/fontTools/merge.py fonttools-3.21.2/Lib/fontTools/merge.py --- fonttools-3.0/Lib/fontTools/merge.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/merge.py 2018-01-08 12:40:40.000000000 +0000 @@ -11,10 +11,16 @@ from fontTools import ttLib, cffLib from fontTools.ttLib.tables import otTables, _h_e_a_d from fontTools.ttLib.tables.DefaultTable import DefaultTable +from fontTools.misc.loggingTools import Timer from functools import reduce import sys import time import operator +import logging + + +log = logging.getLogger("fontTools.merge") +timer = Timer(logger=logging.getLogger(__name__+".timer"), level=logging.INFO) def _add_method(*clazzes, **kwargs): @@ -22,7 +28,10 @@ more classes.""" allowDefault = kwargs.get('allowDefaultTable', False) def wrapper(method): + done = [] for clazz in clazzes: + if clazz in done: continue # Support multiple names of a clazz + done.append(clazz) assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.' assert method.__name__ not in clazz.__dict__, \ "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__) @@ -141,7 +150,7 @@ @_add_method(DefaultTable, allowDefaultTable=True) def merge(self, m, tables): if not hasattr(self, 'mergeMap'): - m.log("Don't know how to merge '%s'." % self.tableTag) + log.info("Don't know how to merge '%s'.", self.tableTag) return NotImplemented logic = self.mergeMap @@ -307,12 +316,6 @@ 'metrics': sumDicts, } -ttLib.getTableClass('gasp').mergeMap = { - 'tableTag': equal, - 'version': max, - 'gaspRange': first, # FIXME? Appears irreconcilable -} - ttLib.getTableClass('name').mergeMap = { 'tableTag': equal, 'names': first, # FIXME? Does mixing name records make sense? 
@@ -346,24 +349,32 @@ ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst) ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst) ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable @_add_method(ttLib.getTableClass('cmap')) def merge(self, m, tables): # TODO Handle format=14. - cmapTables = [(t,fontIdx) for fontIdx,table in enumerate(tables) for t in table.tables if t.isUnicode()] - # TODO Better handle format-4 and format-12 coexisting in same font. - # TODO Insert both a format-4 and format-12 if needed. - module = ttLib.getTableModule('cmap') - assert all(t.format in [4, 12] for t,_ in cmapTables) - format = max(t.format for t,_ in cmapTables) - cmapTable = module.cmap_classes[format](format) - cmapTable.cmap = {} - cmapTable.platformID = 3 - cmapTable.platEncID = max(t.platEncID for t,_ in cmapTables) - cmapTable.language = 0 - cmap = cmapTable.cmap + # Only merges 4/3/1 and 12/3/10 subtables, ignores all other subtables + # If there is a format 12 table for the same font, ignore the format 4 table + cmapTables = [] + for fontIdx,table in enumerate(tables): + format4 = None + format12 = None + for subtable in table.tables: + properties = (subtable.format, subtable.platformID, subtable.platEncID) + if properties == (4,3,1): + format4 = subtable + elif properties == (12,3,10): + format12 = subtable + if format12 is not None: + cmapTables.append((format12, fontIdx)) + elif format4 is not None: + cmapTables.append((format4, fontIdx)) + + # Build a unicode mapping, then decide which format is needed to store it. + cmap = {} for table,fontIdx in cmapTables: - # TODO handle duplicates. + # handle duplicates for uni,gid in table.cmap.items(): oldgid = cmap.get(uni, None) if oldgid is None: @@ -371,26 +382,121 @@ elif oldgid != gid: # Char previously mapped to oldgid, now to gid. # Record, to fix up in GSUB 'locl' later. 
- assert m.duplicateGlyphsPerFont[fontIdx].get(oldgid, gid) == gid - m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid + if m.duplicateGlyphsPerFont[fontIdx].get(oldgid, gid) == gid: + m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid + else: + # Char previously mapped to oldgid but already remapped to a different gid. + # TODO: Try harder to do something about these. + log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid) + + cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF} + self.tables = [] + module = ttLib.getTableModule('cmap') + if len(cmapBmpOnly) != len(cmap): + # format-12 required. + cmapTable = module.cmap_classes[12](12) + cmapTable.platformID = 3 + cmapTable.platEncID = 10 + cmapTable.language = 0 + cmapTable.cmap = cmap + self.tables.append(cmapTable) + # always create format-4 + cmapTable = module.cmap_classes[4](4) + cmapTable.platformID = 3 + cmapTable.platEncID = 1 + cmapTable.language = 0 + cmapTable.cmap = cmapBmpOnly + # ordered by platform then encoding + self.tables.insert(0, cmapTable) self.tableVersion = 0 - self.tables = [cmapTable] self.numSubTables = len(self.tables) return self +def mergeLookupLists(lst): + # TODO Do smarter merge. 
+ return sumLists(lst) + +def mergeFeatures(lst): + assert lst + self = otTables.Feature() + self.FeatureParams = None + self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex]) + self.LookupCount = len(self.LookupListIndex) + return self + +def mergeFeatureLists(lst): + d = {} + for l in lst: + for f in l: + tag = f.FeatureTag + if tag not in d: + d[tag] = [] + d[tag].append(f.Feature) + ret = [] + for tag in sorted(d.keys()): + rec = otTables.FeatureRecord() + rec.FeatureTag = tag + rec.Feature = mergeFeatures(d[tag]) + ret.append(rec) + return ret + +def mergeLangSyses(lst): + assert lst + + # TODO Support merging ReqFeatureIndex + assert all(l.ReqFeatureIndex == 0xFFFF for l in lst) + + self = otTables.LangSys() + self.LookupOrder = None + self.ReqFeatureIndex = 0xFFFF + self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex]) + self.FeatureCount = len(self.FeatureIndex) + return self + +def mergeScripts(lst): + assert lst + + if len(lst) == 1: + return lst[0] + # TODO Support merging LangSysRecords + assert all(not s.LangSysRecord for s in lst) + + self = otTables.Script() + self.LangSysRecord = [] + self.LangSysCount = 0 + self.DefaultLangSys = mergeLangSyses([s.DefaultLangSys for s in lst if s.DefaultLangSys]) + return self + +def mergeScriptRecords(lst): + d = {} + for l in lst: + for s in l: + tag = s.ScriptTag + if tag not in d: + d[tag] = [] + d[tag].append(s.Script) + ret = [] + for tag in sorted(d.keys()): + rec = otTables.ScriptRecord() + rec.ScriptTag = tag + rec.Script = mergeScripts(d[tag]) + ret.append(rec) + return ret + otTables.ScriptList.mergeMap = { - 'ScriptCount': sum, - 'ScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.ScriptTag), + 'ScriptCount': lambda lst: None, # TODO + 'ScriptRecord': mergeScriptRecords, } otTables.BaseScriptList.mergeMap = { - 'BaseScriptCount': sum, + 'BaseScriptCount': lambda lst: None, # TODO + # TODO: Merge duplicate entries 
'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag), } otTables.FeatureList.mergeMap = { 'FeatureCount': sum, - 'FeatureRecord': sumLists, + 'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag), } otTables.LookupList.mergeMap = { @@ -399,10 +505,12 @@ } otTables.Coverage.mergeMap = { + 'Format': min, 'glyphs': sumLists, } otTables.ClassDef.mergeMap = { + 'Format': min, 'classDefs': sumDicts, } @@ -463,15 +571,14 @@ assert len(tables) == len(m.duplicateGlyphsPerFont) for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)): if not dups: continue - assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB" % (i + 1) - lookupMap = {id(v):v for v in table.table.LookupList.Lookup} - featureMap = {id(v):v for v in table.table.FeatureList.FeatureRecord} + assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB: %s" % (i + 1, dups) synthFeature = None synthLookup = None for script in table.table.ScriptList.ScriptRecord: if script.ScriptTag == 'DFLT': continue # XXX for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]: - feature = [featureMap[v] for v in langsys.FeatureIndex if featureMap[v].FeatureTag == 'locl'] + if langsys is None: continue # XXX Create! 
+ feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl'] assert len(feature) <= 1 if feature: feature = feature[0] @@ -483,9 +590,8 @@ f.FeatureParams = None f.LookupCount = 0 f.LookupListIndex = [] - langsys.FeatureIndex.append(id(synthFeature)) - featureMap[id(synthFeature)] = synthFeature - langsys.FeatureIndex.sort(key=lambda v: featureMap[v].FeatureTag) + langsys.FeatureIndex.append(synthFeature) + langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag) table.table.FeatureList.FeatureRecord.append(synthFeature) table.table.FeatureList.FeatureCount += 1 feature = synthFeature @@ -501,7 +607,7 @@ table.table.LookupList.Lookup.append(synthLookup) table.table.LookupList.LookupCount += 1 - feature.Feature.LookupListIndex[:0] = [id(synthLookup)] + feature.Feature.LookupListIndex[:0] = [synthLookup] feature.Feature.LookupCount += 1 DefaultTable.merge(self, m, tables) @@ -647,6 +753,9 @@ def __init__(self, **kwargs): + self.verbose = False + self.timing = False + self.set(**kwargs) def set(self, **kwargs): @@ -655,7 +764,7 @@ raise self.UnknownOptionError("Unknown option '%s'" % k) setattr(self, k, v) - def parse_opts(self, argv, ignore_unknown=False): + def parse_opts(self, argv, ignore_unknown=[]): ret = [] opts = {} for a in argv: @@ -715,18 +824,50 @@ return ret +class _AttendanceRecordingIdentityDict(dict): + """A dictionary-like object that records indices of items actually accessed + from a list.""" + + def __init__(self, lst): + self.l = lst + self.d = {id(v):i for i,v in enumerate(lst)} + self.s = set() + + def __getitem__(self, v): + self.s.add(self.d[id(v)]) + return v + +class _GregariousDict(dict): + """A dictionary-like object that welcomes guests without reservations and + adds them to the end of the guest list.""" + + def __init__(self, lst): + self.l = lst + self.s = set(id(v) for v in lst) + + def __getitem__(self, v): + if id(v) not in self.s: + self.s.add(id(v)) + self.l.append(v) + return v + +class _NonhashableDict(dict): + """A 
dictionary-like object mapping objects to their index within a list.""" + + def __init__(self, lst): + self.d = {id(v):i for i,v in enumerate(lst)} + + def __getitem__(self, v): + return self.d[id(v)] class Merger(object): - def __init__(self, options=None, log=None): + def __init__(self, options=None): - if not log: - log = Logger() if not options: options = Options() self.options = options - self.log = log def merge(self, fontfiles): @@ -763,19 +904,19 @@ allTags = ['cmap'] + list(allTags) for tag in allTags: + with timer("merge '%s'" % tag): + tables = [font.get(tag, NotImplemented) for font in fonts] - tables = [font.get(tag, NotImplemented) for font in fonts] - - clazz = ttLib.getTableClass(tag) - table = clazz(tag).merge(self, tables) - # XXX Clean this up and use: table = mergeObjects(tables) - - if table is not NotImplemented and table is not False: - mega[tag] = table - self.log("Merged '%s'." % tag) - else: - self.log("Dropped '%s'." % tag) - self.log.lapse("merge '%s'" % tag) + log.info("Merging '%s'.", tag) + clazz = ttLib.getTableClass(tag) + table = clazz(tag).merge(self, tables) + # XXX Clean this up and use: table = mergeObjects(tables) + + if table is not NotImplemented and table is not False: + mega[tag] = table + log.info("Merged '%s'.", tag) + else: + log.info("Dropped '%s'.", tag) del self.duplicateGlyphsPerFont @@ -831,14 +972,12 @@ if not t: continue if t.table.LookupList: - lookupMap = {i:id(v) for i,v in enumerate(t.table.LookupList.Lookup)} + lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)} t.table.LookupList.mapLookups(lookupMap) - if t.table.FeatureList: - # XXX Handle present FeatureList but absent LookupList - t.table.FeatureList.mapLookups(lookupMap) + t.table.FeatureList.mapLookups(lookupMap) if t.table.FeatureList and t.table.ScriptList: - featureMap = {i:id(v) for i,v in enumerate(t.table.FeatureList.FeatureRecord)} + featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)} 
t.table.ScriptList.mapFeatures(featureMap) # TODO GDEF/Lookup MarkFilteringSets @@ -855,95 +994,85 @@ for t in [GSUB, GPOS]: if not t: continue + if t.table.FeatureList and t.table.ScriptList: + + # Collect unregistered (new) features. + featureMap = _GregariousDict(t.table.FeatureList.FeatureRecord) + t.table.ScriptList.mapFeatures(featureMap) + + # Record used features. + featureMap = _AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord) + t.table.ScriptList.mapFeatures(featureMap) + usedIndices = featureMap.s + + # Remove unused features + t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices] + + # Map back to indices. + featureMap = _NonhashableDict(t.table.FeatureList.FeatureRecord) + t.table.ScriptList.mapFeatures(featureMap) + + t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord) + if t.table.LookupList: - lookupMap = {id(v):i for i,v in enumerate(t.table.LookupList.Lookup)} + + # Collect unregistered (new) lookups. + lookupMap = _GregariousDict(t.table.LookupList.Lookup) + t.table.FeatureList.mapLookups(lookupMap) t.table.LookupList.mapLookups(lookupMap) - if t.table.FeatureList: - # XXX Handle present FeatureList but absent LookupList - t.table.FeatureList.mapLookups(lookupMap) - if t.table.FeatureList and t.table.ScriptList: - # XXX Handle present ScriptList but absent FeatureList - featureMap = {id(v):i for i,v in enumerate(t.table.FeatureList.FeatureRecord)} - t.table.ScriptList.mapFeatures(featureMap) + # Record used lookups. + lookupMap = _AttendanceRecordingIdentityDict(t.table.LookupList.Lookup) + t.table.FeatureList.mapLookups(lookupMap) + t.table.LookupList.mapLookups(lookupMap) + usedIndices = lookupMap.s - # TODO GDEF/Lookup MarkFilteringSets - # TODO FeatureParams nameIDs + # Remove unused lookups + t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices] + # Map back to indices. 
+ lookupMap = _NonhashableDict(t.table.LookupList.Lookup) + t.table.FeatureList.mapLookups(lookupMap) + t.table.LookupList.mapLookups(lookupMap) -class Logger(object): + t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup) - def __init__(self, verbose=False, xml=False, timing=False): - self.verbose = verbose - self.xml = xml - self.timing = timing - self.last_time = self.start_time = time.time() - - def parse_opts(self, argv): - argv = argv[:] - for v in ['verbose', 'xml', 'timing']: - if "--"+v in argv: - setattr(self, v, True) - argv.remove("--"+v) - return argv - - def __call__(self, *things): - if not self.verbose: - return - print(' '.join(str(x) for x in things)) - - def lapse(self, *things): - if not self.timing: - return - new_time = time.time() - print("Took %0.3fs to %s" %(new_time - self.last_time, - ' '.join(str(x) for x in things))) - self.last_time = new_time - - def font(self, font, file=sys.stdout): - if not self.xml: - return - from fontTools.misc import xmlWriter - writer = xmlWriter.XMLWriter(file) - font.disassembleInstructions = False # Work around ttLib bug - for tag in font.keys(): - writer.begintag(tag) - writer.newline() - font[tag].toXML(writer, font) - writer.endtag(tag) - writer.newline() + # TODO GDEF/Lookup MarkFilteringSets + # TODO FeatureParams nameIDs __all__ = [ 'Options', 'Merger', - 'Logger', 'main' ] +@timer("make one with everything (TOTAL TIME)") def main(args=None): + from fontTools import configLogger if args is None: args = sys.argv[1:] - log = Logger() - args = log.parse_opts(args) - options = Options() args = options.parse_opts(args) if len(args) < 1: print("usage: pyftmerge font...", file=sys.stderr) - sys.exit(1) + return 1 + + configLogger(level=logging.INFO if options.verbose else logging.WARNING) + if options.timing: + timer.logger.setLevel(logging.DEBUG) + else: + timer.logger.disabled = True - merger = Merger(options=options, log=log) + merger = Merger(options=options) font = merger.merge(args) outfile 
= 'merged.ttf' - font.save(outfile) - log.lapse("compile and save font") + with timer("compile and save font"): + font.save(outfile) - log.last_time = log.start_time - log.lapse("make one with everything(TOTAL TIME)") if __name__ == "__main__": - main() + sys.exit(main()) diff -Nru fonttools-3.0/Lib/fontTools/misc/arrayTools.py fonttools-3.21.2/Lib/fontTools/misc/arrayTools.py --- fonttools-3.0/Lib/fontTools/misc/arrayTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/arrayTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -6,7 +6,9 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +from numbers import Number import math +import operator def calcBounds(array): """Return the bounding rectangle of a 2D points array as a tuple: @@ -21,13 +23,9 @@ def calcIntBounds(array): """Return the integer bounding rectangle of a 2D points array as a tuple: (xMin, yMin, xMax, yMax) + Values are rounded to closest integer. 
""" - xMin, yMin, xMax, yMax = calcBounds(array) - xMin = int(math.floor(xMin)) - xMax = int(math.ceil(xMax)) - yMin = int(math.floor(yMin)) - yMax = int(math.ceil(yMax)) - return xMin, yMin, xMax, yMax + return tuple(round(v) for v in calcBounds(array)) def updateBounds(bounds, p, min=min, max=max): @@ -124,6 +122,136 @@ return (xMin, yMin, xMax, yMax) +class Vector(object): + """A math-like vector.""" + + def __init__(self, values, keep=False): + self.values = values if keep else list(values) + + def __getitem__(self, index): + return self.values[index] + + def __len__(self): + return len(self.values) + + def __repr__(self): + return "Vector(%s)" % self.values + + def _vectorOp(self, other, op): + if isinstance(other, Vector): + assert len(self.values) == len(other.values) + a = self.values + b = other.values + return [op(a[i], b[i]) for i in range(len(self.values))] + if isinstance(other, Number): + return [op(v, other) for v in self.values] + raise NotImplementedError + + def _scalarOp(self, other, op): + if isinstance(other, Number): + return [op(v, other) for v in self.values] + raise NotImplementedError + + def _unaryOp(self, op): + return [op(v) for v in self.values] + + def __add__(self, other): + return Vector(self._vectorOp(other, operator.add), keep=True) + def __iadd__(self, other): + self.values = self._vectorOp(other, operator.add) + return self + __radd__ = __add__ + + def __sub__(self, other): + return Vector(self._vectorOp(other, operator.sub), keep=True) + def __isub__(self, other): + self.values = self._vectorOp(other, operator.sub) + return self + def __rsub__(self, other): + return other + (-self) + + def __mul__(self, other): + return Vector(self._scalarOp(other, operator.mul), keep=True) + def __imul__(self, other): + self.values = self._scalarOp(other, operator.mul) + return self + __rmul__ = __mul__ + + def __truediv__(self, other): + return Vector(self._scalarOp(other, operator.div), keep=True) + def __itruediv__(self, other): + 
self.values = self._scalarOp(other, operator.div) + return self + + def __pos__(self): + return Vector(self._unaryOp(operator.pos), keep=True) + def __neg__(self): + return Vector(self._unaryOp(operator.neg), keep=True) + def __round__(self): + return Vector(self._unaryOp(round), keep=True) + def toInt(self): + return self.__round__() + + def __eq__(self, other): + if type(other) == Vector: + return self.values == other.values + else: + return self.values == other + def __ne__(self, other): + return not self.__eq__(other) + + def __bool__(self): + return any(self.values) + __nonzero__ = __bool__ + + def __abs__(self): + return math.sqrt(sum([x*x for x in self.values])) + def dot(self, other): + a = self.values + b = other.values if type(other) == Vector else b + assert len(a) == len(b) + return sum([a[i] * b[i] for i in range(len(a))]) + + +def pairwise(iterable, reverse=False): + """Iterate over current and next items in iterable, optionally in + reverse order. + + >>> tuple(pairwise([])) + () + >>> tuple(pairwise([], reverse=True)) + () + >>> tuple(pairwise([0])) + ((0, 0),) + >>> tuple(pairwise([0], reverse=True)) + ((0, 0),) + >>> tuple(pairwise([0, 1])) + ((0, 1), (1, 0)) + >>> tuple(pairwise([0, 1], reverse=True)) + ((1, 0), (0, 1)) + >>> tuple(pairwise([0, 1, 2])) + ((0, 1), (1, 2), (2, 0)) + >>> tuple(pairwise([0, 1, 2], reverse=True)) + ((2, 1), (1, 0), (0, 2)) + >>> tuple(pairwise(['a', 'b', 'c', 'd'])) + (('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')) + >>> tuple(pairwise(['a', 'b', 'c', 'd'], reverse=True)) + (('d', 'c'), ('c', 'b'), ('b', 'a'), ('a', 'd')) + """ + if not iterable: + return + if reverse: + it = reversed(iterable) + else: + it = iter(iterable) + first = next(it, None) + a = first + for b in it: + yield (a, b) + a = b + yield (a, first) + + def _test(): """ >>> import math diff -Nru fonttools-3.0/Lib/fontTools/misc/bezierTools.py fonttools-3.21.2/Lib/fontTools/misc/bezierTools.py --- fonttools-3.0/Lib/fontTools/misc/bezierTools.py 
2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/bezierTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,10 +1,20 @@ +# -*- coding: utf-8 -*- """fontTools.misc.bezierTools.py -- tools for working with bezier path segments. """ from __future__ import print_function, division, absolute_import +from fontTools.misc.arrayTools import calcBounds from fontTools.misc.py23 import * +import math + __all__ = [ + "approximateCubicArcLength", + "approximateCubicArcLengthC", + "approximateQuadraticArcLength", + "approximateQuadraticArcLengthC", + "calcQuadraticArcLength", + "calcQuadraticArcLengthC", "calcQuadraticBounds", "calcCubicBounds", "splitLine", @@ -16,9 +26,96 @@ "solveCubic", ] -from fontTools.misc.arrayTools import calcBounds -epsilon = 1e-12 +epsilonDigits = 6 +epsilon = 1e-10 + + +def _dot(v1, v2): + return (v1 * v2.conjugate()).real + + +def _intSecAtan(x): + # In : sympy.integrate(sp.sec(sp.atan(x))) + # Out: x*sqrt(x**2 + 1)/2 + asinh(x)/2 + return x * math.sqrt(x**2 + 1)/2 + math.asinh(x)/2 + + +def calcQuadraticArcLength(pt1, pt2, pt3, approximate_fallback=False): + """Return the arc length for a qudratic bezier segment. + pt1 and pt3 are the "anchor" points, pt2 is the "handle". 
+ + >>> calcQuadraticArcLength((0, 0), (0, 0), (0, 0)) # empty segment + 0.0 + >>> calcQuadraticArcLength((0, 0), (50, 0), (80, 0)) # collinear points + 80.0 + >>> calcQuadraticArcLength((0, 0), (0, 50), (0, 80)) # collinear points vertical + 80.0 + >>> calcQuadraticArcLength((0, 0), (50, 20), (100, 40)) # collinear points + 107.70329614269008 + >>> calcQuadraticArcLength((0, 0), (0, 100), (100, 0)) + 154.02976155645263 + >>> calcQuadraticArcLength((0, 0), (0, 50), (100, 0)) + 120.21581243984076 + >>> calcQuadraticArcLength((0, 0), (50, -10), (80, 50)) + 102.53273816445825 + >>> calcQuadraticArcLength((0, 0), (40, 0), (-40, 0), True) # collinear points, control point outside, exact result should be 66.6666666666667 + 69.41755572720999 + >>> calcQuadraticArcLength((0, 0), (40, 0), (0, 0), True) # collinear points, looping back, exact result should be 40 + 34.4265186329548 + """ + return calcQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3), approximate_fallback) + + +def calcQuadraticArcLengthC(pt1, pt2, pt3, approximate_fallback=False): + """Return the arc length for a qudratic bezier segment using complex points. + pt1 and pt3 are the "anchor" points, pt2 is the "handle".""" + + # Analytical solution to the length of a quadratic bezier. + # I'll explain how I arrived at this later. + d0 = pt2 - pt1 + d1 = pt3 - pt2 + d = d1 - d0 + n = d * 1j + scale = abs(n) + if scale == 0.: + return abs(pt3-pt1) + origDist = _dot(n,d0) + if origDist == 0.: + if _dot(d0,d1) >= 0: + return abs(pt3-pt1) + if approximate_fallback: + return approximateQuadraticArcLengthC(pt1, pt2, pt3) + assert 0 # TODO handle cusps + x0 = _dot(d,d0) / origDist + x1 = _dot(d,d1) / origDist + Len = abs(2 * (_intSecAtan(x1) - _intSecAtan(x0)) * origDist / (scale * (x1 - x0))) + return Len + + +def approximateQuadraticArcLength(pt1, pt2, pt3): + # Approximate length of quadratic Bezier curve using Gauss-Legendre quadrature + # with n=3 points. 
+ return approximateQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3)) + + +def approximateQuadraticArcLengthC(pt1, pt2, pt3): + # Approximate length of quadratic Bezier curve using Gauss-Legendre quadrature + # with n=3 points for complex points. + # + # This, essentially, approximates the length-of-derivative function + # to be integrated with the best-matching fifth-degree polynomial + # approximation of it. + # + #https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Legendre_quadrature + + # abs(BezierCurveC[2].diff(t).subs({t:T})) for T in sorted(.5, .5±sqrt(3/5)/2), + # weighted 5/18, 8/18, 5/18 respectively. + v0 = abs(-0.492943519233745*pt1 + 0.430331482911935*pt2 + 0.0626120363218102*pt3) + v1 = abs(pt3-pt1)*0.4444444444444444 + v2 = abs(-0.0626120363218102*pt1 - 0.430331482911935*pt2 + 0.492943519233745*pt3) + + return v0 + v1 + v2 def calcQuadraticBounds(pt1, pt2, pt3): @@ -42,6 +139,50 @@ return calcBounds(points) +def approximateCubicArcLength(pt1, pt2, pt3, pt4): + """Return the approximate arc length for a cubic bezier segment. + pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles". + + >>> approximateCubicArcLength((0, 0), (25, 100), (75, 100), (100, 0)) + 190.04332968932817 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, 50), (100, 100)) + 154.8852074945903 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, 0), (150, 0)) # line; exact result should be 150. + 149.99999999999991 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, 0), (-50, 0)) # cusp; exact result should be 150. + 136.9267662156362 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, -50), (-50, 0)) # cusp + 154.80848416537057 + """ + # Approximate length of cubic Bezier curve using Gauss-Lobatto quadrature + # with n=5 points. 
+ return approximateCubicArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3), complex(*pt4)) + + +def approximateCubicArcLengthC(pt1, pt2, pt3, pt4): + """Return the approximate arc length for a cubic bezier segment of complex points. + pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles".""" + + # Approximate length of cubic Bezier curve using Gauss-Lobatto quadrature + # with n=5 points for complex points. + # + # This, essentially, approximates the length-of-derivative function + # to be integrated with the best-matching seventh-degree polynomial + # approximation of it. + # + # https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Lobatto_rules + + # abs(BezierCurveC[3].diff(t).subs({t:T})) for T in sorted(0, .5±(3/7)**.5/2, .5, 1), + # weighted 1/20, 49/180, 32/90, 49/180, 1/20 respectively. + v0 = abs(pt2-pt1)*.15 + v1 = abs(-0.558983582205757*pt1 + 0.325650248872424*pt2 + 0.208983582205757*pt3 + 0.024349751127576*pt4) + v2 = abs(pt4-pt1+pt3-pt2)*0.26666666666666666 + v3 = abs(-0.024349751127576*pt1 - 0.208983582205757*pt2 - 0.325650248872424*pt3 + 0.558983582205757*pt4) + v4 = abs(pt4-pt3)*.15 + + return v0 + v1 + v2 + v3 + v4 + + def calcCubicBounds(pt1, pt2, pt3, pt4): """Return the bounding rectangle for a cubic bezier segment. pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles". 
@@ -214,12 +355,14 @@ t2 = ts[i+1] delta = (t2 - t1) # calc new a, b and c - a1x = ax * delta**2 - a1y = ay * delta**2 + delta_2 = delta*delta + a1x = ax * delta_2 + a1y = ay * delta_2 b1x = (2*ax*t1 + bx) * delta b1y = (2*ay*t1 + by) * delta - c1x = ax*t1**2 + bx*t1 + cx - c1y = ay*t1**2 + by*t1 + cy + t1_2 = t1*t1 + c1x = ax*t1_2 + bx*t1 + cx + c1y = ay*t1_2 + by*t1 + cy pt1, pt2, pt3 = calcQuadraticPoints((a1x, a1y), (b1x, b1y), (c1x, c1y)) segments.append((pt1, pt2, pt3)) @@ -239,15 +382,21 @@ t1 = ts[i] t2 = ts[i+1] delta = (t2 - t1) + + delta_2 = delta*delta + delta_3 = delta*delta_2 + t1_2 = t1*t1 + t1_3 = t1*t1_2 + # calc new a, b, c and d - a1x = ax * delta**3 - a1y = ay * delta**3 - b1x = (3*ax*t1 + bx) * delta**2 - b1y = (3*ay*t1 + by) * delta**2 - c1x = (2*bx*t1 + cx + 3*ax*t1**2) * delta - c1y = (2*by*t1 + cy + 3*ay*t1**2) * delta - d1x = ax*t1**3 + bx*t1**2 + cx*t1 + dx - d1y = ay*t1**3 + by*t1**2 + cy*t1 + dy + a1x = ax * delta_3 + a1y = ay * delta_3 + b1x = (3*ax*t1 + bx) * delta_2 + b1y = (3*ay*t1 + by) * delta_2 + c1x = (2*bx*t1 + cx + 3*ax*t1_2) * delta + c1y = (2*by*t1 + cy + 3*ay*t1_2) * delta + d1x = ax*t1_3 + bx*t1_2 + cx*t1 + dx + d1y = ay*t1_3 + by*t1_2 + cy*t1 + dy pt1, pt2, pt3, pt4 = calcCubicPoints((a1x, a1y), (b1x, b1y), (c1x, c1y), (d1x, d1y)) segments.append((pt1, pt2, pt3, pt4)) return segments @@ -291,6 +440,21 @@ a*x*x*x + b*x*x + c*x + d = 0 This function returns a list of roots. Note that the returned list is neither guaranteed to be sorted nor to contain unique values! + + >>> solveCubic(1, 1, -6, 0) + [-3.0, -0.0, 2.0] + >>> solveCubic(-10.0, -9.0, 48.0, -29.0) + [-2.9, 1.0, 1.0] + >>> solveCubic(-9.875, -9.0, 47.625, -28.75) + [-2.911392, 1.0, 1.0] + >>> solveCubic(1.0, -4.5, 6.75, -3.375) + [1.5, 1.5, 1.5] + >>> solveCubic(-12.0, 18.0, -9.0, 1.50023651123) + [0.5, 0.5, 0.5] + >>> solveCubic( + ... 9.0, 0.0, 0.0, -7.62939453125e-05 + ... 
) == [-0.0, -0.0, -0.0] + True """ # # adapted from: @@ -309,24 +473,46 @@ Q = (a1*a1 - 3.0*a2)/9.0 R = (2.0*a1*a1*a1 - 9.0*a1*a2 + 27.0*a3)/54.0 - R2_Q3 = R*R - Q*Q*Q - if R2_Q3 < 0: - theta = acos(R/sqrt(Q*Q*Q)) + R2 = R*R + Q3 = Q*Q*Q + R2 = 0 if R2 < epsilon else R2 + Q3 = 0 if abs(Q3) < epsilon else Q3 + + R2_Q3 = R2 - Q3 + + if R2 == 0. and Q3 == 0.: + x = round(-a1/3.0, epsilonDigits) + return [x, x, x] + elif R2_Q3 <= epsilon * .5: + # The epsilon * .5 above ensures that Q3 is not zero. + theta = acos(max(min(R/sqrt(Q3), 1.0), -1.0)) rQ2 = -2.0*sqrt(Q) - x0 = rQ2*cos(theta/3.0) - a1/3.0 - x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1/3.0 - x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1/3.0 + a1_3 = a1/3.0 + x0 = rQ2*cos(theta/3.0) - a1_3 + x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1_3 + x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1_3 + x0, x1, x2 = sorted([x0, x1, x2]) + # Merge roots that are close-enough + if x1 - x0 < epsilon and x2 - x1 < epsilon: + x0 = x1 = x2 = round((x0 + x1 + x2) / 3., epsilonDigits) + elif x1 - x0 < epsilon: + x0 = x1 = round((x0 + x1) / 2., epsilonDigits) + x2 = round(x2, epsilonDigits) + elif x2 - x1 < epsilon: + x0 = round(x0, epsilonDigits) + x1 = x2 = round((x1 + x2) / 2., epsilonDigits) + else: + x0 = round(x0, epsilonDigits) + x1 = round(x1, epsilonDigits) + x2 = round(x2, epsilonDigits) return [x0, x1, x2] else: - if Q == 0 and R == 0: - x = 0 - else: - x = pow(sqrt(R2_Q3)+abs(R), 1/3.0) - x = x + Q/x + x = pow(sqrt(R2_Q3)+abs(R), 1/3.0) + x = x + Q/x if R >= 0.0: x = -x - x = x - a1/3.0 + x = round(x - a1/3.0, epsilonDigits) return [x] diff -Nru fonttools-3.0/Lib/fontTools/misc/classifyTools.py fonttools-3.21.2/Lib/fontTools/misc/classifyTools.py --- fonttools-3.0/Lib/fontTools/misc/classifyTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/classifyTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,173 @@ +""" fontTools.misc.classifyTools.py -- tools for classifying things. 
+""" + +from __future__ import print_function, absolute_import +from fontTools.misc.py23 import * + +class Classifier(object): + + """ + Main Classifier object, used to classify things into similar sets. + """ + + def __init__(self, sort=True): + + self._things = set() # set of all things known so far + self._sets = [] # list of class sets produced so far + self._mapping = {} # map from things to their class set + self._dirty = False + self._sort = sort + + def add(self, set_of_things): + """ + Add a set to the classifier. Any iterable is accepted. + """ + if not set_of_things: + return + + self._dirty = True + + things, sets, mapping = self._things, self._sets, self._mapping + + s = set(set_of_things) + intersection = s.intersection(things) # existing things + s.difference_update(intersection) # new things + difference = s + del s + + # Add new class for new things + if difference: + things.update(difference) + sets.append(difference) + for thing in difference: + mapping[thing] = difference + del difference + + while intersection: + # Take one item and process the old class it belongs to + old_class = mapping[next(iter(intersection))] + old_class_intersection = old_class.intersection(intersection) + + # Update old class to remove items from new set + old_class.difference_update(old_class_intersection) + + # Remove processed items from todo list + intersection.difference_update(old_class_intersection) + + # Add new class for the intersection with old class + sets.append(old_class_intersection) + for thing in old_class_intersection: + mapping[thing] = old_class_intersection + del old_class_intersection + + def update(self, list_of_sets): + """ + Add a a list of sets to the classifier. Any iterable of iterables is accepted. 
+ """ + for s in list_of_sets: + self.add(s) + + def _process(self): + if not self._dirty: + return + + # Do any deferred processing + sets = self._sets + self._sets = [s for s in sets if s] + + if self._sort: + self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s))) + + self._dirty = False + + # Output methods + + def getThings(self): + """Returns the set of all things known so far. + + The return value belongs to the Classifier object and should NOT + be modified while the classifier is still in use. + """ + self._process() + return self._things + + def getMapping(self): + """Returns the mapping from things to their class set. + + The return value belongs to the Classifier object and should NOT + be modified while the classifier is still in use. + """ + self._process() + return self._mapping + + def getClasses(self): + """Returns the list of class sets. + + The return value belongs to the Classifier object and should NOT + be modified while the classifier is still in use. + """ + self._process() + return self._sets + + +def classify(list_of_sets, sort=True): + """ + Takes a iterable of iterables (list of sets from here on; but any + iterable works.), and returns the smallest list of sets such that + each set, is either a subset, or is disjoint from, each of the input + sets. + + In other words, this function classifies all the things present in + any of the input sets, into similar classes, based on which sets + things are a member of. + + If sort=True, return class sets are sorted by decreasing size and + their natural sort order within each class size. Otherwise, class + sets are returned in the order that they were identified, which is + generally not significant. 
+ + >>> classify([]) == ([], {}) + True + >>> classify([[]]) == ([], {}) + True + >>> classify([[], []]) == ([], {}) + True + >>> classify([[1]]) == ([{1}], {1: {1}}) + True + >>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}}) + True + >>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) + True + >>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) + True + >>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}}) + True + >>> classify([[1,2],[2,4,5]]) == ( + ... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) + True + >>> classify([[1,2],[2,4,5]], sort=False) == ( + ... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) + True + >>> classify([[1,2,9],[2,4,5]], sort=False) == ( + ... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5}, + ... 9: {1, 9}}) + True + >>> classify([[1,2,9,15],[2,4,5]], sort=False) == ( + ... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5}, + ... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}}) + True + >>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False) + >>> set([frozenset(c) for c in classes]) == set( + ... 
[frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})]) + True + >>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}} + True + """ + classifier = Classifier(sort=sort) + classifier.update(list_of_sets) + return classifier.getClasses(), classifier.getMapping() + + +if __name__ == "__main__": + import sys, doctest + sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed) diff -Nru fonttools-3.0/Lib/fontTools/misc/cliTools.py fonttools-3.21.2/Lib/fontTools/misc/cliTools.py --- fonttools-3.0/Lib/fontTools/misc/cliTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/cliTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,26 @@ +"""Collection of utilities for command-line interfaces and console scripts.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import os +import re + + +numberAddedRE = re.compile("#\d+$") + + +def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False): + dirName, fileName = os.path.split(input) + fileName, ext = os.path.splitext(fileName) + if outputDir: + dirName = outputDir + fileName = numberAddedRE.split(fileName)[0] + if extension is None: + extension = os.path.splitext(input)[1] + output = os.path.join(dirName, fileName + extension) + n = 1 + if not overWrite: + while os.path.exists(output): + output = os.path.join( + dirName, fileName + "#" + repr(n) + extension) + n += 1 + return output diff -Nru fonttools-3.0/Lib/fontTools/misc/eexec.py fonttools-3.21.2/Lib/fontTools/misc/eexec.py --- fonttools-3.0/Lib/fontTools/misc/eexec.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/eexec.py 2018-01-08 12:40:40.000000000 +0000 @@ -19,19 +19,35 @@ def decrypt(cipherstring, R): + r""" + >>> testStr = b"\0\0asdadads asds\265" + >>> decryptedStr, R = decrypt(testStr, 12321) + >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' + True + >>> R == 36142 + True + """ plainList = 
[] for cipher in cipherstring: plain, R = _decryptChar(cipher, R) plainList.append(plain) - plainstring = strjoin(plainList) + plainstring = bytesjoin(plainList) return plainstring, int(R) def encrypt(plainstring, R): + r""" + >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' + >>> encryptedStr, R = encrypt(testStr, 12321) + >>> encryptedStr == b"\0\0asdadads asds\265" + True + >>> R == 36142 + True + """ cipherList = [] for plain in plainstring: cipher, R = _encryptChar(plain, R) cipherList.append(cipher) - cipherstring = strjoin(cipherList) + cipherstring = bytesjoin(cipherList) return cipherstring, int(R) @@ -41,15 +57,11 @@ def deHexString(h): import binascii - h = strjoin(h.split()) + h = bytesjoin(h.split()) return binascii.unhexlify(h) -def _test(): - testStr = "\0\0asdadads asds\265" - print(decrypt, decrypt(testStr, 12321)) - print(encrypt, encrypt(testStr, 12321)) - - if __name__ == "__main__": - _test() + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/misc/encodingTools_test.py fonttools-3.21.2/Lib/fontTools/misc/encodingTools_test.py --- fonttools-3.0/Lib/fontTools/misc/encodingTools_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/encodingTools_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -import unittest -from .encodingTools import getEncoding - -class EncodingTest(unittest.TestCase): - - def test_encoding_unicode(self): - - self.assertEqual(getEncoding(3, 0, None), "utf_16_be") # MS Symbol is Unicode as well - self.assertEqual(getEncoding(3, 1, None), "utf_16_be") - self.assertEqual(getEncoding(3, 10, None), "utf_16_be") - self.assertEqual(getEncoding(0, 3, None), "utf_16_be") - - def test_encoding_macroman_misc(self): - self.assertEqual(getEncoding(1, 0, 17), "mac_turkish") - self.assertEqual(getEncoding(1, 0, 37), 
"mac_romanian") - self.assertEqual(getEncoding(1, 0, 45), "mac_roman") - - def test_extended_mac_encodings(self): - encoding = getEncoding(1, 1, 0) # Mac Japanese - decoded = b'\xfe'.decode(encoding) - self.assertEqual(decoded, unichr(0x2122)) - - def test_extended_unknown(self): - self.assertEqual(getEncoding(10, 11, 12), None) - self.assertEqual(getEncoding(10, 11, 12, "ascii"), "ascii") - self.assertEqual(getEncoding(10, 11, 12, default="ascii"), "ascii") - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/misc/fixedTools.py fonttools-3.21.2/Lib/fontTools/misc/fixedTools.py --- fonttools-3.0/Lib/fontTools/misc/fixedTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/fixedTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,10 +3,16 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +import logging + +log = logging.getLogger(__name__) __all__ = [ "fixedToFloat", "floatToFixed", + "floatToFixedToFloat", + "ensureVersionIsLong", + "versionToFixed", ] def fixedToFloat(value, precisionBits): @@ -42,6 +48,34 @@ def floatToFixed(value, precisionBits): """Converts a float to a fixed-point number given the number of - precisionBits. Ie. int(round(value * (1<>> import sys + >>> handler = logging.StreamHandler(sys.stdout) + >>> formatter = LevelFormatter( + ... fmt={ + ... '*': '[%(levelname)s] %(message)s', + ... 'DEBUG': '%(name)s [%(levelname)s] %(message)s', + ... 'INFO': '%(message)s', + ... 
}) + >>> handler.setFormatter(formatter) + >>> log = logging.getLogger('test') + >>> log.setLevel(logging.DEBUG) + >>> log.addHandler(handler) + >>> log.debug('this uses a custom format string') + test [DEBUG] this uses a custom format string + >>> log.info('this also uses a custom format string') + this also uses a custom format string + >>> log.warning("this one uses the default format string") + [WARNING] this one uses the default format string + """ + + def __init__(self, fmt=None, datefmt=None, style="%"): + if style != '%': + raise ValueError( + "only '%' percent style is supported in both python 2 and 3") + if fmt is None: + fmt = DEFAULT_FORMATS + if isinstance(fmt, basestring): + default_format = fmt + custom_formats = {} + elif isinstance(fmt, collections.Mapping): + custom_formats = dict(fmt) + default_format = custom_formats.pop("*", None) + else: + raise TypeError('fmt must be a str or a dict of str: %r' % fmt) + super(LevelFormatter, self).__init__(default_format, datefmt) + self.default_format = self._fmt + self.custom_formats = {} + for level, fmt in custom_formats.items(): + level = logging._checkLevel(level) + self.custom_formats[level] = fmt + + def format(self, record): + if self.custom_formats: + fmt = self.custom_formats.get(record.levelno, self.default_format) + if self._fmt != fmt: + self._fmt = fmt + # for python >= 3.2, _style needs to be set if _fmt changes + if PercentStyle: + self._style = PercentStyle(fmt) + return super(LevelFormatter, self).format(record) + + +def configLogger(**kwargs): + """ Do basic configuration for the logging system. This is more or less + the same as logging.basicConfig with some additional options and defaults. + + The default behaviour is to create a StreamHandler which writes to + sys.stderr, set a formatter using the DEFAULT_FORMATS strings, and add + the handler to the top-level library logger ("fontTools"). 
+ + A number of optional keyword arguments may be specified, which can alter + the default behaviour. + + logger Specifies the logger name or a Logger instance to be configured. + (it defaults to "fontTools" logger). Unlike basicConfig, this + function can be called multiple times to reconfigure a logger. + If the logger or any of its children already exists before the + call is made, they will be reset before the new configuration + is applied. + filename Specifies that a FileHandler be created, using the specified + filename, rather than a StreamHandler. + filemode Specifies the mode to open the file, if filename is specified + (if filemode is unspecified, it defaults to 'a'). + format Use the specified format string for the handler. This argument + also accepts a dictionary of format strings keyed by level name, + to allow customising the records appearance for specific levels. + The special '*' key is for 'any other' level. + datefmt Use the specified date/time format. + level Set the logger level to the specified level. + stream Use the specified stream to initialize the StreamHandler. Note + that this argument is incompatible with 'filename' - if both + are present, 'stream' is ignored. + handlers If specified, this should be an iterable of already created + handlers, which will be added to the logger. Any handler + in the list which does not have a formatter assigned will be + assigned the formatter created in this function. + filters If specified, this should be an iterable of already created + filters, which will be added to the handler(s), if the latter + do(es) not already have filters assigned. + propagate All loggers have a "propagate" attribute initially set to True, + which determines whether to continue searching for handlers up + the logging hierarchy. By default, this arguments sets the + "propagate" attribute to False. + """ + # using kwargs to enforce keyword-only arguments in py2. 
+ handlers = kwargs.pop("handlers", None) + if handlers is None: + if "stream" in kwargs and "filename" in kwargs: + raise ValueError("'stream' and 'filename' should not be " + "specified together") + else: + if "stream" in kwargs or "filename" in kwargs: + raise ValueError("'stream' or 'filename' should not be " + "specified together with 'handlers'") + if handlers is None: + filename = kwargs.pop("filename", None) + mode = kwargs.pop("filemode", 'a') + if filename: + h = logging.FileHandler(filename, mode) + else: + stream = kwargs.pop("stream", None) + h = logging.StreamHandler(stream) + handlers = [h] + # By default, the top-level library logger is configured. + logger = kwargs.pop("logger", "fontTools") + if not logger or isinstance(logger, basestring): + # empty "" or None means the 'root' logger + logger = logging.getLogger(logger) + # before (re)configuring, reset named logger and its children (if exist) + _resetExistingLoggers(parent=logger.name) + # use DEFAULT_FORMATS if 'format' is None + fs = kwargs.pop("format", None) + dfs = kwargs.pop("datefmt", None) + # XXX: '%' is the only format style supported on both py2 and 3 + style = kwargs.pop("style", '%') + fmt = LevelFormatter(fs, dfs, style) + filters = kwargs.pop("filters", []) + for h in handlers: + if h.formatter is None: + h.setFormatter(fmt) + if not h.filters: + for f in filters: + h.addFilter(f) + logger.addHandler(h) + if logger.name != "root": + # stop searching up the hierarchy for handlers + logger.propagate = kwargs.pop("propagate", False) + # set a custom severity level + level = kwargs.pop("level", None) + if level is not None: + logger.setLevel(level) + if kwargs: + keys = ', '.join(kwargs.keys()) + raise ValueError('Unrecognised argument(s): %s' % keys) + + +def _resetExistingLoggers(parent="root"): + """ Reset the logger named 'parent' and all its children to their initial + state, if they already exist in the current configuration. 
+ """ + root = logging.root + # get sorted list of all existing loggers + existing = sorted(root.manager.loggerDict.keys()) + if parent == "root": + # all the existing loggers are children of 'root' + loggers_to_reset = [parent] + existing + elif parent not in existing: + # nothing to do + return + elif parent in existing: + loggers_to_reset = [parent] + # collect children, starting with the entry after parent name + i = existing.index(parent) + 1 + prefixed = parent + "." + pflen = len(prefixed) + num_existing = len(existing) + while i < num_existing: + if existing[i][:pflen] == prefixed: + loggers_to_reset.append(existing[i]) + i += 1 + for name in loggers_to_reset: + if name == "root": + root.setLevel(logging.WARNING) + for h in root.handlers[:]: + root.removeHandler(h) + for f in root.filters[:]: + root.removeFilters(f) + root.disabled = False + else: + logger = root.manager.loggerDict[name] + logger.level = logging.NOTSET + logger.handlers = [] + logger.filters = [] + logger.propagate = True + logger.disabled = False + + +class Timer(object): + """ Keeps track of overall time and split/lap times. + + >>> import time + >>> timer = Timer() + >>> time.sleep(0.01) + >>> print("First lap:", timer.split()) + First lap: ... + >>> time.sleep(0.02) + >>> print("Second lap:", timer.split()) + Second lap: ... + >>> print("Overall time:", timer.time()) + Overall time: ... + + Can be used as a context manager inside with-statements. + + >>> with Timer() as t: + ... time.sleep(0.01) + >>> print("%0.3f seconds" % t.elapsed) + 0... seconds + + If initialised with a logger, it can log the elapsed time automatically + upon exiting the with-statement. + + >>> import logging + >>> log = logging.getLogger("fontTools") + >>> configLogger(level="DEBUG", format="%(message)s", stream=sys.stdout) + >>> with Timer(log, 'do something'): + ... time.sleep(0.01) + Took ... 
to do something + + The same Timer instance, holding a reference to a logger, can be reused + in multiple with-statements, optionally with different messages or levels. + + >>> timer = Timer(log) + >>> with timer(): + ... time.sleep(0.01) + elapsed time: ...s + >>> with timer('redo it', level=logging.INFO): + ... time.sleep(0.02) + Took ... to redo it + + It can also be used as a function decorator to log the time elapsed to run + the decorated function. + + >>> @timer() + ... def test1(): + ... time.sleep(0.01) + >>> @timer('run test 2', level=logging.INFO) + ... def test2(): + ... time.sleep(0.02) + >>> test1() + Took ... to run 'test1' + >>> test2() + Took ... to run test 2 + """ + + # timeit.default_timer choses the most accurate clock for each platform + _time = timeit.default_timer + default_msg = "elapsed time: %(time).3fs" + default_format = "Took %(time).3fs to %(msg)s" + + def __init__(self, logger=None, msg=None, level=None, start=None): + self.reset(start) + if logger is None: + for arg in ('msg', 'level'): + if locals().get(arg) is not None: + raise ValueError( + "'%s' can't be specified without a 'logger'" % arg) + self.logger = logger + self.level = level if level is not None else TIME_LEVEL + self.msg = msg + + def reset(self, start=None): + """ Reset timer to 'start_time' or the current time. """ + if start is None: + self.start = self._time() + else: + self.start = start + self.last = self.start + self.elapsed = 0.0 + + def time(self): + """ Return the overall time (in seconds) since the timer started. """ + return self._time() - self.start + + def split(self): + """ Split and return the lap time (in seconds) in between splits. """ + current = self._time() + self.elapsed = current - self.last + self.last = current + return self.elapsed + + def formatTime(self, msg, time): + """ Format 'time' value in 'msg' and return formatted string. + If 'msg' contains a '%(time)' format string, try to use that. + Otherwise, use the predefined 'default_format'. 
+ If 'msg' is empty or None, fall back to 'default_msg'. + """ + if not msg: + msg = self.default_msg + if msg.find("%(time)") < 0: + msg = self.default_format % {"msg": msg, "time": time} + else: + try: + msg = msg % {"time": time} + except (KeyError, ValueError): + pass # skip if the format string is malformed + return msg + + def __enter__(self): + """ Start a new lap """ + self.last = self._time() + self.elapsed = 0.0 + return self + + def __exit__(self, exc_type, exc_value, traceback): + """ End the current lap. If timer has a logger, log the time elapsed, + using the format string in self.msg (or the default one). + """ + time = self.split() + if self.logger is None or exc_type: + # if there's no logger attached, or if any exception occurred in + # the with-statement, exit without logging the time + return + message = self.formatTime(self.msg, time) + # Allow log handlers to see the individual parts to facilitate things + # like a server accumulating aggregate stats. + msg_parts = { 'msg': self.msg, 'time': time } + self.logger.log(self.level, message, msg_parts) + + def __call__(self, func_or_msg=None, **kwargs): + """ If the first argument is a function, return a decorator which runs + the wrapped function inside Timer's context manager. + Otherwise, treat the first argument as a 'msg' string and return an updated + Timer instance, referencing the same logger. + A 'level' keyword can also be passed to override self.level. 
+ """ + if isinstance(func_or_msg, collections.Callable): + func = func_or_msg + # use the function name when no explicit 'msg' is provided + if not self.msg: + self.msg = "run '%s'" % func.__name__ + + @wraps(func) + def wrapper(*args, **kwds): + with self: + return func(*args, **kwds) + return wrapper + else: + msg = func_or_msg or kwargs.get("msg") + level = kwargs.get("level", self.level) + return self.__class__(self.logger, msg, level) + + def __float__(self): + return self.elapsed + + def __int__(self): + return int(self.elapsed) + + def __str__(self): + return "%.3f" % self.elapsed + + +class ChannelsFilter(logging.Filter): + """ Filter out records emitted from a list of enabled channel names, + including their children. It works the same as the logging.Filter class, + but allows to specify multiple channel names. + + >>> import sys + >>> handler = logging.StreamHandler(sys.stdout) + >>> handler.setFormatter(logging.Formatter("%(message)s")) + >>> filter = ChannelsFilter("A.B", "C.D") + >>> handler.addFilter(filter) + >>> root = logging.getLogger() + >>> root.addHandler(handler) + >>> root.setLevel(level=logging.DEBUG) + >>> logging.getLogger('A.B').debug('this record passes through') + this record passes through + >>> logging.getLogger('A.B.C').debug('records from children also pass') + records from children also pass + >>> logging.getLogger('C.D').debug('this one as well') + this one as well + >>> logging.getLogger('A.B.').debug('also this one') + also this one + >>> logging.getLogger('A.F').debug('but this one does not!') + >>> logging.getLogger('C.DE').debug('neither this one!') + """ + + def __init__(self, *names): + self.names = names + self.num = len(names) + self.lenghts = {n: len(n) for n in names} + + def filter(self, record): + if self.num == 0: + return True + for name in self.names: + nlen = self.lenghts[name] + if name == record.name: + return True + elif (record.name.find(name, 0, nlen) == 0 + and record.name[nlen] == "."): + return True + 
return False + + +class CapturingLogHandler(logging.Handler): + def __init__(self, logger, level): + self.records = [] + self.level = logging._checkLevel(level) + if isinstance(logger, basestring): + self.logger = logging.getLogger(logger) + else: + self.logger = logger + + def __enter__(self): + self.original_disabled = self.logger.disabled + self.original_level = self.logger.level + + self.logger.addHandler(self) + self.logger.level = self.level + self.logger.disabled = False + + return self + + def __exit__(self, type, value, traceback): + self.logger.removeHandler(self) + self.logger.level = self.original_level + self.logger.disabled = self.logger.disabled + return self + + def handle(self, record): + self.records.append(record) + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + def assertRegex(self, regexp): + import re + pattern = re.compile(regexp) + for r in self.records: + if pattern.search(r.msg): + return True + assert 0, "Pattern '%s' not found in logger records" % regexp + + +class LogMixin(object): + """ Mixin class that adds logging functionality to another class. + You can define a new class that subclasses from LogMixin as well as + other base classes through multiple inheritance. + All instances of that class will have a 'log' property that returns + a logging.Logger named after their respective .. + For example: + + >>> class BaseClass(object): + ... pass + >>> class MyClass(LogMixin, BaseClass): + ... pass + >>> a = MyClass() + >>> isinstance(a.log, logging.Logger) + True + >>> print(a.log.name) + fontTools.misc.loggingTools.MyClass + >>> class AnotherClass(MyClass): + ... 
pass + >>> b = AnotherClass() + >>> isinstance(b.log, logging.Logger) + True + >>> print(b.log.name) + fontTools.misc.loggingTools.AnotherClass + """ + + @property + def log(self): + name = ".".join([self.__class__.__module__, self.__class__.__name__]) + return logging.getLogger(name) + + +def deprecateArgument(name, msg, category=UserWarning): + """ Raise a warning about deprecated function argument 'name'. """ + warnings.warn( + "%r is deprecated; %s" % (name, msg), category=category, stacklevel=3) + + +def deprecateFunction(msg, category=UserWarning): + """ Decorator to raise a warning when a deprecated function is called. """ + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + warnings.warn( + "%r is deprecated; %s" % (func.__name__, msg), + category=category, stacklevel=2) + return func(*args, **kwargs) + return wrapper + return decorator + + +if __name__ == "__main__": + import doctest + sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed) diff -Nru fonttools-3.0/Lib/fontTools/misc/macCreatorType.py fonttools-3.21.2/Lib/fontTools/misc/macCreatorType.py --- fonttools-3.0/Lib/fontTools/misc/macCreatorType.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/macCreatorType.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,10 +2,14 @@ from fontTools.misc.py23 import * import sys try: + import xattr +except ImportError: + xattr = None +try: import MacOS except ImportError: MacOS = None -from .py23 import * + def _reverseString(s): s = list(s) @@ -14,6 +18,15 @@ def getMacCreatorAndType(path): + if xattr is not None: + try: + finderInfo = xattr.getxattr(path, 'com.apple.FinderInfo') + except (KeyError, IOError): + pass + else: + fileType = Tag(finderInfo[:4]) + fileCreator = Tag(finderInfo[4:8]) + return fileCreator, fileType if MacOS is not None: fileCreator, fileType = MacOS.GetCreatorAndType(path) if sys.version_info[:2] < (2, 7) and sys.byteorder == "little": @@ -28,5 +41,11 @@ def setMacCreatorAndType(path, 
fileCreator, fileType): + if xattr is not None: + from fontTools.misc.textTools import pad + if not all(len(s) == 4 for s in (fileCreator, fileType)): + raise TypeError('arg must be string of 4 chars') + finderInfo = pad(bytesjoin([fileType, fileCreator]), 32) + xattr.setxattr(path, 'com.apple.FinderInfo', finderInfo) if MacOS is not None: MacOS.SetCreatorAndType(path, fileCreator, fileType) diff -Nru fonttools-3.0/Lib/fontTools/misc/macRes.py fonttools-3.21.2/Lib/fontTools/misc/macRes.py --- fonttools-3.0/Lib/fontTools/misc/macRes.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/macRes.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,233 @@ +""" Tools for reading Mac resource forks. """ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import struct +from fontTools.misc import sstruct +from collections import OrderedDict +try: + from collections.abc import MutableMapping +except ImportError: + from UserDict import DictMixin as MutableMapping + + +class ResourceError(Exception): + pass + + +class ResourceReader(MutableMapping): + + def __init__(self, fileOrPath): + self._resources = OrderedDict() + if hasattr(fileOrPath, 'read'): + self.file = fileOrPath + else: + try: + # try reading from the resource fork (only works on OS X) + self.file = self.openResourceFork(fileOrPath) + self._readFile() + return + except (ResourceError, IOError): + # if it fails, use the data fork + self.file = self.openDataFork(fileOrPath) + self._readFile() + + @staticmethod + def openResourceFork(path): + with open(path + '/..namedfork/rsrc', 'rb') as resfork: + data = resfork.read() + infile = BytesIO(data) + infile.name = path + return infile + + @staticmethod + def openDataFork(path): + with open(path, 'rb') as datafork: + data = datafork.read() + infile = BytesIO(data) + infile.name = path + return infile + + def _readFile(self): + self._readHeaderAndMap() + self._readTypeList() + + def _read(self, 
numBytes, offset=None): + if offset is not None: + try: + self.file.seek(offset) + except OverflowError: + raise ResourceError("Failed to seek offset ('offset' is too large)") + if self.file.tell() != offset: + raise ResourceError('Failed to seek offset (reached EOF)') + try: + data = self.file.read(numBytes) + except OverflowError: + raise ResourceError("Cannot read resource ('numBytes' is too large)") + if len(data) != numBytes: + raise ResourceError('Cannot read resource (not enough data)') + return data + + def _readHeaderAndMap(self): + self.file.seek(0) + headerData = self._read(ResourceForkHeaderSize) + sstruct.unpack(ResourceForkHeader, headerData, self) + # seek to resource map, skip reserved + mapOffset = self.mapOffset + 22 + resourceMapData = self._read(ResourceMapHeaderSize, mapOffset) + sstruct.unpack(ResourceMapHeader, resourceMapData, self) + self.absTypeListOffset = self.mapOffset + self.typeListOffset + self.absNameListOffset = self.mapOffset + self.nameListOffset + + def _readTypeList(self): + absTypeListOffset = self.absTypeListOffset + numTypesData = self._read(2, absTypeListOffset) + self.numTypes, = struct.unpack('>H', numTypesData) + absTypeListOffset2 = absTypeListOffset + 2 + for i in range(self.numTypes + 1): + resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i + resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset) + item = sstruct.unpack(ResourceTypeItem, resTypeItemData) + resType = tostr(item['type'], encoding='mac-roman') + refListOffset = absTypeListOffset + item['refListOffset'] + numRes = item['numRes'] + 1 + resources = self._readReferenceList(resType, refListOffset, numRes) + self._resources[resType] = resources + + def _readReferenceList(self, resType, refListOffset, numRes): + resources = [] + for i in range(numRes): + refOffset = refListOffset + ResourceRefItemSize * i + refData = self._read(ResourceRefItemSize, refOffset) + res = Resource(resType) + res.decompile(refData, self) + 
resources.append(res) + return resources + + def __getitem__(self, resType): + return self._resources[resType] + + def __delitem__(self, resType): + del self._resources[resType] + + def __setitem__(self, resType, resources): + self._resources[resType] = resources + + def __len__(self): + return len(self._resources) + + def __iter__(self): + return iter(self._resources) + + def keys(self): + return self._resources.keys() + + @property + def types(self): + return list(self._resources.keys()) + + def countResources(self, resType): + """Return the number of resources of a given type.""" + try: + return len(self[resType]) + except KeyError: + return 0 + + def getIndices(self, resType): + numRes = self.countResources(resType) + if numRes: + return list(range(1, numRes+1)) + else: + return [] + + def getNames(self, resType): + """Return list of names of all resources of a given type.""" + return [res.name for res in self.get(resType, []) if res.name is not None] + + def getIndResource(self, resType, index): + """Return resource of given type located at an index ranging from 1 + to the number of resources for that type, or None if not found. 
+ """ + if index < 1: + return None + try: + res = self[resType][index-1] + except (KeyError, IndexError): + return None + return res + + def getNamedResource(self, resType, name): + """Return the named resource of given type, else return None.""" + name = tostr(name, encoding='mac-roman') + for res in self.get(resType, []): + if res.name == name: + return res + return None + + def close(self): + if not self.file.closed: + self.file.close() + + +class Resource(object): + + def __init__(self, resType=None, resData=None, resID=None, resName=None, + resAttr=None): + self.type = resType + self.data = resData + self.id = resID + self.name = resName + self.attr = resAttr + + def decompile(self, refData, reader): + sstruct.unpack(ResourceRefItem, refData, self) + # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct + self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset])) + absDataOffset = reader.dataOffset + self.dataOffset + dataLength, = struct.unpack(">L", reader._read(4, absDataOffset)) + self.data = reader._read(dataLength) + if self.nameOffset == -1: + return + absNameOffset = reader.absNameListOffset + self.nameOffset + nameLength, = struct.unpack('B', reader._read(1, absNameOffset)) + name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength)) + self.name = tostr(name, encoding='mac-roman') + + +ResourceForkHeader = """ + > # big endian + dataOffset: L + mapOffset: L + dataLen: L + mapLen: L +""" + +ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader) + +ResourceMapHeader = """ + > # big endian + attr: H + typeListOffset: H + nameListOffset: H +""" + +ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader) + +ResourceTypeItem = """ + > # big endian + type: 4s + numRes: H + refListOffset: H +""" + +ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem) + +ResourceRefItem = """ + > # big endian + id: h + nameOffset: h + attr: B + dataOffset: 3s + reserved: L +""" + +ResourceRefItemSize = 
sstruct.calcsize(ResourceRefItem) diff -Nru fonttools-3.0/Lib/fontTools/misc/psCharStrings.py fonttools-3.21.2/Lib/fontTools/misc/psCharStrings.py --- fonttools-3.0/Lib/fontTools/misc/psCharStrings.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/psCharStrings.py 2018-01-08 12:40:40.000000000 +0000 @@ -4,10 +4,13 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import fixedToFloat +from fontTools.pens.boundsPen import BoundsPen import struct +import logging -DEBUG = 0 +log = logging.getLogger(__name__) def read_operator(self, b0, data, index): @@ -16,7 +19,10 @@ index = index+1 else: op = b0 - operator = self.operators[op] + try: + operator = self.operators[op] + except KeyError: + return None, index value = self.handle_operator(operator) return value, index @@ -41,7 +47,7 @@ def read_fixed1616(self, b0, data, index): value, = struct.unpack(">l", data[index:index+4]) - return value / 65536, index+4 + return fixedToFloat(value, precisionBits=16), index+4 def read_reserved(self, b0, data, index): assert NotImplementedError @@ -85,9 +91,7 @@ '.', 'E', 'E-', None, '-'] realNibblesDict = {v:i for i,v in enumerate(realNibbles)} - -class ByteCodeBase(object): - pass +maxOpStack = 193 def buildOperatorDict(operatorList): @@ -117,6 +121,7 @@ (10, 'callsubr'), (11, 'return'), (14, 'endchar'), + (15, 'vsindex'), (16, 'blend'), (18, 'hstemhm'), (19, 'hintmask'), @@ -162,7 +167,6 @@ ((12, 37), 'flex1'), ] - def getIntEncoder(format): if format == "cff": fourByteOp = bytechr(29) @@ -213,7 +217,7 @@ def encodeFixed(f, pack=struct.pack): # For T2 only - return b"\xff" + pack(">l", int(round(f * 65536))) + return b"\xff" + pack(">l", round(f * 65536)) def encodeFloat(f): # For CFF only, used in cffLib @@ -242,261 +246,14 @@ class CharStringCompileError(Exception): pass -class T2CharString(ByteCodeBase): - - operandEncoding = t2OperandEncoding - operators, opcodes = 
buildOperatorDict(t2Operators) - - def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None): - if program is None: - program = [] - self.bytecode = bytecode - self.program = program - self.private = private - self.globalSubrs = globalSubrs if globalSubrs is not None else [] - - def __repr__(self): - if self.bytecode is None: - return "<%s (source) at %x>" % (self.__class__.__name__, id(self)) - else: - return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self)) - - def getIntEncoder(self): - return encodeIntT2 - - def getFixedEncoder(self): - return encodeFixed - - def decompile(self): - if not self.needsDecompilation(): - return - subrs = getattr(self.private, "Subrs", []) - decompiler = SimpleT2Decompiler(subrs, self.globalSubrs) - decompiler.execute(self) - - def draw(self, pen): - subrs = getattr(self.private, "Subrs", []) - extractor = T2OutlineExtractor(pen, subrs, self.globalSubrs, - self.private.nominalWidthX, self.private.defaultWidthX) - extractor.execute(self) - self.width = extractor.width - - def compile(self): - if self.bytecode is not None: - return - assert self.program, "illegal CharString: decompiled to empty program" - assert self.program[-1] in ("endchar", "return", "callsubr", "callgsubr", - "seac"), "illegal CharString" - bytecode = [] - opcodes = self.opcodes - program = self.program - encodeInt = self.getIntEncoder() - encodeFixed = self.getFixedEncoder() - i = 0 - end = len(program) - while i < end: - token = program[i] - i = i + 1 - tp = type(token) - if issubclass(tp, basestring): - try: - bytecode.extend(bytechr(b) for b in opcodes[token]) - except KeyError: - raise CharStringCompileError("illegal operator: %s" % token) - if token in ('hintmask', 'cntrmask'): - bytecode.append(program[i]) # hint mask - i = i + 1 - elif tp == int: - bytecode.append(encodeInt(token)) - elif tp == float: - bytecode.append(encodeFixed(token)) - else: - assert 0, "unsupported type: %s" % tp - try: - bytecode = 
bytesjoin(bytecode) - except TypeError: - print(bytecode) - raise - self.setBytecode(bytecode) - - def needsDecompilation(self): - return self.bytecode is not None - - def setProgram(self, program): - self.program = program - self.bytecode = None - - def setBytecode(self, bytecode): - self.bytecode = bytecode - self.program = None - - def getToken(self, index, - len=len, byteord=byteord, basestring=basestring, - isinstance=isinstance): - if self.bytecode is not None: - if index >= len(self.bytecode): - return None, 0, 0 - b0 = byteord(self.bytecode[index]) - index = index + 1 - handler = self.operandEncoding[b0] - token, index = handler(self, b0, self.bytecode, index) - else: - if index >= len(self.program): - return None, 0, 0 - token = self.program[index] - index = index + 1 - isOperator = isinstance(token, basestring) - return token, isOperator, index - - def getBytes(self, index, nBytes): - if self.bytecode is not None: - newIndex = index + nBytes - bytes = self.bytecode[index:newIndex] - index = newIndex - else: - bytes = self.program[index] - index = index + 1 - assert len(bytes) == nBytes - return bytes, index - - def handle_operator(self, operator): - return operator - - def toXML(self, xmlWriter): - from fontTools.misc.textTools import num2binary - if self.bytecode is not None: - xmlWriter.dumphex(self.bytecode) - else: - index = 0 - args = [] - while True: - token, isOperator, index = self.getToken(index) - if token is None: - break - if isOperator: - args = [str(arg) for arg in args] - if token in ('hintmask', 'cntrmask'): - hintMask, isOperator, index = self.getToken(index) - bits = [] - for byte in hintMask: - bits.append(num2binary(byteord(byte), 8)) - hintMask = strjoin(bits) - line = ' '.join(args + [token, hintMask]) - else: - line = ' '.join(args + [token]) - xmlWriter.write(line) - xmlWriter.newline() - args = [] - else: - args.append(token) - - def fromXML(self, name, attrs, content): - from fontTools.misc.textTools import binary2num, readHex - 
if attrs.get("raw"): - self.setBytecode(readHex(content)) - return - content = strjoin(content) - content = content.split() - program = [] - end = len(content) - i = 0 - while i < end: - token = content[i] - i = i + 1 - try: - token = int(token) - except ValueError: - try: - token = float(token) - except ValueError: - program.append(token) - if token in ('hintmask', 'cntrmask'): - mask = content[i] - maskBytes = b"" - for j in range(0, len(mask), 8): - maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8])) - program.append(maskBytes) - i = i + 1 - else: - program.append(token) - else: - program.append(token) - self.setProgram(program) - - -t1Operators = [ -# opcode name - (1, 'hstem'), - (3, 'vstem'), - (4, 'vmoveto'), - (5, 'rlineto'), - (6, 'hlineto'), - (7, 'vlineto'), - (8, 'rrcurveto'), - (9, 'closepath'), - (10, 'callsubr'), - (11, 'return'), - (13, 'hsbw'), - (14, 'endchar'), - (21, 'rmoveto'), - (22, 'hmoveto'), - (30, 'vhcurveto'), - (31, 'hvcurveto'), - ((12, 0), 'dotsection'), - ((12, 1), 'vstem3'), - ((12, 2), 'hstem3'), - ((12, 6), 'seac'), - ((12, 7), 'sbw'), - ((12, 12), 'div'), - ((12, 16), 'callothersubr'), - ((12, 17), 'pop'), - ((12, 33), 'setcurrentpoint'), -] - -class T1CharString(T2CharString): - - operandEncoding = t1OperandEncoding - operators, opcodes = buildOperatorDict(t1Operators) - - def __init__(self, bytecode=None, program=None, subrs=None): - if program is None: - program = [] - self.bytecode = bytecode - self.program = program - self.subrs = subrs - - def getIntEncoder(self): - return encodeIntT1 - - def getFixedEncoder(self): - def encodeFixed(value): - raise TypeError("Type 1 charstrings don't support floating point operands") - - def decompile(self): - if self.bytecode is None: - return - program = [] - index = 0 - while True: - token, isOperator, index = self.getToken(index) - if token is None: - break - program.append(token) - self.setProgram(program) - - def draw(self, pen): - extractor = T1OutlineExtractor(pen, self.subrs) 
- extractor.execute(self) - self.width = extractor.width - - class SimpleT2Decompiler(object): - def __init__(self, localSubrs, globalSubrs): + def __init__(self, localSubrs, globalSubrs, private=None): self.localSubrs = localSubrs self.localBias = calcSubrBias(localSubrs) self.globalSubrs = globalSubrs self.globalBias = calcSubrBias(globalSubrs) + self.private = private self.reset() def reset(self): @@ -504,6 +261,23 @@ self.operandStack = [] self.hintCount = 0 self.hintMaskBytes = 0 + self.numRegions = 0 + + def check_program(self, program): + if not hasattr(self, 'private') or self.private is None: + # Type 1 charstrings don't have self.private. + # Type2 CFF charstrings may have self.private == None. + # In both cases, they are not CFF2 charstrings + isCFF2 = False + else: + isCFF2 = self.private._isCFF2 + if isCFF2: + if program: + assert program[-1] not in ("seac",), "illegal CharString Terminator" + else: + assert program, "illegal CharString: decompiled to empty program" + assert program[-1] in ("endchar", "return", "callsubr", "callgsubr", + "seac"), "illegal CharString" def execute(self, charString): self.callingStack.append(charString) @@ -533,9 +307,7 @@ else: pushToStack(token) if needsDecompilation: - assert program, "illegal CharString: decompiled to empty program" - assert program[-1] in ("endchar", "return", "callsubr", "callgsubr", - "seac"), "illegal CharString" + self.check_program(program) charString.setProgram(program) del self.callingStack[-1] @@ -640,19 +412,100 @@ def op_roll(self, index): raise NotImplementedError -class T2OutlineExtractor(SimpleT2Decompiler): + # TODO(behdad): move to T2OutlineExtractor and add a 'setVariation' + # method that takes VarStoreData and a location + def op_blend(self, index): + if self.numRegions == 0: + self.numRegions = self.private.getNumRegions() + numBlends = self.pop() + numOps = numBlends * (self.numRegions + 1) + blendArgs = self.operandStack[-numOps:] + del self.operandStack[:-(numOps-numBlends)] # 
Leave the default operands on the stack. + + def op_vsindex(self, index): + vi = self.pop() + self.numRegions = self.private.getNumRegions(vi) + + +t1Operators = [ +# opcode name + (1, 'hstem'), + (3, 'vstem'), + (4, 'vmoveto'), + (5, 'rlineto'), + (6, 'hlineto'), + (7, 'vlineto'), + (8, 'rrcurveto'), + (9, 'closepath'), + (10, 'callsubr'), + (11, 'return'), + (13, 'hsbw'), + (14, 'endchar'), + (21, 'rmoveto'), + (22, 'hmoveto'), + (30, 'vhcurveto'), + (31, 'hvcurveto'), + ((12, 0), 'dotsection'), + ((12, 1), 'vstem3'), + ((12, 2), 'hstem3'), + ((12, 6), 'seac'), + ((12, 7), 'sbw'), + ((12, 12), 'div'), + ((12, 16), 'callothersubr'), + ((12, 17), 'pop'), + ((12, 33), 'setcurrentpoint'), +] + + +class T2WidthExtractor(SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX): + SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) + self.nominalWidthX = nominalWidthX + self.defaultWidthX = defaultWidthX + + def reset(self): + SimpleT2Decompiler.reset(self) + self.gotWidth = 0 + self.width = 0 + + def popallWidth(self, evenOdd=0): + args = self.popall() + if not self.gotWidth: + if evenOdd ^ (len(args) % 2): + self.width = self.nominalWidthX + args[0] + args = args[1:] + else: + self.width = self.defaultWidthX + self.gotWidth = 1 + return args + + def countHints(self): + args = self.popallWidth() + self.hintCount = self.hintCount + len(args) // 2 + + def op_rmoveto(self, index): + self.popallWidth() + + def op_hmoveto(self, index): + self.popallWidth(1) + + def op_vmoveto(self, index): + self.popallWidth(1) + + def op_endchar(self, index): + self.popallWidth() + + +class T2OutlineExtractor(T2WidthExtractor): def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX): - SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) + T2WidthExtractor.__init__( + self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX) self.pen = pen - self.nominalWidthX = nominalWidthX - self.defaultWidthX = 
defaultWidthX def reset(self): - SimpleT2Decompiler.reset(self) - self.hints = [] - self.gotWidth = 0 - self.width = 0 + T2WidthExtractor.reset(self) self.currentPoint = (0, 0) self.sawMoveTo = 0 @@ -687,21 +540,6 @@ # finishing a sub path. self.closePath() - def popallWidth(self, evenOdd=0): - args = self.popall() - if not self.gotWidth: - if evenOdd ^ (len(args) % 2): - self.width = self.nominalWidthX + args[0] - args = args[1:] - else: - self.width = self.defaultWidthX - self.gotWidth = 1 - return args - - def countHints(self): - args = self.popallWidth() - self.hintCount = self.hintCount + len(args) // 2 - # # hint operators # @@ -957,7 +795,6 @@ self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc)) return args - class T1OutlineExtractor(T2OutlineExtractor): def __init__(self, pen, subrs): @@ -1100,15 +937,262 @@ def op_vstem3(self, index): self.popall() # XXX +class T2CharString(object): + + operandEncoding = t2OperandEncoding + operators, opcodes = buildOperatorDict(t2Operators) + decompilerClass = SimpleT2Decompiler + outlineExtractor = T2OutlineExtractor + isCFF2 = False + + def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None): + if program is None: + program = [] + self.bytecode = bytecode + self.program = program + self.private = private + self.globalSubrs = globalSubrs if globalSubrs is not None else [] + + def __repr__(self): + if self.bytecode is None: + return "<%s (source) at %x>" % (self.__class__.__name__, id(self)) + else: + return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self)) + + def getIntEncoder(self): + return encodeIntT2 + + def getFixedEncoder(self): + return encodeFixed + + def decompile(self): + if not self.needsDecompilation(): + return + subrs = getattr(self.private, "Subrs", []) + decompiler = self.decompilerClass(subrs, self.globalSubrs, self.private) + decompiler.execute(self) + + def draw(self, pen): + subrs = getattr(self.private, "Subrs", []) + extractor = self.outlineExtractor(pen, subrs, 
self.globalSubrs, + self.private.nominalWidthX, self.private.defaultWidthX) + extractor.execute(self) + self.width = extractor.width + + def calcBounds(self): + boundsPen = BoundsPen(None) + self.draw(boundsPen) + return boundsPen.bounds + + def check_program(self, program, isCFF2=False): + if isCFF2: + if self.program: + assert self.program[-1] not in ("seac",), "illegal CFF2 CharString Termination" + else: + assert self.program, "illegal CharString: decompiled to empty program" + assert self.program[-1] in ("endchar", "return", "callsubr", "callgsubr", "seac"), "illegal CharString" + + def compile(self, isCFF2=False): + if self.bytecode is not None: + return + opcodes = self.opcodes + program = self.program + self.check_program(program, isCFF2=isCFF2) + bytecode = [] + encodeInt = self.getIntEncoder() + encodeFixed = self.getFixedEncoder() + i = 0 + end = len(program) + while i < end: + token = program[i] + i = i + 1 + tp = type(token) + if issubclass(tp, basestring): + try: + bytecode.extend(bytechr(b) for b in opcodes[token]) + except KeyError: + raise CharStringCompileError("illegal operator: %s" % token) + if token in ('hintmask', 'cntrmask'): + bytecode.append(program[i]) # hint mask + i = i + 1 + elif tp == int: + bytecode.append(encodeInt(token)) + elif tp == float: + bytecode.append(encodeFixed(token)) + else: + assert 0, "unsupported type: %s" % tp + try: + bytecode = bytesjoin(bytecode) + except TypeError: + log.error(bytecode) + raise + self.setBytecode(bytecode) + + if isCFF2: + # If present, remove return and endchar operators. 
+ if self.bytecode and (byteord(self.bytecode[-1]) in (11, 14)): + self.bytecode = self.bytecode[:-1] + + def needsDecompilation(self): + return self.bytecode is not None + + def setProgram(self, program): + self.program = program + self.bytecode = None + + def setBytecode(self, bytecode): + self.bytecode = bytecode + self.program = None + + def getToken(self, index, + len=len, byteord=byteord, basestring=basestring, + isinstance=isinstance): + if self.bytecode is not None: + if index >= len(self.bytecode): + return None, 0, 0 + b0 = byteord(self.bytecode[index]) + index = index + 1 + handler = self.operandEncoding[b0] + token, index = handler(self, b0, self.bytecode, index) + else: + if index >= len(self.program): + return None, 0, 0 + token = self.program[index] + index = index + 1 + isOperator = isinstance(token, basestring) + return token, isOperator, index + + def getBytes(self, index, nBytes): + if self.bytecode is not None: + newIndex = index + nBytes + bytes = self.bytecode[index:newIndex] + index = newIndex + else: + bytes = self.program[index] + index = index + 1 + assert len(bytes) == nBytes + return bytes, index + + def handle_operator(self, operator): + return operator + + def toXML(self, xmlWriter): + from fontTools.misc.textTools import num2binary + if self.bytecode is not None: + xmlWriter.dumphex(self.bytecode) + else: + index = 0 + args = [] + while True: + token, isOperator, index = self.getToken(index) + if token is None: + break + if isOperator: + args = [str(arg) for arg in args] + if token in ('hintmask', 'cntrmask'): + hintMask, isOperator, index = self.getToken(index) + bits = [] + for byte in hintMask: + bits.append(num2binary(byteord(byte), 8)) + hintMask = strjoin(bits) + line = ' '.join(args + [token, hintMask]) + else: + line = ' '.join(args + [token]) + xmlWriter.write(line) + xmlWriter.newline() + args = [] + else: + args.append(token) + if args: + if self.isCFF2: + # CFF2Subr's can have numeric arguments on the stack after the last 
operator. + args = [str(arg) for arg in args] + line = ' '.join(args) + xmlWriter.write(line) + else: + assert 0, "T2Charstring or Subr has items on the stack after last operator." + + def fromXML(self, name, attrs, content): + from fontTools.misc.textTools import binary2num, readHex + if attrs.get("raw"): + self.setBytecode(readHex(content)) + return + content = strjoin(content) + content = content.split() + program = [] + end = len(content) + i = 0 + while i < end: + token = content[i] + i = i + 1 + try: + token = int(token) + except ValueError: + try: + token = float(token) + except ValueError: + program.append(token) + if token in ('hintmask', 'cntrmask'): + mask = content[i] + maskBytes = b"" + for j in range(0, len(mask), 8): + maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8])) + program.append(maskBytes) + i = i + 1 + else: + program.append(token) + else: + program.append(token) + self.setProgram(program) + +class CFF2Subr(T2CharString): + isCFF2 = True + +class T1CharString(T2CharString): + + operandEncoding = t1OperandEncoding + operators, opcodes = buildOperatorDict(t1Operators) + + def __init__(self, bytecode=None, program=None, subrs=None): + if program is None: + program = [] + self.bytecode = bytecode + self.program = program + self.subrs = subrs + + def getIntEncoder(self): + return encodeIntT1 + + def getFixedEncoder(self): + def encodeFixed(value): + raise TypeError("Type 1 charstrings don't support floating point operands") + + def decompile(self): + if self.bytecode is None: + return + program = [] + index = 0 + while True: + token, isOperator, index = self.getToken(index) + if token is None: + break + program.append(token) + self.setProgram(program) + + def draw(self, pen): + extractor = T1OutlineExtractor(pen, self.subrs) + extractor.execute(self) + self.width = extractor.width -class DictDecompiler(ByteCodeBase): +class DictDecompiler(object): operandEncoding = cffDictOperandEncoding - def __init__(self, strings): + def __init__(self, 
strings, parent=None): self.stack = [] self.strings = strings self.dict = {} + self.parent = parent def getDict(self): assert len(self.stack) == 0, "non-empty stack" @@ -1125,7 +1209,6 @@ value, index = handler(self, b0, data, index) if value is not None: push(value) - def pop(self): value = self.stack[-1] del self.stack[-1] @@ -1138,7 +1221,7 @@ def handle_operator(self, operator): operator, argType = operator - if isinstance(argType, type(())): + if isinstance(argType, tuple): value = () for i in range(len(argType)-1, -1, -1): arg = argType[i] @@ -1147,20 +1230,74 @@ else: arghandler = getattr(self, "arg_" + argType) value = arghandler(operator) - self.dict[operator] = value + if operator == "blend": + self.stack.extend(value) + else: + self.dict[operator] = value def arg_number(self, name): - return self.pop() + if isinstance(self.stack[0], list): + out = self.arg_blend_number(self.stack) + else: + out = self.pop() + return out + + def arg_blend_number(self, name): + out = [] + blendArgs = self.pop() + numMasters = len(blendArgs) + out.append(blendArgs) + out.append("blend") + dummy = self.popall() + return blendArgs + def arg_SID(self, name): return self.strings[self.pop()] def arg_array(self, name): return self.popall() + def arg_blendList(self, name): + """ + There may be non-blend args at the top of the stack. We first calculate + where the blend args start in the stack. These are the last + numMasters*numBlends) +1 args. + The blend args starts with numMasters relative coordinate values, the BlueValues in the list from the default master font. This is followed by + numBlends list of values. Each of value in one of these lists is the + Variable Font delta for the matching region. + + We re-arrange this to be a list of numMaster entries. Each entry starts with the corresponding default font relative value, and is followed by + the delta values. We then convert the default values, the first item in each entry, to an absolute value. 
+ """ + vsindex = self.dict.get('vsindex', 0) + numMasters = self.parent.getNumRegions(vsindex) + 1 # only a PrivateDict has blended ops. + numBlends = self.pop() + args = self.popall() + numArgs = len(args) + # The spec says that there should be no non-blended Blue Values,. + assert(numArgs == numMasters * numBlends) + value = [None]*numBlends + numDeltas = numMasters-1 + i = 0 + prevVal = 0 + while i < numBlends: + newVal = args[i] + prevVal + prevVal = newVal + masterOffset = numBlends + (i* numDeltas) + blendList = [newVal] + args[masterOffset:masterOffset+numDeltas] + value[i] = blendList + i += 1 + return value + def arg_delta(self, name): + valueList = self.popall() out = [] - current = 0 - for v in self.popall(): - current = current + v - out.append(current) + if valueList and isinstance(valueList[0], list): + # arg_blendList() has already converted these to absolute values. + out = valueList + else: + current = 0 + for v in valueList: + current = current + v + out.append(current) return out diff -Nru fonttools-3.0/Lib/fontTools/misc/psLib.py fonttools-3.21.2/Lib/fontTools/misc/psLib.py --- fonttools-3.0/Lib/fontTools/misc/psLib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/psLib.py 2018-01-08 12:40:40.000000000 +0000 @@ -5,17 +5,20 @@ import re import collections from string import whitespace +import logging -ps_special = '()<>[]{}%' # / is one too, but we take care of that one differently +log = logging.getLogger(__name__) -skipwhiteRE = re.compile("[%s]*" % whitespace) -endofthingPat = "[^][(){}<>/%%%s]*" % whitespace +ps_special = b'()<>[]{}%' # / is one too, but we take care of that one differently + +skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"])) +endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"]) endofthingRE = re.compile(endofthingPat) -commentRE = re.compile("%[^\n\r]*") +commentRE = re.compile(b"%[^\n\r]*") # XXX This not entirely correct as it doesn't allow *nested* embedded parens: 
-stringPat = r""" +stringPat = br""" \( ( ( @@ -29,16 +32,44 @@ [^()]* \) """ -stringPat = "".join(stringPat.split()) +stringPat = b"".join(stringPat.split()) stringRE = re.compile(stringPat) -hexstringRE = re.compile("<[%s0-9A-Fa-f]*>" % whitespace) +hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"])) class PSTokenError(Exception): pass class PSError(Exception): pass -class PSTokenizer(BytesIO): +class PSTokenizer(object): + + def __init__(self, buf=b''): + # Force self.buf to be a byte string + buf = tobytes(buf) + self.buf = buf + self.len = len(buf) + self.pos = 0 + self.closed = False + + def read(self, n=-1): + """Read at most 'n' bytes from the buffer, or less if the read + hits EOF before obtaining 'n' bytes. + If 'n' is negative or omitted, read all data until EOF is reached. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + if n is None or n < 0: + newpos = self.len + else: + newpos = min(self.pos+n, self.len) + r = self.buf[self.pos:newpos] + self.pos = newpos + return r + + def close(self): + if not self.closed: + self.closed = True + del self.buf, self.pos def getnexttoken(self, # localize some stuff, for performance @@ -47,32 +78,30 @@ stringmatch=stringRE.match, hexstringmatch=hexstringRE.match, commentmatch=commentRE.match, - endmatch=endofthingRE.match, - whitematch=skipwhiteRE.match): + endmatch=endofthingRE.match): - _, nextpos = whitematch(self.buf, self.pos).span() - self.pos = nextpos + self.skipwhite() if self.pos >= self.len: return None, None pos = self.pos buf = self.buf - char = buf[pos] + char = bytechr(byteord(buf[pos])) if char in ps_special: - if char in '{}[]': + if char in b'{}[]': tokentype = 'do_special' token = char - elif char == '%': + elif char == b'%': tokentype = 'do_comment' _, nextpos = commentmatch(buf, pos).span() token = buf[pos:nextpos] - elif char == '(': + elif char == b'(': tokentype = 'do_string' m = stringmatch(buf, pos) if m is None: raise PSTokenError('bad string 
at character %d' % pos) _, nextpos = m.span() token = buf[pos:nextpos] - elif char == '<': + elif char == b'<': tokentype = 'do_hexstring' m = hexstringmatch(buf, pos) if m is None: @@ -82,7 +111,7 @@ else: raise PSTokenError('bad token at character %d' % pos) else: - if char == '/': + if char == b'/': tokentype = 'do_literal' m = endmatch(buf, pos+1) else: @@ -93,6 +122,7 @@ _, nextpos = m.span() token = buf[pos:nextpos] self.pos = pos + len(token) + token = tostr(token, encoding='ascii') return tokentype, token def skipwhite(self, whitematch=skipwhiteRE.match): @@ -101,7 +131,6 @@ def starteexec(self): self.pos = self.pos + 1 - #self.skipwhite() self.dirtybuf = self.buf[self.pos:] self.buf, R = eexec.decrypt(self.dirtybuf, 55665) self.len = len(self.buf) @@ -113,11 +142,6 @@ self.buf = self.dirtybuf del self.dirtybuf - def flush(self): - if self.buflist: - self.buf = self.buf + "".join(self.buflist) - self.buflist = [] - class PSInterpreter(PSOperators): @@ -157,7 +181,6 @@ try: while 1: tokentype, token = getnexttoken() - #print token if not token: break if tokentype: @@ -169,14 +192,18 @@ handle_object(object) tokenizer.close() self.tokenizer = None - finally: + except: if self.tokenizer is not None: - if 0: - print('ps error:\n- - - - - - -') - print(self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos]) - print('>>>') - print(self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50]) - print('- - - - - - -') + log.debug( + 'ps error:\n' + '- - - - - - -\n' + '%s\n' + '>>>\n' + '%s\n' + '- - - - - - -', + self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos], + self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50]) + raise def handle_object(self, object): if not (self.proclevel or object.literal or object.type == 'proceduretype'): @@ -339,12 +366,3 @@ rawfont = fontdir[fontNames[0]] interpreter.close() return unpack_item(rawfont) - - -if __name__ == "__main__": - import EasyDialogs - path = EasyDialogs.AskFileForOpen() - if path: - from 
fontTools import t1Lib - data, kind = t1Lib.read(path) - font = suckfont(data) diff -Nru fonttools-3.0/Lib/fontTools/misc/py23.py fonttools-3.21.2/Lib/fontTools/misc/py23.py --- fonttools-3.0/Lib/fontTools/misc/py23.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/py23.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,18 +3,32 @@ from __future__ import print_function, division, absolute_import import sys + +__all__ = ['basestring', 'unicode', 'unichr', 'byteord', 'bytechr', 'BytesIO', + 'StringIO', 'UnicodeIO', 'strjoin', 'bytesjoin', 'tobytes', 'tostr', + 'tounicode', 'Tag', 'open', 'range', 'xrange', 'round', 'Py23Error'] + + +class Py23Error(NotImplementedError): + pass + + +PY3 = sys.version_info[0] == 3 +PY2 = sys.version_info[0] == 2 + + try: - basestring + basestring = basestring except NameError: basestring = str try: - unicode + unicode = unicode except NameError: unicode = str try: - unichr + unichr = unichr if sys.maxunicode < 0x10FFFF: # workarounds for Python 2 "narrow" builds with UCS2-only support. @@ -156,6 +170,379 @@ return tobytes(joiner).join(tobytes(item) for item in iterable) +import os +import io as _io + +try: + from msvcrt import setmode as _setmode +except ImportError: + _setmode = None # only available on the Windows platform + + +def open(file, mode='r', buffering=-1, encoding=None, errors=None, + newline=None, closefd=True, opener=None): + """ Wrapper around `io.open` that bridges the differences between Python 2 + and Python 3's built-in `open` functions. In Python 2, `io.open` is a + backport of Python 3's `open`, whereas in Python 3, it is an alias of the + built-in `open` function. + + One difference is that the 'opener' keyword argument is only supported in + Python 3. Here we pass the value of 'opener' only when it is not None. + This causes Python 2 to raise TypeError, complaining about the number of + expected arguments, so it must be avoided in py2 or py2-3 contexts. 
+ + Another difference between 2 and 3, this time on Windows, has to do with + opening files by name or by file descriptor. + + On the Windows C runtime, the 'O_BINARY' flag is defined which disables + the newlines translation ('\r\n' <=> '\n') when reading/writing files. + On both Python 2 and 3 this flag is always set when opening files by name. + This way, the newlines translation at the MSVCRT level doesn't interfere + with the Python io module's own newlines translation. + + However, when opening files via fd, on Python 2 the fd is simply copied, + regardless of whether it has the 'O_BINARY' flag set or not. + This becomes a problem in the case of stdout, stdin, and stderr, because on + Windows these are opened in text mode by default (ie. don't have the + O_BINARY flag set). + + On Python 3, this issue has been fixed, and all fds are now opened in + binary mode on Windows, including standard streams. Similarly here, I use + the `_setmode` function to ensure that integer file descriptors are + O_BINARY'ed before I pass them on to io.open. + + For more info, see: https://bugs.python.org/issue10841 + """ + if isinstance(file, int): + # the 'file' argument is an integer file descriptor + fd = file + if fd < 0: + raise ValueError('negative file descriptor') + if _setmode: + # `_setmode` function sets the line-end translation and returns the + # value of the previous mode. AFAIK there's no `_getmode`, so to + # check if the previous mode already had the bit set, I fist need + # to duplicate the file descriptor, set the binary flag on the copy + # and check the returned value. 
+ fdcopy = os.dup(fd) + current_mode = _setmode(fdcopy, os.O_BINARY) + if not (current_mode & os.O_BINARY): + # the binary mode was not set: use the file descriptor's copy + file = fdcopy + if closefd: + # close the original file descriptor + os.close(fd) + else: + # ensure the copy is closed when the file object is closed + closefd = True + else: + # original file descriptor already had binary flag, close copy + os.close(fdcopy) + + if opener is not None: + # "opener" is not supported on Python 2, use it at your own risk! + return _io.open( + file, mode, buffering, encoding, errors, newline, closefd, + opener=opener) + else: + return _io.open( + file, mode, buffering, encoding, errors, newline, closefd) + + +# always use iterator for 'range' on both py 2 and 3 +try: + range = xrange +except NameError: + range = range + +def xrange(*args, **kwargs): + raise Py23Error("'xrange' is not defined. Use 'range' instead.") + + +import math as _math + +try: + isclose = _math.isclose +except AttributeError: + # math.isclose() was only added in Python 3.5 + + _isinf = _math.isinf + _fabs = _math.fabs + + def isclose(a, b, rel_tol=1e-09, abs_tol=0): + """ + Python 2 implementation of Python 3.5 math.isclose() + https://hg.python.org/cpython/file/v3.5.2/Modules/mathmodule.c#l1993 + """ + # sanity check on the inputs + if rel_tol < 0 or abs_tol < 0: + raise ValueError("tolerances must be non-negative") + # short circuit exact equality -- needed to catch two infinities of + # the same sign. And perhaps speeds things up a bit sometimes. + if a == b: + return True + # This catches the case of two infinities of opposite sign, or + # one infinity and one finite number. Two infinities of opposite + # sign would otherwise have an infinite relative tolerance. + # Two infinities of the same sign are caught by the equality check + # above. 
+ if _isinf(a) or _isinf(b): + return False + # Cast to float to allow decimal.Decimal arguments + if not isinstance(a, float): + a = float(a) + if not isinstance(b, float): + b = float(b) + # now do the regular computation + # this is essentially the "weak" test from the Boost library + diff = _fabs(b - a) + result = ((diff <= _fabs(rel_tol * a)) or + (diff <= _fabs(rel_tol * b)) or + (diff <= abs_tol)) + return result + + +import decimal as _decimal + +if PY3: + def round2(number, ndigits=None): + """ + Implementation of Python 2 built-in round() function. + + Rounds a number to a given precision in decimal digits (default + 0 digits). The result is a floating point number. Values are rounded + to the closest multiple of 10 to the power minus ndigits; if two + multiples are equally close, rounding is done away from 0. + + ndigits may be negative. + + See Python 2 documentation: + https://docs.python.org/2/library/functions.html?highlight=round#round + """ + if ndigits is None: + ndigits = 0 + + if ndigits < 0: + exponent = 10 ** (-ndigits) + quotient, remainder = divmod(number, exponent) + if remainder >= exponent//2 and number >= 0: + quotient += 1 + return float(quotient * exponent) + else: + exponent = _decimal.Decimal('10') ** (-ndigits) + + d = _decimal.Decimal.from_float(number).quantize( + exponent, rounding=_decimal.ROUND_HALF_UP) + + return float(d) + + if sys.version_info[:2] >= (3, 6): + # in Python 3.6, 'round3' is an alias to the built-in 'round' + round = round3 = round + else: + # in Python3 < 3.6 we need work around the inconsistent behavior of + # built-in round(), whereby floats accept a second None argument, + # while integers raise TypeError. 
See https://bugs.python.org/issue27936 + _round = round + + def round3(number, ndigits=None): + return _round(number) if ndigits is None else _round(number, ndigits) + + round = round3 + +else: + # in Python 2, 'round2' is an alias to the built-in 'round' and + # 'round' is shadowed by 'round3' + round2 = round + + def round3(number, ndigits=None): + """ + Implementation of Python 3 built-in round() function. + + Rounds a number to a given precision in decimal digits (default + 0 digits). This returns an int when ndigits is omitted or is None, + otherwise the same type as the number. + + Values are rounded to the closest multiple of 10 to the power minus + ndigits; if two multiples are equally close, rounding is done toward + the even choice (aka "Banker's Rounding"). For example, both round(0.5) + and round(-0.5) are 0, and round(1.5) is 2. + + ndigits may be negative. + + See Python 3 documentation: + https://docs.python.org/3/library/functions.html?highlight=round#round + + Derived from python-future: + https://github.com/PythonCharmers/python-future/blob/master/src/future/builtins/newround.py + """ + if ndigits is None: + ndigits = 0 + # return an int when called with one argument + totype = int + # shortcut if already an integer, or a float with no decimal digits + inumber = totype(number) + if inumber == number: + return inumber + else: + # return the same type as the number, when called with two arguments + totype = type(number) + + m = number * (10 ** ndigits) + # if number is half-way between two multiples, and the mutliple that is + # closer to zero is even, we use the (slow) pure-Python implementation + if isclose(m % 1, .5) and int(m) % 2 == 0: + if ndigits < 0: + exponent = 10 ** (-ndigits) + quotient, remainder = divmod(number, exponent) + half = exponent//2 + if remainder > half or (remainder == half and quotient % 2 != 0): + quotient += 1 + d = quotient * exponent + else: + exponent = _decimal.Decimal('10') ** (-ndigits) if ndigits != 0 else 1 + + d 
= _decimal.Decimal.from_float(number).quantize( + exponent, rounding=_decimal.ROUND_HALF_EVEN) + else: + # else we use the built-in round() as it produces the same results + d = round2(number, ndigits) + + return totype(d) + + round = round3 + + +import logging + + +class _Logger(logging.Logger): + """ Add support for 'lastResort' handler introduced in Python 3.2. """ + + def callHandlers(self, record): + # this is the same as Python 3.5's logging.Logger.callHandlers + c = self + found = 0 + while c: + for hdlr in c.handlers: + found = found + 1 + if record.levelno >= hdlr.level: + hdlr.handle(record) + if not c.propagate: + c = None # break out + else: + c = c.parent + if (found == 0): + if logging.lastResort: + if record.levelno >= logging.lastResort.level: + logging.lastResort.handle(record) + elif logging.raiseExceptions and not self.manager.emittedNoHandlerWarning: + sys.stderr.write("No handlers could be found for logger" + " \"%s\"\n" % self.name) + self.manager.emittedNoHandlerWarning = True + + +class _StderrHandler(logging.StreamHandler): + """ This class is like a StreamHandler using sys.stderr, but always uses + whatever sys.stderr is currently set to rather than the value of + sys.stderr at handler construction time. + """ + def __init__(self, level=logging.NOTSET): + """ + Initialize the handler. + """ + logging.Handler.__init__(self, level) + + @property + def stream(self): + # the try/execept avoids failures during interpreter shutdown, when + # globals are set to None + try: + return sys.stderr + except AttributeError: + return __import__('sys').stderr + + +if not hasattr(logging, 'lastResort'): + # for Python pre-3.2, we need to define the "last resort" handler used when + # clients don't explicitly configure logging (in Python 3.2 and above this is + # already defined). The handler prints the bare message to sys.stderr, only + # for events of severity WARNING or greater. + # To obtain the pre-3.2 behaviour, you can set logging.lastResort to None. 
+ # https://docs.python.org/3.5/howto/logging.html#what-happens-if-no-configuration-is-provided + logging.lastResort = _StderrHandler(logging.WARNING) + # Also, we need to set the Logger class to one which supports the last resort + # handler. All new loggers instantiated after this call will use the custom + # logger class (the already existing ones, like the 'root' logger, will not) + logging.setLoggerClass(_Logger) + + +try: + from types import SimpleNamespace +except ImportError: + class SimpleNamespace(object): + """ + A backport of Python 3.3's ``types.SimpleNamespace``. + """ + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + def __repr__(self): + keys = sorted(self.__dict__) + items = ("{0}={1!r}".format(k, self.__dict__[k]) for k in keys) + return "{0}({1})".format(type(self).__name__, ", ".join(items)) + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + +if sys.version_info[:2] > (3, 4): + from contextlib import redirect_stdout, redirect_stderr +else: + # `redirect_stdout` was added with python3.4, while `redirect_stderr` + # with python3.5. For simplicity, I redefine both for any versions + # less than or equal to 3.4. + # The code below is copied from: + # https://github.com/python/cpython/blob/57161aa/Lib/contextlib.py + + class _RedirectStream(object): + + _stream = None + + def __init__(self, new_target): + self._new_target = new_target + # We use a list of old targets to make this CM re-entrant + self._old_targets = [] + + def __enter__(self): + self._old_targets.append(getattr(sys, self._stream)) + setattr(sys, self._stream, self._new_target) + return self._new_target + + def __exit__(self, exctype, excinst, exctb): + setattr(sys, self._stream, self._old_targets.pop()) + + + class redirect_stdout(_RedirectStream): + """Context manager for temporarily redirecting stdout to another file. 
+ # How to send help() to stderr + with redirect_stdout(sys.stderr): + help(dir) + # How to write help() to a file + with open('help.txt', 'w') as f: + with redirect_stdout(f): + help(pow) + """ + + _stream = "stdout" + + + class redirect_stderr(_RedirectStream): + """Context manager for temporarily redirecting stderr to another file.""" + + _stream = "stderr" + + if __name__ == "__main__": import doctest, sys sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/misc/sstruct.py fonttools-3.21.2/Lib/fontTools/misc/sstruct.py --- fonttools-3.0/Lib/fontTools/misc/sstruct.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/sstruct.py 2018-01-08 12:40:40.000000000 +0000 @@ -133,6 +133,7 @@ _formatcache = {} def getformat(fmt): + fmt = tostr(fmt, encoding="ascii") try: formatstring, names, fixes = _formatcache[fmt] except KeyError: diff -Nru fonttools-3.0/Lib/fontTools/misc/symfont.py fonttools-3.21.2/Lib/fontTools/misc/symfont.py --- fonttools-3.0/Lib/fontTools/misc/symfont.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/symfont.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,196 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen +from functools import partial +from itertools import count +import sympy as sp +import sys + +n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic + +t, x, y = sp.symbols('t x y', real=True) +c = sp.symbols('c', real=False) # Complex representation instead of x/y + +X = tuple(sp.symbols('x:%d'%(n+1), real=True)) +Y = tuple(sp.symbols('y:%d'%(n+1), real=True)) +P = tuple(zip(*(sp.symbols('p:%d[%s]'%(n+1,w), real=True) for w in '01'))) +C = tuple(sp.symbols('c:%d'%(n+1), real=False)) + +# Cubic Bernstein basis functions +BinomialCoefficient = [(1, 0)] +for i in range(1, n+1): + last = BinomialCoefficient[-1] + this = tuple(last[j-1]+last[j] for j in 
range(len(last)))+(0,) + BinomialCoefficient.append(this) +BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient) +del last, this + +BernsteinPolynomial = tuple( + tuple(c * t**i * (1-t)**(n-i) for i,c in enumerate(coeffs)) + for n,coeffs in enumerate(BinomialCoefficient)) + +BezierCurve = tuple( + tuple(sum(P[i][j]*bernstein for i,bernstein in enumerate(bernsteins)) + for j in range(2)) + for n,bernsteins in enumerate(BernsteinPolynomial)) +BezierCurveC = tuple( + sum(C[i]*bernstein for i,bernstein in enumerate(bernsteins)) + for n,bernsteins in enumerate(BernsteinPolynomial)) + + +def green(f, curveXY): + f = -sp.integrate(sp.sympify(f), y) + f = f.subs({x:curveXY[0], y:curveXY[1]}) + f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1)) + return f + + +class _BezierFuncsLazy(dict): + + def __init__(self, symfunc): + self._symfunc = symfunc + self._bezfuncs = {} + + def __missing__(self, i): + args = ['p%d'%d for d in range(i+1)] + f = green(self._symfunc, BezierCurve[i]) + f = sp.gcd_terms(f.collect(sum(P,()))) # Optimize + return sp.lambdify(args, f) + +class GreenPen(BasePen): + + _BezierFuncs = {} + + @classmethod + def _getGreenBezierFuncs(celf, func): + funcstr = str(func) + if not funcstr in celf._BezierFuncs: + celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func) + return celf._BezierFuncs[funcstr] + + def __init__(self, func, glyphset=None): + BasePen.__init__(self, glyphset) + self._funcs = self._getGreenBezierFuncs(func) + self.value = 0 + + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _endPath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + # Green theorem is not defined on open contours. 
+ raise NotImplementedError + + def _lineTo(self, p1): + p0 = self._getCurrentPoint() + self.value += self._funcs[1](p0, p1) + + def _qCurveToOne(self, p1, p2): + p0 = self._getCurrentPoint() + self.value += self._funcs[2](p0, p1, p2) + + def _curveToOne(self, p1, p2, p3): + p0 = self._getCurrentPoint() + self.value += self._funcs[3](p0, p1, p2, p3) + +# Sample pens. +# Do not use this in real code. +# Use fontTools.pens.momentsPen.MomentsPen instead. +AreaPen = partial(GreenPen, func=1) +MomentXPen = partial(GreenPen, func=x) +MomentYPen = partial(GreenPen, func=y) +MomentXXPen = partial(GreenPen, func=x*x) +MomentYYPen = partial(GreenPen, func=y*y) +MomentXYPen = partial(GreenPen, func=x*y) + + +def printGreenPen(penName, funcs, file=sys.stdout): + + print( +'''from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + +class %s(BasePen): + + def __init__(self, glyphset=None): + BasePen.__init__(self, glyphset) +'''%penName, file=file) + for name,f in funcs: + print(' self.%s = 0' % name, file=file) + print(''' + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _endPath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + # Green theorem is not defined on open contours. 
+ raise NotImplementedError +''', end='', file=file) + + for n in (1, 2, 3): + + if n == 1: + print(''' + def _lineTo(self, p1): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 +''', file=file) + elif n == 2: + print(''' + def _qCurveToOne(self, p1, p2): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 +''', file=file) + elif n == 3: + print(''' + def _curveToOne(self, p1, p2, p3): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 + x3,y3 = p3 +''', file=file) + subs = {P[i][j]: [X, Y][j][i] for i in range(n+1) for j in range(2)} + greens = [green(f, BezierCurve[n]) for name,f in funcs] + greens = [sp.gcd_terms(f.collect(sum(P,()))) for f in greens] # Optimize + greens = [f.subs(subs) for f in greens] # Convert to p to x/y + defs, exprs = sp.cse(greens, + optimizations='basic', + symbols=(sp.Symbol('r%d'%i) for i in count())) + for name,value in defs: + print(' %s = %s' % (name, value), file=file) + print(file=file) + for name,value in zip([f[0] for f in funcs], exprs): + print(' self.%s += %s' % (name, value), file=file) + + print(''' +if __name__ == '__main__': + from fontTools.misc.symfont import x, y, printGreenPen + printGreenPen('%s', ['''%penName, file=file) + for name,f in funcs: + print(" ('%s', %s)," % (name, str(f)), file=file) + print(' ])', file=file) + + +if __name__ == '__main__': + pen = AreaPen() + pen.moveTo((100,100)) + pen.lineTo((100,200)) + pen.lineTo((200,200)) + pen.curveTo((200,250),(300,300),(250,350)) + pen.lineTo((200,100)) + pen.closePath() + print(pen.value) diff -Nru fonttools-3.0/Lib/fontTools/misc/testTools.py fonttools-3.21.2/Lib/fontTools/misc/testTools.py --- fonttools-3.0/Lib/fontTools/misc/testTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/testTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,142 @@ +"""Helpers for writing unit tests.""" + +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +import collections 
+from fontTools.misc.py23 import * +from fontTools.misc.xmlWriter import XMLWriter + + +def parseXML(xmlSnippet): + """Parses a snippet of XML. + + Input can be either a single string (unicode or UTF-8 bytes), or a + a sequence of strings. + + The result is in the same format that would be returned by + XMLReader, but the parser imposes no constraints on the root + element so it can be called on small snippets of TTX files. + """ + # To support snippets with multiple elements, we add a fake root. + reader = TestXMLReader_() + xml = b"" + if isinstance(xmlSnippet, bytes): + xml += xmlSnippet + elif isinstance(xmlSnippet, unicode): + xml += tobytes(xmlSnippet, 'utf-8') + elif isinstance(xmlSnippet, collections.Iterable): + xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet) + else: + raise TypeError("expected string or sequence of strings; found %r" + % type(xmlSnippet).__name__) + xml += b"" + reader.parser.Parse(xml, 0) + return reader.root[2] + + +class FakeFont: + def __init__(self, glyphs): + self.glyphOrder_ = glyphs + self.reverseGlyphOrderDict_ = {g:i for i,g in enumerate(glyphs)} + self.lazy = False + self.tables = {} + + def __getitem__(self, tag): + return self.tables[tag] + + def __setitem__(self, tag, table): + self.tables[tag] = table + + def get(self, tag, default=None): + return self.tables.get(tag, default) + + def getGlyphID(self, name): + return self.reverseGlyphOrderDict_[name] + + def getGlyphName(self, glyphID): + if glyphID < len(self.glyphOrder_): + return self.glyphOrder_[glyphID] + else: + return "glyph%.5d" % glyphID + + def getGlyphOrder(self): + return self.glyphOrder_ + + def getReverseGlyphMap(self): + return self.reverseGlyphOrderDict_ + + +class TestXMLReader_(object): + def __init__(self): + from xml.parsers.expat import ParserCreate + self.parser = ParserCreate() + self.parser.StartElementHandler = self.startElement_ + self.parser.EndElementHandler = self.endElement_ + self.parser.CharacterDataHandler = self.addCharacterData_ + 
self.root = None + self.stack = [] + + def startElement_(self, name, attrs): + element = (name, attrs, []) + if self.stack: + self.stack[-1][2].append(element) + else: + self.root = element + self.stack.append(element) + + def endElement_(self, name): + self.stack.pop() + + def addCharacterData_(self, data): + self.stack[-1][2].append(data) + + +def makeXMLWriter(newlinestr='\n'): + # don't write OS-specific new lines + writer = XMLWriter(BytesIO(), newlinestr=newlinestr) + # erase XML declaration + writer.file.seek(0) + writer.file.truncate() + return writer + + +def getXML(func, ttFont=None): + """Call the passed toXML function and return the written content as a + list of lines (unicode strings). + Result is stripped of XML declaration and OS-specific newline characters. + """ + writer = makeXMLWriter() + func(writer, ttFont) + xml = writer.file.getvalue().decode("utf-8") + # toXML methods must always end with a writer.newline() + assert xml.endswith("\n") + return xml.splitlines() + + +class MockFont(object): + """A font-like object that automatically adds any looked up glyphname + to its glyphOrder.""" + + def __init__(self): + self._glyphOrder = ['.notdef'] + class AllocatingDict(dict): + def __missing__(reverseDict, key): + self._glyphOrder.append(key) + gid = len(reverseDict) + reverseDict[key] = gid + return gid + self._reverseGlyphOrder = AllocatingDict({'.notdef': 0}) + self.lazy = False + + def getGlyphID(self, glyph, requireReal=None): + gid = self._reverseGlyphOrder[glyph] + return gid + + def getReverseGlyphMap(self): + return self._reverseGlyphOrder + + def getGlyphName(self, gid): + return self._glyphOrder[gid] + + def getGlyphOrder(self): + return self._glyphOrder diff -Nru fonttools-3.0/Lib/fontTools/misc/textTools.py fonttools-3.21.2/Lib/fontTools/misc/textTools.py --- fonttools-3.0/Lib/fontTools/misc/textTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/textTools.py 2018-01-08 12:40:40.000000000 +0000 @@ 
-91,11 +91,12 @@ """ data = tobytes(data) if size > 1: - while len(data) % size != 0: - data += b"\0" + remainder = len(data) % size + if remainder: + data += b"\0" * (size - remainder) return data if __name__ == "__main__": - import doctest + import doctest, sys sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/misc/timeTools.py fonttools-3.21.2/Lib/fontTools/misc/timeTools.py --- fonttools-3.0/Lib/fontTools/misc/timeTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/timeTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,20 +3,62 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +import os import time import calendar epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0)) +DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] +MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] + + +def asctime(t=None): + """ + Convert a tuple or struct_time representing a time as returned by gmtime() + or localtime() to a 24-character string of the following form: + + >>> asctime(time.gmtime(0)) + 'Thu Jan 1 00:00:00 1970' + + If t is not provided, the current time as returned by localtime() is used. + Locale information is not used by asctime(). + + This is meant to normalise the output of the built-in time.asctime() across + different platforms and Python versions. + In Python 3.x, the day of the month is right-justified, whereas on Windows + Python 2.7 it is padded with zeros. 
+ + See https://github.com/behdad/fonttools/issues/455 + """ + if t is None: + t = time.localtime() + s = "%s %s %2s %s" % ( + DAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday, + time.strftime("%H:%M:%S %Y", t)) + return s + + def timestampToString(value): - return time.asctime(time.gmtime(max(0, value + epoch_diff))) + return asctime(time.gmtime(max(0, value + epoch_diff))) def timestampFromString(value): return calendar.timegm(time.strptime(value)) - epoch_diff def timestampNow(): + # https://reproducible-builds.org/specs/source-date-epoch/ + source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH") + if source_date_epoch is not None: + return int(source_date_epoch) - epoch_diff return int(time.time() - epoch_diff) def timestampSinceEpoch(value): return int(value - epoch_diff) + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/misc/transform.py fonttools-3.21.2/Lib/fontTools/misc/transform.py --- fonttools-3.0/Lib/fontTools/misc/transform.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/transform.py 2018-01-08 12:40:40.000000000 +0000 @@ -320,6 +320,27 @@ """ return hash(self.__affine) + def __bool__(self): + """Returns True if transform is not identity, False otherwise. 
+ >>> bool(Identity) + False + >>> bool(Transform()) + False + >>> bool(Scale(1.)) + False + >>> bool(Scale(2)) + True + >>> bool(Offset()) + False + >>> bool(Offset(0)) + False + >>> bool(Offset(2)) + True + """ + return self.__affine != Identity.__affine + + __nonzero__ = __bool__ + def __repr__(self): return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) \ + self.__affine) diff -Nru fonttools-3.0/Lib/fontTools/misc/xmlReader.py fonttools-3.21.2/Lib/fontTools/misc/xmlReader.py --- fonttools-3.0/Lib/fontTools/misc/xmlReader.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/xmlReader.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,9 +3,13 @@ from fontTools import ttLib from fontTools.misc.textTools import safeEval from fontTools.ttLib.tables.DefaultTable import DefaultTable +import sys import os +import logging +log = logging.getLogger(__name__) + class TTXParseError(Exception): pass BUFSIZE = 0x4000 @@ -13,22 +17,42 @@ class XMLReader(object): - def __init__(self, fileName, ttFont, progress=None, quiet=False): + def __init__(self, fileOrPath, ttFont, progress=None, quiet=None): + if fileOrPath == '-': + fileOrPath = sys.stdin + if not hasattr(fileOrPath, "read"): + self.file = open(fileOrPath, "rb") + self._closeStream = True + else: + # assume readable file object + self.file = fileOrPath + self._closeStream = False self.ttFont = ttFont - self.fileName = fileName self.progress = progress - self.quiet = quiet + if quiet is not None: + from fontTools.misc.loggingTools import deprecateArgument + deprecateArgument("quiet", "configure logging instead") + self.quiet = quiet self.root = None self.contentStack = [] self.stackSize = 0 - def read(self): + def read(self, rootless=False): + if rootless: + self.stackSize += 1 if self.progress: - import stat - self.progress.set(0, os.stat(self.fileName)[stat.ST_SIZE] // 100 or 1) - file = open(self.fileName, 'rb') - self._parseFile(file) - file.close() + self.file.seek(0, 2) + fileSize = 
self.file.tell() + self.progress.set(0, fileSize // 100 or 1) + self.file.seek(0) + self._parseFile(self.file) + if self._closeStream: + self.close() + if rootless: + self.stackSize -= 1 + + def close(self): + self.file.close() def _parseFile(self, file): from xml.parsers.expat import ParserCreate @@ -63,20 +87,22 @@ elif stackSize == 1: subFile = attrs.get("src") if subFile is not None: - subFile = os.path.join(os.path.dirname(self.fileName), subFile) - subReader = XMLReader(subFile, self.ttFont, self.progress, self.quiet) + if hasattr(self.file, 'name'): + # if file has a name, get its parent directory + dirname = os.path.dirname(self.file.name) + else: + # else fall back to using the current working directory + dirname = os.getcwd() + subFile = os.path.join(dirname, subFile) + subReader = XMLReader(subFile, self.ttFont, self.progress) subReader.read() self.contentStack.append([]) return tag = ttLib.xmlToTag(name) msg = "Parsing '%s' table..." % tag if self.progress: - self.progress.setlabel(msg) - elif self.ttFont.verbose: - ttLib.debugmsg(msg) - else: - if not self.quiet: - print(msg) + self.progress.setLabel(msg) + log.info(msg) if tag == "GlyphOrder": tableClass = ttLib.GlyphOrder elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])): diff -Nru fonttools-3.0/Lib/fontTools/misc/xmlReader_test.py fonttools-3.21.2/Lib/fontTools/misc/xmlReader_test.py --- fonttools-3.0/Lib/fontTools/misc/xmlReader_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/xmlReader_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -import os -import unittest -from fontTools.ttLib import TTFont -from .xmlReader import XMLReader -import tempfile - - -class TestXMLReader(unittest.TestCase): - - def test_decode_utf8(self): - - class DebugXMLReader(XMLReader): - - def __init__(self, 
fileName, ttFont, progress=None, quiet=False): - super(DebugXMLReader, self).__init__( - fileName, ttFont, progress, quiet) - self.contents = [] - - def _endElementHandler(self, name): - if self.stackSize == 3: - name, attrs, content = self.root - self.contents.append(content) - super(DebugXMLReader, self)._endElementHandler(name) - - expected = 'fôôbär' - data = '''\ - - - - - %s - - - -''' % expected - - with tempfile.NamedTemporaryFile(delete=False) as tmp: - tmp.write(data.encode('utf-8')) - reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) - reader.read() - os.remove(tmp.name) - content = strjoin(reader.contents[0]).strip() - self.assertEqual(expected, content) - - def test_normalise_newlines(self): - - class DebugXMLReader(XMLReader): - - def __init__(self, fileName, ttFont, progress=None, quiet=False): - super(DebugXMLReader, self).__init__( - fileName, ttFont, progress, quiet) - self.newlines = [] - - def _characterDataHandler(self, data): - self.newlines.extend([c for c in data if c in ('\r', '\n')]) - - # notice how when CR is escaped, it is not normalised by the XML parser - data = ( - '\r' # \r -> \n - ' \r\n' # \r\n -> \n - ' a line of text\n' # \n - ' escaped CR and unix newline \n' # \n -> \r\n - ' escaped CR and macintosh newline \r' # \r -> \r\n - ' escaped CR and windows newline \r\n' # \r\n -> \r\n - ' \n' # \n - '') - with tempfile.NamedTemporaryFile(delete=False) as tmp: - tmp.write(data.encode('utf-8')) - reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) - reader.read() - os.remove(tmp.name) - expected = ['\n'] * 3 + ['\r', '\n'] * 3 + ['\n'] - self.assertEqual(expected, reader.newlines) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/misc/xmlWriter.py fonttools-3.21.2/Lib/fontTools/misc/xmlWriter.py --- fonttools-3.0/Lib/fontTools/misc/xmlWriter.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/xmlWriter.py 2018-01-08 12:40:40.000000000 +0000 @@ -11,7 +11,8 @@ 
class XMLWriter(object): - def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8"): + def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8", + newlinestr=None): if encoding.lower().replace('-','').replace('_','') != 'utf8': raise Exception('Only UTF-8 encoding is supported.') if fileOrPath == '-': @@ -33,7 +34,10 @@ self.file.write(tounicode('')) self.totype = tounicode self.indentwhite = self.totype(indentwhite) - self.newlinestr = self.totype(os.linesep) + if newlinestr is None: + self.newlinestr = self.totype(os.linesep) + else: + self.newlinestr = self.totype(newlinestr) self.indentlevel = 0 self.stack = [] self.needindent = 1 diff -Nru fonttools-3.0/Lib/fontTools/misc/xmlWriter_test.py fonttools-3.21.2/Lib/fontTools/misc/xmlWriter_test.py --- fonttools-3.0/Lib/fontTools/misc/xmlWriter_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/misc/xmlWriter_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import os -import unittest -from .xmlWriter import XMLWriter - -linesep = tobytes(os.linesep) -HEADER = b'' + linesep - -class TestXMLWriter(unittest.TestCase): - - def test_comment_escaped(self): - writer = XMLWriter(BytesIO()) - writer.comment("This&that are ") - self.assertEqual(HEADER + b"", writer.file.getvalue()) - - def test_comment_multiline(self): - writer = XMLWriter(BytesIO()) - writer.comment("Hello world\nHow are you?") - self.assertEqual(HEADER + b"", - writer.file.getvalue()) - - def test_encoding_default(self): - writer = XMLWriter(BytesIO()) - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_utf8(self): - # https://github.com/behdad/fonttools/issues/246 - writer = XMLWriter(BytesIO(), encoding="utf8") - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_UTF_8(self): - # 
https://github.com/behdad/fonttools/issues/246 - writer = XMLWriter(BytesIO(), encoding="UTF-8") - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_UTF8(self): - # https://github.com/behdad/fonttools/issues/246 - writer = XMLWriter(BytesIO(), encoding="UTF8") - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_other(self): - self.assertRaises(Exception, XMLWriter, BytesIO(), - encoding="iso-8859-1") - - def test_write(self): - writer = XMLWriter(BytesIO()) - writer.write("foo&bar") - self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue()) - - def test_indent_dedent(self): - writer = XMLWriter(BytesIO()) - writer.write("foo") - writer.newline() - writer.indent() - writer.write("bar") - writer.newline() - writer.dedent() - writer.write("baz") - self.assertEqual(HEADER + bytesjoin(["foo", " bar", "baz"], linesep), - writer.file.getvalue()) - - def test_writecdata(self): - writer = XMLWriter(BytesIO()) - writer.writecdata("foo&bar") - self.assertEqual(HEADER + b"", writer.file.getvalue()) - - def test_simpletag(self): - writer = XMLWriter(BytesIO()) - writer.simpletag("tag", a="1", b="2") - self.assertEqual(HEADER + b'', writer.file.getvalue()) - - def test_begintag_endtag(self): - writer = XMLWriter(BytesIO()) - writer.begintag("tag", attr="value") - writer.write("content") - writer.endtag("tag") - self.assertEqual(HEADER + b'content', writer.file.getvalue()) - - def test_dumphex(self): - writer = XMLWriter(BytesIO()) - writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.") - self.assertEqual(HEADER + bytesjoin([ - "54797065 20697320 61206265 61757469", - "66756c20 67726f75 70206f66 206c6574", - "74657273 2c206e6f 74206120 67726f75", - "70206f66 20626561 75746966 756c206c", - "65747465 72732e ", ""], joiner=linesep), writer.file.getvalue()) - - def test_stringifyattrs(self): - writer = XMLWriter(BytesIO()) - expected = ' attr="0"' - self.assertEqual(expected, 
writer.stringifyattrs(attr=0)) - self.assertEqual(expected, writer.stringifyattrs(attr=b'0')) - self.assertEqual(expected, writer.stringifyattrs(attr='0')) - self.assertEqual(expected, writer.stringifyattrs(attr=u'0')) - - def test_carriage_return_escaped(self): - writer = XMLWriter(BytesIO()) - writer.write("two lines\r\nseparated by Windows line endings") - self.assertEqual( - HEADER + b'two lines \nseparated by Windows line endings', - writer.file.getvalue()) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/mtiLib/__init__.py fonttools-3.21.2/Lib/fontTools/mtiLib/__init__.py --- fonttools-3.0/Lib/fontTools/mtiLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/mtiLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1196 @@ +#!/usr/bin/python + +# FontDame-to-FontTools for OpenType Layout tables +# +# Source language spec is available at: +# http://monotype.github.io/OpenType_Table_Source/otl_source.html +# https://github.com/Monotype/OpenType_Table_Source/ + +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.ttLib.tables._c_m_a_p import cmap_classes +from fontTools.ttLib.tables import otTables as ot +from fontTools.ttLib.tables.otBase import ValueRecord, valueRecordFormatDict +from fontTools.otlLib import builder as otl +from contextlib import contextmanager +from operator import setitem +import logging + +class MtiLibError(Exception): pass +class ReferenceNotFoundError(MtiLibError): pass +class FeatureNotFoundError(ReferenceNotFoundError): pass +class LookupNotFoundError(ReferenceNotFoundError): pass + + +log = logging.getLogger("fontTools.mtiLib") + + +def makeGlyph(s): + if s[:2] in ['U ', 'u ']: + return ttLib.TTFont._makeGlyphName(int(s[2:], 16)) + elif s[:2] == '# ': + return "glyph%.5d" % int(s[2:]) + assert s.find(' ') < 0, "Space found 
in glyph name: %s" % s + assert s, "Glyph name is empty" + return s + +def makeGlyphs(l): + return [makeGlyph(g) for g in l] + +def mapLookup(sym, mapping): + # Lookups are addressed by name. So resolved them using a map if available. + # Fallback to parsing as lookup index if a map isn't provided. + if mapping is not None: + try: + idx = mapping[sym] + except KeyError: + raise LookupNotFoundError(sym) + else: + idx = int(sym) + return idx + +def mapFeature(sym, mapping): + # Features are referenced by index according the spec. So, if symbol is an + # integer, use it directly. Otherwise look up in the map if provided. + try: + idx = int(sym) + except ValueError: + try: + idx = mapping[sym] + except KeyError: + raise FeatureNotFoundError(sym) + return idx + +def setReference(mapper, mapping, sym, setter, collection, key): + try: + mapped = mapper(sym, mapping) + except ReferenceNotFoundError as e: + try: + if mapping is not None: + mapping.addDeferredMapping(lambda ref: setter(collection, key, ref), sym, e) + return + except AttributeError: + pass + raise + setter(collection, key, mapped) + +class DeferredMapping(dict): + + def __init__(self): + self._deferredMappings = [] + + def addDeferredMapping(self, setter, sym, e): + log.debug("Adding deferred mapping for symbol '%s' %s", sym, type(e).__name__) + self._deferredMappings.append((setter,sym, e)) + + def applyDeferredMappings(self): + for setter,sym,e in self._deferredMappings: + log.debug("Applying deferred mapping for symbol '%s' %s", sym, type(e).__name__) + try: + mapped = self[sym] + except KeyError: + raise e + setter(mapped) + log.debug("Set to %s", mapped) + self._deferredMappings = [] + + +def parseScriptList(lines, featureMap=None): + self = ot.ScriptList() + records = [] + with lines.between('script table'): + for line in lines: + while len(line) < 4: + line.append('') + scriptTag, langSysTag, defaultFeature, features = line + log.debug("Adding script %s language-system %s", scriptTag, langSysTag) + + 
langSys = ot.LangSys() + langSys.LookupOrder = None + if defaultFeature: + setReference(mapFeature, featureMap, defaultFeature, setattr, langSys, 'ReqFeatureIndex') + else: + langSys.ReqFeatureIndex = 0xFFFF + syms = stripSplitComma(features) + langSys.FeatureIndex = theList = [3] * len(syms) + for i,sym in enumerate(syms): + setReference(mapFeature, featureMap, sym, setitem, theList, i) + langSys.FeatureCount = len(langSys.FeatureIndex) + + script = [s for s in records if s.ScriptTag == scriptTag] + if script: + script = script[0].Script + else: + scriptRec = ot.ScriptRecord() + scriptRec.ScriptTag = scriptTag + scriptRec.Script = ot.Script() + records.append(scriptRec) + script = scriptRec.Script + script.DefaultLangSys = None + script.LangSysRecord = [] + script.LangSysCount = 0 + + if langSysTag == 'default': + script.DefaultLangSys = langSys + else: + langSysRec = ot.LangSysRecord() + langSysRec.LangSysTag = langSysTag + ' '*(4 - len(langSysTag)) + langSysRec.LangSys = langSys + script.LangSysRecord.append(langSysRec) + script.LangSysCount = len(script.LangSysRecord) + + for script in records: + script.Script.LangSysRecord = sorted(script.Script.LangSysRecord, key=lambda rec: rec.LangSysTag) + self.ScriptRecord = sorted(records, key=lambda rec: rec.ScriptTag) + self.ScriptCount = len(self.ScriptRecord) + return self + +def parseFeatureList(lines, lookupMap=None, featureMap=None): + self = ot.FeatureList() + self.FeatureRecord = [] + with lines.between('feature table'): + for line in lines: + name, featureTag, lookups = line + if featureMap is not None: + assert name not in featureMap, "Duplicate feature name: %s" % name + featureMap[name] = len(self.FeatureRecord) + # If feature name is integer, make sure it matches its index. 
+ try: + assert int(name) == len(self.FeatureRecord), "%d %d" % (name, len(self.FeatureRecord)) + except ValueError: + pass + featureRec = ot.FeatureRecord() + featureRec.FeatureTag = featureTag + featureRec.Feature = ot.Feature() + self.FeatureRecord.append(featureRec) + feature = featureRec.Feature + feature.FeatureParams = None + syms = stripSplitComma(lookups) + feature.LookupListIndex = theList = [None] * len(syms) + for i,sym in enumerate(syms): + setReference(mapLookup, lookupMap, sym, setitem, theList, i) + feature.LookupCount = len(feature.LookupListIndex) + + self.FeatureCount = len(self.FeatureRecord) + return self + +def parseLookupFlags(lines): + flags = 0 + filterset = None + allFlags = [ + 'righttoleft', + 'ignorebaseglyphs', + 'ignoreligatures', + 'ignoremarks', + 'markattachmenttype', + 'markfiltertype', + ] + while lines.peeks()[0].lower() in allFlags: + line = next(lines) + flag = { + 'righttoleft': 0x0001, + 'ignorebaseglyphs': 0x0002, + 'ignoreligatures': 0x0004, + 'ignoremarks': 0x0008, + }.get(line[0].lower()) + if flag: + assert line[1].lower() in ['yes', 'no'], line[1] + if line[1].lower() == 'yes': + flags |= flag + continue + if line[0].lower() == 'markattachmenttype': + flags |= int(line[1]) << 8 + continue + if line[0].lower() == 'markfiltertype': + flags |= 0x10 + filterset = int(line[1]) + return flags, filterset + +def parseSingleSubst(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + assert len(line) == 2, line + line = makeGlyphs(line) + mapping[line[0]] = line[1] + return otl.buildSingleSubstSubtable(mapping) + +def parseMultiple(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + line = makeGlyphs(line) + mapping[line[0]] = line[1:] + return otl.buildMultipleSubstSubtable(mapping) + +def parseAlternate(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + line = makeGlyphs(line) + mapping[line[0]] = line[1:] + return otl.buildAlternateSubstSubtable(mapping) + +def 
parseLigature(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + assert len(line) >= 2, line + line = makeGlyphs(line) + mapping[tuple(line[1:])] = line[0] + return otl.buildLigatureSubstSubtable(mapping) + +def parseSinglePos(lines, font, _lookupMap=None): + values = {} + for line in lines: + assert len(line) == 3, line + w = line[0].title().replace(' ', '') + assert w in valueRecordFormatDict + g = makeGlyph(line[1]) + v = int(line[2]) + if g not in values: + values[g] = ValueRecord() + assert not hasattr(values[g], w), (g, w) + setattr(values[g], w, v) + return otl.buildSinglePosSubtable(values, font.getReverseGlyphMap()) + +def parsePair(lines, font, _lookupMap=None): + self = ot.PairPos() + self.ValueFormat1 = self.ValueFormat2 = 0 + typ = lines.peeks()[0].split()[0].lower() + if typ in ('left', 'right'): + self.Format = 1 + values = {} + for line in lines: + assert len(line) == 4, line + side = line[0].split()[0].lower() + assert side in ('left', 'right'), side + what = line[0][len(side):].title().replace(' ', '') + mask = valueRecordFormatDict[what][0] + glyph1, glyph2 = makeGlyphs(line[1:3]) + value = int(line[3]) + if not glyph1 in values: values[glyph1] = {} + if not glyph2 in values[glyph1]: values[glyph1][glyph2] = (ValueRecord(),ValueRecord()) + rec2 = values[glyph1][glyph2] + if side == 'left': + self.ValueFormat1 |= mask + vr = rec2[0] + else: + self.ValueFormat2 |= mask + vr = rec2[1] + assert not hasattr(vr, what), (vr, what) + setattr(vr, what, value) + self.Coverage = makeCoverage(set(values.keys()), font) + self.PairSet = [] + for glyph1 in self.Coverage.glyphs: + values1 = values[glyph1] + pairset = ot.PairSet() + records = pairset.PairValueRecord = [] + for glyph2 in sorted(values1.keys(), key=font.getGlyphID): + values2 = values1[glyph2] + pair = ot.PairValueRecord() + pair.SecondGlyph = glyph2 + pair.Value1 = values2[0] + pair.Value2 = values2[1] if self.ValueFormat2 else None + records.append(pair) + pairset.PairValueCount 
= len(pairset.PairValueRecord) + self.PairSet.append(pairset) + self.PairSetCount = len(self.PairSet) + elif typ.endswith('class'): + self.Format = 2 + classDefs = [None, None] + while lines.peeks()[0].endswith("class definition begin"): + typ = lines.peek()[0][:-len("class definition begin")].lower() + idx,klass = { + 'first': (0,ot.ClassDef1), + 'second': (1,ot.ClassDef2), + }[typ] + assert classDefs[idx] is None + classDefs[idx] = parseClassDef(lines, font, klass=klass) + self.ClassDef1, self.ClassDef2 = classDefs + self.Class1Count, self.Class2Count = (1+max(c.classDefs.values()) for c in classDefs) + self.Class1Record = [ot.Class1Record() for i in range(self.Class1Count)] + for rec1 in self.Class1Record: + rec1.Class2Record = [ot.Class2Record() for j in range(self.Class2Count)] + for rec2 in rec1.Class2Record: + rec2.Value1 = ValueRecord() + rec2.Value2 = ValueRecord() + for line in lines: + assert len(line) == 4, line + side = line[0].split()[0].lower() + assert side in ('left', 'right'), side + what = line[0][len(side):].title().replace(' ', '') + mask = valueRecordFormatDict[what][0] + class1, class2, value = (int(x) for x in line[1:4]) + rec2 = self.Class1Record[class1].Class2Record[class2] + if side == 'left': + self.ValueFormat1 |= mask + vr = rec2.Value1 + else: + self.ValueFormat2 |= mask + vr = rec2.Value2 + assert not hasattr(vr, what), (vr, what) + setattr(vr, what, value) + for rec1 in self.Class1Record: + for rec2 in rec1.Class2Record: + rec2.Value1 = ValueRecord(self.ValueFormat1, rec2.Value1) + rec2.Value2 = ValueRecord(self.ValueFormat2, rec2.Value2) \ + if self.ValueFormat2 else None + + self.Coverage = makeCoverage(set(self.ClassDef1.classDefs.keys()), font) + else: + assert 0, typ + return self + +def parseKernset(lines, font, _lookupMap=None): + typ = lines.peeks()[0].split()[0].lower() + if typ in ('left', 'right'): + with lines.until(("firstclass definition begin", "secondclass definition begin")): + return parsePair(lines, font) + return 
parsePair(lines, font) + +def makeAnchor(data, klass=ot.Anchor): + assert len(data) <= 2 + anchor = klass() + anchor.Format = 1 + anchor.XCoordinate,anchor.YCoordinate = intSplitComma(data[0]) + if len(data) > 1 and data[1] != '': + anchor.Format = 2 + anchor.AnchorPoint = int(data[1]) + return anchor + +def parseCursive(lines, font, _lookupMap=None): + records = {} + for line in lines: + assert len(line) in [3,4], line + idx,klass = { + 'entry': (0,ot.EntryAnchor), + 'exit': (1,ot.ExitAnchor), + }[line[0]] + glyph = makeGlyph(line[1]) + if glyph not in records: + records[glyph] = [None,None] + assert records[glyph][idx] is None, (glyph, idx) + records[glyph][idx] = makeAnchor(line[2:], klass) + return otl.buildCursivePosSubtable(records, font.getReverseGlyphMap()) + +def makeMarkRecords(data, coverage, c): + records = [] + for glyph in coverage.glyphs: + klass, anchor = data[glyph] + record = c.MarkRecordClass() + record.Class = klass + setattr(record, c.MarkAnchor, anchor) + records.append(record) + return records + +def makeBaseRecords(data, coverage, c, classCount): + records = [] + idx = {} + for glyph in coverage.glyphs: + idx[glyph] = len(records) + record = c.BaseRecordClass() + anchors = [None] * classCount + setattr(record, c.BaseAnchor, anchors) + records.append(record) + for (glyph,klass),anchor in data.items(): + record = records[idx[glyph]] + anchors = getattr(record, c.BaseAnchor) + assert anchors[klass] is None, (glyph, klass) + anchors[klass] = anchor + return records + +def makeLigatureRecords(data, coverage, c, classCount): + records = [None] * len(coverage.glyphs) + idx = {g:i for i,g in enumerate(coverage.glyphs)} + + for (glyph,klass,compIdx,compCount),anchor in data.items(): + record = records[idx[glyph]] + if record is None: + record = records[idx[glyph]] = ot.LigatureAttach() + record.ComponentCount = compCount + record.ComponentRecord = [ot.ComponentRecord() for i in range(compCount)] + for compRec in record.ComponentRecord: + 
compRec.LigatureAnchor = [None] * classCount + assert record.ComponentCount == compCount, (glyph, record.ComponentCount, compCount) + + anchors = record.ComponentRecord[compIdx - 1].LigatureAnchor + assert anchors[klass] is None, (glyph, compIdx, klass) + anchors[klass] = anchor + return records + +def parseMarkToSomething(lines, font, c): + self = c.Type() + self.Format = 1 + markData = {} + baseData = {} + Data = { + 'mark': (markData, c.MarkAnchorClass), + 'base': (baseData, c.BaseAnchorClass), + 'ligature': (baseData, c.BaseAnchorClass), + } + maxKlass = 0 + for line in lines: + typ = line[0] + assert typ in ('mark', 'base', 'ligature') + glyph = makeGlyph(line[1]) + data, anchorClass = Data[typ] + extraItems = 2 if typ == 'ligature' else 0 + extras = tuple(int(i) for i in line[2:2+extraItems]) + klass = int(line[2+extraItems]) + anchor = makeAnchor(line[3+extraItems:], anchorClass) + if typ == 'mark': + key,value = glyph,(klass,anchor) + else: + key,value = ((glyph,klass)+extras),anchor + assert key not in data, key + data[key] = value + maxKlass = max(maxKlass, klass) + + # Mark + markCoverage = makeCoverage(set(markData.keys()), font, c.MarkCoverageClass) + markArray = c.MarkArrayClass() + markRecords = makeMarkRecords(markData, markCoverage, c) + setattr(markArray, c.MarkRecord, markRecords) + setattr(markArray, c.MarkCount, len(markRecords)) + setattr(self, c.MarkCoverage, markCoverage) + setattr(self, c.MarkArray, markArray) + self.ClassCount = maxKlass + 1 + + # Base + self.classCount = 0 if not baseData else 1+max(k[1] for k,v in baseData.items()) + baseCoverage = makeCoverage(set([k[0] for k in baseData.keys()]), font, c.BaseCoverageClass) + baseArray = c.BaseArrayClass() + if c.Base == 'Ligature': + baseRecords = makeLigatureRecords(baseData, baseCoverage, c, self.classCount) + else: + baseRecords = makeBaseRecords(baseData, baseCoverage, c, self.classCount) + setattr(baseArray, c.BaseRecord, baseRecords) + setattr(baseArray, c.BaseCount, 
len(baseRecords)) + setattr(self, c.BaseCoverage, baseCoverage) + setattr(self, c.BaseArray, baseArray) + + return self + +class MarkHelper(object): + def __init__(self): + for Which in ('Mark', 'Base'): + for What in ('Coverage', 'Array', 'Count', 'Record', 'Anchor'): + key = Which + What + if Which == 'Mark' and What in ('Count', 'Record', 'Anchor'): + value = key + else: + value = getattr(self, Which) + What + if value == 'LigatureRecord': + value = 'LigatureAttach' + setattr(self, key, value) + if What != 'Count': + klass = getattr(ot, value) + setattr(self, key+'Class', klass) + +class MarkToBaseHelper(MarkHelper): + Mark = 'Mark' + Base = 'Base' + Type = ot.MarkBasePos +class MarkToMarkHelper(MarkHelper): + Mark = 'Mark1' + Base = 'Mark2' + Type = ot.MarkMarkPos +class MarkToLigatureHelper(MarkHelper): + Mark = 'Mark' + Base = 'Ligature' + Type = ot.MarkLigPos + +def parseMarkToBase(lines, font, _lookupMap=None): + return parseMarkToSomething(lines, font, MarkToBaseHelper()) +def parseMarkToMark(lines, font, _lookupMap=None): + return parseMarkToSomething(lines, font, MarkToMarkHelper()) +def parseMarkToLigature(lines, font, _lookupMap=None): + return parseMarkToSomething(lines, font, MarkToLigatureHelper()) + +def stripSplitComma(line): + return [s.strip() for s in line.split(',')] if line else [] + +def intSplitComma(line): + return [int(i) for i in line.split(',')] if line else [] + +# Copied from fontTools.subset +class ContextHelper(object): + def __init__(self, klassName, Format): + if klassName.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klassName.startswith('Chain'): + Chain = 'Chain' + InputIdx = 1 + DataLen = 3 + else: + Chain = '' + InputIdx = 0 + DataLen = 1 + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + self.InputIdx = InputIdx + self.DataLen = DataLen + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + Coverage = lambda 
r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(None,) + ChainContextData = lambda r:(None, None, None) + SetContextData = None + SetChainContextData = None + RuleData = lambda r:(r.Input,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d): + (r.Input,) = d + (r.GlyphCount,) = (len(x)+1 for x in d) + def ChainSetRuleData(r, d): + (r.Backtrack, r.Input, r.LookAhead) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2])) + elif Format == 2: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(r.ClassDef,) + ChainContextData = lambda r:(r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) + def SetContextData(r, d): + (r.ClassDef,) = d + def SetChainContextData(r, d): + (r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) = d + RuleData = lambda r:(r.Class,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d): + (r.Class,) = d + (r.GlyphCount,) = (len(x)+1 for x in d) + def ChainSetRuleData(r, d): + (r.Backtrack, r.Input, r.LookAhead) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2])) + elif Format == 3: + Coverage = lambda r: r.Coverage[0] + ChainCoverage = lambda r: r.InputCoverage[0] + ContextData = None + ChainContextData = None + SetContextData = None + SetChainContextData = None + RuleData = lambda r: r.Coverage + ChainRuleData = lambda r:(r.BacktrackCoverage + + r.InputCoverage + + r.LookAheadCoverage) + def SetRuleData(r, d): + (r.Coverage,) = d + (r.GlyphCount,) = (len(x) for x in d) + def ChainSetRuleData(r, d): + (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(x) for x in d) + else: + assert 0, "unknown format: %s" % Format + + if Chain: + self.Coverage = ChainCoverage + self.ContextData = 
ChainContextData + self.SetContextData = SetChainContextData + self.RuleData = ChainRuleData + self.SetRuleData = ChainSetRuleData + else: + self.Coverage = Coverage + self.ContextData = ContextData + self.SetContextData = SetContextData + self.RuleData = RuleData + self.SetRuleData = SetRuleData + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleCount = ChainTyp+'RuleCount' + self.RuleSet = ChainTyp+'RuleSet' + self.RuleSetCount = ChainTyp+'RuleSetCount' + self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleCount = ChainTyp+'ClassRuleCount' + self.RuleSet = ChainTyp+'ClassSet' + self.RuleSetCount = ChainTyp+'ClassSetCount' + self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c + else (set(glyphs) if r == 0 else set())) + + self.ClassDef = 'InputClassDef' if Chain else 'ClassDef' + self.ClassDefIndex = 1 if Chain else 0 + self.Input = 'Input' if Chain else 'Class' + +def parseLookupRecords(items, klassName, lookupMap=None): + klass = getattr(ot, klassName) + lst = [] + for item in items: + rec = klass() + item = stripSplitComma(item) + assert len(item) == 2, item + idx = int(item[0]) + assert idx > 0, idx + rec.SequenceIndex = idx - 1 + setReference(mapLookup, lookupMap, item[1], setattr, rec, 'LookupListIndex') + lst.append(rec) + return lst + +def makeClassDef(classDefs, font, klass=ot.Coverage): + if not classDefs: return None + self = klass() + self.classDefs = dict(classDefs) + return self + +def parseClassDef(lines, font, klass=ot.ClassDef): + classDefs = {} + with lines.between('class definition'): + for line in lines: + glyph = makeGlyph(line[0]) + assert glyph not in classDefs, glyph + classDefs[glyph] = int(line[1]) + return makeClassDef(classDefs, font, klass) + +def makeCoverage(glyphs, font, klass=ot.Coverage): + if not glyphs: return None + if isinstance(glyphs, set): + glyphs = sorted(glyphs) + coverage = klass() + coverage.glyphs = 
sorted(set(glyphs), key=font.getGlyphID) + return coverage + +def parseCoverage(lines, font, klass=ot.Coverage): + glyphs = [] + with lines.between('coverage definition'): + for line in lines: + glyphs.append(makeGlyph(line[0])) + return makeCoverage(glyphs, font, klass) + +def bucketizeRules(self, c, rules, bucketKeys): + buckets = {} + for seq,recs in rules: + buckets.setdefault(seq[c.InputIdx][0], []).append((tuple(s[1 if i==c.InputIdx else 0:] for i,s in enumerate(seq)), recs)) + + rulesets = [] + for firstGlyph in bucketKeys: + if firstGlyph not in buckets: + rulesets.append(None) + continue + thisRules = [] + for seq,recs in buckets[firstGlyph]: + rule = getattr(ot, c.Rule)() + c.SetRuleData(rule, seq) + setattr(rule, c.Type+'Count', len(recs)) + setattr(rule, c.LookupRecord, recs) + thisRules.append(rule) + + ruleset = getattr(ot, c.RuleSet)() + setattr(ruleset, c.Rule, thisRules) + setattr(ruleset, c.RuleCount, len(thisRules)) + rulesets.append(ruleset) + + setattr(self, c.RuleSet, rulesets) + setattr(self, c.RuleSetCount, len(rulesets)) + +def parseContext(lines, font, Type, lookupMap=None): + self = getattr(ot, Type)() + typ = lines.peeks()[0].split()[0].lower() + if typ == 'glyph': + self.Format = 1 + log.debug("Parsing %s format %s", Type, self.Format) + c = ContextHelper(Type, self.Format) + rules = [] + for line in lines: + assert line[0].lower() == 'glyph', line[0] + while len(line) < 1+c.DataLen: line.append('') + seq = tuple(makeGlyphs(stripSplitComma(i)) for i in line[1:1+c.DataLen]) + recs = parseLookupRecords(line[1+c.DataLen:], c.LookupRecord, lookupMap) + rules.append((seq, recs)) + + firstGlyphs = set(seq[c.InputIdx][0] for seq,recs in rules) + self.Coverage = makeCoverage(firstGlyphs, font) + bucketizeRules(self, c, rules, self.Coverage.glyphs) + elif typ.endswith('class'): + self.Format = 2 + log.debug("Parsing %s format %s", Type, self.Format) + c = ContextHelper(Type, self.Format) + classDefs = [None] * c.DataLen + while 
lines.peeks()[0].endswith("class definition begin"): + typ = lines.peek()[0][:-len("class definition begin")].lower() + idx,klass = { + 1: { + '': (0,ot.ClassDef), + }, + 3: { + 'backtrack': (0,ot.BacktrackClassDef), + '': (1,ot.InputClassDef), + 'lookahead': (2,ot.LookAheadClassDef), + }, + }[c.DataLen][typ] + assert classDefs[idx] is None, idx + classDefs[idx] = parseClassDef(lines, font, klass=klass) + c.SetContextData(self, classDefs) + rules = [] + for line in lines: + assert line[0].lower().startswith('class'), line[0] + while len(line) < 1+c.DataLen: line.append('') + seq = tuple(intSplitComma(i) for i in line[1:1+c.DataLen]) + recs = parseLookupRecords(line[1+c.DataLen:], c.LookupRecord, lookupMap) + rules.append((seq, recs)) + firstClasses = set(seq[c.InputIdx][0] for seq,recs in rules) + firstGlyphs = set(g for g,c in classDefs[c.InputIdx].classDefs.items() if c in firstClasses) + self.Coverage = makeCoverage(firstGlyphs, font) + bucketizeRules(self, c, rules, range(max(firstClasses) + 1)) + elif typ.endswith('coverage'): + self.Format = 3 + log.debug("Parsing %s format %s", Type, self.Format) + c = ContextHelper(Type, self.Format) + coverages = tuple([] for i in range(c.DataLen)) + while lines.peeks()[0].endswith("coverage definition begin"): + typ = lines.peek()[0][:-len("coverage definition begin")].lower() + idx,klass = { + 1: { + '': (0,ot.Coverage), + }, + 3: { + 'backtrack': (0,ot.BacktrackCoverage), + 'input': (1,ot.InputCoverage), + 'lookahead': (2,ot.LookAheadCoverage), + }, + }[c.DataLen][typ] + coverages[idx].append(parseCoverage(lines, font, klass=klass)) + c.SetRuleData(self, coverages) + lines = list(lines) + assert len(lines) == 1 + line = lines[0] + assert line[0].lower() == 'coverage', line[0] + recs = parseLookupRecords(line[1:], c.LookupRecord, lookupMap) + setattr(self, c.Type+'Count', len(recs)) + setattr(self, c.LookupRecord, recs) + else: + assert 0, typ + return self + +def parseContextSubst(lines, font, lookupMap=None): + return 
parseContext(lines, font, "ContextSubst", lookupMap=lookupMap) +def parseContextPos(lines, font, lookupMap=None): + return parseContext(lines, font, "ContextPos", lookupMap=lookupMap) +def parseChainedSubst(lines, font, lookupMap=None): + return parseContext(lines, font, "ChainContextSubst", lookupMap=lookupMap) +def parseChainedPos(lines, font, lookupMap=None): + return parseContext(lines, font, "ChainContextPos", lookupMap=lookupMap) + +def parseReverseChainedSubst(lines, font, _lookupMap=None): + self = ot.ReverseChainSingleSubst() + self.Format = 1 + coverages = ([], []) + while lines.peeks()[0].endswith("coverage definition begin"): + typ = lines.peek()[0][:-len("coverage definition begin")].lower() + idx,klass = { + 'backtrack': (0,ot.BacktrackCoverage), + 'lookahead': (1,ot.LookAheadCoverage), + }[typ] + coverages[idx].append(parseCoverage(lines, font, klass=klass)) + self.BacktrackCoverage = coverages[0] + self.BacktrackGlyphCount = len(self.BacktrackCoverage) + self.LookAheadCoverage = coverages[1] + self.LookAheadGlyphCount = len(self.LookAheadCoverage) + mapping = {} + for line in lines: + assert len(line) == 2, line + line = makeGlyphs(line) + mapping[line[0]] = line[1] + self.Coverage = makeCoverage(set(mapping.keys()), font) + self.Substitute = [mapping[k] for k in self.Coverage.glyphs] + self.GlyphCount = len(self.Substitute) + return self + +def parseLookup(lines, tableTag, font, lookupMap=None): + line = lines.expect('lookup') + _, name, typ = line + log.debug("Parsing lookup type %s %s", typ, name) + lookup = ot.Lookup() + lookup.LookupFlag,filterset = parseLookupFlags(lines) + if filterset is not None: + lookup.MarkFilteringSet = filterset + lookup.LookupType, parseLookupSubTable = { + 'GSUB': { + 'single': (1, parseSingleSubst), + 'multiple': (2, parseMultiple), + 'alternate': (3, parseAlternate), + 'ligature': (4, parseLigature), + 'context': (5, parseContextSubst), + 'chained': (6, parseChainedSubst), + 'reversechained':(8, 
parseReverseChainedSubst), + }, + 'GPOS': { + 'single': (1, parseSinglePos), + 'pair': (2, parsePair), + 'kernset': (2, parseKernset), + 'cursive': (3, parseCursive), + 'mark to base': (4, parseMarkToBase), + 'mark to ligature':(5, parseMarkToLigature), + 'mark to mark': (6, parseMarkToMark), + 'context': (7, parseContextPos), + 'chained': (8, parseChainedPos), + }, + }[tableTag][typ] + + with lines.until('lookup end'): + subtables = [] + + while lines.peek(): + with lines.until(('% subtable', 'subtable end')): + while lines.peek(): + subtable = parseLookupSubTable(lines, font, lookupMap) + assert lookup.LookupType == subtable.LookupType + subtables.append(subtable) + if lines.peeks()[0] in ('% subtable', 'subtable end'): + next(lines) + lines.expect('lookup end') + + lookup.SubTable = subtables + lookup.SubTableCount = len(lookup.SubTable) + if lookup.SubTableCount is 0: + # Remove this return when following is fixed: + # https://github.com/fonttools/fonttools/issues/789 + return None + return lookup + +def parseGSUBGPOS(lines, font, tableTag): + container = ttLib.getTableClass(tableTag)() + lookupMap = DeferredMapping() + featureMap = DeferredMapping() + assert tableTag in ('GSUB', 'GPOS') + log.debug("Parsing %s", tableTag) + self = getattr(ot, tableTag)() + self.Version = 0x00010000 + fields = { + 'script table begin': + ('ScriptList', + lambda lines: parseScriptList (lines, featureMap)), + 'feature table begin': + ('FeatureList', + lambda lines: parseFeatureList (lines, lookupMap, featureMap)), + 'lookup': + ('LookupList', + None), + } + for attr,parser in fields.values(): + setattr(self, attr, None) + while lines.peek() is not None: + typ = lines.peek()[0].lower() + if typ not in fields: + log.debug('Skipping %s', lines.peek()) + next(lines) + continue + attr,parser = fields[typ] + if typ == 'lookup': + if self.LookupList is None: + self.LookupList = ot.LookupList() + self.LookupList.Lookup = [] + _, name, _ = lines.peek() + lookup = parseLookup(lines, 
tableTag, font, lookupMap) + if lookupMap is not None: + assert name not in lookupMap, "Duplicate lookup name: %s" % name + lookupMap[name] = len(self.LookupList.Lookup) + else: + assert int(name) == len(self.LookupList.Lookup), "%d %d" % (name, len(self.Lookup)) + self.LookupList.Lookup.append(lookup) + else: + assert getattr(self, attr) is None, attr + setattr(self, attr, parser(lines)) + if self.LookupList: + self.LookupList.LookupCount = len(self.LookupList.Lookup) + if lookupMap is not None: + lookupMap.applyDeferredMappings() + if featureMap is not None: + featureMap.applyDeferredMappings() + container.table = self + return container + +def parseGSUB(lines, font): + return parseGSUBGPOS(lines, font, 'GSUB') +def parseGPOS(lines, font): + return parseGSUBGPOS(lines, font, 'GPOS') + +def parseAttachList(lines, font): + points = {} + with lines.between('attachment list'): + for line in lines: + glyph = makeGlyph(line[0]) + assert glyph not in points, glyph + points[glyph] = [int(i) for i in line[1:]] + return otl.buildAttachList(points, font.getReverseGlyphMap()) + +def parseCaretList(lines, font): + carets = {} + with lines.between('carets'): + for line in lines: + glyph = makeGlyph(line[0]) + assert glyph not in carets, glyph + num = int(line[1]) + thisCarets = [int(i) for i in line[2:]] + assert num == len(thisCarets), line + carets[glyph] = thisCarets + return otl.buildLigCaretList(carets, {}, font.getReverseGlyphMap()) + +def makeMarkFilteringSets(sets, font): + self = ot.MarkGlyphSetsDef() + self.MarkSetTableFormat = 1 + self.MarkSetCount = 1 + max(sets.keys()) + self.Coverage = [None] * self.MarkSetCount + for k,v in sorted(sets.items()): + self.Coverage[k] = makeCoverage(set(v), font) + return self + +def parseMarkFilteringSets(lines, font): + sets = {} + with lines.between('set definition'): + for line in lines: + assert len(line) == 2, line + glyph = makeGlyph(line[0]) + # TODO accept set names + st = int(line[1]) + if st not in sets: + sets[st] = [] + 
sets[st].append(glyph) + return makeMarkFilteringSets(sets, font) + +def parseGDEF(lines, font): + container = ttLib.getTableClass('GDEF')() + log.debug("Parsing GDEF") + self = ot.GDEF() + fields = { + 'class definition begin': + ('GlyphClassDef', + lambda lines, font: parseClassDef(lines, font, klass=ot.GlyphClassDef)), + 'attachment list begin': + ('AttachList', parseAttachList), + 'carets begin': + ('LigCaretList', parseCaretList), + 'mark attachment class definition begin': + ('MarkAttachClassDef', + lambda lines, font: parseClassDef(lines, font, klass=ot.MarkAttachClassDef)), + 'markfilter set definition begin': + ('MarkGlyphSetsDef', parseMarkFilteringSets), + } + for attr,parser in fields.values(): + setattr(self, attr, None) + while lines.peek() is not None: + typ = lines.peek()[0].lower() + if typ not in fields: + log.debug('Skipping %s', typ) + next(lines) + continue + attr,parser = fields[typ] + assert getattr(self, attr) is None, attr + setattr(self, attr, parser(lines, font)) + self.Version = 0x00010000 if self.MarkGlyphSetsDef is None else 0x00010002 + container.table = self + return container + +def parseCmap(lines, font): + container = ttLib.getTableClass('cmap')() + log.debug("Parsing cmap") + tables = [] + while lines.peek() is not None: + lines.expect('cmap subtable %d' % len(tables)) + platId, encId, fmt, lang = [ + parseCmapId(lines, field) + for field in ('platformID', 'encodingID', 'format', 'language')] + table = cmap_classes[fmt](fmt) + table.platformID = platId + table.platEncID = encId + table.language = lang + table.cmap = {} + line = next(lines) + while line[0] != 'end subtable': + table.cmap[int(line[0], 16)] = line[1] + line = next(lines) + tables.append(table) + container.tableVersion = 0 + container.tables = tables + return container + +def parseCmapId(lines, field): + line = next(lines) + assert field == line[0] + return int(line[1]) + +def parseTable(lines, font, tableTag=None): + log.debug("Parsing table") + line = lines.peeks() 
+ tag = None + if line[0].split()[0] == 'FontDame': + tag = line[0].split()[1] + elif ' '.join(line[0].split()[:3]) == 'Font Chef Table': + tag = line[0].split()[3] + if tag is not None: + next(lines) + tag = tag.ljust(4) + if tableTag is None: + tableTag = tag + else: + assert tableTag == tag, (tableTag, tag) + + assert tableTag is not None, "Don't know what table to parse and data doesn't specify" + + return { + 'GSUB': parseGSUB, + 'GPOS': parseGPOS, + 'GDEF': parseGDEF, + 'cmap': parseCmap, + }[tableTag](lines, font) + +class Tokenizer(object): + + def __init__(self, f): + # TODO BytesIO / StringIO as needed? also, figure out whether we work on bytes or unicode + lines = iter(f) + try: + self.filename = f.name + except: + self.filename = None + self.lines = iter(lines) + self.line = '' + self.lineno = 0 + self.stoppers = [] + self.buffer = None + + def __iter__(self): + return self + + def _next_line(self): + self.lineno += 1 + line = self.line = next(self.lines) + line = [s.strip() for s in line.split('\t')] + if len(line) == 1 and not line[0]: + del line[0] + if line and not line[-1]: + log.warning('trailing tab found on line %d: %s' % (self.lineno, self.line)) + while line and not line[-1]: + del line[-1] + return line + + def _next_nonempty(self): + while True: + line = self._next_line() + # Skip comments and empty lines + if line and line[0] and (line[0][0] != '%' or line[0] == '% subtable'): + return line + + def _next_buffered(self): + if self.buffer: + ret = self.buffer + self.buffer = None + return ret + else: + return self._next_nonempty() + + def __next__(self): + line = self._next_buffered() + if line[0].lower() in self.stoppers: + self.buffer = line + raise StopIteration + return line + + def next(self): + return self.__next__() + + def peek(self): + if not self.buffer: + try: + self.buffer = self._next_nonempty() + except StopIteration: + return None + if self.buffer[0].lower() in self.stoppers: + return None + return self.buffer + + def 
peeks(self): + ret = self.peek() + return ret if ret is not None else ('',) + + @contextmanager + def between(self, tag): + start = tag + ' begin' + end = tag + ' end' + self.expectendswith(start) + self.stoppers.append(end) + yield + del self.stoppers[-1] + self.expect(tag + ' end') + + @contextmanager + def until(self, tags): + if type(tags) is not tuple: + tags = (tags,) + self.stoppers.extend(tags) + yield + del self.stoppers[-len(tags):] + + def expect(self, s): + line = next(self) + tag = line[0].lower() + assert tag == s, "Expected '%s', got '%s'" % (s, tag) + return line + + def expectendswith(self, s): + line = next(self) + tag = line[0].lower() + assert tag.endswith(s), "Expected '*%s', got '%s'" % (s, tag) + return line + +def build(f, font, tableTag=None): + lines = Tokenizer(f) + return parseTable(lines, font, tableTag=tableTag) + + +def main(args=None, font=None): + import sys + from fontTools import configLogger + from fontTools.misc.testTools import MockFont + + if args is None: + args = sys.argv[1:] + + # configure the library logger (for >= WARNING) + configLogger() + # comment this out to enable debug messages from mtiLib's logger + # log.setLevel(logging.DEBUG) + + if font is None: + font = MockFont() + + tableTag = None + if args[0].startswith('-t'): + tableTag = args[0][2:] + del args[0] + for f in args: + log.debug("Processing %s", f) + table = build(open(f, 'rt', encoding="utf-8"), font, tableTag=tableTag) + blob = table.compile(font) # Make sure it compiles + decompiled = table.__class__() + decompiled.decompile(blob, font) # Make sure it decompiles! 
+ + #continue + from fontTools.misc import xmlWriter + tag = table.tableTag + writer = xmlWriter.XMLWriter(sys.stdout) + writer.begintag(tag) + writer.newline() + #table.toXML(writer, font) + decompiled.toXML(writer, font) + writer.endtag(tag) + writer.newline() + + +if __name__ == '__main__': + import sys + sys.exit(main()) diff -Nru fonttools-3.0/Lib/fontTools/mtiLib/__main__.py fonttools-3.21.2/Lib/fontTools/mtiLib/__main__.py --- fonttools-3.0/Lib/fontTools/mtiLib/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/mtiLib/__main__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +from fontTools.mtiLib import main + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Lib/fontTools/otlLib/builder.py fonttools-3.21.2/Lib/fontTools/otlLib/builder.py --- fonttools-3.0/Lib/fontTools/otlLib/builder.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/otlLib/builder.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,640 @@ +from __future__ import print_function, division, absolute_import +from fontTools import ttLib +from fontTools.ttLib.tables import otTables as ot +from fontTools.ttLib.tables.otBase import ValueRecord, valueRecordFormatDict + + +def buildCoverage(glyphs, glyphMap): + if not glyphs: + return None + self = ot.Coverage() + self.glyphs = sorted(glyphs, key=glyphMap.__getitem__) + return self + + +LOOKUP_FLAG_RIGHT_TO_LEFT = 0x0001 +LOOKUP_FLAG_IGNORE_BASE_GLYPHS = 0x0002 +LOOKUP_FLAG_IGNORE_LIGATURES = 0x0004 +LOOKUP_FLAG_IGNORE_MARKS = 0x0008 +LOOKUP_FLAG_USE_MARK_FILTERING_SET = 0x0010 + + +def buildLookup(subtables, flags=0, markFilterSet=None): + if subtables is None: + return None + subtables = [st for st in subtables if st is not None] + if not subtables: + return None + assert all(t.LookupType == subtables[0].LookupType for t in subtables), \ + ("all subtables 
must have the same LookupType; got %s" % + repr([t.LookupType for t in subtables])) + self = ot.Lookup() + self.LookupType = subtables[0].LookupType + self.LookupFlag = flags + self.SubTable = subtables + self.SubTableCount = len(self.SubTable) + if markFilterSet is not None: + assert self.LookupFlag & LOOKUP_FLAG_USE_MARK_FILTERING_SET, \ + ("if markFilterSet is not None, flags must set " + "LOOKUP_FLAG_USE_MARK_FILTERING_SET; flags=0x%04x" % flags) + assert isinstance(markFilterSet, int), markFilterSet + self.MarkFilteringSet = markFilterSet + else: + assert (self.LookupFlag & LOOKUP_FLAG_USE_MARK_FILTERING_SET) == 0, \ + ("if markFilterSet is None, flags must not set " + "LOOKUP_FLAG_USE_MARK_FILTERING_SET; flags=0x%04x" % flags) + return self + + +# GSUB + + +def buildSingleSubstSubtable(mapping): + if not mapping: + return None + self = ot.SingleSubst() + self.mapping = dict(mapping) + return self + + +def buildMultipleSubstSubtable(mapping): + if not mapping: + return None + self = ot.MultipleSubst() + self.mapping = dict(mapping) + return self + + +def buildAlternateSubstSubtable(mapping): + if not mapping: + return None + self = ot.AlternateSubst() + self.alternates = dict(mapping) + return self + + +def _getLigatureKey(components): + """Computes a key for ordering ligatures in a GSUB Type-4 lookup. + + When building the OpenType lookup, we need to make sure that + the longest sequence of components is listed first, so we + use the negative length as the primary key for sorting. + To make buildLigatureSubstSubtable() deterministic, we use the + component sequence as the secondary key. + + For example, this will sort (f,f,f) < (f,f,i) < (f,f) < (f,i) < (f,l). 
+ """ + return (-len(components), components) + + +def buildLigatureSubstSubtable(mapping): + if not mapping: + return None + self = ot.LigatureSubst() + # The following single line can replace the rest of this function + # with fontTools >= 3.1: + # self.ligatures = dict(mapping) + self.ligatures = {} + for components in sorted(mapping.keys(), key=_getLigatureKey): + ligature = ot.Ligature() + ligature.Component = components[1:] + ligature.CompCount = len(ligature.Component) + 1 + ligature.LigGlyph = mapping[components] + firstGlyph = components[0] + self.ligatures.setdefault(firstGlyph, []).append(ligature) + return self + + +# GPOS + + +def buildAnchor(x, y, point=None, deviceX=None, deviceY=None): + self = ot.Anchor() + self.XCoordinate, self.YCoordinate = x, y + self.Format = 1 + if point is not None: + self.AnchorPoint = point + self.Format = 2 + if deviceX is not None or deviceY is not None: + assert self.Format == 1, \ + "Either point, or both of deviceX/deviceY, must be None." + self.XDeviceTable = deviceX + self.YDeviceTable = deviceY + self.Format = 3 + return self + + +def buildBaseArray(bases, numMarkClasses, glyphMap): + self = ot.BaseArray() + self.BaseRecord = [] + for base in sorted(bases, key=glyphMap.__getitem__): + b = bases[base] + anchors = [b.get(markClass) for markClass in range(numMarkClasses)] + self.BaseRecord.append(buildBaseRecord(anchors)) + self.BaseCount = len(self.BaseRecord) + return self + + +def buildBaseRecord(anchors): + """[otTables.Anchor, otTables.Anchor, ...] --> otTables.BaseRecord""" + self = ot.BaseRecord() + self.BaseAnchor = anchors + return self + + +def buildComponentRecord(anchors): + """[otTables.Anchor, otTables.Anchor, ...] 
--> otTables.ComponentRecord""" + if not anchors: + return None + self = ot.ComponentRecord() + self.LigatureAnchor = anchors + return self + + +def buildCursivePosSubtable(attach, glyphMap): + """{"alef": (entry, exit)} --> otTables.CursivePos""" + if not attach: + return None + self = ot.CursivePos() + self.Format = 1 + self.Coverage = buildCoverage(attach.keys(), glyphMap) + self.EntryExitRecord = [] + for glyph in self.Coverage.glyphs: + entryAnchor, exitAnchor = attach[glyph] + rec = ot.EntryExitRecord() + rec.EntryAnchor = entryAnchor + rec.ExitAnchor = exitAnchor + self.EntryExitRecord.append(rec) + self.EntryExitCount = len(self.EntryExitRecord) + return self + + +def buildDevice(deltas): + """{8:+1, 10:-3, ...} --> otTables.Device""" + if not deltas: + return None + self = ot.Device() + keys = deltas.keys() + self.StartSize = startSize = min(keys) + self.EndSize = endSize = max(keys) + assert 0 <= startSize <= endSize + self.DeltaValue = deltaValues = [ + deltas.get(size, 0) + for size in range(startSize, endSize + 1)] + maxDelta = max(deltaValues) + minDelta = min(deltaValues) + assert minDelta > -129 and maxDelta < 128 + if minDelta > -3 and maxDelta < 2: + self.DeltaFormat = 1 + elif minDelta > -9 and maxDelta < 8: + self.DeltaFormat = 2 + else: + self.DeltaFormat = 3 + return self + + +def buildLigatureArray(ligs, numMarkClasses, glyphMap): + self = ot.LigatureArray() + self.LigatureAttach = [] + for lig in sorted(ligs, key=glyphMap.__getitem__): + anchors = [] + for component in ligs[lig]: + anchors.append([component.get(mc) for mc in range(numMarkClasses)]) + self.LigatureAttach.append(buildLigatureAttach(anchors)) + self.LigatureCount = len(self.LigatureAttach) + return self + + +def buildLigatureAttach(components): + """[[Anchor, Anchor], [Anchor, Anchor, Anchor]] --> LigatureAttach""" + self = ot.LigatureAttach() + self.ComponentRecord = [buildComponentRecord(c) for c in components] + self.ComponentCount = len(self.ComponentRecord) + return self + 
+ +def buildMarkArray(marks, glyphMap): + """{"acute": (markClass, otTables.Anchor)} --> otTables.MarkArray""" + self = ot.MarkArray() + self.MarkRecord = [] + for mark in sorted(marks.keys(), key=glyphMap.__getitem__): + markClass, anchor = marks[mark] + markrec = buildMarkRecord(markClass, anchor) + self.MarkRecord.append(markrec) + self.MarkCount = len(self.MarkRecord) + return self + + +def buildMarkBasePos(marks, bases, glyphMap): + """Build a list of MarkBasePos subtables. + + a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... + marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)} + bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}} + """ + # TODO: Consider emitting multiple subtables to save space. + # Partition the marks and bases into disjoint subsets, so that + # MarkBasePos rules would only access glyphs from a single + # subset. This would likely lead to smaller mark/base + # matrices, so we might be able to omit many of the empty + # anchor tables that we currently produce. Of course, this + # would only work if the MarkBasePos rules of real-world fonts + # allow partitioning into multiple subsets. We should find out + # whether this is the case; if so, implement the optimization. + # On the other hand, a very large number of subtables could + # slow down layout engines; so this would need profiling. + return [buildMarkBasePosSubtable(marks, bases, glyphMap)] + + +def buildMarkBasePosSubtable(marks, bases, glyphMap): + """Build a single MarkBasePos subtable. + + a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... 
+ marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)} + bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}} + """ + self = ot.MarkBasePos() + self.Format = 1 + self.MarkCoverage = buildCoverage(marks, glyphMap) + self.MarkArray = buildMarkArray(marks, glyphMap) + self.ClassCount = max([mc for mc, _ in marks.values()]) + 1 + self.BaseCoverage = buildCoverage(bases, glyphMap) + self.BaseArray = buildBaseArray(bases, self.ClassCount, glyphMap) + return self + + +def buildMarkLigPos(marks, ligs, glyphMap): + """Build a list of MarkLigPos subtables. + + a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... + marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)} + ligs = {"f_i": [{0: a3, 1: a5}, {0: a4, 1: a5}], "c_t": [{...}, {...}]} + """ + # TODO: Consider splitting into multiple subtables to save space, + # as with MarkBasePos, this would be a trade-off that would need + # profiling. And, depending on how typical fonts are structured, + # it might not be worth doing at all. + return [buildMarkLigPosSubtable(marks, ligs, glyphMap)] + + +def buildMarkLigPosSubtable(marks, ligs, glyphMap): + """Build a single MarkLigPos subtable. + + a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... + marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)} + ligs = {"f_i": [{0: a3, 1: a5}, {0: a4, 1: a5}], "c_t": [{...}, {...}]} + """ + self = ot.MarkLigPos() + self.Format = 1 + self.MarkCoverage = buildCoverage(marks, glyphMap) + self.MarkArray = buildMarkArray(marks, glyphMap) + self.ClassCount = max([mc for mc, _ in marks.values()]) + 1 + self.LigatureCoverage = buildCoverage(ligs, glyphMap) + self.LigatureArray = buildLigatureArray(ligs, self.ClassCount, glyphMap) + return self + + +def buildMarkRecord(classID, anchor): + assert isinstance(classID, int) + assert isinstance(anchor, ot.Anchor) + self = ot.MarkRecord() + self.Class = classID + self.MarkAnchor = anchor + return self + + +def buildMark2Record(anchors): + """[otTables.Anchor, otTables.Anchor, ...] 
--> otTables.Mark2Record""" + self = ot.Mark2Record() + self.Mark2Anchor = anchors + return self + + +def _getValueFormat(f, values, i): + """Helper for buildPairPos{Glyphs|Classes}Subtable.""" + if f is not None: + return f + mask = 0 + for value in values: + if value is not None and value[i] is not None: + mask |= value[i].getFormat() + return mask + + +def buildPairPosClassesSubtable(pairs, glyphMap, + valueFormat1=None, valueFormat2=None): + coverage = set() + classDef1 = ClassDefBuilder(useClass0=True) + classDef2 = ClassDefBuilder(useClass0=False) + for gc1, gc2 in sorted(pairs): + coverage.update(gc1) + classDef1.add(gc1) + classDef2.add(gc2) + self = ot.PairPos() + self.Format = 2 + self.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0) + self.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1) + self.Coverage = buildCoverage(coverage, glyphMap) + self.ClassDef1 = classDef1.build() + self.ClassDef2 = classDef2.build() + classes1 = classDef1.classes() + classes2 = classDef2.classes() + self.Class1Record = [] + for c1 in classes1: + rec1 = ot.Class1Record() + rec1.Class2Record = [] + self.Class1Record.append(rec1) + for c2 in classes2: + rec2 = ot.Class2Record() + rec2.Value1, rec2.Value2 = pairs.get((c1, c2), (None, None)) + rec1.Class2Record.append(rec2) + self.Class1Count = len(self.Class1Record) + self.Class2Count = len(classes2) + return self + + +def buildPairPosGlyphs(pairs, glyphMap): + p = {} # (formatA, formatB) --> {(glyphA, glyphB): (valA, valB)} + for (glyphA, glyphB), (valA, valB) in pairs.items(): + formatA = valA.getFormat() if valA is not None else 0 + formatB = valB.getFormat() if valB is not None else 0 + pos = p.setdefault((formatA, formatB), {}) + pos[(glyphA, glyphB)] = (valA, valB) + return [ + buildPairPosGlyphsSubtable(pos, glyphMap, formatA, formatB) + for ((formatA, formatB), pos) in sorted(p.items())] + + +def buildPairPosGlyphsSubtable(pairs, glyphMap, + valueFormat1=None, valueFormat2=None): + self = 
ot.PairPos() + self.Format = 1 + self.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0) + self.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1) + p = {} + for (glyphA, glyphB), (valA, valB) in pairs.items(): + p.setdefault(glyphA, []).append((glyphB, valA, valB)) + self.Coverage = buildCoverage({g for g, _ in pairs.keys()}, glyphMap) + self.PairSet = [] + for glyph in self.Coverage.glyphs: + ps = ot.PairSet() + ps.PairValueRecord = [] + self.PairSet.append(ps) + for glyph2, val1, val2 in \ + sorted(p[glyph], key=lambda x: glyphMap[x[0]]): + pvr = ot.PairValueRecord() + pvr.SecondGlyph = glyph2 + pvr.Value1 = val1 if val1 and val1.getFormat() != 0 else None + pvr.Value2 = val2 if val2 and val2.getFormat() != 0 else None + ps.PairValueRecord.append(pvr) + ps.PairValueCount = len(ps.PairValueRecord) + self.PairSetCount = len(self.PairSet) + return self + + +def buildSinglePos(mapping, glyphMap): + """{"glyph": ValueRecord} --> [otTables.SinglePos*]""" + result, handled = [], set() + # In SinglePos format 1, the covered glyphs all share the same ValueRecord. + # In format 2, each glyph has its own ValueRecord, but these records + # all have the same properties (eg., all have an X but no Y placement). + coverages, masks, values = {}, {}, {} + for glyph, value in mapping.items(): + key = _getSinglePosValueKey(value) + coverages.setdefault(key, []).append(glyph) + masks.setdefault(key[0], []).append(key) + values[key] = value + + # If a ValueRecord is shared between multiple glyphs, we generate + # a SinglePos format 1 subtable; that is the most compact form. + for key, glyphs in coverages.items(): + if len(glyphs) > 1: + format1Mapping = {g: values[key] for g in glyphs} + result.append(buildSinglePosSubtable(format1Mapping, glyphMap)) + handled.add(key) + + # In the remaining ValueRecords, look for those whose valueFormat + # (the set of used properties) is shared between multiple records. + # These will get encoded in format 2. 
+ for valueFormat, keys in masks.items(): + f2 = [k for k in keys if k not in handled] + if len(f2) > 1: + format2Mapping = {coverages[k][0]: values[k] for k in f2} + result.append(buildSinglePosSubtable(format2Mapping, glyphMap)) + handled.update(f2) + + # The remaining ValueRecords are singletons in the sense that + # they are only used by a single glyph, and their valueFormat + # is unique as well. We encode these in format 1 again. + for key, glyphs in coverages.items(): + if key not in handled: + assert len(glyphs) == 1, glyphs + st = buildSinglePosSubtable({glyphs[0]: values[key]}, glyphMap) + result.append(st) + + # When the OpenType layout engine traverses the subtables, it will + # stop after the first matching subtable. Therefore, we sort the + # resulting subtables by decreasing coverage size; this increases + # the chance that the layout engine can do an early exit. (Of course, + # this would only be true if all glyphs were equally frequent, which + # is not really the case; but we do not know their distribution). + # If two subtables cover the same number of glyphs, we sort them + # by glyph ID so that our output is deterministic. 
+ result.sort(key=lambda t: _getSinglePosTableKey(t, glyphMap)) + return result + + +def buildSinglePosSubtable(values, glyphMap): + """{glyphName: otBase.ValueRecord} --> otTables.SinglePos""" + self = ot.SinglePos() + self.Coverage = buildCoverage(values.keys(), glyphMap) + valueRecords = [values[g] for g in self.Coverage.glyphs] + self.ValueFormat = 0 + for v in valueRecords: + self.ValueFormat |= v.getFormat() + if all(v == valueRecords[0] for v in valueRecords): + self.Format = 1 + if self.ValueFormat != 0: + self.Value = valueRecords[0] + else: + self.Value = None + else: + self.Format = 2 + self.Value = valueRecords + self.ValueCount = len(self.Value) + return self + + +def _getSinglePosTableKey(subtable, glyphMap): + assert isinstance(subtable, ot.SinglePos), subtable + glyphs = subtable.Coverage.glyphs + return (-len(glyphs), glyphMap[glyphs[0]]) + + +def _getSinglePosValueKey(valueRecord): + """otBase.ValueRecord --> (2, ("YPlacement": 12))""" + assert isinstance(valueRecord, ValueRecord), valueRecord + valueFormat, result = 0, [] + for name, value in valueRecord.__dict__.items(): + if isinstance(value, ot.Device): + result.append((name, _makeDeviceTuple(value))) + else: + result.append((name, value)) + valueFormat |= valueRecordFormatDict[name][0] + result.sort() + result.insert(0, valueFormat) + return tuple(result) + + +def _makeDeviceTuple(device): + """otTables.Device --> tuple, for making device tables unique""" + return (device.DeltaFormat, device.StartSize, device.EndSize, + tuple(device.DeltaValue)) + + +def buildValue(value): + self = ValueRecord() + for k, v in value.items(): + setattr(self, k, v) + return self + + +# GDEF + +def buildAttachList(attachPoints, glyphMap): + """{"glyphName": [4, 23]} --> otTables.AttachList, or None""" + if not attachPoints: + return None + self = ot.AttachList() + self.Coverage = buildCoverage(attachPoints.keys(), glyphMap) + self.AttachPoint = [buildAttachPoint(attachPoints[g]) + for g in self.Coverage.glyphs] + 
self.GlyphCount = len(self.AttachPoint) + return self + + +def buildAttachPoint(points): + """[4, 23, 41] --> otTables.AttachPoint""" + if not points: + return None + self = ot.AttachPoint() + self.PointIndex = sorted(set(points)) + self.PointCount = len(self.PointIndex) + return self + + +def buildCaretValueForCoord(coord): + """500 --> otTables.CaretValue, format 1""" + self = ot.CaretValue() + self.Format = 1 + self.Coordinate = coord + return self + + +def buildCaretValueForPoint(point): + """4 --> otTables.CaretValue, format 2""" + self = ot.CaretValue() + self.Format = 2 + self.CaretValuePoint = point + return self + + +def buildLigCaretList(coords, points, glyphMap): + """{"f_f_i":[300,600]}, {"c_t":[28]} --> otTables.LigCaretList, or None""" + glyphs = set(coords.keys()) if coords else set() + if points: + glyphs.update(points.keys()) + carets = {g: buildLigGlyph(coords.get(g), points.get(g)) for g in glyphs} + carets = {g: c for g, c in carets.items() if c is not None} + if not carets: + return None + self = ot.LigCaretList() + self.Coverage = buildCoverage(carets.keys(), glyphMap) + self.LigGlyph = [carets[g] for g in self.Coverage.glyphs] + self.LigGlyphCount = len(self.LigGlyph) + return self + + +def buildLigGlyph(coords, points): + """([500], [4]) --> otTables.LigGlyph; None for empty coords/points""" + carets = [] + if coords: + carets.extend([buildCaretValueForCoord(c) for c in sorted(coords)]) + if points: + carets.extend([buildCaretValueForPoint(p) for p in sorted(points)]) + if not carets: + return None + self = ot.LigGlyph() + self.CaretValue = carets + self.CaretCount = len(self.CaretValue) + return self + + +def buildMarkGlyphSetsDef(markSets, glyphMap): + """[{"acute","grave"}, {"caron","grave"}] --> otTables.MarkGlyphSetsDef""" + if not markSets: + return None + self = ot.MarkGlyphSetsDef() + self.MarkSetTableFormat = 1 + self.Coverage = [buildCoverage(m, glyphMap) for m in markSets] + self.MarkSetCount = len(self.Coverage) + return self + + 
+class ClassDefBuilder(object): + """Helper for building ClassDef tables.""" + def __init__(self, useClass0): + self.classes_ = set() + self.glyphs_ = {} + self.useClass0_ = useClass0 + + def canAdd(self, glyphs): + if isinstance(glyphs, (set, frozenset)): + glyphs = sorted(glyphs) + glyphs = tuple(glyphs) + if glyphs in self.classes_: + return True + for glyph in glyphs: + if glyph in self.glyphs_: + return False + return True + + def add(self, glyphs): + if isinstance(glyphs, (set, frozenset)): + glyphs = sorted(glyphs) + glyphs = tuple(glyphs) + if glyphs in self.classes_: + return + self.classes_.add(glyphs) + for glyph in glyphs: + assert glyph not in self.glyphs_ + self.glyphs_[glyph] = glyphs + + def classes(self): + # In ClassDef1 tables, class id #0 does not need to be encoded + # because zero is the default. Therefore, we use id #0 for the + # glyph class that has the largest number of members. However, + # in other tables than ClassDef1, 0 means "every other glyph" + # so we should not use that ID for any real glyph classes; + # we implement this by inserting an empty set at position 0. + # + # TODO: Instead of counting the number of glyphs in each class, + # we should determine the encoded size. If the glyphs in a large + # class form a contiguous range, the encoding is actually quite + # compact, whereas a non-contiguous set might need a lot of bytes + # in the output file. We don't get this right with the key below. 
+ result = sorted(self.classes_, key=lambda s: (len(s), s), reverse=True) + if not self.useClass0_: + result.insert(0, frozenset()) + return result + + def build(self): + glyphClasses = {} + for classID, glyphs in enumerate(self.classes()): + if classID == 0: + continue + for glyph in glyphs: + glyphClasses[glyph] = classID + classDef = ot.ClassDef() + classDef.classDefs = glyphClasses + return classDef diff -Nru fonttools-3.0/Lib/fontTools/otlLib/builder.py.sketch fonttools-3.21.2/Lib/fontTools/otlLib/builder.py.sketch --- fonttools-3.0/Lib/fontTools/otlLib/builder.py.sketch 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/otlLib/builder.py.sketch 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,105 @@ + +from fontTools.otlLib import builder as builder + +GDEF::mark filtering sets +name:: + +lookup_flags = builder.LOOKUP_FLAG_IGNORE_MARKS | builder.LOOKUP_FLAG_RTL +smcp_subtable = builder.buildSingleSubstitute({'a':'a.scmp'}) +smcp_lookup = builder.buildLookup([smcp_subtable], lookup_flags=lookup_flags, mark_filter_set=int) + +lookups = [smcp_lookup, ...] + +scmp_feature = builder.buildFeature('smcp', [scmp_lookup], lookup_list=lookups) +scmp_feature = builder.buildFeature('smcp', [0]) + +features = [smcp_feature] + +default_langsys = builder.buildLangSys(set([scmp_feature]), requiredFeature=None, featureOrder=features) +default_langsys = builder.buildLangSys(set([0]), requiredFeature=None) + +script = + + +#GSUB: + +builder.buildSingleSubst({'a':'a.scmp'}) +builder.buildLigatureSubst({('f','i'):'fi'}) +builder.buildMultipleSubst({'a':('a0','a1')}) +builder.buildAlternateSubst({'a':('a.0','a.1')}) + + +class ChainSequence : namedtuple(['backtrack', 'input', 'lookahead')]) + pass + +ChainSequence(backtrack=..., input=..., lookahead=...) 
+ +klass0 = frozenset() + +builder.buildChainContextGlyphs( + [ + ( (None, ('f','f','i'), (,)), ( (1,lookup_fi), (1,lookup_2) ) ), + ], + glyphMap +) +builder.buildChainContextClass( + [ + ( (None, (2,0,1), (,)), ( (1,lookup_fi), (1,lookup_2) ) ), + ], + klasses = ( backtrackClass, ... ), + glyphMap +) +builder.buildChainContextCoverage( + ( (None, (frozenset('f'),frozenset('f'),frozenset('i')), (,)), ( (1,lookup_fi), (1,lookup_2) ) ), + glyphMap +) +builder.buildExtension(...) + +#GPOS: +device = builder.buildDevice() +builder.buildAnchor(100, -200) or (100,-200) +builder.buildAnchor(100, -200, device=device) +builder.buildAnchor(100, -200, point=2) + +valueRecord = builder.buildValue({'XAdvance':-200, ...}) + +builder.buildSinglePos({'a':valueRecord}) +builder.buildPairPosGlyphs( + { + ('a','b'): (valueRecord1,valueRecord2), + }, + glyphMap, + , valueFormat1=None, valueFormat2=None +) +builder.buildPairPosClasses( + { + (frozenset(['a']),frozenset(['b'])): (valueRecord1,valueRecord2), + }, + glyphMap, + , valueFormat1=None, valueFormat2=None +) + +builder.buildCursivePos( + { + 'alef': (entry,exit), + } + glyphMap +) +builder.buildMarkBasePos( + marks = { + 'mark1': (klass, anchor), + }, + bases = { + 'base0': [anchor0, anchor1, anchor2], + }, + glyphMap +) +builder.buildMarkBasePos( + marks = { + 'mark1': (name, anchor), + }, + bases = { + 'base0': {'top':anchor0, 'left':anchor1}, + }, + glyphMap +) diff -Nru fonttools-3.0/Lib/fontTools/otlLib/__init__.py fonttools-3.21.2/Lib/fontTools/otlLib/__init__.py --- fonttools-3.0/Lib/fontTools/otlLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/otlLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +"""OpenType Layout-related functionality.""" diff -Nru fonttools-3.0/Lib/fontTools/pens/areaPen.py fonttools-3.21.2/Lib/fontTools/pens/areaPen.py --- fonttools-3.0/Lib/fontTools/pens/areaPen.py 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Lib/fontTools/pens/areaPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,59 @@ +"""Calculate the area of a glyph.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["AreaPen"] + + +class AreaPen(BasePen): + + def __init__(self, glyphset=None): + BasePen.__init__(self, glyphset) + self.value = 0 + + def _moveTo(self, p0): + self._p0 = self._startPoint = p0 + + def _lineTo(self, p1): + x0, y0 = self._p0 + x1, y1 = p1 + self.value -= (x1 - x0) * (y1 + y0) * .5 + self._p0 = p1 + + def _qCurveToOne(self, p1, p2): + # https://github.com/Pomax/bezierinfo/issues/44 + p0 = self._p0 + x0, y0 = p0[0], p0[1] + x1, y1 = p1[0] - x0, p1[1] - y0 + x2, y2 = p2[0] - x0, p2[1] - y0 + self.value -= (x2 * y1 - x1 * y2) / 3 + self._lineTo(p2) + self._p0 = p2 + + def _curveToOne(self, p1, p2, p3): + # https://github.com/Pomax/bezierinfo/issues/44 + p0 = self._p0 + x0, y0 = p0[0], p0[1] + x1, y1 = p1[0] - x0, p1[1] - y0 + x2, y2 = p2[0] - x0, p2[1] - y0 + x3, y3 = p3[0] - x0, p3[1] - y0 + self.value -= ( + x1 * ( - y2 - y3) + + x2 * (y1 - 2*y3) + + x3 * (y1 + 2*y2 ) + ) * 0.15 + self._lineTo(p3) + self._p0 = p3 + + def _closePath(self): + self._lineTo(self._startPoint) + del self._p0, self._startPoint + + def _endPath(self): + if self._p0 != self._startPoint: + # Area is not defined for open contours. 
+ raise NotImplementedError + del self._p0, self._startPoint diff -Nru fonttools-3.0/Lib/fontTools/pens/basePen.py fonttools-3.21.2/Lib/fontTools/pens/basePen.py --- fonttools-3.0/Lib/fontTools/pens/basePen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/basePen.py 2018-01-08 12:40:40.000000000 +0000 @@ -38,6 +38,7 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +from fontTools.misc.loggingTools import LogMixin __all__ = ["AbstractPen", "NullPen", "BasePen", "decomposeSuperBezierSegment", "decomposeQuadraticSegment"] @@ -141,7 +142,50 @@ pass -class BasePen(AbstractPen): +class LoggingPen(LogMixin, AbstractPen): + """A pen with a `log` property (see fontTools.misc.loggingTools.LogMixin) + """ + pass + + +class DecomposingPen(LoggingPen): + + """ Implements a 'addComponent' method that decomposes components + (i.e. draws them onto self as simple contours). + It can also be used as a mixin class (e.g. see ContourRecordingPen). + + You must override moveTo, lineTo, curveTo and qCurveTo. You may + additionally override closePath, endPath and addComponent. + """ + + # By default a warning message is logged when a base glyph is missing; + # set this to False if you want to raise a 'KeyError' exception + skipMissingComponents = True + + def __init__(self, glyphSet): + """ Takes a single 'glyphSet' argument (dict), in which the glyphs + that are referenced as components are looked up by their name. + """ + super(DecomposingPen, self).__init__() + self.glyphSet = glyphSet + + def addComponent(self, glyphName, transformation): + """ Transform the points of the base glyph and draw it onto self. 
+ """ + from fontTools.pens.transformPen import TransformPen + try: + glyph = self.glyphSet[glyphName] + except KeyError: + if not self.skipMissingComponents: + raise + self.log.warning( + "glyph '%s' is missing from glyphSet; skipped" % glyphName) + else: + tPen = TransformPen(self, transformation) + glyph.draw(tPen) + + +class BasePen(DecomposingPen): """Base class for drawing pens. You must override _moveTo, _lineTo and _curveToOne. You may additionally override _closePath, _endPath, @@ -149,8 +193,8 @@ methods. """ - def __init__(self, glyphSet): - self.glyphSet = glyphSet + def __init__(self, glyphSet=None): + super(BasePen, self).__init__(glyphSet) self.__currentPoint = None # must override @@ -186,19 +230,6 @@ mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y) self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2) - def addComponent(self, glyphName, transformation): - """This default implementation simply transforms the points - of the base glyph and draws it onto self. - """ - from fontTools.pens.transformPen import TransformPen - try: - glyph = self.glyphSet[glyphName] - except KeyError: - pass - else: - tPen = TransformPen(self, transformation) - glyph.draw(tPen) - # don't override def _getCurrentPoint(self): diff -Nru fonttools-3.0/Lib/fontTools/pens/basePen_test.py fonttools-3.21.2/Lib/fontTools/pens/basePen_test.py --- fonttools-3.0/Lib/fontTools/pens/basePen_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/basePen_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,171 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.basePen import \ - BasePen, decomposeSuperBezierSegment, decomposeQuadraticSegment -import unittest - - -class _TestPen(BasePen): - def __init__(self): - BasePen.__init__(self, glyphSet={}) - self._commands = [] - - def __repr__(self): - return " ".join(self._commands) - - def getCurrentPoint(self): - return 
self._getCurrentPoint() - - def _moveTo(self, pt): - self._commands.append("%s %s moveto" % (pt[0], pt[1])) - - def _lineTo(self, pt): - self._commands.append("%s %s lineto" % (pt[0], pt[1])) - - def _curveToOne(self, bcp1, bcp2, pt): - self._commands.append("%s %s %s %s %s %s curveto" % - (bcp1[0], bcp1[1], - bcp2[0], bcp2[1], - pt[0], pt[1])) - - def _closePath(self): - self._commands.append("closepath") - - def _endPath(self): - self._commands.append("endpath") - - -class _TestGlyph: - def draw(self, pen): - pen.moveTo((0.0, 0.0)) - pen.lineTo((0.0, 100.0)) - pen.curveTo((50.0, 75.0), (60.0, 50.0), (50.0, 25.0), (0.0, 0.0)) - pen.closePath() - - -class BasePenTest(unittest.TestCase): - def test_moveTo(self): - pen = _TestPen() - pen.moveTo((0.5, -4.3)) - self.assertEqual("0.5 -4.3 moveto", repr(pen)) - self.assertEqual((0.5, -4.3), pen.getCurrentPoint()) - - def test_lineTo(self): - pen = _TestPen() - pen.moveTo((4, 5)) - pen.lineTo((7, 8)) - self.assertEqual("4 5 moveto 7 8 lineto", repr(pen)) - self.assertEqual((7, 8), pen.getCurrentPoint()) - - def test_curveTo_zeroPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - self.assertRaises(AssertionError, pen.curveTo) - - def test_curveTo_onePoint(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.curveTo((1.0, 1.1)) - self.assertEqual("0.0 0.0 moveto 1.0 1.1 lineto", repr(pen)) - self.assertEqual((1.0, 1.1), pen.getCurrentPoint()) - - def test_curveTo_twoPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.curveTo((6.0, 3.0), (3.0, 6.0)) - self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", - repr(pen)) - self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) - - def test_curveTo_manyPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.curveTo((1.0, 1.1), (2.0, 2.1), (3.0, 3.1), (4.0, 4.1)) - self.assertEqual("0.0 0.0 moveto " - "1.0 1.1 1.5 1.6 2.0 2.1 curveto " - "2.5 2.6 3.0 3.1 4.0 4.1 curveto", repr(pen)) - self.assertEqual((4.0, 4.1), pen.getCurrentPoint()) - - 
def test_qCurveTo_zeroPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - self.assertRaises(AssertionError, pen.qCurveTo) - - def test_qCurveTo_onePoint(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.qCurveTo((77.7, 99.9)) - self.assertEqual("0.0 0.0 moveto 77.7 99.9 lineto", repr(pen)) - self.assertEqual((77.7, 99.9), pen.getCurrentPoint()) - - def test_qCurveTo_manyPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.qCurveTo((6.0, 3.0), (3.0, 6.0)) - self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", - repr(pen)) - self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) - - def test_qCurveTo_onlyOffCurvePoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.qCurveTo((6.0, -6.0), (12.0, 12.0), (18.0, -18.0), None) - self.assertEqual("0.0 0.0 moveto " - "12.0 -12.0 moveto " - "8.0 -8.0 7.0 -3.0 9.0 3.0 curveto " - "11.0 9.0 13.0 7.0 15.0 -3.0 curveto " - "17.0 -13.0 16.0 -16.0 12.0 -12.0 curveto", repr(pen)) - self.assertEqual((12.0, -12.0), pen.getCurrentPoint()) - - def test_closePath(self): - pen = _TestPen() - pen.lineTo((3, 4)) - pen.closePath() - self.assertEqual("3 4 lineto closepath", repr(pen)) - self.assertEqual(None, pen.getCurrentPoint()) - - def test_endPath(self): - pen = _TestPen() - pen.lineTo((3, 4)) - pen.endPath() - self.assertEqual("3 4 lineto endpath", repr(pen)) - self.assertEqual(None, pen.getCurrentPoint()) - - def test_addComponent(self): - pen = _TestPen() - pen.glyphSet["oslash"] = _TestGlyph() - pen.addComponent("oslash", (2, 3, 0.5, 2, -10, 0)) - self.assertEqual("-10.0 0.0 moveto " - "40.0 200.0 lineto " - "127.5 300.0 131.25 290.0 125.0 265.0 curveto " - "118.75 240.0 102.5 200.0 -10.0 0.0 curveto " - "closepath", repr(pen)) - self.assertEqual(None, pen.getCurrentPoint()) - - -class DecomposeSegmentTest(unittest.TestCase): - def test_decomposeSuperBezierSegment(self): - decompose = decomposeSuperBezierSegment - self.assertRaises(AssertionError, decompose, []) - 
self.assertRaises(AssertionError, decompose, [(0, 0)]) - self.assertRaises(AssertionError, decompose, [(0, 0), (1, 1)]) - self.assertEqual([((0, 0), (1, 1), (2, 2))], - decompose([(0, 0), (1, 1), (2, 2)])) - self.assertEqual( - [((0, 0), (2, -2), (4, 0)), ((6, 2), (8, 8), (12, -12))], - decompose([(0, 0), (4, -4), (8, 8), (12, -12)])) - - def test_decomposeQuadraticSegment(self): - decompose = decomposeQuadraticSegment - self.assertRaises(AssertionError, decompose, []) - self.assertRaises(AssertionError, decompose, [(0, 0)]) - self.assertEqual([((0,0), (4, 8))], decompose([(0, 0), (4, 8)])) - self.assertEqual([((0,0), (2, 4)), ((4, 8), (9, -9))], - decompose([(0, 0), (4, 8), (9, -9)])) - self.assertEqual( - [((0, 0), (2.0, 4.0)), ((4, 8), (6.5, -0.5)), ((9, -9), (10, 10))], - decompose([(0, 0), (4, 8), (9, -9), (10, 10)])) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/pens/boundsPen.py fonttools-3.21.2/Lib/fontTools/pens/boundsPen.py --- fonttools-3.0/Lib/fontTools/pens/boundsPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/boundsPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -17,25 +17,42 @@ When the shape has been drawn, the bounds are available as the 'bounds' attribute of the pen object. It's a 4-tuple: - (xMin, yMin, xMax, yMax) + (xMin, yMin, xMax, yMax). + + If 'ignoreSinglePoints' is True, single points are ignored. 
""" - def __init__(self, glyphSet): + def __init__(self, glyphSet, ignoreSinglePoints=False): BasePen.__init__(self, glyphSet) - self.bounds = None + self.ignoreSinglePoints = ignoreSinglePoints + self.init() + + def init(self): + self.bounds = None + self._start = None def _moveTo(self, pt): + self._start = pt + if not self.ignoreSinglePoints: + self._addMoveTo() + + def _addMoveTo(self): + if self._start is None: + return bounds = self.bounds if bounds: - self.bounds = updateBounds(bounds, pt) + self.bounds = updateBounds(bounds, self._start) else: - x, y = pt + x, y = self._start self.bounds = (x, y, x, y) + self._start = None def _lineTo(self, pt): + self._addMoveTo() self.bounds = updateBounds(self.bounds, pt) def _curveToOne(self, bcp1, bcp2, pt): + self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, bcp1) bounds = updateBounds(bounds, bcp2) @@ -43,6 +60,7 @@ self.bounds = bounds def _qCurveToOne(self, bcp, pt): + self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, bcp) bounds = updateBounds(bounds, pt) @@ -62,6 +80,7 @@ """ def _curveToOne(self, bcp1, bcp2, pt): + self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, pt) if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds): @@ -70,6 +89,7 @@ self.bounds = bounds def _qCurveToOne(self, bcp, pt): + self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, pt) if not pointInRect(bcp, bounds): diff -Nru fonttools-3.0/Lib/fontTools/pens/boundsPen_test.py fonttools-3.21.2/Lib/fontTools/pens/boundsPen_test.py --- fonttools-3.0/Lib/fontTools/pens/boundsPen_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/boundsPen_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,66 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen -import unittest - - -def draw_(pen): - pen.moveTo((0, 0)) - 
pen.lineTo((0, 100)) - pen.qCurveTo((50, 75), (60, 50), (50, 25), (0, 0)) - pen.curveTo((-50, 25), (-60, 50), (-50, 75), (0, 100)) - pen.closePath() - - -def bounds_(pen): - return " ".join(["%.0f" % c for c in pen.bounds]) - - -class BoundsPenTest(unittest.TestCase): - def test_draw(self): - pen = BoundsPen(None) - draw_(pen) - self.assertEqual("-55 0 58 100", bounds_(pen)) - - def test_empty(self): - pen = BoundsPen(None) - self.assertEqual(None, pen.bounds) - - def test_curve(self): - pen = BoundsPen(None) - pen.moveTo((0, 0)) - pen.curveTo((20, 10), (90, 40), (0, 0)) - self.assertEqual("0 0 45 20", bounds_(pen)) - - def test_quadraticCurve(self): - pen = BoundsPen(None) - pen.moveTo((0, 0)) - pen.qCurveTo((6, 6), (10, 0)) - self.assertEqual("0 0 10 3", bounds_(pen)) - - -class ControlBoundsPenTest(unittest.TestCase): - def test_draw(self): - pen = ControlBoundsPen(None) - draw_(pen) - self.assertEqual("-55 0 60 100", bounds_(pen)) - - def test_empty(self): - pen = ControlBoundsPen(None) - self.assertEqual(None, pen.bounds) - - def test_curve(self): - pen = ControlBoundsPen(None) - pen.moveTo((0, 0)) - pen.curveTo((20, 10), (90, 40), (0, 0)) - self.assertEqual("0 0 90 40", bounds_(pen)) - - def test_quadraticCurve(self): - pen = ControlBoundsPen(None) - pen.moveTo((0, 0)) - pen.qCurveTo((6, 6), (10, 0)) - self.assertEqual("0 0 10 6", bounds_(pen)) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/pens/filterPen.py fonttools-3.21.2/Lib/fontTools/pens/filterPen.py --- fonttools-3.0/Lib/fontTools/pens/filterPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/filterPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,121 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import AbstractPen +from fontTools.pens.recordingPen import RecordingPen + + +class _PassThruComponentsMixin(object): + + def addComponent(self, 
glyphName, transformation): + self._outPen.addComponent(glyphName, transformation) + + +class FilterPen(_PassThruComponentsMixin, AbstractPen): + + """ Base class for pens that apply some transformation to the coordinates + they receive and pass them to another pen. + + You can override any of its methods. The default implementation does + nothing, but passes the commands unmodified to the other pen. + + >>> from fontTools.pens.recordingPen import RecordingPen + >>> rec = RecordingPen() + >>> pen = FilterPen(rec) + >>> v = iter(rec.value) + + >>> pen.moveTo((0, 0)) + >>> next(v) + ('moveTo', ((0, 0),)) + + >>> pen.lineTo((1, 1)) + >>> next(v) + ('lineTo', ((1, 1),)) + + >>> pen.curveTo((2, 2), (3, 3), (4, 4)) + >>> next(v) + ('curveTo', ((2, 2), (3, 3), (4, 4))) + + >>> pen.qCurveTo((5, 5), (6, 6), (7, 7), (8, 8)) + >>> next(v) + ('qCurveTo', ((5, 5), (6, 6), (7, 7), (8, 8))) + + >>> pen.closePath() + >>> next(v) + ('closePath', ()) + + >>> pen.moveTo((9, 9)) + >>> next(v) + ('moveTo', ((9, 9),)) + + >>> pen.endPath() + >>> next(v) + ('endPath', ()) + + >>> pen.addComponent('foo', (1, 0, 0, 1, 0, 0)) + >>> next(v) + ('addComponent', ('foo', (1, 0, 0, 1, 0, 0))) + """ + + def __init__(self, outPen): + self._outPen = outPen + + def moveTo(self, pt): + self._outPen.moveTo(pt) + + def lineTo(self, pt): + self._outPen.lineTo(pt) + + def curveTo(self, *points): + self._outPen.curveTo(*points) + + def qCurveTo(self, *points): + self._outPen.qCurveTo(*points) + + def closePath(self): + self._outPen.closePath() + + def endPath(self): + self._outPen.endPath() + + +class ContourFilterPen(_PassThruComponentsMixin, RecordingPen): + """A "buffered" filter pen that accumulates contour data, passes + it through a ``filterContour`` method when the contour is closed or ended, + and finally draws the result with the output pen. + + Components are passed through unchanged. 
+ """ + + def __init__(self, outPen): + super(ContourFilterPen, self).__init__() + self._outPen = outPen + + def closePath(self): + super(ContourFilterPen, self).closePath() + self._flushContour() + + def endPath(self): + super(ContourFilterPen, self).endPath() + self._flushContour() + + def _flushContour(self): + result = self.filterContour(self.value) + if result is not None: + self.value = result + self.replay(self._outPen) + self.value = [] + + def filterContour(self, contour): + """Subclasses must override this to perform the filtering. + + The contour is a list of pen (operator, operands) tuples. + Operators are strings corresponding to the AbstractPen methods: + "moveTo", "lineTo", "curveTo", "qCurveTo", "closePath" and + "endPath". The operands are the positional arguments that are + passed to each method. + + If the method doesn't return a value (i.e. returns None), it's + assumed that the argument was modified in-place. + Otherwise, the return value is drawn with the output pen. + """ + return # or return contour diff -Nru fonttools-3.0/Lib/fontTools/pens/momentsPen.py fonttools-3.21.2/Lib/fontTools/pens/momentsPen.py --- fonttools-3.0/Lib/fontTools/pens/momentsPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/momentsPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,294 @@ +"""Pen calculating 0th, 1st, and 2nd moments of area of glyph shapes. +This is low-level, autogenerated pen. 
Use statisticsPen instead.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["MomentsPen"] + + +class MomentsPen(BasePen): + + def __init__(self, glyphset=None): + BasePen.__init__(self, glyphset) + + self.area = 0 + self.momentX = 0 + self.momentY = 0 + self.momentXX = 0 + self.momentXY = 0 + self.momentYY = 0 + + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _endPath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + # Green theorem is not defined on open contours. + raise NotImplementedError + + def _lineTo(self, p1): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + + r0 = x1*y0 + r1 = x1*y1 + r2 = x1**2 + r3 = x0**2 + r4 = 2*y0 + r5 = y0 - y1 + r6 = r5*x0 + r7 = y0**2 + r8 = y1**2 + r9 = x1**3 + r10 = r4*y1 + r11 = y0**3 + r12 = y1**3 + + self.area += -r0/2 - r1/2 + x0*(y0 + y1)/2 + self.momentX += -r2*y0/6 - r2*y1/3 + r3*(r4 + y1)/6 - r6*x1/6 + self.momentY += -r0*y1/6 - r7*x1/6 - r8*x1/6 + x0*(r7 + r8 + y0*y1)/6 + self.momentXX += -r2*r6/12 - r3*r5*x1/12 - r9*y0/12 - r9*y1/4 + x0**3*(3*y0 + y1)/12 + self.momentXY += -r10*r2/24 - r2*r7/24 - r2*r8/8 + r3*(r10 + 3*r7 + r8)/24 - x0*x1*(r7 - r8)/12 + self.momentYY += -r0*r8/12 - r1*r7/12 - r11*x1/12 - r12*x1/12 + x0*(r11 + r12 + r7*y1 + r8*y0)/12 + + def _qCurveToOne(self, p1, p2): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 + + r0 = 2*x1 + r1 = r0*y2 + r2 = 2*y1 + r3 = r2*x2 + r4 = 3*y2 + r5 = r4*x2 + r6 = 3*y0 + r7 = x1**2 + r8 = 2*y2 + r9 = x2**2 + r10 = 4*y1 + r11 = 10*y2 + r12 = r0*x2 + r13 = x0**2 + r14 = 10*y0 + r15 = x2*y2 + r16 = r0*y1 + r15 + r17 = 4*x1 + r18 = x2*y0 + r19 = r10*r15 + r20 = y1**2 + r21 = 2*r20 + r22 = y2**2 + r23 = r22*x2 + r24 = 5*r23 + r25 = y0**2 + r26 = y0*y2 + r27 = 5*r25 + r28 = 8*x1**3 + r29 = x2**3 + r30 
= 30*y1 + r31 = 6*y1 + r32 = 10*r9*x1 + r33 = 4*r7 + r34 = 5*y2 + r35 = 12*r7 + r36 = r5 + 20*x1*y1 + r37 = 30*x1 + r38 = 12*x1 + r39 = 20*r7 + r40 = 8*r7*y1 + r41 = r34*r9 + r42 = 60*y1 + r43 = 20*r20 + r44 = 4*r20 + r45 = 15*r22 + r46 = r38*x2 + r47 = y1*y2 + r48 = 8*r20*x1 + r24 + r49 = 6*x1 + r50 = 8*y1**3 + r51 = y2**3 + r52 = y0**3 + r53 = 10*y1 + r54 = 12*y1 + r55 = 12*r20 + + self.area += r1/6 - r3/6 - r5/6 + x0*(r2 + r6 + y2)/6 - y0*(r0 + x2)/6 + self.momentX += -r10*r9/30 - r11*r9/30 - r12*(-r8 + y1)/30 + r13*(r10 + r14 + y2)/30 + r7*r8/30 + x0*(r1 + r16 - r17*y0 - r18)/30 - y0*(r12 + 2*r7 + r9)/30 + self.momentY += r1*(r8 + y1)/30 - r19/30 - r21*x2/30 - r24/30 - r25*(r17 + x2)/30 + x0*(r10*y0 + r2*y2 + r21 + r22 + r26 + r27)/30 - y0*(r16 + r3)/30 + self.momentXX += r13*(r11*x1 - 5*r18 + r3 + r36 - r37*y0)/420 + r28*y2/420 - r29*r30/420 - r29*y2/4 - r32*(r2 - r4)/420 - r33*x2*(r2 - r34)/420 + x0**3*(r31 + 21*y0 + y2)/84 - x0*(-r15*r38 + r18*r38 + r2*r9 - r35*y2 + r39*y0 - r40 - r41 + r6*r9)/420 - y0*(r28 + 5*r29 + r32 + r35*x2)/420 + self.momentXY += r13*(r14*y2 + 3*r22 + 105*r25 + r42*y0 + r43 + 12*r47)/840 - r17*x2*(r44 - r45)/840 - r22*r9/8 - r25*(r39 + r46 + 3*r9)/840 + r33*y2*(r10 + r34)/840 - r42*r9*y2/840 - r43*r9/840 + x0*(-r10*r18 + r17*r26 + r19 + r22*r49 - r25*r37 - r27*x2 + r38*r47 + r48)/420 - y0*(r15*r17 + r31*r9 + r40 + r41 + r46*y1)/420 + self.momentYY += r1*(r11*y1 + r44 + r45)/420 - r15*r43/420 - r23*r30/420 - r25*(r1 + r36 + r53*x2)/420 - r50*x2/420 - r51*x2/12 - r52*(r49 + x2)/84 + x0*(r22*r53 + r22*r6 + r25*r30 + r25*r34 + r26*r54 + r43*y0 + r50 + 5*r51 + 35*r52 + r55*y2)/420 - y0*(-r0*r22 + r15*r54 + r48 + r55*x2)/420 + + def _curveToOne(self, p1, p2, p3): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 + x3,y3 = p3 + + r0 = 6*x2 + r1 = r0*y3 + r2 = 6*y2 + r3 = 10*y3 + r4 = r3*x3 + r5 = 3*x1 + r6 = 3*y1 + r7 = 6*x1 + r8 = 3*x2 + r9 = 6*y1 + r10 = 3*y2 + r11 = x2**2 + r12 = r11*y3 + r13 = 45*r12 + r14 = x3**2 + r15 = 
r14*y2 + r16 = r14*y3 + r17 = x2*x3 + r18 = 15*r17 + r19 = 7*y3 + r20 = x1**2 + r21 = 9*r20 + r22 = x0**2 + r23 = 21*y1 + r24 = 9*r11 + r25 = 9*x2 + r26 = x2*y3 + r27 = 15*r26 + r28 = -r25*y1 + r27 + r29 = r25*y2 + r30 = r9*x3 + r31 = 45*x1 + r32 = x1*x3 + r33 = 45*r20 + r34 = 5*r14 + r35 = x2*y2 + r36 = 18*r35 + r37 = 5*x3 + r38 = r37*y3 + r39 = r31*y1 + r36 + r38 + r40 = x1*y0 + r41 = x1*y3 + r42 = x2*y0 + r43 = x3*y1 + r44 = r10*x3 + r45 = x3*y2*y3 + r46 = y2**2 + r47 = 45*r46 + r48 = r47*x3 + r49 = y3**2 + r50 = r49*x3 + r51 = y1**2 + r52 = 9*r51 + r53 = y0**2 + r54 = 21*x1 + r55 = x3*y2 + r56 = 15*r55 + r57 = 9*y2 + r58 = y2*y3 + r59 = 15*r58 + r60 = 9*r46 + r61 = 3*y3 + r62 = 45*y1 + r63 = r8*y3 + r64 = y0*y1 + r65 = y0*y2 + r66 = 30*r65 + r67 = 5*y3 + r68 = y1*y3 + r69 = 45*r51 + r70 = 5*r49 + r71 = x2**3 + r72 = x3**3 + r73 = 126*x3 + r74 = x1**3 + r75 = r14*x2 + r76 = 63*r11 + r77 = r76*x3 + r78 = 15*r35 + r79 = r19*x3 + r80 = x1*y1 + r81 = 63*r35 + r82 = r38 + 378*r80 + r81 + r83 = x1*y2 + r84 = x2*y1 + r85 = x3*y0 + r86 = x2*x3*y1 + r87 = x2*x3*y3 + r88 = r11*y2 + r89 = 27*r88 + r90 = 42*y3 + r91 = r14*r90 + r92 = 90*x1*x2 + r93 = 189*x2 + r94 = 30*x1*x3 + r95 = 14*r16 + 126*r20*y1 + 45*r88 + r94*y2 + r96 = x1*x2 + r97 = 252*r96 + r98 = x1*x2*y2 + r99 = 42*r32 + r100 = x1*x3*y1 + r101 = 30*r17 + r102 = 18*r17 + r103 = 378*r20 + r104 = 189*y2 + r105 = r20*y3 + r106 = r11*y1 + r107 = r14*y1 + r108 = 378*r46 + r109 = 252*y2 + r110 = y1*y2 + r111 = x2*x3*y2 + r112 = y0*y3 + r113 = 378*r51 + r114 = 63*r46 + r115 = 27*x2 + r116 = r115*r46 + 42*r50 + r117 = x2*y1*y3 + r118 = x3*y1*y2 + r119 = r49*x2 + r120 = r51*x3 + r121 = x3*y3 + r122 = 14*x3 + r123 = 30*r117 + r122*r49 + r47*x2 + 126*r51*x1 + r124 = x1*y1*y3 + r125 = x1*y2*y3 + r126 = x2*y1*y2 + r127 = 54*y3 + r128 = 21*r55 + r129 = 630*r53 + r130 = r46*x1 + r131 = r49*x1 + r132 = 126*r53 + r133 = y2**3 + r134 = y3**3 + r135 = 630*r49 + r136 = y1**3 + r137 = y0**3 + r138 = r114*y3 + r23*r49 + r139 = r49*y2 + 
+ self.area += r1/20 - r2*x3/20 - r4/20 + r5*(y2 + y3)/20 - r6*(x2 + x3)/20 + x0*(r10 + r9 + 10*y0 + y3)/20 - y0*(r7 + r8 + x3)/20 + self.momentX += r13/840 - r15/8 - r16/3 - r18*(r10 - r19)/840 + r21*(r10 + 2*y3)/840 + r22*(r2 + r23 + 56*y0 + y3)/168 + r5*(r28 + r29 - r30 + r4)/840 - r6*(10*r14 + r18 + r24)/840 + x0*(12*r26 + r31*y2 - r37*y0 + r39 - 105*r40 + 15*r41 - 30*r42 - 3*r43 + r44)/840 - y0*(18*r11 + r18 + r31*x2 + 12*r32 + r33 + r34)/840 + self.momentY += r27*(r10 + r19)/840 - r45/8 - r48/840 + r5*(10*r49 + r57*y1 + r59 + r60 + r9*y3)/840 - r50/6 - r52*(r8 + 2*x3)/840 - r53*(r0 + r54 + x3)/168 - r6*(r29 + r4 + r56)/840 + x0*(18*r46 + 140*r53 + r59 + r62*y2 + 105*r64 + r66 + r67*y0 + 12*r68 + r69 + r70)/840 - y0*(r39 + 15*r43 + 12*r55 - r61*x1 + r62*x2 + r63)/840 + self.momentXX += -r11*r73*(-r61 + y2)/9240 + r21*(r28 - r37*y1 + r44 + r78 + r79)/9240 + r22*(21*r26 - 630*r40 + 42*r41 - 126*r42 + r57*x3 + r82 + 210*r83 + 42*r84 - 14*r85)/9240 - r5*(r11*r62 + r14*r23 + 14*r15 - r76*y3 + 54*r86 - 84*r87 - r89 - r91)/9240 - r6*(27*r71 + 42*r72 + 70*r75 + r77)/9240 + 3*r71*y3/220 - 3*r72*y2/44 - r72*y3/4 + 3*r74*(r57 + r67)/3080 - r75*(378*y2 - 630*y3)/9240 + x0**3*(r57 + r62 + 165*y0 + y3)/660 + x0*(-18*r100 - r101*y0 - r101*y1 + r102*y2 - r103*y0 + r104*r20 + 63*r105 - 27*r106 - 9*r107 + r13 - r34*y0 - r76*y0 + 42*r87 + r92*y3 + r94*y3 + r95 - r97*y0 + 162*r98 - r99*y0)/9240 - y0*(135*r11*x1 + r14*r54 + r20*r93 + r33*x3 + 45*r71 + 14*r72 + 126*r74 + 42*r75 + r77 + r92*x3)/9240 + self.momentXY += -r108*r14/18480 + r12*(r109 + 378*y3)/18480 - r14*r49/8 - 3*r14*r58/44 - r17*(252*r46 - 1260*r49)/18480 + r21*(18*r110 + r3*y1 + 15*r46 + 7*r49 + 18*r58)/18480 + r22*(252*r110 + 28*r112 + r113 + r114 + 2310*r53 + 30*r58 + 1260*r64 + 252*r65 + 42*r68 + r70)/18480 - r52*(r102 + 15*r11 + 7*r14)/18480 - r53*(r101 + r103 + r34 + r76 + r97 + r99)/18480 + r7*(-r115*r51 + r116 + 18*r117 - 18*r118 + 42*r119 - 15*r120 + 28*r45 + r81*y3)/18480 - r9*(63*r111 + 42*r15 + 28*r87 + 
r89 + r91)/18480 + x0*(r1*y0 + r104*r80 + r112*r54 + 21*r119 - 9*r120 - r122*r53 + r123 + 54*r124 + 60*r125 + 54*r126 + r127*r35 + r128*y3 - r129*x1 + 81*r130 + 15*r131 - r132*x2 - r2*r85 - r23*r85 + r30*y3 + 84*r40*y2 - 84*r42*y1 + r60*x3)/9240 - y0*(54*r100 - 9*r105 + 81*r106 + 15*r107 + 54*r111 + r121*r7 + 21*r15 + r24*y3 + 60*r86 + 21*r87 + r95 + 189*r96*y1 + 54*r98)/9240 + self.momentYY += -r108*r121/9240 - r133*r73/9240 - r134*x3/12 - r135*r55/9240 - 3*r136*(r25 + r37)/3080 - r137*(r25 + r31 + x3)/660 + r26*(r135 + 126*r46 + 378*y2*y3)/9240 + r5*(r110*r127 + 27*r133 + 42*r134 + r138 + 70*r139 + r46*r62 + 27*r51*y2 + 15*r51*y3)/9240 - r52*(r56 + r63 + r78 + r79)/9240 - r53*(r128 + r25*y3 + 42*r43 + r82 + 42*r83 + 210*r84)/9240 - r6*(r114*x3 + r116 - 14*r119 + 84*r45)/9240 + x0*(r104*r51 + r109*r64 + 90*r110*y3 + r113*y0 + r114*y0 + r129*y1 + r132*y2 + 45*r133 + 14*r134 + 126*r136 + 770*r137 + r138 + 42*r139 + 135*r46*y1 + 14*r53*y3 + r64*r90 + r66*y3 + r69*y3 + r70*y0)/9240 - y0*(90*r118 + 63*r120 + r123 - 18*r124 - 30*r125 + 162*r126 - 27*r130 - 9*r131 + r36*y3 + 30*r43*y3 + 42*r45 + r48 + r51*r93)/9240 + +if __name__ == '__main__': + from fontTools.misc.symfont import x, y, printGreenPen + printGreenPen('MomentsPen', [ + ('area', 1), + ('momentX', x), + ('momentY', y), + ('momentXX', x**2), + ('momentXY', x*y), + ('momentYY', y**2), + ]) diff -Nru fonttools-3.0/Lib/fontTools/pens/perimeterPen.py fonttools-3.21.2/Lib/fontTools/pens/perimeterPen.py --- fonttools-3.0/Lib/fontTools/pens/perimeterPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/perimeterPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +"""Calculate the perimeter of a glyph.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen +from fontTools.misc.bezierTools import splitQuadraticAtT, splitCubicAtT, approximateQuadraticArcLengthC, 
calcQuadraticArcLengthC, approximateCubicArcLengthC +import math + + +__all__ = ["PerimeterPen"] + + +def _distance(p0, p1): + return math.hypot(p0[0] - p1[0], p0[1] - p1[1]) + +def _split_cubic_into_two(p0, p1, p2, p3): + mid = (p0 + 3 * (p1 + p2) + p3) * .125 + deriv3 = (p3 + p2 - p1 - p0) * .125 + return ((p0, (p0 + p1) * .5, mid - deriv3, mid), + (mid, mid + deriv3, (p2 + p3) * .5, p3)) + +class PerimeterPen(BasePen): + + def __init__(self, glyphset=None, tolerance=0.005): + BasePen.__init__(self, glyphset) + self.value = 0 + self._mult = 1.+1.5*tolerance # The 1.5 is a empirical hack; no math + + # Choose which algorithm to use for quadratic and for cubic. + # Quadrature is faster but has fixed error characteristic with no strong + # error bound. The cutoff points are derived empirically. + self._addCubic = self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive + self._addQuadratic = self._addQuadraticQuadrature if tolerance >= 0.00075 else self._addQuadraticExact + + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _lineTo(self, p1): + p0 = self._getCurrentPoint() + self.value += _distance(p0, p1) + + def _addQuadraticExact(self, c0, c1, c2): + self.value += calcQuadraticArcLengthC(c0, c1, c2) + + def _addQuadraticQuadrature(self, c0, c1, c2): + self.value += approximateQuadraticArcLengthC(c0, c1, c2) + + def _qCurveToOne(self, p1, p2): + p0 = self._getCurrentPoint() + self._addQuadratic(complex(*p0), complex(*p1), complex(*p2)) + + def _addCubicRecursive(self, p0, p1, p2, p3): + arch = abs(p0-p3) + box = abs(p0-p1) + abs(p1-p2) + abs(p2-p3) + if arch * self._mult >= box: + self.value += (arch + box) * .5 + else: + one,two = _split_cubic_into_two(p0,p1,p2,p3) + self._addCubicRecursive(*one) + self._addCubicRecursive(*two) + + def _addCubicQuadrature(self, c0, c1, c2, c3): + self.value += 
approximateCubicArcLengthC(c0, c1, c2, c3) + + def _curveToOne(self, p1, p2, p3): + p0 = self._getCurrentPoint() + self._addCubic(complex(*p0), complex(*p1), complex(*p2), complex(*p3)) diff -Nru fonttools-3.0/Lib/fontTools/pens/pointInsidePen.py fonttools-3.21.2/Lib/fontTools/pens/pointInsidePen.py --- fonttools-3.0/Lib/fontTools/pens/pointInsidePen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/pointInsidePen.py 2018-01-08 12:40:40.000000000 +0000 @@ -11,12 +11,6 @@ __all__ = ["PointInsidePen"] -# working around floating point errors -EPSILON = 1e-10 -ONE_PLUS_EPSILON = 1 + EPSILON -ZERO_MINUS_EPSILON = 0 - EPSILON - - class PointInsidePen(BasePen): """This pen implements "point inside" testing: to test whether @@ -46,29 +40,33 @@ # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html # I extended the principles outlined on that page to curves. - def __init__(self, glyphSet, testPoint, evenOdd=0): + def __init__(self, glyphSet, testPoint, evenOdd=False): BasePen.__init__(self, glyphSet) self.setTestPoint(testPoint, evenOdd) - def setTestPoint(self, testPoint, evenOdd=0): + def setTestPoint(self, testPoint, evenOdd=False): """Set the point to test. Call this _before_ the outline gets drawn.""" self.testPoint = testPoint self.evenOdd = evenOdd self.firstPoint = None self.intersectionCount = 0 - def getResult(self): - """After the shape has been drawn, getResult() returns True if the test - point lies within the (black) shape, and False if it doesn't. - """ + def getWinding(self): if self.firstPoint is not None: # always make sure the sub paths are closed; the algorithm only works # for closed paths. self.closePath() + return self.intersectionCount + + def getResult(self): + """After the shape has been drawn, getResult() returns True if the test + point lies within the (black) shape, and False if it doesn't. 
+ """ + winding = self.getWinding() if self.evenOdd: - result = self.intersectionCount % 2 - else: - result = self.intersectionCount + result = winding % 2 + else: # non-zero + result = self.intersectionCount != 0 return not not result def _addIntersection(self, goingUp): @@ -123,7 +121,7 @@ by = (y3 - y2) * 3.0 - cy ay = y4 - dy - cy - by solutions = sorted(solveCubic(ay, by, cy, dy - y)) - solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] + solutions = [t for t in solutions if -0. <= t <= 1.] if not solutions: return @@ -142,29 +140,30 @@ t3 = t2 * t direction = 3*ay*t2 + 2*by*t + cy + incomingGoingUp = outgoingGoingUp = direction > 0.0 if direction == 0.0: direction = 6*ay*t + 2*by + outgoingGoingUp = direction > 0.0 + incomingGoingUp = not outgoingGoingUp if direction == 0.0: direction = ay - goingUp = direction > 0.0 + incomingGoingUp = outgoingGoingUp = direction > 0.0 xt = ax*t3 + bx*t2 + cx*t + dx if xt < x: - above = goingUp continue - if t == 0.0: - if not goingUp: - self._addIntersection(goingUp) + if t in (0.0, -0.0): + if not outgoingGoingUp: + self._addIntersection(outgoingGoingUp) elif t == 1.0: - if not above: - self._addIntersection(goingUp) + if incomingGoingUp: + self._addIntersection(incomingGoingUp) else: - if above != goingUp: - self._addIntersection(goingUp) + if incomingGoingUp == outgoingGoingUp: + self._addIntersection(outgoingGoingUp) #else: - # we're not really intersecting, merely touching the 'top' - above = goingUp + # we're not really intersecting, merely touching def _qCurveToOne_unfinished(self, bcp, point): # XXX need to finish this, for now doing it through a cubic @@ -188,4 +187,6 @@ self.lineTo(self.firstPoint) self.firstPoint = None - _endPath = _closePath + def _endPath(self): + """Insideness is not defined for open contours.""" + raise NotImplementedError diff -Nru fonttools-3.0/Lib/fontTools/pens/pointInsidePen_test.py fonttools-3.21.2/Lib/fontTools/pens/pointInsidePen_test.py --- 
fonttools-3.0/Lib/fontTools/pens/pointInsidePen_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/pointInsidePen_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.pointInsidePen import PointInsidePen -import unittest - - -class PointInsidePenTest(unittest.TestCase): - def test_line(self): - def draw_triangles(pen): - pen.moveTo((0,0)); pen.lineTo((10,5)); pen.lineTo((10,0)) - pen.moveTo((9,1)); pen.lineTo((4,1)); pen.lineTo((9,4)) - pen.closePath() - - self.assertEqual( - " *********" - " ** *" - " ** *" - " * *" - " *", - self.render(draw_triangles, even_odd=True)) - - self.assertEqual( - " *********" - " *******" - " *****" - " ***" - " *", - self.render(draw_triangles, even_odd=False)) - - def test_curve(self): - def draw_curves(pen): - pen.moveTo((0,0)); pen.curveTo((9,1), (9,4), (0,5)) - pen.moveTo((10,5)); pen.curveTo((1,4), (1,1), (10,0)) - pen.closePath() - - self.assertEqual( - "*** ***" - "**** ****" - "*** ***" - "**** ****" - "*** ***", - self.render(draw_curves, even_odd=True)) - - self.assertEqual( - "*** ***" - "**********" - "**********" - "**********" - "*** ***", - self.render(draw_curves, even_odd=False)) - - def test_qCurve(self): - def draw_qCurves(pen): - pen.moveTo((0,0)); pen.qCurveTo((15,2), (0,5)) - pen.moveTo((10,5)); pen.qCurveTo((-5,3), (10,0)) - pen.closePath() - - self.assertEqual( - "*** **" - "**** ***" - "*** ***" - "*** ****" - "** ***", - self.render(draw_qCurves, even_odd=True)) - - self.assertEqual( - "*** **" - "**********" - "**********" - "**********" - "** ***", - self.render(draw_qCurves, even_odd=False)) - - @staticmethod - def render(draw_function, even_odd): - result = BytesIO() - for y in range(5): - for x in range(10): - pen = PointInsidePen(None, (x + 0.5, y + 0.5), even_odd) - draw_function(pen) - if pen.getResult(): - result.write(b"*") - else: - 
result.write(b" ") - return tounicode(result.getvalue()) - - -if __name__ == "__main__": - unittest.main() - diff -Nru fonttools-3.0/Lib/fontTools/pens/qtPen.py fonttools-3.21.2/Lib/fontTools/pens/qtPen.py --- fonttools-3.0/Lib/fontTools/pens/qtPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/qtPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -24,5 +24,8 @@ def _curveToOne(self, p1, p2, p3): self.path.cubicTo(*p1+p2+p3) + def _qCurveToOne(self, p1, p2): + self.path.quadTo(*p1+p2) + def _closePath(self): self.path.closeSubpath() diff -Nru fonttools-3.0/Lib/fontTools/pens/recordingPen.py fonttools-3.21.2/Lib/fontTools/pens/recordingPen.py --- fonttools-3.0/Lib/fontTools/pens/recordingPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/recordingPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,101 @@ +"""Pen recording operations that can be accessed or replayed.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import AbstractPen, DecomposingPen + + +__all__ = ["replayRecording", "RecordingPen", "DecomposingRecordingPen"] + + +def replayRecording(recording, pen): + """Replay a recording, as produced by RecordingPen or DecomposingRecordingPen, + to a pen. + + Note that recording does not have to be produced by those pens. + It can be any iterable of tuples of method name and tuple-of-arguments. + Likewise, pen can be any objects receiving those method calls. + """ + for operator,operands in recording: + getattr(pen, operator)(*operands) + + +class RecordingPen(AbstractPen): + """Pen recording operations that can be accessed or replayed. + + The recording can be accessed as pen.value; or replayed using + pen.replay(otherPen). 
+ + Usage example: + ============== + from fontTools.ttLib import TTFont + from fontTools.pens.recordingPen import RecordingPen + + glyph_name = 'dollar' + font_path = 'MyFont.otf' + + font = TTFont(font_path) + glyphset = font.getGlyphSet() + glyph = glyphset[glyph_name] + + pen = RecordingPen() + glyph.draw(pen) + print(pen.value) + """ + + def __init__(self): + self.value = [] + def moveTo(self, p0): + self.value.append(('moveTo', (p0,))) + def lineTo(self, p1): + self.value.append(('lineTo', (p1,))) + def qCurveTo(self, *points): + self.value.append(('qCurveTo', points)) + def curveTo(self, *points): + self.value.append(('curveTo', points)) + def closePath(self): + self.value.append(('closePath', ())) + def endPath(self): + self.value.append(('endPath', ())) + def addComponent(self, glyphName, transformation): + self.value.append(('addComponent', (glyphName, transformation))) + def replay(self, pen): + replayRecording(self.value, pen) + + +class DecomposingRecordingPen(DecomposingPen, RecordingPen): + """ Same as RecordingPen, except that it doesn't keep components + as references, but draws them decomposed as regular contours. + + The constructor takes a single 'glyphSet' positional argument, + a dictionary of glyph objects (i.e. with a 'draw' method) keyed + by thir name. + + >>> class SimpleGlyph(object): + ... def draw(self, pen): + ... pen.moveTo((0, 0)) + ... pen.curveTo((1, 1), (2, 2), (3, 3)) + ... pen.closePath() + >>> class CompositeGlyph(object): + ... def draw(self, pen): + ... pen.addComponent('a', (1, 0, 0, 1, -1, 1)) + >>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()} + >>> for name, glyph in sorted(glyphSet.items()): + ... pen = DecomposingRecordingPen(glyphSet) + ... glyph.draw(pen) + ... 
print("{}: {}".format(name, pen.value)) + a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())] + b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())] + """ + # raises KeyError if base glyph is not found in glyphSet + skipMissingComponents = False + + +if __name__ == "__main__": + from fontTools.pens.basePen import _TestPen + pen = RecordingPen() + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25)) + pen.closePath() + from pprint import pprint + pprint(pen.value) diff -Nru fonttools-3.0/Lib/fontTools/pens/reportLabPen.py fonttools-3.21.2/Lib/fontTools/pens/reportLabPen.py --- fonttools-3.0/Lib/fontTools/pens/reportLabPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/reportLabPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -4,6 +4,9 @@ from reportlab.graphics.shapes import Path +__all__ = ["ReportLabPen"] + + class ReportLabPen(BasePen): """A pen for drawing onto a reportlab.graphics.shapes.Path object.""" diff -Nru fonttools-3.0/Lib/fontTools/pens/reverseContourPen.py fonttools-3.21.2/Lib/fontTools/pens/reverseContourPen.py --- fonttools-3.0/Lib/fontTools/pens/reverseContourPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/reverseContourPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,97 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.arrayTools import pairwise +from fontTools.pens.filterPen import ContourFilterPen + + +__all__ = ["reversedContour", "ReverseContourPen"] + + +class ReverseContourPen(ContourFilterPen): + """Filter pen that passes outline data to another pen, but reversing + the winding direction of all contours. Components are simply passed + through unchanged. + + Closed contours are reversed in such a way that the first point remains + the first point. 
+ """ + + def filterContour(self, contour): + return reversedContour(contour) + + +def reversedContour(contour): + """ Generator that takes a list of pen's (operator, operands) tuples, + and yields them with the winding direction reversed. + """ + if not contour: + return # nothing to do, stop iteration + + # valid contours must have at least a starting and ending command, + # can't have one without the other + assert len(contour) > 1, "invalid contour" + + # the type of the last command determines if the contour is closed + contourType = contour.pop()[0] + assert contourType in ("endPath", "closePath") + closed = contourType == "closePath" + + firstType, firstPts = contour.pop(0) + assert firstType in ("moveTo", "qCurveTo"), ( + "invalid initial segment type: %r" % firstType) + firstOnCurve = firstPts[-1] + if firstType == "qCurveTo": + # special case for TrueType paths contaning only off-curve points + assert firstOnCurve is None, ( + "off-curve only paths must end with 'None'") + assert not contour, ( + "only one qCurveTo allowed per off-curve path") + firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) + + (None,)) + + if not contour: + # contour contains only one segment, nothing to reverse + if firstType == "moveTo": + closed = False # single-point paths can't be closed + else: + closed = True # off-curve paths are closed by definition + yield firstType, firstPts + else: + lastType, lastPts = contour[-1] + lastOnCurve = lastPts[-1] + if closed: + # for closed paths, we keep the starting point + yield firstType, firstPts + if firstOnCurve != lastOnCurve: + # emit an implied line between the last and first points + yield "lineTo", (lastOnCurve,) + contour[-1] = (lastType, + tuple(lastPts[:-1]) + (firstOnCurve,)) + + if len(contour) > 1: + secondType, secondPts = contour[0] + else: + # contour has only two points, the second and last are the same + secondType, secondPts = lastType, lastPts + # if a lineTo follows the initial moveTo, after reversing it + 
# will be implied by the closePath, so we don't emit one; + # unless the lineTo and moveTo overlap, in which case we keep the + # duplicate points + if secondType == "lineTo" and firstPts != secondPts: + del contour[0] + if contour: + contour[-1] = (lastType, + tuple(lastPts[:-1]) + secondPts) + else: + # for open paths, the last point will become the first + yield firstType, (lastOnCurve,) + contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,)) + + # we iterate over all segment pairs in reverse order, and yield + # each one with the off-curve points reversed (if any), and + # with the on-curve point of the following segment + for (curType, curPts), (_, nextPts) in pairwise( + contour, reverse=True): + yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],) + + yield "closePath" if closed else "endPath", () diff -Nru fonttools-3.0/Lib/fontTools/pens/statisticsPen.py fonttools-3.21.2/Lib/fontTools/pens/statisticsPen.py --- fonttools-3.0/Lib/fontTools/pens/statisticsPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/statisticsPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,102 @@ +"""Pen calculating area, center of mass, variance and standard-deviation, +covariance and correlation, and slant, of glyph shapes.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import math +from fontTools.pens.momentsPen import MomentsPen + +__all__ = ["StatisticsPen"] + + +class StatisticsPen(MomentsPen): + + """Pen calculating area, center of mass, variance and + standard-deviation, covariance and correlation, and slant, + of glyph shapes. + + Note that all the calculated values are 'signed'. Ie. if the + glyph shape is self-intersecting, the values are not correct + (but well-defined). As such, area will be negative if contour + directions are clockwise. 
Moreover, variance might be negative + if the shapes are self-intersecting in certain ways.""" + + def __init__(self, glyphset=None): + MomentsPen.__init__(self, glyphset=glyphset) + self.__zero() + + def _closePath(self): + MomentsPen._closePath(self) + self.__update() + + def __zero(self): + self.meanX = 0 + self.meanY = 0 + self.varianceX = 0 + self.varianceY = 0 + self.stddevX = 0 + self.stddevY = 0 + self.covariance = 0 + self.correlation = 0 + self.slant = 0 + + def __update(self): + + area = self.area + if not area: + self.__zero() + return + + # Center of mass + # https://en.wikipedia.org/wiki/Center_of_mass#A_continuous_volume + self.meanX = meanX = self.momentX / area + self.meanY = meanY = self.momentY / area + + # Var(X) = E[X^2] - E[X]^2 + self.varianceX = varianceX = self.momentXX / area - meanX**2 + self.varianceY = varianceY = self.momentYY / area - meanY**2 + + self.stddevX = stddevX = math.copysign(abs(varianceX)**.5, varianceX) + self.stddevY = stddevY = math.copysign(abs(varianceY)**.5, varianceY) + + # Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] ) + self.covariance = covariance = self.momentXY / area - meanX*meanY + + # Correlation(X,Y) = Covariance(X,Y) / ( stddev(X) * stddev(Y) ) + # https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient + correlation = covariance / (stddevX * stddevY) + self.correlation = correlation if abs(correlation) > 1e-3 else 0 + + slant = covariance / varianceY + self.slant = slant if abs(slant) > 1e-3 else 0 + + +def _test(glyphset, upem, glyphs): + from fontTools.pens.transformPen import TransformPen + from fontTools.misc.transform import Scale + + print('upem', upem) + + for glyph_name in glyphs: + print() + print("glyph:", glyph_name) + glyph = glyphset[glyph_name] + pen = StatisticsPen(glyphset=glyphset) + transformer = TransformPen(pen, Scale(1./upem)) + glyph.draw(transformer) + for item in ['area', 'momentX', 'momentY', 'momentXX', 'momentYY', 'momentXY', 'meanX', 'meanY', 'varianceX', 
'varianceY', 'stddevX', 'stddevY', 'covariance', 'correlation', 'slant']: + if item[0] == '_': continue + print ("%s: %g" % (item, getattr(pen, item))) + +def main(args): + if not args: + return + filename, glyphs = args[0], args[1:] + if not glyphs: + glyphs = ['e', 'o', 'I', 'slash', 'E', 'zero', 'eight', 'minus', 'equal'] + from fontTools.ttLib import TTFont + font = TTFont(filename) + _test(font.getGlyphSet(), font['head'].unitsPerEm, glyphs) + +if __name__ == '__main__': + import sys + main(sys.argv[1:]) diff -Nru fonttools-3.0/Lib/fontTools/pens/svgPathPen.py fonttools-3.21.2/Lib/fontTools/pens/svgPathPen.py --- fonttools-3.0/Lib/fontTools/pens/svgPathPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/svgPathPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,178 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +def pointToString(pt): + return " ".join([str(i) for i in pt]) + + +class SVGPathPen(BasePen): + + def __init__(self, glyphSet): + BasePen.__init__(self, glyphSet) + self._commands = [] + self._lastCommand = None + self._lastX = None + self._lastY = None + + def _handleAnchor(self): + """ + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 0)) + >>> pen.moveTo((10, 10)) + >>> pen._commands + ['M10 10'] + """ + if self._lastCommand == "M": + self._commands.pop(-1) + + def _moveTo(self, pt): + """ + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 0)) + >>> pen._commands + ['M0 0'] + + >>> pen = SVGPathPen(None) + >>> pen.moveTo((10, 0)) + >>> pen._commands + ['M10 0'] + + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 10)) + >>> pen._commands + ['M0 10'] + """ + self._handleAnchor() + t = "M%s" % (pointToString(pt)) + self._commands.append(t) + self._lastCommand = "M" + self._lastX, self._lastY = pt + + def _lineTo(self, pt): + """ + # duplicate point + >>> pen = SVGPathPen(None) + >>> pen.moveTo((10, 10)) + >>> 
pen.lineTo((10, 10)) + >>> pen._commands + ['M10 10'] + + # vertical line + >>> pen = SVGPathPen(None) + >>> pen.moveTo((10, 10)) + >>> pen.lineTo((10, 0)) + >>> pen._commands + ['M10 10', 'V0'] + + # horizontal line + >>> pen = SVGPathPen(None) + >>> pen.moveTo((10, 10)) + >>> pen.lineTo((0, 10)) + >>> pen._commands + ['M10 10', 'H0'] + + # basic + >>> pen = SVGPathPen(None) + >>> pen.lineTo((70, 80)) + >>> pen._commands + ['L70 80'] + + # basic following a moveto + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 0)) + >>> pen.lineTo((10, 10)) + >>> pen._commands + ['M0 0', ' 10 10'] + """ + x, y = pt + # duplicate point + if x == self._lastX and y == self._lastY: + return + # vertical line + elif x == self._lastX: + cmd = "V" + pts = str(y) + # horizontal line + elif y == self._lastY: + cmd = "H" + pts = str(x) + # previous was a moveto + elif self._lastCommand == "M": + cmd = None + pts = " " + pointToString(pt) + # basic + else: + cmd = "L" + pts = pointToString(pt) + # write the string + t = "" + if cmd: + t += cmd + self._lastCommand = cmd + t += pts + self._commands.append(t) + # store for future reference + self._lastX, self._lastY = pt + + def _curveToOne(self, pt1, pt2, pt3): + """ + >>> pen = SVGPathPen(None) + >>> pen.curveTo((10, 20), (30, 40), (50, 60)) + >>> pen._commands + ['C10 20 30 40 50 60'] + """ + t = "C" + t += pointToString(pt1) + " " + t += pointToString(pt2) + " " + t += pointToString(pt3) + self._commands.append(t) + self._lastCommand = "C" + self._lastX, self._lastY = pt3 + + def _qCurveToOne(self, pt1, pt2): + """ + >>> pen = SVGPathPen(None) + >>> pen.qCurveTo((10, 20), (30, 40)) + >>> pen._commands + ['Q10 20 30 40'] + """ + assert pt2 is not None + t = "Q" + t += pointToString(pt1) + " " + t += pointToString(pt2) + self._commands.append(t) + self._lastCommand = "Q" + self._lastX, self._lastY = pt2 + + def _closePath(self): + """ + >>> pen = SVGPathPen(None) + >>> pen.closePath() + >>> pen._commands + ['Z'] + """ + 
self._commands.append("Z") + self._lastCommand = "Z" + self._lastX = self._lastY = None + + def _endPath(self): + """ + >>> pen = SVGPathPen(None) + >>> pen.endPath() + >>> pen._commands + ['Z'] + """ + self._closePath() + self._lastCommand = None + self._lastX = self._lastY = None + + def getCommands(self): + return "".join(self._commands) + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/pens/t2CharStringPen.py fonttools-3.21.2/Lib/fontTools/pens/t2CharStringPen.py --- fonttools-3.0/Lib/fontTools/pens/t2CharStringPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/t2CharStringPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,90 @@ +# Copyright (c) 2009 Type Supply LLC +# Author: Tal Leming + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.psCharStrings import T2CharString +from fontTools.pens.basePen import BasePen +from fontTools.cffLib.specializer import specializeCommands, commandsToProgram + + +def makeRoundFunc(tolerance): + if tolerance < 0: + raise ValueError("Rounding tolerance must be positive") + + def _round(number): + if tolerance == 0: + return number # no-op + rounded = round(number) + # return rounded integer if the tolerance >= 0.5, or if the absolute + # difference between the original float and the rounded integer is + # within the tolerance + if tolerance >= .5 or abs(rounded - number) <= tolerance: + return rounded + else: + # else return the value un-rounded + return number + + def roundPoint(point): + x, y = point + return _round(x), _round(y) + + return roundPoint + + +class T2CharStringPen(BasePen): + """Pen to draw Type 2 CharStrings. + + The 'roundTolerance' argument controls the rounding of point coordinates. + It is defined as the maximum absolute difference between the original + float and the rounded integer value. 
+ The default tolerance of 0.5 means that all floats are rounded to integer; + a value of 0 disables rounding; values in between will only round floats + which are close to their integral part within the tolerated range. + """ + + def __init__(self, width, glyphSet, roundTolerance=0.5, CFF2=False): + super(T2CharStringPen, self).__init__(glyphSet) + self.roundPoint = makeRoundFunc(roundTolerance) + self._CFF2 = CFF2 + self._width = width + self._commands = [] + self._p0 = (0,0) + + def _p(self, pt): + p0 = self._p0 + pt = self._p0 = self.roundPoint(pt) + return [pt[0]-p0[0], pt[1]-p0[1]] + + def _moveTo(self, pt): + self._commands.append(('rmoveto', self._p(pt))) + + def _lineTo(self, pt): + self._commands.append(('rlineto', self._p(pt))) + + def _curveToOne(self, pt1, pt2, pt3): + _p = self._p + self._commands.append(('rrcurveto', _p(pt1)+_p(pt2)+_p(pt3))) + + def _closePath(self): + pass + + def _endPath(self): + pass + + def getCharString(self, private=None, globalSubrs=None, optimize=True): + commands = self._commands + if optimize: + maxstack = 48 if not self._CFF2 else 513 + commands = specializeCommands(commands, + generalizeFirst=False, + maxstack=maxstack) + program = commandsToProgram(commands) + if self._width is not None: + assert not self._CFF2, "CFF2 does not allow encoding glyph width in CharString." 
+ program.insert(0, round(self._width)) + if not self._CFF2: + program.append('endchar') + charString = T2CharString( + program=program, private=private, globalSubrs=globalSubrs) + return charString diff -Nru fonttools-3.0/Lib/fontTools/pens/teePen.py fonttools-3.21.2/Lib/fontTools/pens/teePen.py --- fonttools-3.0/Lib/fontTools/pens/teePen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/teePen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ +"""Pen multiplexing drawing to one or more pens.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import AbstractPen + + +__all__ = ["TeePen"] + + +class TeePen(AbstractPen): + """Pen multiplexing drawing to one or more pens. + + Use either as TeePen(pen1, pen2, ...) or TeePen(iterableOfPens).""" + + def __init__(self, *pens): + if len(pens) == 1: + pens = pens[0] + self.pens = pens + def moveTo(self, p0): + for pen in self.pens: + pen.moveTo(p0) + def lineTo(self, p1): + for pen in self.pens: + pen.lineTo(p1) + def qCurveTo(self, *points): + for pen in self.pens: + pen.qCurveTo(*points) + def curveTo(self, *points): + for pen in self.pens: + pen.curveTo(*points) + def closePath(self): + for pen in self.pens: + pen.closePath() + def endPath(self): + for pen in self.pens: + pen.endPath() + def addComponent(self, glyphName, transformation): + for pen in self.pens: + pen.addComponent(glyphName, transformation) + + +if __name__ == "__main__": + from fontTools.pens.basePen import _TestPen + pen = TeePen(_TestPen(), _TestPen()) + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25)) + pen.closePath() diff -Nru fonttools-3.0/Lib/fontTools/pens/transformPen.py fonttools-3.21.2/Lib/fontTools/pens/transformPen.py --- fonttools-3.0/Lib/fontTools/pens/transformPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/transformPen.py 2018-01-08 12:40:40.000000000 
+0000 @@ -1,12 +1,12 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from fontTools.pens.basePen import AbstractPen +from fontTools.pens.filterPen import FilterPen __all__ = ["TransformPen"] -class TransformPen(AbstractPen): +class TransformPen(FilterPen): """Pen that transforms all coordinates using a Affine transformation, and passes them to another pen. @@ -17,12 +17,12 @@ transformed coordinates. The 'transformation' argument can either be a six-tuple, or a fontTools.misc.transform.Transform object. """ + super(TransformPen, self).__init__(outPen) if not hasattr(transformation, "transformPoint"): from fontTools.misc.transform import Transform transformation = Transform(*transformation) self._transformation = transformation self._transformPoint = transformation.transformPoint - self._outPen = outPen self._stack = [] def moveTo(self, pt): @@ -42,15 +42,15 @@ self._outPen.qCurveTo(*points) def _transformPoints(self, points): - new = [] transformPoint = self._transformPoint - for pt in points: - new.append(transformPoint(pt)) - return new + return [transformPoint(pt) for pt in points] def closePath(self): self._outPen.closePath() + def endPath(self): + self._outPen.endPath() + def addComponent(self, glyphName, transformation): transformation = self._transformation.transform(transformation) self._outPen.addComponent(glyphName, transformation) diff -Nru fonttools-3.0/Lib/fontTools/pens/ttGlyphPen.py fonttools-3.21.2/Lib/fontTools/pens/ttGlyphPen.py --- fonttools-3.0/Lib/fontTools/pens/ttGlyphPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/ttGlyphPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,115 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from array import array +from fontTools.pens.basePen import AbstractPen +from fontTools.pens.transformPen import TransformPen +from fontTools.ttLib.tables import ttProgram 
+from fontTools.ttLib.tables._g_l_y_f import Glyph +from fontTools.ttLib.tables._g_l_y_f import GlyphComponent +from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates + + +__all__ = ["TTGlyphPen"] + + +class TTGlyphPen(AbstractPen): + """Pen used for drawing to a TrueType glyph.""" + + def __init__(self, glyphSet): + self.glyphSet = glyphSet + self.init() + + def init(self): + self.points = [] + self.endPts = [] + self.types = [] + self.components = [] + + def _addPoint(self, pt, onCurve): + self.points.append(pt) + self.types.append(onCurve) + + def _popPoint(self): + self.points.pop() + self.types.pop() + + def _isClosed(self): + return ( + (not self.points) or + (self.endPts and self.endPts[-1] == len(self.points) - 1)) + + def lineTo(self, pt): + self._addPoint(pt, 1) + + def moveTo(self, pt): + assert self._isClosed(), '"move"-type point must begin a new contour.' + self._addPoint(pt, 1) + + def qCurveTo(self, *points): + assert len(points) >= 1 + for pt in points[:-1]: + self._addPoint(pt, 0) + + # last point is None if there are no on-curve points + if points[-1] is not None: + self._addPoint(points[-1], 1) + + def closePath(self): + endPt = len(self.points) - 1 + + # ignore anchors (one-point paths) + if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1): + self._popPoint() + return + + # if first and last point on this path are the same, remove last + startPt = 0 + if self.endPts: + startPt = self.endPts[-1] + 1 + if self.points[startPt] == self.points[endPt]: + self._popPoint() + endPt -= 1 + + self.endPts.append(endPt) + + def endPath(self): + # TrueType contours are always "closed" + self.closePath() + + def addComponent(self, glyphName, transformation): + self.components.append((glyphName, transformation)) + + def glyph(self, componentFlags=0x4): + assert self._isClosed(), "Didn't close last contour." 
+ + components = [] + for glyphName, transformation in self.components: + if self.points: + # can't have both, so decompose the glyph + tpen = TransformPen(self, transformation) + self.glyphSet[glyphName].draw(tpen) + continue + + component = GlyphComponent() + component.glyphName = glyphName + if transformation[:4] != (1, 0, 0, 1): + component.transform = (transformation[:2], transformation[2:4]) + component.x, component.y = transformation[4:] + component.flags = componentFlags + components.append(component) + + glyph = Glyph() + glyph.coordinates = GlyphCoordinates(self.points) + glyph.endPtsOfContours = self.endPts + glyph.flags = array("B", self.types) + self.init() + + if components: + glyph.components = components + glyph.numberOfContours = -1 + else: + glyph.numberOfContours = len(glyph.endPtsOfContours) + glyph.program = ttProgram.Program() + glyph.program.fromBytecode(b"") + + return glyph diff -Nru fonttools-3.0/Lib/fontTools/pens/wxPen.py fonttools-3.21.2/Lib/fontTools/pens/wxPen.py --- fonttools-3.0/Lib/fontTools/pens/wxPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/pens/wxPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,31 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["WxPen"] + + +class WxPen(BasePen): + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + import wx + path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath() + self.path = path + + def _moveTo(self, p): + self.path.MoveToPoint(*p) + + def _lineTo(self, p): + self.path.AddLineToPoint(*p) + + def _curveToOne(self, p1, p2, p3): + self.path.AddCurveToPoint(*p1+p2+p3) + + def _qCurveToOne(self, p1, p2): + self.path.AddQuadCurveToPoint(*p1+p2) + + def _closePath(self): + self.path.CloseSubpath() diff -Nru fonttools-3.0/Lib/fontTools/subset/__init__.py 
fonttools-3.21.2/Lib/fontTools/subset/__init__.py --- fonttools-3.0/Lib/fontTools/subset/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/subset/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,3146 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.ttLib.tables import otTables +from fontTools.misc import psCharStrings +from fontTools.pens.basePen import NullPen +from fontTools.misc.loggingTools import Timer +import sys +import struct +import array +import logging +from collections import Counter +from types import MethodType + +__usage__ = "pyftsubset font-file [glyph...] [--option=value]..." + +__doc__="""\ +pyftsubset -- OpenType font subsetter and optimizer + + pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. + It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) + font file. The subsetted glyph set is based on the specified glyphs + or characters, and specified OpenType layout features. + + The tool also performs some size-reducing optimizations, aimed for using + subset fonts as webfonts. Individual optimizations can be enabled or + disabled, and are enabled by default when they are safe. + +Usage: + """+__usage__+""" + + At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, + --text, --text-file, --unicodes, or --unicodes-file, must be specified. + +Arguments: + font-file + The input font file. + glyph + Specify one or more glyph identifiers to include in the subset. Must be + PS glyph names, or the special string '*' to keep the entire glyph set. + +Initial glyph set specification: + These options populate the initial glyph set. Same option can appear + multiple times, and the results are accummulated. + --gids=[,...] 
+ Specify comma/whitespace-separated list of glyph IDs or ranges as + decimal numbers. For example, --gids=10-12,14 adds glyphs with + numbers 10, 11, 12, and 14. + --gids-file= + Like --gids but reads from a file. Anything after a '#' on any line + is ignored as comments. + --glyphs=[,...] + Specify comma/whitespace-separated PS glyph names to add to the subset. + Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc + that are accepted on the command line. The special string '*' will keep + the entire glyph set. + --glyphs-file= + Like --glyphs but reads from a file. Anything after a '#' on any line + is ignored as comments. + --text= + Specify characters to include in the subset, as UTF-8 string. + --text-file= + Like --text but reads from a file. Newline character are not added to + the subset. + --unicodes=[,...] + Specify comma/whitespace-separated list of Unicode codepoints or + ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. + For example, --unicodes=41-5a,61-7a adds ASCII letters, so does + the more verbose --unicodes=U+0041-005A,U+0061-007A. + The special strings '*' will choose all Unicode characters mapped + by the font. + --unicodes-file= + Like --unicodes, but reads from a file. Anything after a '#' on any + line in the file is ignored as comments. + --ignore-missing-glyphs + Do not fail if some requested glyphs or gids are not available in + the font. + --no-ignore-missing-glyphs + Stop and fail if some requested glyphs or gids are not available + in the font. [default] + --ignore-missing-unicodes [default] + Do not fail if some requested Unicode characters (including those + indirectly specified using --text or --text-file) are not available + in the font. + --no-ignore-missing-unicodes + Stop and fail if some requested Unicode characters are not available + in the font. + Note the default discrepancy between ignoring missing glyphs versus + unicodes. 
This is for historical reasons and in the future + --no-ignore-missing-unicodes might become default. + +Other options: + For the other options listed below, to see the current value of the option, + pass a value of '?' to it, with or without a '='. + Examples: + $ pyftsubset --glyph-names? + Current setting for 'glyph-names' is: False + $ ./pyftsubset --name-IDs=? + Current setting for 'name-IDs' is: [1, 2] + $ ./pyftsubset --hinting? --no-hinting --hinting? + Current setting for 'hinting' is: True + Current setting for 'hinting' is: False + +Output options: + --output-file= + The output font file. If not specified, the subsetted font + will be saved in as font-file.subset. + --flavor= + Specify flavor of output font file. May be 'woff' or 'woff2'. + Note that WOFF2 requires the Brotli Python extension, available + at https://github.com/google/brotli + --with-zopfli + Use the Google Zopfli algorithm to compress WOFF. The output is 3-8 % + smaller than pure zlib, but the compression speed is much slower. + The Zopfli Python bindings are available at: + https://pypi.python.org/pypi/zopfli + +Glyph set expansion: + These options control how additional glyphs are added to the subset. + --notdef-glyph + Add the '.notdef' glyph to the subset (ie, keep it). [default] + --no-notdef-glyph + Drop the '.notdef' glyph unless specified in the glyph set. This + saves a few bytes, but is not possible for Postscript-flavored + fonts, as those require '.notdef'. For TrueType-flavored fonts, + this works fine as long as no unsupported glyphs are requested + from the font. + --notdef-outline + Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is + used when glyphs not supported by the font are to be shown. It is not + needed otherwise. + --no-notdef-outline + When including a '.notdef' glyph, remove its outline. This saves + a few bytes. 
[default] + --recommended-glyphs + Add glyphs 0, 1, 2, and 3 to the subset, as recommended for + TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. + Some legacy software might require this, but no modern system does. + --no-recommended-glyphs + Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in + glyph set. [default] + --layout-features[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of OpenType layout feature tags that will be preserved. + Glyph variants used by the preserved features are added to the + specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', + 'dnom', 'frac', 'kern', 'liga', 'locl', 'mark', 'mkmk', 'numr', 'rclt', + 'rlig', 'rvrn', and all features required for script shaping are + preserved. To see the full list, try '--layout-features=?'. + Use '*' to keep all features. + Multiple --layout-features options can be provided if necessary. + Examples: + --layout-features+=onum,pnum,ss01 + * Keep the default set of features and 'onum', 'pnum', 'ss01'. + --layout-features-='mark','mkmk' + * Keep the default set of features but drop 'mark' and 'mkmk'. + --layout-features='kern' + * Only keep the 'kern' feature, drop all others. + --layout-features='' + * Drop all features. + --layout-features='*' + * Keep all features. + --layout-features+=aalt --layout-features-=vrt2 + * Keep default set of features plus 'aalt', but drop 'vrt2'. + +Hinting options: + --hinting + Keep hinting [default] + --no-hinting + Drop glyph-specific hinting and font-wide hinting tables, as well + as remove hinting-related bits and pieces from other tables (eg. GPOS). + See --hinting-tables for list of tables that are dropped by default. + Instructions and hints are stripped from 'glyf' and 'CFF ' tables + respectively. This produces (sometimes up to 30%) smaller fonts that + are suitable for extremely high-resolution systems, like high-end + mobile devices and retina displays. 
+ +Optimization options: + --desubroutinize + Remove CFF use of subroutinizes. Subroutinization is a way to make CFF + fonts smaller. For small subsets however, desubroutinizing might make + the font smaller. It has even been reported that desubroutinized CFF + fonts compress better (produce smaller output) WOFF and WOFF2 fonts. + Also see note under --no-hinting. + --no-desubroutinize [default] + Leave CFF subroutinizes as is, only throw away unused subroutinizes. + +Font table options: + --drop-tables[+|-]=[,
...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of tables that will be be dropped. + By default, the following tables are dropped: + 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH' + and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill' + and color tables: 'CBLC', 'CBDT', 'sbix'. + The tool will attempt to subset the remaining tables. + Examples: + --drop-tables-='SVG ' + * Drop the default set of tables but keep 'SVG '. + --drop-tables+=GSUB + * Drop the default set of tables and 'GSUB'. + --drop-tables=DSIG + * Only drop the 'DSIG' table, keep all others. + --drop-tables= + * Keep all tables. + --no-subset-tables+=
[,
...] + Add to the set of tables that will not be subsetted. + By default, the following tables are included in this list, as + they do not need subsetting (ignore the fact that 'loca' is listed + here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', + 'name', 'cvt ', 'fpgm', 'prep', 'VMDX', 'DSIG', 'CPAL', 'MVAR', 'STAT'. + By default, tables that the tool does not know how to subset and are not + specified here will be dropped from the font, unless --passthrough-tables + option is passed. + Example: + --no-subset-tables+=FFTM + * Keep 'FFTM' table in the font by preventing subsetting. + --passthrough-tables + Do not drop tables that the tool does not know how to subset. + --no-passthrough-tables + Tables that the tool does not know how to subset and are not specified + in --no-subset-tables will be dropped from the font. [default] + --hinting-tables[-]=
[,
...] + Specify (=), add to (+=) or exclude from (-=) the list of font-wide + hinting tables that will be dropped if --no-hinting is specified, + Examples: + --hinting-tables-='VDMX' + * Drop font-wide hinting tables except 'VDMX'. + --hinting-tables='' + * Keep all font-wide hinting tables (but strip hints from glyphs). + --legacy-kern + Keep TrueType 'kern' table even when OpenType 'GPOS' is available. + --no-legacy-kern + Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default] + +Font naming options: + These options control what is retained in the 'name' table. For numerical + codes, see: http://www.microsoft.com/typography/otspec/name.htm + --name-IDs[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the set of 'name' table + entry nameIDs that will be preserved. By default only nameID 1 (Family) + and nameID 2 (Style) are preserved. Use '*' to keep all entries. + Examples: + --name-IDs+=0,4,6 + * Also keep Copyright, Full name and PostScript name entry. + --name-IDs='' + * Drop all 'name' table entries. + --name-IDs='*' + * keep all 'name' table entries + --name-legacy + Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.). + XXX Note: This might be needed for some fonts that have no Unicode name + entires for English. See: https://github.com/behdad/fonttools/issues/146 + --no-name-legacy + Drop legacy (non-Unicode) 'name' table entries [default] + --name-languages[+|-]=[,] + Specify (=), add to (+=) or exclude from (-=) the set of 'name' table + langIDs that will be preserved. By default only records with langID + 0x0409 (English) are preserved. Use '*' to keep all langIDs. + --obfuscate-names + Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4, + and 6 with dummy strings (it is still fully functional as webfont). + +Glyph naming and encoding options: + --glyph-names + Keep PS glyph names in TT-flavored fonts. In general glyph names are + not needed for correct use of the font. 
However, some PDF generators + and PDF viewers might rely on glyph names to extract Unicode text + from PDF documents. + --no-glyph-names + Drop PS glyph names in TT-flavored fonts, by using 'post' table + version 3.0. [default] + --legacy-cmap + Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.). + --no-legacy-cmap + Drop the legacy 'cmap' subtables. [default] + --symbol-cmap + Keep the 3.0 symbol 'cmap'. + --no-symbol-cmap + Drop the 3.0 symbol 'cmap'. [default] + +Other font-specific options: + --recalc-bounds + Recalculate font bounding boxes. + --no-recalc-bounds + Keep original font bounding boxes. This is faster and still safe + for all practical purposes. [default] + --recalc-timestamp + Set font 'modified' timestamp to current time. + --no-recalc-timestamp + Do not modify font 'modified' timestamp. [default] + --canonical-order + Order tables as recommended in the OpenType standard. This is not + required by the standard, nor by any known implementation. + --no-canonical-order + Keep original order of font tables. This is faster. [default] + --prune-unicode-ranges + Update the 'OS/2 ulUnicodeRange*' bits after subsetting. The Unicode + ranges defined in the OpenType specification v1.7 are intersected with + the Unicode codepoints specified in the font's Unicode 'cmap' subtables: + when no overlap is found, the bit will be switched off. However, it will + *not* be switched on if an intersection is found. [default] + --no-prune-unicode-ranges + Don't change the 'OS/2 ulUnicodeRange*' bits. + --recalc-average-width + Update the 'OS/2 xAvgCharWidth' field after subsetting. + --no-recalc-average-width + Don't change the 'OS/2 xAvgCharWidth' field. [default] + +Application options: + --verbose + Display verbose information of the subsetting process. + --timing + Display detailed timing information of the subsetting process. + --xml + Display the TTX XML representation of subsetted font. 
+ +Example: + Produce a subset containing the characters ' !"#$%' without performing + size-reducing optimizations: + + $ pyftsubset font.ttf --unicodes="U+0020-0025" \\ + --layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\ + --notdef-glyph --notdef-outline --recommended-glyphs \\ + --name-IDs='*' --name-legacy --name-languages='*' +""" + + +log = logging.getLogger("fontTools.subset") + +def _log_glyphs(self, glyphs, font=None): + self.info("Glyph names: %s", sorted(glyphs)) + if font: + reverseGlyphMap = font.getReverseGlyphMap() + self.info("Glyph IDs: %s", sorted(reverseGlyphMap[g] for g in glyphs)) + +# bind "glyphs" function to 'log' object +log.glyphs = MethodType(_log_glyphs, log) + +# I use a different timing channel so I can configure it separately from the +# main module's logger +timer = Timer(logger=logging.getLogger("fontTools.subset.timer")) + + +def _add_method(*clazzes): + """Returns a decorator function that adds a new method to one or + more classes.""" + def wrapper(method): + done = [] + for clazz in clazzes: + if clazz in done: continue # Support multiple names of a clazz + done.append(clazz) + assert clazz.__name__ != 'DefaultTable', \ + 'Oops, table class not found.' + assert not hasattr(clazz, method.__name__), \ + "Oops, class '%s' has method '%s'." % (clazz.__name__, + method.__name__) + setattr(clazz, method.__name__, method) + return None + return wrapper + +def _uniq_sort(l): + return sorted(set(l)) + +def _set_update(s, *others): + # Jython's set.update only takes one other argument. + # Emulate real set.update... 
+ for other in others: + s.update(other) + +def _dict_subset(d, glyphs): + return {g:d[g] for g in glyphs} + + +@_add_method(otTables.Coverage) +def intersect(self, glyphs): + """Returns ascending list of matching coverage values.""" + return [i for i,g in enumerate(self.glyphs) if g in glyphs] + +@_add_method(otTables.Coverage) +def intersect_glyphs(self, glyphs): + """Returns set of intersecting glyphs.""" + return set(g for g in self.glyphs if g in glyphs) + +@_add_method(otTables.Coverage) +def subset(self, glyphs): + """Returns ascending list of remaining coverage values.""" + indices = self.intersect(glyphs) + self.glyphs = [g for g in self.glyphs if g in glyphs] + return indices + +@_add_method(otTables.Coverage) +def remap(self, coverage_map): + """Remaps coverage.""" + self.glyphs = [self.glyphs[i] for i in coverage_map] + +@_add_method(otTables.ClassDef) +def intersect(self, glyphs): + """Returns ascending list of matching class values.""" + return _uniq_sort( + ([0] if any(g not in self.classDefs for g in glyphs) else []) + + [v for g,v in self.classDefs.items() if g in glyphs]) + +@_add_method(otTables.ClassDef) +def intersect_class(self, glyphs, klass): + """Returns set of glyphs matching class.""" + if klass == 0: + return set(g for g in glyphs if g not in self.classDefs) + return set(g for g,v in self.classDefs.items() + if v == klass and g in glyphs) + +@_add_method(otTables.ClassDef) +def subset(self, glyphs, remap=False): + """Returns ascending list of remaining classes.""" + self.classDefs = {g:v for g,v in self.classDefs.items() if g in glyphs} + # Note: while class 0 has the special meaning of "not matched", + # if no glyph will ever /not match/, we can optimize class 0 out too. 
+ indices = _uniq_sort( + ([0] if any(g not in self.classDefs for g in glyphs) else []) + + list(self.classDefs.values())) + if remap: + self.remap(indices) + return indices + +@_add_method(otTables.ClassDef) +def remap(self, class_map): + """Remaps classes.""" + self.classDefs = {g:class_map.index(v) for g,v in self.classDefs.items()} + +@_add_method(otTables.SingleSubst) +def closure_glyphs(self, s, cur_glyphs): + s.glyphs.update(v for g,v in self.mapping.items() if g in cur_glyphs) + +@_add_method(otTables.SingleSubst) +def subset_glyphs(self, s): + self.mapping = {g:v for g,v in self.mapping.items() + if g in s.glyphs and v in s.glyphs} + return bool(self.mapping) + +@_add_method(otTables.MultipleSubst) +def closure_glyphs(self, s, cur_glyphs): + for glyph, subst in self.mapping.items(): + if glyph in cur_glyphs: + _set_update(s.glyphs, subst) + +@_add_method(otTables.MultipleSubst) +def subset_glyphs(self, s): + self.mapping = {g:v for g,v in self.mapping.items() + if g in s.glyphs and all(sub in s.glyphs for sub in v)} + return bool(self.mapping) + +@_add_method(otTables.AlternateSubst) +def closure_glyphs(self, s, cur_glyphs): + _set_update(s.glyphs, *(vlist for g,vlist in self.alternates.items() + if g in cur_glyphs)) + +@_add_method(otTables.AlternateSubst) +def subset_glyphs(self, s): + self.alternates = {g:vlist + for g,vlist in self.alternates.items() + if g in s.glyphs and + all(v in s.glyphs for v in vlist)} + return bool(self.alternates) + +@_add_method(otTables.LigatureSubst) +def closure_glyphs(self, s, cur_glyphs): + _set_update(s.glyphs, *([seq.LigGlyph for seq in seqs + if all(c in s.glyphs for c in seq.Component)] + for g,seqs in self.ligatures.items() + if g in cur_glyphs)) + +@_add_method(otTables.LigatureSubst) +def subset_glyphs(self, s): + self.ligatures = {g:v for g,v in self.ligatures.items() + if g in s.glyphs} + self.ligatures = {g:[seq for seq in seqs + if seq.LigGlyph in s.glyphs and + all(c in s.glyphs for c in seq.Component)] + for 
g,seqs in self.ligatures.items()} + self.ligatures = {g:v for g,v in self.ligatures.items() if v} + return bool(self.ligatures) + +@_add_method(otTables.ReverseChainSingleSubst) +def closure_glyphs(self, s, cur_glyphs): + if self.Format == 1: + indices = self.Coverage.intersect(cur_glyphs) + if(not indices or + not all(c.intersect(s.glyphs) + for c in self.LookAheadCoverage + self.BacktrackCoverage)): + return + s.glyphs.update(self.Substitute[i] for i in indices) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ReverseChainSingleSubst) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.Substitute = [self.Substitute[i] for i in indices] + # Now drop rules generating glyphs we don't want + indices = [i for i,sub in enumerate(self.Substitute) + if sub in s.glyphs] + self.Substitute = [self.Substitute[i] for i in indices] + self.Coverage.remap(indices) + self.GlyphCount = len(self.Substitute) + return bool(self.GlyphCount and + all(c.subset(s.glyphs) + for c in self.LookAheadCoverage+self.BacktrackCoverage)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.SinglePos) +def subset_glyphs(self, s): + if self.Format == 1: + return len(self.Coverage.subset(s.glyphs)) + elif self.Format == 2: + indices = self.Coverage.subset(s.glyphs) + values = self.Value + count = len(values) + self.Value = [values[i] for i in indices if i < count] + self.ValueCount = len(self.Value) + return bool(self.ValueCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.SinglePos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables + self.ValueFormat &= ~0x00F0 + return True + +@_add_method(otTables.PairPos) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + pairs = self.PairSet + count = len(pairs) + self.PairSet = [pairs[i] for i in indices if i < count] + for p in 
self.PairSet: + p.PairValueRecord = [r for r in p.PairValueRecord if r.SecondGlyph in s.glyphs] + p.PairValueCount = len(p.PairValueRecord) + # Remove empty pairsets + indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount] + self.Coverage.remap(indices) + self.PairSet = [self.PairSet[i] for i in indices] + self.PairSetCount = len(self.PairSet) + return bool(self.PairSetCount) + elif self.Format == 2: + class1_map = [c for c in self.ClassDef1.subset(s.glyphs, remap=True) if c < self.Class1Count] + class2_map = [c for c in self.ClassDef2.subset(s.glyphs, remap=True) if c < self.Class2Count] + self.Class1Record = [self.Class1Record[i] for i in class1_map] + for c in self.Class1Record: + c.Class2Record = [c.Class2Record[i] for i in class2_map] + self.Class1Count = len(class1_map) + self.Class2Count = len(class2_map) + return bool(self.Class1Count and + self.Class2Count and + self.Coverage.subset(s.glyphs)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.PairPos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables + self.ValueFormat1 &= ~0x00F0 + self.ValueFormat2 &= ~0x00F0 + return True + +@_add_method(otTables.CursivePos) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + records = self.EntryExitRecord + count = len(records) + self.EntryExitRecord = [records[i] for i in indices if i < count] + self.EntryExitCount = len(self.EntryExitRecord) + return bool(self.EntryExitCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Anchor) +def prune_hints(self): + # Drop device tables / contour anchor point + self.ensureDecompiled() + self.Format = 1 + +@_add_method(otTables.CursivePos) +def prune_post_subset(self, options): + if not options.hinting: + for rec in self.EntryExitRecord: + if rec.EntryAnchor: rec.EntryAnchor.prune_hints() + if rec.ExitAnchor: rec.ExitAnchor.prune_hints() + return True + 
+@_add_method(otTables.MarkBasePos) +def subset_glyphs(self, s): + if self.Format == 1: + mark_indices = self.MarkCoverage.subset(s.glyphs) + self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] for i in mark_indices] + self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) + base_indices = self.BaseCoverage.subset(s.glyphs) + self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i] for i in base_indices] + self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) + self.ClassCount = len(class_indices) + for m in self.MarkArray.MarkRecord: + m.Class = class_indices.index(m.Class) + for b in self.BaseArray.BaseRecord: + b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices] + return bool(self.ClassCount and + self.MarkArray.MarkCount and + self.BaseArray.BaseCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkBasePos) +def prune_post_subset(self, options): + if not options.hinting: + for m in self.MarkArray.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for b in self.BaseArray.BaseRecord: + for a in b.BaseAnchor: + if a: + a.prune_hints() + return True + +@_add_method(otTables.MarkLigPos) +def subset_glyphs(self, s): + if self.Format == 1: + mark_indices = self.MarkCoverage.subset(s.glyphs) + self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] for i in mark_indices] + self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) + ligature_indices = self.LigatureCoverage.subset(s.glyphs) + self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i] for i in ligature_indices] + self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) + self.ClassCount = len(class_indices) + for m in self.MarkArray.MarkRecord: + m.Class = class_indices.index(m.Class) + for l in 
self.LigatureArray.LigatureAttach: + for c in l.ComponentRecord: + c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices] + return bool(self.ClassCount and + self.MarkArray.MarkCount and + self.LigatureArray.LigatureCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkLigPos) +def prune_post_subset(self, options): + if not options.hinting: + for m in self.MarkArray.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for l in self.LigatureArray.LigatureAttach: + for c in l.ComponentRecord: + for a in c.LigatureAnchor: + if a: + a.prune_hints() + return True + +@_add_method(otTables.MarkMarkPos) +def subset_glyphs(self, s): + if self.Format == 1: + mark1_indices = self.Mark1Coverage.subset(s.glyphs) + self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i] for i in mark1_indices] + self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord) + mark2_indices = self.Mark2Coverage.subset(s.glyphs) + self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i] for i in mark2_indices] + self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record) + # Prune empty classes + class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord) + self.ClassCount = len(class_indices) + for m in self.Mark1Array.MarkRecord: + m.Class = class_indices.index(m.Class) + for b in self.Mark2Array.Mark2Record: + b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices] + return bool(self.ClassCount and + self.Mark1Array.MarkCount and + self.Mark2Array.MarkCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.MarkMarkPos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables or contour anchor point + for m in self.Mark1Array.MarkRecord: + if m.MarkAnchor: + m.MarkAnchor.prune_hints() + for b in self.Mark2Array.Mark2Record: + for m in b.Mark2Anchor: + if m: + m.prune_hints() + return True + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + 
otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def subset_lookups(self, lookup_indices): + pass + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.SinglePos, + otTables.PairPos, + otTables.CursivePos, + otTables.MarkBasePos, + otTables.MarkLigPos, + otTables.MarkMarkPos) +def collect_lookups(self): + return [] + +@_add_method(otTables.SingleSubst, + otTables.MultipleSubst, + otTables.AlternateSubst, + otTables.LigatureSubst, + otTables.ReverseChainSingleSubst, + otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def prune_post_subset(self, options): + return True + +@_add_method(otTables.SingleSubst, + otTables.AlternateSubst, + otTables.ReverseChainSingleSubst) +def may_have_non_1to1(self): + return False + +@_add_method(otTables.MultipleSubst, + otTables.LigatureSubst, + otTables.ContextSubst, + otTables.ChainContextSubst) +def may_have_non_1to1(self): + return True + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def __subset_classify_context(self): + + class ContextHelper(object): + def __init__(self, klass, Format): + if klass.__name__.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klass.__name__.startswith('Chain'): + Chain = 'Chain' + InputIdx = 1 + DataLen = 3 + else: + Chain = '' + InputIdx = 0 + DataLen = 1 + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + self.InputIdx = InputIdx + self.DataLen = DataLen + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + 
ContextData = lambda r:(None,) + ChainContextData = lambda r:(None, None, None) + SetContextData = None + SetChainContextData = None + RuleData = lambda r:(r.Input,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d): + (r.Input,) = d + (r.GlyphCount,) = (len(x)+1 for x in d) + def ChainSetRuleData(r, d): + (r.Backtrack, r.Input, r.LookAhead) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2])) + elif Format == 2: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(r.ClassDef,) + ChainContextData = lambda r:(r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) + def SetContextData(r, d): + (r.ClassDef,) = d + def SetChainContextData(r, d): + (r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) = d + RuleData = lambda r:(r.Class,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d): + (r.Class,) = d + (r.GlyphCount,) = (len(x)+1 for x in d) + def ChainSetRuleData(r, d): + (r.Backtrack, r.Input, r.LookAhead) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2])) + elif Format == 3: + Coverage = lambda r: r.Coverage[0] + ChainCoverage = lambda r: r.InputCoverage[0] + ContextData = None + ChainContextData = None + SetContextData = None + SetChainContextData = None + RuleData = lambda r: r.Coverage + ChainRuleData = lambda r:(r.BacktrackCoverage + + r.InputCoverage + + r.LookAheadCoverage) + def SetRuleData(r, d): + (r.Coverage,) = d + (r.GlyphCount,) = (len(x) for x in d) + def ChainSetRuleData(r, d): + (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(x) for x in d) + else: + assert 0, "unknown format: %s" % Format + + if Chain: + self.Coverage = ChainCoverage + self.ContextData = ChainContextData + self.SetContextData = SetChainContextData 
+ self.RuleData = ChainRuleData + self.SetRuleData = ChainSetRuleData + else: + self.Coverage = Coverage + self.ContextData = ContextData + self.SetContextData = SetContextData + self.RuleData = RuleData + self.SetRuleData = SetRuleData + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleCount = ChainTyp+'RuleCount' + self.RuleSet = ChainTyp+'RuleSet' + self.RuleSetCount = ChainTyp+'RuleSetCount' + self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleCount = ChainTyp+'ClassRuleCount' + self.RuleSet = ChainTyp+'ClassSet' + self.RuleSetCount = ChainTyp+'ClassSetCount' + self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c + else (set(glyphs) if r == 0 else set())) + + self.ClassDef = 'InputClassDef' if Chain else 'ClassDef' + self.ClassDefIndex = 1 if Chain else 0 + self.Input = 'Input' if Chain else 'Class' + + if self.Format not in [1, 2, 3]: + return None # Don't shoot the messenger; let it go + if not hasattr(self.__class__, "__ContextHelpers"): + self.__class__.__ContextHelpers = {} + if self.Format not in self.__class__.__ContextHelpers: + helper = ContextHelper(self.__class__, self.Format) + self.__class__.__ContextHelpers[self.Format] = helper + return self.__class__.__ContextHelpers[self.Format] + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst) +def closure_glyphs(self, s, cur_glyphs): + c = self.__subset_classify_context() + + indices = c.Coverage(self).intersect(cur_glyphs) + if not indices: + return [] + cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs) + + if self.Format == 1: + ContextData = c.ContextData(self) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + for i in indices: + if i >= rssCount or not rss[i]: continue + for r in getattr(rss[i], c.Rule): + if not r: continue + if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) + for cd,klist in zip(ContextData, c.RuleData(r))): + 
continue + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? + pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset([c.Coverage(self).glyphs[i]]) + else: + pos_glyphs = frozenset([r.Input[seqi - 1]]) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(r.Input)+2)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + elif self.Format == 2: + ClassDef = getattr(self, c.ClassDef) + indices = ClassDef.intersect(cur_glyphs) + ContextData = c.ContextData(self) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + for i in indices: + if i >= rssCount or not rss[i]: continue + for r in getattr(rss[i], c.Rule): + if not r: continue + if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) + for cd,klist in zip(ContextData, c.RuleData(r))): + continue + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? + pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i)) + else: + pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1])) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(getattr(r, c.Input))+2)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + elif self.Format == 3: + if not all(x.intersect(s.glyphs) for x in c.RuleData(self)): + return [] + r = self + chaos = set() + for ll in getattr(r, c.LookupRecord): + if not ll: continue + seqi = ll.SequenceIndex + if seqi in chaos: + # TODO Can we improve this? 
+ pos_glyphs = None + else: + if seqi == 0: + pos_glyphs = frozenset(cur_glyphs) + else: + pos_glyphs = frozenset(r.InputCoverage[seqi].intersect_glyphs(s.glyphs)) + lookup = s.table.LookupList.Lookup[ll.LookupListIndex] + chaos.add(seqi) + if lookup.may_have_non_1to1(): + chaos.update(range(seqi, len(r.InputCoverage)+1)) + lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ContextPos, + otTables.ChainContextSubst, + otTables.ChainContextPos) +def subset_glyphs(self, s): + c = self.__subset_classify_context() + + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + rss = [rss[i] for i in indices if i < rssCount] + for rs in rss: + if not rs: continue + ss = getattr(rs, c.Rule) + ss = [r for r in ss + if r and all(all(g in s.glyphs for g in glist) + for glist in c.RuleData(r))] + setattr(rs, c.Rule, ss) + setattr(rs, c.RuleCount, len(ss)) + # Prune empty rulesets + indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)] + self.Coverage.remap(indices) + rss = [rss[i] for i in indices] + setattr(self, c.RuleSet, rss) + setattr(self, c.RuleSetCount, len(rss)) + return bool(rss) + elif self.Format == 2: + if not self.Coverage.subset(s.glyphs): + return False + ContextData = c.ContextData(self) + klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData] + + # Keep rulesets for class numbers that survived. + indices = klass_maps[c.ClassDefIndex] + rss = getattr(self, c.RuleSet) + rssCount = getattr(self, c.RuleSetCount) + rss = [rss[i] for i in indices if i < rssCount] + del rssCount + # Delete, but not renumber, unreachable rulesets. 
+ indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs) + rss = [rss if i in indices else None for i,rss in enumerate(rss)] + + for rs in rss: + if not rs: continue + ss = getattr(rs, c.Rule) + ss = [r for r in ss + if r and all(all(k in klass_map for k in klist) + for klass_map,klist in zip(klass_maps, c.RuleData(r)))] + setattr(rs, c.Rule, ss) + setattr(rs, c.RuleCount, len(ss)) + + # Remap rule classes + for r in ss: + c.SetRuleData(r, [[klass_map.index(k) for k in klist] + for klass_map,klist in zip(klass_maps, c.RuleData(r))]) + + # Prune empty rulesets + rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss] + while rss and rss[-1] is None: + del rss[-1] + setattr(self, c.RuleSet, rss) + setattr(self, c.RuleSetCount, len(rss)) + + # TODO: We can do a second round of remapping class values based + # on classes that are actually used in at least one rule. Right + # now we subset classes to c.glyphs only. Or better, rewrite + # the above to do that. + + return bool(rss) + elif self.Format == 3: + return all(x.subset(s.glyphs) for x in c.RuleData(self)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def subset_lookups(self, lookup_indices): + c = self.__subset_classify_context() + + if self.Format in [1, 2]: + for rs in getattr(self, c.RuleSet): + if not rs: continue + for r in getattr(rs, c.Rule): + if not r: continue + setattr(r, c.LookupRecord, + [ll for ll in getattr(r, c.LookupRecord) + if ll and ll.LookupListIndex in lookup_indices]) + for ll in getattr(r, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex) + elif self.Format == 3: + setattr(self, c.LookupRecord, + [ll for ll in getattr(self, c.LookupRecord) + if ll and ll.LookupListIndex in lookup_indices]) + for ll in getattr(self, c.LookupRecord): + if not ll: continue + ll.LookupListIndex = 
lookup_indices.index(ll.LookupListIndex) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ContextSubst, + otTables.ChainContextSubst, + otTables.ContextPos, + otTables.ChainContextPos) +def collect_lookups(self): + c = self.__subset_classify_context() + + if self.Format in [1, 2]: + return [ll.LookupListIndex + for rs in getattr(self, c.RuleSet) if rs + for r in getattr(rs, c.Rule) if r + for ll in getattr(r, c.LookupRecord) if ll] + elif self.Format == 3: + return [ll.LookupListIndex + for ll in getattr(self, c.LookupRecord) if ll] + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst) +def closure_glyphs(self, s, cur_glyphs): + if self.Format == 1: + self.ExtSubTable.closure_glyphs(s, cur_glyphs) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst) +def may_have_non_1to1(self): + if self.Format == 1: + return self.ExtSubTable.may_have_non_1to1() + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def subset_glyphs(self, s): + if self.Format == 1: + return self.ExtSubTable.subset_glyphs(s) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def prune_post_subset(self, options): + if self.Format == 1: + return self.ExtSubTable.prune_post_subset(options) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def subset_lookups(self, lookup_indices): + if self.Format == 1: + return self.ExtSubTable.subset_lookups(lookup_indices) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ExtensionSubst, + otTables.ExtensionPos) +def collect_lookups(self): + if self.Format == 1: + return self.ExtSubTable.collect_lookups() + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Lookup) +def 
closure_glyphs(self, s, cur_glyphs=None): + if cur_glyphs is None: + cur_glyphs = frozenset(s.glyphs) + + # Memoize + if (id(self), cur_glyphs) in s._doneLookups: + return + s._doneLookups.add((id(self), cur_glyphs)) + + if self in s._activeLookups: + raise Exception("Circular loop in lookup recursion") + s._activeLookups.append(self) + for st in self.SubTable: + if not st: continue + st.closure_glyphs(s, cur_glyphs) + assert(s._activeLookups[-1] == self) + del s._activeLookups[-1] + +@_add_method(otTables.Lookup) +def subset_glyphs(self, s): + self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)] + self.SubTableCount = len(self.SubTable) + return bool(self.SubTableCount) + +@_add_method(otTables.Lookup) +def prune_post_subset(self, options): + ret = False + for st in self.SubTable: + if not st: continue + if st.prune_post_subset(options): ret = True + return ret + +@_add_method(otTables.Lookup) +def subset_lookups(self, lookup_indices): + for s in self.SubTable: + s.subset_lookups(lookup_indices) + +@_add_method(otTables.Lookup) +def collect_lookups(self): + return sum((st.collect_lookups() for st in self.SubTable if st), []) + +@_add_method(otTables.Lookup) +def may_have_non_1to1(self): + return any(st.may_have_non_1to1() for st in self.SubTable if st) + +@_add_method(otTables.LookupList) +def subset_glyphs(self, s): + """Returns the indices of nonempty lookups.""" + return [i for i,l in enumerate(self.Lookup) if l and l.subset_glyphs(s)] + +@_add_method(otTables.LookupList) +def prune_post_subset(self, options): + ret = False + for l in self.Lookup: + if not l: continue + if l.prune_post_subset(options): ret = True + return ret + +@_add_method(otTables.LookupList) +def subset_lookups(self, lookup_indices): + self.ensureDecompiled() + self.Lookup = [self.Lookup[i] for i in lookup_indices + if i < self.LookupCount] + self.LookupCount = len(self.Lookup) + for l in self.Lookup: + l.subset_lookups(lookup_indices) + 
+@_add_method(otTables.LookupList) +def neuter_lookups(self, lookup_indices): + """Sets lookups not in lookup_indices to None.""" + self.ensureDecompiled() + self.Lookup = [l if i in lookup_indices else None for i,l in enumerate(self.Lookup)] + +@_add_method(otTables.LookupList) +def closure_lookups(self, lookup_indices): + """Returns sorted index of all lookups reachable from lookup_indices.""" + lookup_indices = _uniq_sort(lookup_indices) + recurse = lookup_indices + while True: + recurse_lookups = sum((self.Lookup[i].collect_lookups() + for i in recurse if i < self.LookupCount), []) + recurse_lookups = [l for l in recurse_lookups + if l not in lookup_indices and l < self.LookupCount] + if not recurse_lookups: + return _uniq_sort(lookup_indices) + recurse_lookups = _uniq_sort(recurse_lookups) + lookup_indices.extend(recurse_lookups) + recurse = recurse_lookups + +@_add_method(otTables.Feature) +def subset_lookups(self, lookup_indices): + """"Returns True if feature is non-empty afterwards.""" + self.LookupListIndex = [l for l in self.LookupListIndex + if l in lookup_indices] + # Now map them. + self.LookupListIndex = [lookup_indices.index(l) + for l in self.LookupListIndex] + self.LookupCount = len(self.LookupListIndex) + return self.LookupCount or self.FeatureParams + +@_add_method(otTables.FeatureList) +def subset_lookups(self, lookup_indices): + """Returns the indices of nonempty features.""" + # Note: Never ever drop feature 'pref', even if it's empty. + # HarfBuzz chooses shaper for Khmer based on presence of this + # feature. 
See thread at: + # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html + return [i for i,f in enumerate(self.FeatureRecord) + if (f.Feature.subset_lookups(lookup_indices) or + f.FeatureTag == 'pref')] + +@_add_method(otTables.FeatureList) +def collect_lookups(self, feature_indices): + return sum((self.FeatureRecord[i].Feature.LookupListIndex + for i in feature_indices + if i < self.FeatureCount), []) + +@_add_method(otTables.FeatureList) +def subset_features(self, feature_indices): + self.ensureDecompiled() + self.FeatureRecord = [self.FeatureRecord[i] for i in feature_indices] + self.FeatureCount = len(self.FeatureRecord) + return bool(self.FeatureCount) + +@_add_method(otTables.FeatureTableSubstitution) +def subset_lookups(self, lookup_indices): + """Returns the indices of nonempty features.""" + return [r.FeatureIndex for r in self.SubstitutionRecord + if r.Feature.subset_lookups(lookup_indices)] + +@_add_method(otTables.FeatureVariations) +def subset_lookups(self, lookup_indices): + """Returns the indices of nonempty features.""" + return sum((f.FeatureTableSubstitution.subset_lookups(lookup_indices) + for f in self.FeatureVariationRecord), []) + +@_add_method(otTables.FeatureVariations) +def collect_lookups(self, feature_indices): + return sum((r.Feature.LookupListIndex + for vr in self.FeatureVariationRecord + for r in vr.FeatureTableSubstitution.SubstitutionRecord + if r.FeatureIndex in feature_indices), []) + +@_add_method(otTables.FeatureTableSubstitution) +def subset_features(self, feature_indices): + self.ensureDecompiled() + self.SubstitutionRecord = [r for r in self.SubstitutionRecord + if r.FeatureIndex in feature_indices] + self.SubstitutionCount = len(self.SubstitutionRecord) + return bool(self.SubstitutionCount) + +@_add_method(otTables.FeatureVariations) +def subset_features(self, feature_indices): + self.ensureDecompiled() + self.FeaturVariationRecord = [r for r in self.FeatureVariationRecord + if 
r.FeatureTableSubstitution.subset_features(feature_indices)] + self.FeatureVariationCount = len(self.FeatureVariationRecord) + return bool(self.FeatureVariationCount) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def subset_features(self, feature_indices): + if self.ReqFeatureIndex in feature_indices: + self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex) + else: + self.ReqFeatureIndex = 65535 + self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices] + # Now map them. + self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex + if f in feature_indices] + self.FeatureCount = len(self.FeatureIndex) + return bool(self.FeatureCount or self.ReqFeatureIndex != 65535) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def collect_features(self): + feature_indices = self.FeatureIndex[:] + if self.ReqFeatureIndex != 65535: + feature_indices.append(self.ReqFeatureIndex) + return _uniq_sort(feature_indices) + +@_add_method(otTables.Script) +def subset_features(self, feature_indices, keepEmptyDefaultLangSys=False): + if(self.DefaultLangSys and + not self.DefaultLangSys.subset_features(feature_indices) and + not keepEmptyDefaultLangSys): + self.DefaultLangSys = None + self.LangSysRecord = [l for l in self.LangSysRecord + if l.LangSys.subset_features(feature_indices)] + self.LangSysCount = len(self.LangSysRecord) + return bool(self.LangSysCount or self.DefaultLangSys) + +@_add_method(otTables.Script) +def collect_features(self): + feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord] + if self.DefaultLangSys: + feature_indices.append(self.DefaultLangSys.collect_features()) + return _uniq_sort(sum(feature_indices, [])) + +@_add_method(otTables.ScriptList) +def subset_features(self, feature_indices, retain_empty): + # https://bugzilla.mozilla.org/show_bug.cgi?id=1331737#c32 + self.ScriptRecord = [s for s in self.ScriptRecord + if s.Script.subset_features(feature_indices, 
s.ScriptTag=='DFLT') or + retain_empty] + self.ScriptCount = len(self.ScriptRecord) + return bool(self.ScriptCount) + +@_add_method(otTables.ScriptList) +def collect_features(self): + return _uniq_sort(sum((s.Script.collect_features() + for s in self.ScriptRecord), [])) + +# CBLC will inherit it +@_add_method(ttLib.getTableClass('EBLC')) +def subset_glyphs(self, s): + for strike in self.strikes: + for indexSubTable in strike.indexSubTables: + indexSubTable.names = [n for n in indexSubTable.names if n in s.glyphs] + strike.indexSubTables = [i for i in strike.indexSubTables if i.names] + self.strikes = [s for s in self.strikes if s.indexSubTables] + + return True + +# CBDC will inherit it +@_add_method(ttLib.getTableClass('EBDT')) +def subset_glyphs(self, s): + self.strikeData = [{g: strike[g] for g in s.glyphs if g in strike} + for strike in self.strikeData] + return True + +@_add_method(ttLib.getTableClass('GSUB')) +def closure_glyphs(self, s): + s.table = self.table + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) + else: + lookup_indices = [] + if getattr(self.table, 'FeatureVariations', None): + lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices) + lookup_indices = _uniq_sort(lookup_indices) + if self.table.LookupList: + while True: + orig_glyphs = frozenset(s.glyphs) + s._activeLookups = [] + s._doneLookups = set() + for i in lookup_indices: + if i >= self.table.LookupList.LookupCount: continue + if not self.table.LookupList.Lookup[i]: continue + self.table.LookupList.Lookup[i].closure_glyphs(s) + del s._activeLookups, s._doneLookups + if orig_glyphs == s.glyphs: + break + del s.table + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_glyphs(self, s): + s.glyphs = s.glyphs_gsubed + if self.table.LookupList: + lookup_indices = 
self.table.LookupList.subset_glyphs(s) + else: + lookup_indices = [] + self.subset_lookups(lookup_indices) + return True + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def retain_empty_scripts(self): + # https://github.com/behdad/fonttools/issues/518 + # https://bugzilla.mozilla.org/show_bug.cgi?id=1080739#c15 + return self.__class__ == ttLib.getTableClass('GSUB') + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_lookups(self, lookup_indices): + """Retains specified lookups, then removes empty features, language + systems, and scripts.""" + if self.table.LookupList: + self.table.LookupList.subset_lookups(lookup_indices) + if self.table.FeatureList: + feature_indices = self.table.FeatureList.subset_lookups(lookup_indices) + else: + feature_indices = [] + if getattr(self.table, 'FeatureVariations', None): + feature_indices += self.table.FeatureVariations.subset_lookups(lookup_indices) + feature_indices = _uniq_sort(feature_indices) + if self.table.FeatureList: + self.table.FeatureList.subset_features(feature_indices) + if getattr(self.table, 'FeatureVariations', None): + self.table.FeatureVariations.subset_features(feature_indices) + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts()) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def neuter_lookups(self, lookup_indices): + """Sets lookups not in lookup_indices to None.""" + if self.table.LookupList: + self.table.LookupList.neuter_lookups(lookup_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_lookups(self, remap=True): + """Remove (default) or neuter unreferenced lookups""" + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) + else: + 
lookup_indices = [] + if getattr(self.table, 'FeatureVariations', None): + lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices) + lookup_indices = _uniq_sort(lookup_indices) + if self.table.LookupList: + lookup_indices = self.table.LookupList.closure_lookups(lookup_indices) + else: + lookup_indices = [] + if remap: + self.subset_lookups(lookup_indices) + else: + self.neuter_lookups(lookup_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_feature_tags(self, feature_tags): + if self.table.FeatureList: + feature_indices = \ + [i for i,f in enumerate(self.table.FeatureList.FeatureRecord) + if f.FeatureTag in feature_tags] + self.table.FeatureList.subset_features(feature_indices) + if getattr(self.table, 'FeatureVariations', None): + self.table.FeatureVariations.subset_features(feature_indices) + else: + feature_indices = [] + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts()) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_features(self): + """Remove unreferenced features""" + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + self.table.FeatureList.subset_features(feature_indices) + if getattr(self.table, 'FeatureVariations', None): + self.table.FeatureVariations.subset_features(feature_indices) + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts()) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_pre_subset(self, font, options): + # Drop undesired features + if '*' not in options.layout_features: + self.subset_feature_tags(options.layout_features) + # Neuter unreferenced lookups + self.prune_lookups(remap=False) + return True + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def 
remove_redundant_langsys(self): + table = self.table + if not table.ScriptList or not table.FeatureList: + return + + features = table.FeatureList.FeatureRecord + + for s in table.ScriptList.ScriptRecord: + d = s.Script.DefaultLangSys + if not d: + continue + for lr in s.Script.LangSysRecord[:]: + l = lr.LangSys + # Compare d and l + if len(d.FeatureIndex) != len(l.FeatureIndex): + continue + if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535): + continue + + if d.ReqFeatureIndex != 65535: + if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]: + continue + + for i in range(len(d.FeatureIndex)): + if features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]: + break + else: + # LangSys and default are equal; delete LangSys + s.Script.LangSysRecord.remove(lr) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_post_subset(self, options): + table = self.table + + self.prune_lookups() # XXX Is this actually needed?! + + if table.LookupList: + table.LookupList.prune_post_subset(options) + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. + #if not table.LookupList.Lookup: + # table.LookupList = None + + if not table.LookupList: + table.FeatureList = None + + + if table.FeatureList: + self.remove_redundant_langsys() + # Remove unreferenced features + self.prune_features() + + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. + #if table.FeatureList and not table.FeatureList.FeatureRecord: + # table.FeatureList = None + + # Never drop scripts themselves as them just being available + # holds semantic significance. + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. 
+ #if table.ScriptList and not table.ScriptList.ScriptRecord: + # table.ScriptList = None + + if not table.FeatureList and hasattr(table, 'FeatureVariations'): + table.FeatureVariations = None + + if hasattr(table, 'FeatureVariations') and not table.FeatureVariations: + if table.Version == 0x00010001: + table.Version = 0x00010000 + + return True + +@_add_method(ttLib.getTableClass('GDEF')) +def subset_glyphs(self, s): + glyphs = s.glyphs_gsubed + table = self.table + if table.LigCaretList: + indices = table.LigCaretList.Coverage.subset(glyphs) + table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i] for i in indices] + table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph) + if table.MarkAttachClassDef: + table.MarkAttachClassDef.classDefs = \ + {g:v for g,v in table.MarkAttachClassDef.classDefs.items() + if g in glyphs} + if table.GlyphClassDef: + table.GlyphClassDef.classDefs = \ + {g:v for g,v in table.GlyphClassDef.classDefs.items() + if g in glyphs} + if table.AttachList: + indices = table.AttachList.Coverage.subset(glyphs) + GlyphCount = table.AttachList.GlyphCount + table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i] + for i in indices if i < GlyphCount] + table.AttachList.GlyphCount = len(table.AttachList.AttachPoint) + if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef: + for coverage in table.MarkGlyphSetsDef.Coverage: + coverage.subset(glyphs) + # TODO: The following is disabled. If enabling, we need to go fixup all + # lookups that use MarkFilteringSet and map their set. 
+ # indices = table.MarkGlyphSetsDef.Coverage = \ + # [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs] + return True + +@_add_method(ttLib.getTableClass('GDEF')) +def prune_post_subset(self, options): + table = self.table + # XXX check these against OTS + if table.LigCaretList and not table.LigCaretList.LigGlyphCount: + table.LigCaretList = None + if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs: + table.MarkAttachClassDef = None + if table.GlyphClassDef and not table.GlyphClassDef.classDefs: + table.GlyphClassDef = None + if table.AttachList and not table.AttachList.GlyphCount: + table.AttachList = None + if (hasattr(table, "MarkGlyphSetsDef") and + table.MarkGlyphSetsDef and + not table.MarkGlyphSetsDef.Coverage): + table.MarkGlyphSetsDef = None + if table.Version == 0x00010002: + table.Version = 0x00010000 + return bool(table.LigCaretList or + table.MarkAttachClassDef or + table.GlyphClassDef or + table.AttachList or + (table.Version >= 0x00010002 and table.MarkGlyphSetsDef)) + +@_add_method(ttLib.getTableClass('kern')) +def prune_pre_subset(self, font, options): + # Prune unknown kern table types + self.kernTables = [t for t in self.kernTables if hasattr(t, 'kernTable')] + return bool(self.kernTables) + +@_add_method(ttLib.getTableClass('kern')) +def subset_glyphs(self, s): + glyphs = s.glyphs_gsubed + for t in self.kernTables: + t.kernTable = {(a,b):v for (a,b),v in t.kernTable.items() + if a in glyphs and b in glyphs} + self.kernTables = [t for t in self.kernTables if t.kernTable] + return bool(self.kernTables) + +@_add_method(ttLib.getTableClass('vmtx')) +def subset_glyphs(self, s): + self.metrics = _dict_subset(self.metrics, s.glyphs) + return bool(self.metrics) + +@_add_method(ttLib.getTableClass('hmtx')) +def subset_glyphs(self, s): + self.metrics = _dict_subset(self.metrics, s.glyphs) + return True # Required table + +@_add_method(ttLib.getTableClass('hdmx')) +def subset_glyphs(self, s): + self.hdmx = {sz:_dict_subset(l, 
s.glyphs) for sz,l in self.hdmx.items()} + return bool(self.hdmx) + +@_add_method(ttLib.getTableClass('ankr')) +def subset_glyphs(self, s): + table = self.table.AnchorPoints + assert table.Format == 0, "unknown 'ankr' format %s" % table.Format + table.Anchors = {glyph: table.Anchors[glyph] for glyph in s.glyphs + if glyph in table.Anchors} + return len(table.Anchors) > 0 + +@_add_method(ttLib.getTableClass('bsln')) +def closure_glyphs(self, s): + table = self.table.Baseline + if table.Format in (2, 3): + s.glyphs.add(table.StandardGlyph) + +@_add_method(ttLib.getTableClass('bsln')) +def subset_glyphs(self, s): + table = self.table.Baseline + if table.Format in (1, 3): + baselines = {glyph: table.BaselineValues.get(glyph, table.DefaultBaseline) + for glyph in s.glyphs} + if len(baselines) > 0: + mostCommon, _cnt = Counter(baselines.values()).most_common(1)[0] + table.DefaultBaseline = mostCommon + baselines = {glyph: b for glyph, b in baselines.items() + if b != mostCommon} + if len(baselines) > 0: + table.BaselineValues = baselines + else: + table.Format = {1: 0, 3: 2}[table.Format] + del table.BaselineValues + return True + +@_add_method(ttLib.getTableClass('lcar')) +def subset_glyphs(self, s): + table = self.table.LigatureCarets + if table.Format in (0, 1): + table.Carets = {glyph: table.Carets[glyph] for glyph in s.glyphs + if glyph in table.Carets} + return len(table.Carets) > 0 + else: + assert False, "unknown 'lcar' format %s" % table.Format + +@_add_method(ttLib.getTableClass('gvar')) +def prune_pre_subset(self, font, options): + if options.notdef_glyph and not options.notdef_outline: + self.variations[font.glyphOrder[0]] = [] + return True + +@_add_method(ttLib.getTableClass('gvar')) +def subset_glyphs(self, s): + self.variations = _dict_subset(self.variations, s.glyphs) + self.glyphCount = len(self.variations) + return bool(self.variations) + +@_add_method(ttLib.getTableClass('VORG')) +def subset_glyphs(self, s): + self.VOriginRecords = {g:v for g,v in 
self.VOriginRecords.items() + if g in s.glyphs} + self.numVertOriginYMetrics = len(self.VOriginRecords) + return True # Never drop; has default metrics + +@_add_method(ttLib.getTableClass('opbd')) +def subset_glyphs(self, s): + table = self.table.OpticalBounds + if table.Format == 0: + table.OpticalBoundsDeltas = {glyph: table.OpticalBoundsDeltas[glyph] + for glyph in s.glyphs + if glyph in table.OpticalBoundsDeltas} + return len(table.OpticalBoundsDeltas) > 0 + elif table.Format == 1: + table.OpticalBoundsPoints = {glyph: table.OpticalBoundsPoints[glyph] + for glyph in s.glyphs + if glyph in table.OpticalBoundsPoints} + return len(table.OpticalBoundsPoints) > 0 + else: + assert False, "unknown 'opbd' format %s" % table.Format + +@_add_method(ttLib.getTableClass('post')) +def prune_pre_subset(self, font, options): + if not options.glyph_names: + self.formatType = 3.0 + return True # Required table + +@_add_method(ttLib.getTableClass('post')) +def subset_glyphs(self, s): + self.extraNames = [] # This seems to do it + return True # Required table + +@_add_method(ttLib.getTableClass('prop')) +def subset_glyphs(self, s): + prop = self.table.GlyphProperties + if prop.Format == 0: + return prop.DefaultProperties != 0 + elif prop.Format == 1: + prop.Properties = {g: prop.Properties.get(g, prop.DefaultProperties) + for g in s.glyphs} + mostCommon, _cnt = Counter(prop.Properties.values()).most_common(1)[0] + prop.DefaultProperties = mostCommon + prop.Properties = {g: prop for g, prop in prop.Properties.items() + if prop != mostCommon} + if len(prop.Properties) == 0: + del prop.Properties + prop.Format = 0 + return prop.DefaultProperties != 0 + return True + else: + assert False, "unknown 'prop' format %s" % prop.Format + +@_add_method(ttLib.getTableClass('COLR')) +def closure_glyphs(self, s): + decompose = s.glyphs + while decompose: + layers = set() + for g in decompose: + for l in self.ColorLayers.get(g, []): + layers.add(l.name) + layers -= s.glyphs + 
s.glyphs.update(layers) + decompose = layers + +@_add_method(ttLib.getTableClass('COLR')) +def subset_glyphs(self, s): + self.ColorLayers = {g: self.ColorLayers[g] for g in s.glyphs if g in self.ColorLayers} + return bool(self.ColorLayers) + +# TODO: prune unused palettes +@_add_method(ttLib.getTableClass('CPAL')) +def prune_post_subset(self, options): + return True + +@_add_method(otTables.MathGlyphConstruction) +def closure_glyphs(self, glyphs): + variants = set() + for v in self.MathGlyphVariantRecord: + variants.add(v.VariantGlyph) + if self.GlyphAssembly: + for p in self.GlyphAssembly.PartRecords: + variants.add(p.glyph) + return variants + +@_add_method(otTables.MathVariants) +def closure_glyphs(self, s): + glyphs = frozenset(s.glyphs) + variants = set() + + if self.VertGlyphCoverage: + indices = self.VertGlyphCoverage.intersect(glyphs) + for i in indices: + variants.update(self.VertGlyphConstruction[i].closure_glyphs(glyphs)) + + if self.HorizGlyphCoverage: + indices = self.HorizGlyphCoverage.intersect(glyphs) + for i in indices: + variants.update(self.HorizGlyphConstruction[i].closure_glyphs(glyphs)) + + s.glyphs.update(variants) + +@_add_method(ttLib.getTableClass('MATH')) +def closure_glyphs(self, s): + self.table.MathVariants.closure_glyphs(s) + +@_add_method(otTables.MathItalicsCorrectionInfo) +def subset_glyphs(self, s): + indices = self.Coverage.subset(s.glyphs) + self.ItalicsCorrection = [self.ItalicsCorrection[i] for i in indices] + self.ItalicsCorrectionCount = len(self.ItalicsCorrection) + return bool(self.ItalicsCorrectionCount) + +@_add_method(otTables.MathTopAccentAttachment) +def subset_glyphs(self, s): + indices = self.TopAccentCoverage.subset(s.glyphs) + self.TopAccentAttachment = [self.TopAccentAttachment[i] for i in indices] + self.TopAccentAttachmentCount = len(self.TopAccentAttachment) + return bool(self.TopAccentAttachmentCount) + +@_add_method(otTables.MathKernInfo) +def subset_glyphs(self, s): + indices = 
self.MathKernCoverage.subset(s.glyphs) + self.MathKernInfoRecords = [self.MathKernInfoRecords[i] for i in indices] + self.MathKernCount = len(self.MathKernInfoRecords) + return bool(self.MathKernCount) + +@_add_method(otTables.MathGlyphInfo) +def subset_glyphs(self, s): + if self.MathItalicsCorrectionInfo: + self.MathItalicsCorrectionInfo.subset_glyphs(s) + if self.MathTopAccentAttachment: + self.MathTopAccentAttachment.subset_glyphs(s) + if self.MathKernInfo: + self.MathKernInfo.subset_glyphs(s) + if self.ExtendedShapeCoverage: + self.ExtendedShapeCoverage.subset(s.glyphs) + return True + +@_add_method(otTables.MathVariants) +def subset_glyphs(self, s): + if self.VertGlyphCoverage: + indices = self.VertGlyphCoverage.subset(s.glyphs) + self.VertGlyphConstruction = [self.VertGlyphConstruction[i] for i in indices] + self.VertGlyphCount = len(self.VertGlyphConstruction) + + if self.HorizGlyphCoverage: + indices = self.HorizGlyphCoverage.subset(s.glyphs) + self.HorizGlyphConstruction = [self.HorizGlyphConstruction[i] for i in indices] + self.HorizGlyphCount = len(self.HorizGlyphConstruction) + + return True + +@_add_method(ttLib.getTableClass('MATH')) +def subset_glyphs(self, s): + s.glyphs = s.glyphs_mathed + self.table.MathGlyphInfo.subset_glyphs(s) + self.table.MathVariants.subset_glyphs(s) + return True + +@_add_method(ttLib.getTableModule('glyf').Glyph) +def remapComponentsFast(self, indices): + if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: + return # Not composite + data = array.array("B", self.data) + i = 10 + more = 1 + while more: + flags =(data[i] << 8) | data[i+1] + glyphID =(data[i+2] << 8) | data[i+3] + # Remap + glyphID = indices.index(glyphID) + data[i+2] = glyphID >> 8 + data[i+3] = glyphID & 0xFF + i += 4 + flags = int(flags) + + if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS + else: i += 2 + if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE + elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE + elif flags & 0x0080: i += 8 # 
WE_HAVE_A_TWO_BY_TWO + more = flags & 0x0020 # MORE_COMPONENTS + + self.data = data.tostring() + +@_add_method(ttLib.getTableClass('glyf')) +def closure_glyphs(self, s): + decompose = s.glyphs + while decompose: + components = set() + for g in decompose: + if g not in self.glyphs: + continue + gl = self.glyphs[g] + for c in gl.getComponentNames(self): + components.add(c) + components -= s.glyphs + s.glyphs.update(components) + decompose = components + +@_add_method(ttLib.getTableClass('glyf')) +def prune_pre_subset(self, font, options): + if options.notdef_glyph and not options.notdef_outline: + g = self[self.glyphOrder[0]] + # Yay, easy! + g.__dict__.clear() + g.data = "" + return True + +@_add_method(ttLib.getTableClass('glyf')) +def subset_glyphs(self, s): + self.glyphs = _dict_subset(self.glyphs, s.glyphs) + indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs] + for v in self.glyphs.values(): + if hasattr(v, "data"): + v.remapComponentsFast(indices) + else: + pass # No need + self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs] + # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset. 
+ return True + +@_add_method(ttLib.getTableClass('glyf')) +def prune_post_subset(self, options): + remove_hinting = not options.hinting + for v in self.glyphs.values(): + v.trim(remove_hinting=remove_hinting) + return True + +@_add_method(ttLib.getTableClass('CFF ')) +def prune_pre_subset(self, font, options): + cff = self.cff + # CFF table must have one font only + cff.fontNames = cff.fontNames[:1] + + if options.notdef_glyph and not options.notdef_outline: + for fontname in cff.keys(): + font = cff[fontname] + c, fdSelectIndex = font.CharStrings.getItemAndSelector('.notdef') + if hasattr(font, 'FDArray') and font.FDArray is not None: + private = font.FDArray[fdSelectIndex].Private + else: + private = font.Private + dfltWdX = private.defaultWidthX + nmnlWdX = private.nominalWidthX + pen = NullPen() + c.draw(pen) # this will set the charstring's width + if c.width != dfltWdX: + c.program = [c.width - nmnlWdX, 'endchar'] + else: + c.program = ['endchar'] + + # Clear useless Encoding + for fontname in cff.keys(): + font = cff[fontname] + # https://github.com/behdad/fonttools/issues/620 + font.Encoding = "StandardEncoding" + + return True # bool(cff.fontNames) + +@_add_method(ttLib.getTableClass('CFF ')) +def subset_glyphs(self, s): + cff = self.cff + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + + # Load all glyphs + for g in font.charset: + if g not in s.glyphs: continue + c, _ = cs.getItemAndSelector(g) + + if cs.charStringsAreIndexed: + indices = [i for i,g in enumerate(font.charset) if g in s.glyphs] + csi = cs.charStringsIndex + csi.items = [csi.items[i] for i in indices] + del csi.file, csi.offsets + if hasattr(font, "FDSelect"): + sel = font.FDSelect + # XXX We want to set sel.format to None, such that the + # most compact format is selected. However, OTS was + # broken and couldn't parse a FDSelect format 0 that + # happened before CharStrings. 
As such, always force + # format 3 until we fix cffLib to always generate + # FDSelect after CharStrings. + # https://github.com/khaledhosny/ots/pull/31 + #sel.format = None + sel.format = 3 + sel.gidArray = [sel.gidArray[i] for i in indices] + cs.charStrings = {g:indices.index(v) + for g,v in cs.charStrings.items() + if g in s.glyphs} + else: + cs.charStrings = {g:v + for g,v in cs.charStrings.items() + if g in s.glyphs} + font.charset = [g for g in font.charset if g in s.glyphs] + font.numGlyphs = len(font.charset) + + return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) + +@_add_method(psCharStrings.T2CharString) +def subset_subroutines(self, subrs, gsubrs): + p = self.program + assert len(p) + for i in range(1, len(p)): + if p[i] == 'callsubr': + assert isinstance(p[i-1], int) + p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias + elif p[i] == 'callgsubr': + assert isinstance(p[i-1], int) + p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias + +@_add_method(psCharStrings.T2CharString) +def drop_hints(self): + hints = self._hints + + if hints.deletions: + p = self.program + for idx in reversed(hints.deletions): + del p[idx-2:idx] + + if hints.has_hint: + assert not hints.deletions or hints.last_hint <= hints.deletions[0] + self.program = self.program[hints.last_hint:] + if hasattr(self, 'width'): + # Insert width back if needed + if self.width != self.private.defaultWidthX: + self.program.insert(0, self.width - self.private.nominalWidthX) + + if hints.has_hintmask: + i = 0 + p = self.program + while i < len(p): + if p[i] in ['hintmask', 'cntrmask']: + assert i + 1 <= len(p) + del p[i:i+2] + continue + i += 1 + + assert len(self.program) + + del self._hints + +class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs): + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + for subrs in [localSubrs, globalSubrs]: + if subrs and 
not hasattr(subrs, "_used"): + subrs._used = set() + + def op_callsubr(self, index): + self.localSubrs._used.add(self.operandStack[-1]+self.localBias) + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + + def op_callgsubr(self, index): + self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias) + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + +class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor): + + class Hints(object): + def __init__(self): + # Whether calling this charstring produces any hint stems + # Note that if a charstring starts with hintmask, it will + # have has_hint set to True, because it *might* produce an + # implicit vstem if called under certain conditions. + self.has_hint = False + # Index to start at to drop all hints + self.last_hint = 0 + # Index up to which we know more hints are possible. + # Only relevant if status is 0 or 1. + self.last_checked = 0 + # The status means: + # 0: after dropping hints, this charstring is empty + # 1: after dropping hints, there may be more hints + # continuing after this + # 2: no more hints possible after this charstring + self.status = 0 + # Has hintmask instructions; not recursive + self.has_hintmask = False + # List of indices of calls to empty subroutines to remove. + self.deletions = [] + pass + + def __init__(self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX): + self._css = css + psCharStrings.T2WidthExtractor.__init__( + self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX) + + def execute(self, charString): + old_hints = charString._hints if hasattr(charString, '_hints') else None + charString._hints = self.Hints() + + psCharStrings.T2WidthExtractor.execute(self, charString) + + hints = charString._hints + + if hints.has_hint or hints.has_hintmask: + self._css.add(charString) + + if hints.status != 2: + # Check from last_check, make sure we didn't have any operators. 
+ for i in range(hints.last_checked, len(charString.program) - 1): + if isinstance(charString.program[i], str): + hints.status = 2 + break + else: + hints.status = 1 # There's *something* here + hints.last_checked = len(charString.program) + + if old_hints: + assert hints.__dict__ == old_hints.__dict__ + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.T2WidthExtractor.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.T2WidthExtractor.op_callgsubr(self, index) + self.processSubr(index, subr) + + def op_hstem(self, index): + psCharStrings.T2WidthExtractor.op_hstem(self, index) + self.processHint(index) + def op_vstem(self, index): + psCharStrings.T2WidthExtractor.op_vstem(self, index) + self.processHint(index) + def op_hstemhm(self, index): + psCharStrings.T2WidthExtractor.op_hstemhm(self, index) + self.processHint(index) + def op_vstemhm(self, index): + psCharStrings.T2WidthExtractor.op_vstemhm(self, index) + self.processHint(index) + def op_hintmask(self, index): + rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index) + self.processHintmask(index) + return rv + def op_cntrmask(self, index): + rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index) + self.processHintmask(index) + return rv + + def processHintmask(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hintmask = True + if hints.status != 2: + # Check from last_check, see if we may be an implicit vstem + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + else: + # We are an implicit vstem + hints.has_hint = True + hints.last_hint = index + 1 + hints.status = 0 + hints.last_checked = index + 1 + + def processHint(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hint = True + hints.last_hint = index + 
hints.last_checked = index + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + hints = cs._hints + subr_hints = subr._hints + + # Check from last_check, make sure we didn't have + # any operators. + if hints.status != 2: + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + hints.last_checked = index + + if hints.status != 2: + if subr_hints.has_hint: + hints.has_hint = True + + # Decide where to chop off from + if subr_hints.status == 0: + hints.last_hint = index + else: + hints.last_hint = index - 2 # Leave the subr call in + elif subr_hints.status == 0: + hints.deletions.append(index) + + hints.status = max(hints.status, subr_hints.status) + +class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs): + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + + def execute(self, charString): + # Note: Currently we recompute _desubroutinized each time. + # This is more robust in some cases, but in other places we assume + # that each subroutine always expands to the same code, so + # maybe it doesn't matter. To speed up we can just not + # recompute _desubroutinized if it's there. For now I just + # double-check that it desubroutinized to the same thing. 
+ old_desubroutinized = charString._desubroutinized if hasattr(charString, '_desubroutinized') else None + + charString._patches = [] + psCharStrings.SimpleT2Decompiler.execute(self, charString) + desubroutinized = charString.program[:] + for idx,expansion in reversed (charString._patches): + assert idx >= 2 + assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1] + assert type(desubroutinized[idx - 2]) == int + if expansion[-1] == 'return': + expansion = expansion[:-1] + desubroutinized[idx-2:idx] = expansion + if 'endchar' in desubroutinized: + # Cut off after first endchar + desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1] + else: + if not len(desubroutinized) or desubroutinized[-1] != 'return': + desubroutinized.append('return') + + charString._desubroutinized = desubroutinized + del charString._patches + + if old_desubroutinized: + assert desubroutinized == old_desubroutinized + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + self.processSubr(index, subr) + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + cs._patches.append((index, subr._desubroutinized)) + + +@_add_method(ttLib.getTableClass('CFF ')) +def prune_post_subset(self, options): + cff = self.cff + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + + # Drop unused FontDictionaries + if hasattr(font, "FDSelect"): + sel = font.FDSelect + indices = _uniq_sort(sel.gidArray) + sel.gidArray = [indices.index (ss) for ss in sel.gidArray] + arr = font.FDArray + arr.items = [arr[i] for i in indices] + del arr.file, arr.offsets + + # Desubroutinize if asked for + if options.desubroutinize: + for g in font.charset: 
+ c, _ = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs) + decompiler.execute(c) + c.program = c._desubroutinized + + # Drop hints if not needed + if not options.hinting: + + # This can be tricky, but doesn't have to. What we do is: + # + # - Run all used glyph charstrings and recurse into subroutines, + # - For each charstring (including subroutines), if it has any + # of the hint stem operators, we mark it as such. + # Upon returning, for each charstring we note all the + # subroutine calls it makes that (recursively) contain a stem, + # - Dropping hinting then consists of the following two ops: + # * Drop the piece of the program in each charstring before the + # last call to a stem op or a stem-calling subroutine, + # * Drop all hintmask operations. + # - It's trickier... A hintmask right after hints and a few numbers + # will act as an implicit vstemhm. As such, we track whether + # we have seen any non-hint operators so far and do the right + # thing, recursively... 
Good luck understanding that :( + css = set() + for g in font.charset: + c, _ = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs, + c.private.nominalWidthX, + c.private.defaultWidthX) + decompiler.execute(c) + c.width = decompiler.width + for charstring in css: + charstring.drop_hints() + del css + + # Drop font-wide hinting values + all_privs = [] + if hasattr(font, 'FDSelect'): + all_privs.extend(fd.Private for fd in font.FDArray) + else: + all_privs.append(font.Private) + for priv in all_privs: + for k in ['BlueValues', 'OtherBlues', + 'FamilyBlues', 'FamilyOtherBlues', + 'BlueScale', 'BlueShift', 'BlueFuzz', + 'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW']: + if hasattr(priv, k): + setattr(priv, k, None) + + # Renumber subroutines to remove unused ones + + # Mark all used subroutines + for g in font.charset: + c, _ = cs.getItemAndSelector(g) + subrs = getattr(c.private, "Subrs", []) + decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs) + decompiler.execute(c) + + all_subrs = [font.GlobalSubrs] + if hasattr(font, 'FDSelect'): + all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs) + elif hasattr(font.Private, 'Subrs') and font.Private.Subrs: + all_subrs.append(font.Private.Subrs) + + subrs = set(subrs) # Remove duplicates + + # Prepare + for subrs in all_subrs: + if not hasattr(subrs, '_used'): + subrs._used = set() + subrs._used = _uniq_sort(subrs._used) + subrs._old_bias = psCharStrings.calcSubrBias(subrs) + subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) + + # Renumber glyph charstrings + for g in font.charset: + c, _ = cs.getItemAndSelector(g) + subrs = getattr(c.private, "Subrs", []) + c.subset_subroutines (subrs, font.GlobalSubrs) + + # Renumber subroutines themselves + for subrs in all_subrs: + if subrs == font.GlobalSubrs: + if not hasattr(font, 'FDSelect') and hasattr(font.Private, 
'Subrs'): + local_subrs = font.Private.Subrs + else: + local_subrs = [] + else: + local_subrs = subrs + + subrs.items = [subrs.items[i] for i in subrs._used] + if hasattr(subrs, 'file'): + del subrs.file + if hasattr(subrs, 'offsets'): + del subrs.offsets + + for subr in subrs.items: + subr.subset_subroutines (local_subrs, font.GlobalSubrs) + + # Delete local SubrsIndex if empty + if hasattr(font, 'FDSelect'): + for fd in font.FDArray: + _delete_empty_subrs(fd.Private) + else: + _delete_empty_subrs(font.Private) + + # Cleanup + for subrs in all_subrs: + del subrs._used, subrs._old_bias, subrs._new_bias + + return True + + +def _delete_empty_subrs(private_dict): + if hasattr(private_dict, 'Subrs') and not private_dict.Subrs: + if 'Subrs' in private_dict.rawDict: + del private_dict.rawDict['Subrs'] + del private_dict.Subrs + + +@_add_method(ttLib.getTableClass('cmap')) +def closure_glyphs(self, s): + tables = [t for t in self.tables if t.isUnicode()] + + # Close glyphs + for table in tables: + if table.format == 14: + for cmap in table.uvsDict.values(): + glyphs = {g for u,g in cmap if u in s.unicodes_requested} + if None in glyphs: + glyphs.remove(None) + s.glyphs.update(glyphs) + else: + cmap = table.cmap + intersection = s.unicodes_requested.intersection(cmap.keys()) + s.glyphs.update(cmap[u] for u in intersection) + + # Calculate unicodes_missing + s.unicodes_missing = s.unicodes_requested.copy() + for table in tables: + s.unicodes_missing.difference_update(table.cmap) + +@_add_method(ttLib.getTableClass('cmap')) +def prune_pre_subset(self, font, options): + if not options.legacy_cmap: + # Drop non-Unicode / non-Symbol cmaps + self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()] + if not options.symbol_cmap: + self.tables = [t for t in self.tables if not t.isSymbol()] + # TODO(behdad) Only keep one subtable? + # For now, drop format=0 which can't be subset_glyphs easily? 
+ self.tables = [t for t in self.tables if t.format != 0] + self.numSubTables = len(self.tables) + return True # Required table + +@_add_method(ttLib.getTableClass('cmap')) +def subset_glyphs(self, s): + s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only + for t in self.tables: + if t.format == 14: + # TODO(behdad) We drop all the default-UVS mappings + # for glyphs_requested. So it's the caller's responsibility to make + # sure those are included. + t.uvsDict = {v:[(u,g) for u,g in l + if g in s.glyphs_requested or u in s.unicodes_requested] + for v,l in t.uvsDict.items()} + t.uvsDict = {v:l for v,l in t.uvsDict.items() if l} + elif t.isUnicode(): + t.cmap = {u:g for u,g in t.cmap.items() + if g in s.glyphs_requested or u in s.unicodes_requested} + else: + t.cmap = {u:g for u,g in t.cmap.items() + if g in s.glyphs_requested} + self.tables = [t for t in self.tables + if (t.cmap if t.format != 14 else t.uvsDict)] + self.numSubTables = len(self.tables) + # TODO(behdad) Convert formats when needed. + # In particular, if we have a format=12 without non-BMP + # characters, either drop format=12 one or convert it + # to format=4 if there's not one. 
+ return True # Required table + +@_add_method(ttLib.getTableClass('DSIG')) +def prune_pre_subset(self, font, options): + # Drop all signatures since they will be invalid + self.usNumSigs = 0 + self.signatureRecords = [] + return True + +@_add_method(ttLib.getTableClass('maxp')) +def prune_pre_subset(self, font, options): + if not options.hinting: + if self.tableVersion == 0x00010000: + self.maxZones = 1 + self.maxTwilightPoints = 0 + self.maxStorage = 0 + self.maxFunctionDefs = 0 + self.maxInstructionDefs = 0 + self.maxStackElements = 0 + self.maxSizeOfInstructions = 0 + return True + +@_add_method(ttLib.getTableClass('name')) +def prune_pre_subset(self, font, options): + nameIDs = set(options.name_IDs) + fvar = font.get('fvar') + if fvar: + nameIDs.update([axis.axisNameID for axis in fvar.axes]) + nameIDs.update([inst.subfamilyNameID for inst in fvar.instances]) + nameIDs.update([inst.postscriptNameID for inst in fvar.instances + if inst.postscriptNameID != 0xFFFF]) + if '*' not in options.name_IDs: + self.names = [n for n in self.names if n.nameID in nameIDs] + if not options.name_legacy: + # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman + # entry for Latin and no Unicode names. + self.names = [n for n in self.names if n.isUnicode()] + # TODO(behdad) Option to keep only one platform's + if '*' not in options.name_languages: + # TODO(behdad) This is Windows-platform specific! + self.names = [n for n in self.names + if n.langID in options.name_languages] + if options.obfuscate_names: + namerecs = [] + for n in self.names: + if n.nameID in [1, 4]: + n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f" + elif n.nameID in [2, 6]: + n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f" + elif n.nameID == 3: + n.string = "" + elif n.nameID in [16, 17, 18]: + continue + namerecs.append(n) + self.names = namerecs + return True # Required table + + +# TODO(behdad) OS/2 ulCodePageRange? +# TODO(behdad) Drop AAT tables. 
+# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries. +# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left +# TODO(behdad) Drop GDEF subitems if unused by lookups +# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF) +# TODO(behdad) Text direction considerations. +# TODO(behdad) Text script / language considerations. +# TODO(behdad) Optionally drop 'kern' table if GPOS available +# TODO(behdad) Implement --unicode='*' to choose all cmap'ed +# TODO(behdad) Drop old-spec Indic scripts + + +class Options(object): + + class OptionError(Exception): pass + class UnknownOptionError(OptionError): pass + + # spaces in tag names (e.g. "SVG ", "cvt ") are stripped by the argument parser + _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', + 'EBSC', 'SVG', 'PCLT', 'LTSH'] + _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite + _drop_tables_default += ['sbix'] # Color + _no_subset_tables_default = ['avar', 'fvar', + 'gasp', 'head', 'hhea', 'maxp', + 'vhea', 'OS/2', 'loca', 'name', 'cvt', + 'fpgm', 'prep', 'VDMX', 'DSIG', 'CPAL', + 'MVAR', 'STAT'] + _hinting_tables_default = ['cvar', 'cvt', 'fpgm', 'prep', 'hdmx', 'VDMX'] + + # Based on HarfBuzz shapers + _layout_features_groups = { + # Default shaper + 'common': ['rvrn', 'ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'], + 'fractions': ['frac', 'numr', 'dnom'], + 'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'], + 'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'], + 'ltr': ['ltra', 'ltrm'], + 'rtl': ['rtla', 'rtlm'], + # Complex shapers + 'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3', + 'cswh', 'mset', 'stch'], + 'hangul': ['ljmo', 'vjmo', 'tjmo'], + 'tibetan': ['abvs', 'blws', 'abvm', 'blwm'], + 'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half', + 'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres', + 'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'], + } + _layout_features_default = _uniq_sort(sum( + 
iter(_layout_features_groups.values()), [])) + + def __init__(self, **kwargs): + + self.drop_tables = self._drop_tables_default[:] + self.no_subset_tables = self._no_subset_tables_default[:] + self.passthrough_tables = False # keep/drop tables we can't subset + self.hinting_tables = self._hinting_tables_default[:] + self.legacy_kern = False # drop 'kern' table if GPOS available + self.layout_features = self._layout_features_default[:] + self.ignore_missing_glyphs = False + self.ignore_missing_unicodes = True + self.hinting = True + self.glyph_names = False + self.legacy_cmap = False + self.symbol_cmap = False + self.name_IDs = [1, 2] # Family and Style + self.name_legacy = False + self.name_languages = [0x0409] # English + self.obfuscate_names = False # to make webfont unusable as a system font + self.notdef_glyph = True # gid0 for TrueType / .notdef for CFF + self.notdef_outline = False # No need for notdef to have an outline really + self.recommended_glyphs = False # gid1, gid2, gid3 for TrueType + self.recalc_bounds = False # Recalculate font bounding boxes + self.recalc_timestamp = False # Recalculate font modified timestamp + self.prune_unicode_ranges = True # Clear unused 'ulUnicodeRange' bits + self.recalc_average_width = False # update 'xAvgCharWidth' + self.canonical_order = None # Order tables as recommended + self.flavor = None # May be 'woff' or 'woff2' + self.with_zopfli = False # use zopfli instead of zlib for WOFF 1.0 + self.desubroutinize = False # Desubroutinize CFF CharStrings + self.verbose = False + self.timing = False + self.xml = False + + self.set(**kwargs) + + def set(self, **kwargs): + for k,v in kwargs.items(): + if not hasattr(self, k): + raise self.UnknownOptionError("Unknown option '%s'" % k) + setattr(self, k, v) + + def parse_opts(self, argv, ignore_unknown=[]): + posargs = [] + passthru_options = [] + for a in argv: + orig_a = a + if not a.startswith('--'): + posargs.append(a) + continue + a = a[2:] + i = a.find('=') + op = '=' + if 
i == -1: + if a.startswith("no-"): + k = a[3:] + if k == "canonical-order": + # reorderTables=None is faster than False (the latter + # still reorders to "keep" the original table order) + v = None + else: + v = False + else: + k = a + v = True + if k.endswith("?"): + k = k[:-1] + v = '?' + else: + k = a[:i] + if k[-1] in "-+": + op = k[-1]+'=' # Op is '-=' or '+=' now. + k = k[:-1] + v = a[i+1:] + ok = k + k = k.replace('-', '_') + if not hasattr(self, k): + if ignore_unknown is True or ok in ignore_unknown: + passthru_options.append(orig_a) + continue + else: + raise self.UnknownOptionError("Unknown option '%s'" % a) + + ov = getattr(self, k) + if v == '?': + print("Current setting for '%s' is: %s" % (ok, ov)) + continue + if isinstance(ov, bool): + v = bool(v) + elif isinstance(ov, int): + v = int(v) + elif isinstance(ov, str): + v = str(v) # redundant + elif isinstance(ov, list): + if isinstance(v, bool): + raise self.OptionError("Option '%s' requires values to be specified using '='" % a) + vv = v.replace(',', ' ').split() + if vv == ['']: + vv = [] + vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] + if op == '=': + v = vv + elif op == '+=': + v = ov + v.extend(vv) + elif op == '-=': + v = ov + for x in vv: + if x in v: + v.remove(x) + else: + assert False + + setattr(self, k, v) + + return posargs + passthru_options + + +class Subsetter(object): + + class SubsettingError(Exception): pass + class MissingGlyphsSubsettingError(SubsettingError): pass + class MissingUnicodesSubsettingError(SubsettingError): pass + + def __init__(self, options=None): + + if not options: + options = Options() + + self.options = options + self.unicodes_requested = set() + self.glyph_names_requested = set() + self.glyph_ids_requested = set() + + def populate(self, glyphs=[], gids=[], unicodes=[], text=""): + self.unicodes_requested.update(unicodes) + if isinstance(text, bytes): + text = text.decode("utf_8") + text_utf32 = text.encode("utf-32-be") + nchars = 
len(text_utf32)//4 + for u in struct.unpack('>%dL' % nchars, text_utf32): + self.unicodes_requested.add(u) + self.glyph_names_requested.update(glyphs) + self.glyph_ids_requested.update(gids) + + def _prune_pre_subset(self, font): + for tag in self._sort_tables(font): + if(tag.strip() in self.options.drop_tables or + (tag.strip() in self.options.hinting_tables and not self.options.hinting) or + (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))): + log.info("%s dropped", tag) + del font[tag] + continue + + clazz = ttLib.getTableClass(tag) + + if hasattr(clazz, 'prune_pre_subset'): + with timer("load '%s'" % tag): + table = font[tag] + with timer("prune '%s'" % tag): + retain = table.prune_pre_subset(font, self.options) + if not retain: + log.info("%s pruned to empty; dropped", tag) + del font[tag] + continue + else: + log.info("%s pruned", tag) + + def _closure_glyphs(self, font): + + realGlyphs = set(font.getGlyphOrder()) + glyph_order = font.getGlyphOrder() + + self.glyphs_requested = set() + self.glyphs_requested.update(self.glyph_names_requested) + self.glyphs_requested.update(glyph_order[i] + for i in self.glyph_ids_requested + if i < len(glyph_order)) + + self.glyphs_missing = set() + self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs)) + self.glyphs_missing.update(i for i in self.glyph_ids_requested + if i >= len(glyph_order)) + if self.glyphs_missing: + log.info("Missing requested glyphs: %s", self.glyphs_missing) + if not self.options.ignore_missing_glyphs: + raise self.MissingGlyphsSubsettingError(self.glyphs_missing) + + self.glyphs = self.glyphs_requested.copy() + + self.unicodes_missing = set() + if 'cmap' in font: + with timer("close glyph list over 'cmap'"): + font['cmap'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.glyphs_cmaped = frozenset(self.glyphs) + if self.unicodes_missing: + missing = ["U+%04X" % u for u in self.unicodes_missing] + log.info("Missing glyphs for requested 
Unicodes: %s", missing) + if not self.options.ignore_missing_unicodes: + raise self.MissingUnicodesSubsettingError(missing) + del missing + + if self.options.notdef_glyph: + if 'glyf' in font: + self.glyphs.add(font.getGlyphName(0)) + log.info("Added gid0 to subset") + else: + self.glyphs.add('.notdef') + log.info("Added .notdef to subset") + if self.options.recommended_glyphs: + if 'glyf' in font: + for i in range(min(4, len(font.getGlyphOrder()))): + self.glyphs.add(font.getGlyphName(i)) + log.info("Added first four glyphs to subset") + + if 'GSUB' in font: + with timer("close glyph list over 'GSUB'"): + log.info("Closing glyph list over 'GSUB': %d glyphs before", + len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + font['GSUB'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + log.info("Closed glyph list over 'GSUB': %d glyphs after", + len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + self.glyphs_gsubed = frozenset(self.glyphs) + + if 'MATH' in font: + with timer("close glyph list over 'MATH'"): + log.info("Closing glyph list over 'MATH': %d glyphs before", + len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + font['MATH'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + log.info("Closed glyph list over 'MATH': %d glyphs after", + len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + self.glyphs_mathed = frozenset(self.glyphs) + + for table in ('COLR', 'bsln'): + if table in font: + with timer("close glyph list over '%s'" % table): + log.info("Closing glyph list over '%s': %d glyphs before", + table, len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + font[table].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + log.info("Closed glyph list over '%s': %d glyphs after", + table, len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + + if 'glyf' in font: + with timer("close glyph list over 'glyf'"): + log.info("Closing glyph list over 'glyf': %d glyphs before", + 
len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + font['glyf'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + log.info("Closed glyph list over 'glyf': %d glyphs after", + len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + self.glyphs_glyfed = frozenset(self.glyphs) + + self.glyphs_all = frozenset(self.glyphs) + + log.info("Retaining %d glyphs", len(self.glyphs_all)) + + del self.glyphs + + def _subset_glyphs(self, font): + for tag in self._sort_tables(font): + clazz = ttLib.getTableClass(tag) + + if tag.strip() in self.options.no_subset_tables: + log.info("%s subsetting not needed", tag) + elif hasattr(clazz, 'subset_glyphs'): + with timer("subset '%s'" % tag): + table = font[tag] + self.glyphs = self.glyphs_all + retain = table.subset_glyphs(self) + del self.glyphs + if not retain: + log.info("%s subsetted to empty; dropped", tag) + del font[tag] + else: + log.info("%s subsetted", tag) + elif self.options.passthrough_tables: + log.info("%s NOT subset; don't know how to subset", tag) + else: + log.info("%s NOT subset; don't know how to subset; dropped", tag) + del font[tag] + + with timer("subset GlyphOrder"): + glyphOrder = font.getGlyphOrder() + glyphOrder = [g for g in glyphOrder if g in self.glyphs_all] + font.setGlyphOrder(glyphOrder) + font._buildReverseGlyphOrderDict() + + def _prune_post_subset(self, font): + for tag in font.keys(): + if tag == 'GlyphOrder': continue + if tag == 'OS/2' and self.options.prune_unicode_ranges: + old_uniranges = font[tag].getUnicodeRanges() + new_uniranges = font[tag].recalcUnicodeRanges(font, pruneOnly=True) + if old_uniranges != new_uniranges: + log.info("%s Unicode ranges pruned: %s", tag, sorted(new_uniranges)) + if self.options.recalc_average_width: + widths = [m[0] for m in font["hmtx"].metrics.values() if m[0] > 0] + avg_width = round(sum(widths) / len(widths)) + if avg_width != font[tag].xAvgCharWidth: + font[tag].xAvgCharWidth = avg_width + log.info("%s xAvgCharWidth updated: %d", 
tag, avg_width) + clazz = ttLib.getTableClass(tag) + if hasattr(clazz, 'prune_post_subset'): + with timer("prune '%s'" % tag): + table = font[tag] + retain = table.prune_post_subset(self.options) + if not retain: + log.info("%s pruned to empty; dropped", tag) + del font[tag] + else: + log.info("%s pruned", tag) + + def _sort_tables(self, font): + tagOrder = ['fvar', 'avar', 'gvar', 'name', 'glyf'] + tagOrder = {t: i + 1 for i, t in enumerate(tagOrder)} + tags = sorted(font.keys(), key=lambda tag: tagOrder.get(tag, 0)) + return [t for t in tags if t != 'GlyphOrder'] + + def subset(self, font): + self._prune_pre_subset(font) + self._closure_glyphs(font) + self._subset_glyphs(font) + self._prune_post_subset(font) + + +@timer("load font") +def load_font(fontFile, + options, + allowVID=False, + checkChecksums=False, + dontLoadGlyphNames=False, + lazy=True): + + font = ttLib.TTFont(fontFile, + allowVID=allowVID, + checkChecksums=checkChecksums, + recalcBBoxes=options.recalc_bounds, + recalcTimestamp=options.recalc_timestamp, + lazy=lazy) + + # Hack: + # + # If we don't need glyph names, change 'post' class to not try to + # load them. It avoid lots of headache with broken fonts as well + # as loading time. + # + # Ideally ttLib should provide a way to ask it to skip loading + # glyph names. But it currently doesn't provide such a thing. 
+ # + if dontLoadGlyphNames: + post = ttLib.getTableClass('post') + saved = post.decode_format_2_0 + post.decode_format_2_0 = post.decode_format_3_0 + f = font['post'] + if f.formatType == 2.0: + f.formatType = 3.0 + post.decode_format_2_0 = saved + + return font + +@timer("compile and save font") +def save_font(font, outfile, options): + if options.flavor and not hasattr(font, 'flavor'): + raise Exception("fonttools version does not support flavors.") + if options.with_zopfli and options.flavor == "woff": + from fontTools.ttLib import sfnt + sfnt.USE_ZOPFLI = True + font.flavor = options.flavor + font.save(outfile, reorderTables=options.canonical_order) + +def parse_unicodes(s): + import re + s = re.sub (r"0[xX]", " ", s) + s = re.sub (r"[<+>,;&#\\xXuU\n ]", " ", s) + l = [] + for item in s.split(): + fields = item.split('-') + if len(fields) == 1: + l.append(int(item, 16)) + else: + start,end = fields + l.extend(range(int(start, 16), int(end, 16)+1)) + return l + +def parse_gids(s): + l = [] + for item in s.replace(',', ' ').split(): + fields = item.split('-') + if len(fields) == 1: + l.append(int(fields[0])) + else: + l.extend(range(int(fields[0]), int(fields[1])+1)) + return l + +def parse_glyphs(s): + return s.replace(',', ' ').split() + +def usage(): + print("usage:", __usage__, file=sys.stderr) + print("Try pyftsubset --help for more information.\n", file=sys.stderr) + +@timer("make one with everything (TOTAL TIME)") +def main(args=None): + from os.path import splitext + from fontTools import configLogger + + if args is None: + args = sys.argv[1:] + + if '--help' in args: + print(__doc__) + return 0 + + options = Options() + try: + args = options.parse_opts(args, + ignore_unknown=['gids', 'gids-file', + 'glyphs', 'glyphs-file', + 'text', 'text-file', + 'unicodes', 'unicodes-file', + 'output-file']) + except options.OptionError as e: + usage() + print("ERROR:", e, file=sys.stderr) + return 2 + + if len(args) < 2: + usage() + return 1 + + 
configLogger(level=logging.INFO if options.verbose else logging.WARNING) + if options.timing: + timer.logger.setLevel(logging.DEBUG) + else: + timer.logger.disabled = True + + fontfile = args[0] + args = args[1:] + + subsetter = Subsetter(options=options) + basename, extension = splitext(fontfile) + outfile = basename + '.subset' + extension + glyphs = [] + gids = [] + unicodes = [] + wildcard_glyphs = False + wildcard_unicodes = False + text = "" + for g in args: + if g == '*': + wildcard_glyphs = True + continue + if g.startswith('--output-file='): + outfile = g[14:] + continue + if g.startswith('--text='): + text += g[7:] + continue + if g.startswith('--text-file='): + text += open(g[12:], encoding='utf-8').read().replace('\n', '') + continue + if g.startswith('--unicodes='): + if g[11:] == '*': + wildcard_unicodes = True + else: + unicodes.extend(parse_unicodes(g[11:])) + continue + if g.startswith('--unicodes-file='): + for line in open(g[16:]).readlines(): + unicodes.extend(parse_unicodes(line.split('#')[0])) + continue + if g.startswith('--gids='): + gids.extend(parse_gids(g[7:])) + continue + if g.startswith('--gids-file='): + for line in open(g[12:]).readlines(): + gids.extend(parse_gids(line.split('#')[0])) + continue + if g.startswith('--glyphs='): + if g[9:] == '*': + wildcard_glyphs = True + else: + glyphs.extend(parse_glyphs(g[9:])) + continue + if g.startswith('--glyphs-file='): + for line in open(g[14:]).readlines(): + glyphs.extend(parse_glyphs(line.split('#')[0])) + continue + glyphs.append(g) + + dontLoadGlyphNames = not options.glyph_names and not glyphs + font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames) + + with timer("compile glyph list"): + if wildcard_glyphs: + glyphs.extend(font.getGlyphOrder()) + if wildcard_unicodes: + for t in font['cmap'].tables: + if t.isUnicode(): + unicodes.extend(t.cmap.keys()) + assert '' not in glyphs + + log.info("Text: '%s'" % text) + log.info("Unicodes: %s", unicodes) + 
log.info("Glyphs: %s", glyphs) + log.info("Gids: %s", gids) + + subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text) + subsetter.subset(font) + + save_font(font, outfile, options) + + if options.verbose: + import os + log.info("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile)) + log.info("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile)) + + if options.xml: + font.saveXML(sys.stdout) + + font.close() + + +__all__ = [ + 'Options', + 'Subsetter', + 'load_font', + 'save_font', + 'parse_gids', + 'parse_glyphs', + 'parse_unicodes', + 'main' +] + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Lib/fontTools/subset/__main__.py fonttools-3.21.2/Lib/fontTools/subset/__main__.py --- fonttools-3.0/Lib/fontTools/subset/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/subset/__main__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +from fontTools.subset import main + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Lib/fontTools/subset.py fonttools-3.21.2/Lib/fontTools/subset.py --- fonttools-3.0/Lib/fontTools/subset.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/subset.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,2742 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Behdad Esfahbod - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib -from fontTools.ttLib.tables import otTables -from fontTools.misc import psCharStrings -import sys -import struct -import time -import array - -__usage__ = "pyftsubset font-file [glyph...] [--option=value]..." - -__doc__="""\ -pyftsubset -- OpenType font subsetter and optimizer - - pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. 
- It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) - font file. The subsetted glyph set is based on the specified glyphs - or characters, and specified OpenType layout features. - - The tool also performs some size-reducing optimizations, aimed for using - subset fonts as webfonts. Individual optimizations can be enabled or - disabled, and are enabled by default when they are safe. - -Usage: - """+__usage__+""" - - At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, - --text, --text-file, --unicodes, or --unicodes-file, must be specified. - -Arguments: - font-file - The input font file. - glyph - Specify one or more glyph identifiers to include in the subset. Must be - PS glyph names, or the special string '*' to keep the entire glyph set. - -Initial glyph set specification: - These options populate the initial glyph set. Same option can appear - multiple times, and the results are accummulated. - --gids=[,...] - Specify comma/whitespace-separated list of glyph IDs or ranges as - decimal numbers. For example, --gids=10-12,14 adds glyphs with - numbers 10, 11, 12, and 14. - --gids-file= - Like --gids but reads from a file. Anything after a '#' on any line - is ignored as comments. - --glyphs=[,...] - Specify comma/whitespace-separated PS glyph names to add to the subset. - Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc - that are accepted on the command line. The special string '*' wil keep - the entire glyph set. - --glyphs-file= - Like --glyphs but reads from a file. Anything after a '#' on any line - is ignored as comments. - --text= - Specify characters to include in the subset, as UTF-8 string. - --text-file= - Like --text but reads from a file. Newline character are not added to - the subset. - --unicodes=[,...] - Specify comma/whitespace-separated list of Unicode codepoints or - ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. 
- For example, --unicodes=41-5a,61-7a adds ASCII letters, so does - the more verbose --unicodes=U+0041-005A,U+0061-007A. - The special strings '*' will choose all Unicode characters mapped - by the font. - --unicodes-file= - Like --unicodes, but reads from a file. Anything after a '#' on any - line in the file is ignored as comments. - --ignore-missing-glyphs - Do not fail if some requested glyphs or gids are not available in - the font. - --no-ignore-missing-glyphs - Stop and fail if some requested glyphs or gids are not available - in the font. [default] - --ignore-missing-unicodes [default] - Do not fail if some requested Unicode characters (including those - indirectly specified using --text or --text-file) are not available - in the font. - --no-ignore-missing-unicodes - Stop and fail if some requested Unicode characters are not available - in the font. - Note the default discrepancy between ignoring missing glyphs versus - unicodes. This is for historical reasons and in the future - --no-ignore-missing-unicodes might become default. - -Other options: - For the other options listed below, to see the current value of the option, - pass a value of '?' to it, with or without a '='. - Examples: - $ pyftsubset --glyph-names? - Current setting for 'glyph-names' is: False - $ ./pyftsubset --name-IDs=? - Current setting for 'name-IDs' is: [1, 2] - $ ./pyftsubset --hinting? --no-hinting --hinting? - Current setting for 'hinting' is: True - Current setting for 'hinting' is: False - -Output options: - --output-file= - The output font file. If not specified, the subsetted font - will be saved in as font-file.subset. - --flavor= - Specify flavor of output font file. May be 'woff' or 'woff2'. - Note that WOFF2 requires the Brotli Python extension, available - at https://github.com/google/brotli - -Glyph set expansion: - These options control how additional glyphs are added to the subset. - --notdef-glyph - Add the '.notdef' glyph to the subset (ie, keep it). 
[default] - --no-notdef-glyph - Drop the '.notdef' glyph unless specified in the glyph set. This - saves a few bytes, but is not possible for Postscript-flavored - fonts, as those require '.notdef'. For TrueType-flavored fonts, - this works fine as long as no unsupported glyphs are requested - from the font. - --notdef-outline - Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is - used when glyphs not supported by the font are to be shown. It is not - needed otherwise. - --no-notdef-outline - When including a '.notdef' glyph, remove its outline. This saves - a few bytes. [default] - --recommended-glyphs - Add glyphs 0, 1, 2, and 3 to the subset, as recommended for - TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. - Some legacy software might require this, but no modern system does. - --no-recommended-glyphs - Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in - glyph set. [default] - --layout-features[+|-]=[,...] - Specify (=), add to (+=) or exclude from (-=) the comma-separated - set of OpenType layout feature tags that will be preserved. - Glyph variants used by the preserved features are added to the - specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', - 'kern', 'liga', 'locl', 'mark', 'mkmk', 'rclt', 'rlig' and all features - required for script shaping are preserved. To see the full list, try - '--layout-features=?'. Use '*' to keep all features. - Multiple --layout-features options can be provided if necessary. - Examples: - --layout-features+=onum,pnum,ss01 - * Keep the default set of features and 'onum', 'pnum', 'ss01'. - --layout-features-='mark','mkmk' - * Keep the default set of features but drop 'mark' and 'mkmk'. - --layout-features='kern' - * Only keep the 'kern' feature, drop all others. - --layout-features='' - * Drop all features. - --layout-features='*' - * Keep all features. 
- --layout-features+=aalt --layout-features-=vrt2 - * Keep default set of features plus 'aalt', but drop 'vrt2'. - -Hinting options: - --hinting - Keep hinting [default] - --no-hinting - Drop glyph-specific hinting and font-wide hinting tables, as well - as remove hinting-related bits and pieces from other tables (eg. GPOS). - See --hinting-tables for list of tables that are dropped by default. - Instructions and hints are stripped from 'glyf' and 'CFF ' tables - respectively. This produces (sometimes up to 30%) smaller fonts that - are suitable for extremely high-resolution systems, like high-end - mobile devices and retina displays. - XXX Note: Currently there is a known bug in 'CFF ' hint stripping that - might make the font unusable as a webfont as they will be rejected by - OpenType Sanitizer used in common browsers. For more information see: - https://github.com/behdad/fonttools/issues/144 - The --desubroutinize options works around that bug. - -Optimization options: - --desubroutinize - Remove CFF use of subroutinizes. Subroutinization is a way to make CFF - fonts smaller. For small subsets however, desubroutinizing might make - the font smaller. It has even been reported that desubroutinized CFF - fonts compress better (produce smaller output) WOFF and WOFF2 fonts. - Also see note under --no-hinting. - --no-desubroutinize [default] - Leave CFF subroutinizes as is, only throw away unused subroutinizes. - -Font table options: - --drop-tables[+|-]=
[,
...] - Specify (=), add to (+=) or exclude from (-=) the comma-separated - set of tables that will be be dropped. - By default, the following tables are dropped: - 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH' - and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill' - and color tables: 'CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'. - The tool will attempt to subset the remaining tables. - Examples: - --drop-tables-='SVG ' - * Drop the default set of tables but keep 'SVG '. - --drop-tables+=GSUB - * Drop the default set of tables and 'GSUB'. - --drop-tables=DSIG - * Only drop the 'DSIG' table, keep all others. - --drop-tables= - * Keep all tables. - --no-subset-tables+=
[,
...] - Add to the set of tables that will not be subsetted. - By default, the following tables are included in this list, as - they do not need subsetting (ignore the fact that 'loca' is listed - here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', - 'name', 'cvt ', 'fpgm', 'prep', 'VMDX', and 'DSIG'. Tables that the tool - does not know how to subset and are not specified here will be dropped - from the font. - Example: - --no-subset-tables+=FFTM - * Keep 'FFTM' table in the font by preventing subsetting. - --hinting-tables[-]=
[,
...] - Specify (=), add to (+=) or exclude from (-=) the list of font-wide - hinting tables that will be dropped if --no-hinting is specified, - Examples: - --hinting-tables-='VDMX' - * Drop font-wide hinting tables except 'VDMX'. - --hinting-tables='' - * Keep all font-wide hinting tables (but strip hints from glyphs). - --legacy-kern - Keep TrueType 'kern' table even when OpenType 'GPOS' is available. - --no-legacy-kern - Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default] - -Font naming options: - These options control what is retained in the 'name' table. For numerical - codes, see: http://www.microsoft.com/typography/otspec/name.htm - --name-IDs[+|-]=[,...] - Specify (=), add to (+=) or exclude from (-=) the set of 'name' table - entry nameIDs that will be preserved. By default only nameID 1 (Family) - and nameID 2 (Style) are preserved. Use '*' to keep all entries. - Examples: - --name-IDs+=0,4,6 - * Also keep Copyright, Full name and PostScript name entry. - --name-IDs='' - * Drop all 'name' table entries. - --name-IDs='*' - * keep all 'name' table entries - --name-legacy - Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.). - XXX Note: This might be needed for some fonts that have no Unicode name - entires for English. See: https://github.com/behdad/fonttools/issues/146 - --no-name-legacy - Drop legacy (non-Unicode) 'name' table entries [default] - --name-languages[+|-]=[,] - Specify (=), add to (+=) or exclude from (-=) the set of 'name' table - langIDs that will be preserved. By default only records with langID - 0x0409 (English) are preserved. Use '*' to keep all langIDs. - --obfuscate-names - Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4, - and 6 with dummy strings (it is still fully functional as webfont). - -Glyph naming and encoding options: - --glyph-names - Keep PS glyph names in TT-flavored fonts. In general glyph names are - not needed for correct use of the font. 
However, some PDF generators - and PDF viewers might rely on glyph names to extract Unicode text - from PDF documents. - --no-glyph-names - Drop PS glyph names in TT-flavored fonts, by using 'post' table - version 3.0. [default] - --legacy-cmap - Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.). - --no-legacy-cmap - Drop the legacy 'cmap' subtables. [default] - --symbol-cmap - Keep the 3.0 symbol 'cmap'. - --no-symbol-cmap - Drop the 3.0 symbol 'cmap'. [default] - -Other font-specific options: - --recalc-bounds - Recalculate font bounding boxes. - --no-recalc-bounds - Keep original font bounding boxes. This is faster and still safe - for all practical purposes. [default] - --recalc-timestamp - Set font 'modified' timestamp to current time. - --no-recalc-timestamp - Do not modify font 'modified' timestamp. [default] - --canonical-order - Order tables as recommended in the OpenType standard. This is not - required by the standard, nor by any known implementation. - --no-canonical-order - Keep original order of font tables. This is faster. [default] - -Application options: - --verbose - Display verbose information of the subsetting process. - --timing - Display detailed timing information of the subsetting process. - --xml - Display the TTX XML representation of subsetted font. - -Example: - Produce a subset containing the characters ' !"#$%' without performing - size-reducing optimizations: - - $ pyftsubset font.ttf --unicodes="U+0020-0025" \\ - --layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\ - --notdef-glyph --notdef-outline --recommended-glyphs \\ - --name-IDs='*' --name-legacy --name-languages='*' -""" - - -def _add_method(*clazzes): - """Returns a decorator function that adds a new method to one or - more classes.""" - def wrapper(method): - for clazz in clazzes: - assert clazz.__name__ != 'DefaultTable', \ - 'Oops, table class not found.' - assert not hasattr(clazz, method.__name__), \ - "Oops, class '%s' has method '%s'." 
% (clazz.__name__, - method.__name__) - setattr(clazz, method.__name__, method) - return None - return wrapper - -def _uniq_sort(l): - return sorted(set(l)) - -def _set_update(s, *others): - # Jython's set.update only takes one other argument. - # Emulate real set.update... - for other in others: - s.update(other) - -def _dict_subset(d, glyphs): - return {g:d[g] for g in glyphs} - - -@_add_method(otTables.Coverage) -def intersect(self, glyphs): - """Returns ascending list of matching coverage values.""" - return [i for i,g in enumerate(self.glyphs) if g in glyphs] - -@_add_method(otTables.Coverage) -def intersect_glyphs(self, glyphs): - """Returns set of intersecting glyphs.""" - return set(g for g in self.glyphs if g in glyphs) - -@_add_method(otTables.Coverage) -def subset(self, glyphs): - """Returns ascending list of remaining coverage values.""" - indices = self.intersect(glyphs) - self.glyphs = [g for g in self.glyphs if g in glyphs] - return indices - -@_add_method(otTables.Coverage) -def remap(self, coverage_map): - """Remaps coverage.""" - self.glyphs = [self.glyphs[i] for i in coverage_map] - -@_add_method(otTables.ClassDef) -def intersect(self, glyphs): - """Returns ascending list of matching class values.""" - return _uniq_sort( - ([0] if any(g not in self.classDefs for g in glyphs) else []) + - [v for g,v in self.classDefs.items() if g in glyphs]) - -@_add_method(otTables.ClassDef) -def intersect_class(self, glyphs, klass): - """Returns set of glyphs matching class.""" - if klass == 0: - return set(g for g in glyphs if g not in self.classDefs) - return set(g for g,v in self.classDefs.items() - if v == klass and g in glyphs) - -@_add_method(otTables.ClassDef) -def subset(self, glyphs, remap=False): - """Returns ascending list of remaining classes.""" - self.classDefs = {g:v for g,v in self.classDefs.items() if g in glyphs} - # Note: while class 0 has the special meaning of "not matched", - # if no glyph will ever /not match/, we can optimize class 0 out 
too. - indices = _uniq_sort( - ([0] if any(g not in self.classDefs for g in glyphs) else []) + - list(self.classDefs.values())) - if remap: - self.remap(indices) - return indices - -@_add_method(otTables.ClassDef) -def remap(self, class_map): - """Remaps classes.""" - self.classDefs = {g:class_map.index(v) for g,v in self.classDefs.items()} - -@_add_method(otTables.SingleSubst) -def closure_glyphs(self, s, cur_glyphs): - s.glyphs.update(v for g,v in self.mapping.items() if g in cur_glyphs) - -@_add_method(otTables.SingleSubst) -def subset_glyphs(self, s): - self.mapping = {g:v for g,v in self.mapping.items() - if g in s.glyphs and v in s.glyphs} - return bool(self.mapping) - -@_add_method(otTables.MultipleSubst) -def closure_glyphs(self, s, cur_glyphs): - indices = self.Coverage.intersect(cur_glyphs) - _set_update(s.glyphs, *(self.Sequence[i].Substitute for i in indices)) - -@_add_method(otTables.MultipleSubst) -def subset_glyphs(self, s): - indices = self.Coverage.subset(s.glyphs) - self.Sequence = [self.Sequence[i] for i in indices] - # Now drop rules generating glyphs we don't want - indices = [i for i,seq in enumerate(self.Sequence) - if all(sub in s.glyphs for sub in seq.Substitute)] - self.Sequence = [self.Sequence[i] for i in indices] - self.Coverage.remap(indices) - self.SequenceCount = len(self.Sequence) - return bool(self.SequenceCount) - -@_add_method(otTables.AlternateSubst) -def closure_glyphs(self, s, cur_glyphs): - _set_update(s.glyphs, *(vlist for g,vlist in self.alternates.items() - if g in cur_glyphs)) - -@_add_method(otTables.AlternateSubst) -def subset_glyphs(self, s): - self.alternates = {g:vlist - for g,vlist in self.alternates.items() - if g in s.glyphs and - all(v in s.glyphs for v in vlist)} - return bool(self.alternates) - -@_add_method(otTables.LigatureSubst) -def closure_glyphs(self, s, cur_glyphs): - _set_update(s.glyphs, *([seq.LigGlyph for seq in seqs - if all(c in s.glyphs for c in seq.Component)] - for g,seqs in 
self.ligatures.items() - if g in cur_glyphs)) - -@_add_method(otTables.LigatureSubst) -def subset_glyphs(self, s): - self.ligatures = {g:v for g,v in self.ligatures.items() - if g in s.glyphs} - self.ligatures = {g:[seq for seq in seqs - if seq.LigGlyph in s.glyphs and - all(c in s.glyphs for c in seq.Component)] - for g,seqs in self.ligatures.items()} - self.ligatures = {g:v for g,v in self.ligatures.items() if v} - return bool(self.ligatures) - -@_add_method(otTables.ReverseChainSingleSubst) -def closure_glyphs(self, s, cur_glyphs): - if self.Format == 1: - indices = self.Coverage.intersect(cur_glyphs) - if(not indices or - not all(c.intersect(s.glyphs) - for c in self.LookAheadCoverage + self.BacktrackCoverage)): - return - s.glyphs.update(self.Substitute[i] for i in indices) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ReverseChainSingleSubst) -def subset_glyphs(self, s): - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - self.Substitute = [self.Substitute[i] for i in indices] - # Now drop rules generating glyphs we don't want - indices = [i for i,sub in enumerate(self.Substitute) - if sub in s.glyphs] - self.Substitute = [self.Substitute[i] for i in indices] - self.Coverage.remap(indices) - self.GlyphCount = len(self.Substitute) - return bool(self.GlyphCount and - all(c.subset(s.glyphs) - for c in self.LookAheadCoverage+self.BacktrackCoverage)) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.SinglePos) -def subset_glyphs(self, s): - if self.Format == 1: - return len(self.Coverage.subset(s.glyphs)) - elif self.Format == 2: - indices = self.Coverage.subset(s.glyphs) - self.Value = [self.Value[i] for i in indices] - self.ValueCount = len(self.Value) - return bool(self.ValueCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.SinglePos) -def prune_post_subset(self, options): - if not options.hinting: - # Drop device tables - self.ValueFormat &= 
~0x00F0 - return True - -@_add_method(otTables.PairPos) -def subset_glyphs(self, s): - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - self.PairSet = [self.PairSet[i] for i in indices] - for p in self.PairSet: - p.PairValueRecord = [r for r in p.PairValueRecord - if r.SecondGlyph in s.glyphs] - p.PairValueCount = len(p.PairValueRecord) - # Remove empty pairsets - indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount] - self.Coverage.remap(indices) - self.PairSet = [self.PairSet[i] for i in indices] - self.PairSetCount = len(self.PairSet) - return bool(self.PairSetCount) - elif self.Format == 2: - class1_map = self.ClassDef1.subset(s.glyphs, remap=True) - class2_map = self.ClassDef2.subset(s.glyphs, remap=True) - self.Class1Record = [self.Class1Record[i] for i in class1_map] - for c in self.Class1Record: - c.Class2Record = [c.Class2Record[i] for i in class2_map] - self.Class1Count = len(class1_map) - self.Class2Count = len(class2_map) - return bool(self.Class1Count and - self.Class2Count and - self.Coverage.subset(s.glyphs)) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.PairPos) -def prune_post_subset(self, options): - if not options.hinting: - # Drop device tables - self.ValueFormat1 &= ~0x00F0 - self.ValueFormat2 &= ~0x00F0 - return True - -@_add_method(otTables.CursivePos) -def subset_glyphs(self, s): - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - self.EntryExitRecord = [self.EntryExitRecord[i] for i in indices] - self.EntryExitCount = len(self.EntryExitRecord) - return bool(self.EntryExitCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.Anchor) -def prune_hints(self): - # Drop device tables / contour anchor point - self.ensureDecompiled() - self.Format = 1 - -@_add_method(otTables.CursivePos) -def prune_post_subset(self, options): - if not options.hinting: - for rec in self.EntryExitRecord: - if rec.EntryAnchor: 
rec.EntryAnchor.prune_hints() - if rec.ExitAnchor: rec.ExitAnchor.prune_hints() - return True - -@_add_method(otTables.MarkBasePos) -def subset_glyphs(self, s): - if self.Format == 1: - mark_indices = self.MarkCoverage.subset(s.glyphs) - self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] - for i in mark_indices] - self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) - base_indices = self.BaseCoverage.subset(s.glyphs) - self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i] - for i in base_indices] - self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord) - # Prune empty classes - class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) - self.ClassCount = len(class_indices) - for m in self.MarkArray.MarkRecord: - m.Class = class_indices.index(m.Class) - for b in self.BaseArray.BaseRecord: - b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices] - return bool(self.ClassCount and - self.MarkArray.MarkCount and - self.BaseArray.BaseCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.MarkBasePos) -def prune_post_subset(self, options): - if not options.hinting: - for m in self.MarkArray.MarkRecord: - if m.MarkAnchor: - m.MarkAnchor.prune_hints() - for b in self.BaseArray.BaseRecord: - for a in b.BaseAnchor: - if a: - a.prune_hints() - return True - -@_add_method(otTables.MarkLigPos) -def subset_glyphs(self, s): - if self.Format == 1: - mark_indices = self.MarkCoverage.subset(s.glyphs) - self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] - for i in mark_indices] - self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) - ligature_indices = self.LigatureCoverage.subset(s.glyphs) - self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i] - for i in ligature_indices] - self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach) - # Prune empty classes - class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) - self.ClassCount = len(class_indices) - for m 
in self.MarkArray.MarkRecord: - m.Class = class_indices.index(m.Class) - for l in self.LigatureArray.LigatureAttach: - for c in l.ComponentRecord: - c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices] - return bool(self.ClassCount and - self.MarkArray.MarkCount and - self.LigatureArray.LigatureCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.MarkLigPos) -def prune_post_subset(self, options): - if not options.hinting: - for m in self.MarkArray.MarkRecord: - if m.MarkAnchor: - m.MarkAnchor.prune_hints() - for l in self.LigatureArray.LigatureAttach: - for c in l.ComponentRecord: - for a in c.LigatureAnchor: - if a: - a.prune_hints() - return True - -@_add_method(otTables.MarkMarkPos) -def subset_glyphs(self, s): - if self.Format == 1: - mark1_indices = self.Mark1Coverage.subset(s.glyphs) - self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i] - for i in mark1_indices] - self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord) - mark2_indices = self.Mark2Coverage.subset(s.glyphs) - self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i] - for i in mark2_indices] - self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record) - # Prune empty classes - class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord) - self.ClassCount = len(class_indices) - for m in self.Mark1Array.MarkRecord: - m.Class = class_indices.index(m.Class) - for b in self.Mark2Array.Mark2Record: - b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices] - return bool(self.ClassCount and - self.Mark1Array.MarkCount and - self.Mark2Array.MarkCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.MarkMarkPos) -def prune_post_subset(self, options): - if not options.hinting: - # Drop device tables or contour anchor point - for m in self.Mark1Array.MarkRecord: - if m.MarkAnchor: - m.MarkAnchor.prune_hints() - for b in self.Mark2Array.Mark2Record: - for m in b.Mark2Anchor: - if m: - m.prune_hints() 
- return True - -@_add_method(otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - otTables.ReverseChainSingleSubst, - otTables.SinglePos, - otTables.PairPos, - otTables.CursivePos, - otTables.MarkBasePos, - otTables.MarkLigPos, - otTables.MarkMarkPos) -def subset_lookups(self, lookup_indices): - pass - -@_add_method(otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - otTables.ReverseChainSingleSubst, - otTables.SinglePos, - otTables.PairPos, - otTables.CursivePos, - otTables.MarkBasePos, - otTables.MarkLigPos, - otTables.MarkMarkPos) -def collect_lookups(self): - return [] - -@_add_method(otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - otTables.ReverseChainSingleSubst, - otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def prune_post_subset(self, options): - return True - -@_add_method(otTables.SingleSubst, - otTables.AlternateSubst, - otTables.ReverseChainSingleSubst) -def may_have_non_1to1(self): - return False - -@_add_method(otTables.MultipleSubst, - otTables.LigatureSubst, - otTables.ContextSubst, - otTables.ChainContextSubst) -def may_have_non_1to1(self): - return True - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def __subset_classify_context(self): - - class ContextHelper(object): - def __init__(self, klass, Format): - if klass.__name__.endswith('Subst'): - Typ = 'Sub' - Type = 'Subst' - else: - Typ = 'Pos' - Type = 'Pos' - if klass.__name__.startswith('Chain'): - Chain = 'Chain' - else: - Chain = '' - ChainTyp = Chain+Typ - - self.Typ = Typ - self.Type = Type - self.Chain = Chain - self.ChainTyp = ChainTyp - - self.LookupRecord = Type+'LookupRecord' - - if Format == 1: - Coverage = lambda r: r.Coverage - ChainCoverage = lambda r: r.Coverage - ContextData = lambda r:(None,) 
- ChainContextData = lambda r:(None, None, None) - RuleData = lambda r:(r.Input,) - ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) - SetRuleData = None - ChainSetRuleData = None - elif Format == 2: - Coverage = lambda r: r.Coverage - ChainCoverage = lambda r: r.Coverage - ContextData = lambda r:(r.ClassDef,) - ChainContextData = lambda r:(r.BacktrackClassDef, - r.InputClassDef, - r.LookAheadClassDef) - RuleData = lambda r:(r.Class,) - ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) - def SetRuleData(r, d):(r.Class,) = d - def ChainSetRuleData(r, d):(r.Backtrack, r.Input, r.LookAhead) = d - elif Format == 3: - Coverage = lambda r: r.Coverage[0] - ChainCoverage = lambda r: r.InputCoverage[0] - ContextData = None - ChainContextData = None - RuleData = lambda r: r.Coverage - ChainRuleData = lambda r:(r.BacktrackCoverage + - r.InputCoverage + - r.LookAheadCoverage) - SetRuleData = None - ChainSetRuleData = None - else: - assert 0, "unknown format: %s" % Format - - if Chain: - self.Coverage = ChainCoverage - self.ContextData = ChainContextData - self.RuleData = ChainRuleData - self.SetRuleData = ChainSetRuleData - else: - self.Coverage = Coverage - self.ContextData = ContextData - self.RuleData = RuleData - self.SetRuleData = SetRuleData - - if Format == 1: - self.Rule = ChainTyp+'Rule' - self.RuleCount = ChainTyp+'RuleCount' - self.RuleSet = ChainTyp+'RuleSet' - self.RuleSetCount = ChainTyp+'RuleSetCount' - self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] - elif Format == 2: - self.Rule = ChainTyp+'ClassRule' - self.RuleCount = ChainTyp+'ClassRuleCount' - self.RuleSet = ChainTyp+'ClassSet' - self.RuleSetCount = ChainTyp+'ClassSetCount' - self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c - else (set(glyphs) if r == 0 else set())) - - self.ClassDef = 'InputClassDef' if Chain else 'ClassDef' - self.ClassDefIndex = 1 if Chain else 0 - self.Input = 'Input' if Chain else 'Class' - - if self.Format not in [1, 2, 
3]: - return None # Don't shoot the messenger; let it go - if not hasattr(self.__class__, "__ContextHelpers"): - self.__class__.__ContextHelpers = {} - if self.Format not in self.__class__.__ContextHelpers: - helper = ContextHelper(self.__class__, self.Format) - self.__class__.__ContextHelpers[self.Format] = helper - return self.__class__.__ContextHelpers[self.Format] - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst) -def closure_glyphs(self, s, cur_glyphs): - c = self.__subset_classify_context() - - indices = c.Coverage(self).intersect(cur_glyphs) - if not indices: - return [] - cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs) - - if self.Format == 1: - ContextData = c.ContextData(self) - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - for i in indices: - if i >= rssCount or not rss[i]: continue - for r in getattr(rss[i], c.Rule): - if not r: continue - if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) - for cd,klist in zip(ContextData, c.RuleData(r))): - continue - chaos = set() - for ll in getattr(r, c.LookupRecord): - if not ll: continue - seqi = ll.SequenceIndex - if seqi in chaos: - # TODO Can we improve this? 
- pos_glyphs = None - else: - if seqi == 0: - pos_glyphs = frozenset([c.Coverage(self).glyphs[i]]) - else: - pos_glyphs = frozenset([r.Input[seqi - 1]]) - lookup = s.table.LookupList.Lookup[ll.LookupListIndex] - chaos.add(seqi) - if lookup.may_have_non_1to1(): - chaos.update(range(seqi, len(r.Input)+2)) - lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) - elif self.Format == 2: - ClassDef = getattr(self, c.ClassDef) - indices = ClassDef.intersect(cur_glyphs) - ContextData = c.ContextData(self) - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - for i in indices: - if i >= rssCount or not rss[i]: continue - for r in getattr(rss[i], c.Rule): - if not r: continue - if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) - for cd,klist in zip(ContextData, c.RuleData(r))): - continue - chaos = set() - for ll in getattr(r, c.LookupRecord): - if not ll: continue - seqi = ll.SequenceIndex - if seqi in chaos: - # TODO Can we improve this? - pos_glyphs = None - else: - if seqi == 0: - pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i)) - else: - pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1])) - lookup = s.table.LookupList.Lookup[ll.LookupListIndex] - chaos.add(seqi) - if lookup.may_have_non_1to1(): - chaos.update(range(seqi, len(getattr(r, c.Input))+2)) - lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) - elif self.Format == 3: - if not all(x.intersect(s.glyphs) for x in c.RuleData(self)): - return [] - r = self - chaos = set() - for ll in getattr(r, c.LookupRecord): - if not ll: continue - seqi = ll.SequenceIndex - if seqi in chaos: - # TODO Can we improve this? 
- pos_glyphs = None - else: - if seqi == 0: - pos_glyphs = frozenset(cur_glyphs) - else: - pos_glyphs = frozenset(r.InputCoverage[seqi].intersect_glyphs(s.glyphs)) - lookup = s.table.LookupList.Lookup[ll.LookupListIndex] - chaos.add(seqi) - if lookup.may_have_non_1to1(): - chaos.update(range(seqi, len(r.InputCoverage)+1)) - lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ContextSubst, - otTables.ContextPos, - otTables.ChainContextSubst, - otTables.ChainContextPos) -def subset_glyphs(self, s): - c = self.__subset_classify_context() - - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - rss = [rss[i] for i in indices if i < rssCount] - for rs in rss: - if not rs: continue - ss = getattr(rs, c.Rule) - ss = [r for r in ss - if r and all(all(g in s.glyphs for g in glist) - for glist in c.RuleData(r))] - setattr(rs, c.Rule, ss) - setattr(rs, c.RuleCount, len(ss)) - # Prune empty rulesets - indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)] - self.Coverage.remap(indices) - rss = [rss[i] for i in indices] - setattr(self, c.RuleSet, rss) - setattr(self, c.RuleSetCount, len(rss)) - return bool(rss) - elif self.Format == 2: - if not self.Coverage.subset(s.glyphs): - return False - ContextData = c.ContextData(self) - klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData] - - # Keep rulesets for class numbers that survived. - indices = klass_maps[c.ClassDefIndex] - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - rss = [rss[i] for i in indices if i < rssCount] - del rssCount - # Delete, but not renumber, unreachable rulesets. 
- indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs) - rss = [rss if i in indices else None for i,rss in enumerate(rss)] - - for rs in rss: - if not rs: continue - ss = getattr(rs, c.Rule) - ss = [r for r in ss - if r and all(all(k in klass_map for k in klist) - for klass_map,klist in zip(klass_maps, c.RuleData(r)))] - setattr(rs, c.Rule, ss) - setattr(rs, c.RuleCount, len(ss)) - - # Remap rule classes - for r in ss: - c.SetRuleData(r, [[klass_map.index(k) for k in klist] - for klass_map,klist in zip(klass_maps, c.RuleData(r))]) - - # Prune empty rulesets - rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss] - while rss and rss[-1] is None: - del rss[-1] - setattr(self, c.RuleSet, rss) - setattr(self, c.RuleSetCount, len(rss)) - - # TODO: We can do a second round of remapping class values based - # on classes that are actually used in at least one rule. Right - # now we subset classes to c.glyphs only. Or better, rewrite - # the above to do that. - - return bool(rss) - elif self.Format == 3: - return all(x.subset(s.glyphs) for x in c.RuleData(self)) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def subset_lookups(self, lookup_indices): - c = self.__subset_classify_context() - - if self.Format in [1, 2]: - for rs in getattr(self, c.RuleSet): - if not rs: continue - for r in getattr(rs, c.Rule): - if not r: continue - setattr(r, c.LookupRecord, - [ll for ll in getattr(r, c.LookupRecord) - if ll and ll.LookupListIndex in lookup_indices]) - for ll in getattr(r, c.LookupRecord): - if not ll: continue - ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex) - elif self.Format == 3: - setattr(self, c.LookupRecord, - [ll for ll in getattr(self, c.LookupRecord) - if ll and ll.LookupListIndex in lookup_indices]) - for ll in getattr(self, c.LookupRecord): - if not ll: continue - ll.LookupListIndex = 
lookup_indices.index(ll.LookupListIndex) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def collect_lookups(self): - c = self.__subset_classify_context() - - if self.Format in [1, 2]: - return [ll.LookupListIndex - for rs in getattr(self, c.RuleSet) if rs - for r in getattr(rs, c.Rule) if r - for ll in getattr(r, c.LookupRecord) if ll] - elif self.Format == 3: - return [ll.LookupListIndex - for ll in getattr(self, c.LookupRecord) if ll] - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst) -def closure_glyphs(self, s, cur_glyphs): - if self.Format == 1: - self.ExtSubTable.closure_glyphs(s, cur_glyphs) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst) -def may_have_non_1to1(self): - if self.Format == 1: - return self.ExtSubTable.may_have_non_1to1() - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def subset_glyphs(self, s): - if self.Format == 1: - return self.ExtSubTable.subset_glyphs(s) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def prune_post_subset(self, options): - if self.Format == 1: - return self.ExtSubTable.prune_post_subset(options) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def subset_lookups(self, lookup_indices): - if self.Format == 1: - return self.ExtSubTable.subset_lookups(lookup_indices) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def collect_lookups(self): - if self.Format == 1: - return self.ExtSubTable.collect_lookups() - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.Lookup) -def 
closure_glyphs(self, s, cur_glyphs=None): - if cur_glyphs is None: - cur_glyphs = frozenset(s.glyphs) - - # Memoize - if (id(self), cur_glyphs) in s._doneLookups: - return - s._doneLookups.add((id(self), cur_glyphs)) - - if self in s._activeLookups: - raise Exception("Circular loop in lookup recursion") - s._activeLookups.append(self) - for st in self.SubTable: - if not st: continue - st.closure_glyphs(s, cur_glyphs) - assert(s._activeLookups[-1] == self) - del s._activeLookups[-1] - -@_add_method(otTables.Lookup) -def subset_glyphs(self, s): - self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)] - self.SubTableCount = len(self.SubTable) - return bool(self.SubTableCount) - -@_add_method(otTables.Lookup) -def prune_post_subset(self, options): - ret = False - for st in self.SubTable: - if not st: continue - if st.prune_post_subset(options): ret = True - return ret - -@_add_method(otTables.Lookup) -def subset_lookups(self, lookup_indices): - for s in self.SubTable: - s.subset_lookups(lookup_indices) - -@_add_method(otTables.Lookup) -def collect_lookups(self): - return _uniq_sort(sum((st.collect_lookups() for st in self.SubTable - if st), [])) - -@_add_method(otTables.Lookup) -def may_have_non_1to1(self): - return any(st.may_have_non_1to1() for st in self.SubTable if st) - -@_add_method(otTables.LookupList) -def subset_glyphs(self, s): - """Returns the indices of nonempty lookups.""" - return [i for i,l in enumerate(self.Lookup) if l and l.subset_glyphs(s)] - -@_add_method(otTables.LookupList) -def prune_post_subset(self, options): - ret = False - for l in self.Lookup: - if not l: continue - if l.prune_post_subset(options): ret = True - return ret - -@_add_method(otTables.LookupList) -def subset_lookups(self, lookup_indices): - self.ensureDecompiled() - self.Lookup = [self.Lookup[i] for i in lookup_indices - if i < self.LookupCount] - self.LookupCount = len(self.Lookup) - for l in self.Lookup: - l.subset_lookups(lookup_indices) - 
-@_add_method(otTables.LookupList) -def neuter_lookups(self, lookup_indices): - """Sets lookups not in lookup_indices to None.""" - self.ensureDecompiled() - self.Lookup = [l if i in lookup_indices else None for i,l in enumerate(self.Lookup)] - -@_add_method(otTables.LookupList) -def closure_lookups(self, lookup_indices): - lookup_indices = _uniq_sort(lookup_indices) - recurse = lookup_indices - while True: - recurse_lookups = sum((self.Lookup[i].collect_lookups() - for i in recurse if i < self.LookupCount), []) - recurse_lookups = [l for l in recurse_lookups - if l not in lookup_indices and l < self.LookupCount] - if not recurse_lookups: - return _uniq_sort(lookup_indices) - recurse_lookups = _uniq_sort(recurse_lookups) - lookup_indices.extend(recurse_lookups) - recurse = recurse_lookups - -@_add_method(otTables.Feature) -def subset_lookups(self, lookup_indices): - self.LookupListIndex = [l for l in self.LookupListIndex - if l in lookup_indices] - # Now map them. - self.LookupListIndex = [lookup_indices.index(l) - for l in self.LookupListIndex] - self.LookupCount = len(self.LookupListIndex) - return self.LookupCount or self.FeatureParams - -@_add_method(otTables.Feature) -def collect_lookups(self): - return self.LookupListIndex[:] - -@_add_method(otTables.FeatureList) -def subset_lookups(self, lookup_indices): - """Returns the indices of nonempty features.""" - # Note: Never ever drop feature 'pref', even if it's empty. - # HarfBuzz chooses shaper for Khmer based on presence of this - # feature. 
See thread at: - # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html - feature_indices = [i for i,f in enumerate(self.FeatureRecord) - if (f.Feature.subset_lookups(lookup_indices) or - f.FeatureTag == 'pref')] - self.subset_features(feature_indices) - return feature_indices - -@_add_method(otTables.FeatureList) -def collect_lookups(self, feature_indices): - return _uniq_sort(sum((self.FeatureRecord[i].Feature.collect_lookups() - for i in feature_indices - if i < self.FeatureCount), [])) - -@_add_method(otTables.FeatureList) -def subset_features(self, feature_indices): - self.ensureDecompiled() - self.FeatureRecord = [self.FeatureRecord[i] for i in feature_indices] - self.FeatureCount = len(self.FeatureRecord) - return bool(self.FeatureCount) - -@_add_method(otTables.DefaultLangSys, - otTables.LangSys) -def subset_features(self, feature_indices): - if self.ReqFeatureIndex in feature_indices: - self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex) - else: - self.ReqFeatureIndex = 65535 - self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices] - # Now map them. 
- self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex - if f in feature_indices] - self.FeatureCount = len(self.FeatureIndex) - return bool(self.FeatureCount or self.ReqFeatureIndex != 65535) - -@_add_method(otTables.DefaultLangSys, - otTables.LangSys) -def collect_features(self): - feature_indices = self.FeatureIndex[:] - if self.ReqFeatureIndex != 65535: - feature_indices.append(self.ReqFeatureIndex) - return _uniq_sort(feature_indices) - -@_add_method(otTables.Script) -def subset_features(self, feature_indices): - if(self.DefaultLangSys and - not self.DefaultLangSys.subset_features(feature_indices)): - self.DefaultLangSys = None - self.LangSysRecord = [l for l in self.LangSysRecord - if l.LangSys.subset_features(feature_indices)] - self.LangSysCount = len(self.LangSysRecord) - return bool(self.LangSysCount or self.DefaultLangSys) - -@_add_method(otTables.Script) -def collect_features(self): - feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord] - if self.DefaultLangSys: - feature_indices.append(self.DefaultLangSys.collect_features()) - return _uniq_sort(sum(feature_indices, [])) - -@_add_method(otTables.ScriptList) -def subset_features(self, feature_indices): - self.ScriptRecord = [s for s in self.ScriptRecord - if s.Script.subset_features(feature_indices)] - self.ScriptCount = len(self.ScriptRecord) - return bool(self.ScriptCount) - -@_add_method(otTables.ScriptList) -def collect_features(self): - return _uniq_sort(sum((s.Script.collect_features() - for s in self.ScriptRecord), [])) - -@_add_method(ttLib.getTableClass('GSUB')) -def closure_glyphs(self, s): - s.table = self.table - if self.table.ScriptList: - feature_indices = self.table.ScriptList.collect_features() - else: - feature_indices = [] - if self.table.FeatureList: - lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) - else: - lookup_indices = [] - if self.table.LookupList: - while True: - orig_glyphs = frozenset(s.glyphs) - 
s._activeLookups = [] - s._doneLookups = set() - for i in lookup_indices: - if i >= self.table.LookupList.LookupCount: continue - if not self.table.LookupList.Lookup[i]: continue - self.table.LookupList.Lookup[i].closure_glyphs(s) - del s._activeLookups, s._doneLookups - if orig_glyphs == s.glyphs: - break - del s.table - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def subset_glyphs(self, s): - s.glyphs = s.glyphs_gsubed - if self.table.LookupList: - lookup_indices = self.table.LookupList.subset_glyphs(s) - else: - lookup_indices = [] - self.subset_lookups(lookup_indices) - return True - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def subset_lookups(self, lookup_indices): - """Retains specified lookups, then removes empty features, language - systems, and scripts.""" - if self.table.LookupList: - self.table.LookupList.subset_lookups(lookup_indices) - if self.table.FeatureList: - feature_indices = self.table.FeatureList.subset_lookups(lookup_indices) - else: - feature_indices = [] - if self.table.ScriptList: - self.table.ScriptList.subset_features(feature_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def neuter_lookups(self, lookup_indices): - """Sets lookups not in lookup_indices to None.""" - if self.table.LookupList: - self.table.LookupList.neuter_lookups(lookup_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_lookups(self, remap=True): - """Remove (default) or neuter unreferenced lookups""" - if self.table.ScriptList: - feature_indices = self.table.ScriptList.collect_features() - else: - feature_indices = [] - if self.table.FeatureList: - lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) - else: - lookup_indices = [] - if self.table.LookupList: - lookup_indices = self.table.LookupList.closure_lookups(lookup_indices) - else: - lookup_indices = [] - if remap: - 
self.subset_lookups(lookup_indices) - else: - self.neuter_lookups(lookup_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def subset_feature_tags(self, feature_tags): - if self.table.FeatureList: - feature_indices = \ - [i for i,f in enumerate(self.table.FeatureList.FeatureRecord) - if f.FeatureTag in feature_tags] - self.table.FeatureList.subset_features(feature_indices) - else: - feature_indices = [] - if self.table.ScriptList: - self.table.ScriptList.subset_features(feature_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_features(self): - """Remove unreferenced features""" - if self.table.ScriptList: - feature_indices = self.table.ScriptList.collect_features() - else: - feature_indices = [] - if self.table.FeatureList: - self.table.FeatureList.subset_features(feature_indices) - if self.table.ScriptList: - self.table.ScriptList.subset_features(feature_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_pre_subset(self, options): - # Drop undesired features - if '*' not in options.layout_features: - self.subset_feature_tags(options.layout_features) - # Neuter unreferenced lookups - self.prune_lookups(remap=False) - return True - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def remove_redundant_langsys(self): - table = self.table - if not table.ScriptList or not table.FeatureList: - return - - features = table.FeatureList.FeatureRecord - - for s in table.ScriptList.ScriptRecord: - d = s.Script.DefaultLangSys - if not d: - continue - for lr in s.Script.LangSysRecord[:]: - l = lr.LangSys - # Compare d and l - if len(d.FeatureIndex) != len(l.FeatureIndex): - continue - if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535): - continue - - if d.ReqFeatureIndex != 65535: - if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]: - continue - - for i in range(len(d.FeatureIndex)): - if 
features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]: - break - else: - # LangSys and default are equal; delete LangSys - s.Script.LangSysRecord.remove(lr) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_post_subset(self, options): - table = self.table - - self.prune_lookups() # XXX Is this actually needed?! - - if table.LookupList: - table.LookupList.prune_post_subset(options) - # XXX Next two lines disabled because OTS is stupid and - # doesn't like NULL offsets here. - #if not table.LookupList.Lookup: - # table.LookupList = None - - if not table.LookupList: - table.FeatureList = None - - if table.FeatureList: - self.remove_redundant_langsys() - # Remove unreferenced features - self.prune_features() - - # XXX Next two lines disabled because OTS is stupid and - # doesn't like NULL offsets here. - #if table.FeatureList and not table.FeatureList.FeatureRecord: - # table.FeatureList = None - - # Never drop scripts themselves as them just being available - # holds semantic significance. - # XXX Next two lines disabled because OTS is stupid and - # doesn't like NULL offsets here. 
- #if table.ScriptList and not table.ScriptList.ScriptRecord: - # table.ScriptList = None - - return True - -@_add_method(ttLib.getTableClass('GDEF')) -def subset_glyphs(self, s): - glyphs = s.glyphs_gsubed - table = self.table - if table.LigCaretList: - indices = table.LigCaretList.Coverage.subset(glyphs) - table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i] - for i in indices] - table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph) - if table.MarkAttachClassDef: - table.MarkAttachClassDef.classDefs = \ - {g:v for g,v in table.MarkAttachClassDef.classDefs.items() - if g in glyphs} - if table.GlyphClassDef: - table.GlyphClassDef.classDefs = \ - {g:v for g,v in table.GlyphClassDef.classDefs.items() - if g in glyphs} - if table.AttachList: - indices = table.AttachList.Coverage.subset(glyphs) - GlyphCount = table.AttachList.GlyphCount - table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i] - for i in indices - if i < GlyphCount] - table.AttachList.GlyphCount = len(table.AttachList.AttachPoint) - if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef: - for coverage in table.MarkGlyphSetsDef.Coverage: - coverage.subset(glyphs) - # TODO: The following is disabled. If enabling, we need to go fixup all - # lookups that use MarkFilteringSet and map their set. 
- # indices = table.MarkGlyphSetsDef.Coverage = \ - # [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs] - return True - -@_add_method(ttLib.getTableClass('GDEF')) -def prune_post_subset(self, options): - table = self.table - # XXX check these against OTS - if table.LigCaretList and not table.LigCaretList.LigGlyphCount: - table.LigCaretList = None - if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs: - table.MarkAttachClassDef = None - if table.GlyphClassDef and not table.GlyphClassDef.classDefs: - table.GlyphClassDef = None - if table.AttachList and not table.AttachList.GlyphCount: - table.AttachList = None - if (hasattr(table, "MarkGlyphSetsDef") and - table.MarkGlyphSetsDef and - not table.MarkGlyphSetsDef.Coverage): - table.MarkGlyphSetsDef = None - if table.Version == 0x00010002/0x10000: - table.Version = 1.0 - return bool(table.LigCaretList or - table.MarkAttachClassDef or - table.GlyphClassDef or - table.AttachList or - (table.Version >= 0x00010002/0x10000 and table.MarkGlyphSetsDef)) - -@_add_method(ttLib.getTableClass('kern')) -def prune_pre_subset(self, options): - # Prune unknown kern table types - self.kernTables = [t for t in self.kernTables if hasattr(t, 'kernTable')] - return bool(self.kernTables) - -@_add_method(ttLib.getTableClass('kern')) -def subset_glyphs(self, s): - glyphs = s.glyphs_gsubed - for t in self.kernTables: - t.kernTable = {(a,b):v for (a,b),v in t.kernTable.items() - if a in glyphs and b in glyphs} - self.kernTables = [t for t in self.kernTables if t.kernTable] - return bool(self.kernTables) - -@_add_method(ttLib.getTableClass('vmtx')) -def subset_glyphs(self, s): - self.metrics = _dict_subset(self.metrics, s.glyphs) - return bool(self.metrics) - -@_add_method(ttLib.getTableClass('hmtx')) -def subset_glyphs(self, s): - self.metrics = _dict_subset(self.metrics, s.glyphs) - return True # Required table - -@_add_method(ttLib.getTableClass('hdmx')) -def subset_glyphs(self, s): - self.hdmx = {sz:_dict_subset(l, 
s.glyphs) for sz,l in self.hdmx.items()} - return bool(self.hdmx) - -@_add_method(ttLib.getTableClass('VORG')) -def subset_glyphs(self, s): - self.VOriginRecords = {g:v for g,v in self.VOriginRecords.items() - if g in s.glyphs} - self.numVertOriginYMetrics = len(self.VOriginRecords) - return True # Never drop; has default metrics - -@_add_method(ttLib.getTableClass('post')) -def prune_pre_subset(self, options): - if not options.glyph_names: - self.formatType = 3.0 - return True # Required table - -@_add_method(ttLib.getTableClass('post')) -def subset_glyphs(self, s): - self.extraNames = [] # This seems to do it - return True # Required table - -@_add_method(ttLib.getTableModule('glyf').Glyph) -def remapComponentsFast(self, indices): - if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: - return # Not composite - data = array.array("B", self.data) - i = 10 - more = 1 - while more: - flags =(data[i] << 8) | data[i+1] - glyphID =(data[i+2] << 8) | data[i+3] - # Remap - glyphID = indices.index(glyphID) - data[i+2] = glyphID >> 8 - data[i+3] = glyphID & 0xFF - i += 4 - flags = int(flags) - - if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS - else: i += 2 - if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE - elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE - elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO - more = flags & 0x0020 # MORE_COMPONENTS - - self.data = data.tostring() - -@_add_method(ttLib.getTableClass('glyf')) -def closure_glyphs(self, s): - decompose = s.glyphs - while True: - components = set() - for g in decompose: - if g not in self.glyphs: - continue - gl = self.glyphs[g] - for c in gl.getComponentNames(self): - if c not in s.glyphs: - components.add(c) - components = set(c for c in components if c not in s.glyphs) - if not components: - break - decompose = components - s.glyphs.update(components) - -@_add_method(ttLib.getTableClass('glyf')) -def prune_pre_subset(self, options): - if options.notdef_glyph and not options.notdef_outline: - 
g = self[self.glyphOrder[0]] - # Yay, easy! - g.__dict__.clear() - g.data = "" - return True - -@_add_method(ttLib.getTableClass('glyf')) -def subset_glyphs(self, s): - self.glyphs = _dict_subset(self.glyphs, s.glyphs) - indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs] - for v in self.glyphs.values(): - if hasattr(v, "data"): - v.remapComponentsFast(indices) - else: - pass # No need - self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs] - # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset. - return True - -@_add_method(ttLib.getTableClass('glyf')) -def prune_post_subset(self, options): - remove_hinting = not options.hinting - for v in self.glyphs.values(): - v.trim(remove_hinting=remove_hinting) - return True - -@_add_method(ttLib.getTableClass('CFF ')) -def prune_pre_subset(self, options): - cff = self.cff - # CFF table must have one font only - cff.fontNames = cff.fontNames[:1] - - if options.notdef_glyph and not options.notdef_outline: - for fontname in cff.keys(): - font = cff[fontname] - c,_ = font.CharStrings.getItemAndSelector('.notdef') - # XXX we should preserve the glyph width - c.bytecode = '\x0e' # endchar - c.program = None - - return True # bool(cff.fontNames) - -@_add_method(ttLib.getTableClass('CFF ')) -def subset_glyphs(self, s): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - - # Load all glyphs - for g in font.charset: - if g not in s.glyphs: continue - c,sel = cs.getItemAndSelector(g) - - if cs.charStringsAreIndexed: - indices = [i for i,g in enumerate(font.charset) if g in s.glyphs] - csi = cs.charStringsIndex - csi.items = [csi.items[i] for i in indices] - del csi.file, csi.offsets - if hasattr(font, "FDSelect"): - sel = font.FDSelect - # XXX We want to set sel.format to None, such that the - # most compact format is selected. However, OTS was - # broken and couldn't parse a FDSelect format 0 that - # happened before CharStrings. 
As such, always force - # format 3 until we fix cffLib to always generate - # FDSelect after CharStrings. - # https://github.com/khaledhosny/ots/pull/31 - #sel.format = None - sel.format = 3 - sel.gidArray = [sel.gidArray[i] for i in indices] - cs.charStrings = {g:indices.index(v) - for g,v in cs.charStrings.items() - if g in s.glyphs} - else: - cs.charStrings = {g:v - for g,v in cs.charStrings.items() - if g in s.glyphs} - font.charset = [g for g in font.charset if g in s.glyphs] - font.numGlyphs = len(font.charset) - - return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) - -@_add_method(psCharStrings.T2CharString) -def subset_subroutines(self, subrs, gsubrs): - p = self.program - assert len(p) - for i in range(1, len(p)): - if p[i] == 'callsubr': - assert isinstance(p[i-1], int) - p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias - elif p[i] == 'callgsubr': - assert isinstance(p[i-1], int) - p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias - -@_add_method(psCharStrings.T2CharString) -def drop_hints(self): - hints = self._hints - - if hints.has_hint: - self.program = self.program[hints.last_hint:] - if hasattr(self, 'width'): - # Insert width back if needed - if self.width != self.private.defaultWidthX: - self.program.insert(0, self.width - self.private.nominalWidthX) - - if hints.has_hintmask: - i = 0 - p = self.program - while i < len(p): - if p[i] in ['hintmask', 'cntrmask']: - assert i + 1 <= len(p) - del p[i:i+2] - continue - i += 1 - - # TODO: we currently don't drop calls to "empty" subroutines. 
- - assert len(self.program) - - del self._hints - -class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): - - def __init__(self, localSubrs, globalSubrs): - psCharStrings.SimpleT2Decompiler.__init__(self, - localSubrs, - globalSubrs) - for subrs in [localSubrs, globalSubrs]: - if subrs and not hasattr(subrs, "_used"): - subrs._used = set() - - def op_callsubr(self, index): - self.localSubrs._used.add(self.operandStack[-1]+self.localBias) - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - - def op_callgsubr(self, index): - self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias) - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - -class _DehintingT2Decompiler(psCharStrings.SimpleT2Decompiler): - - class Hints(object): - def __init__(self): - # Whether calling this charstring produces any hint stems - self.has_hint = False - # Index to start at to drop all hints - self.last_hint = 0 - # Index up to which we know more hints are possible. - # Only relevant if status is 0 or 1. - self.last_checked = 0 - # The status means: - # 0: after dropping hints, this charstring is empty - # 1: after dropping hints, there may be more hints - # continuing after this - # 2: no more hints possible after this charstring - self.status = 0 - # Has hintmask instructions; not recursive - self.has_hintmask = False - pass - - def __init__(self, css, localSubrs, globalSubrs): - self._css = css - psCharStrings.SimpleT2Decompiler.__init__(self, - localSubrs, - globalSubrs) - - def execute(self, charString): - old_hints = charString._hints if hasattr(charString, '_hints') else None - charString._hints = self.Hints() - - psCharStrings.SimpleT2Decompiler.execute(self, charString) - - hints = charString._hints - - if hints.has_hint or hints.has_hintmask: - self._css.add(charString) - - if hints.status != 2: - # Check from last_check, make sure we didn't have any operators. 
- for i in range(hints.last_checked, len(charString.program) - 1): - if isinstance(charString.program[i], str): - hints.status = 2 - break - else: - hints.status = 1 # There's *something* here - hints.last_checked = len(charString.program) - - if old_hints: - assert hints.__dict__ == old_hints.__dict__ - - def op_callsubr(self, index): - subr = self.localSubrs[self.operandStack[-1]+self.localBias] - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - self.processSubr(index, subr) - - def op_callgsubr(self, index): - subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - self.processSubr(index, subr) - - def op_hstem(self, index): - psCharStrings.SimpleT2Decompiler.op_hstem(self, index) - self.processHint(index) - def op_vstem(self, index): - psCharStrings.SimpleT2Decompiler.op_vstem(self, index) - self.processHint(index) - def op_hstemhm(self, index): - psCharStrings.SimpleT2Decompiler.op_hstemhm(self, index) - self.processHint(index) - def op_vstemhm(self, index): - psCharStrings.SimpleT2Decompiler.op_vstemhm(self, index) - self.processHint(index) - def op_hintmask(self, index): - psCharStrings.SimpleT2Decompiler.op_hintmask(self, index) - self.processHintmask(index) - def op_cntrmask(self, index): - psCharStrings.SimpleT2Decompiler.op_cntrmask(self, index) - self.processHintmask(index) - - def processHintmask(self, index): - cs = self.callingStack[-1] - hints = cs._hints - hints.has_hintmask = True - if hints.status != 2 and hints.has_hint: - # Check from last_check, see if we may be an implicit vstem - for i in range(hints.last_checked, index - 1): - if isinstance(cs.program[i], str): - hints.status = 2 - break - if hints.status != 2: - # We are an implicit vstem - hints.last_hint = index + 1 - hints.status = 0 - hints.last_checked = index + 1 - - def processHint(self, index): - cs = self.callingStack[-1] - hints = cs._hints - hints.has_hint = True - hints.last_hint = index - 
hints.last_checked = index - - def processSubr(self, index, subr): - cs = self.callingStack[-1] - hints = cs._hints - subr_hints = subr._hints - - if subr_hints.has_hint: - if hints.status != 2: - hints.has_hint = True - hints.last_checked = index - hints.status = subr_hints.status - # Decide where to chop off from - if subr_hints.status == 0: - hints.last_hint = index - else: - hints.last_hint = index - 2 # Leave the subr call in - else: - # In my understanding, this is a font bug. - # I.e., it has hint stems *after* path construction. - # I've seen this in widespread fonts. - # Best to ignore the hints I suppose... - pass - #assert 0 - else: - hints.status = max(hints.status, subr_hints.status) - if hints.status != 2: - # Check from last_check, make sure we didn't have - # any operators. - for i in range(hints.last_checked, index - 1): - if isinstance(cs.program[i], str): - hints.status = 2 - break - hints.last_checked = index - if hints.status != 2: - # Decide where to chop off from - if subr_hints.status == 0: - hints.last_hint = index - else: - hints.last_hint = index - 2 # Leave the subr call in - -class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler): - - def __init__(self, localSubrs, globalSubrs): - psCharStrings.SimpleT2Decompiler.__init__(self, - localSubrs, - globalSubrs) - - def execute(self, charString): - # Note: Currently we recompute _desubroutinized each time. - # This is more robust in some cases, but in other places we assume - # that each subroutine always expands to the same code, so - # maybe it doesn't matter. To speed up we can just not - # recompute _desubroutinized if it's there. For now I just - # double-check that it desubroutinized to the same thing. 
- old_desubroutinized = charString._desubroutinized if hasattr(charString, '_desubroutinized') else None - - charString._patches = [] - psCharStrings.SimpleT2Decompiler.execute(self, charString) - desubroutinized = charString.program[:] - for idx,expansion in reversed (charString._patches): - assert idx >= 2 - assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1] - assert type(desubroutinized[idx - 2]) == int - if expansion[-1] == 'return': - expansion = expansion[:-1] - desubroutinized[idx-2:idx] = expansion - if 'endchar' in desubroutinized: - # Cut off after first endchar - desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1] - else: - if not len(desubroutinized) or desubroutinized[-1] != 'return': - desubroutinized.append('return') - - charString._desubroutinized = desubroutinized - del charString._patches - - if old_desubroutinized: - assert desubroutinized == old_desubroutinized - - def op_callsubr(self, index): - subr = self.localSubrs[self.operandStack[-1]+self.localBias] - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - self.processSubr(index, subr) - - def op_callgsubr(self, index): - subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - self.processSubr(index, subr) - - def processSubr(self, index, subr): - cs = self.callingStack[-1] - cs._patches.append((index, subr._desubroutinized)) - - -@_add_method(ttLib.getTableClass('CFF ')) -def prune_post_subset(self, options): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - - # Drop unused FontDictionaries - if hasattr(font, "FDSelect"): - sel = font.FDSelect - indices = _uniq_sort(sel.gidArray) - sel.gidArray = [indices.index (ss) for ss in sel.gidArray] - arr = font.FDArray - arr.items = [arr[i] for i in indices] - del arr.file, arr.offsets - - # Desubroutinize if asked for - if options.desubroutinize: - for g in font.charset: 
- c,sel = cs.getItemAndSelector(g) - c.decompile() - subrs = getattr(c.private, "Subrs", []) - decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs) - decompiler.execute(c) - c.program = c._desubroutinized - - # Drop hints if not needed - if not options.hinting: - - # This can be tricky, but doesn't have to. What we do is: - # - # - Run all used glyph charstrings and recurse into subroutines, - # - For each charstring (including subroutines), if it has any - # of the hint stem operators, we mark it as such. - # Upon returning, for each charstring we note all the - # subroutine calls it makes that (recursively) contain a stem, - # - Dropping hinting then consists of the following two ops: - # * Drop the piece of the program in each charstring before the - # last call to a stem op or a stem-calling subroutine, - # * Drop all hintmask operations. - # - It's trickier... A hintmask right after hints and a few numbers - # will act as an implicit vstemhm. As such, we track whether - # we have seen any non-hint operators so far and do the right - # thing, recursively... 
Good luck understanding that :( - css = set() - for g in font.charset: - c,sel = cs.getItemAndSelector(g) - c.decompile() - subrs = getattr(c.private, "Subrs", []) - decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs) - decompiler.execute(c) - for charstring in css: - charstring.drop_hints() - del css - - # Drop font-wide hinting values - all_privs = [] - if hasattr(font, 'FDSelect'): - all_privs.extend(fd.Private for fd in font.FDArray) - else: - all_privs.append(font.Private) - for priv in all_privs: - for k in ['BlueValues', 'OtherBlues', - 'FamilyBlues', 'FamilyOtherBlues', - 'BlueScale', 'BlueShift', 'BlueFuzz', - 'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW']: - if hasattr(priv, k): - setattr(priv, k, None) - - # Renumber subroutines to remove unused ones - - # Mark all used subroutines - for g in font.charset: - c,sel = cs.getItemAndSelector(g) - subrs = getattr(c.private, "Subrs", []) - decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs) - decompiler.execute(c) - - all_subrs = [font.GlobalSubrs] - if hasattr(font, 'FDSelect'): - all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs) - elif hasattr(font.Private, 'Subrs') and font.Private.Subrs: - all_subrs.append(font.Private.Subrs) - - subrs = set(subrs) # Remove duplicates - - # Prepare - for subrs in all_subrs: - if not hasattr(subrs, '_used'): - subrs._used = set() - subrs._used = _uniq_sort(subrs._used) - subrs._old_bias = psCharStrings.calcSubrBias(subrs) - subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) - - # Renumber glyph charstrings - for g in font.charset: - c,sel = cs.getItemAndSelector(g) - subrs = getattr(c.private, "Subrs", []) - c.subset_subroutines (subrs, font.GlobalSubrs) - - # Renumber subroutines themselves - for subrs in all_subrs: - if subrs == font.GlobalSubrs: - if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'): - local_subrs = font.Private.Subrs - else: - local_subrs = [] - else: - 
local_subrs = subrs - - subrs.items = [subrs.items[i] for i in subrs._used] - del subrs.file - if hasattr(subrs, 'offsets'): - del subrs.offsets - - for subr in subrs.items: - subr.subset_subroutines (local_subrs, font.GlobalSubrs) - - # Cleanup - for subrs in all_subrs: - del subrs._used, subrs._old_bias, subrs._new_bias - - return True - -@_add_method(ttLib.getTableClass('cmap')) -def closure_glyphs(self, s): - tables = [t for t in self.tables if t.isUnicode()] - - # Close glyphs - for table in tables: - if table.format == 14: - for cmap in table.uvsDict.values(): - glyphs = {g for u,g in cmap if u in s.unicodes_requested} - if None in glyphs: - glyphs.remove(None) - s.glyphs.update(glyphs) - else: - cmap = table.cmap - intersection = s.unicodes_requested.intersection(cmap.keys()) - s.glyphs.update(cmap[u] for u in intersection) - - # Calculate unicodes_missing - s.unicodes_missing = s.unicodes_requested.copy() - for table in tables: - s.unicodes_missing.difference_update(table.cmap) - -@_add_method(ttLib.getTableClass('cmap')) -def prune_pre_subset(self, options): - if not options.legacy_cmap: - # Drop non-Unicode / non-Symbol cmaps - self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()] - if not options.symbol_cmap: - self.tables = [t for t in self.tables if not t.isSymbol()] - # TODO(behdad) Only keep one subtable? - # For now, drop format=0 which can't be subset_glyphs easily? - self.tables = [t for t in self.tables if t.format != 0] - self.numSubTables = len(self.tables) - return True # Required table - -@_add_method(ttLib.getTableClass('cmap')) -def subset_glyphs(self, s): - s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only - for t in self.tables: - if t.format == 14: - # TODO(behdad) We drop all the default-UVS mappings - # for glyphs_requested. So it's the caller's responsibility to make - # sure those are included. 
- t.uvsDict = {v:[(u,g) for u,g in l - if g in s.glyphs_requested or u in s.unicodes_requested] - for v,l in t.uvsDict.items()} - t.uvsDict = {v:l for v,l in t.uvsDict.items() if l} - elif t.isUnicode(): - t.cmap = {u:g for u,g in t.cmap.items() - if g in s.glyphs_requested or u in s.unicodes_requested} - else: - t.cmap = {u:g for u,g in t.cmap.items() - if g in s.glyphs_requested} - self.tables = [t for t in self.tables - if (t.cmap if t.format != 14 else t.uvsDict)] - self.numSubTables = len(self.tables) - # TODO(behdad) Convert formats when needed. - # In particular, if we have a format=12 without non-BMP - # characters, either drop format=12 one or convert it - # to format=4 if there's not one. - return True # Required table - -@_add_method(ttLib.getTableClass('DSIG')) -def prune_pre_subset(self, options): - # Drop all signatures since they will be invalid - self.usNumSigs = 0 - self.signatureRecords = [] - return True - -@_add_method(ttLib.getTableClass('maxp')) -def prune_pre_subset(self, options): - if not options.hinting: - if self.tableVersion == 0x00010000: - self.maxZones = 1 - self.maxTwilightPoints = 0 - self.maxFunctionDefs = 0 - self.maxInstructionDefs = 0 - self.maxStackElements = 0 - self.maxSizeOfInstructions = 0 - return True - -@_add_method(ttLib.getTableClass('name')) -def prune_pre_subset(self, options): - if '*' not in options.name_IDs: - self.names = [n for n in self.names if n.nameID in options.name_IDs] - if not options.name_legacy: - # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman - # entry for Latin and no Unicode names. - self.names = [n for n in self.names if n.isUnicode()] - # TODO(behdad) Option to keep only one platform's - if '*' not in options.name_languages: - # TODO(behdad) This is Windows-platform specific! 
- self.names = [n for n in self.names - if n.langID in options.name_languages] - if options.obfuscate_names: - namerecs = [] - for n in self.names: - if n.nameID in [1, 4]: - n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f" - elif n.nameID in [2, 6]: - n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f" - elif n.nameID == 3: - n.string = "" - elif n.nameID in [16, 17, 18]: - continue - namerecs.append(n) - self.names = namerecs - return True # Required table - - -# TODO(behdad) OS/2 ulUnicodeRange / ulCodePageRange? -# TODO(behdad) Drop AAT tables. -# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries. -# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left -# TODO(behdad) Drop GDEF subitems if unused by lookups -# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF) -# TODO(behdad) Text direction considerations. -# TODO(behdad) Text script / language considerations. -# TODO(behdad) Optionally drop 'kern' table if GPOS available -# TODO(behdad) Implement --unicode='*' to choose all cmap'ed -# TODO(behdad) Drop old-spec Indic scripts - - -class Options(object): - - class OptionError(Exception): pass - class UnknownOptionError(OptionError): pass - - _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', - 'EBSC', 'SVG ', 'PCLT', 'LTSH'] - _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite - _drop_tables_default += ['CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'] # Color - _no_subset_tables_default = ['gasp', 'head', 'hhea', 'maxp', - 'vhea', 'OS/2', 'loca', 'name', 'cvt ', - 'fpgm', 'prep', 'VDMX', 'DSIG'] - _hinting_tables_default = ['cvt ', 'fpgm', 'prep', 'hdmx', 'VDMX'] - - # Based on HarfBuzz shapers - _layout_features_groups = { - # Default shaper - 'common': ['ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'], - 'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'], - 'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'], - 'ltr': ['ltra', 'ltrm'], - 'rtl': ['rtla', 
'rtlm'], - # Complex shapers - 'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3', - 'cswh', 'mset'], - 'hangul': ['ljmo', 'vjmo', 'tjmo'], - 'tibetan': ['abvs', 'blws', 'abvm', 'blwm'], - 'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half', - 'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres', - 'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'], - } - _layout_features_default = _uniq_sort(sum( - iter(_layout_features_groups.values()), [])) - - drop_tables = _drop_tables_default - no_subset_tables = _no_subset_tables_default - hinting_tables = _hinting_tables_default - legacy_kern = False # drop 'kern' table if GPOS available - layout_features = _layout_features_default - ignore_missing_glyphs = False - ignore_missing_unicodes = True - hinting = True - glyph_names = False - legacy_cmap = False - symbol_cmap = False - name_IDs = [1, 2] # Family and Style - name_legacy = False - name_languages = [0x0409] # English - obfuscate_names = False # to make webfont unusable as a system font - notdef_glyph = True # gid0 for TrueType / .notdef for CFF - notdef_outline = False # No need for notdef to have an outline really - recommended_glyphs = False # gid1, gid2, gid3 for TrueType - recalc_bounds = False # Recalculate font bounding boxes - recalc_timestamp = False # Recalculate font modified timestamp - canonical_order = False # Order tables as recommended - flavor = None # May be 'woff' or 'woff2' - desubroutinize = False # Desubroutinize CFF CharStrings - - def __init__(self, **kwargs): - self.set(**kwargs) - - def set(self, **kwargs): - for k,v in kwargs.items(): - if not hasattr(self, k): - raise self.UnknownOptionError("Unknown option '%s'" % k) - setattr(self, k, v) - - def parse_opts(self, argv, ignore_unknown=False): - ret = [] - for a in argv: - orig_a = a - if not a.startswith('--'): - ret.append(a) - continue - a = a[2:] - i = a.find('=') - op = '=' - if i == -1: - if a.startswith("no-"): - k = a[3:] - v = False - else: - k = a 
- v = True - if k.endswith("?"): - k = k[:-1] - v = '?' - else: - k = a[:i] - if k[-1] in "-+": - op = k[-1]+'=' # Op is '-=' or '+=' now. - k = k[:-1] - v = a[i+1:] - ok = k - k = k.replace('-', '_') - if not hasattr(self, k): - if ignore_unknown is True or ok in ignore_unknown: - ret.append(orig_a) - continue - else: - raise self.UnknownOptionError("Unknown option '%s'" % a) - - ov = getattr(self, k) - if v == '?': - print("Current setting for '%s' is: %s" % (ok, ov)) - continue - if isinstance(ov, bool): - v = bool(v) - elif isinstance(ov, int): - v = int(v) - elif isinstance(ov, str): - v = str(v) # redundant - elif isinstance(ov, list): - if isinstance(v, bool): - raise self.OptionError("Option '%s' requires values to be specified using '='" % a) - vv = v.replace(',', ' ').split() - if vv == ['']: - vv = [] - vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] - if op == '=': - v = vv - elif op == '+=': - v = ov - v.extend(vv) - elif op == '-=': - v = ov - for x in vv: - if x in v: - v.remove(x) - else: - assert False - - setattr(self, k, v) - - return ret - - -class Subsetter(object): - - class SubsettingError(Exception): pass - class MissingGlyphsSubsettingError(SubsettingError): pass - class MissingUnicodesSubsettingError(SubsettingError): pass - - def __init__(self, options=None, log=None): - - if not log: - log = Logger() - if not options: - options = Options() - - self.options = options - self.log = log - self.unicodes_requested = set() - self.glyph_names_requested = set() - self.glyph_ids_requested = set() - - def populate(self, glyphs=[], gids=[], unicodes=[], text=""): - self.unicodes_requested.update(unicodes) - if isinstance(text, bytes): - text = text.decode("utf_8") - for u in text: - self.unicodes_requested.add(ord(u)) - self.glyph_names_requested.update(glyphs) - self.glyph_ids_requested.update(gids) - - def _prune_pre_subset(self, font): - - for tag in font.keys(): - if tag == 'GlyphOrder': continue - - if(tag in 
self.options.drop_tables or - (tag in self.options.hinting_tables and not self.options.hinting) or - (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))): - self.log(tag, "dropped") - del font[tag] - continue - - clazz = ttLib.getTableClass(tag) - - if hasattr(clazz, 'prune_pre_subset'): - table = font[tag] - self.log.lapse("load '%s'" % tag) - retain = table.prune_pre_subset(self.options) - self.log.lapse("prune '%s'" % tag) - if not retain: - self.log(tag, "pruned to empty; dropped") - del font[tag] - continue - else: - self.log(tag, "pruned") - - def _closure_glyphs(self, font): - - realGlyphs = set(font.getGlyphOrder()) - glyph_order = font.getGlyphOrder() - - self.glyphs_requested = set() - self.glyphs_requested.update(self.glyph_names_requested) - self.glyphs_requested.update(glyph_order[i] - for i in self.glyph_ids_requested - if i < len(glyph_order)) - - self.glyphs_missing = set() - self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs)) - self.glyphs_missing.update(i for i in self.glyph_ids_requested - if i >= len(glyph_order)) - if self.glyphs_missing: - self.log("Missing requested glyphs: %s" % self.glyphs_missing) - if not self.options.ignore_missing_glyphs: - raise self.MissingGlyphsSubsettingError(self.glyphs_missing) - - self.glyphs = self.glyphs_requested.copy() - - self.unicodes_missing = set() - if 'cmap' in font: - font['cmap'].closure_glyphs(self) - self.glyphs.intersection_update(realGlyphs) - self.log.lapse("close glyph list over 'cmap'") - self.glyphs_cmaped = frozenset(self.glyphs) - if self.unicodes_missing: - missing = ["U+%04X" % u for u in self.unicodes_missing] - self.log("Missing glyphs for requested Unicodes: %s" % missing) - if not self.options.ignore_missing_unicodes: - raise self.MissingUnicodesSubsettingError(missing) - del missing - - if self.options.notdef_glyph: - if 'glyf' in font: - self.glyphs.add(font.getGlyphName(0)) - self.log("Added gid0 to subset") - else: - self.glyphs.add('.notdef') 
- self.log("Added .notdef to subset") - if self.options.recommended_glyphs: - if 'glyf' in font: - for i in range(min(4, len(font.getGlyphOrder()))): - self.glyphs.add(font.getGlyphName(i)) - self.log("Added first four glyphs to subset") - - if 'GSUB' in font: - self.log("Closing glyph list over 'GSUB': %d glyphs before" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - font['GSUB'].closure_glyphs(self) - self.glyphs.intersection_update(realGlyphs) - self.log("Closed glyph list over 'GSUB': %d glyphs after" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - self.log.lapse("close glyph list over 'GSUB'") - self.glyphs_gsubed = frozenset(self.glyphs) - - if 'glyf' in font: - self.log("Closing glyph list over 'glyf': %d glyphs before" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - font['glyf'].closure_glyphs(self) - self.glyphs.intersection_update(realGlyphs) - self.log("Closed glyph list over 'glyf': %d glyphs after" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - self.log.lapse("close glyph list over 'glyf'") - self.glyphs_glyfed = frozenset(self.glyphs) - - self.glyphs_all = frozenset(self.glyphs) - - self.log("Retaining %d glyphs: " % len(self.glyphs_all)) - - del self.glyphs - - def _subset_glyphs(self, font): - for tag in font.keys(): - if tag == 'GlyphOrder': continue - clazz = ttLib.getTableClass(tag) - - if tag in self.options.no_subset_tables: - self.log(tag, "subsetting not needed") - elif hasattr(clazz, 'subset_glyphs'): - table = font[tag] - self.glyphs = self.glyphs_all - retain = table.subset_glyphs(self) - del self.glyphs - self.log.lapse("subset '%s'" % tag) - if not retain: - self.log(tag, "subsetted to empty; dropped") - del font[tag] - else: - self.log(tag, "subsetted") - else: - self.log(tag, "NOT subset; don't know how to subset; dropped") - del font[tag] - - glyphOrder = font.getGlyphOrder() - glyphOrder = [g for g in glyphOrder if g in self.glyphs_all] - 
font.setGlyphOrder(glyphOrder) - font._buildReverseGlyphOrderDict() - self.log.lapse("subset GlyphOrder") - - def _prune_post_subset(self, font): - for tag in font.keys(): - if tag == 'GlyphOrder': continue - clazz = ttLib.getTableClass(tag) - if hasattr(clazz, 'prune_post_subset'): - table = font[tag] - retain = table.prune_post_subset(self.options) - self.log.lapse("prune '%s'" % tag) - if not retain: - self.log(tag, "pruned to empty; dropped") - del font[tag] - else: - self.log(tag, "pruned") - - def subset(self, font): - - self._prune_pre_subset(font) - self._closure_glyphs(font) - self._subset_glyphs(font) - self._prune_post_subset(font) - - -class Logger(object): - - def __init__(self, verbose=False, xml=False, timing=False): - self.verbose = verbose - self.xml = xml - self.timing = timing - self.last_time = self.start_time = time.time() - - def parse_opts(self, argv): - argv = argv[:] - for v in ['verbose', 'xml', 'timing']: - if "--"+v in argv: - setattr(self, v, True) - argv.remove("--"+v) - return argv - - def __call__(self, *things): - if not self.verbose: - return - print(' '.join(str(x) for x in things)) - - def lapse(self, *things): - if not self.timing: - return - new_time = time.time() - print("Took %0.3fs to %s" %(new_time - self.last_time, - ' '.join(str(x) for x in things))) - self.last_time = new_time - - def glyphs(self, glyphs, font=None): - if not self.verbose: - return - self("Glyph names:", sorted(glyphs)) - if font: - reverseGlyphMap = font.getReverseGlyphMap() - self("Glyph IDs: ", sorted(reverseGlyphMap[g] for g in glyphs)) - - def font(self, font, file=sys.stdout): - if not self.xml: - return - from fontTools.misc import xmlWriter - writer = xmlWriter.XMLWriter(file) - for tag in font.keys(): - writer.begintag(tag) - writer.newline() - font[tag].toXML(writer, font) - writer.endtag(tag) - writer.newline() - - -def load_font(fontFile, - options, - allowVID=False, - checkChecksums=False, - dontLoadGlyphNames=False, - lazy=True): - - font = 
ttLib.TTFont(fontFile, - allowVID=allowVID, - checkChecksums=checkChecksums, - recalcBBoxes=options.recalc_bounds, - recalcTimestamp=options.recalc_timestamp, - lazy=lazy) - - # Hack: - # - # If we don't need glyph names, change 'post' class to not try to - # load them. It avoid lots of headache with broken fonts as well - # as loading time. - # - # Ideally ttLib should provide a way to ask it to skip loading - # glyph names. But it currently doesn't provide such a thing. - # - if dontLoadGlyphNames: - post = ttLib.getTableClass('post') - saved = post.decode_format_2_0 - post.decode_format_2_0 = post.decode_format_3_0 - f = font['post'] - if f.formatType == 2.0: - f.formatType = 3.0 - post.decode_format_2_0 = saved - - return font - -def save_font(font, outfile, options): - if options.flavor and not hasattr(font, 'flavor'): - raise Exception("fonttools version does not support flavors.") - font.flavor = options.flavor - font.save(outfile, reorderTables=options.canonical_order) - -def parse_unicodes(s): - import re - s = re.sub (r"0[xX]", " ", s) - s = re.sub (r"[<+>,;&#\\xXuU\n ]", " ", s) - l = [] - for item in s.split(): - fields = item.split('-') - if len(fields) == 1: - l.append(int(item, 16)) - else: - start,end = fields - l.extend(range(int(start, 16), int(end, 16)+1)) - return l - -def parse_gids(s): - l = [] - for item in s.replace(',', ' ').split(): - fields = item.split('-') - if len(fields) == 1: - l.append(int(fields[0])) - else: - l.extend(range(int(fields[0]), int(fields[1])+1)) - return l - -def parse_glyphs(s): - return s.replace(',', ' ').split() - -def main(args=None): - - if args is None: - args = sys.argv[1:] - - if '--help' in args: - print(__doc__) - sys.exit(0) - - log = Logger() - args = log.parse_opts(args) - - options = Options() - args = options.parse_opts(args, - ignore_unknown=['gids', 'gids-file', - 'glyphs', 'glyphs-file', - 'text', 'text-file', - 'unicodes', 'unicodes-file', - 'output-file']) - - if len(args) < 2: - print("usage:", 
__usage__, file=sys.stderr) - print("Try pyftsubset --help for more information.", file=sys.stderr) - sys.exit(1) - - fontfile = args[0] - args = args[1:] - - subsetter = Subsetter(options=options, log=log) - outfile = fontfile + '.subset' - glyphs = [] - gids = [] - unicodes = [] - wildcard_glyphs = False - wildcard_unicodes = False - text = "" - for g in args: - if g == '*': - wildcard_glyphs = True - continue - if g.startswith('--output-file='): - outfile = g[14:] - continue - if g.startswith('--text='): - text += g[7:] - continue - if g.startswith('--text-file='): - text += open(g[12:]).read().replace('\n', '') - continue - if g.startswith('--unicodes='): - if g[11:] == '*': - wildcard_unicodes = True - else: - unicodes.extend(parse_unicodes(g[11:])) - continue - if g.startswith('--unicodes-file='): - for line in open(g[16:]).readlines(): - unicodes.extend(parse_unicodes(line.split('#')[0])) - continue - if g.startswith('--gids='): - gids.extend(parse_gids(g[7:])) - continue - if g.startswith('--gids-file='): - for line in open(g[12:]).readlines(): - gids.extend(parse_gids(line.split('#')[0])) - continue - if g.startswith('--glyphs='): - if g[9:] == '*': - wildcard_glyphs = True - else: - glyphs.extend(parse_glyphs(g[9:])) - continue - if g.startswith('--glyphs-file='): - for line in open(g[14:]).readlines(): - glyphs.extend(parse_glyphs(line.split('#')[0])) - continue - glyphs.append(g) - - dontLoadGlyphNames = not options.glyph_names and not glyphs - font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames) - log.lapse("load font") - if wildcard_glyphs: - glyphs.extend(font.getGlyphOrder()) - if wildcard_unicodes: - for t in font['cmap'].tables: - if t.isUnicode(): - unicodes.extend(t.cmap.keys()) - assert '' not in glyphs - - log.lapse("compile glyph list") - log("Text: '%s'" % text) - log("Unicodes:", unicodes) - log("Glyphs:", glyphs) - log("Gids:", gids) - - subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text) - 
subsetter.subset(font) - - save_font (font, outfile, options) - log.lapse("compile and save font") - - log.last_time = log.start_time - log.lapse("make one with everything(TOTAL TIME)") - - if log.verbose: - import os - log("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile)) - log("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile)) - - log.font(font) - - font.close() - - -__all__ = [ - 'Options', - 'Subsetter', - 'Logger', - 'load_font', - 'save_font', - 'parse_gids', - 'parse_glyphs', - 'parse_unicodes', - 'main' -] - -if __name__ == '__main__': - main() diff -Nru fonttools-3.0/Lib/fontTools/svgLib/__init__.py fonttools-3.21.2/Lib/fontTools/svgLib/__init__.py --- fonttools-3.0/Lib/fontTools/svgLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/svgLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +from .path import SVGPath, parse_path + +__all__ = ["SVGPath", "parse_path"] diff -Nru fonttools-3.0/Lib/fontTools/svgLib/path/__init__.py fonttools-3.21.2/Lib/fontTools/svgLib/path/__init__.py --- fonttools-3.0/Lib/fontTools/svgLib/path/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/svgLib/path/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,58 @@ +from __future__ import ( + print_function, division, absolute_import, unicode_literals) +from fontTools.misc.py23 import * + +from fontTools.pens.transformPen import TransformPen +from .parser import parse_path + +try: + from xml.etree import cElementTree as ElementTree # python 2 +except ImportError: # pragma nocover + from xml.etree import ElementTree # python 3 + + +__all__ = [tostr(s) for s in ("SVGPath", "parse_path")] + + +class SVGPath(object): + """ Parse SVG ``path`` elements from a file or string, and draw them + onto a glyph object that supports the FontTools Pen protocol. 
+ + For example, reading from an SVG file and drawing to a Defcon Glyph: + + import defcon + glyph = defcon.Glyph() + pen = glyph.getPen() + svg = SVGPath("path/to/a.svg") + svg.draw(pen) + + Or reading from a string containing SVG data, using the alternative + 'fromstring' (a class method): + + data = ' elements) + and call a 'pen' object's moveTo, lineTo, curveTo, qCurveTo and closePath + methods. + + If 'current_pos' (2-float tuple) is provided, the initial moveTo will + be relative to that instead being absolute. + + Arc segments (commands "A" or "a") are not currently supported, and raise + NotImplementedError. + """ + # In the SVG specs, initial movetos are absolute, even if + # specified as 'm'. This is the default behavior here as well. + # But if you pass in a current_pos variable, the initial moveto + # will be relative to that current_pos. This is useful. + current_pos = complex(*current_pos) + + elements = list(_tokenize_path(pathdef)) + # Reverse for easy use of .pop() + elements.reverse() + + start_pos = None + command = None + last_control = None + + while elements: + + if elements[-1] in COMMANDS: + # New command. + last_command = command # Used by S and T + command = elements.pop() + absolute = command in UPPERCASE + command = command.upper() + else: + # If this element starts with numbers, it is an implicit command + # and we don't change the command. Check that it's allowed: + if command is None: + raise ValueError("Unallowed implicit command in %s, position %s" % ( + pathdef, len(pathdef.split()) - len(elements))) + last_command = command # Used by S and T + + if command == 'M': + # Moveto command. 
+ x = elements.pop() + y = elements.pop() + pos = float(x) + float(y) * 1j + if absolute: + current_pos = pos + else: + current_pos += pos + + # M is not preceded by Z; it's an open subpath + if start_pos is not None: + pen.endPath() + + pen.moveTo((current_pos.real, current_pos.imag)) + + # when M is called, reset start_pos + # This behavior of Z is defined in svg spec: + # http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand + start_pos = current_pos + + # Implicit moveto commands are treated as lineto commands. + # So we set command to lineto here, in case there are + # further implicit commands after this moveto. + command = 'L' + + elif command == 'Z': + # Close path + if current_pos != start_pos: + pen.lineTo((start_pos.real, start_pos.imag)) + pen.closePath() + current_pos = start_pos + start_pos = None + command = None # You can't have implicit commands after closing. + + elif command == 'L': + x = elements.pop() + y = elements.pop() + pos = float(x) + float(y) * 1j + if not absolute: + pos += current_pos + pen.lineTo((pos.real, pos.imag)) + current_pos = pos + + elif command == 'H': + x = elements.pop() + pos = float(x) + current_pos.imag * 1j + if not absolute: + pos += current_pos.real + pen.lineTo((pos.real, pos.imag)) + current_pos = pos + + elif command == 'V': + y = elements.pop() + pos = current_pos.real + float(y) * 1j + if not absolute: + pos += current_pos.imag * 1j + pen.lineTo((pos.real, pos.imag)) + current_pos = pos + + elif command == 'C': + control1 = float(elements.pop()) + float(elements.pop()) * 1j + control2 = float(elements.pop()) + float(elements.pop()) * 1j + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + control1 += current_pos + control2 += current_pos + end += current_pos + + pen.curveTo((control1.real, control1.imag), + (control2.real, control2.imag), + (end.real, end.imag)) + current_pos = end + last_control = control2 + + elif command == 'S': + # Smooth curve. 
First control point is the "reflection" of + # the second control point in the previous path. + + if last_command not in 'CS': + # If there is no previous command or if the previous command + # was not an C, c, S or s, assume the first control point is + # coincident with the current point. + control1 = current_pos + else: + # The first control point is assumed to be the reflection of + # the second control point on the previous command relative + # to the current point. + control1 = current_pos + current_pos - last_control + + control2 = float(elements.pop()) + float(elements.pop()) * 1j + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + control2 += current_pos + end += current_pos + + pen.curveTo((control1.real, control1.imag), + (control2.real, control2.imag), + (end.real, end.imag)) + current_pos = end + last_control = control2 + + elif command == 'Q': + control = float(elements.pop()) + float(elements.pop()) * 1j + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + control += current_pos + end += current_pos + + pen.qCurveTo((control.real, control.imag), (end.real, end.imag)) + current_pos = end + last_control = control + + elif command == 'T': + # Smooth curve. Control point is the "reflection" of + # the second control point in the previous path. + + if last_command not in 'QT': + # If there is no previous command or if the previous command + # was not an Q, q, T or t, assume the first control point is + # coincident with the current point. + control = current_pos + else: + # The control point is assumed to be the reflection of + # the control point on the previous command relative + # to the current point. 
+ control = current_pos + current_pos - last_control + + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + end += current_pos + + pen.qCurveTo((control.real, control.imag), (end.real, end.imag)) + current_pos = end + last_control = control + + elif command == 'A': + raise NotImplementedError('arcs are not supported') + + # no final Z command, it's an open path + if start_pos is not None: + pen.endPath() diff -Nru fonttools-3.0/Lib/fontTools/t1Lib/__init__.py fonttools-3.21.2/Lib/fontTools/t1Lib/__init__.py --- fonttools-3.0/Lib/fontTools/t1Lib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/t1Lib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,369 @@ +"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts (Python2 only) + +Functions for reading and writing raw Type 1 data: + +read(path) + reads any Type 1 font file, returns the raw data and a type indicator: + 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed + to by 'path'. + Raises an error when the file does not contain valid Type 1 data. + +write(path, data, kind='OTHER', dohex=False) + writes raw Type 1 data to the file pointed to by 'path'. + 'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'. + 'dohex' is a flag which determines whether the eexec encrypted + part should be written as hexadecimal or binary, but only if kind + is 'OTHER'. +""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import eexec +from fontTools.misc.macCreatorType import getMacCreatorAndType +import os +import re + +__author__ = "jvr" +__version__ = "1.0b2" +DEBUG = 0 + + +try: + try: + from Carbon import Res + except ImportError: + import Res # MacPython < 2.2 +except ImportError: + haveMacSupport = 0 +else: + haveMacSupport = 1 + + +class T1Error(Exception): pass + + +class T1Font(object): + + """Type 1 font class. 
+ + Uses a minimal interpeter that supports just about enough PS to parse + Type 1 fonts. + """ + + def __init__(self, path=None): + if path is not None: + self.data, type = read(path) + else: + pass # XXX + + def saveAs(self, path, type, dohex=False): + write(path, self.getData(), type, dohex) + + def getData(self): + # XXX Todo: if the data has been converted to Python object, + # recreate the PS stream + return self.data + + def getGlyphSet(self): + """Return a generic GlyphSet, which is a dict-like object + mapping glyph names to glyph objects. The returned glyph objects + have a .draw() method that supports the Pen protocol, and will + have an attribute named 'width', but only *after* the .draw() method + has been called. + + In the case of Type 1, the GlyphSet is simply the CharStrings dict. + """ + return self["CharStrings"] + + def __getitem__(self, key): + if not hasattr(self, "font"): + self.parse() + return self.font[key] + + def parse(self): + from fontTools.misc import psLib + from fontTools.misc import psCharStrings + self.font = psLib.suckfont(self.data) + charStrings = self.font["CharStrings"] + lenIV = self.font["Private"].get("lenIV", 4) + assert lenIV >= 0 + subrs = self.font["Private"]["Subrs"] + for glyphName, charString in charStrings.items(): + charString, R = eexec.decrypt(charString, 4330) + charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:], + subrs=subrs) + for i in range(len(subrs)): + charString, R = eexec.decrypt(subrs[i], 4330) + subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs) + del self.data + + +# low level T1 data read and write functions + +def read(path, onlyHeader=False): + """reads any Type 1 font file, returns raw data""" + normpath = path.lower() + creator, typ = getMacCreatorAndType(path) + if typ == 'LWFN': + return readLWFN(path, onlyHeader), 'LWFN' + if normpath[-4:] == '.pfb': + return readPFB(path, onlyHeader), 'PFB' + else: + return readOther(path), 'OTHER' + +def write(path, 
data, kind='OTHER', dohex=False): + assertType1(data) + kind = kind.upper() + try: + os.remove(path) + except os.error: + pass + err = 1 + try: + if kind == 'LWFN': + writeLWFN(path, data) + elif kind == 'PFB': + writePFB(path, data) + else: + writeOther(path, data, dohex) + err = 0 + finally: + if err and not DEBUG: + try: + os.remove(path) + except os.error: + pass + + +# -- internal -- + +LWFNCHUNKSIZE = 2000 +HEXLINELENGTH = 80 + + +def readLWFN(path, onlyHeader=False): + """reads an LWFN font file, returns raw data""" + from fontTools.misc.macRes import ResourceReader + reader = ResourceReader(path) + try: + data = [] + for res in reader.get('POST', []): + code = byteord(res.data[0]) + if byteord(res.data[1]) != 0: + raise T1Error('corrupt LWFN file') + if code in [1, 2]: + if onlyHeader and code == 2: + break + data.append(res.data[2:]) + elif code in [3, 5]: + break + elif code == 4: + f = open(path, "rb") + data.append(f.read()) + f.close() + elif code == 0: + pass # comment, ignore + else: + raise T1Error('bad chunk code: ' + repr(code)) + finally: + reader.close() + data = bytesjoin(data) + assertType1(data) + return data + +def readPFB(path, onlyHeader=False): + """reads a PFB font file, returns raw data""" + f = open(path, "rb") + data = [] + while True: + if f.read(1) != bytechr(128): + raise T1Error('corrupt PFB file') + code = byteord(f.read(1)) + if code in [1, 2]: + chunklen = stringToLong(f.read(4)) + chunk = f.read(chunklen) + assert len(chunk) == chunklen + data.append(chunk) + elif code == 3: + break + else: + raise T1Error('bad chunk code: ' + repr(code)) + if onlyHeader: + break + f.close() + data = bytesjoin(data) + assertType1(data) + return data + +def readOther(path): + """reads any (font) file, returns raw data""" + f = open(path, "rb") + data = f.read() + f.close() + assertType1(data) + + chunks = findEncryptedChunks(data) + data = [] + for isEncrypted, chunk in chunks: + if isEncrypted and isHex(chunk[:4]): + 
data.append(deHexString(chunk)) + else: + data.append(chunk) + return bytesjoin(data) + +# file writing tools + +def writeLWFN(path, data): + # Res.FSpCreateResFile was deprecated in OS X 10.5 + Res.FSpCreateResFile(path, "just", "LWFN", 0) + resRef = Res.FSOpenResFile(path, 2) # write-only + try: + Res.UseResFile(resRef) + resID = 501 + chunks = findEncryptedChunks(data) + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + while chunk: + res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2]) + res.AddResource('POST', resID, '') + chunk = chunk[LWFNCHUNKSIZE - 2:] + resID = resID + 1 + res = Res.Resource(bytechr(5) + '\0') + res.AddResource('POST', resID, '') + finally: + Res.CloseResFile(resRef) + +def writePFB(path, data): + chunks = findEncryptedChunks(data) + f = open(path, "wb") + try: + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + f.write(bytechr(128) + bytechr(code)) + f.write(longToString(len(chunk))) + f.write(chunk) + f.write(bytechr(128) + bytechr(3)) + finally: + f.close() + +def writeOther(path, data, dohex=False): + chunks = findEncryptedChunks(data) + f = open(path, "wb") + try: + hexlinelen = HEXLINELENGTH // 2 + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + if code == 2 and dohex: + while chunk: + f.write(eexec.hexString(chunk[:hexlinelen])) + f.write(b'\r') + chunk = chunk[hexlinelen:] + else: + f.write(chunk) + finally: + f.close() + + +# decryption tools + +EEXECBEGIN = b"currentfile eexec" +EEXECEND = b'0' * 64 +EEXECINTERNALEND = b"currentfile closefile" +EEXECBEGINMARKER = b"%-- eexec start\r" +EEXECENDMARKER = b"%-- eexec end\r" + +_ishexRE = re.compile(b'[0-9A-Fa-f]*$') + +def isHex(text): + return _ishexRE.match(text) is not None + + +def decryptType1(data): + chunks = findEncryptedChunks(data) + data = [] + for isEncrypted, chunk in chunks: + if isEncrypted: + if isHex(chunk[:4]): + chunk = deHexString(chunk) + 
decrypted, R = eexec.decrypt(chunk, 55665) + decrypted = decrypted[4:] + if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \ + and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND: + raise T1Error("invalid end of eexec part") + decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + b'\r' + data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER) + else: + if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN: + data.append(chunk[:-len(EEXECBEGIN)-1]) + else: + data.append(chunk) + return bytesjoin(data) + +def findEncryptedChunks(data): + chunks = [] + while True: + eBegin = data.find(EEXECBEGIN) + if eBegin < 0: + break + eBegin = eBegin + len(EEXECBEGIN) + 1 + eEnd = data.find(EEXECEND, eBegin) + if eEnd < 0: + raise T1Error("can't find end of eexec part") + cypherText = data[eBegin:eEnd + 2] + if isHex(cypherText[:4]): + cypherText = deHexString(cypherText) + plainText, R = eexec.decrypt(cypherText, 55665) + eEndLocal = plainText.find(EEXECINTERNALEND) + if eEndLocal < 0: + raise T1Error("can't find end of eexec part") + chunks.append((0, data[:eBegin])) + chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1])) + data = data[eEnd:] + chunks.append((0, data)) + return chunks + +def deHexString(hexstring): + return eexec.deHexString(bytesjoin(hexstring.split())) + + +# Type 1 assertion + +_fontType1RE = re.compile(br"/FontType\s+1\s+def") + +def assertType1(data): + for head in [b'%!PS-AdobeFont', b'%!FontType1']: + if data[:len(head)] == head: + break + else: + raise T1Error("not a PostScript font") + if not _fontType1RE.search(data): + raise T1Error("not a Type 1 font") + if data.find(b"currentfile eexec") < 0: + raise T1Error("not an encrypted Type 1 font") + # XXX what else? 
+ return data + + +# pfb helpers + +def longToString(long): + s = b"" + for i in range(4): + s += bytechr((long & (0xff << (i * 8))) >> i * 8) + return s + +def stringToLong(s): + if len(s) != 4: + raise ValueError('string must be 4 bytes long') + l = 0 + for i in range(4): + l += byteord(s[i]) << (i * 8) + return l diff -Nru fonttools-3.0/Lib/fontTools/t1Lib.py fonttools-3.21.2/Lib/fontTools/t1Lib.py --- fonttools-3.0/Lib/fontTools/t1Lib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/t1Lib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,371 +0,0 @@ -"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts - -Functions for reading and writing raw Type 1 data: - -read(path) - reads any Type 1 font file, returns the raw data and a type indicator: - 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed - to by 'path'. - Raises an error when the file does not contain valid Type 1 data. - -write(path, data, kind='OTHER', dohex=False) - writes raw Type 1 data to the file pointed to by 'path'. - 'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'. - 'dohex' is a flag which determines whether the eexec encrypted - part should be written as hexadecimal or binary, but only if kind - is 'LWFN' or 'PFB'. -""" -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import eexec -from fontTools.misc.macCreatorType import getMacCreatorAndType -import os -import re - -__author__ = "jvr" -__version__ = "1.0b2" -DEBUG = 0 - - -try: - try: - from Carbon import Res - except ImportError: - import Res # MacPython < 2.2 -except ImportError: - haveMacSupport = 0 -else: - haveMacSupport = 1 - import MacOS - - -class T1Error(Exception): pass - - -class T1Font(object): - - """Type 1 font class. - - Uses a minimal interpeter that supports just about enough PS to parse - Type 1 fonts. 
- """ - - def __init__(self, path=None): - if path is not None: - self.data, type = read(path) - else: - pass # XXX - - def saveAs(self, path, type): - write(path, self.getData(), type) - - def getData(self): - # XXX Todo: if the data has been converted to Python object, - # recreate the PS stream - return self.data - - def getGlyphSet(self): - """Return a generic GlyphSet, which is a dict-like object - mapping glyph names to glyph objects. The returned glyph objects - have a .draw() method that supports the Pen protocol, and will - have an attribute named 'width', but only *after* the .draw() method - has been called. - - In the case of Type 1, the GlyphSet is simply the CharStrings dict. - """ - return self["CharStrings"] - - def __getitem__(self, key): - if not hasattr(self, "font"): - self.parse() - return self.font[key] - - def parse(self): - from fontTools.misc import psLib - from fontTools.misc import psCharStrings - self.font = psLib.suckfont(self.data) - charStrings = self.font["CharStrings"] - lenIV = self.font["Private"].get("lenIV", 4) - assert lenIV >= 0 - subrs = self.font["Private"]["Subrs"] - for glyphName, charString in charStrings.items(): - charString, R = eexec.decrypt(charString, 4330) - charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:], - subrs=subrs) - for i in range(len(subrs)): - charString, R = eexec.decrypt(subrs[i], 4330) - subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs) - del self.data - - -# low level T1 data read and write functions - -def read(path, onlyHeader=False): - """reads any Type 1 font file, returns raw data""" - normpath = path.lower() - creator, typ = getMacCreatorAndType(path) - if typ == 'LWFN': - return readLWFN(path, onlyHeader), 'LWFN' - if normpath[-4:] == '.pfb': - return readPFB(path, onlyHeader), 'PFB' - else: - return readOther(path), 'OTHER' - -def write(path, data, kind='OTHER', dohex=False): - assertType1(data) - kind = kind.upper() - try: - os.remove(path) - except 
os.error: - pass - err = 1 - try: - if kind == 'LWFN': - writeLWFN(path, data) - elif kind == 'PFB': - writePFB(path, data) - else: - writeOther(path, data, dohex) - err = 0 - finally: - if err and not DEBUG: - try: - os.remove(path) - except os.error: - pass - - -# -- internal -- - -LWFNCHUNKSIZE = 2000 -HEXLINELENGTH = 80 - - -def readLWFN(path, onlyHeader=False): - """reads an LWFN font file, returns raw data""" - resRef = Res.FSOpenResFile(path, 1) # read-only - try: - Res.UseResFile(resRef) - n = Res.Count1Resources('POST') - data = [] - for i in range(501, 501 + n): - res = Res.Get1Resource('POST', i) - code = byteord(res.data[0]) - if byteord(res.data[1]) != 0: - raise T1Error('corrupt LWFN file') - if code in [1, 2]: - if onlyHeader and code == 2: - break - data.append(res.data[2:]) - elif code in [3, 5]: - break - elif code == 4: - f = open(path, "rb") - data.append(f.read()) - f.close() - elif code == 0: - pass # comment, ignore - else: - raise T1Error('bad chunk code: ' + repr(code)) - finally: - Res.CloseResFile(resRef) - data = bytesjoin(data) - assertType1(data) - return data - -def readPFB(path, onlyHeader=False): - """reads a PFB font file, returns raw data""" - f = open(path, "rb") - data = [] - while True: - if f.read(1) != bytechr(128): - raise T1Error('corrupt PFB file') - code = byteord(f.read(1)) - if code in [1, 2]: - chunklen = stringToLong(f.read(4)) - chunk = f.read(chunklen) - assert len(chunk) == chunklen - data.append(chunk) - elif code == 3: - break - else: - raise T1Error('bad chunk code: ' + repr(code)) - if onlyHeader: - break - f.close() - data = bytesjoin(data) - assertType1(data) - return data - -def readOther(path): - """reads any (font) file, returns raw data""" - f = open(path, "rb") - data = f.read() - f.close() - assertType1(data) - - chunks = findEncryptedChunks(data) - data = [] - for isEncrypted, chunk in chunks: - if isEncrypted and isHex(chunk[:4]): - data.append(deHexString(chunk)) - else: - data.append(chunk) - return 
bytesjoin(data) - -# file writing tools - -def writeLWFN(path, data): - Res.FSpCreateResFile(path, "just", "LWFN", 0) - resRef = Res.FSOpenResFile(path, 2) # write-only - try: - Res.UseResFile(resRef) - resID = 501 - chunks = findEncryptedChunks(data) - for isEncrypted, chunk in chunks: - if isEncrypted: - code = 2 - else: - code = 1 - while chunk: - res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2]) - res.AddResource('POST', resID, '') - chunk = chunk[LWFNCHUNKSIZE - 2:] - resID = resID + 1 - res = Res.Resource(bytechr(5) + '\0') - res.AddResource('POST', resID, '') - finally: - Res.CloseResFile(resRef) - -def writePFB(path, data): - chunks = findEncryptedChunks(data) - f = open(path, "wb") - try: - for isEncrypted, chunk in chunks: - if isEncrypted: - code = 2 - else: - code = 1 - f.write(bytechr(128) + bytechr(code)) - f.write(longToString(len(chunk))) - f.write(chunk) - f.write(bytechr(128) + bytechr(3)) - finally: - f.close() - -def writeOther(path, data, dohex=False): - chunks = findEncryptedChunks(data) - f = open(path, "wb") - try: - hexlinelen = HEXLINELENGTH // 2 - for isEncrypted, chunk in chunks: - if isEncrypted: - code = 2 - else: - code = 1 - if code == 2 and dohex: - while chunk: - f.write(eexec.hexString(chunk[:hexlinelen])) - f.write('\r') - chunk = chunk[hexlinelen:] - else: - f.write(chunk) - finally: - f.close() - - -# decryption tools - -EEXECBEGIN = "currentfile eexec" -EEXECEND = '0' * 64 -EEXECINTERNALEND = "currentfile closefile" -EEXECBEGINMARKER = "%-- eexec start\r" -EEXECENDMARKER = "%-- eexec end\r" - -_ishexRE = re.compile('[0-9A-Fa-f]*$') - -def isHex(text): - return _ishexRE.match(text) is not None - - -def decryptType1(data): - chunks = findEncryptedChunks(data) - data = [] - for isEncrypted, chunk in chunks: - if isEncrypted: - if isHex(chunk[:4]): - chunk = deHexString(chunk) - decrypted, R = eexec.decrypt(chunk, 55665) - decrypted = decrypted[4:] - if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND 
\ - and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND: - raise T1Error("invalid end of eexec part") - decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + '\r' - data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER) - else: - if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN: - data.append(chunk[:-len(EEXECBEGIN)-1]) - else: - data.append(chunk) - return bytesjoin(data) - -def findEncryptedChunks(data): - chunks = [] - while True: - eBegin = data.find(EEXECBEGIN) - if eBegin < 0: - break - eBegin = eBegin + len(EEXECBEGIN) + 1 - eEnd = data.find(EEXECEND, eBegin) - if eEnd < 0: - raise T1Error("can't find end of eexec part") - cypherText = data[eBegin:eEnd + 2] - if isHex(cypherText[:4]): - cypherText = deHexString(cypherText) - plainText, R = eexec.decrypt(cypherText, 55665) - eEndLocal = plainText.find(EEXECINTERNALEND) - if eEndLocal < 0: - raise T1Error("can't find end of eexec part") - chunks.append((0, data[:eBegin])) - chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1])) - data = data[eEnd:] - chunks.append((0, data)) - return chunks - -def deHexString(hexstring): - return eexec.deHexString(strjoin(hexstring.split())) - - -# Type 1 assertion - -_fontType1RE = re.compile(br"/FontType\s+1\s+def") - -def assertType1(data): - for head in [b'%!PS-AdobeFont', b'%!FontType1']: - if data[:len(head)] == head: - break - else: - raise T1Error("not a PostScript font") - if not _fontType1RE.search(data): - raise T1Error("not a Type 1 font") - if data.find(b"currentfile eexec") < 0: - raise T1Error("not an encrypted Type 1 font") - # XXX what else? 
- return data - - -# pfb helpers - -def longToString(long): - s = "" - for i in range(4): - s += bytechr((long & (0xff << (i * 8))) >> i * 8) - return s - -def stringToLong(s): - if len(s) != 4: - raise ValueError('string must be 4 bytes long') - l = 0 - for i in range(4): - l += byteord(s[i]) << (i * 8) - return l diff -Nru fonttools-3.0/Lib/fontTools/ttLib/__init__.py fonttools-3.21.2/Lib/fontTools/ttLib/__init__.py --- fonttools-3.0/Lib/fontTools/ttLib/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -8,15 +8,15 @@ Python 1.5.2c1 (#43, Mar 9 1999, 13:06:43) [CW PPC w/GUSI w/MSL] Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam ->>> from fontTools import ttLib ->>> tt = ttLib.TTFont("afont.ttf") ->>> tt['maxp'].numGlyphs +>> from fontTools import ttLib +>> tt = ttLib.TTFont("afont.ttf") +>> tt['maxp'].numGlyphs 242 ->>> tt['OS/2'].achVendID +>> tt['OS/2'].achVendID 'B&H\000' ->>> tt['head'].unitsPerEm +>> tt['head'].unitsPerEm 2048 ->>> tt.saveXML("afont.ttx") +>> tt.saveXML("afont.ttx") Dumping 'LTSH' table... Dumping 'OS/2' table... Dumping 'VDMX' table... @@ -33,28 +33,23 @@ Dumping 'name' table... Dumping 'post' table... Dumping 'prep' table... ->>> tt2 = ttLib.TTFont() ->>> tt2.importXML("afont.ttx") ->>> tt2['maxp'].numGlyphs +>> tt2 = ttLib.TTFont() +>> tt2.importXML("afont.ttx") +>> tt2['maxp'].numGlyphs 242 ->>> +>> """ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +from fontTools.misc.loggingTools import deprecateArgument, deprecateFunction import os import sys +import logging -haveMacSupport = 0 -if sys.platform == "mac": - haveMacSupport = 1 -elif sys.platform == "darwin": - if sys.version_info[:3] != (2, 2, 0) and sys.version_info[:1] < (3,): - # Python 2.2's Mac support is broken, so don't enable it there. 
- # Python 3 does not have Res used by macUtils - haveMacSupport = 1 +log = logging.getLogger(__name__) class TTLibError(Exception): pass @@ -69,8 +64,8 @@ def __init__(self, file=None, res_name_or_index=None, sfntVersion="\000\001\000\000", flavor=None, checkChecksums=False, - verbose=False, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False, - recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=False): + verbose=None, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False, + recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=None): """The constructor can be called with a few different arguments. When reading a font from disk, 'file' should be either a pathname @@ -91,13 +86,15 @@ The TTFont constructor can also be called without a 'file' argument: this is the way to create a new empty font. In this case you can optionally supply the 'sfntVersion' argument, - and a 'flavor' which can be None, or 'woff'. + and a 'flavor' which can be None, 'woff', or 'woff2'. If the recalcBBoxes argument is false, a number of things will *not* be recalculated upon save/compile: - 1) glyph bounding boxes - 2) maxp font bounding box - 3) hhea min/max values + 1) 'glyf' glyph bounding boxes + 2) 'CFF ' font bounding box + 3) 'head' font bounding box + 4) 'hhea' min/max values + 5) 'vhea' min/max values (1) is needed for certain kinds of CJK fonts (ask Werner Lemberg ;-). Additionally, upon importing an TTX file, this option cause glyphs to be compiled right away. 
This should reduce memory consumption @@ -128,8 +125,13 @@ """ from fontTools.ttLib import sfnt - self.verbose = verbose - self.quiet = quiet + + for name in ("verbose", "quiet"): + val = locals().get(name) + if val is not None: + deprecateArgument(name, "configure logging instead") + setattr(self, name, val) + self.lazy = lazy self.recalcBBoxes = recalcBBoxes self.recalcTimestamp = recalcTimestamp @@ -151,8 +153,8 @@ if not hasattr(file, "read"): closeStream = True # assume file is a string - if haveMacSupport and res_name_or_index is not None: - # on the mac, we deal with sfnt resources as well as flat files + if res_name_or_index is not None: + # see if it contains 'sfnt' resources in the resource or data fork from . import macUtils if res_name_or_index == 0: if macUtils.getSFNTResIndices(file): @@ -168,14 +170,16 @@ else: # assume "file" is a readable file object closeStream = False - # read input file in memory and wrap a stream around it to allow overwriting - tmp = BytesIO(file.read()) - if hasattr(file, 'name'): - # save reference to input file name - tmp.name = file.name - if closeStream: - file.close() - self.reader = sfnt.SFNTReader(tmp, checkChecksums, fontNumber=fontNumber) + if not self.lazy: + # read input file in memory and wrap a stream around it to allow overwriting + tmp = BytesIO(file.read()) + if hasattr(file, 'name'): + # save reference to input file name + tmp.name = file.name + if closeStream: + file.close() + file = tmp + self.reader = sfnt.SFNTReader(file, checkChecksums, fontNumber=fontNumber) self.sfntVersion = self.reader.sfntVersion self.flavor = self.reader.flavor self.flavorData = self.reader.flavorData @@ -185,28 +189,24 @@ if self.reader is not None: self.reader.close() - def save(self, file, makeSuitcase=False, reorderTables=True): + def save(self, file, reorderTables=True): """Save the font to disk. Similarly to the constructor, the 'file' argument can be either a pathname or a writable file object. 
- - On the Mac, if makeSuitcase is true, a suitcase (resource fork) - file will we made instead of a flat .ttf file. """ from fontTools.ttLib import sfnt if not hasattr(file, "write"): - closeStream = 1 - if os.name == "mac" and makeSuitcase: - from . import macUtils - file = macUtils.SFNTResourceWriter(file, self) - else: - file = open(file, "wb") - if os.name == "mac": - from fontTools.misc.macCreator import setMacCreatorAndType - setMacCreatorAndType(file.name, 'mdos', 'BINA') + if self.lazy and self.reader.file.name == file: + raise TTLibError( + "Can't overwrite TTFont when 'lazy' attribute is True") + closeStream = True + file = open(file, "wb") else: # assume "file" is a writable file object - closeStream = 0 + closeStream = False + + if self.recalcTimestamp and 'head' in self: + self['head'] # make sure 'head' is loaded so the recalculation is actually done tags = list(self.keys()) if "GlyphOrder" in tags: @@ -245,9 +245,9 @@ if closeStream: file.close() - def saveXML(self, fileOrPath, progress=None, quiet=False, + def saveXML(self, fileOrPath, progress=None, quiet=None, tables=None, skipTables=None, splitTables=False, disassembleInstructions=True, - bitmapGlyphDataFormat='raw'): + bitmapGlyphDataFormat='raw', newlinestr=None): """Export the font as TTX (an XML-based text file), or as a series of text files when splitTables is true. In the latter case, the 'fileOrPath' argument should be a path to a directory. 
@@ -258,6 +258,13 @@ from fontTools import version from fontTools.misc import xmlWriter + # only write the MAJOR.MINOR version in the 'ttLibVersion' attribute of + # TTX files' root element (without PATCH or .dev suffixes) + version = ".".join(version.split('.')[:2]) + + if quiet is not None: + deprecateArgument("quiet", "configure logging instead") + self.disassembleInstructions = disassembleInstructions self.bitmapGlyphDataFormat = bitmapGlyphDataFormat if not tables: @@ -275,8 +282,9 @@ else: idlefunc = None - writer = xmlWriter.XMLWriter(fileOrPath, idlefunc=idlefunc) - writer.begintag("ttFont", sfntVersion=repr(self.sfntVersion)[1:-1], + writer = xmlWriter.XMLWriter(fileOrPath, idlefunc=idlefunc, + newlinestr=newlinestr) + writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1], ttLibVersion=version) writer.newline() @@ -293,7 +301,8 @@ tag = tables[i] if splitTables: tablePath = fileNameTemplate % tagToIdentifier(tag) - tableWriter = xmlWriter.XMLWriter(tablePath, idlefunc=idlefunc) + tableWriter = xmlWriter.XMLWriter(tablePath, idlefunc=idlefunc, + newlinestr=newlinestr) tableWriter.begintag("ttFont", ttLibVersion=version) tableWriter.newline() tableWriter.newline() @@ -301,7 +310,7 @@ writer.newline() else: tableWriter = writer - self._tableToXML(tableWriter, tag, progress, quiet) + self._tableToXML(tableWriter, tag, progress) if splitTables: tableWriter.endtag("ttFont") tableWriter.newline() @@ -310,11 +319,14 @@ progress.set((i + 1)) writer.endtag("ttFont") writer.newline() - writer.close() - if self.verbose: - debugmsg("Done dumping TTX") - - def _tableToXML(self, writer, tag, progress, quiet): + # close if 'fileOrPath' is a path; leave it open if it's a file. 
+ # The special string "-" means standard output so leave that open too + if not hasattr(fileOrPath, "write") and fileOrPath != "-": + writer.close() + + def _tableToXML(self, writer, tag, progress, quiet=None): + if quiet is not None: + deprecateArgument("quiet", "configure logging instead") if tag in self: table = self[tag] report = "Dumping '%s' table..." % tag @@ -322,11 +334,7 @@ report = "No '%s' table found." % tag if progress: progress.setLabel(report) - elif self.verbose: - debugmsg(report) - else: - if not quiet: - print(report) + log.info(report) if tag not in self: return xmlTag = tagToXML(tag) @@ -346,10 +354,13 @@ writer.newline() writer.newline() - def importXML(self, file, progress=None, quiet=False): + def importXML(self, fileOrPath, progress=None, quiet=None): """Import a TTX file (an XML-based text format), so as to recreate a font object. """ + if quiet is not None: + deprecateArgument("quiet", "configure logging instead") + if "maxp" in self and "post" in self: # Make sure the glyph order is loaded, as it otherwise gets # lost if the XML doesn't contain the glyph order, yet does @@ -359,7 +370,7 @@ from fontTools.misc import xmlReader - reader = xmlReader.XMLReader(file, self, progress, quiet) + reader = xmlReader.XMLReader(fileOrPath, self, progress) reader.read() def isLoaded(self, tag): @@ -405,21 +416,20 @@ return table if self.reader is not None: import traceback - if self.verbose: - debugmsg("Reading '%s' table from disk" % tag) + log.debug("Reading '%s' table from disk", tag) data = self.reader[tag] tableClass = getTableClass(tag) table = tableClass(tag) self.tables[tag] = table - if self.verbose: - debugmsg("Decompiling '%s' table" % tag) + log.debug("Decompiling '%s' table", tag) try: table.decompile(data, self) except: if not self.ignoreDecompileErrors: raise # fall back to DefaultTable, retaining the binary table data - print("An exception occurred during the decompilation of the '%s' table" % tag) + log.exception( + "An exception 
occurred during the decompilation of the '%s' table", tag) from .tables.DefaultTable import DefaultTable file = StringIO() traceback.print_exc(file=file) @@ -511,50 +521,47 @@ # Set the glyph order, so the cmap parser has something # to work with (so we don't get called recursively). self.glyphOrder = glyphOrder - # Get a (new) temporary cmap (based on the just invented names) - try: - tempcmap = self['cmap'].getcmap(3, 1) - except KeyError: - tempcmap = None - if tempcmap is not None: - # we have a unicode cmap - from fontTools import agl - cmap = tempcmap.cmap - # create a reverse cmap dict - reversecmap = {} - for unicode, name in list(cmap.items()): - reversecmap[name] = unicode - allNames = {} - for i in range(numGlyphs): - tempName = glyphOrder[i] - if tempName in reversecmap: - unicode = reversecmap[tempName] - if unicode in agl.UV2AGL: - # get name from the Adobe Glyph List - glyphName = agl.UV2AGL[unicode] - else: - # create uni name - glyphName = "uni%04X" % unicode - tempName = glyphName - n = allNames.get(tempName, 0) - if n: - tempName = glyphName + "#" + str(n) - glyphOrder[i] = tempName - allNames[tempName] = n + 1 - # Delete the temporary cmap table from the cache, so it can - # be parsed again with the right names. - del self.tables['cmap'] - else: - pass # no unicode cmap available, stick with the invented names + + # Make up glyph names based on the reversed cmap table. Because some + # glyphs (eg. ligatures or alternates) may not be reachable via cmap, + # this naming table will usually not cover all glyphs in the font. + # If the font has no Unicode cmap table, reversecmap will be empty. + reversecmap = self['cmap'].buildReversed() + useCount = {} + for i in range(numGlyphs): + tempName = glyphOrder[i] + if tempName in reversecmap: + # If a font maps both U+0041 LATIN CAPITAL LETTER A and + # U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph, + # we prefer naming the glyph as "A". 
+ glyphName = self._makeGlyphName(min(reversecmap[tempName])) + numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1 + if numUses > 1: + glyphName = "%s.alt%d" % (glyphName, numUses - 1) + glyphOrder[i] = glyphName + + # Delete the temporary cmap table from the cache, so it can + # be parsed again with the right names. + del self.tables['cmap'] self.glyphOrder = glyphOrder if cmapLoading: # restore partially loaded cmap, so it can continue loading # using the proper names. self.tables['cmap'] = cmapLoading + @staticmethod + def _makeGlyphName(codepoint): + from fontTools import agl # Adobe Glyph List + if codepoint in agl.UV2AGL: + return agl.UV2AGL[codepoint] + elif codepoint <= 0xFFFF: + return "uni%04X" % codepoint + else: + return "u%X" % codepoint + def getGlyphNames(self): """Get a list of glyph names, sorted alphabetically.""" - glyphNames = sorted(self.getGlyphOrder()[:]) + glyphNames = sorted(self.getGlyphOrder()) return glyphNames def getGlyphNames2(self): @@ -651,8 +658,7 @@ else: done.append(masterTable) tabledata = self.getTableData(tag) - if self.verbose: - debugmsg("writing '%s' table to disk" % tag) + log.debug("writing '%s' table to disk", tag) writer[tag] = tabledata done.append(tag) @@ -661,12 +667,10 @@ """ tag = Tag(tag) if self.isLoaded(tag): - if self.verbose: - debugmsg("compiling '%s' table" % tag) + log.debug("compiling '%s' table", tag) return self.tables[tag].compile(self) elif self.reader and tag in self.reader: - if self.verbose: - debugmsg("Reading '%s' table from disk" % tag) + log.debug("Reading '%s' table from disk", tag) return self.reader[tag] else: raise KeyError(tag) @@ -677,14 +681,18 @@ have a .draw() method that supports the Pen protocol, and will have an attribute named 'width'. - If the font is CFF-based, the outlines will be taken from the 'CFF ' - table. Otherwise the outlines will be taken from the 'glyf' table. 
- If the font contains both a 'CFF ' and a 'glyf' table, you can use - the 'preferCFF' argument to specify which one should be taken. + If the font is CFF-based, the outlines will be taken from the 'CFF ' or + 'CFF2' tables. Otherwise the outlines will be taken from the 'glyf' table. + If the font contains both a 'CFF '/'CFF2' and a 'glyf' table, you can use + the 'preferCFF' argument to specify which one should be taken. If the + font contains both a 'CFF ' and a 'CFF2' table, the latter is taken. """ glyphs = None - if (preferCFF and "CFF " in self) or "glyf" not in self: - glyphs = _TTGlyphSet(self, list(self["CFF "].cff.values())[0].CharStrings, _TTGlyphCFF) + if (preferCFF and any(tb in self for tb in ["CFF ", "CFF2"]) or + ("glyf" not in self and any(tb in self for tb in ["CFF ", "CFF2"]))): + table_tag = "CFF2" if "CFF2" in self else "CFF " + glyphs = _TTGlyphSet(self, + list(self[table_tag].cff.values())[0].CharStrings, _TTGlyphCFF) if glyphs is None and "glyf" in self: glyphs = _TTGlyphSet(self, self["glyf"], _TTGlyphGlyf) @@ -694,6 +702,17 @@ return glyphs + def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))): + """Return the 'best' unicode cmap dictionary available in the font, + or None, if no unicode cmap subtable is available. + + By default it will search for the following (platformID, platEncID) + pairs: + (3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0) + This can be customized via the cmapPreferences argument. 
+ """ + return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences) + class _TTGlyphSet(object): @@ -704,6 +723,7 @@ def __init__(self, ttFont, glyphs, glyphType): self._glyphs = glyphs self._hmtx = ttFont['hmtx'] + self._vmtx = ttFont['vmtx'] if 'vmtx' in ttFont else None self._glyphType = glyphType def keys(self): @@ -715,7 +735,10 @@ __contains__ = has_key def __getitem__(self, glyphName): - return self._glyphType(self, self._glyphs[glyphName], self._hmtx[glyphName]) + horizontalMetrics = self._hmtx[glyphName] + verticalMetrics = self._vmtx[glyphName] if self._vmtx else None + return self._glyphType( + self, self._glyphs[glyphName], horizontalMetrics, verticalMetrics) def get(self, glyphName, default=None): try: @@ -727,13 +750,21 @@ """Wrapper for a TrueType glyph that supports the Pen protocol, meaning that it has a .draw() method that takes a pen object as its only - argument. Additionally there is a 'width' attribute. + argument. Additionally there are 'width' and 'lsb' attributes, read from + the 'hmtx' table. + + If the font contains a 'vmtx' table, there will also be 'height' and 'tsb' + attributes. """ - def __init__(self, glyphset, glyph, metrics): + def __init__(self, glyphset, glyph, horizontalMetrics, verticalMetrics=None): self._glyphset = glyphset self._glyph = glyph - self.width, self.lsb = metrics + self.width, self.lsb = horizontalMetrics + if verticalMetrics: + self.height, self.tsb = verticalMetrics + else: + self.height, self.tsb = None, None def draw(self, pen): """Draw the glyph onto Pen. 
See fontTools.pens.basePen for details @@ -918,6 +949,7 @@ return Tag(tag + " " * (4 - len(tag))) +@deprecateFunction("use logging instead", category=DeprecationWarning) def debugmsg(msg): import time print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time()))) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/macUtils.py fonttools-3.21.2/Lib/fontTools/ttLib/macUtils.py --- fonttools-3.0/Lib/fontTools/ttLib/macUtils.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/macUtils.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,37 +1,18 @@ """ttLib.macUtils.py -- Various Mac-specific stuff.""" - from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -import sys -import os -if sys.platform not in ("mac", "darwin"): - raise ImportError("This module is Mac-only!") -try: - from Carbon import Res -except ImportError: - import Res - - -def MyOpenResFile(path): - mode = 1 # read only - try: - resref = Res.FSOpenResFile(path, mode) - except Res.Error: - # try data fork - resref = Res.FSOpenResourceFile(path, unicode(), mode) - return resref +from fontTools.misc.macRes import ResourceReader, ResourceError def getSFNTResIndices(path): - """Determine whether a file has a resource fork or not.""" + """Determine whether a file has a 'sfnt' resource fork or not.""" try: - resref = MyOpenResFile(path) - except Res.Error: + reader = ResourceReader(path) + indices = reader.getIndices('sfnt') + reader.close() + return indices + except ResourceError: return [] - Res.UseResFile(resref) - numSFNTs = Res.Count1Resources('sfnt') - Res.CloseResFile(resref) - return list(range(1, numSFNTs + 1)) def openTTFonts(path): @@ -53,21 +34,20 @@ return fonts -class SFNTResourceReader(object): +class SFNTResourceReader(BytesIO): - """Simple (Mac-only) read-only file wrapper for 'sfnt' resources.""" + """Simple read-only file wrapper for 'sfnt' resources.""" def __init__(self, path, res_name_or_index): - resref = MyOpenResFile(path) 
- Res.UseResFile(resref) + from fontTools import ttLib + reader = ResourceReader(path) if isinstance(res_name_or_index, basestring): - res = Res.Get1NamedResource('sfnt', res_name_or_index) + rsrc = reader.getNamedResource('sfnt', res_name_or_index) else: - res = Res.Get1IndResource('sfnt', res_name_or_index) - self.file = BytesIO(res.data) - Res.CloseResFile(resref) + rsrc = reader.getIndResource('sfnt', res_name_or_index) + if rsrc is None: + raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index) + reader.close() + self.rsrc = rsrc + super(SFNTResourceReader, self).__init__(rsrc.data) self.name = path - - def __getattr__(self, attr): - # cheap inheritance - return getattr(self.file, attr) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/sfnt.py fonttools-3.21.2/Lib/fontTools/ttLib/sfnt.py --- fonttools-3.0/Lib/fontTools/ttLib/sfnt.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/sfnt.py 2018-01-08 12:40:40.000000000 +0000 @@ -18,6 +18,10 @@ from fontTools.ttLib import getSearchRange import struct from collections import OrderedDict +import logging + + +log = logging.getLogger(__name__) class SFNTReader(object): @@ -84,12 +88,13 @@ if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"): from fontTools import ttLib raise ttLib.TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") - self.tables = OrderedDict() + tables = {} for i in range(self.numTables): entry = self.DirectoryEntry() entry.fromFile(self.file) tag = Tag(entry.tag) - self.tables[tag] = entry + tables[tag] = entry + self.tables = OrderedDict(sorted(tables.items(), key=lambda i: i[1].offset)) # Load flavor data if any if self.flavor == "woff": @@ -117,8 +122,8 @@ # Be obnoxious, and barf when it's wrong assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag elif checksum != entry.checkSum: - # Be friendly, and just print a warning. - print("bad checksum for '%s' table" % tag) + # Be friendly, and just log a warning. 
+ log.warning("bad checksum for '%s' table", tag) return data def __delitem__(self, tag): @@ -128,6 +133,46 @@ self.file.close() +# default compression level for WOFF 1.0 tables and metadata +ZLIB_COMPRESSION_LEVEL = 6 + +# if set to True, use zopfli instead of zlib for compressing WOFF 1.0. +# The Python bindings are available at https://pypi.python.org/pypi/zopfli +USE_ZOPFLI = False + +# mapping between zlib's compression levels and zopfli's 'numiterations'. +# Use lower values for files over several MB in size or it will be too slow +ZOPFLI_LEVELS = { + # 0: 0, # can't do 0 iterations... + 1: 1, + 2: 3, + 3: 5, + 4: 8, + 5: 10, + 6: 15, + 7: 25, + 8: 50, + 9: 100, +} + + +def compress(data, level=ZLIB_COMPRESSION_LEVEL): + """ Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True, + zopfli is used instead of the zlib module. + The compression 'level' must be between 0 and 9. 1 gives best speed, + 9 gives best compression (0 gives no compression at all). + The default value is a compromise between speed and compression (6). 
+ """ + if not (0 <= level <= 9): + raise ValueError('Bad compression level: %s' % level) + if not USE_ZOPFLI or level == 0: + from zlib import compress + return compress(data, level) + else: + from zopfli.zlib import compress + return compress(data, numiterations=ZOPFLI_LEVELS[level]) + + class SFNTWriter(object): def __new__(cls, *args, **kwargs): @@ -241,8 +286,7 @@ self.metaOrigLength = len(data.metaData) self.file.seek(0,2) self.metaOffset = self.file.tell() - import zlib - compressedMetaData = zlib.compress(data.metaData) + compressedMetaData = compress(data.metaData) self.metaLength = len(compressedMetaData) self.file.write(compressedMetaData) else: @@ -434,7 +478,17 @@ format = woffDirectoryEntryFormat formatSize = woffDirectoryEntrySize - zlibCompressionLevel = 6 + + def __init__(self): + super(WOFFDirectoryEntry, self).__init__() + # With fonttools<=3.1.2, the only way to set a different zlib + # compression level for WOFF directory entries was to set the class + # attribute 'zlibCompressionLevel'. This is now replaced by a globally + # defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when + # compressing the metadata. For backward compatibility, we still + # use the class attribute if it was already set. 
+ if not hasattr(WOFFDirectoryEntry, 'zlibCompressionLevel'): + self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL def decodeData(self, rawData): import zlib @@ -443,14 +497,13 @@ else: assert self.length < self.origLength data = zlib.decompress(rawData) - assert len (data) == self.origLength + assert len(data) == self.origLength return data def encodeData(self, data): - import zlib self.origLength = len(data) if not self.uncompressed: - compressedData = zlib.compress(data, self.zlibCompressionLevel) + compressedData = compress(data, self.zlibCompressionLevel) if self.uncompressed or len(compressedData) >= self.origLength: # Encode uncompressed rawData = data diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_a_n_k_r.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_a_n_k_r.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_a_n_k_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_a_n_k_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,13 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# The anchor point table provides a way to define anchor points. +# These are points within the coordinate space of a given glyph, +# independent of the control points used to render the glyph. +# Anchor points are used in conjunction with the 'kerx' table. 
+# +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html +class table__a_n_k_r(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/asciiTable.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/asciiTable.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/asciiTable.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/asciiTable.py 2018-01-08 12:40:40.000000000 +0000 @@ -12,11 +12,11 @@ data = strjoin(data) writer.begintag("source") writer.newline() - writer.write_noindent(data.replace("\r", "\n")) + writer.write_noindent(data) writer.newline() writer.endtag("source") writer.newline() def fromXML(self, name, attrs, content, ttFont): - lines = strjoin(content).replace("\r", "\n").split("\n") - self.data = tobytes("\r".join(lines[1:-1])) + lines = strjoin(content).split("\n") + self.data = tobytes("\n".join(lines[1:-1])) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_a_v_a_r.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_a_v_a_r.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_a_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_a_v_a_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -8,20 +8,26 @@ from . 
import DefaultTable import array import struct -import warnings +import logging +log = logging.getLogger(__name__) + # Apple's documentation of 'avar': # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6avar.html AVAR_HEADER_FORMAT = """ > # big endian - version: L - axisCount: L + majorVersion: H + minorVersion: H + reserved: H + axisCount: H """ +assert sstruct.calcsize(AVAR_HEADER_FORMAT) == 8, sstruct.calcsize(AVAR_HEADER_FORMAT) class table__a_v_a_r(DefaultTable.DefaultTable): + dependencies = ["fvar"] def __init__(self, tag=None): @@ -30,7 +36,12 @@ def compile(self, ttFont): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - header = {"version": 0x00010000, "axisCount": len(axisTags)} + header = { + "majorVersion": 1, + "minorVersion": 0, + "reserved": 0, + "axisCount": len(axisTags) + } result = [sstruct.pack(AVAR_HEADER_FORMAT, header)] for axis in axisTags: mappings = sorted(self.segments[axis].items()) @@ -46,8 +57,9 @@ header = {} headerSize = sstruct.calcsize(AVAR_HEADER_FORMAT) header = sstruct.unpack(AVAR_HEADER_FORMAT, data[0:headerSize]) - if header["version"] != 0x00010000: - raise TTLibError("unsupported 'avar' version %04x" % header["version"]) + majorVersion = header["majorVersion"] + if majorVersion != 1: + raise TTLibError("unsupported 'avar' version %d" % majorVersion) pos = headerSize for axis in axisTags: segments = self.segments[axis] = {} @@ -57,7 +69,6 @@ fromValue, toValue = struct.unpack(">hh", data[pos:pos+4]) segments[fixedToFloat(fromValue, 14)] = fixedToFloat(toValue, 14) pos = pos + 4 - self.fixupSegments_(warn=warnings.warn) def toXML(self, writer, ttFont, progress=None): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] @@ -65,6 +76,10 @@ writer.begintag("segment", axis=axis) writer.newline() for key, value in sorted(self.segments[axis].items()): + # roundtrip float -> fixed -> float to normalize TTX output + # as dumped after decompiling or straight from varLib + key = 
fixedToFloat(floatToFixed(key, 14), 14) + value = fixedToFloat(floatToFixed(value, 14), 14) writer.simpletag("mapping", **{"from": key, "to": value}) writer.newline() writer.endtag("segment") @@ -81,14 +96,6 @@ fromValue = safeEval(elementAttrs["from"]) toValue = safeEval(elementAttrs["to"]) if fromValue in segment: - warnings.warn("duplicate entry for %s in axis '%s'" % - (fromValue, axis)) + log.warning("duplicate entry for %s in axis '%s'", + fromValue, axis) segment[fromValue] = toValue - self.fixupSegments_(warn=warnings.warn) - - def fixupSegments_(self, warn): - for axis, mappings in self.segments.items(): - for k in [-1.0, 0.0, 1.0]: - if mappings.get(k) != k: - warn("avar axis '%s' should map %s to %s" % (axis, k, k)) - mappings[k] = k diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_a_v_a_r_test.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_a_v_a_r_test.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_a_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_a_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._a_v_a_r import table__a_v_a_r -from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis -import collections -import unittest - - -TEST_DATA = deHexStr( - "00 01 00 00 00 00 00 02 " - "00 04 C0 00 C0 00 00 00 00 00 13 33 33 33 40 00 40 00 " - "00 03 C0 00 C0 00 00 00 00 00 40 00 40 00") - - -class AxisVariationTableTest(unittest.TestCase): - def test_compile(self): - avar = table__a_v_a_r() - avar.segments["wdth"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} - avar.segments["wght"] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} - self.assertEqual(TEST_DATA, avar.compile(self.makeFont(["wdth", "wght"]))) - - def 
test_decompile(self): - avar = table__a_v_a_r() - avar.decompile(TEST_DATA, self.makeFont(["wdth", "wght"])) - self.assertEqual({ - "wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}, - "wght": {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} - }, avar.segments) - - def test_decompile_unsupportedVersion(self): - avar = table__a_v_a_r() - font = self.makeFont(["wdth", "wght"]) - self.assertRaises(TTLibError, avar.decompile, deHexStr("02 01 03 06 00 00 00 00"), font) - - def test_toXML(self): - avar = table__a_v_a_r() - avar.segments["opsz"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} - writer = XMLWriter(BytesIO()) - avar.toXML(writer, self.makeFont(["opsz"])) - self.assertEqual([ - '', - '', - '', - '', - '', - '' - ], self.xml_lines(writer)) - - def test_fromXML(self): - avar = table__a_v_a_r() - avar.fromXML("segment", {"axis":"wdth"}, [ - ("mapping", {"from": "-1.0", "to": "-1.0"}, []), - ("mapping", {"from": "0.0", "to": "0.0"}, []), - ("mapping", {"from": "0.7", "to": "0.2"}, []), - ("mapping", {"from": "1.0", "to": "1.0"}, []) - ], ttFont=None) - self.assertEqual({"wdth": {-1: -1, 0: 0, 0.7: 0.2, 1.0: 1.0}}, avar.segments) - - def test_fixupSegments(self): - avar = table__a_v_a_r() - avar.segments = {"wdth": {0.3: 0.8, 1.0: 0.7}} - warnings = [] - avar.fixupSegments_(lambda w: warnings.append(w)) - self.assertEqual({"wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}}, avar.segments) - self.assertEqual([ - "avar axis 'wdth' should map -1.0 to -1.0", - "avar axis 'wdth' should map 0.0 to 0.0", - "avar axis 'wdth' should map 1.0 to 1.0" - ], warnings) - - @staticmethod - def makeFont(axisTags): - """['opsz', 'wdth'] --> ttFont""" - fvar = table__f_v_a_r() - for tag in axisTags: - axis = Axis() - axis.axisTag = tag - fvar.axes.append(axis) - return {"fvar": fvar} - - @staticmethod - def xml_lines(writer): - content = writer.file.getvalue().decode("utf-8") - return [line.strip() for line in content.splitlines()][1:] - - -if __name__ == "__main__": - unittest.main() diff -Nru 
fonttools-3.0/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py 2018-01-08 12:40:40.000000000 +0000 @@ -4,8 +4,11 @@ from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval +import logging +log = logging.getLogger(__name__) + bigGlyphMetricsFormat = """ > # big endian height: B @@ -48,7 +51,7 @@ if name in metricNames: vars(self)[name] = safeEval(attrs['value']) else: - print("Warning: unknown name '%s' being ignored in %s." % name, self.__class__.__name__) + log.warning("unknown name '%s' being ignored in %s.", name, self.__class__.__name__) class BigGlyphMetrics(BitmapGlyphMetrics): diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_b_s_l_n.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_b_s_l_n.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_b_s_l_n.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_b_s_l_n.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html +class table__b_s_l_n(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/C_F_F__2.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/C_F_F__2.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/C_F_F__2.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/C_F_F__2.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import cffLib +from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_ 
+ + +class table_C_F_F__2(table_C_F_F_): + + def decompile(self, data, otFont): + self.cff.decompile(BytesIO(data), otFont, isCFF2=True) + assert len(self.cff) == 1, "can't deal with multi-font CFF tables." + + def compile(self, otFont): + f = BytesIO() + self.cff.compile(f, otFont, isCFF2=True) + return f.getvalue() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/C_F_F_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/C_F_F_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/C_F_F_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/C_F_F_.py 2018-01-08 12:40:40.000000000 +0000 @@ -6,18 +6,18 @@ class table_C_F_F_(DefaultTable.DefaultTable): - def __init__(self, tag): + def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.cff = cffLib.CFFFontSet() self._gaveGlyphOrder = False def decompile(self, data, otFont): - self.cff.decompile(BytesIO(data), otFont) + self.cff.decompile(BytesIO(data), otFont, isCFF2=False) assert len(self.cff) == 1, "can't deal with multi-font CFF tables." 
def compile(self, otFont): f = BytesIO() - self.cff.compile(f, otFont) + self.cff.compile(f, otFont, isCFF2=False) return f.getvalue() def haveGlyphNames(self): @@ -44,4 +44,4 @@ def fromXML(self, name, attrs, content, otFont): if not hasattr(self, "cff"): self.cff = cffLib.CFFFontSet() - self.cff.fromXML(name, attrs, content) + self.cff.fromXML(name, attrs, content, otFont) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_c_i_d_g.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_c_i_d_g.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_c_i_d_g.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_c_i_d_g.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ +# coding: utf-8 +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# The AAT ‘cidg’ table has almost the same structure as ‘gidc’, +# just mapping CIDs to GlyphIDs instead of the reverse direction. +# +# It is useful for fonts that may be used by a PDF renderer in lieu of +# a font reference with a known glyph collection but no subsetted +# glyphs. For instance, a PDF can say “please use a font conforming +# to Adobe-Japan-1”; the ‘cidg’ mapping is necessary if the font is, +# say, a TrueType font. ‘gidc’ is lossy for this purpose and is +# obsoleted by ‘cidg’. +# +# For example, the first font in /System/Library/Fonts/PingFang.ttc +# (which Apple ships pre-installed on MacOS 10.12.6) has a ‘cidg’ table. 
+class table__c_i_d_g(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_c_m_a_p.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_c_m_a_p.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_c_m_a_p.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_c_m_a_p.py 2018-01-08 12:40:40.000000000 +0000 @@ -9,8 +9,26 @@ import struct import array import operator +import logging +log = logging.getLogger(__name__) + + +def _make_map(font, chars, gids): + assert len(chars) == len(gids) + cmap = {} + glyphOrder = font.getGlyphOrder() + for char,gid in zip(chars,gids): + if gid is 0: + continue + try: + name = glyphOrder[gid] + except IndexError: + name = font.getGlyphName(gid) + cmap[char] = name + return cmap + class table__c_m_a_p(DefaultTable.DefaultTable): def getcmap(self, platformID, platEncID): @@ -20,6 +38,36 @@ return subtable return None # not found + def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))): + """Return the 'best' unicode cmap dictionary available in the font, + or None, if no unicode cmap subtable is available. + + By default it will search for the following (platformID, platEncID) + pairs: + (3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0) + This can be customized via the cmapPreferences argument. + """ + for platformID, platEncID in cmapPreferences: + cmapSubtable = self.getcmap(platformID, platEncID) + if cmapSubtable is not None: + return cmapSubtable.cmap + return None # None of the requested cmap subtables were found + + def buildReversed(self): + """Returns a reverse cmap such as {'one':{0x31}, 'A':{0x41,0x391}}. + + The values are sets of Unicode codepoints because + some fonts map different codepoints to the same glyph. + For example, U+0041 LATIN CAPITAL LETTER A and U+0391 + GREEK CAPITAL LETTER ALPHA are sometimes the same glyph. 
+ """ + result = {} + for subtable in self.tables: + if subtable.isUnicode(): + for codepoint, name in subtable.cmap.items(): + result.setdefault(name, set()).add(codepoint) + return result + def decompile(self, data, ttFont): tableVersion, numSubTables = struct.unpack(">HH", data[:4]) self.tableVersion = int(tableVersion) @@ -36,7 +84,10 @@ format, length = struct.unpack(">HL", data[offset:offset+6]) if not length: - print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset)) + log.error( + "cmap subtable is reported as having zero length: platformID %s, " + "platEncID %s, format %s offset %s. Skipping table.", + platformID, platEncID, format, offset) continue table = CmapSubtable.newSubtable(format) table.platformID = platformID @@ -202,26 +253,22 @@ assert (data is None and ttFont is None), "Need both data and ttFont arguments" data = self.data # decompileHeader assigns the data after the header to self.data assert 262 == self.length, "Format 0 cmap subtable not 262 bytes" - glyphIdArray = array.array("B") - glyphIdArray.fromstring(self.data) - self.cmap = cmap = {} - lenArray = len(glyphIdArray) - charCodes = list(range(lenArray)) - names = map(self.ttFont.getGlyphName, glyphIdArray) - list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) + gids = array.array("B") + gids.fromstring(self.data) + charCodes = list(range(len(gids))) + self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: return struct.pack(">HHH", 0, 262, self.language) + self.data - charCodeList = sorted(self.cmap.items()) - charCodes = [entry[0] for entry in charCodeList] - valueList = [entry[1] for entry in charCodeList] - assert charCodes == list(range(256)) - valueList = map(ttFont.getGlyphID, valueList) + cmap = self.cmap + assert set(cmap.keys()).issubset(range(256)) + getGlyphID = ttFont.getGlyphID + valueList = [getGlyphID(cmap[i]) 
if i in cmap else 0 for i in range(256)] - glyphIdArray = array.array("B", valueList) - data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring() + gids = array.array("B", valueList) + data = struct.pack(">HHH", 0, 262, self.language) + gids.tostring() assert len(data) == 262 return data @@ -351,7 +398,7 @@ # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew! self.data = b"" - self.cmap = cmap = {} + cmap = {} notdefGI = 0 for firstByte in range(256): subHeadindex = subHeaderKeys[firstByte] @@ -382,17 +429,10 @@ # If not subHeader.entryCount, then all char codes with this first byte are # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the # same as mapping it to .notdef. - # cmap values are GID's. - glyphOrder = self.ttFont.getGlyphOrder() + gids = list(cmap.values()) charCodes = list(cmap.keys()) - lenCmap = len(gids) - try: - names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, gids )) - list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: @@ -700,15 +740,7 @@ glyphID = 0 # missing glyph gids.append(glyphID & 0xFFFF) - self.cmap = cmap = {} - lenCmap = len(gids) - glyphOrder = self.ttFont.getGlyphOrder() - try: - names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, gids )) - list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: @@ -838,23 +870,14 @@ firstCode = int(firstCode) data = data[4:] #assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!! 
- glyphIndexArray = array.array("H") - glyphIndexArray.fromstring(data[:2 * int(entryCount)]) + gids = array.array("H") + gids.fromstring(data[:2 * int(entryCount)]) if sys.byteorder != "big": - glyphIndexArray.byteswap() + gids.byteswap() self.data = data = None - self.cmap = cmap = {} - - lenArray = len(glyphIndexArray) - charCodes = list(range(firstCode, firstCode + lenArray)) - glyphOrder = self.ttFont.getGlyphOrder() - try: - names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, glyphIndexArray )) - list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) + charCodes = list(range(firstCode, firstCode + len(gids))) + self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: @@ -864,12 +887,14 @@ if codes: # yes, there are empty cmap tables. codes = list(range(codes[0], codes[-1] + 1)) firstCode = codes[0] - valueList = [cmap.get(code, ".notdef") for code in codes] - valueList = map(ttFont.getGlyphID, valueList) - glyphIndexArray = array.array("H", valueList) + valueList = [ + ttFont.getGlyphID(cmap[code]) if code in cmap else 0 + for code in codes + ] + gids = array.array("H", valueList) if sys.byteorder != "big": - glyphIndexArray.byteswap() - data = glyphIndexArray.tostring() + gids.byteswap() + data = gids.tostring() else: data = b"" firstCode = 0 @@ -930,15 +955,7 @@ charCodes.extend(list(range(startCharCode, endCharCode +1))) gids.extend(self._computeGIDs(glyphID, lenGroup)) self.data = data = None - self.cmap = cmap = {} - lenCmap = len(gids) - glyphOrder = self.ttFont.getGlyphOrder() - try: - names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, gids )) - list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, 
ttFont): if self.data: @@ -1243,9 +1260,8 @@ data = bytesjoin(varSelectorRecords) + bytesjoin(data) self.length = 10 + len(data) headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) - self.data = headerdata + data - return self.data + return headerdata + data class cmap_format_unknown(CmapSubtable): diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_c_m_a_p_test.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_c_m_a_p_test.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_c_m_a_p_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_c_m_a_p_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools import ttLib -import unittest -from ._c_m_a_p import CmapSubtable - -class CmapSubtableTest(unittest.TestCase): - - def makeSubtable(self, platformID, platEncID, langID): - subtable = CmapSubtable(None) - subtable.platformID, subtable.platEncID, subtable.language = (platformID, platEncID, langID) - return subtable - - def test_toUnicode_utf16be(self): - subtable = self.makeSubtable(0, 2, 7) - self.assertEqual("utf_16_be", subtable.getEncoding()) - self.assertEqual(True, subtable.isUnicode()) - - def test_toUnicode_macroman(self): - subtable = self.makeSubtable(1, 0, 7) # MacRoman - self.assertEqual("mac_roman", subtable.getEncoding()) - self.assertEqual(False, subtable.isUnicode()) - - def test_toUnicode_macromanian(self): - subtable = self.makeSubtable(1, 0, 37) # Mac Romanian - self.assertNotEqual(None, subtable.getEncoding()) - self.assertEqual(False, subtable.isUnicode()) - - def test_extended_mac_encodings(self): - subtable = self.makeSubtable(1, 1, 0) # Mac Japanese - self.assertNotEqual(None, subtable.getEncoding()) - self.assertEqual(False, subtable.isUnicode()) - - def test_extended_unknown(self): - subtable = self.makeSubtable(10, 11, 12) - 
self.assertEqual(subtable.getEncoding(), None) - self.assertEqual(subtable.getEncoding("ascii"), "ascii") - self.assertEqual(subtable.getEncoding(default="xyz"), "xyz") - - def test_decompile_4(self): - subtable = CmapSubtable.newSubtable(4) - font = ttLib.TTFont() - font.setGlyphOrder([]) - subtable.decompile(b'\0' * 3 + b'\x10' + b'\0' * 12, font) - - def test_decompile_12(self): - subtable = CmapSubtable.newSubtable(12) - font = ttLib.TTFont() - font.setGlyphOrder([]) - subtable.decompile(b'\0' * 7 + b'\x10' + b'\0' * 8, font) - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/C_P_A_L_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/C_P_A_L_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/C_P_A_L_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/C_P_A_L_.py 2018-01-08 12:40:40.000000000 +0000 @@ -6,14 +6,23 @@ from fontTools.misc.py23 import * from fontTools.misc.textTools import safeEval from . import DefaultTable +import array import struct +import sys class table_C_P_A_L_(DefaultTable.DefaultTable): + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.palettes = [] + self.paletteTypes = [] + self.paletteLabels = [] + self.paletteEntryLabels = [] + def decompile(self, data, ttFont): self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12]) - assert (self.version == 0), "Version of COLR table is higher than I know how to handle" + assert (self.version <= 1), "Version of CPAL table is higher than I know how to handle" self.palettes = [] pos = 12 for i in range(numPalettes): @@ -26,51 +35,202 @@ palette.append( Color(*struct.unpack(">BBBB", data[ppos:ppos+4])) ) ppos += 4 self.palettes.append(palette) + if self.version == 0: + offsetToPaletteTypeArray = 0 + offsetToPaletteLabelArray = 0 + offsetToPaletteEntryLabelArray = 0 + else: + pos = 12 + numPalettes * 2 + 
(offsetToPaletteTypeArray, offsetToPaletteLabelArray, + offsetToPaletteEntryLabelArray) = ( + struct.unpack(">LLL", data[pos:pos+12])) + self.paletteTypes = self._decompileUInt32Array( + data, offsetToPaletteTypeArray, numPalettes) + self.paletteLabels = self._decompileUInt16Array( + data, offsetToPaletteLabelArray, numPalettes) + self.paletteEntryLabels = self._decompileUInt16Array( + data, offsetToPaletteEntryLabelArray, + self.numPaletteEntries) + + def _decompileUInt16Array(self, data, offset, numElements): + if offset == 0: + return [0] * numElements + result = array.array("H", data[offset : offset + 2 * numElements]) + if sys.byteorder != "big": + result.byteswap() + assert len(result) == numElements, result + return result.tolist() + + def _decompileUInt32Array(self, data, offset, numElements): + if offset == 0: + return [0] * numElements + result = array.array("I", data[offset : offset + 4 * numElements]) + if sys.byteorder != "big": + result.byteswap() + assert len(result) == numElements, result + return result.tolist() def compile(self, ttFont): - dataList = [struct.pack(">HHHHL", self.version, self.numPaletteEntries, len(self.palettes), self.numPaletteEntries * len(self.palettes), 12+2*len(self.palettes))] - for i in range(len(self.palettes)): - dataList.append(struct.pack(">H", i*self.numPaletteEntries)) + colorRecordIndices, colorRecords = self._compileColorRecords() + paletteTypes = self._compilePaletteTypes() + paletteLabels = self._compilePaletteLabels() + paletteEntryLabels = self._compilePaletteEntryLabels() + numColorRecords = len(colorRecords) // 4 + offsetToFirstColorRecord = 12 + len(colorRecordIndices) + if self.version >= 1: + offsetToFirstColorRecord += 12 + header = struct.pack(">HHHHL", self.version, + self.numPaletteEntries, len(self.palettes), + numColorRecords, offsetToFirstColorRecord) + if self.version == 0: + dataList = [header, colorRecordIndices, colorRecords] + else: + pos = offsetToFirstColorRecord + len(colorRecords) + if 
len(paletteTypes) == 0: + offsetToPaletteTypeArray = 0 + else: + offsetToPaletteTypeArray = pos + pos += len(paletteTypes) + if len(paletteLabels) == 0: + offsetToPaletteLabelArray = 0 + else: + offsetToPaletteLabelArray = pos + pos += len(paletteLabels) + if len(paletteEntryLabels) == 0: + offsetToPaletteEntryLabelArray = 0 + else: + offsetToPaletteEntryLabelArray = pos + pos += len(paletteLabels) + header1 = struct.pack(">LLL", + offsetToPaletteTypeArray, + offsetToPaletteLabelArray, + offsetToPaletteEntryLabelArray) + dataList = [header, colorRecordIndices, header1, + colorRecords, paletteTypes, paletteLabels, + paletteEntryLabels] + return bytesjoin(dataList) + + def _compilePalette(self, palette): + assert(len(palette) == self.numPaletteEntries) + pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha) + return bytesjoin([pack(color) for color in palette]) + + def _compileColorRecords(self): + colorRecords, colorRecordIndices, pool = [], [], {} for palette in self.palettes: - assert(len(palette) == self.numPaletteEntries) - for color in palette: - dataList.append(struct.pack(">BBBB", color.blue,color.green,color.red,color.alpha)) - data = bytesjoin(dataList) - return data + packedPalette = self._compilePalette(palette) + if packedPalette in pool: + index = pool[packedPalette] + else: + index = len(colorRecords) + colorRecords.append(packedPalette) + pool[packedPalette] = index + colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries)) + return bytesjoin(colorRecordIndices), bytesjoin(colorRecords) + + def _compilePaletteTypes(self): + if self.version == 0 or not any(self.paletteTypes): + return b'' + assert len(self.paletteTypes) == len(self.palettes) + result = bytesjoin([struct.pack(">I", ptype) + for ptype in self.paletteTypes]) + assert len(result) == 4 * len(self.palettes) + return result + + def _compilePaletteLabels(self): + if self.version == 0 or not any(self.paletteLabels): + return b'' + assert 
len(self.paletteLabels) == len(self.palettes) + result = bytesjoin([struct.pack(">H", label) + for label in self.paletteLabels]) + assert len(result) == 2 * len(self.palettes) + return result + + def _compilePaletteEntryLabels(self): + if self.version == 0 or not any(self.paletteEntryLabels): + return b'' + assert len(self.paletteEntryLabels) == self.numPaletteEntries + result = bytesjoin([struct.pack(">H", label) + for label in self.paletteEntryLabels]) + assert len(result) == 2 * self.numPaletteEntries + return result def toXML(self, writer, ttFont): + numPalettes = len(self.palettes) + paletteLabels = {i: nameID + for (i, nameID) in enumerate(self.paletteLabels)} + paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)} writer.simpletag("version", value=self.version) writer.newline() - writer.simpletag("numPaletteEntries", value=self.numPaletteEntries) + writer.simpletag("numPaletteEntries", + value=self.numPaletteEntries) writer.newline() for index, palette in enumerate(self.palettes): - writer.begintag("palette", index=index) + attrs = {"index": index} + paletteType = paletteTypes.get(index) + paletteLabel = paletteLabels.get(index) + if self.version > 0 and paletteLabel is not None: + attrs["label"] = paletteLabel + if self.version > 0 and paletteType is not None: + attrs["type"] = paletteType + writer.begintag("palette", **attrs) writer.newline() + if (self.version > 0 and paletteLabel and + ttFont and "name" in ttFont): + name = ttFont["name"].getDebugName(paletteLabel) + if name is not None: + writer.comment(name) + writer.newline() assert(len(palette) == self.numPaletteEntries) for cindex, color in enumerate(palette): color.toXML(writer, ttFont, cindex) writer.endtag("palette") writer.newline() + if self.version > 0 and any(self.paletteEntryLabels): + writer.begintag("paletteEntryLabels") + writer.newline() + for index, label in enumerate(self.paletteEntryLabels): + if label: + writer.simpletag("label", index=index, value=label) + if 
(self.version > 0 and label and ttFont and "name" in ttFont): + name = ttFont["name"].getDebugName(label) + if name is not None: + writer.comment(name) + writer.newline() + writer.endtag("paletteEntryLabels") + writer.newline() def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "palettes"): - self.palettes = [] if name == "palette": - palette = [] - for element in content: - if isinstance(element, basestring): - continue + self.paletteLabels.append(int(attrs.get("label", "0"))) + self.paletteTypes.append(int(attrs.get("type", "0"))) palette = [] for element in content: if isinstance(element, basestring): continue color = Color() color.fromXML(element[0], element[1], element[2], ttFont) - palette.append (color) + palette.append(color) self.palettes.append(palette) + elif name == "paletteEntryLabels": + colorLabels = {} + for element in content: + if isinstance(element, basestring): + continue + elementName, elementAttr, _ = element + if elementName == "label": + labelIndex = safeEval(elementAttr["index"]) + nameID = safeEval(elementAttr["value"]) + colorLabels[labelIndex] = nameID + self.paletteEntryLabels = [ + colorLabels.get(i, 0) + for i in range(self.numPaletteEntries)] elif "value" in attrs: value = safeEval(attrs["value"]) setattr(self, name, value) + if name == "numPaletteEntries": + self.paletteEntryLabels = [0] * self.numPaletteEntries + class Color(object): diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_c_v_a_r.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_c_v_a_r.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_c_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_c_v_a_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,84 @@ +from __future__ import \ + print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from . 
import DefaultTable +from fontTools.misc import sstruct +from fontTools.ttLib.tables.TupleVariation import \ + compileTupleVariationStore, decompileTupleVariationStore, TupleVariation + + +# https://www.microsoft.com/typography/otspec/cvar.htm +# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html + +CVAR_HEADER_FORMAT = """ + > # big endian + majorVersion: H + minorVersion: H + tupleVariationCount: H + offsetToData: H +""" + +CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT) + + +class table__c_v_a_r(DefaultTable.DefaultTable): + dependencies = ["cvt ", "fvar"] + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.majorVersion, self.minorVersion = 1, 0 + self.variations = [] + + def compile(self, ttFont, useSharedPoints=False): + tupleVariationCount, tuples, data = compileTupleVariationStore( + variations=[v for v in self.variations if v.hasImpact()], + pointCount=len(ttFont["cvt "].values), + axisTags=[axis.axisTag for axis in ttFont["fvar"].axes], + sharedTupleIndices={}, + useSharedPoints=useSharedPoints) + header = { + "majorVersion": self.majorVersion, + "minorVersion": self.minorVersion, + "tupleVariationCount": tupleVariationCount, + "offsetToData": CVAR_HEADER_SIZE + len(tuples), + } + return bytesjoin([ + sstruct.pack(CVAR_HEADER_FORMAT, header), + tuples, + data + ]) + + def decompile(self, data, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + header = {} + sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], header) + self.majorVersion = header["majorVersion"] + self.minorVersion = header["minorVersion"] + assert self.majorVersion == 1, self.majorVersion + self.variations = decompileTupleVariationStore( + tableTag=self.tableTag, axisTags=axisTags, + tupleVariationCount=header["tupleVariationCount"], + pointCount=len(ttFont["cvt "].values), sharedTuples=None, + data=data, 
pos=CVAR_HEADER_SIZE, dataPos=header["offsetToData"]) + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.majorVersion = int(attrs.get("major", "1")) + self.minorVersion = int(attrs.get("minor", "0")) + elif name == "tuple": + valueCount = len(ttFont["cvt "].values) + var = TupleVariation({}, [None] * valueCount) + self.variations.append(var) + for tupleElement in content: + if isinstance(tupleElement, tuple): + tupleName, tupleAttrs, tupleContent = tupleElement + var.fromXML(tupleName, tupleAttrs, tupleContent) + + def toXML(self, writer, ttFont, progress=None): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + writer.simpletag("version", + major=self.majorVersion, minor=self.minorVersion) + writer.newline() + for var in self.variations: + var.toXML(writer, axisTags) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/DefaultTable.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/DefaultTable.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/DefaultTable.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/DefaultTable.py 2018-01-08 12:40:40.000000000 +0000 @@ -39,9 +39,11 @@ def __repr__(self): return "<'%s' table at %x>" % (self.tableTag, id(self)) - def __ne__(self, other): - return not self.__eq__(other) def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ + + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/E_B_D_T_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/E_B_D_T_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/E_B_D_T_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/E_B_D_T_.py 2018-01-08 12:40:40.000000000 +0000 @@ -7,6 +7,10 @@ import itertools import os import struct +import logging + + +log = logging.getLogger(__name__) ebdtTableVersionFormat = """ > # big 
endian @@ -166,7 +170,7 @@ assert glyphName not in bitmapGlyphDict, "Duplicate glyphs with the same name '%s' in the same strike." % glyphName bitmapGlyphDict[glyphName] = curGlyph else: - print("Warning: %s being ignored by %s", name, self.__class__.__name__) + log.warning("%s being ignored by %s", name, self.__class__.__name__) # Grow the strike data array to the appropriate size. The XML # format allows the strike index value to be out of order. @@ -196,7 +200,7 @@ if name in componentNames: vars(self)[name] = safeEval(attrs['value']) else: - print("Warning: unknown name '%s' being ignored by EbdtComponent." % name) + log.warning("unknown name '%s' being ignored by EbdtComponent.", name) # Helper functions for dealing with binary. @@ -478,7 +482,7 @@ self.metrics = metricsClass() self.metrics.fromXML(name, attrs, content, ttFont) elif name == oppositeMetricsName: - print("Warning: %s being ignored in format %d." % oppositeMetricsName, self.getFormat()) + log.warning("Warning: %s being ignored in format %d.", oppositeMetricsName, self.getFormat()) return BitmapPlusMetricsMixin @@ -692,7 +696,7 @@ curComponent.fromXML(name, attrs, content, ttFont) self.componentArray.append(curComponent) else: - print("Warning: '%s' being ignored in component array." % name) + log.warning("'%s' being ignored in component array.", name) class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph): diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/E_B_L_C_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/E_B_L_C_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/E_B_L_C_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/E_B_L_C_.py 2018-01-08 12:40:40.000000000 +0000 @@ -7,6 +7,10 @@ import struct import itertools from collections import deque +import logging + + +log = logging.getLogger(__name__) eblcHeaderFormat = """ > # big endian @@ -71,44 +75,47 @@ # Save the original data because offsets are from the start of the table. 
origData = data + i = 0; - dummy, data = sstruct.unpack2(eblcHeaderFormat, data, self) + dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self) + i += 8; self.strikes = [] for curStrikeIndex in range(self.numSizes): curStrike = Strike() self.strikes.append(curStrike) curTable = curStrike.bitmapSizeTable - dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart1, data, curTable) + dummy = sstruct.unpack2(bitmapSizeTableFormatPart1, data[i:i+16], curTable) + i += 16 for metric in ('hori', 'vert'): metricObj = SbitLineMetrics() vars(curTable)[metric] = metricObj - dummy, data = sstruct.unpack2(sbitLineMetricsFormat, data, metricObj) - dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart2, data, curTable) + dummy = sstruct.unpack2(sbitLineMetricsFormat, data[i:i+12], metricObj) + i += 12 + dummy = sstruct.unpack(bitmapSizeTableFormatPart2, data[i:i+8], curTable) + i += 8 for curStrike in self.strikes: curTable = curStrike.bitmapSizeTable for subtableIndex in range(curTable.numberOfIndexSubTables): - lowerBound = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize - upperBound = lowerBound + indexSubTableArraySize - data = origData[lowerBound:upperBound] + i = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize - tup = struct.unpack(indexSubTableArrayFormat, data) + tup = struct.unpack(indexSubTableArrayFormat, data[i:i+indexSubTableArraySize]) (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup - offsetToIndexSubTable = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable - data = origData[offsetToIndexSubTable:] + i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable - tup = struct.unpack(indexSubHeaderFormat, data[:indexSubHeaderSize]) + tup = struct.unpack(indexSubHeaderFormat, data[i:i+indexSubHeaderSize]) (indexFormat, imageFormat, imageDataOffset) = tup indexFormatClass = self.getIndexFormatClass(indexFormat) - indexSubTable = 
indexFormatClass(data[indexSubHeaderSize:], ttFont) + indexSubTable = indexFormatClass(data[i+indexSubHeaderSize:], ttFont) indexSubTable.firstGlyphIndex = firstGlyphIndex indexSubTable.lastGlyphIndex = lastGlyphIndex indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable indexSubTable.indexFormat = indexFormat indexSubTable.imageFormat = imageFormat indexSubTable.imageDataOffset = imageDataOffset + indexSubTable.decompile() # https://github.com/behdad/fonttools/issues/317 curStrike.indexSubTables.append(indexSubTable) def compile(self, ttFont): @@ -293,7 +300,7 @@ elif name in dataNames: vars(self)[name] = safeEval(attrs['value']) else: - print("Warning: unknown name '%s' being ignored in BitmapSizeTable." % name) + log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name) class SbitLineMetrics(object): @@ -336,7 +343,6 @@ if not hasattr(self, "data"): raise AttributeError(attr) self.decompile() - del self.data, self.ttFont return getattr(self, attr) # This method just takes care of the indexSubHeader. Implementing subclasses @@ -439,6 +445,7 @@ self.names = list(map(self.ttFont.getGlyphName, glyphIds)) self.removeSkipGlyphs() + del self.data, self.ttFont def compile(self, ttFont): # First make sure that all the data lines up properly. Formats 1 and 3 @@ -503,7 +510,7 @@ self.metrics = BigGlyphMetrics() self.metrics.fromXML(name, attrs, content, ttFont) elif name == SmallGlyphMetrics.__name__: - print("Warning: SmallGlyphMetrics being ignored in format %d." % self.indexFormat) + log.warning("SmallGlyphMetrics being ignored in format %d.", self.indexFormat) def padBitmapData(self, data): # Make sure that the data isn't bigger than the fixed size. 
@@ -525,12 +532,13 @@ offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] self.locations = list(zip(offsets, offsets[1:])) self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + del self.data, self.ttFont def compile(self, ttFont): glyphIds = list(map(ttFont.getGlyphID, self.names)) # Make sure all the ids are consecutive. This is required by Format 2. assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive." - self.imageDataOffset = min(zip(*self.locations)[0]) + self.imageDataOffset = min(next(iter(zip(*self.locations)))) dataList = [EblcIndexSubTable.compile(self, ttFont)] dataList.append(struct.pack(">L", self.imageSize)) @@ -556,6 +564,7 @@ offsets = [offset + self.imageDataOffset for offset in offsets] self.locations = list(zip(offsets, offsets[1:])) self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + del self.data, self.ttFont def compile(self, ttFont): # First make sure that all the data lines up properly. 
Format 4 @@ -594,9 +603,10 @@ offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] self.locations = list(zip(offsets, offsets[1:])) self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + del self.data, self.ttFont def compile(self, ttFont): - self.imageDataOffset = min(zip(*self.locations)[0]) + self.imageDataOffset = min(next(iter(zip(*self.locations)))) dataList = [EblcIndexSubTable.compile(self, ttFont)] dataList.append(struct.pack(">L", self.imageSize)) dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/F__e_a_t.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/F__e_a_t.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/F__e_a_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/F__e_a_t.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,114 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from .otBase import BaseTTXConverter +from . import DefaultTable +from . 
import grUtils +import struct + +Feat_hdr_format=''' + > + version: 16.16F +''' + +class table_F__e_a_t(DefaultTable.DefaultTable): + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.features = {} + + def decompile(self, data, ttFont): + (_, data) = sstruct.unpack2(Feat_hdr_format, data, self) + numFeats, = struct.unpack('>H', data[:2]) + data = data[8:] + allfeats = [] + maxsetting = 0 + for i in range(numFeats): + if self.version >= 2.0: + (fid, nums, _, offset, flags, lid) = struct.unpack(">LHHLHH", + data[16*i:16*(i+1)]) + offset = int((offset - 12 - 16 * numFeats) / 4) + else: + (fid, nums, offset, flags, lid) = struct.unpack(">HHLHH", + data[12*i:12*(i+1)]) + offset = int((offset - 12 - 12 * numFeats) / 4) + allfeats.append((fid, nums, offset, flags, lid)) + maxsetting = max(maxsetting, offset + nums) + data = data[16*numFeats:] + allsettings = [] + for i in range(maxsetting): + if len(data) >= 4 * (i + 1): + (val, lid) = struct.unpack(">HH", data[4*i:4*(i+1)]) + allsettings.append((val, lid)) + for i,f in enumerate(allfeats): + (fid, nums, offset, flags, lid) = f + fobj = Feature() + fobj.flags = flags + fobj.label = lid + self.features[grUtils.num2tag(fid)] = fobj + fobj.settings = {} + fobj.default = None + fobj.index = i + for i in range(offset, offset + nums): + if i >= len(allsettings): continue + (vid, vlid) = allsettings[i] + fobj.settings[vid] = vlid + if fobj.default is None: + fobj.default = vid + + def compile(self, ttFont): + fdat = "" + vdat = "" + offset = 0 + for f, v in sorted(self.features.items(), key=lambda x:x[1].index): + fnum = grUtils.tag2num(f) + if self.version >= 2.0: + fdat += struct.pack(">LHHLHH", grUtils.tag2num(f), len(v.settings), + 0, offset * 4 + 12 + 16 * len(self.features), v.flags, v.label) + elif fnum > 65535: # self healing for alphabetic ids + self.version = 2.0 + return self.compile(ttFont) + else: + fdat += struct.pack(">HHLHH", grUtils.tag2num(f), len(v.settings), + offset * 4 + 
12 + 12 * len(self.features), v.flags, v.label) + for s, l in sorted(v.settings.items(), key=lambda x:(-1, x[1]) if x[0] == v.default else x): + vdat += struct.pack(">HH", s, l) + offset += len(v.settings) + hdr = sstruct.pack(Feat_hdr_format, self) + return hdr + struct.pack('>HHL', len(self.features), 0, 0) + fdat + vdat + + def toXML(self, writer, ttFont): + writer.simpletag('version', version=self.version) + writer.newline() + for f, v in sorted(self.features.items(), key=lambda x:x[1].index): + writer.begintag('feature', fid=f, label=v.label, flags=v.flags, + default=(v.default if v.default else 0)) + writer.newline() + for s, l in sorted(v.settings.items()): + writer.simpletag('setting', value=s, label=l) + writer.newline() + writer.endtag('feature') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'version': + self.version = float(safeEval(attrs['version'])) + elif name == 'feature': + fid = attrs['fid'] + fobj = Feature() + fobj.flags = int(safeEval(attrs['flags'])) + fobj.label = int(safeEval(attrs['label'])) + fobj.default = int(safeEval(attrs.get('default','0'))) + fobj.index = len(self.features) + self.features[fid] = fobj + fobj.settings = {} + for element in content: + if not isinstance(element, tuple): continue + tag, a, c = element + if tag == 'setting': + fobj.settings[int(safeEval(a['value']))] = int(safeEval(a['label'])) + +class Feature(object): + pass + diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_f_p_g_m.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_f_p_g_m.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_f_p_g_m.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_f_p_g_m.py 2018-01-08 12:40:40.000000000 +0000 @@ -15,7 +15,6 @@ def toXML(self, writer, ttFont): self.program.toXML(writer, ttFont) - writer.newline() def fromXML(self, name, attrs, content, ttFont): program = ttProgram.Program() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_f_v_a_r.py 
fonttools-3.21.2/Lib/fontTools/ttLib/tables/_f_v_a_r.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_f_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_f_v_a_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -29,24 +29,29 @@ defaultValue: 16.16F maxValue: 16.16F flags: H - nameID: H + axisNameID: H """ FVAR_INSTANCE_FORMAT = """ > # big endian - nameID: H + subfamilyNameID: H flags: H """ class table__f_v_a_r(DefaultTable.DefaultTable): dependencies = ["name"] - def __init__(self, tag="fvar"): + def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.axes = [] self.instances = [] def compile(self, ttFont): + instanceSize = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + (len(self.axes) * 4) + includePostScriptNames = any(instance.postscriptNameID != 0xFFFF + for instance in self.instances) + if includePostScriptNames: + instanceSize += 2 header = { "version": 0x00010000, "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT), @@ -54,12 +59,13 @@ "axisCount": len(self.axes), "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT), "instanceCount": len(self.instances), - "instanceSize": sstruct.calcsize(FVAR_INSTANCE_FORMAT) + len(self.axes) * 4 + "instanceSize": instanceSize, } result = [sstruct.pack(FVAR_HEADER_FORMAT, header)] result.extend([axis.compile() for axis in self.axes]) axisTags = [axis.axisTag for axis in self.axes] - result.extend([instance.compile(axisTags) for instance in self.instances]) + for instance in self.instances: + result.append(instance.compile(axisTags, includePostScriptNames)) return bytesjoin(result) def decompile(self, data, ttFont): @@ -102,8 +108,8 @@ class Axis(object): def __init__(self): self.axisTag = None - self.nameID = 0 - self.flags = 0 # not exposed in XML because spec defines no values + self.axisNameID = 0 + self.flags = 0 self.minValue = -1.0 self.defaultValue = 0.0 self.maxValue = 1.0 @@ -115,7 +121,7 @@ sstruct.unpack2(FVAR_AXIS_FORMAT, data, self) def toXML(self, writer, 
ttFont): - name = ttFont["name"].getDebugName(self.nameID) + name = ttFont["name"].getDebugName(self.axisNameID) if name is not None: writer.newline() writer.comment(name) @@ -123,10 +129,11 @@ writer.begintag("Axis") writer.newline() for tag, value in [("AxisTag", self.axisTag), + ("Flags", "0x%X" % self.flags), ("MinValue", str(self.minValue)), ("DefaultValue", str(self.defaultValue)), ("MaxValue", str(self.maxValue)), - ("NameID", str(self.nameID))]: + ("AxisNameID", str(self.axisNameID))]: writer.begintag(tag) writer.write(value) writer.endtag(tag) @@ -139,21 +146,26 @@ for tag, _, value in filter(lambda t: type(t) is tuple, content): value = ''.join(value) if tag == "AxisTag": - self.axisTag = value - elif tag in ["MinValue", "DefaultValue", "MaxValue", "NameID"]: + self.axisTag = Tag(value) + elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue", + "AxisNameID"}: setattr(self, tag[0].lower() + tag[1:], safeEval(value)) + class NamedInstance(object): def __init__(self): - self.nameID = 0 - self.flags = 0 # not exposed in XML because spec defines no values + self.subfamilyNameID = 0 + self.postscriptNameID = 0xFFFF + self.flags = 0 self.coordinates = {} - def compile(self, axisTags): + def compile(self, axisTags, includePostScriptName): result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)] for axis in axisTags: fixedCoord = floatToFixed(self.coordinates[axis], 16) result.append(struct.pack(">l", fixedCoord)) + if includePostScriptName: + result.append(struct.pack(">H", self.postscriptNameID)) return bytesjoin(result) def decompile(self, data, axisTags): @@ -163,14 +175,28 @@ value = struct.unpack(">l", data[pos : pos + 4])[0] self.coordinates[axis] = fixedToFloat(value, 16) pos += 4 + if pos + 2 <= len(data): + self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0] + else: + self.postscriptNameID = 0xFFFF def toXML(self, writer, ttFont): - name = ttFont["name"].getDebugName(self.nameID) + name = 
ttFont["name"].getDebugName(self.subfamilyNameID) if name is not None: writer.newline() writer.comment(name) writer.newline() - writer.begintag("NamedInstance", nameID=self.nameID) + psname = ttFont["name"].getDebugName(self.postscriptNameID) + if psname is not None: + writer.comment(u"PostScript: " + psname) + writer.newline() + if self.postscriptNameID == 0xFFFF: + writer.begintag("NamedInstance", flags=("0x%X" % self.flags), + subfamilyNameID=self.subfamilyNameID) + else: + writer.begintag("NamedInstance", flags=("0x%X" % self.flags), + subfamilyNameID=self.subfamilyNameID, + postscriptNameID=self.postscriptNameID, ) writer.newline() for axis in ttFont["fvar"].axes: writer.simpletag("coord", axis=axis.axisTag, @@ -181,7 +207,13 @@ def fromXML(self, name, attrs, content, ttFont): assert(name == "NamedInstance") - self.nameID = safeEval(attrs["nameID"]) + self.subfamilyNameID = safeEval(attrs["subfamilyNameID"]) + self.flags = safeEval(attrs.get("flags", "0")) + if "postscriptNameID" in attrs: + self.postscriptNameID = safeEval(attrs["postscriptNameID"]) + else: + self.postscriptNameID = 0xFFFF + for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content): if tag == "coord": self.coordinates[elementAttrs["axis"]] = safeEval(elementAttrs["value"]) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_f_v_a_r_test.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_f_v_a_r_test.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_f_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_f_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,190 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis, NamedInstance -from fontTools.ttLib.tables._n_a_m_e 
import table__n_a_m_e, NameRecord -import unittest - - - -FVAR_DATA = deHexStr( - "00 01 00 00 00 10 00 02 00 02 00 14 00 02 00 0C " - "77 67 68 74 00 64 00 00 01 90 00 00 03 84 00 00 00 00 01 01 " - "77 64 74 68 00 32 00 00 00 64 00 00 00 c8 00 00 00 00 01 02 " - "01 03 00 00 01 2c 00 00 00 64 00 00 " - "01 04 00 00 01 2c 00 00 00 4b 00 00") - -FVAR_AXIS_DATA = deHexStr( - "6F 70 73 7a ff ff 80 00 00 01 4c cd 00 01 80 00 00 00 01 59") - -FVAR_INSTANCE_DATA = deHexStr("01 59 00 00 00 00 b3 33 00 00 80 00") - - -def xml_lines(writer): - content = writer.file.getvalue().decode("utf-8") - return [line.strip() for line in content.splitlines()][1:] - - -def AddName(font, name): - nameTable = font.get("name") - if nameTable is None: - nameTable = font["name"] = table__n_a_m_e() - nameTable.names = [] - namerec = NameRecord() - namerec.nameID = 1 + max([n.nameID for n in nameTable.names] + [256]) - namerec.string = name.encode('mac_roman') - namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0) - nameTable.names.append(namerec) - return namerec - - -def MakeFont(): - axes = [("wght", "Weight", 100, 400, 900), ("wdth", "Width", 50, 100, 200)] - instances = [("Light", 300, 100), ("Light Condensed", 300, 75)] - fvarTable = table__f_v_a_r() - font = {"fvar": fvarTable} - for tag, name, minValue, defaultValue, maxValue in axes: - axis = Axis() - axis.axisTag = tag - axis.defaultValue = defaultValue - axis.minValue, axis.maxValue = minValue, maxValue - axis.nameID = AddName(font, name).nameID - fvarTable.axes.append(axis) - for name, weight, width in instances: - inst = NamedInstance() - inst.nameID = AddName(font, name).nameID - inst.coordinates = {"wght": weight, "wdth": width} - fvarTable.instances.append(inst) - return font - - -class FontVariationTableTest(unittest.TestCase): - def test_compile(self): - font = MakeFont() - h = font["fvar"].compile(font) - self.assertEqual(FVAR_DATA, font["fvar"].compile(font)) - - def test_decompile(self): - fvar = 
table__f_v_a_r() - fvar.decompile(FVAR_DATA, ttFont={"fvar": fvar}) - self.assertEqual(["wght", "wdth"], [a.axisTag for a in fvar.axes]) - self.assertEqual([259, 260], [i.nameID for i in fvar.instances]) - - def test_toXML(self): - font = MakeFont() - writer = XMLWriter(BytesIO()) - font["fvar"].toXML(writer, font) - xml = writer.file.getvalue().decode("utf-8") - self.assertEqual(2, xml.count("")) - self.assertTrue("wght" in xml) - self.assertTrue("wdth" in xml) - self.assertEqual(2, xml.count("" in xml) - self.assertTrue("" in xml) - - def test_fromXML(self): - fvar = table__f_v_a_r() - fvar.fromXML("Axis", {}, [("AxisTag", {}, ["opsz"])], ttFont=None) - fvar.fromXML("Axis", {}, [("AxisTag", {}, ["slnt"])], ttFont=None) - fvar.fromXML("NamedInstance", {"nameID": "765"}, [], ttFont=None) - fvar.fromXML("NamedInstance", {"nameID": "234"}, [], ttFont=None) - self.assertEqual(["opsz", "slnt"], [a.axisTag for a in fvar.axes]) - self.assertEqual([765, 234], [i.nameID for i in fvar.instances]) - - -class AxisTest(unittest.TestCase): - def test_compile(self): - axis = Axis() - axis.axisTag, axis.nameID = ('opsz', 345) - axis.minValue, axis.defaultValue, axis.maxValue = (-0.5, 1.3, 1.5) - self.assertEqual(FVAR_AXIS_DATA, axis.compile()) - - def test_decompile(self): - axis = Axis() - axis.decompile(FVAR_AXIS_DATA) - self.assertEqual("opsz", axis.axisTag) - self.assertEqual(345, axis.nameID) - self.assertEqual(-0.5, axis.minValue) - self.assertEqual(1.3, axis.defaultValue) - self.assertEqual(1.5, axis.maxValue) - - def test_toXML(self): - font = MakeFont() - axis = Axis() - axis.decompile(FVAR_AXIS_DATA) - AddName(font, "Optical Size").nameID = 256 - axis.nameID = 256 - writer = XMLWriter(BytesIO()) - axis.toXML(writer, font) - self.assertEqual([ - '', - '', - '', - 'opsz', - '-0.5', - '1.3', - '1.5', - '256', - '' - ], xml_lines(writer)) - - def test_fromXML(self): - axis = Axis() - axis.fromXML("Axis", {}, [ - ("AxisTag", {}, ["wght"]), - ("MinValue", {}, ["100"]), - 
("DefaultValue", {}, ["400"]), - ("MaxValue", {}, ["900"]), - ("NameID", {}, ["256"]) - ], ttFont=None) - self.assertEqual("wght", axis.axisTag) - self.assertEqual(100, axis.minValue) - self.assertEqual(400, axis.defaultValue) - self.assertEqual(900, axis.maxValue) - self.assertEqual(256, axis.nameID) - - -class NamedInstanceTest(unittest.TestCase): - def test_compile(self): - inst = NamedInstance() - inst.nameID = 345 - inst.coordinates = {"wght": 0.7, "wdth": 0.5} - self.assertEqual(FVAR_INSTANCE_DATA, inst.compile(["wght", "wdth"])) - - def test_decompile(self): - inst = NamedInstance() - inst.decompile(FVAR_INSTANCE_DATA, ["wght", "wdth"]) - self.assertEqual(345, inst.nameID) - self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) - - def test_toXML(self): - font = MakeFont() - inst = NamedInstance() - inst.nameID = AddName(font, "Light Condensed").nameID - inst.coordinates = {"wght": 0.7, "wdth": 0.5} - writer = XMLWriter(BytesIO()) - inst.toXML(writer, font) - self.assertEqual([ - '', - '', - '' % inst.nameID, - '', - '', - '' - ], xml_lines(writer)) - - def test_fromXML(self): - inst = NamedInstance() - attrs = {"nameID": "345"} - inst.fromXML("NamedInstance", attrs, [ - ("coord", {"axis": "wght", "value": "0.7"}, []), - ("coord", {"axis": "wdth", "value": "0.5"}, []), - ], ttFont=MakeFont()) - self.assertEqual(345, inst.nameID) - self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_g_c_i_d.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_g_c_i_d.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_g_c_i_d.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_g_c_i_d.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# 
https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gcid.html +class table__g_c_i_d(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/G__l_a_t.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/G__l_a_t.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/G__l_a_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/G__l_a_t.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,221 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from itertools import * +from functools import partial +from . import DefaultTable +from . import grUtils +import struct, operator, warnings +try: + import lz4 +except: + lz4 = None + + +Glat_format_0 = """ + > # big endian + version: 16.16F +""" + +Glat_format_3 = """ + > + version: 16.16F + compression:L # compression scheme or reserved +""" + +Glat_format_1_entry = """ + > + attNum: B # Attribute number of first attribute + num: B # Number of attributes in this run +""" +Glat_format_23_entry = """ + > + attNum: H # Attribute number of first attribute + num: H # Number of attributes in this run +""" + +Glat_format_3_octabox_metrics = """ + > + subboxBitmap: H # Which subboxes exist on 4x4 grid + diagNegMin: B # Defines minimum negatively-sloped diagonal (si) + diagNegMax: B # Defines maximum negatively-sloped diagonal (sa) + diagPosMin: B # Defines minimum positively-sloped diagonal (di) + diagPosMax: B # Defines maximum positively-sloped diagonal (da) +""" + +Glat_format_3_subbox_entry = """ + > + left: B # xi + right: B # xa + bottom: B # yi + top: B # ya + diagNegMin: B # Defines minimum negatively-sloped diagonal (si) + diagNegMax: B # Defines maximum negatively-sloped diagonal (sa) + diagPosMin: B # Defines minimum positively-sloped diagonal (di) + diagPosMax: B # Defines maximum positively-sloped diagonal (da) +""" + +class 
_Object() : + pass + +class _Dict(dict) : + pass + +class table_G__l_a_t(DefaultTable.DefaultTable): + ''' + Support Graphite Glat tables + ''' + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.scheme = 0 + + def decompile(self, data, ttFont): + sstruct.unpack2(Glat_format_0, data, self) + if self.version <= 1.9: + decoder = partial(self.decompileAttributes12,fmt=Glat_format_1_entry) + elif self.version <= 2.9: + decoder = partial(self.decompileAttributes12,fmt=Glat_format_23_entry) + elif self.version >= 3.0: + (data, self.scheme) = grUtils.decompress(data) + sstruct.unpack2(Glat_format_3, data, self) + self.hasOctaboxes = (self.compression & 1) == 1 + decoder = self.decompileAttributes3 + + gloc = ttFont['Gloc'] + self.attributes = {} + count = 0 + for s,e in zip(gloc,gloc[1:]): + self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e]) + count += 1 + + def decompileAttributes12(self, data, fmt): + attributes = _Dict() + while len(data) > 3: + e, data = sstruct.unpack2(fmt, data, _Object()) + keys = range(e.attNum, e.attNum+e.num) + if len(data) >= 2 * e.num : + vals = struct.unpack_from(('>%dh' % e.num), data) + attributes.update(zip(keys,vals)) + data = data[2*e.num:] + return attributes + + def decompileAttributes3(self, data): + if self.hasOctaboxes: + o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object()) + numsub = bin(o.subboxBitmap).count("1") + o.subboxes = [] + for b in range(numsub): + if len(data) >= 8 : + subbox, data = sstruct.unpack2(Glat_format_3_subbox_entry, + data, _Object()) + o.subboxes.append(subbox) + attrs = self.decompileAttributes12(data, Glat_format_23_entry) + if self.hasOctaboxes: + attrs.octabox = o + return attrs + + def compile(self, ttFont): + data = sstruct.pack(Glat_format_0, self) + if self.version <= 1.9: + encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry) + elif self.version <= 2.9: + encoder = partial(self.compileAttributes12, 
fmt=Glat_format_1_entry) + elif self.version >= 3.0: + self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0) + data = sstruct.pack(Glat_format_3, self) + encoder = self.compileAttributes3 + + glocs = [] + for n in range(len(self.attributes)): + glocs.append(len(data)) + data += encoder(self.attributes[ttFont.getGlyphName(n)]) + glocs.append(len(data)) + ttFont['Gloc'].set(glocs) + + if self.version >= 3.0: + data = grUtils.compress(self.scheme, data) + return data + + def compileAttributes12(self, attrs, fmt): + data = [] + for e in grUtils.entries(attrs): + data.extend(sstruct.pack(fmt, {'attNum' : e[0], 'num' : e[1]})) + data.extend(struct.pack(('>%dh' % len(e[2])), *e[2])) + return "".join(data) + + def compileAttributes3(self, attrs): + if self.hasOctaboxes: + o = attrs.octabox + data = sstruct.pack(Glat_format_3_octabox_metrics, o) + numsub = bin(o.subboxBitmap).count("1") + for b in range(numsub) : + data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b]) + else: + data = "" + return data + self.compileAttributes12(attrs, Glat_format_23_entry) + + def toXML(self, writer, ttFont): + writer.simpletag('version', version=self.version, compressionScheme=self.scheme) + writer.newline() + for n, a in sorted(self.attributes.items(), key=lambda x:ttFont.getGlyphID(x[0])): + writer.begintag('glyph', name=n) + writer.newline() + if hasattr(a, 'octabox'): + o = a.octabox + formatstring, names, fixes = sstruct.getformat(Glat_format_3_octabox_metrics) + vals = {} + for k in names: + if k == 'subboxBitmap': continue + vals[k] = "{:.3f}%".format(getattr(o, k) * 100. / 256) + vals['bitmap'] = "{:0X}".format(o.subboxBitmap) + writer.begintag('octaboxes', **vals) + writer.newline() + formatstring, names, fixes = sstruct.getformat(Glat_format_3_subbox_entry) + for s in o.subboxes: + vals = {} + for k in names: + vals[k] = "{:.3f}%".format(getattr(s, k) * 100. 
/ 256) + writer.simpletag('octabox', **vals) + writer.newline() + writer.endtag('octaboxes') + writer.newline() + for k, v in sorted(a.items()): + writer.simpletag('attribute', index=k, value=v) + writer.newline() + writer.endtag('glyph') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'version' : + self.version = float(safeEval(attrs['version'])) + if name != 'glyph' : return + if not hasattr(self, 'attributes'): + self.attributes = {} + gname = attrs['name'] + attributes = _Dict() + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + if tag == 'attribute' : + k = int(safeEval(attrs['index'])) + v = int(safeEval(attrs['value'])) + attributes[k]=v + elif tag == 'octaboxes': + self.hasOctaboxes = True + o = _Object() + o.subboxBitmap = int(attrs['bitmap'], 16) + o.subboxes = [] + del attrs['bitmap'] + for k, v in attrs.items(): + setattr(o, k, int(float(v[:-1]) * 256. / 100. + 0.5)) + for element in subcontent: + if not isinstance(element, tuple): continue + (tag, attrs, subcontent) = element + so = _Object() + for k, v in attrs.items(): + setattr(so, k, int(float(v[:-1]) * 256. / 100. + 0.5)) + o.subboxes.append(so) + attributes.octabox = o + self.attributes[gname] = attributes diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/G__l_o_c.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/G__l_o_c.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/G__l_o_c.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/G__l_o_c.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,71 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import array + +Gloc_header = ''' + > # big endian + version: 16.16F # Table version + flags: H # bit 0: 1=long format, 0=short format + # bit 1: 1=attribute names, 0=no names + numAttribs: H # NUmber of attributes +''' + +class table_G__l_o_c(DefaultTable.DefaultTable): + """ + Support Graphite Gloc tables + """ + dependencies = ['Glat'] + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.attribIds = None + self.numAttribs = 0 + + def decompile(self, data, ttFont): + _, data = sstruct.unpack2(Gloc_header, data, self) + flags = self.flags + del self.flags + self.locations = array.array('I' if flags & 1 else 'H') + self.locations.fromstring(data[:len(data) - self.numAttribs * (flags & 2)]) + self.locations.byteswap() + self.attribIds = array.array('H') + if flags & 2: + self.attribIds.fromstring(data[-self.numAttribs * 2:]) + self.attribIds.byteswap() + + def compile(self, ttFont): + data = sstruct.pack(Gloc_header, dict(version=1.0, + flags=(bool(self.attribIds) << 1) + (self.locations.typecode == 'I'), + numAttribs=self.numAttribs)) + self.locations.byteswap() + data += self.locations.tostring() + self.locations.byteswap() + if self.attribIds: + self.attribIds.byteswap() + data += self.attribIds.tostring() + self.attribIds.byteswap() + return data + + def set(self, locations): + long_format = max(locations) >= 65536 + self.locations = array.array('I' if long_format else 'H', locations) + + def toXML(self, writer, ttFont): + writer.simpletag("attributes", number=self.numAttribs) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'attributes': + self.numAttribs = int(safeEval(attrs['number'])) + + def __getitem__(self, index): + return self.locations[index] + + def __len__(self): + return len(self.locations) + + def __iter__(self): + return iter(self.locations) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_g_l_y_f.py 
fonttools-3.21.2/Lib/fontTools/ttLib/tables/_g_l_y_f.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_g_l_y_f.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_g_l_y_f.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,7 @@ """_g_l_y_f.py -- Converter classes for the 'glyf' table.""" from __future__ import print_function, division, absolute_import +from collections import namedtuple from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools import ttLib @@ -8,12 +9,16 @@ from fontTools.misc.arrayTools import calcBounds, calcIntBounds, pointInRect from fontTools.misc.bezierTools import calcQuadraticBounds from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from numbers import Number from . import DefaultTable from . import ttProgram import sys import struct import array -import warnings +import logging + + +log = logging.getLogger(__name__) # # The Apple and MS rasterizers behave differently for @@ -30,6 +35,12 @@ class table__g_l_y_f(DefaultTable.DefaultTable): + # this attribute controls the amount of padding applied to glyph data upon compile. + # Glyph lenghts are aligned to multiples of the specified value. + # Allowed values are (0, 1, 2, 4). '0' means no padding; '1' (default) also means + # no padding, except for when padding would allow to use short loca offsets. 
+ padding = 1 + def decompile(self, data, ttFont): loca = ttFont['loca'] last = int(loca[0]) @@ -50,10 +61,11 @@ self.glyphs[glyphName] = glyph last = next if len(data) - next >= 4: - warnings.warn("too much 'glyf' table data: expected %d, received %d bytes" % - (next, len(data))) + log.warning( + "too much 'glyf' table data: expected %d, received %d bytes", + next, len(data)) if noname: - warnings.warn('%s glyphs have no name' % noname) + log.warning('%s glyphs have no name', noname) if ttFont.lazy is False: # Be lazy for None and True for glyph in self.glyphs.values(): glyph.expand(self) @@ -61,7 +73,8 @@ def compile(self, ttFont): if not hasattr(self, "glyphOrder"): self.glyphOrder = ttFont.getGlyphOrder() - padding = self.padding if hasattr(self, 'padding') else None + padding = self.padding + assert padding in (0, 1, 2, 4) locations = [] currentLocation = 0 dataList = [] @@ -69,14 +82,14 @@ for glyphName in self.glyphOrder: glyph = self.glyphs[glyphName] glyphData = glyph.compile(self, recalcBBoxes) - if padding: + if padding > 1: glyphData = pad(glyphData, size=padding) locations.append(currentLocation) currentLocation = currentLocation + len(glyphData) dataList.append(glyphData) locations.append(currentLocation) - if padding is None and currentLocation < 0x20000: + if padding == 1 and currentLocation < 0x20000: # See if we can pad any odd-lengthed glyphs to allow loca # table to use the short offsets. indices = [i for i,glyphData in enumerate(dataList) if len(glyphData) % 2 == 1] @@ -138,8 +151,7 @@ if not hasattr(self, "glyphOrder"): self.glyphOrder = ttFont.getGlyphOrder() glyphName = attrs["name"] - if ttFont.verbose: - ttLib.debugmsg("unpacking glyph '%s'" % glyphName) + log.debug("unpacking glyph '%s'", glyphName) glyph = Glyph() for attr in ['xMin', 'yMin', 'xMax', 'yMax']: setattr(glyph, attr, safeEval(attrs.get(attr, '0'))) @@ -162,6 +174,10 @@ # XXX optimize with reverse dict!!! 
return self.glyphOrder.index(glyphName) + def removeHinting(self): + for glyph in self.glyphs.values(): + glyph.removeHinting() + def keys(self): return self.glyphs.keys() @@ -286,6 +302,9 @@ UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS) +CompositeMaxpValues = namedtuple('CompositeMaxpValues', ['nPoints', 'nContours', 'maxComponentDepth']) + + class Glyph(object): def __init__(self, data=""): @@ -306,10 +325,16 @@ return if not self.data: # empty char + del self.data self.numberOfContours = 0 return dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self) del self.data + # Some fonts (eg. Neirizi.ttf) have a 0 for numberOfContours in + # some glyphs; decompileCoordinates assumes that there's at least + # one, so short-circuit here. + if self.numberOfContours == 0: + return if self.isComposite(): self.decompileComponents(data, glyfTable) else: @@ -317,7 +342,11 @@ def compile(self, glyfTable, recalcBBoxes=True): if hasattr(self, "data"): - return self.data + if recalcBBoxes: + # must unpack glyph in order to recalculate bounding box + self.expand(glyfTable) + else: + return self.data if self.numberOfContours == 0: return "" if recalcBBoxes: @@ -333,11 +362,7 @@ if self.isComposite(): for compo in self.components: compo.toXML(writer, ttFont) - if hasattr(self, "program"): - writer.begintag("instructions") - self.program.toXML(writer, ttFont) - writer.endtag("instructions") - writer.newline() + haveInstructions = hasattr(self, "program") else: last = 0 for i in range(self.numberOfContours): @@ -352,11 +377,16 @@ last = self.endPtsOfContours[i] + 1 writer.endtag("contour") writer.newline() - if self.numberOfContours: + haveInstructions = self.numberOfContours > 0 + if haveInstructions: + if self.program: writer.begintag("instructions") + writer.newline() self.program.toXML(writer, ttFont) writer.endtag("instructions") - writer.newline() + else: + writer.simpletag("instructions") + 
writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "contour": @@ -414,7 +444,7 @@ glyfTable, maxComponentDepth + 1) nPoints = nPoints + nP nContours = nContours + nC - return nPoints, nContours, maxComponentDepth + return CompositeMaxpValues(nPoints, nContours, maxComponentDepth) def getMaxpValues(self): assert self.numberOfContours > 0 @@ -436,7 +466,9 @@ self.program.fromBytecode(data[:numInstructions]) data = data[numInstructions:] if len(data) >= 4: - warnings.warn("too much glyph data at the end of composite glyph: %d excess bytes" % len(data)) + log.warning( + "too much glyph data at the end of composite glyph: %d excess bytes", + len(data)) def decompileCoordinates(self, data): endPtsOfContours = array.array("h") @@ -528,7 +560,8 @@ xDataLen = struct.calcsize(xFormat) yDataLen = struct.calcsize(yFormat) if len(data) - (xDataLen + yDataLen) >= 4: - warnings.warn("too much glyph data: %d excess bytes" % (len(data) - (xDataLen + yDataLen))) + log.warning( + "too much glyph data: %d excess bytes", len(data) - (xDataLen + yDataLen)) xCoordinates = struct.unpack(xFormat, data[:xDataLen]) yCoordinates = struct.unpack(yFormat, data[xDataLen:xDataLen+yDataLen]) return flags, xCoordinates, yCoordinates @@ -563,8 +596,7 @@ deltas = self.coordinates.copy() if deltas.isFloat(): # Warn? - xPoints = [int(round(x)) for x in xPoints] - yPoints = [int(round(y)) for y in xPoints] + deltas.toInt() deltas.absoluteToRelative() # TODO(behdad): Add a configuration option for this? @@ -718,7 +750,7 @@ bbox = calcBounds([coords[last], coords[next]]) if not pointInRect(coords[j], bbox): # Ouch! - warnings.warn("Outline has curve with implicit extrema.") + log.warning("Outline has curve with implicit extrema.") # Ouch! Find analytical curve bounds. 
pthis = coords[j] plast = coords[last] @@ -957,13 +989,14 @@ cFlags = cFlags[nextOnCurve:] pen.closePath() - def __ne__(self, other): - return not self.__eq__(other) def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result class GlyphComponent(object): @@ -989,7 +1022,6 @@ self.flags = int(flags) glyphID = int(glyphID) self.glyphName = glyfTable.getGlyphName(int(glyphID)) - #print ">>", reprflag(self.flags) data = data[4:] if self.flags & ARG_1_AND_2_ARE_WORDS: @@ -1025,7 +1057,7 @@ haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | - NON_OVERLAPPING) + NON_OVERLAPPING | OVERLAP_COMPOUND) return more, haveInstructions, data def compile(self, more, haveInstructions, glyfTable): @@ -1034,7 +1066,7 @@ # reset all flags we will calculate ourselves flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | - NON_OVERLAPPING) + NON_OVERLAPPING | OVERLAP_COMPOUND) if more: flags = flags | MORE_COMPONENTS if haveInstructions: @@ -1047,11 +1079,13 @@ data = data + struct.pack(">HH", self.firstPt, self.secondPt) flags = flags | ARG_1_AND_2_ARE_WORDS else: + x = round(self.x) + y = round(self.y) flags = flags | ARGS_ARE_XY_VALUES - if (-128 <= self.x <= 127) and (-128 <= self.y <= 127): - data = data + struct.pack(">bb", self.x, self.y) + if (-128 <= x <= 127) and (-128 <= y <= 127): + data = data + struct.pack(">bb", x, y) else: - data = data + struct.pack(">hh", self.x, self.y) + data = data + struct.pack(">hh", x, y) flags = flags | ARG_1_AND_2_ARE_WORDS if hasattr(self, "transform"): @@ -1120,29 +1154,37 @@ self.transform = [[scale, 0], [0, scale]] self.flags = safeEval(attrs["flags"]) - def __ne__(self, other): - return not 
self.__eq__(other) def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result + class GlyphCoordinates(object): - def __init__(self, iterable=[]): - self._a = array.array("h") + def __init__(self, iterable=[], typecode="h"): + self._a = array.array(typecode) self.extend(iterable) + @property + def array(self): + return self._a + def isFloat(self): - return self._a.typecode == 'f' + return self._a.typecode == 'd' def _ensureFloat(self): if self.isFloat(): return # The conversion to list() is to work around Jython bug - self._a = array.array("f", list(self._a)) + self._a = array.array("d", list(self._a)) def _checkFloat(self, p): + if self.isFloat(): + return p if any(isinstance(v, float) for v in p): p = [int(v) if int(v) == v else v for v in p] if any(isinstance(v, float) for v in p): @@ -1154,7 +1196,7 @@ return GlyphCoordinates([(0,0)] * count) def copy(self): - c = GlyphCoordinates() + c = GlyphCoordinates(typecode=self._a.typecode) c._a.extend(self._a) return c @@ -1171,13 +1213,18 @@ if isinstance(k, slice): indices = range(*k.indices(len(self))) # XXX This only works if len(v) == len(indices) - # TODO Implement __delitem__ for j,i in enumerate(indices): self[i] = v[j] return v = self._checkFloat(v) self._a[2*k],self._a[2*k+1] = v + def __delitem__(self, i): + i = (2*i) % len(self._a) + del self._a[i] + del self._a[i] + + def __repr__(self): return 'GlyphCoordinates(['+','.join(str(c) for c in self)+'])' @@ -1190,6 +1237,14 @@ p = self._checkFloat(p) self._a.extend(p) + def toInt(self): + if not self.isFloat(): + return + a = array.array("h") + for n in self._a: + a.append(round(n)) + self._a = a + def relativeToAbsolute(self): a = self._a x,y = 0,0 @@ -1209,13 +1264,29 @@ a[2*i+1] = dy def translate(self, p): - (x,y) = p + """ + >>> GlyphCoordinates([(1,2)]).translate((.5,0)) + """ + 
(x,y) = self._checkFloat(p) a = self._a for i in range(len(a) // 2): a[2*i ] += x a[2*i+1] += y + def scale(self, p): + """ + >>> GlyphCoordinates([(1,2)]).scale((.5,0)) + """ + (x,y) = self._checkFloat(p) + a = self._a + for i in range(len(a) // 2): + a[2*i ] *= x + a[2*i+1] *= y + def transform(self, t): + """ + >>> GlyphCoordinates([(1,2)]).transform(((.5,0),(.2,.5))) + """ a = self._a for i in range(len(a) // 2): x = a[2*i ] @@ -1224,13 +1295,197 @@ py = x * t[0][1] + y * t[1][1] self[i] = (px, py) - def __ne__(self, other): - return not self.__eq__(other) def __eq__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g2 = GlyphCoordinates([(1.0,2)]) + >>> g3 = GlyphCoordinates([(1.5,2)]) + >>> g == g2 + True + >>> g == g3 + False + >>> g2 == g3 + False + """ if type(self) != type(other): return NotImplemented return self._a == other._a + def __ne__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g2 = GlyphCoordinates([(1.0,2)]) + >>> g3 = GlyphCoordinates([(1.5,2)]) + >>> g != g2 + False + >>> g != g3 + True + >>> g2 != g3 + True + """ + result = self.__eq__(other) + return result if result is NotImplemented else not result + + # Math operations + + def __pos__(self): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g + GlyphCoordinates([(1, 2)]) + >>> g2 = +g + >>> g2 + GlyphCoordinates([(1, 2)]) + >>> g2.translate((1,0)) + >>> g2 + GlyphCoordinates([(2, 2)]) + >>> g + GlyphCoordinates([(1, 2)]) + """ + return self.copy() + def __neg__(self): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g + GlyphCoordinates([(1, 2)]) + >>> g2 = -g + >>> g2 + GlyphCoordinates([(-1, -2)]) + >>> g + GlyphCoordinates([(1, 2)]) + """ + r = self.copy() + a = r._a + for i in range(len(a)): + a[i] = -a[i] + return r + def __round__(self): + """ + Note: This is Python 3 only. Python 2 does not call __round__. + As such, we cannot test this method either. 
:( + """ + r = self.copy() + r.toInt() + return r + + def __add__(self, other): return self.copy().__iadd__(other) + def __sub__(self, other): return self.copy().__isub__(other) + def __mul__(self, other): return self.copy().__imul__(other) + def __truediv__(self, other): return self.copy().__itruediv__(other) + + __radd__ = __add__ + __rmul__ = __mul__ + def __rsub__(self, other): return other + (-self) + + def __iadd__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g += (.5,0) + >>> g + GlyphCoordinates([(1.5, 2.0)]) + >>> g2 = GlyphCoordinates([(3,4)]) + >>> g += g2 + >>> g + GlyphCoordinates([(4.5, 6.0)]) + """ + if isinstance(other, tuple): + assert len(other) == 2 + self.translate(other) + return self + if isinstance(other, GlyphCoordinates): + if other.isFloat(): self._ensureFloat() + other = other._a + a = self._a + assert len(a) == len(other) + for i in range(len(a)): + a[i] += other[i] + return self + return NotImplemented + + def __isub__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g -= (.5,0) + >>> g + GlyphCoordinates([(0.5, 2.0)]) + >>> g2 = GlyphCoordinates([(3,4)]) + >>> g -= g2 + >>> g + GlyphCoordinates([(-2.5, -2.0)]) + """ + if isinstance(other, tuple): + assert len(other) == 2 + self.translate((-other[0],-other[1])) + return self + if isinstance(other, GlyphCoordinates): + if other.isFloat(): self._ensureFloat() + other = other._a + a = self._a + assert len(a) == len(other) + for i in range(len(a)): + a[i] -= other[i] + return self + return NotImplemented + + def __imul__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g *= (2,.5) + >>> g *= 2 + >>> g + GlyphCoordinates([(4.0, 2.0)]) + >>> g = GlyphCoordinates([(1,2)]) + >>> g *= 2 + >>> g + GlyphCoordinates([(2, 4)]) + """ + if isinstance(other, Number): + other = (other, other) + if isinstance(other, tuple): + if other == (1,1): + return self + assert len(other) == 2 + self.scale(other) + return self + return NotImplemented + + def 
__itruediv__(self, other): + """ + >>> g = GlyphCoordinates([(1,3)]) + >>> g /= (.5,1.5) + >>> g /= 2 + >>> g + GlyphCoordinates([(1.0, 1.0)]) + """ + if isinstance(other, Number): + other = (other, other) + if isinstance(other, tuple): + if other == (1,1): + return self + assert len(other) == 2 + self.scale((1./other[0],1./other[1])) + return self + return NotImplemented + + def __bool__(self): + """ + >>> g = GlyphCoordinates([]) + >>> bool(g) + False + >>> g = GlyphCoordinates([(0,0), (0.,0)]) + >>> bool(g) + True + >>> g = GlyphCoordinates([(0,0), (1,0)]) + >>> bool(g) + True + >>> g = GlyphCoordinates([(0,.5), (0,0)]) + >>> bool(g) + True + """ + return bool(self._a) + + __nonzero__ = __bool__ + def reprflag(flag): bin = "" @@ -1244,3 +1499,8 @@ flag = flag >> 1 bin = (14 - len(bin)) * "0" + bin return bin + + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/grUtils.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/grUtils.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/grUtils.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/grUtils.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,79 @@ +import struct, warnings +try: + import lz4 +except: + lz4 = None + +#old scheme for VERSION < 0.9 otherwise use lz4.block + +def decompress(data): + (compression,) = struct.unpack(">L", data[4:8]) + scheme = compression >> 27 + size = compression & 0x07ffffff + if scheme == 0: + pass + elif scheme == 1 and lz4: + res = lz4.decompress(struct.pack("L", (scheme << 27) + (len(data) & 0x07ffffff)) + if scheme == 0 : + return data + elif scheme == 1 and lz4: + res = lz4.compress(hdr + data) + return res + else: + warnings.warn("Table failed to compress by unsupported compression scheme") + return data + +def _entries(attrs, sameval): + ak = 0 + vals = [] + lastv = 0 + for k,v in attrs: + if len(vals) and (k != ak + 1 or (sameval and v != lastv)) : + yield (ak 
- len(vals) + 1, len(vals), vals) + vals = [] + ak = k + vals.append(v) + lastv = v + yield (ak - len(vals) + 1, len(vals), vals) + +def entries(attributes, sameval = False): + g = _entries(sorted(attributes.iteritems(), key=lambda x:int(x[0])), sameval) + return g + +def bininfo(num, size=1): + if num == 0: + return struct.pack(">4H", 0, 0, 0, 0) + srange = 1; + select = 0 + while srange <= num: + srange *= 2 + select += 1 + select -= 1 + srange /= 2 + srange *= size + shift = num * size - srange + return struct.pack(">4H", num, srange, select, shift) + +def num2tag(n): + if n < 0x200000: + return str(n) + else: + return struct.unpack('4s', struct.pack('>L', n))[0].replace(b'\000', b'').decode() + +def tag2num(n): + try: + return int(n) + except ValueError: + n = (n+" ")[:4] + return struct.unpack('>L', n.encode('ascii'))[0] + diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_g_v_a_r.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_g_v_a_r.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_g_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_g_v_a_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,15 +2,24 @@ from fontTools.misc.py23 import * from fontTools import ttLib from fontTools.misc import sstruct -from fontTools.misc.fixedTools import fixedToFloat, floatToFixed from fontTools.misc.textTools import safeEval from fontTools.ttLib import TTLibError from . 
import DefaultTable import array -import io -import sys +import itertools +import logging import struct +import sys +import fontTools.ttLib.tables.TupleVariation as tv + + +log = logging.getLogger(__name__) +TupleVariation = tv.TupleVariation + +# https://www.microsoft.com/typography/otspec/gvar.htm +# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm +# # Apple's documentation of 'gvar': # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html # @@ -19,46 +28,35 @@ GVAR_HEADER_FORMAT = """ > # big endian - version: H - reserved: H - axisCount: H - sharedCoordCount: H - offsetToCoord: I - glyphCount: H - flags: H - offsetToData: I + version: H + reserved: H + axisCount: H + sharedTupleCount: H + offsetToSharedTuples: I + glyphCount: H + flags: H + offsetToGlyphVariationData: I """ GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT) -TUPLES_SHARE_POINT_NUMBERS = 0x8000 -TUPLE_COUNT_MASK = 0x0fff - -EMBEDDED_TUPLE_COORD = 0x8000 -INTERMEDIATE_TUPLE = 0x4000 -PRIVATE_POINT_NUMBERS = 0x2000 -TUPLE_INDEX_MASK = 0x0fff - -DELTAS_ARE_ZERO = 0x80 -DELTAS_ARE_WORDS = 0x40 -DELTA_RUN_COUNT_MASK = 0x3f - -POINTS_ARE_WORDS = 0x80 -POINT_RUN_COUNT_MASK = 0x7f - class table__g_v_a_r(DefaultTable.DefaultTable): - dependencies = ["fvar", "glyf"] + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.version, self.reserved = 1, 0 + self.variations = {} + def compile(self, ttFont): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - - sharedCoords = self.compileSharedCoords_(axisTags) - sharedCoordIndices = {coord:i for i, coord in enumerate(sharedCoords)} - sharedCoordSize = sum([len(c) for c in sharedCoords]) - - compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedCoordIndices) + sharedTuples = tv.compileSharedTuples( + axisTags, itertools.chain(*self.variations.values())) + sharedTupleIndices = {coord:i for i, coord in enumerate(sharedTuples)} + sharedTupleSize = sum([len(c) for c in 
sharedTuples]) + compiledGlyphs = self.compileGlyphs_( + ttFont, axisTags, sharedTupleIndices) offset = 0 offsets = [] for glyph in compiledGlyphs: @@ -71,100 +69,26 @@ header["version"] = self.version header["reserved"] = self.reserved header["axisCount"] = len(axisTags) - header["sharedCoordCount"] = len(sharedCoords) - header["offsetToCoord"] = GVAR_HEADER_SIZE + len(compiledOffsets) + header["sharedTupleCount"] = len(sharedTuples) + header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets) header["glyphCount"] = len(compiledGlyphs) header["flags"] = tableFormat - header["offsetToData"] = header["offsetToCoord"] + sharedCoordSize + header["offsetToGlyphVariationData"] = header["offsetToSharedTuples"] + sharedTupleSize compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header) result = [compiledHeader, compiledOffsets] - result.extend(sharedCoords) + result.extend(sharedTuples) result.extend(compiledGlyphs) return bytesjoin(result) - def compileSharedCoords_(self, axisTags): - coordCount = {} - for variations in self.variations.values(): - for gvar in variations: - coord = gvar.compileCoord(axisTags) - coordCount[coord] = coordCount.get(coord, 0) + 1 - sharedCoords = [(count, coord) for (coord, count) in coordCount.items() if count > 1] - sharedCoords.sort(reverse=True) - MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1 - sharedCoords = sharedCoords[:MAX_NUM_SHARED_COORDS] - return [c[1] for c in sharedCoords] # Strip off counts. 
- def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices): result = [] for glyphName in ttFont.getGlyphOrder(): glyph = ttFont["glyf"][glyphName] - numPointsInGlyph = self.getNumPoints_(glyph) - result.append(self.compileGlyph_(glyphName, numPointsInGlyph, axisTags, sharedCoordIndices)) - return result - - def compileGlyph_(self, glyphName, numPointsInGlyph, axisTags, sharedCoordIndices): - variations = self.variations.get(glyphName, []) - variations = [v for v in variations if v.hasImpact()] - if len(variations) == 0: - return b"" - - # Each glyph variation tuples modifies a set of control points. To indicate - # which exact points are getting modified, a single tuple can either refer - # to a shared set of points, or the tuple can supply its private point numbers. - # Because the impact of sharing can be positive (no need for a private point list) - # or negative (need to supply 0,0 deltas for unused points), it is not obvious - # how to determine which tuples should take their points from the shared - # pool versus have their own. Perhaps we should resort to brute force, - # and try all combinations? However, if a glyph has n variation tuples, - # we would need to try 2^n combinations (because each tuple may or may not - # be part of the shared set). How many variations tuples do glyphs have? - # - # Skia.ttf: {3: 1, 5: 11, 6: 41, 7: 62, 8: 387, 13: 1, 14: 3} - # JamRegular.ttf: {3: 13, 4: 122, 5: 1, 7: 4, 8: 1, 9: 1, 10: 1} - # BuffaloGalRegular.ttf: {1: 16, 2: 13, 4: 2, 5: 4, 6: 19, 7: 1, 8: 3, 9: 18} - # (Reading example: In Skia.ttf, 41 glyphs have 6 variation tuples). - # - # Is this even worth optimizing? If we never use a shared point list, - # the private lists will consume 112K for Skia, 5K for BuffaloGalRegular, - # and 15K for JamRegular. If we always use a shared point list, - # the shared lists will consume 16K for Skia, 3K for BuffaloGalRegular, - # and 10K for JamRegular. 
However, in the latter case the delta arrays - # will become larger, but I haven't yet measured by how much. From - # gut feeling (which may be wrong), the optimum is to share some but - # not all points; however, then we would need to try all combinations. - # - # For the time being, we try two variants and then pick the better one: - # (a) each tuple supplies its own private set of points; - # (b) all tuples refer to a shared set of points, which consists of - # "every control point in the glyph". - allPoints = set(range(numPointsInGlyph)) - tuples = [] - data = [] - someTuplesSharePoints = False - for gvar in variations: - privateTuple, privateData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) - sharedTuple, sharedData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=allPoints) - # TODO: If we use shared points, Apple MacOS X 10.9.5 cannot display our fonts. - # This is probably a problem with our code; find the problem and fix it. - #if (len(sharedTuple) + len(sharedData)) < (len(privateTuple) + len(privateData)): - if False: - tuples.append(sharedTuple) - data.append(sharedData) - someTuplesSharePoints = True - else: - tuples.append(privateTuple) - data.append(privateData) - if someTuplesSharePoints: - data = bytechr(0) + bytesjoin(data) # 0x00 = "all points in glyph" - tupleCount = TUPLES_SHARE_POINT_NUMBERS | len(tuples) - else: - data = bytesjoin(data) - tupleCount = len(tuples) - tuples = bytesjoin(tuples) - result = struct.pack(">HH", tupleCount, 4 + len(tuples)) + tuples + data - if len(result) % 2 != 0: - result = result + b"\0" # padding + pointCount = self.getNumPoints_(glyph) + variations = self.variations.get(glyphName, []) + result.append(compileGlyph_(variations, pointCount, + axisTags, sharedCoordIndices)) return result def decompile(self, data, ttFont): @@ -174,19 +98,17 @@ assert len(glyphs) == self.glyphCount assert len(axisTags) == self.axisCount offsets = self.decompileOffsets_(data[GVAR_HEADER_SIZE:], 
tableFormat=(self.flags & 1), glyphCount=self.glyphCount) - sharedCoords = self.decompileSharedCoords_(axisTags, data) + sharedCoords = tv.decompileSharedTuples( + axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples) self.variations = {} + offsetToData = self.offsetToGlyphVariationData for i in range(self.glyphCount): glyphName = glyphs[i] glyph = ttFont["glyf"][glyphName] numPointsInGlyph = self.getNumPoints_(glyph) - gvarData = data[self.offsetToData + offsets[i] : self.offsetToData + offsets[i + 1]] - self.variations[glyphName] = \ - self.decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData) - - def decompileSharedCoords_(self, axisTags, data): - result, _pos = GlyphVariation.decompileCoords_(axisTags, self.sharedCoordCount, data, self.offsetToCoord) - return result + gvarData = data[offsetToData + offsets[i] : offsetToData + offsets[i + 1]] + self.variations[glyphName] = decompileGlyph_( + numPointsInGlyph, sharedCoords, axisTags, gvarData) @staticmethod def decompileOffsets_(data, tableFormat, glyphCount): @@ -234,68 +156,6 @@ packed.byteswap() return (packed.tostring(), tableFormat) - def decompileGlyph_(self, numPointsInGlyph, sharedCoords, axisTags, data): - if len(data) < 4: - return [] - numAxes = len(axisTags) - tuples = [] - flags, offsetToData = struct.unpack(">HH", data[:4]) - pos = 4 - dataPos = offsetToData - if (flags & TUPLES_SHARE_POINT_NUMBERS) != 0: - sharedPoints, dataPos = GlyphVariation.decompilePoints_(numPointsInGlyph, data, dataPos) - else: - sharedPoints = [] - for _ in range(flags & TUPLE_COUNT_MASK): - dataSize, flags = struct.unpack(">HH", data[pos:pos+4]) - tupleSize = GlyphVariation.getTupleSize_(flags, numAxes) - tupleData = data[pos : pos + tupleSize] - pointDeltaData = data[dataPos : dataPos + dataSize] - tuples.append(self.decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, tupleData, pointDeltaData)) - pos += tupleSize - dataPos += dataSize - return tuples - - @staticmethod - def 
decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, data, tupleData): - flags = struct.unpack(">H", data[2:4])[0] - - pos = 4 - if (flags & EMBEDDED_TUPLE_COORD) == 0: - coord = sharedCoords[flags & TUPLE_INDEX_MASK] - else: - coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - if (flags & INTERMEDIATE_TUPLE) != 0: - minCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - maxCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - else: - minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) - axes = {} - for axis in axisTags: - coords = minCoord[axis], coord[axis], maxCoord[axis] - if coords != (0.0, 0.0, 0.0): - axes[axis] = coords - pos = 0 - if (flags & PRIVATE_POINT_NUMBERS) != 0: - points, pos = GlyphVariation.decompilePoints_(numPointsInGlyph, tupleData, pos) - else: - points = sharedPoints - deltas_x, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) - deltas_y, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) - deltas = [None] * numPointsInGlyph - for p, x, y in zip(points, deltas_x, deltas_y): - deltas[p] = (x, y) - return GlyphVariation(axes, deltas) - - @staticmethod - def computeMinMaxCoord_(coord): - minCoord = {} - maxCoord = {} - for (axis, value) in coord.items(): - minCoord[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - maxCoord[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - return (minCoord, maxCoord) - def toXML(self, writer, ttFont, progress=None): writer.simpletag("version", value=self.version) writer.newline() @@ -329,7 +189,7 @@ if isinstance(element, tuple): name, attrs, content = element if name == "tuple": - gvar = GlyphVariation({}, [None] * numPointsInGlyph) + gvar = TupleVariation({}, [None] * numPointsInGlyph) glyphVariations.append(gvar) for tupleElement in content: if isinstance(tupleElement, tuple): @@ -347,371 +207,23 @@ return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS -class 
GlyphVariation(object): - def __init__(self, axes, coordinates): - self.axes = axes - self.coordinates = coordinates - - def __repr__(self): - axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])) - return "" % (axes, self.coordinates) - - def __eq__(self, other): - return self.coordinates == other.coordinates and self.axes == other.axes - - def getUsedPoints(self): - result = set() - for i, point in enumerate(self.coordinates): - if point is not None: - result.add(i) - return result - - def hasImpact(self): - """Returns True if this GlyphVariation has any visible impact. - - If the result is False, the GlyphVariation can be omitted from the font - without making any visible difference. - """ - for c in self.coordinates: - if c is not None: - return True - return False - - def toXML(self, writer, axisTags): - writer.begintag("tuple") - writer.newline() - for axis in axisTags: - value = self.axes.get(axis) - if value is not None: - minValue, value, maxValue = value - defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - if minValue == defaultMinValue and maxValue == defaultMaxValue: - writer.simpletag("coord", axis=axis, value=value) - else: - writer.simpletag("coord", axis=axis, value=value, min=minValue, max=maxValue) - writer.newline() - wrote_any_points = False - for i, point in enumerate(self.coordinates): - if point is not None: - writer.simpletag("delta", pt=i, x=point[0], y=point[1]) - writer.newline() - wrote_any_points = True - if not wrote_any_points: - writer.comment("no deltas") - writer.newline() - writer.endtag("tuple") - writer.newline() - - def fromXML(self, name, attrs, _content): - if name == "coord": - axis = attrs["axis"] - value = float(attrs["value"]) - defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - minValue = float(attrs.get("min", defaultMinValue)) - 
maxValue = float(attrs.get("max", defaultMaxValue)) - self.axes[axis] = (minValue, value, maxValue) - elif name == "delta": - point = safeEval(attrs["pt"]) - x = safeEval(attrs["x"]) - y = safeEval(attrs["y"]) - self.coordinates[point] = (x, y) - - def compile(self, axisTags, sharedCoordIndices, sharedPoints): - tupleData = [] - - coord = self.compileCoord(axisTags) - if coord in sharedCoordIndices: - flags = sharedCoordIndices[coord] - else: - flags = EMBEDDED_TUPLE_COORD - tupleData.append(coord) - - intermediateCoord = self.compileIntermediateCoord(axisTags) - if intermediateCoord is not None: - flags |= INTERMEDIATE_TUPLE - tupleData.append(intermediateCoord) - - if sharedPoints is not None: - auxData = self.compileDeltas(sharedPoints) - else: - flags |= PRIVATE_POINT_NUMBERS - points = self.getUsedPoints() - numPointsInGlyph = len(self.coordinates) - auxData = self.compilePoints(points, numPointsInGlyph) + self.compileDeltas(points) - - tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(tupleData) - return (tupleData, auxData) - - def compileCoord(self, axisTags): - result = [] - for axis in axisTags: - _minValue, value, _maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) - result.append(struct.pack(">h", floatToFixed(value, 14))) - return bytesjoin(result) - - def compileIntermediateCoord(self, axisTags): - needed = False - for axis in axisTags: - minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) - defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - if (minValue != defaultMinValue) or (maxValue != defaultMaxValue): - needed = True - break - if not needed: - return None - minCoords = [] - maxCoords = [] - for axis in axisTags: - minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) - minCoords.append(struct.pack(">h", floatToFixed(minValue, 14))) - maxCoords.append(struct.pack(">h", floatToFixed(maxValue, 14))) - return bytesjoin(minCoords + 
maxCoords) - - @staticmethod - def decompileCoord_(axisTags, data, offset): - coord = {} - pos = offset - for axis in axisTags: - coord[axis] = fixedToFloat(struct.unpack(">h", data[pos:pos+2])[0], 14) - pos += 2 - return coord, pos - - @staticmethod - def decompileCoords_(axisTags, numCoords, data, offset): - result = [] - pos = offset - for _ in range(numCoords): - coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - result.append(coord) - return result, pos - - @staticmethod - def compilePoints(points, numPointsInGlyph): - # If the set consists of all points in the glyph, it gets encoded with - # a special encoding: a single zero byte. - if len(points) == numPointsInGlyph: - return b"\0" - - # In the 'gvar' table, the packing of point numbers is a little surprising. - # It consists of multiple runs, each being a delta-encoded list of integers. - # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as - # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1. - # There are two types of runs, with values being either 8 or 16 bit unsigned - # integers. - points = list(points) - points.sort() - numPoints = len(points) - - # The binary representation starts with the total number of points in the set, - # encoded into one or two bytes depending on the value. 
- if numPoints < 0x80: - result = [bytechr(numPoints)] - else: - result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)] - - MAX_RUN_LENGTH = 127 - pos = 0 - while pos < numPoints: - run = io.BytesIO() - runLength = 0 - lastValue = 0 - useByteEncoding = (points[pos] <= 0xff) - while pos < numPoints and runLength <= MAX_RUN_LENGTH: - curValue = points[pos] - delta = curValue - lastValue - if useByteEncoding and delta > 0xff: - # we need to start a new run (which will not use byte encoding) - break - if useByteEncoding: - run.write(bytechr(delta)) - else: - run.write(bytechr(delta >> 8)) - run.write(bytechr(delta & 0xff)) - lastValue = curValue - pos += 1 - runLength += 1 - if useByteEncoding: - runHeader = bytechr(runLength - 1) - else: - runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS) - result.append(runHeader) - result.append(run.getvalue()) - - return bytesjoin(result) - - @staticmethod - def decompilePoints_(numPointsInGlyph, data, offset): - """(numPointsInGlyph, data, offset) --> ([point1, point2, ...], newOffset)""" - pos = offset - numPointsInData = byteord(data[pos]) - pos += 1 - if (numPointsInData & POINTS_ARE_WORDS) != 0: - numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | byteord(data[pos]) - pos += 1 - if numPointsInData == 0: - return (range(numPointsInGlyph), pos) - result = [] - while len(result) < numPointsInData: - runHeader = byteord(data[pos]) - pos += 1 - numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1 - point = 0 - if (runHeader & POINTS_ARE_WORDS) == 0: - for _ in range(numPointsInRun): - point += byteord(data[pos]) - pos += 1 - result.append(point) - else: - for _ in range(numPointsInRun): - point += struct.unpack(">H", data[pos:pos+2])[0] - pos += 2 - result.append(point) - if max(result) >= numPointsInGlyph: - raise TTLibError("malformed 'gvar' table") - return (result, pos) - - def compileDeltas(self, points): - deltaX = [] - deltaY = [] - for p in sorted(list(points)): - c = 
self.coordinates[p] - if c is not None: - deltaX.append(c[0]) - deltaY.append(c[1]) - return self.compileDeltaValues_(deltaX) + self.compileDeltaValues_(deltaY) - - @staticmethod - def compileDeltaValues_(deltas): - """[value1, value2, value3, ...] --> bytestring - - Emits a sequence of runs. Each run starts with a - byte-sized header whose 6 least significant bits - (header & 0x3F) indicate how many values are encoded - in this run. The stored length is the actual length - minus one; run lengths are thus in the range [1..64]. - If the header byte has its most significant bit (0x80) - set, all values in this run are zero, and no data - follows. Otherwise, the header byte is followed by - ((header & 0x3F) + 1) signed values. If (header & - 0x40) is clear, the delta values are stored as signed - bytes; if (header & 0x40) is set, the delta values are - signed 16-bit integers. - """ # Explaining the format because the 'gvar' spec is hard to understand. - stream = io.BytesIO() - pos = 0 - while pos < len(deltas): - value = deltas[pos] - if value == 0: - pos = GlyphVariation.encodeDeltaRunAsZeroes_(deltas, pos, stream) - elif value >= -128 and value <= 127: - pos = GlyphVariation.encodeDeltaRunAsBytes_(deltas, pos, stream) - else: - pos = GlyphVariation.encodeDeltaRunAsWords_(deltas, pos, stream) - return stream.getvalue() - - @staticmethod - def encodeDeltaRunAsZeroes_(deltas, offset, stream): - runLength = 0 - pos = offset - numDeltas = len(deltas) - while pos < numDeltas and runLength < 64 and deltas[pos] == 0: - pos += 1 - runLength += 1 - assert runLength >= 1 and runLength <= 64 - stream.write(bytechr(DELTAS_ARE_ZERO | (runLength - 1))) - return pos - - @staticmethod - def encodeDeltaRunAsBytes_(deltas, offset, stream): - runLength = 0 - pos = offset - numDeltas = len(deltas) - while pos < numDeltas and runLength < 64: - value = deltas[pos] - if value < -128 or value > 127: - break - # Within a byte-encoded run of deltas, a single zero - # is best stored literally 
as 0x00 value. However, - # if are two or more zeroes in a sequence, it is - # better to start a new run. For example, the sequence - # of deltas [15, 15, 0, 15, 15] becomes 6 bytes - # (04 0F 0F 00 0F 0F) when storing the zero value - # literally, but 7 bytes (01 0F 0F 80 01 0F 0F) - # when starting a new run. - if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0: - break - pos += 1 - runLength += 1 - assert runLength >= 1 and runLength <= 64 - stream.write(bytechr(runLength - 1)) - for i in range(offset, pos): - stream.write(struct.pack('b', deltas[i])) - return pos - - @staticmethod - def encodeDeltaRunAsWords_(deltas, offset, stream): - runLength = 0 - pos = offset - numDeltas = len(deltas) - while pos < numDeltas and runLength < 64: - value = deltas[pos] - # Within a word-encoded run of deltas, it is easiest - # to start a new run (with a different encoding) - # whenever we encounter a zero value. For example, - # the sequence [0x6666, 0, 0x7777] needs 7 bytes when - # storing the zero literally (42 66 66 00 00 77 77), - # and equally 7 bytes when starting a new run - # (40 66 66 80 40 77 77). - if value == 0: - break - - # Within a word-encoded run of deltas, a single value - # in the range (-128..127) should be encoded literally - # because it is more compact. For example, the sequence - # [0x6666, 2, 0x7777] becomes 7 bytes when storing - # the value literally (42 66 66 00 02 77 77), but 8 bytes - # when starting a new run (40 66 66 00 02 40 77 77). 
- isByteEncodable = lambda value: value >= -128 and value <= 127 - if isByteEncodable(value) and pos+1 < numDeltas and isByteEncodable(deltas[pos+1]): - break - pos += 1 - runLength += 1 - assert runLength >= 1 and runLength <= 64 - stream.write(bytechr(DELTAS_ARE_WORDS | (runLength - 1))) - for i in range(offset, pos): - stream.write(struct.pack('>h', deltas[i])) - return pos - - @staticmethod - def decompileDeltas_(numDeltas, data, offset): - """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)""" - result = [] - pos = offset - while len(result) < numDeltas: - runHeader = byteord(data[pos]) - pos += 1 - numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1 - if (runHeader & DELTAS_ARE_ZERO) != 0: - result.extend([0] * numDeltasInRun) - elif (runHeader & DELTAS_ARE_WORDS) != 0: - for _ in range(numDeltasInRun): - result.append(struct.unpack(">h", data[pos:pos+2])[0]) - pos += 2 - else: - for _ in range(numDeltasInRun): - result.append(struct.unpack(">b", data[pos:pos+1])[0]) - pos += 1 - assert len(result) == numDeltas - return (result, pos) - - @staticmethod - def getTupleSize_(flags, axisCount): - size = 4 - if (flags & EMBEDDED_TUPLE_COORD) != 0: - size += axisCount * 2 - if (flags & INTERMEDIATE_TUPLE) != 0: - size += axisCount * 4 - return size +def compileGlyph_(variations, pointCount, axisTags, sharedCoordIndices): + tupleVariationCount, tuples, data = tv.compileTupleVariationStore( + variations, pointCount, axisTags, sharedCoordIndices) + if tupleVariationCount == 0: + return b"" + result = (struct.pack(">HH", tupleVariationCount, 4 + len(tuples)) + + tuples + data) + if len(result) % 2 != 0: + result = result + b"\0" # padding + return result + + +def decompileGlyph_(pointCount, sharedTuples, axisTags, data): + if len(data) < 4: + return [] + tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4]) + dataPos = offsetToData + return tv.decompileTupleVariationStore("gvar", axisTags, + tupleVariationCount, pointCount, + sharedTuples, 
data, 4, offsetToData) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_g_v_a_r_test.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_g_v_a_r_test.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_g_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_g_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,539 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr, hexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r, GlyphVariation -import random -import unittest - -def hexencode(s): - h = hexStr(s).upper() - return ' '.join([h[i:i+2] for i in range(0, len(h), 2)]) - -# Glyph variation table of uppercase I in the Skia font, as printed in Apple's -# TrueType spec. The actual Skia font uses a different table for uppercase I. 
-# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html -SKIA_GVAR_I = deHexStr( - "00 08 00 24 00 33 20 00 00 15 20 01 00 1B 20 02 " - "00 24 20 03 00 15 20 04 00 26 20 07 00 0D 20 06 " - "00 1A 20 05 00 40 01 01 01 81 80 43 FF 7E FF 7E " - "FF 7E FF 7E 00 81 45 01 01 01 03 01 04 01 04 01 " - "04 01 02 80 40 00 82 81 81 04 3A 5A 3E 43 20 81 " - "04 0E 40 15 45 7C 83 00 0D 9E F3 F2 F0 F0 F0 F0 " - "F3 9E A0 A1 A1 A1 9F 80 00 91 81 91 00 0D 0A 0A " - "09 0A 0A 0A 0A 0A 0A 0A 0A 0A 0A 0B 80 00 15 81 " - "81 00 C4 89 00 C4 83 00 0D 80 99 98 96 96 96 96 " - "99 80 82 83 83 83 81 80 40 FF 18 81 81 04 E6 F9 " - "10 21 02 81 04 E8 E5 EB 4D DA 83 00 0D CE D3 D4 " - "D3 D3 D3 D5 D2 CE CC CD CD CD CD 80 00 A1 81 91 " - "00 0D 07 03 04 02 02 02 03 03 07 07 08 08 08 07 " - "80 00 09 81 81 00 28 40 00 A4 02 24 24 66 81 04 " - "08 FA FA FA 28 83 00 82 02 FF FF FF 83 02 01 01 " - "01 84 91 00 80 06 07 08 08 08 08 0A 07 80 03 FE " - "FF FF FF 81 00 08 81 82 02 EE EE EE 8B 6D 00") - -# Shared coordinates in the Skia font, as printed in Apple's TrueType spec. 
-SKIA_SHARED_COORDS = deHexStr( - "40 00 00 00 C0 00 00 00 00 00 40 00 00 00 C0 00 " - "C0 00 C0 00 40 00 C0 00 40 00 40 00 C0 00 40 00") - - -class GlyphVariationTableTest(unittest.TestCase): - def test_compileOffsets_shortFormat(self): - self.assertEqual((deHexStr("00 00 00 02 FF C0"), 0), - table__g_v_a_r.compileOffsets_([0, 4, 0x1ff80])) - - def test_compileOffsets_longFormat(self): - self.assertEqual((deHexStr("00 00 00 00 00 00 00 04 CA FE BE EF"), 1), - table__g_v_a_r.compileOffsets_([0, 4, 0xCAFEBEEF])) - - def test_decompileOffsets_shortFormat(self): - decompileOffsets = table__g_v_a_r.decompileOffsets_ - data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") - self.assertEqual([2*0x0011, 2*0x2233, 2*0x4455, 2*0x6677, 2*0x8899, 2*0xaabb], - list(decompileOffsets(data, tableFormat=0, glyphCount=5))) - - def test_decompileOffsets_longFormat(self): - decompileOffsets = table__g_v_a_r.decompileOffsets_ - data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") - self.assertEqual([0x00112233, 0x44556677, 0x8899aabb], - list(decompileOffsets(data, tableFormat=1, glyphCount=2))) - - def test_compileGlyph_noVariations(self): - table = table__g_v_a_r() - table.variations = {} - self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) - - def test_compileGlyph_emptyVariations(self): - table = table__g_v_a_r() - table.variations = {"glyphname": []} - self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) - - def test_compileGlyph_onlyRedundantVariations(self): - table = table__g_v_a_r() - axes = {"wght": (0.3, 0.4, 0.5), "opsz": (0.7, 0.8, 0.9)} - table.variations = {"glyphname": [ - GlyphVariation(axes, [None] * 4), - GlyphVariation(axes, [None] * 4), - GlyphVariation(axes, [None] * 4) - ]} - self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) - - def test_compileGlyph_roundTrip(self): - table = table__g_v_a_r() - axisTags = ["wght", "wdth"] - numPointsInGlyph = 4 - glyphCoords = [(1,1), 
(2,2), (3,3), (4,4)] - gvar1 = GlyphVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) - gvar2 = GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) - table.variations = {"oslash": [gvar1, gvar2]} - data = table.compileGlyph_("oslash", numPointsInGlyph, axisTags, {}) - self.assertEqual([gvar1, gvar2], table.decompileGlyph_(numPointsInGlyph, {}, axisTags, data)) - - def test_compileSharedCoords(self): - table = table__g_v_a_r() - table.variations = {} - deltas = [None] * 4 - table.variations["A"] = [ - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.5, 0.7, 1.0)}, deltas) - ] - table.variations["B"] = [ - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.7, 1.0)}, deltas), - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.8, 1.0)}, deltas) - ] - table.variations["C"] = [ - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.7, 1.0)}, deltas), - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.8, 1.0)}, deltas), - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.9, 1.0)}, deltas) - ] - # {"wght":1.0, "wdth":0.7} is shared 3 times; {"wght":1.0, "wdth":0.8} is shared twice. - # Min and max values are not part of the shared coordinate pool and should get ignored. 
- result = table.compileSharedCoords_(["wght", "wdth"]) - self.assertEqual(["40 00 2C CD", "40 00 33 33"], [hexencode(c) for c in result]) - - def test_decompileSharedCoords_Skia(self): - table = table__g_v_a_r() - table.offsetToCoord = 0 - table.sharedCoordCount = 8 - sharedCoords = table.decompileSharedCoords_(["wght", "wdth"], SKIA_SHARED_COORDS) - self.assertEqual([ - {"wght": 1.0, "wdth": 0.0}, - {"wght": -1.0, "wdth": 0.0}, - {"wght": 0.0, "wdth": 1.0}, - {"wght": 0.0, "wdth": -1.0}, - {"wght": -1.0, "wdth": -1.0}, - {"wght": 1.0, "wdth": -1.0}, - {"wght": 1.0, "wdth": 1.0}, - {"wght": -1.0, "wdth": 1.0} - ], sharedCoords) - - def test_decompileSharedCoords_empty(self): - table = table__g_v_a_r() - table.offsetToCoord = 0 - table.sharedCoordCount = 0 - self.assertEqual([], table.decompileSharedCoords_(["wght"], b"")) - - def test_decompileGlyph_Skia_I(self): - axes = ["wght", "wdth"] - table = table__g_v_a_r() - table.offsetToCoord = 0 - table.sharedCoordCount = 8 - table.axisCount = len(axes) - sharedCoords = table.decompileSharedCoords_(axes, SKIA_SHARED_COORDS) - tuples = table.decompileGlyph_(18, sharedCoords, axes, SKIA_GVAR_I) - self.assertEqual(8, len(tuples)) - self.assertEqual({"wght": (0.0, 1.0, 1.0)}, tuples[0].axes) - self.assertEqual("257,0 -127,0 -128,58 -130,90 -130,62 -130,67 -130,32 -127,0 257,0 " - "259,14 260,64 260,21 260,69 258,124 0,0 130,0 0,0 0,0", - " ".join(["%d,%d" % c for c in tuples[0].coordinates])) - - def test_decompileGlyph_empty(self): - table = table__g_v_a_r() - self.assertEqual([], table.decompileGlyph_(numPointsInGlyph=5, sharedCoords=[], axisTags=[], data=b"")) - - def test_computeMinMaxCord(self): - coord = {"wght": -0.3, "wdth": 0.7} - minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) - self.assertEqual({"wght": -0.3, "wdth": 0.0}, minCoord) - self.assertEqual({"wght": 0.0, "wdth": 0.7}, maxCoord) - -class GlyphVariationTest(unittest.TestCase): - def test_equal(self): - gvar1 = GlyphVariation({"wght":(0.0, 
1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - self.assertEqual(gvar1, gvar2) - - def test_equal_differentAxes(self): - gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - gvar2 = GlyphVariation({"wght":(0.7, 0.8, 0.9)}, [(0,0), (9,8), (7,6)]) - self.assertNotEqual(gvar1, gvar2) - - def test_equal_differentCoordinates(self): - gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8)]) - self.assertNotEqual(gvar1, gvar2) - - def test_hasImpact_someDeltasNotZero(self): - axes = {"wght":(0.0, 1.0, 1.0)} - gvar = GlyphVariation(axes, [(0,0), (9,8), (7,6)]) - self.assertTrue(gvar.hasImpact()) - - def test_hasImpact_allDeltasZero(self): - axes = {"wght":(0.0, 1.0, 1.0)} - gvar = GlyphVariation(axes, [(0,0), (0,0), (0,0)]) - self.assertTrue(gvar.hasImpact()) - - def test_hasImpact_allDeltasNone(self): - axes = {"wght":(0.0, 1.0, 1.0)} - gvar = GlyphVariation(axes, [None, None, None]) - self.assertFalse(gvar.hasImpact()) - - def test_toXML(self): - writer = XMLWriter(BytesIO()) - axes = {"wdth":(0.3, 0.4, 0.5), "wght":(0.0, 1.0, 1.0), "opsz":(-0.7, -0.7, 0.0)} - g = GlyphVariation(axes, [(9,8), None, (7,6), (0,0), (-1,-2), None]) - g.toXML(writer, ["wdth", "wght", "opsz"]) - self.assertEqual([ - '', - '', - '', - '', - '', - '', - '', - '', - '' - ], GlyphVariationTest.xml_lines(writer)) - - def test_toXML_allDeltasNone(self): - writer = XMLWriter(BytesIO()) - axes = {"wght":(0.0, 1.0, 1.0)} - g = GlyphVariation(axes, [None] * 5) - g.toXML(writer, ["wght", "wdth"]) - self.assertEqual([ - '', - '', - '', - '' - ], GlyphVariationTest.xml_lines(writer)) - - def test_fromXML(self): - g = GlyphVariation({}, [None] * 4) - g.fromXML("coord", {"axis":"wdth", "min":"0.3", "value":"0.4", "max":"0.5"}, []) - g.fromXML("coord", {"axis":"wght", "value":"1.0"}, []) - g.fromXML("coord", {"axis":"opsz", "value":"-0.5"}, []) - 
g.fromXML("delta", {"pt":"1", "x":"33", "y":"44"}, []) - g.fromXML("delta", {"pt":"2", "x":"-2", "y":"170"}, []) - self.assertEqual({ - "wdth":( 0.3, 0.4, 0.5), - "wght":( 0.0, 1.0, 1.0), - "opsz":(-0.5, -0.5, 0.0) - }, g.axes) - self.assertEqual([None, (33, 44), (-2, 170), None], g.coordinates) - - def test_compile_sharedCoords_nonIntermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) - # len(data)=8; flags=None; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[] - self.assertEqual("00 08 00 77", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_sharedCoords_intermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.3, 0.5, 0.7), "wdth": (0.1, 0.8, 0.9)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) - # len(data)=8; flags=INTERMEDIATE_TUPLE; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[(0.3, 0.1), (0.7, 0.9)] - self.assertEqual("00 08 40 77 13 33 06 66 2C CD 39 9A", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_sharedCoords_nonIntermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[] - 
self.assertEqual("00 09 20 77", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_sharedCoords_intermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 1.0)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[(0.0, 0.0), (1.0, 1.0)] - self.assertEqual("00 09 60 77 00 00 00 00 40 00 40 00", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_nonIntermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) - # len(data)=8; flags=EMBEDDED_TUPLE_COORD - # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] - self.assertEqual("00 08 80 00 20 00 33 33", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_intermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) - # len(data)=8; flags=EMBEDDED_TUPLE_COORD - # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[(0.0, 0.0), (1.0, 0.8)] - self.assertEqual("00 08 C0 00 20 00 33 33 00 00 00 00 40 00 33 33", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # 
deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_nonIntermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_TUPLE_COORD - # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] - self.assertEqual("00 09 A0 00 20 00 33 33", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_intermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_TUPLE|EMBEDDED_TUPLE_COORD - # embeddedCoord=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)] - self.assertEqual("00 09 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compileCoord(self): - gvar = GlyphVariation({"wght": (-1.0, -1.0, -1.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) - self.assertEqual("C0 00 20 00", hexencode(gvar.compileCoord(["wght", "wdth"]))) - self.assertEqual("20 00 C0 00", hexencode(gvar.compileCoord(["wdth", "wght"]))) - self.assertEqual("C0 00", hexencode(gvar.compileCoord(["wght"]))) - - def test_compileIntermediateCoord(self): - gvar = GlyphVariation({"wght": (-1.0, -1.0, 0.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) - self.assertEqual("C0 00 19 9A 00 00 26 66", hexencode(gvar.compileIntermediateCoord(["wght", "wdth"]))) - self.assertEqual("19 9A C0 00 26 66 00 00", 
hexencode(gvar.compileIntermediateCoord(["wdth", "wght"]))) - self.assertEqual(None, gvar.compileIntermediateCoord(["wght"])) - self.assertEqual("19 9A 26 66", hexencode(gvar.compileIntermediateCoord(["wdth"]))) - - def test_decompileCoord(self): - decompileCoord = GlyphVariation.decompileCoord_ - data = deHexStr("DE AD C0 00 20 00 DE AD") - self.assertEqual(({"wght": -1.0, "wdth": 0.5}, 6), decompileCoord(["wght", "wdth"], data, 2)) - - def test_decompileCoord_roundTrip(self): - # Make sure we are not affected by https://github.com/behdad/fonttools/issues/286 - data = deHexStr("7F B9 80 35") - values, _ = GlyphVariation.decompileCoord_(["wght", "wdth"], data, 0) - axisValues = {axis:(val, val, val) for axis, val in values.items()} - gvar = GlyphVariation(axisValues, [None] * 4) - self.assertEqual("7F B9 80 35", hexencode(gvar.compileCoord(["wght", "wdth"]))) - - def test_decompileCoords(self): - decompileCoords = GlyphVariation.decompileCoords_ - axes = ["wght", "wdth", "opsz"] - coords = [ - {"wght": 1.0, "wdth": 0.0, "opsz": 0.5}, - {"wght": -1.0, "wdth": 0.0, "opsz": 0.25}, - {"wght": 0.0, "wdth": -1.0, "opsz": 1.0} - ] - data = deHexStr("DE AD 40 00 00 00 20 00 C0 00 00 00 10 00 00 00 C0 00 40 00") - self.assertEqual((coords, 20), decompileCoords(axes, numCoords=3, data=data, offset=2)) - - def test_compilePoints(self): - compilePoints = lambda p: GlyphVariation.compilePoints(set(p), numPointsInGlyph=999) - self.assertEqual("00", hexencode(compilePoints(range(999)))) # all points in glyph - self.assertEqual("01 00 07", hexencode(compilePoints([7]))) - self.assertEqual("01 80 FF FF", hexencode(compilePoints([65535]))) - self.assertEqual("02 01 09 06", hexencode(compilePoints([9, 15]))) - self.assertEqual("06 05 07 01 F7 02 01 F2", hexencode(compilePoints([7, 8, 255, 257, 258, 500]))) - self.assertEqual("03 01 07 01 80 01 F4", hexencode(compilePoints([7, 8, 500]))) - self.assertEqual("04 01 07 01 81 BE EF 0C 0F", hexencode(compilePoints([7, 8, 0xBEEF, 0xCAFE]))) 
- self.assertEqual("81 2C" + # 300 points (0x12c) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] - hexencode(compilePoints(range(300)))) - self.assertEqual("81 8F" + # 399 points (0x18f) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] - " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] - hexencode(compilePoints(range(399)))) - - def test_decompilePoints(self): - numPointsInGlyph = 65536 - allPoints = list(range(numPointsInGlyph)) - def decompilePoints(data, offset): - points, offset = GlyphVariation.decompilePoints_(numPointsInGlyph, deHexStr(data), offset) - # Conversion to list needed for Python 3. - return (list(points), offset) - # all points in glyph - self.assertEqual((allPoints, 1), decompilePoints("00", 0)) - # all points in glyph (in overly verbose encoding, not explicitly prohibited by spec) - self.assertEqual((allPoints, 2), decompilePoints("80 00", 0)) - # 2 points; first run: [9, 9+6] - self.assertEqual(([9, 15], 4), decompilePoints("02 01 09 06", 0)) - # 2 points; first run: [0xBEEF, 0xCAFE]. (0x0C0F = 0xCAFE - 0xBEEF) - self.assertEqual(([0xBEEF, 0xCAFE], 6), decompilePoints("02 81 BE EF 0C 0F", 0)) - # 1 point; first run: [7] - self.assertEqual(([7], 3), decompilePoints("01 00 07", 0)) - # 1 point; first run: [7] in overly verbose encoding - self.assertEqual(([7], 4), decompilePoints("01 80 00 07", 0)) - # 1 point; first run: [65535]; requires words to be treated as unsigned numbers - self.assertEqual(([65535], 4), decompilePoints("01 80 FF FF", 0)) - # 4 points; first run: [7, 8]; second run: [255, 257]. 
257 is stored in delta-encoded bytes (0xFF + 2). - self.assertEqual(([7, 8, 255, 257], 7), decompilePoints("04 01 07 01 01 FF 02", 0)) - # combination of all encodings, preceded and followed by 4 bytes of unused data - data = "DE AD DE AD 04 01 07 01 81 BE EF 0C 0F DE AD DE AD" - self.assertEqual(([7, 8, 0xBEEF, 0xCAFE], 13), decompilePoints(data, 4)) - self.assertSetEqual(set(range(300)), set(decompilePoints( - "81 2C" + # 300 points (0x12c) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] - 0)[0])) - self.assertSetEqual(set(range(399)), set(decompilePoints( - "81 8F" + # 399 points (0x18f) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] - " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] - 0)[0])) - - def test_decompilePoints_shouldGuardAgainstBadPointNumbers(self): - decompilePoints = GlyphVariation.decompilePoints_ - # 2 points; first run: [3, 9]. 
- numPointsInGlyph = 8 - self.assertRaises(TTLibError, decompilePoints, numPointsInGlyph, deHexStr("02 01 03 06"), 0) - - def test_decompilePoints_roundTrip(self): - numPointsInGlyph = 500 # greater than 255, so we also exercise code path for 16-bit encoding - compile = lambda points: GlyphVariation.compilePoints(points, numPointsInGlyph) - decompile = lambda data: set(GlyphVariation.decompilePoints_(numPointsInGlyph, data, 0)[0]) - for i in range(50): - points = set(random.sample(range(numPointsInGlyph), 30)) - self.assertSetEqual(points, decompile(compile(points)), - "failed round-trip decompile/compilePoints; points=%s" % points) - allPoints = set(range(numPointsInGlyph)) - self.assertSetEqual(allPoints, decompile(compile(allPoints))) - - def test_compileDeltas(self): - gvar = GlyphVariation({}, [(0,0), (1, 0), (2, 0), (3, 3)]) - points = {1, 2} - # deltaX for points: [1, 2]; deltaY for points: [0, 0] - self.assertEqual("01 01 02 81", hexencode(gvar.compileDeltas(points))) - - def test_compileDeltaValues(self): - compileDeltaValues = lambda values: hexencode(GlyphVariation.compileDeltaValues_(values)) - # zeroes - self.assertEqual("80", compileDeltaValues([0])) - self.assertEqual("BF", compileDeltaValues([0] * 64)) - self.assertEqual("BF 80", compileDeltaValues([0] * 65)) - self.assertEqual("BF A3", compileDeltaValues([0] * 100)) - self.assertEqual("BF BF BF BF", compileDeltaValues([0] * 256)) - # bytes - self.assertEqual("00 01", compileDeltaValues([1])) - self.assertEqual("06 01 02 03 7F 80 FF FE", compileDeltaValues([1, 2, 3, 127, -128, -1, -2])) - self.assertEqual("3F" + (64 * " 7F"), compileDeltaValues([127] * 64)) - self.assertEqual("3F" + (64 * " 7F") + " 00 7F", compileDeltaValues([127] * 65)) - # words - self.assertEqual("40 66 66", compileDeltaValues([0x6666])) - self.assertEqual("43 66 66 7F FF FF FF 80 00", compileDeltaValues([0x6666, 32767, -1, -32768])) - self.assertEqual("7F" + (64 * " 11 22"), compileDeltaValues([0x1122] * 64)) - 
self.assertEqual("7F" + (64 * " 11 22") + " 40 11 22", compileDeltaValues([0x1122] * 65)) - # bytes, zeroes, bytes: a single zero is more compact when encoded as part of the bytes run - self.assertEqual("04 7F 7F 00 7F 7F", compileDeltaValues([127, 127, 0, 127, 127])) - self.assertEqual("01 7F 7F 81 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 127, 127])) - self.assertEqual("01 7F 7F 82 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 127, 127])) - self.assertEqual("01 7F 7F 83 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 0, 127, 127])) - # bytes, zeroes - self.assertEqual("01 01 00", compileDeltaValues([1, 0])) - self.assertEqual("00 01 81", compileDeltaValues([1, 0, 0])) - # words, bytes, words: a single byte is more compact when encoded as part of the words run - self.assertEqual("42 66 66 00 02 77 77", compileDeltaValues([0x6666, 2, 0x7777])) - self.assertEqual("40 66 66 01 02 02 40 77 77", compileDeltaValues([0x6666, 2, 2, 0x7777])) - # words, zeroes, words - self.assertEqual("40 66 66 80 40 77 77", compileDeltaValues([0x6666, 0, 0x7777])) - self.assertEqual("40 66 66 81 40 77 77", compileDeltaValues([0x6666, 0, 0, 0x7777])) - self.assertEqual("40 66 66 82 40 77 77", compileDeltaValues([0x6666, 0, 0, 0, 0x7777])) - # words, zeroes, bytes - self.assertEqual("40 66 66 80 02 01 02 03", compileDeltaValues([0x6666, 0, 1, 2, 3])) - self.assertEqual("40 66 66 81 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 1, 2, 3])) - self.assertEqual("40 66 66 82 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 0, 1, 2, 3])) - # words, zeroes - self.assertEqual("40 66 66 80", compileDeltaValues([0x6666, 0])) - self.assertEqual("40 66 66 81", compileDeltaValues([0x6666, 0, 0])) - - def test_decompileDeltas(self): - decompileDeltas = GlyphVariation.decompileDeltas_ - # 83 = zero values (0x80), count = 4 (1 + 0x83 & 0x3F) - self.assertEqual(([0, 0, 0, 0], 1), decompileDeltas(4, deHexStr("83"), 0)) - # 41 01 02 FF FF = signed 16-bit values (0x40), count = 2 (1 + 0x41 & 0x3F) 
- self.assertEqual(([258, -1], 5), decompileDeltas(2, deHexStr("41 01 02 FF FF"), 0)) - # 01 81 07 = signed 8-bit values, count = 2 (1 + 0x01 & 0x3F) - self.assertEqual(([-127, 7], 3), decompileDeltas(2, deHexStr("01 81 07"), 0)) - # combination of all three encodings, preceded and followed by 4 bytes of unused data - data = deHexStr("DE AD BE EF 83 40 01 02 01 81 80 DE AD BE EF") - self.assertEqual(([0, 0, 0, 0, 258, -127, -128], 11), decompileDeltas(7, data, 4)) - - def test_decompileDeltas_roundTrip(self): - numDeltas = 30 - compile = GlyphVariation.compileDeltaValues_ - decompile = lambda data: GlyphVariation.decompileDeltas_(numDeltas, data, 0)[0] - for i in range(50): - deltas = random.sample(range(-128, 127), 10) - deltas.extend(random.sample(range(-32768, 32767), 10)) - deltas.extend([0] * 10) - random.shuffle(deltas) - self.assertListEqual(deltas, decompile(compile(deltas))) - - def test_getTupleSize(self): - getTupleSize = GlyphVariation.getTupleSize_ - numAxes = 3 - self.assertEqual(4 + numAxes * 2, getTupleSize(0x8042, numAxes)) - self.assertEqual(4 + numAxes * 4, getTupleSize(0x4077, numAxes)) - self.assertEqual(4, getTupleSize(0x2077, numAxes)) - self.assertEqual(4, getTupleSize(11, numAxes)) - - @staticmethod - def xml_lines(writer): - content = writer.file.getvalue().decode("utf-8") - return [line.strip() for line in content.splitlines()][1:] - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_h_e_a_d.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_h_e_a_d.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_h_e_a_d.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_h_e_a_d.py 2018-01-08 12:40:40.000000000 +0000 @@ -5,9 +5,11 @@ from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat from . 
import DefaultTable -import warnings +import logging +log = logging.getLogger(__name__) + headFormat = """ > # big endian tableVersion: 16.16F @@ -31,13 +33,13 @@ class table__h_e_a_d(DefaultTable.DefaultTable): - dependencies = ['maxp', 'loca'] + dependencies = ['maxp', 'loca', 'CFF '] def decompile(self, data, ttFont): dummy, rest = sstruct.unpack2(headFormat, data, self) if rest: # this is quite illegal, but there seem to be fonts out there that do this - warnings.warn("extra bytes at the end of 'head' table") + log.warning("extra bytes at the end of 'head' table") assert rest == "\0\0" # For timestamp fields, ignore the top four bytes. Some fonts have @@ -48,15 +50,20 @@ for stamp in 'created', 'modified': value = getattr(self, stamp) if value > 0xFFFFFFFF: - warnings.warn("'%s' timestamp out of range; ignoring top bytes" % stamp) + log.warning("'%s' timestamp out of range; ignoring top bytes", stamp) value &= 0xFFFFFFFF setattr(self, stamp, value) if value < 0x7C259DC0: # January 1, 1970 00:00:00 - warnings.warn("'%s' timestamp seems very low; regarding as unix timestamp" % stamp) + log.warning("'%s' timestamp seems very low; regarding as unix timestamp", stamp) value += 0x7C259DC0 setattr(self, stamp, value) def compile(self, ttFont): + if ttFont.recalcBBoxes: + # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc(). 
+ if 'CFF ' in ttFont: + topDict = ttFont['CFF '].cff.topDictIndex[0] + self.xMin, self.yMin, self.xMax, self.yMax = topDict.FontBBox if ttFont.recalcTimestamp: self.modified = timestampNow() data = sstruct.pack(headFormat, self) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_h_h_e_a.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_h_h_e_a.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_h_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_h_h_e_a.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,11 +2,15 @@ from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval +from fontTools.misc.fixedTools import ( + ensureVersionIsLong as fi2ve, versionToFixed as ve2fi) from . import DefaultTable +import math + hheaFormat = """ > # big endian - tableVersion: 16.16F + tableVersion: L ascent: h descent: h lineGap: h @@ -30,29 +34,26 @@ # Note: Keep in sync with table__v_h_e_a - dependencies = ['hmtx', 'glyf'] + dependencies = ['hmtx', 'glyf', 'CFF '] def decompile(self, data, ttFont): sstruct.unpack(hheaFormat, data, self) def compile(self, ttFont): - if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')): self.recalc(ttFont) + self.tableVersion = fi2ve(self.tableVersion) return sstruct.pack(hheaFormat, self) def recalc(self, ttFont): - hmtxTable = ttFont['hmtx'] + if 'hmtx' in ttFont: + hmtxTable = ttFont['hmtx'] + self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values()) + + boundsWidthDict = {} if 'glyf' in ttFont: glyfTable = ttFont['glyf'] - INFINITY = 100000 - advanceWidthMax = 0 - minLeftSideBearing = +INFINITY # arbitrary big number - minRightSideBearing = +INFINITY # arbitrary big number - xMaxExtent = -INFINITY # arbitrary big negative number - for name in ttFont.getGlyphOrder(): - width, lsb = hmtxTable[name] - advanceWidthMax = max(advanceWidthMax, width) g = glyfTable[name] 
if g.numberOfContours == 0: continue @@ -60,32 +61,48 @@ # Composite glyph without extents set. # Calculate those. g.recalcBounds(glyfTable) + boundsWidthDict[name] = g.xMax - g.xMin + elif 'CFF ' in ttFont: + topDict = ttFont['CFF '].cff.topDictIndex[0] + for name in ttFont.getGlyphOrder(): + cs = topDict.CharStrings[name] + bounds = cs.calcBounds() + if bounds is not None: + boundsWidthDict[name] = int( + math.ceil(bounds[2]) - math.floor(bounds[0])) + + if boundsWidthDict: + minLeftSideBearing = float('inf') + minRightSideBearing = float('inf') + xMaxExtent = -float('inf') + for name, boundsWidth in boundsWidthDict.items(): + advanceWidth, lsb = hmtxTable[name] + rsb = advanceWidth - lsb - boundsWidth + extent = lsb + boundsWidth minLeftSideBearing = min(minLeftSideBearing, lsb) - rsb = width - lsb - (g.xMax - g.xMin) minRightSideBearing = min(minRightSideBearing, rsb) - extent = lsb + (g.xMax - g.xMin) xMaxExtent = max(xMaxExtent, extent) - - if xMaxExtent == -INFINITY: - # No glyph has outlines. - minLeftSideBearing = 0 - minRightSideBearing = 0 - xMaxExtent = 0 - - self.advanceWidthMax = advanceWidthMax self.minLeftSideBearing = minLeftSideBearing self.minRightSideBearing = minRightSideBearing self.xMaxExtent = xMaxExtent - else: - # XXX CFF recalc... - pass + + else: # No glyph has outlines. 
+ self.minLeftSideBearing = 0 + self.minRightSideBearing = 0 + self.xMaxExtent = 0 def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(hheaFormat) for name in names: value = getattr(self, name) + if name == "tableVersion": + value = fi2ve(value) + value = "0x%08x" % value writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): + if name == "tableVersion": + setattr(self, name, ve2fi(attrs["value"])) + return setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_h_m_t_x.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_h_m_t_x.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_h_m_t_x.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_h_m_t_x.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,10 +1,15 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +from fontTools import ttLib from fontTools.misc.textTools import safeEval from . import DefaultTable import sys +import struct import array -import warnings +import logging + + +log = logging.getLogger(__name__) class table__h_m_t_x(DefaultTable.DefaultTable): @@ -13,16 +18,22 @@ advanceName = 'width' sideBearingName = 'lsb' numberOfMetricsName = 'numberOfHMetrics' + longMetricFormat = 'Hh' def decompile(self, data, ttFont): numGlyphs = ttFont['maxp'].numGlyphs numberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName)) if numberOfMetrics > numGlyphs: - numberOfMetrics = numGlyphs # We warn later. - # Note: advanceWidth is unsigned, but we read/write as signed. 
- metrics = array.array("h", data[:4 * numberOfMetrics]) - if sys.byteorder != "big": - metrics.byteswap() + log.warning("The %s.%s exceeds the maxp.numGlyphs" % ( + self.headerTag, self.numberOfMetricsName)) + numberOfMetrics = numGlyphs + if len(data) < 4 * numberOfMetrics: + raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag) + # Note: advanceWidth is unsigned, but some font editors might + # read/write as signed. We can't be sure whether it was a mistake + # or not, so we read as unsigned but also issue a warning... + metricsFmt = ">" + self.longMetricFormat * numberOfMetrics + metrics = struct.unpack(metricsFmt, data[:4 * numberOfMetrics]) data = data[4 * numberOfMetrics:] numberOfSideBearings = numGlyphs - numberOfMetrics sideBearings = array.array("h", data[:2 * numberOfSideBearings]) @@ -31,21 +42,33 @@ if sys.byteorder != "big": sideBearings.byteswap() if data: - warnings.warn("too much 'hmtx'/'vmtx' table data") + log.warning("too much '%s' table data" % self.tableTag) self.metrics = {} glyphOrder = ttFont.getGlyphOrder() for i in range(numberOfMetrics): glyphName = glyphOrder[i] - self.metrics[glyphName] = list(metrics[i*2:i*2+2]) + advanceWidth, lsb = metrics[i*2:i*2+2] + if advanceWidth > 32767: + log.warning( + "Glyph %r has a huge advance %s (%d); is it intentional or " + "an (invalid) negative value?", glyphName, self.advanceName, + advanceWidth) + self.metrics[glyphName] = (advanceWidth, lsb) lastAdvance = metrics[-2] for i in range(numberOfSideBearings): glyphName = glyphOrder[i + numberOfMetrics] - self.metrics[glyphName] = [lastAdvance, sideBearings[i]] + self.metrics[glyphName] = (lastAdvance, sideBearings[i]) def compile(self, ttFont): metrics = [] + hasNegativeAdvances = False for glyphName in ttFont.getGlyphOrder(): - metrics.append(self.metrics[glyphName]) + advanceWidth, sideBearing = self.metrics[glyphName] + if advanceWidth < 0: + log.error("Glyph %r has negative advance %s" % ( + glyphName, self.advanceName)) + 
hasNegativeAdvances = True + metrics.append([advanceWidth, sideBearing]) lastAdvance = metrics[-1][0] lastIndex = len(metrics) while metrics[lastIndex-2][0] == lastAdvance: @@ -55,18 +78,24 @@ lastIndex = 1 break additionalMetrics = metrics[lastIndex:] - additionalMetrics = [sb for advance, sb in additionalMetrics] + additionalMetrics = [round(sb) for _, sb in additionalMetrics] metrics = metrics[:lastIndex] - setattr(ttFont[self.headerTag], self.numberOfMetricsName, len(metrics)) + numberOfMetrics = len(metrics) + setattr(ttFont[self.headerTag], self.numberOfMetricsName, numberOfMetrics) allMetrics = [] - for item in metrics: - allMetrics.extend(item) - allMetrics = array.array("h", allMetrics) - if sys.byteorder != "big": - allMetrics.byteswap() - data = allMetrics.tostring() - + for advance, sb in metrics: + allMetrics.extend([round(advance), round(sb)]) + metricsFmt = ">" + self.longMetricFormat * numberOfMetrics + try: + data = struct.pack(metricsFmt, *allMetrics) + except struct.error as e: + if "out of range" in str(e) and hasNegativeAdvances: + raise ttLib.TTLibError( + "'%s' table can't contain negative advance %ss" + % (self.tableTag, self.advanceName)) + else: + raise additionalMetrics = array.array("h", additionalMetrics) if sys.byteorder != "big": additionalMetrics.byteswap() @@ -88,8 +117,8 @@ if not hasattr(self, "metrics"): self.metrics = {} if name == "mtx": - self.metrics[attrs["name"]] = [safeEval(attrs[self.advanceName]), - safeEval(attrs[self.sideBearingName])] + self.metrics[attrs["name"]] = (safeEval(attrs[self.advanceName]), + safeEval(attrs[self.sideBearingName])) def __delitem__(self, glyphName): del self.metrics[glyphName] diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/H_V_A_R_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/H_V_A_R_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/H_V_A_R_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/H_V_A_R_.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 
@@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_H_V_A_R_(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/__init__.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/__init__.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -# DON'T EDIT! This file is generated by MetaTools/buildTableList.py. -def _moduleFinderHint(): - """Dummy function to let modulefinder know what tables may be - dynamically imported. Generated by MetaTools/buildTableList.py. - - >>> _moduleFinderHint() - """ - from . import B_A_S_E_ - from . import C_B_D_T_ - from . import C_B_L_C_ - from . import C_F_F_ - from . import C_O_L_R_ - from . import C_P_A_L_ - from . import D_S_I_G_ - from . import E_B_D_T_ - from . import E_B_L_C_ - from . import F_F_T_M_ - from . import G_D_E_F_ - from . import G_M_A_P_ - from . import G_P_K_G_ - from . import G_P_O_S_ - from . import G_S_U_B_ - from . import J_S_T_F_ - from . import L_T_S_H_ - from . import M_A_T_H_ - from . import M_E_T_A_ - from . import O_S_2f_2 - from . import S_I_N_G_ - from . import S_V_G_ - from . import T_S_I_B_ - from . import T_S_I_D_ - from . import T_S_I_J_ - from . import T_S_I_P_ - from . import T_S_I_S_ - from . import T_S_I_V_ - from . import T_S_I__0 - from . import T_S_I__1 - from . import T_S_I__2 - from . import T_S_I__3 - from . import T_S_I__5 - from . import V_D_M_X_ - from . import V_O_R_G_ - from . import _a_v_a_r - from . import _c_m_a_p - from . import _c_v_t - from . import _f_e_a_t - from . import _f_p_g_m - from . import _f_v_a_r - from . import _g_a_s_p - from . import _g_l_y_f - from . import _g_v_a_r - from . 
import _h_d_m_x - from . import _h_e_a_d - from . import _h_h_e_a - from . import _h_m_t_x - from . import _k_e_r_n - from . import _l_o_c_a - from . import _l_t_a_g - from . import _m_a_x_p - from . import _m_e_t_a - from . import _n_a_m_e - from . import _p_o_s_t - from . import _p_r_e_p - from . import _s_b_i_x - from . import _v_h_e_a - from . import _v_m_t_x - -if __name__ == "__main__": - import doctest, sys - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_k_e_r_n.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_k_e_r_n.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_k_e_r_n.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_k_e_r_n.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,18 +2,24 @@ from fontTools.misc.py23 import * from fontTools.ttLib import getSearchRange from fontTools.misc.textTools import safeEval, readHex -from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from fontTools.misc.fixedTools import ( + fixedToFloat as fi2fl, + floatToFixed as fl2fi) from . 
import DefaultTable import struct +import sys import array -import warnings +import logging + + +log = logging.getLogger(__name__) class table__k_e_r_n(DefaultTable.DefaultTable): def getkern(self, format): for subtable in self.kernTables: - if subtable.version == format: + if subtable.format == format: return subtable return None # not found @@ -29,21 +35,23 @@ else: self.version = version data = data[4:] - tablesIndex = [] self.kernTables = [] for i in range(nTables): if self.version == 1.0: # Apple - length, coverage, tupleIndex = struct.unpack(">lHH", data[:8]) - version = coverage & 0xff + length, coverage, subtableFormat = struct.unpack( + ">LBB", data[:6]) else: - version, length = struct.unpack(">HH", data[:4]) - length = int(length) - if version not in kern_classes: - subtable = KernTable_format_unkown(version) + # in OpenType spec the "version" field refers to the common + # subtable header; the actual subtable format is stored in + # the 8-15 mask bits of "coverage" field. + # This "version" is always 0 so we ignore it here + _, length, subtableFormat, coverage = struct.unpack( + ">HHBB", data[:6]) + if subtableFormat not in kern_classes: + subtable = KernTable_format_unkown(subtableFormat) else: - subtable = kern_classes[version]() - subtable.apple = apple + subtable = kern_classes[subtableFormat](apple) subtable.decompile(data[:length], ttFont) self.kernTables.append(subtable) data = data[length:] @@ -55,7 +63,7 @@ nTables = 0 if self.version == 1.0: # AAT Apple's "new" format. 
- data = struct.pack(">ll", fl2fi(self.version, 16), nTables) + data = struct.pack(">LL", fl2fi(self.version, 16), nTables) else: data = struct.pack(">HH", self.version, nTables) if hasattr(self, "kernTables"): @@ -81,80 +89,142 @@ if format not in kern_classes: subtable = KernTable_format_unkown(format) else: - subtable = kern_classes[format]() + apple = self.version == 1.0 + subtable = kern_classes[format](apple) self.kernTables.append(subtable) subtable.fromXML(name, attrs, content, ttFont) class KernTable_format_0(object): + # 'version' is kept for backward compatibility + version = format = 0 + + def __init__(self, apple=False): + self.apple = apple + def decompile(self, data, ttFont): - version, length, coverage = (0,0,0) if not self.apple: - version, length, coverage = struct.unpack(">HHH", data[:6]) + version, length, subtableFormat, coverage = struct.unpack( + ">HHBB", data[:6]) + if version != 0: + from fontTools.ttLib import TTLibError + raise TTLibError( + "unsupported kern subtable version: %d" % version) + tupleIndex = None + # Should we also assert length == len(data)? 
data = data[6:] else: - version, length, coverage = struct.unpack(">LHH", data[:8]) + length, coverage, subtableFormat, tupleIndex = struct.unpack( + ">LBBH", data[:8]) data = data[8:] - self.version, self.coverage = int(version), int(coverage) + assert self.format == subtableFormat, "unsupported format" + self.coverage = coverage + self.tupleIndex = tupleIndex self.kernTable = kernTable = {} - nPairs, searchRange, entrySelector, rangeShift = struct.unpack(">HHHH", data[:8]) + nPairs, searchRange, entrySelector, rangeShift = struct.unpack( + ">HHHH", data[:8]) data = data[8:] nPairs = min(nPairs, len(data) // 6) datas = array.array("H", data[:6 * nPairs]) - if sys.byteorder != "big": + if sys.byteorder != "big": # pragma: no cover datas.byteswap() it = iter(datas) glyphOrder = ttFont.getGlyphOrder() for k in range(nPairs): left, right, value = next(it), next(it), next(it) - if value >= 32768: value -= 65536 + if value >= 32768: + value -= 65536 try: kernTable[(glyphOrder[left], glyphOrder[right])] = value except IndexError: - # Slower, but will not throw an IndexError on an invalid glyph id. - kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = value - if len(data) > 6 * nPairs: - warnings.warn("excess data in 'kern' subtable: %d bytes" % len(data)) + # Slower, but will not throw an IndexError on an invalid + # glyph id. + kernTable[( + ttFont.getGlyphName(left), + ttFont.getGlyphName(right))] = value + if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess + log.warning( + "excess data in 'kern' subtable: %d bytes", + len(data) - 6 * nPairs) def compile(self, ttFont): nPairs = len(self.kernTable) searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6) - data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift) + data = struct.pack( + ">HHHH", nPairs, searchRange, entrySelector, rangeShift) # yeehee! 
(I mean, turn names into indices) try: reverseOrder = ttFont.getReverseGlyphMap() - kernTable = sorted((reverseOrder[left], reverseOrder[right], value) for ((left,right),value) in self.kernTable.items()) + kernTable = sorted( + (reverseOrder[left], reverseOrder[right], value) + for ((left, right), value) in self.kernTable.items()) except KeyError: # Slower, but will not throw KeyError on invalid glyph id. getGlyphID = ttFont.getGlyphID - kernTable = sorted((getGlyphID(left), getGlyphID(right), value) for ((left,right),value) in self.kernTable.items()) + kernTable = sorted( + (getGlyphID(left), getGlyphID(right), value) + for ((left, right), value) in self.kernTable.items()) for left, right, value in kernTable: data = data + struct.pack(">HHh", left, right, value) - return struct.pack(">HHH", self.version, len(data) + 6, self.coverage) + data + + if not self.apple: + version = 0 + length = len(data) + 6 + header = struct.pack( + ">HHBB", version, length, self.format, self.coverage) + else: + if self.tupleIndex is None: + # sensible default when compiling a TTX from an old fonttools + # or when inserting a Windows-style format 0 subtable into an + # Apple version=1.0 kern table + log.warning("'tupleIndex' is None; default to 0") + self.tupleIndex = 0 + length = len(data) + 8 + header = struct.pack( + ">LBBH", length, self.coverage, self.format, self.tupleIndex) + return header + data def toXML(self, writer, ttFont): - writer.begintag("kernsubtable", coverage=self.coverage, format=0) + attrs = dict(coverage=self.coverage, format=self.format) + if self.apple: + if self.tupleIndex is None: + log.warning("'tupleIndex' is None; default to 0") + attrs["tupleIndex"] = 0 + else: + attrs["tupleIndex"] = self.tupleIndex + writer.begintag("kernsubtable", **attrs) writer.newline() items = sorted(self.kernTable.items()) for (left, right), value in items: writer.simpletag("pair", [ - ("l", left), - ("r", right), - ("v", value) - ]) + ("l", left), + ("r", right), + ("v", value) + 
]) writer.newline() writer.endtag("kernsubtable") writer.newline() def fromXML(self, name, attrs, content, ttFont): self.coverage = safeEval(attrs["coverage"]) - self.version = safeEval(attrs["format"]) + subtableFormat = safeEval(attrs["format"]) + if self.apple: + if "tupleIndex" in attrs: + self.tupleIndex = safeEval(attrs["tupleIndex"]) + else: + # previous fontTools versions didn't export tupleIndex + log.warning( + "Apple kern subtable is missing 'tupleIndex' attribute") + self.tupleIndex = None + else: + self.tupleIndex = None + assert subtableFormat == self.format, "unsupported format" if not hasattr(self, "kernTable"): self.kernTable = {} for element in content: diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_k_e_r_n_test.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_k_e_r_n_test.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_k_e_r_n_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_k_e_r_n_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -from __future__ import print_function, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib -import unittest -from ._k_e_r_n import KernTable_format_0 - -class MockFont(object): - - def getGlyphOrder(self): - return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"] - - def getGlyphName(self, glyphID): - return "glyph%.5d" % glyphID - -class KernTable_format_0_Test(unittest.TestCase): - - def test_decompileBadGlyphId(self): - subtable = KernTable_format_0() - subtable.apple = False - subtable.decompile( b'\x00' * 6 - + b'\x00' + b'\x02' + b'\x00' * 6 - + b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01' - + b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02', - MockFont()) - self.assertEqual(subtable[("glyph00001", "glyph00003")], 1) - self.assertEqual(subtable[("glyph00001", "glyph65535")], 2) - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_l_c_a_r.py 
fonttools-3.21.2/Lib/fontTools/ttLib/tables/_l_c_a_r.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_l_c_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_l_c_a_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table__l_c_a_r(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_l_o_c_a.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_l_o_c_a.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_l_o_c_a.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_l_o_c_a.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,7 +3,11 @@ from . import DefaultTable import sys import array -import warnings +import logging + + +log = logging.getLogger(__name__) + class table__l_o_c_a(DefaultTable.DefaultTable): @@ -25,7 +29,8 @@ l.append(locations[i] * 2) locations = l if len(locations) < (ttFont['maxp'].numGlyphs + 1): - warnings.warn("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d" % (len(locations) - 1, ttFont['maxp'].numGlyphs)) + log.warning("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d", + len(locations) - 1, ttFont['maxp'].numGlyphs) self.locations = locations def compile(self, ttFont): diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_l_t_a_g.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_l_t_a_g.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_l_t_a_g.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_l_t_a_g.py 2018-01-08 12:40:40.000000000 +0000 @@ -7,6 +7,21 @@ # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html class table__l_t_a_g(DefaultTable.DefaultTable): + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.version, self.flags = 1, 0 + self.tags = [] + + def addTag(self, tag): + """Add 'tag' to 
the list of langauge tags if not already there. + + Returns the integer index of 'tag' in the list of all tags. + """ + try: + return self.tags.index(tag) + except ValueError: + self.tags.append(tag) + return len(self.tags) - 1 def decompile(self, data, ttFont): self.version, self.flags, numTags = struct.unpack(">LLL", data[:12]) @@ -28,7 +43,7 @@ stringPool = stringPool + tag offset = offset + 12 + len(self.tags) * 4 dataList.append(struct.pack(">HH", offset, len(tag))) - dataList.append(stringPool) + dataList.append(tobytes(stringPool)) return bytesjoin(dataList) def toXML(self, writer, ttFont): diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_l_t_a_g_test.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_l_t_a_g_test.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_l_t_a_g_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_l_t_a_g_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.xmlWriter import XMLWriter -import os -import struct -import unittest -from ._l_t_a_g import table__l_t_a_g - -class Test_l_t_a_g(unittest.TestCase): - - DATA_ = struct.pack(b">LLLHHHHHH", 1, 0, 3, 24 + 0, 2, 24 + 2, 7, 24 + 2, 2) + b"enzh-Hant" - TAGS_ = ["en", "zh-Hant", "zh"] - - def test_decompile_compile(self): - table = table__l_t_a_g() - table.decompile(self.DATA_, ttFont=None) - self.assertEqual(1, table.version) - self.assertEqual(0, table.flags) - self.assertEqual(self.TAGS_, table.tags) - self.assertEqual(self.DATA_, table.compile(ttFont=None)) - - def test_fromXML(self): - table = table__l_t_a_g() - table.fromXML("version", {"value": "1"}, content=None, ttFont=None) - table.fromXML("flags", {"value": "777"}, content=None, ttFont=None) - table.fromXML("LanguageTag", {"tag": "sr-Latn"}, content=None, ttFont=None) - table.fromXML("LanguageTag", {"tag": "fa"}, content=None, ttFont=None) - 
self.assertEqual(1, table.version) - self.assertEqual(777, table.flags) - self.assertEqual(["sr-Latn", "fa"], table.tags) - - def test_toXML(self): - writer = XMLWriter(BytesIO()) - table = table__l_t_a_g() - table.decompile(self.DATA_, ttFont=None) - table.toXML(writer, ttFont=None) - expected = os.linesep.join([ - '', - '', - '', - '', - '', - '' - ]) + os.linesep - self.assertEqual(expected.encode("utf_8"), writer.file.getvalue()) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_m_a_x_p.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_m_a_x_p.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_m_a_x_p.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_m_a_x_p.py 2018-01-08 12:40:40.000000000 +0000 @@ -73,12 +73,12 @@ maxCompositeContours = 0 maxComponentElements = 0 maxComponentDepth = 0 - allXMaxIsLsb = 1 + allXMinIsLsb = 1 for glyphName in ttFont.getGlyphOrder(): g = glyfTable[glyphName] if g.numberOfContours: if hmtxTable[glyphName][1] != g.xMin: - allXMaxIsLsb = 0 + allXMinIsLsb = 0 xMin = min(xMin, g.xMin) yMin = min(yMin, g.yMin) xMax = max(xMax, g.xMax) @@ -108,7 +108,7 @@ self.maxCompositePoints = maxCompositePoints self.maxCompositeContours = maxCompositeContours self.maxComponentDepth = maxComponentDepth - if allXMaxIsLsb: + if allXMinIsLsb: headTable.flags = headTable.flags | 0x2 else: headTable.flags = headTable.flags & ~0x2 diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_m_e_t_a.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_m_e_t_a.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_m_e_t_a.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_m_e_t_a.py 2018-01-08 12:40:40.000000000 +0000 @@ -16,15 +16,6 @@ numDataMaps: L """ -# According to Apple's spec, the dataMaps entries contain a dataOffset -# that is documented as "Offset from the beginning of the data section -# to the data for this tag". 
However, this is *not* the case with -# the fonts that Apple ships pre-installed on MacOS X Yosemite 10.10.4, -# and it also does not reflect how Apple's ftxdumperfuser tool is parsing -# the 'meta' table (tested ftxdumperfuser build 330, FontToolbox.framework -# build 187). Instead of what is claimed in the spec, the data maps contain -# a dataOffset relative to the very beginning of the 'meta' table. -# The dataOffset field of the 'meta' header apparently gets ignored. DATA_MAP_FORMAT = """ > # big endian @@ -35,7 +26,7 @@ class table__m_e_t_a(DefaultTable.DefaultTable): - def __init__(self, tag="meta"): + def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.data = {} @@ -54,6 +45,8 @@ tag = dataMap["tag"] offset = dataMap["dataOffset"] self.data[tag] = data[offset : offset + dataMap["dataLength"]] + if tag in ["dlng", "slng"]: + self.data[tag] = self.data[tag].decode("utf-8") def compile(self, ttFont): keys = sorted(self.data.keys()) @@ -68,7 +61,10 @@ dataMaps = [] dataBlocks = [] for tag in keys: - data = self.data[tag] + if tag in ["dlng", "slng"]: + data = self.data[tag].encode("utf-8") + else: + data = self.data[tag] dataMaps.append(sstruct.pack(DATA_MAP_FORMAT, { "tag": tag, "dataOffset": dataOffset, @@ -80,14 +76,24 @@ def toXML(self, writer, ttFont, progress=None): for tag in sorted(self.data.keys()): - writer.begintag("hexdata", tag=tag) - writer.newline() - writer.dumphex(self.data[tag]) - writer.endtag("hexdata") - writer.newline() + if tag in ["dlng", "slng"]: + writer.begintag("text", tag=tag) + writer.newline() + writer.write(self.data[tag]) + writer.newline() + writer.endtag("text") + writer.newline() + else: + writer.begintag("hexdata", tag=tag) + writer.newline() + writer.dumphex(self.data[tag]) + writer.endtag("hexdata") + writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "hexdata": self.data[attrs["tag"]] = readHex(content) + elif name == "text" and attrs["tag"] in ["dlng", "slng"]: + 
self.data[attrs["tag"]] = strjoin(content).strip() else: raise TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_m_e_t_a_test.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_m_e_t_a_test.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_m_e_t_a_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_m_e_t_a_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._m_e_t_a import table__m_e_t_a -import unittest - - -# From a real font on MacOS X, but substituted 'bild' tag by 'TEST', -# and shortened the payload. Note that from the 'meta' spec, one would -# expect that header.dataOffset is 0x0000001C (pointing to the beginning -# of the data section) and that dataMap[0].dataOffset should be 0 (relative -# to the beginning of the data section). However, in the fonts that Apple -# ships on MacOS X 10.10.4, dataMap[0].dataOffset is actually relative -# to the beginning of the 'meta' table, i.e. 0x0000001C again. While the -# following test data is invalid according to the 'meta' specification, -# it is reflecting the 'meta' table structure in all Apple-supplied fonts. 
-META_DATA = deHexStr( - "00 00 00 01 00 00 00 00 00 00 00 1C 00 00 00 01 " - "54 45 53 54 00 00 00 1C 00 00 00 04 CA FE BE EF") - - -class MetaTableTest(unittest.TestCase): - def test_decompile(self): - table = table__m_e_t_a() - table.decompile(META_DATA, ttFont={"meta": table}) - self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) - - def test_compile(self): - table = table__m_e_t_a() - table.data["TEST"] = b"\xCA\xFE\xBE\xEF" - self.assertEqual(META_DATA, table.compile(ttFont={"meta": table})) - - def test_toXML(self): - table = table__m_e_t_a() - table.data["TEST"] = b"\xCA\xFE\xBE\xEF" - writer = XMLWriter(BytesIO()) - table.toXML(writer, {"meta": table}) - xml = writer.file.getvalue().decode("utf-8") - self.assertEqual([ - '', - 'cafebeef', - '' - ], [line.strip() for line in xml.splitlines()][1:]) - - def test_fromXML(self): - table = table__m_e_t_a() - table.fromXML("hexdata", {"tag": "TEST"}, ['cafebeef'], ttFont=None) - self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_m_o_r_t.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_m_o_r_t.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_m_o_r_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_m_o_r_t.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6mort.html +class table__m_o_r_t(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_m_o_r_x.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_m_o_r_x.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_m_o_r_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_m_o_r_x.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from 
__future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html +class table__m_o_r_x(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/M_V_A_R_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/M_V_A_R_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/M_V_A_R_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/M_V_A_R_.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_M_V_A_R_(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_n_a_m_e.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_n_a_m_e.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_n_a_m_e.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_n_a_m_e.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,10 +1,17 @@ +# -*- coding: utf-8 -*- from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval from fontTools.misc.encodingTools import getEncoding +from fontTools.ttLib import newTable from . 
import DefaultTable import struct +import logging + + +log = logging.getLogger(__name__) nameRecordFormat = """ > # big endian @@ -20,22 +27,27 @@ class table__n_a_m_e(DefaultTable.DefaultTable): + dependencies = ["ltag"] def decompile(self, data, ttFont): - format, n, stringOffset = struct.unpack(">HHH", data[:6]) + format, n, stringOffset = struct.unpack(b">HHH", data[:6]) expectedStringOffset = 6 + n * nameRecordSize if stringOffset != expectedStringOffset: - # XXX we need a warn function - print("Warning: 'name' table stringOffset incorrect. Expected: %s; Actual: %s" % (expectedStringOffset, stringOffset)) + log.error( + "'name' table stringOffset incorrect. Expected: %s; Actual: %s", + expectedStringOffset, stringOffset) stringData = data[stringOffset:] data = data[6:] self.names = [] for i in range(n): if len(data) < 12: - # compensate for buggy font - break + log.error('skipping malformed name record #%d', i) + continue name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord()) name.string = stringData[name.offset:name.offset+name.length] + if name.offset + name.length > len(stringData): + log.error('skipping malformed name record #%d', i) + continue assert len(name.string) == name.length #if (name.platEncID, name.platformID) in ((0, 0), (1, 3)): # if len(name.string) % 2: @@ -55,7 +67,7 @@ format = 0 n = len(names) stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat) - data = struct.pack(">HHH", format, n, stringOffset) + data = struct.pack(b">HHH", format, n, stringOffset) lastoffset = 0 done = {} # remember the data so we can reuse the "pointers" for name in names: @@ -111,6 +123,186 @@ else: return None + def setName(self, string, nameID, platformID, platEncID, langID): + """ Set the 'string' for the name record identified by 'nameID', 'platformID', + 'platEncID' and 'langID'. If a record with that nameID doesn't exist, create it + and append to the name table. + + 'string' can be of type `str` (`unicode` in PY2) or `bytes`. 
In the latter case, + it is assumed to be already encoded with the correct plaform-specific encoding + identified by the (platformID, platEncID, langID) triplet. A warning is issued + to prevent unexpected results. + """ + if not hasattr(self, 'names'): + self.names = [] + if not isinstance(string, unicode): + if isinstance(string, bytes): + log.warning( + "name string is bytes, ensure it's correctly encoded: %r", string) + else: + raise TypeError( + "expected unicode or bytes, found %s: %r" % ( + type(string).__name__, string)) + namerecord = self.getName(nameID, platformID, platEncID, langID) + if namerecord: + namerecord.string = string + else: + self.names.append(makeName(string, nameID, platformID, platEncID, langID)) + + def _findUnusedNameID(self, minNameID=256): + """Finds an unused name id. + + The nameID is assigned in the range between 'minNameID' and 32767 (inclusive), + following the last nameID in the name table. + """ + names = getattr(self, 'names', []) + nameID = 1 + max([n.nameID for n in names] + [minNameID - 1]) + if nameID > 32767: + raise ValueError("nameID must be less than 32768") + return nameID + + def addMultilingualName(self, names, ttFont=None, nameID=None): + """Add a multilingual name, returning its name ID + + 'names' is a dictionary with the name in multiple languages, + such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}. + The keys can be arbitrary IETF BCP 47 language codes; + the values are Unicode strings. + + 'ttFont' is the TTFont to which the names are added, or None. + If present, the font's 'ltag' table can get populated + to store exotic language codes, which allows encoding + names that otherwise cannot get encoded at all. + + 'nameID' is the name ID to be used, or None to let the library + pick an unused name ID. + """ + if not hasattr(self, 'names'): + self.names = [] + if nameID is None: + nameID = self._findUnusedNameID() + # TODO: Should minimize BCP 47 language codes. 
+ # https://github.com/fonttools/fonttools/issues/930 + for lang, name in sorted(names.items()): + # Apple platforms have been recognizing Windows names + # since early OSX (~2001), so we only add names + # for the Macintosh platform when we cannot not make + # a Windows name. This can happen for exotic BCP47 + # language tags that have no Windows language code. + windowsName = _makeWindowsName(name, nameID, lang) + if windowsName is not None: + self.names.append(windowsName) + else: + macName = _makeMacName(name, nameID, lang, ttFont) + if macName is not None: + self.names.append(macName) + return nameID + + def addName(self, string, platforms=((1, 0, 0), (3, 1, 0x409)), minNameID=255): + """ Add a new name record containing 'string' for each (platformID, platEncID, + langID) tuple specified in the 'platforms' list. + + The nameID is assigned in the range between 'minNameID'+1 and 32767 (inclusive), + following the last nameID in the name table. + If no 'platforms' are specified, two English name records are added, one for the + Macintosh (platformID=0), and one for the Windows platform (3). + + The 'string' must be a Unicode string, so it can be encoded with different, + platform-specific encodings. + + Return the new nameID. 
+ """ + assert len(platforms) > 0, \ + "'platforms' must contain at least one (platformID, platEncID, langID) tuple" + if not hasattr(self, 'names'): + self.names = [] + if not isinstance(string, unicode): + raise TypeError( + "expected %s, found %s: %r" % ( + unicode.__name__, type(string).__name__,string )) + nameID = self._findUnusedNameID(minNameID + 1) + for platformID, platEncID, langID in platforms: + self.names.append(makeName(string, nameID, platformID, platEncID, langID)) + return nameID + + +def makeName(string, nameID, platformID, platEncID, langID): + name = NameRecord() + name.string, name.nameID, name.platformID, name.platEncID, name.langID = ( + string, nameID, platformID, platEncID, langID) + return name + + +def _makeWindowsName(name, nameID, language): + """Create a NameRecord for the Microsoft Windows platform + + 'language' is an arbitrary IETF BCP 47 language identifier such + as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. If Microsoft Windows + does not support the desired language, the result will be None. + Future versions of fonttools might return a NameRecord for the + OpenType 'name' table format 1, but this is not implemented yet. + """ + langID = _WINDOWS_LANGUAGE_CODES.get(language.lower()) + if langID is not None: + return makeName(name, nameID, 3, 1, langID) + else: + log.warning("cannot add Windows name in language %s " + "because fonttools does not yet support " + "name table format 1" % language) + return None + + +def _makeMacName(name, nameID, language, font=None): + """Create a NameRecord for Apple platforms + + 'language' is an arbitrary IETF BCP 47 language identifier such + as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we + create a Macintosh NameRecord that is understood by old applications + (platform ID 1 and an old-style Macintosh language enum). If this + is not possible, we create a Unicode NameRecord (platform ID 0) + whose language points to the font’s 'ltag' table. 
The latter + can encode any string in any language, but legacy applications + might not recognize the format (in which case they will ignore + those names). + + 'font' should be the TTFont for which you want to create a name. + If 'font' is None, we only return NameRecords for legacy Macintosh; + in that case, the result will be None for names that need to + be encoded with an 'ltag' table. + + See the section “The language identifier” in Apple’s specification: + https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html + """ + macLang = _MAC_LANGUAGE_CODES.get(language.lower()) + macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang) + if macLang is not None and macScript is not None: + encoding = getEncoding(1, macScript, macLang, default="ascii") + # Check if we can actually encode this name. If we can't, + # for example because we have no support for the legacy + # encoding, or because the name string contains Unicode + # characters that the legacy encoding cannot represent, + # we fall back to encoding the name in Unicode and put + # the language tag into the ltag table. 
+ try: + _ = tobytes(name, encoding, errors="strict") + return makeName(name, nameID, 1, macScript, macLang) + except UnicodeEncodeError: + pass + if font is not None: + ltag = font.tables.get("ltag") + if ltag is None: + ltag = font["ltag"] = newTable("ltag") + # 0 = Unicode; 4 = “Unicode 2.0 or later semantics (non-BMP characters allowed)” + # “The preferred platform-specific code for Unicode would be 3 or 4.” + # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html + return makeName(name, nameID, 0, 4, ltag.addTag(language)) + else: + log.warning("cannot store language %s into 'ltag' table " + "without having access to the TTFont object" % + language) + return None + + class NameRecord(object): def getEncoding(self, default='ascii'): @@ -125,10 +317,7 @@ return self.getEncoding(None) in ['utf_16_be', 'ucs2be', 'ascii', 'latin1'] def __str__(self): - try: - return self.toUnicode() - except UnicodeDecodeError: - return str(self.string) + return self.toStr(errors='backslashreplace') def isUnicode(self): return (self.platformID == 0 or @@ -198,6 +387,14 @@ """ return tobytes(self.string, encoding=self.getEncoding(), errors=errors) + def toStr(self, errors='strict'): + if str == bytes: + # python 2 + return self.toBytes(errors) + else: + # python 3 + return self.toUnicode(errors) + def toXML(self, writer, ttFont): try: unistr = self.toUnicode() @@ -260,3 +457,493 @@ def __repr__(self): return "" % ( self.nameID, self.platformID, self.langID) + + +# Windows language ID → IETF BCP-47 language tag +# +# While Microsoft indicates a region/country for all its language +# IDs, we follow Unicode practice by omitting “most likely subtags” +# as per Unicode CLDR. For example, English is simply “en” and not +# “en-Latn” because according to Unicode, the default script +# for English is Latin. 
+# +# http://www.unicode.org/cldr/charts/latest/supplemental/likely_subtags.html +# http://www.iana.org/assignments/language-subtag-registry/language-subtag-registry +_WINDOWS_LANGUAGES = { + 0x0436: 'af', + 0x041C: 'sq', + 0x0484: 'gsw', + 0x045E: 'am', + 0x1401: 'ar-DZ', + 0x3C01: 'ar-BH', + 0x0C01: 'ar', + 0x0801: 'ar-IQ', + 0x2C01: 'ar-JO', + 0x3401: 'ar-KW', + 0x3001: 'ar-LB', + 0x1001: 'ar-LY', + 0x1801: 'ary', + 0x2001: 'ar-OM', + 0x4001: 'ar-QA', + 0x0401: 'ar-SA', + 0x2801: 'ar-SY', + 0x1C01: 'aeb', + 0x3801: 'ar-AE', + 0x2401: 'ar-YE', + 0x042B: 'hy', + 0x044D: 'as', + 0x082C: 'az-Cyrl', + 0x042C: 'az', + 0x046D: 'ba', + 0x042D: 'eu', + 0x0423: 'be', + 0x0845: 'bn', + 0x0445: 'bn-IN', + 0x201A: 'bs-Cyrl', + 0x141A: 'bs', + 0x047E: 'br', + 0x0402: 'bg', + 0x0403: 'ca', + 0x0C04: 'zh-HK', + 0x1404: 'zh-MO', + 0x0804: 'zh', + 0x1004: 'zh-SG', + 0x0404: 'zh-TW', + 0x0483: 'co', + 0x041A: 'hr', + 0x101A: 'hr-BA', + 0x0405: 'cs', + 0x0406: 'da', + 0x048C: 'prs', + 0x0465: 'dv', + 0x0813: 'nl-BE', + 0x0413: 'nl', + 0x0C09: 'en-AU', + 0x2809: 'en-BZ', + 0x1009: 'en-CA', + 0x2409: 'en-029', + 0x4009: 'en-IN', + 0x1809: 'en-IE', + 0x2009: 'en-JM', + 0x4409: 'en-MY', + 0x1409: 'en-NZ', + 0x3409: 'en-PH', + 0x4809: 'en-SG', + 0x1C09: 'en-ZA', + 0x2C09: 'en-TT', + 0x0809: 'en-GB', + 0x0409: 'en', + 0x3009: 'en-ZW', + 0x0425: 'et', + 0x0438: 'fo', + 0x0464: 'fil', + 0x040B: 'fi', + 0x080C: 'fr-BE', + 0x0C0C: 'fr-CA', + 0x040C: 'fr', + 0x140C: 'fr-LU', + 0x180C: 'fr-MC', + 0x100C: 'fr-CH', + 0x0462: 'fy', + 0x0456: 'gl', + 0x0437: 'ka', + 0x0C07: 'de-AT', + 0x0407: 'de', + 0x1407: 'de-LI', + 0x1007: 'de-LU', + 0x0807: 'de-CH', + 0x0408: 'el', + 0x046F: 'kl', + 0x0447: 'gu', + 0x0468: 'ha', + 0x040D: 'he', + 0x0439: 'hi', + 0x040E: 'hu', + 0x040F: 'is', + 0x0470: 'ig', + 0x0421: 'id', + 0x045D: 'iu', + 0x085D: 'iu-Latn', + 0x083C: 'ga', + 0x0434: 'xh', + 0x0435: 'zu', + 0x0410: 'it', + 0x0810: 'it-CH', + 0x0411: 'ja', + 0x044B: 'kn', + 0x043F: 'kk', + 0x0453: 'km', + 
0x0486: 'quc', + 0x0487: 'rw', + 0x0441: 'sw', + 0x0457: 'kok', + 0x0412: 'ko', + 0x0440: 'ky', + 0x0454: 'lo', + 0x0426: 'lv', + 0x0427: 'lt', + 0x082E: 'dsb', + 0x046E: 'lb', + 0x042F: 'mk', + 0x083E: 'ms-BN', + 0x043E: 'ms', + 0x044C: 'ml', + 0x043A: 'mt', + 0x0481: 'mi', + 0x047A: 'arn', + 0x044E: 'mr', + 0x047C: 'moh', + 0x0450: 'mn', + 0x0850: 'mn-CN', + 0x0461: 'ne', + 0x0414: 'nb', + 0x0814: 'nn', + 0x0482: 'oc', + 0x0448: 'or', + 0x0463: 'ps', + 0x0415: 'pl', + 0x0416: 'pt', + 0x0816: 'pt-PT', + 0x0446: 'pa', + 0x046B: 'qu-BO', + 0x086B: 'qu-EC', + 0x0C6B: 'qu', + 0x0418: 'ro', + 0x0417: 'rm', + 0x0419: 'ru', + 0x243B: 'smn', + 0x103B: 'smj-NO', + 0x143B: 'smj', + 0x0C3B: 'se-FI', + 0x043B: 'se', + 0x083B: 'se-SE', + 0x203B: 'sms', + 0x183B: 'sma-NO', + 0x1C3B: 'sms', + 0x044F: 'sa', + 0x1C1A: 'sr-Cyrl-BA', + 0x0C1A: 'sr', + 0x181A: 'sr-Latn-BA', + 0x081A: 'sr-Latn', + 0x046C: 'nso', + 0x0432: 'tn', + 0x045B: 'si', + 0x041B: 'sk', + 0x0424: 'sl', + 0x2C0A: 'es-AR', + 0x400A: 'es-BO', + 0x340A: 'es-CL', + 0x240A: 'es-CO', + 0x140A: 'es-CR', + 0x1C0A: 'es-DO', + 0x300A: 'es-EC', + 0x440A: 'es-SV', + 0x100A: 'es-GT', + 0x480A: 'es-HN', + 0x080A: 'es-MX', + 0x4C0A: 'es-NI', + 0x180A: 'es-PA', + 0x3C0A: 'es-PY', + 0x280A: 'es-PE', + 0x500A: 'es-PR', + + # Microsoft has defined two different language codes for + # “Spanish with modern sorting” and “Spanish with traditional + # sorting”. This makes sense for collation APIs, and it would be + # possible to express this in BCP 47 language tags via Unicode + # extensions (eg., “es-u-co-trad” is “Spanish with traditional + # sorting”). However, for storing names in fonts, this distinction + # does not make sense, so we use “es” in both cases. 
+ 0x0C0A: 'es', + 0x040A: 'es', + + 0x540A: 'es-US', + 0x380A: 'es-UY', + 0x200A: 'es-VE', + 0x081D: 'sv-FI', + 0x041D: 'sv', + 0x045A: 'syr', + 0x0428: 'tg', + 0x085F: 'tzm', + 0x0449: 'ta', + 0x0444: 'tt', + 0x044A: 'te', + 0x041E: 'th', + 0x0451: 'bo', + 0x041F: 'tr', + 0x0442: 'tk', + 0x0480: 'ug', + 0x0422: 'uk', + 0x042E: 'hsb', + 0x0420: 'ur', + 0x0843: 'uz-Cyrl', + 0x0443: 'uz', + 0x042A: 'vi', + 0x0452: 'cy', + 0x0488: 'wo', + 0x0485: 'sah', + 0x0478: 'ii', + 0x046A: 'yo', +} + + +_MAC_LANGUAGES = { + 0: 'en', + 1: 'fr', + 2: 'de', + 3: 'it', + 4: 'nl', + 5: 'sv', + 6: 'es', + 7: 'da', + 8: 'pt', + 9: 'no', + 10: 'he', + 11: 'ja', + 12: 'ar', + 13: 'fi', + 14: 'el', + 15: 'is', + 16: 'mt', + 17: 'tr', + 18: 'hr', + 19: 'zh-Hant', + 20: 'ur', + 21: 'hi', + 22: 'th', + 23: 'ko', + 24: 'lt', + 25: 'pl', + 26: 'hu', + 27: 'es', + 28: 'lv', + 29: 'se', + 30: 'fo', + 31: 'fa', + 32: 'ru', + 33: 'zh', + 34: 'nl-BE', + 35: 'ga', + 36: 'sq', + 37: 'ro', + 38: 'cz', + 39: 'sk', + 40: 'sl', + 41: 'yi', + 42: 'sr', + 43: 'mk', + 44: 'bg', + 45: 'uk', + 46: 'be', + 47: 'uz', + 48: 'kk', + 49: 'az-Cyrl', + 50: 'az-Arab', + 51: 'hy', + 52: 'ka', + 53: 'mo', + 54: 'ky', + 55: 'tg', + 56: 'tk', + 57: 'mn-CN', + 58: 'mn', + 59: 'ps', + 60: 'ks', + 61: 'ku', + 62: 'sd', + 63: 'bo', + 64: 'ne', + 65: 'sa', + 66: 'mr', + 67: 'bn', + 68: 'as', + 69: 'gu', + 70: 'pa', + 71: 'or', + 72: 'ml', + 73: 'kn', + 74: 'ta', + 75: 'te', + 76: 'si', + 77: 'my', + 78: 'km', + 79: 'lo', + 80: 'vi', + 81: 'id', + 82: 'tl', + 83: 'ms', + 84: 'ms-Arab', + 85: 'am', + 86: 'ti', + 87: 'om', + 88: 'so', + 89: 'sw', + 90: 'rw', + 91: 'rn', + 92: 'ny', + 93: 'mg', + 94: 'eo', + 128: 'cy', + 129: 'eu', + 130: 'ca', + 131: 'la', + 132: 'qu', + 133: 'gn', + 134: 'ay', + 135: 'tt', + 136: 'ug', + 137: 'dz', + 138: 'jv', + 139: 'su', + 140: 'gl', + 141: 'af', + 142: 'br', + 143: 'iu', + 144: 'gd', + 145: 'gv', + 146: 'ga', + 147: 'to', + 148: 'el-polyton', + 149: 'kl', + 150: 'az', + 151: 'nn', +} + + 
+_WINDOWS_LANGUAGE_CODES = {lang.lower(): code for code, lang in _WINDOWS_LANGUAGES.items()} +_MAC_LANGUAGE_CODES = {lang.lower(): code for code, lang in _MAC_LANGUAGES.items()} + + +# MacOS language ID → MacOS script ID +# +# Note that the script ID is not sufficient to determine what encoding +# to use in TrueType files. For some languages, MacOS used a modification +# of a mainstream script. For example, an Icelandic name would be stored +# with smRoman in the TrueType naming table, but the actual encoding +# is a special Icelandic version of the normal Macintosh Roman encoding. +# As another example, Inuktitut uses an 8-bit encoding for Canadian Aboriginal +# Syllables but MacOS had run out of available script codes, so this was +# done as a (pretty radical) “modification” of Ethiopic. +# +# http://unicode.org/Public/MAPPINGS/VENDORS/APPLE/Readme.txt +_MAC_LANGUAGE_TO_SCRIPT = { + 0: 0, # langEnglish → smRoman + 1: 0, # langFrench → smRoman + 2: 0, # langGerman → smRoman + 3: 0, # langItalian → smRoman + 4: 0, # langDutch → smRoman + 5: 0, # langSwedish → smRoman + 6: 0, # langSpanish → smRoman + 7: 0, # langDanish → smRoman + 8: 0, # langPortuguese → smRoman + 9: 0, # langNorwegian → smRoman + 10: 5, # langHebrew → smHebrew + 11: 1, # langJapanese → smJapanese + 12: 4, # langArabic → smArabic + 13: 0, # langFinnish → smRoman + 14: 6, # langGreek → smGreek + 15: 0, # langIcelandic → smRoman (modified) + 16: 0, # langMaltese → smRoman + 17: 0, # langTurkish → smRoman (modified) + 18: 0, # langCroatian → smRoman (modified) + 19: 2, # langTradChinese → smTradChinese + 20: 4, # langUrdu → smArabic + 21: 9, # langHindi → smDevanagari + 22: 21, # langThai → smThai + 23: 3, # langKorean → smKorean + 24: 29, # langLithuanian → smCentralEuroRoman + 25: 29, # langPolish → smCentralEuroRoman + 26: 29, # langHungarian → smCentralEuroRoman + 27: 29, # langEstonian → smCentralEuroRoman + 28: 29, # langLatvian → smCentralEuroRoman + 29: 0, # langSami → smRoman + 30: 0, # 
langFaroese → smRoman (modified) + 31: 4, # langFarsi → smArabic (modified) + 32: 7, # langRussian → smCyrillic + 33: 25, # langSimpChinese → smSimpChinese + 34: 0, # langFlemish → smRoman + 35: 0, # langIrishGaelic → smRoman (modified) + 36: 0, # langAlbanian → smRoman + 37: 0, # langRomanian → smRoman (modified) + 38: 29, # langCzech → smCentralEuroRoman + 39: 29, # langSlovak → smCentralEuroRoman + 40: 0, # langSlovenian → smRoman (modified) + 41: 5, # langYiddish → smHebrew + 42: 7, # langSerbian → smCyrillic + 43: 7, # langMacedonian → smCyrillic + 44: 7, # langBulgarian → smCyrillic + 45: 7, # langUkrainian → smCyrillic (modified) + 46: 7, # langByelorussian → smCyrillic + 47: 7, # langUzbek → smCyrillic + 48: 7, # langKazakh → smCyrillic + 49: 7, # langAzerbaijani → smCyrillic + 50: 4, # langAzerbaijanAr → smArabic + 51: 24, # langArmenian → smArmenian + 52: 23, # langGeorgian → smGeorgian + 53: 7, # langMoldavian → smCyrillic + 54: 7, # langKirghiz → smCyrillic + 55: 7, # langTajiki → smCyrillic + 56: 7, # langTurkmen → smCyrillic + 57: 27, # langMongolian → smMongolian + 58: 7, # langMongolianCyr → smCyrillic + 59: 4, # langPashto → smArabic + 60: 4, # langKurdish → smArabic + 61: 4, # langKashmiri → smArabic + 62: 4, # langSindhi → smArabic + 63: 26, # langTibetan → smTibetan + 64: 9, # langNepali → smDevanagari + 65: 9, # langSanskrit → smDevanagari + 66: 9, # langMarathi → smDevanagari + 67: 13, # langBengali → smBengali + 68: 13, # langAssamese → smBengali + 69: 11, # langGujarati → smGujarati + 70: 10, # langPunjabi → smGurmukhi + 71: 12, # langOriya → smOriya + 72: 17, # langMalayalam → smMalayalam + 73: 16, # langKannada → smKannada + 74: 14, # langTamil → smTamil + 75: 15, # langTelugu → smTelugu + 76: 18, # langSinhalese → smSinhalese + 77: 19, # langBurmese → smBurmese + 78: 20, # langKhmer → smKhmer + 79: 22, # langLao → smLao + 80: 30, # langVietnamese → smVietnamese + 81: 0, # langIndonesian → smRoman + 82: 0, # langTagalog → smRoman + 83: 0, 
# langMalayRoman → smRoman + 84: 4, # langMalayArabic → smArabic + 85: 28, # langAmharic → smEthiopic + 86: 28, # langTigrinya → smEthiopic + 87: 28, # langOromo → smEthiopic + 88: 0, # langSomali → smRoman + 89: 0, # langSwahili → smRoman + 90: 0, # langKinyarwanda → smRoman + 91: 0, # langRundi → smRoman + 92: 0, # langNyanja → smRoman + 93: 0, # langMalagasy → smRoman + 94: 0, # langEsperanto → smRoman + 128: 0, # langWelsh → smRoman (modified) + 129: 0, # langBasque → smRoman + 130: 0, # langCatalan → smRoman + 131: 0, # langLatin → smRoman + 132: 0, # langQuechua → smRoman + 133: 0, # langGuarani → smRoman + 134: 0, # langAymara → smRoman + 135: 7, # langTatar → smCyrillic + 136: 4, # langUighur → smArabic + 137: 26, # langDzongkha → smTibetan + 138: 0, # langJavaneseRom → smRoman + 139: 0, # langSundaneseRom → smRoman + 140: 0, # langGalician → smRoman + 141: 0, # langAfrikaans → smRoman + 142: 0, # langBreton → smRoman (modified) + 143: 28, # langInuktitut → smEthiopic (modified) + 144: 0, # langScottishGaelic → smRoman (modified) + 145: 0, # langManxGaelic → smRoman (modified) + 146: 0, # langIrishGaelicScript → smRoman (modified) + 147: 0, # langTongan → smRoman + 148: 6, # langGreekAncient → smRoman + 149: 0, # langGreenlandic → smRoman + 150: 0, # langAzerbaijanRoman → smRoman + 151: 0, # langNynorsk → smRoman +} diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_n_a_m_e_test.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_n_a_m_e_test.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_n_a_m_e_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_n_a_m_e_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,143 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.xmlWriter import XMLWriter -import unittest -from ._n_a_m_e import table__n_a_m_e, NameRecord - - -def makeName(text, nameID, platformID, 
platEncID, langID): - name = NameRecord() - name.nameID, name.platformID, name.platEncID, name.langID = ( - nameID, platformID, platEncID, langID) - name.string = tobytes(text, encoding=name.getEncoding()) - return name - - -class NameTableTest(unittest.TestCase): - - def test_getDebugName(self): - table = table__n_a_m_e() - table.names = [ - makeName("Bold", 258, 1, 0, 0), # Mac, MacRoman, English - makeName("Gras", 258, 1, 0, 1), # Mac, MacRoman, French - makeName("Fett", 258, 1, 0, 2), # Mac, MacRoman, German - makeName("Sem Fracções", 292, 1, 0, 8) # Mac, MacRoman, Portuguese - ] - self.assertEqual("Bold", table.getDebugName(258)) - self.assertEqual("Sem Fracções", table.getDebugName(292)) - self.assertEqual(None, table.getDebugName(999)) - - -class NameRecordTest(unittest.TestCase): - - def test_toUnicode_utf16be(self): - name = makeName("Foo Bold", 111, 0, 2, 7) - self.assertEqual("utf_16_be", name.getEncoding()) - self.assertEqual("Foo Bold", name.toUnicode()) - - def test_toUnicode_macroman(self): - name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman - self.assertEqual("mac_roman", name.getEncoding()) - self.assertEqual("Foo Italic", name.toUnicode()) - - def test_toUnicode_macromanian(self): - name = makeName(b"Foo Italic\xfb", 222, 1, 0, 37) # Mac Romanian - self.assertEqual("mac_romanian", name.getEncoding()) - self.assertEqual("Foo Italic"+unichr(0x02DA), name.toUnicode()) - - def test_toUnicode_UnicodeDecodeError(self): - name = makeName(b"\1", 111, 0, 2, 7) - self.assertEqual("utf_16_be", name.getEncoding()) - self.assertRaises(UnicodeDecodeError, name.toUnicode) - - def toXML(self, name): - writer = XMLWriter(BytesIO()) - name.toXML(writer, ttFont=None) - xml = writer.file.getvalue().decode("utf_8").strip() - return xml.split(writer.newlinestr.decode("utf_8"))[1:] - - def test_toXML_utf16be(self): - name = makeName("Foo Bold", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Foo Bold', - '' - ], self.toXML(name)) - - def 
test_toXML_utf16be_odd_length1(self): - name = makeName(b"\0F\0o\0o\0", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Foo', - '' - ], self.toXML(name)) - - def test_toXML_utf16be_odd_length2(self): - name = makeName(b"\0Fooz", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Fooz', - '' - ], self.toXML(name)) - - def test_toXML_utf16be_double_encoded(self): - name = makeName(b"\0\0\0F\0\0\0o", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Fo', - '' - ], self.toXML(name)) - - def test_toXML_macroman(self): - name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman - self.assertEqual([ - '', - ' Foo Italic', - '' - ], self.toXML(name)) - - def test_toXML_macroman_actual_utf16be(self): - name = makeName("\0F\0o\0o", 222, 1, 0, 7) - self.assertEqual([ - '', - ' Foo', - '' - ], self.toXML(name)) - - def test_toXML_unknownPlatEncID_nonASCII(self): - name = makeName(b"B\x8arli", 333, 1, 9876, 7) # Unknown Mac encodingID - self.assertEqual([ - '', - ' BŠrli', - '' - ], self.toXML(name)) - - def test_toXML_unknownPlatEncID_ASCII(self): - name = makeName(b"Barli", 333, 1, 9876, 7) # Unknown Mac encodingID - self.assertEqual([ - '', - ' Barli', - '' - ], self.toXML(name)) - - def test_encoding_macroman_misc(self): - name = makeName('', 123, 1, 0, 17) # Mac Turkish - self.assertEqual(name.getEncoding(), "mac_turkish") - name.langID = 37 - self.assertEqual(name.getEncoding(), "mac_romanian") - name.langID = 45 # Other - self.assertEqual(name.getEncoding(), "mac_roman") - - def test_extended_mac_encodings(self): - name = makeName(b'\xfe', 123, 1, 1, 0) # Mac Japanese - self.assertEqual(name.toUnicode(), unichr(0x2122)) - - def test_extended_unknown(self): - name = makeName(b'\xfe', 123, 10, 11, 12) - self.assertEqual(name.getEncoding(), "ascii") - self.assertEqual(name.getEncoding(None), None) - self.assertEqual(name.getEncoding(default=None), None) - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_o_p_b_d.py 
fonttools-3.21.2/Lib/fontTools/ttLib/tables/_o_p_b_d.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_o_p_b_d.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_o_p_b_d.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html +class table__o_p_b_d(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/O_S_2f_2.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/O_S_2f_2.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/O_S_2f_2.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/O_S_2f_2.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,10 +2,12 @@ from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval, num2binary, binary2num -from . 
import DefaultTable -import warnings +from fontTools.ttLib.tables import DefaultTable +import logging +log = logging.getLogger(__name__) + # panose classification panoseFormat = """ @@ -101,6 +103,8 @@ """the OS/2 table""" + dependencies = ["head"] + def decompile(self, data, ttFont): dummy, data = sstruct.unpack2(OS2_format_0, data, self) @@ -116,13 +120,26 @@ from fontTools import ttLib raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version) if len(data): - warnings.warn("too much 'OS/2' table data") + log.warning("too much 'OS/2' table data") self.panose = sstruct.unpack(panoseFormat, self.panose, Panose()) def compile(self, ttFont): self.updateFirstAndLastCharIndex(ttFont) panose = self.panose + head = ttFont["head"] + if (self.fsSelection & 1) and not (head.macStyle & 1<<1): + log.warning("fsSelection bit 0 (italic) and " + "head table macStyle bit 1 (italic) should match") + if (self.fsSelection & 1<<5) and not (head.macStyle & 1): + log.warning("fsSelection bit 5 (bold) and " + "head table macStyle bit 0 (bold) should match") + if (self.fsSelection & 1<<6) and (self.fsSelection & 1 + (1<<5)): + log.warning("fsSelection bit 6 (regular) is set, " + "bits 0 (italic) and 5 (bold) must be clear") + if self.version < 4 and self.fsSelection & 0b1110000000: + log.warning("fsSelection bits 7, 8 and 9 are only defined in " + "OS/2 table version 4 and up: version %s", self.version) self.panose = sstruct.pack(panoseFormat, self.panose) if self.version == 0: data = sstruct.pack(OS2_format_0, self) @@ -132,8 +149,8 @@ data = sstruct.pack(OS2_format_2, self) elif self.version == 5: d = self.__dict__.copy() - d['usLowerOpticalPointSize'] = int(round(self.usLowerOpticalPointSize * 20)) - d['usUpperOpticalPointSize'] = int(round(self.usUpperOpticalPointSize * 20)) + d['usLowerOpticalPointSize'] = round(self.usLowerOpticalPointSize * 20) + d['usUpperOpticalPointSize'] = round(self.usUpperOpticalPointSize * 20) data = sstruct.pack(OS2_format_5, d) 
else: from fontTools import ttLib @@ -192,16 +209,18 @@ setattr(self, name, safeEval(attrs["value"])) def updateFirstAndLastCharIndex(self, ttFont): + if 'cmap' not in ttFont: + return codes = set() - for table in ttFont['cmap'].tables: + for table in getattr(ttFont['cmap'], 'tables', []): if table.isUnicode(): codes.update(table.cmap.keys()) if codes: minCode = min(codes) maxCode = max(codes) # USHORT cannot hold codepoints greater than 0xFFFF - self.usFirstCharIndex = 0xFFFF if minCode > 0xFFFF else minCode - self.usLastCharIndex = 0xFFFF if maxCode > 0xFFFF else maxCode + self.usFirstCharIndex = min(0xFFFF, minCode) + self.usLastCharIndex = min(0xFFFF, maxCode) # misspelled attributes kept for legacy reasons @@ -228,3 +247,275 @@ @fsLastCharIndex.setter def fsLastCharIndex(self, value): self.usLastCharIndex = value + + def getUnicodeRanges(self): + """ Return the set of 'ulUnicodeRange*' bits currently enabled. """ + bits = set() + ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2 + ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4 + for i in range(32): + if ul1 & (1 << i): + bits.add(i) + if ul2 & (1 << i): + bits.add(i + 32) + if ul3 & (1 << i): + bits.add(i + 64) + if ul4 & (1 << i): + bits.add(i + 96) + return bits + + def setUnicodeRanges(self, bits): + """ Set the 'ulUnicodeRange*' fields to the specified 'bits'. 
""" + ul1, ul2, ul3, ul4 = 0, 0, 0, 0 + for bit in bits: + if 0 <= bit < 32: + ul1 |= (1 << bit) + elif 32 <= bit < 64: + ul2 |= (1 << (bit - 32)) + elif 64 <= bit < 96: + ul3 |= (1 << (bit - 64)) + elif 96 <= bit < 123: + ul4 |= (1 << (bit - 96)) + else: + raise ValueError('expected 0 <= int <= 122, found: %r' % bit) + self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2 + self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4 + + def recalcUnicodeRanges(self, ttFont, pruneOnly=False): + """ Intersect the codepoints in the font's Unicode cmap subtables with + the Unicode block ranges defined in the OpenType specification (v1.7), + and set the respective 'ulUnicodeRange*' bits if there is at least ONE + intersection. + If 'pruneOnly' is True, only clear unused bits with NO intersection. + """ + unicodes = set() + for table in ttFont['cmap'].tables: + if table.isUnicode(): + unicodes.update(table.cmap.keys()) + if pruneOnly: + empty = intersectUnicodeRanges(unicodes, inverse=True) + bits = self.getUnicodeRanges() - empty + else: + bits = intersectUnicodeRanges(unicodes) + self.setUnicodeRanges(bits) + return bits + + +# Unicode ranges data from the OpenType OS/2 table specification v1.7 + +OS2_UNICODE_RANGES = ( + (('Basic Latin', (0x0000, 0x007F)),), + (('Latin-1 Supplement', (0x0080, 0x00FF)),), + (('Latin Extended-A', (0x0100, 0x017F)),), + (('Latin Extended-B', (0x0180, 0x024F)),), + (('IPA Extensions', (0x0250, 0x02AF)), + ('Phonetic Extensions', (0x1D00, 0x1D7F)), + ('Phonetic Extensions Supplement', (0x1D80, 0x1DBF))), + (('Spacing Modifier Letters', (0x02B0, 0x02FF)), + ('Modifier Tone Letters', (0xA700, 0xA71F))), + (('Combining Diacritical Marks', (0x0300, 0x036F)), + ('Combining Diacritical Marks Supplement', (0x1DC0, 0x1DFF))), + (('Greek and Coptic', (0x0370, 0x03FF)),), + (('Coptic', (0x2C80, 0x2CFF)),), + (('Cyrillic', (0x0400, 0x04FF)), + ('Cyrillic Supplement', (0x0500, 0x052F)), + ('Cyrillic Extended-A', (0x2DE0, 0x2DFF)), + ('Cyrillic Extended-B', 
(0xA640, 0xA69F))), + (('Armenian', (0x0530, 0x058F)),), + (('Hebrew', (0x0590, 0x05FF)),), + (('Vai', (0xA500, 0xA63F)),), + (('Arabic', (0x0600, 0x06FF)), + ('Arabic Supplement', (0x0750, 0x077F))), + (('NKo', (0x07C0, 0x07FF)),), + (('Devanagari', (0x0900, 0x097F)),), + (('Bengali', (0x0980, 0x09FF)),), + (('Gurmukhi', (0x0A00, 0x0A7F)),), + (('Gujarati', (0x0A80, 0x0AFF)),), + (('Oriya', (0x0B00, 0x0B7F)),), + (('Tamil', (0x0B80, 0x0BFF)),), + (('Telugu', (0x0C00, 0x0C7F)),), + (('Kannada', (0x0C80, 0x0CFF)),), + (('Malayalam', (0x0D00, 0x0D7F)),), + (('Thai', (0x0E00, 0x0E7F)),), + (('Lao', (0x0E80, 0x0EFF)),), + (('Georgian', (0x10A0, 0x10FF)), + ('Georgian Supplement', (0x2D00, 0x2D2F))), + (('Balinese', (0x1B00, 0x1B7F)),), + (('Hangul Jamo', (0x1100, 0x11FF)),), + (('Latin Extended Additional', (0x1E00, 0x1EFF)), + ('Latin Extended-C', (0x2C60, 0x2C7F)), + ('Latin Extended-D', (0xA720, 0xA7FF))), + (('Greek Extended', (0x1F00, 0x1FFF)),), + (('General Punctuation', (0x2000, 0x206F)), + ('Supplemental Punctuation', (0x2E00, 0x2E7F))), + (('Superscripts And Subscripts', (0x2070, 0x209F)),), + (('Currency Symbols', (0x20A0, 0x20CF)),), + (('Combining Diacritical Marks For Symbols', (0x20D0, 0x20FF)),), + (('Letterlike Symbols', (0x2100, 0x214F)),), + (('Number Forms', (0x2150, 0x218F)),), + (('Arrows', (0x2190, 0x21FF)), + ('Supplemental Arrows-A', (0x27F0, 0x27FF)), + ('Supplemental Arrows-B', (0x2900, 0x297F)), + ('Miscellaneous Symbols and Arrows', (0x2B00, 0x2BFF))), + (('Mathematical Operators', (0x2200, 0x22FF)), + ('Supplemental Mathematical Operators', (0x2A00, 0x2AFF)), + ('Miscellaneous Mathematical Symbols-A', (0x27C0, 0x27EF)), + ('Miscellaneous Mathematical Symbols-B', (0x2980, 0x29FF))), + (('Miscellaneous Technical', (0x2300, 0x23FF)),), + (('Control Pictures', (0x2400, 0x243F)),), + (('Optical Character Recognition', (0x2440, 0x245F)),), + (('Enclosed Alphanumerics', (0x2460, 0x24FF)),), + (('Box Drawing', (0x2500, 0x257F)),), + (('Block 
Elements', (0x2580, 0x259F)),), + (('Geometric Shapes', (0x25A0, 0x25FF)),), + (('Miscellaneous Symbols', (0x2600, 0x26FF)),), + (('Dingbats', (0x2700, 0x27BF)),), + (('CJK Symbols And Punctuation', (0x3000, 0x303F)),), + (('Hiragana', (0x3040, 0x309F)),), + (('Katakana', (0x30A0, 0x30FF)), + ('Katakana Phonetic Extensions', (0x31F0, 0x31FF))), + (('Bopomofo', (0x3100, 0x312F)), + ('Bopomofo Extended', (0x31A0, 0x31BF))), + (('Hangul Compatibility Jamo', (0x3130, 0x318F)),), + (('Phags-pa', (0xA840, 0xA87F)),), + (('Enclosed CJK Letters And Months', (0x3200, 0x32FF)),), + (('CJK Compatibility', (0x3300, 0x33FF)),), + (('Hangul Syllables', (0xAC00, 0xD7AF)),), + (('Non-Plane 0 *', (0xD800, 0xDFFF)),), + (('Phoenician', (0x10900, 0x1091F)),), + (('CJK Unified Ideographs', (0x4E00, 0x9FFF)), + ('CJK Radicals Supplement', (0x2E80, 0x2EFF)), + ('Kangxi Radicals', (0x2F00, 0x2FDF)), + ('Ideographic Description Characters', (0x2FF0, 0x2FFF)), + ('CJK Unified Ideographs Extension A', (0x3400, 0x4DBF)), + ('CJK Unified Ideographs Extension B', (0x20000, 0x2A6DF)), + ('Kanbun', (0x3190, 0x319F))), + (('Private Use Area (plane 0)', (0xE000, 0xF8FF)),), + (('CJK Strokes', (0x31C0, 0x31EF)), + ('CJK Compatibility Ideographs', (0xF900, 0xFAFF)), + ('CJK Compatibility Ideographs Supplement', (0x2F800, 0x2FA1F))), + (('Alphabetic Presentation Forms', (0xFB00, 0xFB4F)),), + (('Arabic Presentation Forms-A', (0xFB50, 0xFDFF)),), + (('Combining Half Marks', (0xFE20, 0xFE2F)),), + (('Vertical Forms', (0xFE10, 0xFE1F)), + ('CJK Compatibility Forms', (0xFE30, 0xFE4F))), + (('Small Form Variants', (0xFE50, 0xFE6F)),), + (('Arabic Presentation Forms-B', (0xFE70, 0xFEFF)),), + (('Halfwidth And Fullwidth Forms', (0xFF00, 0xFFEF)),), + (('Specials', (0xFFF0, 0xFFFF)),), + (('Tibetan', (0x0F00, 0x0FFF)),), + (('Syriac', (0x0700, 0x074F)),), + (('Thaana', (0x0780, 0x07BF)),), + (('Sinhala', (0x0D80, 0x0DFF)),), + (('Myanmar', (0x1000, 0x109F)),), + (('Ethiopic', (0x1200, 0x137F)), + ('Ethiopic 
Supplement', (0x1380, 0x139F)), + ('Ethiopic Extended', (0x2D80, 0x2DDF))), + (('Cherokee', (0x13A0, 0x13FF)),), + (('Unified Canadian Aboriginal Syllabics', (0x1400, 0x167F)),), + (('Ogham', (0x1680, 0x169F)),), + (('Runic', (0x16A0, 0x16FF)),), + (('Khmer', (0x1780, 0x17FF)), + ('Khmer Symbols', (0x19E0, 0x19FF))), + (('Mongolian', (0x1800, 0x18AF)),), + (('Braille Patterns', (0x2800, 0x28FF)),), + (('Yi Syllables', (0xA000, 0xA48F)), + ('Yi Radicals', (0xA490, 0xA4CF))), + (('Tagalog', (0x1700, 0x171F)), + ('Hanunoo', (0x1720, 0x173F)), + ('Buhid', (0x1740, 0x175F)), + ('Tagbanwa', (0x1760, 0x177F))), + (('Old Italic', (0x10300, 0x1032F)),), + (('Gothic', (0x10330, 0x1034F)),), + (('Deseret', (0x10400, 0x1044F)),), + (('Byzantine Musical Symbols', (0x1D000, 0x1D0FF)), + ('Musical Symbols', (0x1D100, 0x1D1FF)), + ('Ancient Greek Musical Notation', (0x1D200, 0x1D24F))), + (('Mathematical Alphanumeric Symbols', (0x1D400, 0x1D7FF)),), + (('Private Use (plane 15)', (0xF0000, 0xFFFFD)), + ('Private Use (plane 16)', (0x100000, 0x10FFFD))), + (('Variation Selectors', (0xFE00, 0xFE0F)), + ('Variation Selectors Supplement', (0xE0100, 0xE01EF))), + (('Tags', (0xE0000, 0xE007F)),), + (('Limbu', (0x1900, 0x194F)),), + (('Tai Le', (0x1950, 0x197F)),), + (('New Tai Lue', (0x1980, 0x19DF)),), + (('Buginese', (0x1A00, 0x1A1F)),), + (('Glagolitic', (0x2C00, 0x2C5F)),), + (('Tifinagh', (0x2D30, 0x2D7F)),), + (('Yijing Hexagram Symbols', (0x4DC0, 0x4DFF)),), + (('Syloti Nagri', (0xA800, 0xA82F)),), + (('Linear B Syllabary', (0x10000, 0x1007F)), + ('Linear B Ideograms', (0x10080, 0x100FF)), + ('Aegean Numbers', (0x10100, 0x1013F))), + (('Ancient Greek Numbers', (0x10140, 0x1018F)),), + (('Ugaritic', (0x10380, 0x1039F)),), + (('Old Persian', (0x103A0, 0x103DF)),), + (('Shavian', (0x10450, 0x1047F)),), + (('Osmanya', (0x10480, 0x104AF)),), + (('Cypriot Syllabary', (0x10800, 0x1083F)),), + (('Kharoshthi', (0x10A00, 0x10A5F)),), + (('Tai Xuan Jing Symbols', (0x1D300, 0x1D35F)),), + 
(('Cuneiform', (0x12000, 0x123FF)), + ('Cuneiform Numbers and Punctuation', (0x12400, 0x1247F))), + (('Counting Rod Numerals', (0x1D360, 0x1D37F)),), + (('Sundanese', (0x1B80, 0x1BBF)),), + (('Lepcha', (0x1C00, 0x1C4F)),), + (('Ol Chiki', (0x1C50, 0x1C7F)),), + (('Saurashtra', (0xA880, 0xA8DF)),), + (('Kayah Li', (0xA900, 0xA92F)),), + (('Rejang', (0xA930, 0xA95F)),), + (('Cham', (0xAA00, 0xAA5F)),), + (('Ancient Symbols', (0x10190, 0x101CF)),), + (('Phaistos Disc', (0x101D0, 0x101FF)),), + (('Carian', (0x102A0, 0x102DF)), + ('Lycian', (0x10280, 0x1029F)), + ('Lydian', (0x10920, 0x1093F))), + (('Domino Tiles', (0x1F030, 0x1F09F)), + ('Mahjong Tiles', (0x1F000, 0x1F02F))), +) + + +_unicodeRangeSets = [] + +def _getUnicodeRangeSets(): + # build the sets of codepoints for each unicode range bit, and cache result + if not _unicodeRangeSets: + for bit, blocks in enumerate(OS2_UNICODE_RANGES): + rangeset = set() + for _, (start, stop) in blocks: + rangeset.update(set(range(start, stop+1))) + if bit == 57: + # The spec says that bit 57 ("Non Plane 0") implies that there's + # at least one codepoint beyond the BMP; so I also include all + # the non-BMP codepoints here + rangeset.update(set(range(0x10000, 0x110000))) + _unicodeRangeSets.append(rangeset) + return _unicodeRangeSets + + +def intersectUnicodeRanges(unicodes, inverse=False): + """ Intersect a sequence of (int) Unicode codepoints with the Unicode block + ranges defined in the OpenType specification v1.7, and return the set of + 'ulUnicodeRanges' bits for which there is at least ONE intersection. + If 'inverse' is True, return the the bits for which there is NO intersection. + + >>> intersectUnicodeRanges([0x0410]) == {9} + True + >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122} + True + >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == ( + ... 
set(range(123)) - {9, 57, 122}) + True + """ + unicodes = set(unicodes) + uniranges = _getUnicodeRangeSets() + bits = set([ + bit for bit, unirange in enumerate(uniranges) + if not unirange.isdisjoint(unicodes) ^ inverse]) + return bits + + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/otBase.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/otBase.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/otBase.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/otBase.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,7 +1,12 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from .DefaultTable import DefaultTable +import sys +import array import struct +import logging + +log = logging.getLogger(__name__) class OverflowErrorRecord(object): def __init__(self, overflowTuple): @@ -31,26 +36,10 @@ def decompile(self, data, font): from . import otTables - cachingStats = None if True else {} - class GlobalState(object): - def __init__(self, tableType, cachingStats): - self.tableType = tableType - self.cachingStats = cachingStats - globalState = GlobalState(tableType=self.tableTag, - cachingStats=cachingStats) - reader = OTTableReader(data, globalState) + reader = OTTableReader(data, tableTag=self.tableTag) tableClass = getattr(otTables, self.tableTag) self.table = tableClass() self.table.decompile(reader, font) - if cachingStats: - stats = sorted([(v, k) for k, v in cachingStats.items()]) - stats.reverse() - print("cachingsstats for ", self.tableTag) - for v, k in stats: - if v < 2: - break - print(v, k) - print("---", len(stats)) def compile(self, font): """ Create a top-level OTFWriter for the GPOS/GSUB table. @@ -73,15 +62,11 @@ If a lookup subtable overflows an offset, we have to start all over. 
""" - class GlobalState(object): - def __init__(self, tableType): - self.tableType = tableType - globalState = GlobalState(tableType=self.tableTag) overflowRecord = None while True: try: - writer = OTTableWriter(globalState) + writer = OTTableWriter(tableTag=self.tableTag) self.table.compile(writer, font) return writer.getAllData() @@ -91,7 +76,7 @@ raise # Oh well... overflowRecord = e.value - print("Attempting to fix OTLOffsetOverflowError", e) + log.info("Attempting to fix OTLOffsetOverflowError %s", e) lastItem = overflowRecord ok = 0 @@ -119,31 +104,29 @@ """Helper class to retrieve data from an OpenType table.""" - __slots__ = ('data', 'offset', 'pos', 'globalState', 'localState') + __slots__ = ('data', 'offset', 'pos', 'localState', 'tableTag') - def __init__(self, data, globalState={}, localState=None, offset=0): + def __init__(self, data, localState=None, offset=0, tableTag=None): self.data = data self.offset = offset self.pos = offset - self.globalState = globalState self.localState = localState + self.tableTag = tableTag def advance(self, count): self.pos += count + def seek(self, pos): self.pos = pos def copy(self): - other = self.__class__(self.data, self.globalState, self.localState, self.offset) + other = self.__class__(self.data, self.localState, self.offset, self.tableTag) other.pos = self.pos return other def getSubReader(self, offset): offset = self.offset + offset - cachingStats = self.globalState.cachingStats - if cachingStats is not None: - cachingStats[offset] = cachingStats.get(offset, 0) + 1 - return self.__class__(self.data, self.globalState, self.localState, offset) + return self.__class__(self.data, self.localState, offset, self.tableTag) def readUShort(self): pos = self.pos @@ -152,6 +135,22 @@ self.pos = newpos return value + def readUShortArray(self, count): + pos = self.pos + newpos = pos + count * 2 + value = array.array("H", self.data[pos:newpos]) + if sys.byteorder != "big": + value.byteswap() + self.pos = newpos + return value + 
+ def readInt8(self): + pos = self.pos + newpos = pos + 1 + value, = struct.unpack(">b", self.data[pos:newpos]) + self.pos = newpos + return value + def readShort(self): pos = self.pos newpos = pos + 2 @@ -166,6 +165,13 @@ self.pos = newpos return value + def readUInt8(self): + pos = self.pos + newpos = pos + 1 + value, = struct.unpack(">B", self.data[pos:newpos]) + self.pos = newpos + return value + def readUInt24(self): pos = self.pos newpos = pos + 3 @@ -184,7 +190,7 @@ pos = self.pos newpos = pos + 4 value = Tag(self.data[pos:newpos]) - assert len(value) == 4 + assert len(value) == 4, value self.pos = newpos return value @@ -211,11 +217,11 @@ """Helper class to gather and assemble data for OpenType tables.""" - def __init__(self, globalState, localState=None): + def __init__(self, localState=None, tableTag=None): self.items = [] self.pos = None - self.globalState = globalState self.localState = localState + self.tableTag = tableTag self.longOffset = False self.parent = None @@ -227,45 +233,19 @@ def __getitem__(self, name): return self.localState[name] - # assembler interface - - def getAllData(self): - """Assemble all data, including all subtables.""" - self._doneWriting() - tables, extTables = self._gatherTables() - tables.reverse() - extTables.reverse() - # Gather all data in two passes: the absolute positions of all - # subtable are needed before the actual data can be assembled. 
- pos = 0 - for table in tables: - table.pos = pos - pos = pos + table.getDataLength() - - for table in extTables: - table.pos = pos - pos = pos + table.getDataLength() - - data = [] - for table in tables: - tableData = table.getData() - data.append(tableData) - - for table in extTables: - tableData = table.getData() - data.append(tableData) + def __delitem__(self, name): + del self.localState[name] - return bytesjoin(data) + # assembler interface def getDataLength(self): """Return the length of this table in bytes, without subtables.""" l = 0 for item in self.items: - if hasattr(item, "getData") or hasattr(item, "getCountData"): - if item.longOffset: - l = l + 4 # sizeof(ULong) - else: - l = l + 2 # sizeof(UShort) + if hasattr(item, "getCountData"): + l += item.size + elif hasattr(item, "getData"): + l += 4 if item.longOffset else 2 else: l = l + len(item) return l @@ -286,36 +266,6 @@ items[i] = packUShort(item.pos - pos) except struct.error: # provide data to fix overflow problem. - # If the overflow is to a lookup, or from a lookup to a subtable, - # just report the current item. Otherwise... - if self.name not in [ 'LookupList', 'Lookup']: - # overflow is within a subTable. Life is more complicated. - # If we split the sub-table just before the current item, we may still suffer overflow. - # This is because duplicate table merging is done only within an Extension subTable tree; - # when we split the subtable in two, some items may no longer be duplicates. - # Get worst case by adding up all the item lengths, depth first traversal. - # and then report the first item that overflows a short. 
- def getDeepItemLength(table): - if hasattr(table, "getDataLength"): - length = 0 - for item in table.items: - length = length + getDeepItemLength(item) - else: - length = len(table) - return length - - length = self.getDataLength() - if hasattr(self, "sortCoverageLast") and item.name == "Coverage": - # Coverage is first in the item list, but last in the table list, - # The original overflow is really in the item list. Skip the Coverage - # table in the following test. - items = items[i+1:] - - for j in range(len(items)): - item = items[j] - length = length + getDeepItemLength(item) - if length > 65535: - break overflowErrorRecord = self.getOverflowErrorRecord(item) raise OTLOffsetOverflowError(overflowErrorRecord) @@ -327,13 +277,15 @@ return hash(self.items) def __ne__(self, other): - return not self.__eq__(other) + result = self.__eq__(other) + return result if result is NotImplemented else not result + def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.items == other.items - def _doneWriting(self, internedTables=None): + def _doneWriting(self, internedTables): # Convert CountData references to data string items # collapse duplicate table references to a unique entry # "tables" are OTTableWriter objects. @@ -341,32 +293,29 @@ # For Extension Lookup types, we can # eliminate duplicates only within the tree under the Extension Lookup, # as offsets may exceed 64K even between Extension LookupTable subtables. - if internedTables is None: + isExtension = hasattr(self, "Extension") + + # Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level + # arrays (ScriptList, FeatureList, LookupList) point to the same, possibly + # empty, array. So, we don't share those. 
+ # See: https://github.com/behdad/fonttools/issues/518 + dontShare = hasattr(self, 'DontShare') + + if isExtension: internedTables = {} - items = self.items - iRange = list(range(len(items))) - if hasattr(self, "Extension"): - newTree = 1 - else: - newTree = 0 - for i in iRange: + items = self.items + for i in range(len(items)): item = items[i] if hasattr(item, "getCountData"): items[i] = item.getCountData() elif hasattr(item, "getData"): - if newTree: - item._doneWriting() - else: - item._doneWriting(internedTables) - internedItem = internedTables.get(item) - if internedItem: - items[i] = item = internedItem - else: - internedTables[item] = item + item._doneWriting(internedTables) + if not dontShare: + items[i] = item = internedTables.setdefault(item, item) self.items = tuple(items) - def _gatherTables(self, tables=None, extTables=None, done=None): + def _gatherTables(self, tables, extTables, done): # Convert table references in self.items tree to a flat # list of tables in depth-first traversal order. # "tables" are OTTableWriter objects. @@ -374,21 +323,21 @@ # resolve duplicate references to be the last reference in the list of tables. # For extension lookups, duplicate references can be merged only within the # writer tree under the extension lookup. - if tables is None: # init call for first time. - tables = [] - extTables = [] - done = {} - done[self] = 1 + done[id(self)] = True numItems = len(self.items) iRange = list(range(numItems)) iRange.reverse() - if hasattr(self, "Extension"): - appendExtensions = 1 - else: - appendExtensions = 0 + isExtension = hasattr(self, "Extension") + dontShare = hasattr(self, 'DontShare') + + selfTables = tables + + if isExtension: + assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables" + tables, extTables, done = extTables, None, {} # add Coverage table if it is sorted last. 
sortCoverageLast = 0 @@ -399,7 +348,7 @@ if hasattr(item, "name") and (item.name == "Coverage"): sortCoverageLast = 1 break - if item not in done: + if id(item) not in done: item._gatherTables(tables, extTables, done) else: # We're a new parent of item @@ -414,24 +363,50 @@ # we've already 'gathered' it above continue - if appendExtensions: - assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables" - newDone = {} - item._gatherTables(extTables, None, newDone) - - elif item not in done: + if id(item) not in done: item._gatherTables(tables, extTables, done) else: - # We're a new parent of item + # Item is already written out by other parent pass - tables.append(self) - return tables, extTables + selfTables.append(self) + + def getAllData(self): + """Assemble all data, including all subtables.""" + internedTables = {} + self._doneWriting(internedTables) + tables = [] + extTables = [] + done = {} + self._gatherTables(tables, extTables, done) + tables.reverse() + extTables.reverse() + # Gather all data in two passes: the absolute positions of all + # subtable are needed before the actual data can be assembled. + pos = 0 + for table in tables: + table.pos = pos + pos = pos + table.getDataLength() + + for table in extTables: + table.pos = pos + pos = pos + table.getDataLength() + + data = [] + for table in tables: + tableData = table.getData() + data.append(tableData) + + for table in extTables: + tableData = table.getData() + data.append(tableData) + + return bytesjoin(data) # interface for gathering data, as used by table.compile() def getSubWriter(self): - subwriter = self.__class__(self.globalState, self.localState) + subwriter = self.__class__(self.localState, self.tableTag) subwriter.parent = self # because some subtables have idential values, we discard # the duplicates under the getAllData method. Hence some # subtable writers can have more than one parent writer. 
@@ -439,14 +414,23 @@ return subwriter def writeUShort(self, value): - assert 0 <= value < 0x10000 + assert 0 <= value < 0x10000, value self.items.append(struct.pack(">H", value)) def writeShort(self, value): + assert -32768 <= value < 32768, value self.items.append(struct.pack(">h", value)) + def writeUInt8(self, value): + assert 0 <= value < 256, value + self.items.append(struct.pack(">B", value)) + + def writeInt8(self, value): + assert -128 <= value < 128, value + self.items.append(struct.pack(">b", value)) + def writeUInt24(self, value): - assert 0 <= value < 0x1000000 + assert 0 <= value < 0x1000000, value b = struct.pack(">L", value) self.items.append(b[1:]) @@ -458,14 +442,14 @@ def writeTag(self, tag): tag = Tag(tag).tobytes() - assert len(tag) == 4 + assert len(tag) == 4, tag self.items.append(tag) def writeSubTable(self, subWriter): self.items.append(subWriter) - def writeCountReference(self, table, name): - ref = CountReference(table, name) + def writeCountReference(self, table, name, size=2, value=None): + ref = CountReference(table, name, size=size, value=value) self.items.append(ref) return ref @@ -476,7 +460,7 @@ def writeData(self, data): self.items.append(data) - def getOverflowErrorRecord(self, item): + def getOverflowErrorRecord(self, item): LookupListIndex = SubTableIndex = itemName = itemIndex = None if self.name == 'LookupList': LookupListIndex = item.repeatIndex @@ -484,7 +468,7 @@ LookupListIndex = self.repeatIndex SubTableIndex = item.repeatIndex else: - itemName = item.name + itemName = getattr(item, 'name', '') if hasattr(item, 'repeatIndex'): itemIndex = item.repeatIndex if self.name == 'SubTable': @@ -494,10 +478,10 @@ LookupListIndex = self.parent.parent.repeatIndex SubTableIndex = self.parent.repeatIndex else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable. 
- itemName = ".".join([self.name, item.name]) + itemName = ".".join([self.name, itemName]) p1 = self.parent while p1 and p1.name not in ['ExtSubTable', 'SubTable']: - itemName = ".".join([p1.name, item.name]) + itemName = ".".join([p1.name, itemName]) p1 = p1.parent if p1: if p1.name == 'ExtSubTable': @@ -507,14 +491,17 @@ LookupListIndex = p1.parent.repeatIndex SubTableIndex = p1.repeatIndex - return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) ) + return OverflowErrorRecord( (self.tableTag, LookupListIndex, SubTableIndex, itemName, itemIndex) ) class CountReference(object): """A reference to a Count value, not a count of references.""" - def __init__(self, table, name): + def __init__(self, table, name, size=None, value=None): self.table = table self.name = name + self.size = size + if value is not None: + self.setValue(value) def setValue(self, value): table = self.table name = self.name @@ -523,13 +510,17 @@ else: assert table[name] == value, (name, table[name], value) def getCountData(self): - return packUShort(self.table[self.name]) + v = self.table[self.name] + if v is None: v = 0 + return {1:packUInt8, 2:packUShort, 4:packULong}[self.size](v) + +def packUInt8 (value): + return struct.pack(">B", value) def packUShort(value): return struct.pack(">H", value) - def packULong(value): assert 0 <= value < 0x100000000, value return struct.pack(">L", value) @@ -579,22 +570,50 @@ def getConverterByName(self, name): return self.convertersByName[name] + def populateDefaults(self, propagator=None): + for conv in self.getConverters(): + if conv.repeat: + if not hasattr(self, conv.name): + setattr(self, conv.name, []) + countValue = len(getattr(self, conv.name)) - conv.aux + try: + count_conv = self.getConverterByName(conv.repeat) + setattr(self, conv.repeat, countValue) + except KeyError: + # conv.repeat is a propagated count + if propagator and conv.repeat in propagator: + 
propagator[conv.repeat].setValue(countValue) + else: + if conv.aux and not eval(conv.aux, None, self.__dict__): + continue + if hasattr(self, conv.name): + continue # Warn if it should NOT be present?! + if hasattr(conv, 'writeNullOffset'): + setattr(self, conv.name, None) # Warn? + #elif not conv.isCount: + # # Warn? + # pass + def decompile(self, reader, font): self.readFormat(reader) table = {} self.__rawTable = table # for debugging - converters = self.getConverters() - for conv in converters: + for conv in self.getConverters(): if conv.name == "SubTable": - conv = conv.getConverter(reader.globalState.tableType, + conv = conv.getConverter(reader.tableTag, table["LookupType"]) if conv.name == "ExtSubTable": - conv = conv.getConverter(reader.globalState.tableType, + conv = conv.getConverter(reader.tableTag, table["ExtensionLookupType"]) if conv.name == "FeatureParams": conv = conv.getConverter(reader["FeatureTag"]) + if conv.name == "SubStruct": + conv = conv.getConverter(reader.tableTag, + table["MorphType"]) if conv.repeat: - if conv.repeat in table: + if isinstance(conv.repeat, int): + countValue = conv.repeat + elif conv.repeat in table: countValue = table[conv.repeat] else: # conv.repeat is a propagated count @@ -608,33 +627,52 @@ if conv.isPropagated: reader[conv.name] = table[conv.name] - self.postRead(table, font) + if hasattr(self, 'postRead'): + self.postRead(table, font) + else: + self.__dict__.update(table) del self.__rawTable # succeeded, get rid of debugging info def compile(self, writer, font): self.ensureDecompiled() - table = self.preWrite(font) + if hasattr(self, 'preWrite'): + table = self.preWrite(font) + else: + table = self.__dict__.copy() + if hasattr(self, 'sortCoverageLast'): writer.sortCoverageLast = 1 + if hasattr(self, 'DontShare'): + writer.DontShare = True + if hasattr(self.__class__, 'LookupType'): writer['LookupType'].setValue(self.__class__.LookupType) self.writeFormat(writer) for conv in self.getConverters(): - value = 
table.get(conv.name) + value = table.get(conv.name) # TODO Handle defaults instead of defaulting to None! if conv.repeat: if value is None: value = [] countValue = len(value) - conv.aux - if conv.repeat in table: - CountReference(table, conv.repeat).setValue(countValue) + if isinstance(conv.repeat, int): + assert len(value) == conv.repeat, 'expected %d values, got %d' % (conv.repeat, len(value)) + elif conv.repeat in table: + CountReference(table, conv.repeat, value=countValue) else: # conv.repeat is a propagated count writer[conv.repeat].setValue(countValue) - conv.writeArray(writer, font, table, value) + values = value + for i, value in enumerate(values): + try: + conv.write(writer, font, table, value, i) + except Exception as e: + name = value.__class__.__name__ if value is not None else conv.name + e.args = e.args + (name+'['+str(i)+']',) + raise elif conv.isCount: # Special-case Count values. # Assumption: a Count field will *always* precede @@ -643,18 +681,27 @@ # table. We will later store it here. # We add a reference: by the time the data is assembled # the Count value will be filled in. - ref = writer.writeCountReference(table, conv.name) + ref = writer.writeCountReference(table, conv.name, conv.staticSize) table[conv.name] = None if conv.isPropagated: writer[conv.name] = ref elif conv.isLookupType: - ref = writer.writeCountReference(table, conv.name) - table[conv.name] = None + # We make sure that subtables have the same lookup type, + # and that the type is the same as the one set on the + # Lookup object, if any is set. 
+ if conv.name not in table: + table[conv.name] = None + ref = writer.writeCountReference(table, conv.name, conv.staticSize, table[conv.name]) writer['LookupType'] = ref else: if conv.aux and not eval(conv.aux, None, table): continue - conv.write(writer, font, table, value) + try: + conv.write(writer, font, table, value) + except Exception as e: + name = value.__class__.__name__ if value is not None else conv.name + e.args = e.args + (name,) + raise if conv.isPropagated: writer[conv.name] = value @@ -664,12 +711,6 @@ def writeFormat(self, writer): pass - def postRead(self, table, font): - self.__dict__.update(table) - - def preWrite(self, font): - return self.__dict__.copy() - def toXML(self, xmlWriter, font, attrs=None, name=None): tableName = name if name else self.__class__.__name__ if attrs is None: @@ -688,7 +729,7 @@ # do it ourselves. I think I'm getting schizophrenic... for conv in self.getConverters(): if conv.repeat: - value = getattr(self, conv.name) + value = getattr(self, conv.name, []) for i in range(len(value)): item = value[i] conv.xmlWrite(xmlWriter, font, item, conv.name, @@ -696,7 +737,7 @@ else: if conv.aux and not eval(conv.aux, None, vars(self)): continue - value = getattr(self, conv.name) + value = getattr(self, conv.name, None) # TODO Handle defaults instead of defaulting to None! 
conv.xmlWrite(xmlWriter, font, value, conv.name, []) def fromXML(self, name, attrs, content, font): @@ -715,7 +756,9 @@ setattr(self, conv.name, value) def __ne__(self, other): - return not self.__eq__(other) + result = self.__eq__(other) + return result if result is NotImplemented else not result + def __eq__(self, other): if type(self) != type(other): return NotImplemented @@ -736,20 +779,19 @@ return NotImplemented def getConverters(self): - return self.converters[self.Format] + return self.converters.get(self.Format, []) def getConverterByName(self, name): return self.convertersByName[self.Format][name] def readFormat(self, reader): self.Format = reader.readUShort() - assert self.Format != 0, (self, reader.pos, len(reader.data)) def writeFormat(self, writer): writer.writeUShort(self.Format) def toXML(self, xmlWriter, font, attrs=None, name=None): - BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) + BaseTable.toXML(self, xmlWriter, font, attrs, name) # @@ -845,6 +887,18 @@ # see ValueRecordFactory + def __init__(self, valueFormat=None, src=None): + if valueFormat is not None: + for mask, name, isDevice, signed in valueRecordFormat: + if valueFormat & mask: + setattr(self, name, None if isDevice else 0) + if src is not None: + for key,val in src.__dict__.items(): + assert hasattr(self, key) + setattr(self, key, val) + elif src is not None: + self.__dict__ = src.__dict__.copy() + def getFormat(self): format = 0 for name in self.__dict__.keys(): @@ -870,7 +924,7 @@ xmlWriter.newline() for name, deviceRecord in deviceItems: if deviceRecord is not None: - deviceRecord.toXML(xmlWriter, font) + deviceRecord.toXML(xmlWriter, font, name=name) xmlWriter.endtag(valueName) xmlWriter.newline() else: @@ -894,7 +948,9 @@ setattr(self, name, value) def __ne__(self, other): - return not self.__eq__(other) + result = self.__eq__(other) + return result if result is NotImplemented else not result + def __eq__(self, other): if type(self) != type(other): 
return NotImplemented diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/otConverters.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/otConverters.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/otConverters.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/otConverters.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,9 +1,22 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi -from .otBase import ValueRecordFactory -import array +from fontTools.misc.fixedTools import ( + fixedToFloat as fi2fl, floatToFixed as fl2fi, ensureVersionIsLong as fi2ve, + versionToFixed as ve2fi) +from fontTools.misc.textTools import pad, safeEval +from fontTools.ttLib import getSearchRange +from .otBase import (CountReference, FormatSwitchingBaseTable, + OTTableReader, OTTableWriter, ValueRecordFactory) +from .otTables import (lookupTypes, AATStateTable, AATState, AATAction, + ContextualMorphAction, LigatureMorphAction, + MorxSubtable) +from functools import partial +import struct +import logging + + +log = logging.getLogger(__name__) +istuple = lambda t: isinstance(t, tuple) def buildConverters(tableSpec, tableNamespace): @@ -17,24 +30,37 @@ if name.startswith("ValueFormat"): assert tp == "uint16" converterClass = ValueFormat - elif name.endswith("Count") or name.endswith("LookupType"): - assert tp == "uint16" - converterClass = ComputedUShort + elif name.endswith("Count") or name in ("StructLength", "MorphType"): + converterClass = { + "uint8": ComputedUInt8, + "uint16": ComputedUShort, + "uint32": ComputedULong, + }[tp] elif name == "SubTable": converterClass = SubTable elif name == "ExtSubTable": converterClass = ExtSubTable + elif name == "SubStruct": + converterClass = SubStruct elif name == "FeatureParams": converterClass = FeatureParams + elif name in ("CIDGlyphMapping", 
"GlyphCIDMapping"): + converterClass = StructWithLength else: - if not tp in converterMapping: + if not tp in converterMapping and '(' not in tp: tableName = tp converterClass = Struct else: - converterClass = converterMapping[tp] - tableClass = tableNamespace.get(tableName) - conv = converterClass(name, repeat, aux, tableClass) - if name in ["SubTable", "ExtSubTable"]: + converterClass = eval(tp, tableNamespace, converterMapping) + if tp in ('MortChain', 'MortSubtable', 'MorxChain'): + tableClass = tableNamespace.get(tp) + else: + tableClass = tableNamespace.get(tableName) + if tableClass is not None: + conv = converterClass(name, repeat, aux, tableClass=tableClass) + else: + conv = converterClass(name, repeat, aux) + if name in ["SubTable", "ExtSubTable", "SubStruct"]: conv.lookupTypes = tableNamespace['lookupTypes'] # also create reverse mapping for t in conv.lookupTypes.values(): @@ -54,15 +80,18 @@ class _MissingItem(tuple): __slots__ = () + try: from collections import UserList -except: +except ImportError: from UserList import UserList + class _LazyList(UserList): def __getslice__(self, i, j): return self.__getitem__(slice(i, j)) + def __getitem__(self, k): if isinstance(k, slice): indices = range(*k.indices(len(self))) @@ -74,19 +103,34 @@ self.data[k] = item return item + def __add__(self, other): + if isinstance(other, _LazyList): + other = list(other) + elif isinstance(other, list): + pass + else: + return NotImplemented + return list(self) + other + + def __radd__(self, other): + if not isinstance(other, list): + return NotImplemented + return other + list(self) + + class BaseConverter(object): """Base class for converter objects. 
Apart from the constructor, this is an abstract class.""" - def __init__(self, name, repeat, aux, tableClass): + def __init__(self, name, repeat, aux, tableClass=None): self.name = name self.repeat = repeat self.aux = aux self.tableClass = tableClass - self.isCount = name.endswith("Count") - self.isLookupType = name.endswith("LookupType") - self.isPropagated = name in ["ClassCount", "Class2Count", "FeatureTag", "SettingsCount", "AxisCount"] + self.isCount = name.endswith("Count") or name in ['DesignAxisRecordSize', 'ValueRecordSize'] + self.isLookupType = name.endswith("LookupType") or name == "MorphType" + self.isPropagated = name in ["ClassCount", "Class2Count", "FeatureTag", "SettingsCount", "VarRegionCount", "MappingCount", "RegionAxisCount", 'DesignAxisCount', 'DesignAxisRecordSize', 'AxisValueCount', 'ValueRecordSize'] def readArray(self, reader, font, tableDict, count): """Read an array of values from the reader.""" @@ -120,8 +164,8 @@ raise NotImplementedError(self) def writeArray(self, writer, font, tableDict, values): - for i in range(len(values)): - self.write(writer, font, tableDict, values[i], i) + for i, value in enumerate(values): + self.write(writer, font, tableDict, value, i) def write(self, writer, font, tableDict, value, repeatIndex=None): """Write a value to the writer.""" @@ -161,6 +205,11 @@ def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeULong(value) +class Flags32(ULong): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", "0x%08X" % value)]) + xmlWriter.newline() + class Short(IntValue): staticSize = 2 def read(self, reader, font, tableDict): @@ -175,6 +224,20 @@ def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUShort(value) +class Int8(IntValue): + staticSize = 1 + def read(self, reader, font, tableDict): + return reader.readInt8() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeInt8(value) + 
+class UInt8(IntValue): + staticSize = 1 + def read(self, reader, font, tableDict): + return reader.readUInt8() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUInt8(value) + class UInt24(IntValue): staticSize = 3 def read(self, reader, font, tableDict): @@ -182,10 +245,18 @@ def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUInt24(value) -class ComputedUShort(UShort): +class ComputedInt(IntValue): def xmlWrite(self, xmlWriter, font, value, name, attrs): - xmlWriter.comment("%s=%s" % (name, value)) - xmlWriter.newline() + if value is not None: + xmlWriter.comment("%s=%s" % (name, value)) + xmlWriter.newline() + +class ComputedUInt8(ComputedInt, UInt8): + pass +class ComputedUShort(ComputedInt, UShort): + pass +class ComputedULong(ComputedInt, ULong): + pass class Tag(SimpleValue): staticSize = 4 @@ -198,9 +269,7 @@ staticSize = 2 def readArray(self, reader, font, tableDict, count): glyphOrder = font.getGlyphOrder() - gids = array.array("H", reader.readData(2 * count)) - if sys.byteorder != "big": - gids.byteswap() + gids = reader.readUShortArray(count) try: l = [glyphOrder[gid] for gid in gids] except IndexError: @@ -212,6 +281,22 @@ def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUShort(font.getGlyphID(value)) + +class NameID(UShort): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + nameTable = font.get("name") if font else None + if nameTable: + name = nameTable.getDebugName(value) + xmlWriter.write(" ") + if name: + xmlWriter.comment(name) + else: + xmlWriter.comment("missing from name table") + log.warning("name id %d missing from name table" % value) + xmlWriter.newline() + + class FloatValue(SimpleValue): def xmlRead(self, attrs, content, font): return float(attrs["value"]) @@ -222,7 +307,7 @@ return reader.readUShort() / 10 def write(self, writer, font, tableDict, value, repeatIndex=None): - 
writer.writeUShort(int(round(value * 10))) + writer.writeUShort(round(value * 10)) class Fixed(FloatValue): staticSize = 4 @@ -231,33 +316,68 @@ def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeLong(fl2fi(value, 16)) +class F2Dot14(FloatValue): + staticSize = 2 + def read(self, reader, font, tableDict): + return fi2fl(reader.readShort(), 14) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeShort(fl2fi(value, 14)) + class Version(BaseConverter): staticSize = 4 def read(self, reader, font, tableDict): value = reader.readLong() assert (value >> 16) == 1, "Unsupported version 0x%08x" % value - return fi2fl(value, 16) + return value def write(self, writer, font, tableDict, value, repeatIndex=None): - if value < 0x10000: - value = fl2fi(value, 16) - value = int(round(value)) + value = fi2ve(value) assert (value >> 16) == 1, "Unsupported version 0x%08x" % value writer.writeLong(value) def xmlRead(self, attrs, content, font): value = attrs["value"] - value = float(int(value, 0)) if value.startswith("0") else float(value) - if value >= 0x10000: - value = fi2fl(value, 16) + value = ve2fi(value) return value def xmlWrite(self, xmlWriter, font, value, name, attrs): - if value >= 0x10000: - value = fi2fl(value, 16) - if value % 1 != 0: - # Write as hex - value = "0x%08x" % fl2fi(value, 16) + value = fi2ve(value) + value = "0x%08x" % value xmlWriter.simpletag(name, attrs + [("value", value)]) xmlWriter.newline() + @staticmethod + def fromFloat(v): + return fl2fi(v, 16) + + +class Char64(SimpleValue): + """An ASCII string with up to 64 characters. + + Unused character positions are filled with 0x00 bytes. + Used in Apple AAT fonts in the `gcid` table. 
+ """ + staticSize = 64 + + def read(self, reader, font, tableDict): + data = reader.readData(self.staticSize) + zeroPos = data.find(b"\0") + if zeroPos >= 0: + data = data[:zeroPos] + s = tounicode(data, encoding="ascii", errors="replace") + if s != tounicode(data, encoding="ascii", errors="ignore"): + log.warning('replaced non-ASCII characters in "%s"' % + s) + return s + + def write(self, writer, font, tableDict, value, repeatIndex=None): + data = tobytes(value, encoding="ascii", errors="replace") + if data != tobytes(value, encoding="ascii", errors="ignore"): + log.warning('replacing non-ASCII characters in "%s"' % + value) + if len(data) > self.staticSize: + log.warning('truncating overlong "%s" to %d bytes' % + (value, self.staticSize)) + data = (data + b"\0" * self.staticSize)[:self.staticSize] + writer.writeData(data) + class Struct(BaseConverter): @@ -292,18 +412,73 @@ Format = attrs.get("Format") if Format is not None: table.Format = int(Format) + + noPostRead = not hasattr(table, 'postRead') + if noPostRead: + # TODO Cache table.hasPropagated. 
+ cleanPropagation = False + for conv in table.getConverters(): + if conv.isPropagated: + cleanPropagation = True + if not hasattr(font, '_propagator'): + font._propagator = {} + propagator = font._propagator + assert conv.name not in propagator, (conv.name, propagator) + setattr(table, conv.name, None) + propagator[conv.name] = CountReference(table.__dict__, conv.name) + for element in content: if isinstance(element, tuple): name, attrs, content = element table.fromXML(name, attrs, content, font) else: pass + + table.populateDefaults(propagator=getattr(font, '_propagator', None)) + + if noPostRead: + if cleanPropagation: + for conv in table.getConverters(): + if conv.isPropagated: + propagator = font._propagator + del propagator[conv.name] + if not propagator: + del font._propagator + return table def __repr__(self): return "Struct of " + repr(self.tableClass) +class StructWithLength(Struct): + def read(self, reader, font, tableDict): + pos = reader.pos + table = self.tableClass() + table.decompile(reader, font) + reader.seek(pos + table.StructLength) + return table + + def write(self, writer, font, tableDict, value, repeatIndex=None): + for convIndex, conv in enumerate(value.getConverters()): + if conv.name == "StructLength": + break + lengthIndex = len(writer.items) + convIndex + if isinstance(value, FormatSwitchingBaseTable): + lengthIndex += 1 # implicit Format field + deadbeef = {1:0xDE, 2:0xDEAD, 4:0xDEADBEEF}[conv.staticSize] + + before = writer.getDataLength() + value.StructLength = deadbeef + value.compile(writer, font) + length = writer.getDataLength() - before + lengthWriter = writer.getSubWriter() + conv.write(lengthWriter, font, tableDict, length) + assert(writer.items[lengthIndex] == + b"\xde\xad\xbe\xef"[:conv.staticSize]) + writer.items[lengthIndex] = lengthWriter.getAllData() + + class Table(Struct): longOffset = False @@ -322,11 +497,6 @@ offset = self.readOffset(reader) if offset == 0: return None - if offset <= 3: - # XXX hack to work around 
buggy pala.ttf - print("*** Warning: offset is not 0, yet suspiciously low (%s). table: %s" \ - % (offset, self.tableClass.__name__)) - return None table = self.tableClass() reader = reader.getSubReader(offset) if font.lazy: @@ -357,18 +527,31 @@ return reader.readULong() +# TODO Clean / merge the SubTable and SubStruct + +class SubStruct(Struct): + def getConverter(self, tableType, lookupType): + tableClass = self.lookupTypes[tableType][lookupType] + return self.__class__(self.name, self.repeat, self.aux, tableClass) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + super(SubStruct, self).xmlWrite(xmlWriter, font, value, None, attrs) + class SubTable(Table): def getConverter(self, tableType, lookupType): tableClass = self.lookupTypes[tableType][lookupType] return self.__class__(self.name, self.repeat, self.aux, tableClass) + def xmlWrite(self, xmlWriter, font, value, name, attrs): + super(SubTable, self).xmlWrite(xmlWriter, font, value, None, attrs) class ExtSubTable(LTable, SubTable): def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.Extension = 1 # actually, mere presence of the field flags it as an Ext Subtable writer. + writer.Extension = True # actually, mere presence of the field flags it as an Ext Subtable writer. 
Table.write(self, writer, font, tableDict, value, repeatIndex) + class FeatureParams(Table): def getConverter(self, featureTag): tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams) @@ -377,7 +560,7 @@ class ValueFormat(IntValue): staticSize = 2 - def __init__(self, name, repeat, aux, tableClass): + def __init__(self, name, repeat, aux, tableClass=None): BaseConverter.__init__(self, name, repeat, aux, tableClass) self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1") def read(self, reader, font, tableDict): @@ -408,6 +591,858 @@ return value +class AATLookup(BaseConverter): + BIN_SEARCH_HEADER_SIZE = 10 + + def __init__(self, name, repeat, aux, tableClass): + BaseConverter.__init__(self, name, repeat, aux, tableClass) + if issubclass(self.tableClass, SimpleValue): + self.converter = self.tableClass(name='Value', repeat=None, aux=None) + else: + self.converter = Table(name='Value', repeat=None, aux=None, tableClass=self.tableClass) + + def read(self, reader, font, tableDict): + format = reader.readUShort() + if format == 0: + return self.readFormat0(reader, font) + elif format == 2: + return self.readFormat2(reader, font) + elif format == 4: + return self.readFormat4(reader, font) + elif format == 6: + return self.readFormat6(reader, font) + elif format == 8: + return self.readFormat8(reader, font) + else: + assert False, "unsupported lookup format: %d" % format + + def write(self, writer, font, tableDict, value, repeatIndex=None): + values = list(sorted([(font.getGlyphID(glyph), val) + for glyph, val in value.items()])) + # TODO: Also implement format 4. + formats = list(sorted(filter(None, [ + self.buildFormat0(writer, font, values), + self.buildFormat2(writer, font, values), + self.buildFormat6(writer, font, values), + self.buildFormat8(writer, font, values), + ]))) + # We use the format ID as secondary sort key to make the output + # deterministic when multiple formats have same encoded size. 
+ dataSize, lookupFormat, writeMethod = formats[0] + pos = writer.getDataLength() + writeMethod() + actualSize = writer.getDataLength() - pos + assert actualSize == dataSize, ( + "AATLookup format %d claimed to write %d bytes, but wrote %d" % + (lookupFormat, dataSize, actualSize)) + + @staticmethod + def writeBinSearchHeader(writer, numUnits, unitSize): + writer.writeUShort(unitSize) + writer.writeUShort(numUnits) + searchRange, entrySelector, rangeShift = \ + getSearchRange(n=numUnits, itemSize=unitSize) + writer.writeUShort(searchRange) + writer.writeUShort(entrySelector) + writer.writeUShort(rangeShift) + + def buildFormat0(self, writer, font, values): + numGlyphs = len(font.getGlyphOrder()) + if len(values) != numGlyphs: + return None + valueSize = self.converter.staticSize + return (2 + numGlyphs * valueSize, 0, + lambda: self.writeFormat0(writer, font, values)) + + def writeFormat0(self, writer, font, values): + writer.writeUShort(0) + for glyphID_, value in values: + self.converter.write( + writer, font, tableDict=None, + value=value, repeatIndex=None) + + def buildFormat2(self, writer, font, values): + segStart, segValue = values[0] + segEnd = segStart + segments = [] + for glyphID, curValue in values[1:]: + if glyphID != segEnd + 1 or curValue != segValue: + segments.append((segStart, segEnd, segValue)) + segStart = segEnd = glyphID + segValue = curValue + else: + segEnd = glyphID + segments.append((segStart, segEnd, segValue)) + valueSize = self.converter.staticSize + numUnits, unitSize = len(segments) + 1, valueSize + 4 + return (2 + self.BIN_SEARCH_HEADER_SIZE + numUnits * unitSize, 2, + lambda: self.writeFormat2(writer, font, segments)) + + def writeFormat2(self, writer, font, segments): + writer.writeUShort(2) + valueSize = self.converter.staticSize + numUnits, unitSize = len(segments), valueSize + 4 + self.writeBinSearchHeader(writer, numUnits, unitSize) + for firstGlyph, lastGlyph, value in segments: + writer.writeUShort(lastGlyph) + 
writer.writeUShort(firstGlyph) + self.converter.write( + writer, font, tableDict=None, + value=value, repeatIndex=None) + writer.writeUShort(0xFFFF) + writer.writeUShort(0xFFFF) + writer.writeData(b'\x00' * valueSize) + + def buildFormat6(self, writer, font, values): + valueSize = self.converter.staticSize + numUnits, unitSize = len(values), valueSize + 2 + return (2 + self.BIN_SEARCH_HEADER_SIZE + (numUnits + 1) * unitSize, 6, + lambda: self.writeFormat6(writer, font, values)) + + def writeFormat6(self, writer, font, values): + writer.writeUShort(6) + valueSize = self.converter.staticSize + numUnits, unitSize = len(values), valueSize + 2 + self.writeBinSearchHeader(writer, numUnits, unitSize) + for glyphID, value in values: + writer.writeUShort(glyphID) + self.converter.write( + writer, font, tableDict=None, + value=value, repeatIndex=None) + writer.writeUShort(0xFFFF) + writer.writeData(b'\x00' * valueSize) + + def buildFormat8(self, writer, font, values): + minGlyphID, maxGlyphID = values[0][0], values[-1][0] + if len(values) != maxGlyphID - minGlyphID + 1: + return None + valueSize = self.converter.staticSize + return (6 + len(values) * valueSize, 8, + lambda: self.writeFormat8(writer, font, values)) + + def writeFormat8(self, writer, font, values): + firstGlyphID = values[0][0] + writer.writeUShort(8) + writer.writeUShort(firstGlyphID) + writer.writeUShort(len(values)) + for _, value in values: + self.converter.write( + writer, font, tableDict=None, + value=value, repeatIndex=None) + + def readFormat0(self, reader, font): + numGlyphs = len(font.getGlyphOrder()) + data = self.converter.readArray( + reader, font, tableDict=None, count=numGlyphs) + return {font.getGlyphName(k): value + for k, value in enumerate(data)} + + def readFormat2(self, reader, font): + mapping = {} + pos = reader.pos - 2 # start of table is at UShort for format + unitSize, numUnits = reader.readUShort(), reader.readUShort() + assert unitSize >= 4 + self.converter.staticSize, unitSize + 
for i in range(numUnits): + reader.seek(pos + i * unitSize + 12) + last = reader.readUShort() + first = reader.readUShort() + value = self.converter.read(reader, font, tableDict=None) + if last != 0xFFFF: + for k in range(first, last + 1): + mapping[font.getGlyphName(k)] = value + return mapping + + def readFormat4(self, reader, font): + mapping = {} + pos = reader.pos - 2 # start of table is at UShort for format + unitSize = reader.readUShort() + assert unitSize >= 6, unitSize + for i in range(reader.readUShort()): + reader.seek(pos + i * unitSize + 12) + last = reader.readUShort() + first = reader.readUShort() + offset = reader.readUShort() + if last != 0xFFFF: + dataReader = reader.getSubReader(0) # relative to current position + dataReader.seek(pos + offset) # relative to start of table + data = self.converter.readArray( + dataReader, font, tableDict=None, + count=last - first + 1) + for k, v in enumerate(data): + mapping[font.getGlyphName(first + k)] = v + return mapping + + def readFormat6(self, reader, font): + mapping = {} + pos = reader.pos - 2 # start of table is at UShort for format + unitSize = reader.readUShort() + assert unitSize >= 2 + self.converter.staticSize, unitSize + for i in range(reader.readUShort()): + reader.seek(pos + i * unitSize + 12) + glyphID = reader.readUShort() + value = self.converter.read( + reader, font, tableDict=None) + if glyphID != 0xFFFF: + mapping[font.getGlyphName(glyphID)] = value + return mapping + + def readFormat8(self, reader, font): + first = reader.readUShort() + count = reader.readUShort() + data = self.converter.readArray( + reader, font, tableDict=None, count=count) + return {font.getGlyphName(first + k): value + for (k, value) in enumerate(data)} + + def xmlRead(self, attrs, content, font): + value = {} + for element in content: + if isinstance(element, tuple): + name, a, eltContent = element + if name == "Lookup": + value[a["glyph"]] = self.converter.xmlRead(a, eltContent, font) + return value + + def 
xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + for glyph, value in sorted(value.items()): + self.converter.xmlWrite( + xmlWriter, font, value=value, + name="Lookup", attrs=[("glyph", glyph)]) + xmlWriter.endtag(name) + xmlWriter.newline() + + +# The AAT 'ankr' table has an unusual structure: An offset to an AATLookup +# followed by an offset to a glyph data table. Other than usual, the +# offsets in the AATLookup are not relative to the beginning of +# the beginning of the 'ankr' table, but relative to the glyph data table. +# So, to find the anchor data for a glyph, one needs to add the offset +# to the data table to the offset found in the AATLookup, and then use +# the sum of these two offsets to find the actual data. +class AATLookupWithDataOffset(BaseConverter): + def read(self, reader, font, tableDict): + lookupOffset = reader.readULong() + dataOffset = reader.readULong() + lookupReader = reader.getSubReader(lookupOffset) + lookup = AATLookup('DataOffsets', None, None, UShort) + offsets = lookup.read(lookupReader, font, tableDict) + result = {} + for glyph, offset in offsets.items(): + dataReader = reader.getSubReader(offset + dataOffset) + item = self.tableClass() + item.decompile(dataReader, font) + result[glyph] = item + return result + + def write(self, writer, font, tableDict, value, repeatIndex=None): + # We do not work with OTTableWriter sub-writers because + # the offsets in our AATLookup are relative to our data + # table, for which we need to provide an offset value itself. + # It might have been possible to somehow make a kludge for + # performing this indirect offset computation directly inside + # OTTableWriter. But this would have made the internal logic + # of OTTableWriter even more complex than it already is, + # so we decided to roll our own offset computation for the + # contents of the AATLookup and associated data table. 
+ offsetByGlyph, offsetByData, dataLen = {}, {}, 0 + compiledData = [] + for glyph in sorted(value, key=font.getGlyphID): + subWriter = OTTableWriter() + value[glyph].compile(subWriter, font) + data = subWriter.getAllData() + offset = offsetByData.get(data, None) + if offset == None: + offset = dataLen + dataLen = dataLen + len(data) + offsetByData[data] = offset + compiledData.append(data) + offsetByGlyph[glyph] = offset + # For calculating the offsets to our AATLookup and data table, + # we can use the regular OTTableWriter infrastructure. + lookupWriter = writer.getSubWriter() + lookupWriter.longOffset = True + lookup = AATLookup('DataOffsets', None, None, UShort) + lookup.write(lookupWriter, font, tableDict, offsetByGlyph, None) + + dataWriter = writer.getSubWriter() + dataWriter.longOffset = True + writer.writeSubTable(lookupWriter) + writer.writeSubTable(dataWriter) + for d in compiledData: + dataWriter.writeData(d) + + def xmlRead(self, attrs, content, font): + lookup = AATLookup('DataOffsets', None, None, self.tableClass) + return lookup.xmlRead(attrs, content, font) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + lookup = AATLookup('DataOffsets', None, None, self.tableClass) + lookup.xmlWrite(xmlWriter, font, value, name, attrs) + + +class MorxSubtableConverter(BaseConverter): + _PROCESSING_ORDERS = { + # bits 30 and 28 of morx.CoverageFlags; see morx spec + (False, False): "LayoutOrder", + (True, False): "ReversedLayoutOrder", + (False, True): "LogicalOrder", + (True, True): "ReversedLogicalOrder", + } + + _PROCESSING_ORDERS_REVERSED = { + val: key for key, val in _PROCESSING_ORDERS.items() + } + + def __init__(self, name, repeat, aux): + BaseConverter.__init__(self, name, repeat, aux) + + def _setTextDirectionFromCoverageFlags(self, flags, subtable): + if (flags & 0x20) != 0: + subtable.TextDirection = "Any" + elif (flags & 0x80) != 0: + subtable.TextDirection = "Vertical" + else: + subtable.TextDirection = "Horizontal" + + def read(self, 
reader, font, tableDict): + pos = reader.pos + m = MorxSubtable() + m.StructLength = reader.readULong() + flags = reader.readUInt8() + orderKey = ((flags & 0x40) != 0, (flags & 0x10) != 0) + m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey] + self._setTextDirectionFromCoverageFlags(flags, m) + m.Reserved = reader.readUShort() + m.Reserved |= (flags & 0xF) << 16 + m.MorphType = reader.readUInt8() + m.SubFeatureFlags = reader.readULong() + tableClass = lookupTypes["morx"].get(m.MorphType) + if tableClass is None: + assert False, ("unsupported 'morx' lookup type %s" % + m.MorphType) + # To decode AAT ligatures, we need to know the subtable size. + # The easiest way to pass this along is to create a new reader + # that works on just the subtable as its data. + headerLength = reader.pos - pos + data = reader.data[ + reader.pos + : reader.pos + m.StructLength - headerLength] + assert len(data) == m.StructLength - headerLength + subReader = OTTableReader(data=data, tableTag=reader.tableTag) + m.SubStruct = tableClass() + m.SubStruct.decompile(subReader, font) + reader.seek(pos + m.StructLength) + return m + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + xmlWriter.comment("StructLength=%d" % value.StructLength) + xmlWriter.newline() + xmlWriter.simpletag("TextDirection", value=value.TextDirection) + xmlWriter.newline() + xmlWriter.simpletag("ProcessingOrder", + value=value.ProcessingOrder) + xmlWriter.newline() + if value.Reserved != 0: + xmlWriter.simpletag("Reserved", + value="0x%04x" % value.Reserved) + xmlWriter.newline() + xmlWriter.comment("MorphType=%d" % value.MorphType) + xmlWriter.newline() + xmlWriter.simpletag("SubFeatureFlags", + value="0x%08x" % value.SubFeatureFlags) + xmlWriter.newline() + value.SubStruct.toXML(xmlWriter, font) + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + m = MorxSubtable() + covFlags = 0 + m.Reserved = 0 + for eltName, 
eltAttrs, eltContent in filter(istuple, content): + if eltName == "CoverageFlags": + # Only in XML from old versions of fonttools. + covFlags = safeEval(eltAttrs["value"]) + orderKey = ((covFlags & 0x40) != 0, + (covFlags & 0x10) != 0) + m.ProcessingOrder = self._PROCESSING_ORDERS[ + orderKey] + self._setTextDirectionFromCoverageFlags( + covFlags, m) + elif eltName == "ProcessingOrder": + m.ProcessingOrder = eltAttrs["value"] + assert m.ProcessingOrder in self._PROCESSING_ORDERS_REVERSED, "unknown ProcessingOrder: %s" % m.ProcessingOrder + elif eltName == "TextDirection": + m.TextDirection = eltAttrs["value"] + assert m.TextDirection in {"Horizontal", "Vertical", "Any"}, "unknown TextDirection %s" % m.TextDirection + elif eltName == "Reserved": + m.Reserved = safeEval(eltAttrs["value"]) + elif eltName == "SubFeatureFlags": + m.SubFeatureFlags = safeEval(eltAttrs["value"]) + elif eltName.endswith("Morph"): + m.fromXML(eltName, eltAttrs, eltContent, font) + else: + assert False, eltName + m.Reserved = (covFlags & 0xF) << 16 | m.Reserved + return m + + def write(self, writer, font, tableDict, value, repeatIndex=None): + covFlags = (value.Reserved & 0x000F0000) >> 16 + reverseOrder, logicalOrder = self._PROCESSING_ORDERS_REVERSED[ + value.ProcessingOrder] + covFlags |= 0x80 if value.TextDirection == "Vertical" else 0 + covFlags |= 0x40 if reverseOrder else 0 + covFlags |= 0x20 if value.TextDirection == "Any" else 0 + covFlags |= 0x10 if logicalOrder else 0 + value.CoverageFlags = covFlags + lengthIndex = len(writer.items) + before = writer.getDataLength() + value.StructLength = 0xdeadbeef + # The high nibble of value.Reserved is actuallly encoded + # into coverageFlags, so we need to clear it here. 
+ origReserved = value.Reserved # including high nibble + value.Reserved = value.Reserved & 0xFFFF # without high nibble + value.compile(writer, font) + value.Reserved = origReserved # restore original value + assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef" + length = writer.getDataLength() - before + writer.items[lengthIndex] = struct.pack(">L", length) + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6Tables.html#ExtendedStateHeader +# TODO: Untangle the implementation of the various lookup-specific formats. +class STXHeader(BaseConverter): + def __init__(self, name, repeat, aux, tableClass): + BaseConverter.__init__(self, name, repeat, aux, tableClass) + assert issubclass(self.tableClass, AATAction) + self.classLookup = AATLookup("GlyphClasses", None, None, UShort) + if issubclass(self.tableClass, ContextualMorphAction): + self.perGlyphLookup = AATLookup("PerGlyphLookup", + None, None, GlyphID) + else: + self.perGlyphLookup = None + + def read(self, reader, font, tableDict): + table = AATStateTable() + pos = reader.pos + classTableReader = reader.getSubReader(0) + stateArrayReader = reader.getSubReader(0) + entryTableReader = reader.getSubReader(0) + actionReader = None + ligaturesReader = None + table.GlyphClassCount = reader.readULong() + classTableReader.seek(pos + reader.readULong()) + stateArrayReader.seek(pos + reader.readULong()) + entryTableReader.seek(pos + reader.readULong()) + if self.perGlyphLookup is not None: + perGlyphTableReader = reader.getSubReader(0) + perGlyphTableReader.seek(pos + reader.readULong()) + if issubclass(self.tableClass, LigatureMorphAction): + actionReader = reader.getSubReader(0) + actionReader.seek(pos + reader.readULong()) + ligComponentReader = reader.getSubReader(0) + ligComponentReader.seek(pos + reader.readULong()) + ligaturesReader = reader.getSubReader(0) + ligaturesReader.seek(pos + reader.readULong()) + numLigComponents = (ligaturesReader.pos + - ligComponentReader.pos) // 2 + assert 
numLigComponents >= 0 + table.LigComponents = \ + ligComponentReader.readUShortArray(numLigComponents) + table.Ligatures = self._readLigatures(ligaturesReader, font) + table.GlyphClasses = self.classLookup.read(classTableReader, + font, tableDict) + numStates = int((entryTableReader.pos - stateArrayReader.pos) + / (table.GlyphClassCount * 2)) + for stateIndex in range(numStates): + state = AATState() + table.States.append(state) + for glyphClass in range(table.GlyphClassCount): + entryIndex = stateArrayReader.readUShort() + state.Transitions[glyphClass] = \ + self._readTransition(entryTableReader, + entryIndex, font, + actionReader) + if self.perGlyphLookup is not None: + table.PerGlyphLookups = self._readPerGlyphLookups( + table, perGlyphTableReader, font) + return table + + def _readTransition(self, reader, entryIndex, font, actionReader): + transition = self.tableClass() + entryReader = reader.getSubReader( + reader.pos + entryIndex * transition.staticSize) + transition.decompile(entryReader, font, actionReader) + return transition + + def _readLigatures(self, reader, font): + limit = len(reader.data) + numLigatureGlyphs = (limit - reader.pos) // 2 + return [font.getGlyphName(g) + for g in reader.readUShortArray(numLigatureGlyphs)] + + def _countPerGlyphLookups(self, table): + # Somewhat annoyingly, the morx table does not encode + # the size of the per-glyph table. So we need to find + # the maximum value that MorphActions use as index + # into this table. 
+ numLookups = 0 + for state in table.States: + for t in state.Transitions.values(): + if isinstance(t, ContextualMorphAction): + if t.MarkIndex != 0xFFFF: + numLookups = max( + numLookups, + t.MarkIndex + 1) + if t.CurrentIndex != 0xFFFF: + numLookups = max( + numLookups, + t.CurrentIndex + 1) + return numLookups + + def _readPerGlyphLookups(self, table, reader, font): + pos = reader.pos + lookups = [] + for _ in range(self._countPerGlyphLookups(table)): + lookupReader = reader.getSubReader(0) + lookupReader.seek(pos + reader.readULong()) + lookups.append( + self.perGlyphLookup.read(lookupReader, font, {})) + return lookups + + def write(self, writer, font, tableDict, value, repeatIndex=None): + glyphClassWriter = OTTableWriter() + self.classLookup.write(glyphClassWriter, font, tableDict, + value.GlyphClasses, repeatIndex=None) + glyphClassData = pad(glyphClassWriter.getAllData(), 4) + glyphClassCount = max(value.GlyphClasses.values()) + 1 + glyphClassTableOffset = 16 # size of STXHeader + if self.perGlyphLookup is not None: + glyphClassTableOffset += 4 + + actionData, actionIndex = None, None + if issubclass(self.tableClass, LigatureMorphAction): + glyphClassTableOffset += 12 + actionData, actionIndex = \ + self._compileLigActions(value, font) + actionData = pad(actionData, 4) + + stateArrayData, entryTableData = self._compileStates( + font, value.States, glyphClassCount, actionIndex) + stateArrayOffset = glyphClassTableOffset + len(glyphClassData) + entryTableOffset = stateArrayOffset + len(stateArrayData) + perGlyphOffset = entryTableOffset + len(entryTableData) + perGlyphData = \ + pad(self._compilePerGlyphLookups(value, font), 4) + ligComponentsData = self._compileLigComponents(value, font) + ligaturesData = self._compileLigatures(value, font) + if actionData is None: + actionOffset = None + ligComponentsOffset = None + ligaturesOffset = None + else: + assert len(perGlyphData) == 0 + actionOffset = entryTableOffset + len(entryTableData) + ligComponentsOffset 
= actionOffset + len(actionData) + ligaturesOffset = ligComponentsOffset + len(ligComponentsData) + writer.writeULong(glyphClassCount) + writer.writeULong(glyphClassTableOffset) + writer.writeULong(stateArrayOffset) + writer.writeULong(entryTableOffset) + if self.perGlyphLookup is not None: + writer.writeULong(perGlyphOffset) + if actionOffset is not None: + writer.writeULong(actionOffset) + writer.writeULong(ligComponentsOffset) + writer.writeULong(ligaturesOffset) + writer.writeData(glyphClassData) + writer.writeData(stateArrayData) + writer.writeData(entryTableData) + writer.writeData(perGlyphData) + if actionData is not None: + writer.writeData(actionData) + if ligComponentsData is not None: + writer.writeData(ligComponentsData) + if ligaturesData is not None: + writer.writeData(ligaturesData) + + def _compileStates(self, font, states, glyphClassCount, actionIndex): + stateArrayWriter = OTTableWriter() + entries, entryIDs = [], {} + for state in states: + for glyphClass in range(glyphClassCount): + transition = state.Transitions[glyphClass] + entryWriter = OTTableWriter() + transition.compile(entryWriter, font, + actionIndex) + entryData = entryWriter.getAllData() + assert len(entryData) == transition.staticSize, ( \ + "%s has staticSize %d, " + "but actually wrote %d bytes" % ( + repr(transition), + transition.staticSize, + len(entryData))) + entryIndex = entryIDs.get(entryData) + if entryIndex is None: + entryIndex = len(entries) + entryIDs[entryData] = entryIndex + entries.append(entryData) + stateArrayWriter.writeUShort(entryIndex) + stateArrayData = pad(stateArrayWriter.getAllData(), 4) + entryTableData = pad(bytesjoin(entries), 4) + return stateArrayData, entryTableData + + def _compilePerGlyphLookups(self, table, font): + if self.perGlyphLookup is None: + return b"" + numLookups = self._countPerGlyphLookups(table) + assert len(table.PerGlyphLookups) == numLookups, ( + "len(AATStateTable.PerGlyphLookups) is %d, " + "but the actions inside the table refer 
to %d" % + (len(table.PerGlyphLookups), numLookups)) + writer = OTTableWriter() + for lookup in table.PerGlyphLookups: + lookupWriter = writer.getSubWriter() + lookupWriter.longOffset = True + self.perGlyphLookup.write(lookupWriter, font, + {}, lookup, None) + writer.writeSubTable(lookupWriter) + return writer.getAllData() + + def _compileLigActions(self, table, font): + assert issubclass(self.tableClass, LigatureMorphAction) + actions = set() + for state in table.States: + for _glyphClass, trans in state.Transitions.items(): + actions.add(trans.compileLigActions()) + result, actionIndex = b"", {} + # Sort the compiled actions in decreasing order of + # length, so that the longer sequence come before the + # shorter ones. For each compiled action ABCD, its + # suffixes BCD, CD, and D do not be encoded separately + # (in case they occur); instead, we can just store an + # index that points into the middle of the longer + # sequence. Every compiled AAT ligature sequence is + # terminated with an end-of-sequence flag, which can + # only be set on the last element of the sequence. + # Therefore, it is sufficient to consider just the + # suffixes. 
+ for a in sorted(actions, key=lambda x:(-len(x), x)): + if a not in actionIndex: + for i in range(0, len(a), 4): + suffix = a[i:] + suffixIndex = (len(result) + i) // 4 + actionIndex.setdefault( + suffix, suffixIndex) + result += a + assert len(result) % self.tableClass.staticSize == 0 + return (result, actionIndex) + + def _compileLigComponents(self, table, font): + if not hasattr(table, "LigComponents"): + return None + writer = OTTableWriter() + for component in table.LigComponents: + writer.writeUShort(component) + return writer.getAllData() + + def _compileLigatures(self, table, font): + if not hasattr(table, "Ligatures"): + return None + writer = OTTableWriter() + for glyphName in table.Ligatures: + writer.writeUShort(font.getGlyphID(glyphName)) + return writer.getAllData() + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + xmlWriter.comment("GlyphClassCount=%s" %value.GlyphClassCount) + xmlWriter.newline() + for g, klass in sorted(value.GlyphClasses.items()): + xmlWriter.simpletag("GlyphClass", glyph=g, value=klass) + xmlWriter.newline() + for stateIndex, state in enumerate(value.States): + xmlWriter.begintag("State", index=stateIndex) + xmlWriter.newline() + for glyphClass, trans in sorted(state.Transitions.items()): + trans.toXML(xmlWriter, font=font, + attrs={"onGlyphClass": glyphClass}, + name="Transition") + xmlWriter.endtag("State") + xmlWriter.newline() + for i, lookup in enumerate(value.PerGlyphLookups): + xmlWriter.begintag("PerGlyphLookup", index=i) + xmlWriter.newline() + for glyph, val in sorted(lookup.items()): + xmlWriter.simpletag("Lookup", glyph=glyph, + value=val) + xmlWriter.newline() + xmlWriter.endtag("PerGlyphLookup") + xmlWriter.newline() + if hasattr(value, "LigComponents"): + xmlWriter.begintag("LigComponents") + xmlWriter.newline() + for i, val in enumerate(getattr(value, "LigComponents")): + xmlWriter.simpletag("LigComponent", index=i, + value=val) + 
xmlWriter.newline() + xmlWriter.endtag("LigComponents") + xmlWriter.newline() + self._xmlWriteLigatures(xmlWriter, font, value, name, attrs) + xmlWriter.endtag(name) + xmlWriter.newline() + + def _xmlWriteLigatures(self, xmlWriter, font, value, name, attrs): + if not hasattr(value, "Ligatures"): + return + xmlWriter.begintag("Ligatures") + xmlWriter.newline() + for i, g in enumerate(getattr(value, "Ligatures")): + xmlWriter.simpletag("Ligature", index=i, glyph=g) + xmlWriter.newline() + xmlWriter.endtag("Ligatures") + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + table = AATStateTable() + for eltName, eltAttrs, eltContent in filter(istuple, content): + if eltName == "GlyphClass": + glyph = eltAttrs["glyph"] + value = eltAttrs["value"] + table.GlyphClasses[glyph] = safeEval(value) + elif eltName == "State": + state = self._xmlReadState(eltAttrs, eltContent, font) + table.States.append(state) + elif eltName == "PerGlyphLookup": + lookup = self.perGlyphLookup.xmlRead( + eltAttrs, eltContent, font) + table.PerGlyphLookups.append(lookup) + elif eltName == "LigComponents": + table.LigComponents = \ + self._xmlReadLigComponents( + eltAttrs, eltContent, font) + elif eltName == "Ligatures": + table.Ligatures = \ + self._xmlReadLigatures( + eltAttrs, eltContent, font) + table.GlyphClassCount = max(table.GlyphClasses.values()) + 1 + return table + + def _xmlReadState(self, attrs, content, font): + state = AATState() + for eltName, eltAttrs, eltContent in filter(istuple, content): + if eltName == "Transition": + glyphClass = safeEval(eltAttrs["onGlyphClass"]) + transition = self.tableClass() + transition.fromXML(eltName, eltAttrs, + eltContent, font) + state.Transitions[glyphClass] = transition + return state + + def _xmlReadLigComponents(self, attrs, content, font): + ligComponents = [] + for eltName, eltAttrs, _eltContent in filter(istuple, content): + if eltName == "LigComponent": + ligComponents.append( + safeEval(eltAttrs["value"])) + return 
ligComponents + + def _xmlReadLigatures(self, attrs, content, font): + ligs = [] + for eltName, eltAttrs, _eltContent in filter(istuple, content): + if eltName == "Ligature": + ligs.append(eltAttrs["glyph"]) + return ligs + + +class CIDGlyphMap(BaseConverter): + def read(self, reader, font, tableDict): + numCIDs = reader.readUShort() + result = {} + for cid, glyphID in enumerate(reader.readUShortArray(numCIDs)): + if glyphID != 0xFFFF: + result[cid] = font.getGlyphName(glyphID) + return result + + def write(self, writer, font, tableDict, value, repeatIndex=None): + items = {cid: font.getGlyphID(glyph) + for cid, glyph in value.items()} + count = max(items) + 1 if items else 0 + writer.writeUShort(count) + for cid in range(count): + writer.writeUShort(items.get(cid, 0xFFFF)) + + def xmlRead(self, attrs, content, font): + result = {} + for eName, eAttrs, _eContent in filter(istuple, content): + if eName == "CID": + result[safeEval(eAttrs["cid"])] = \ + eAttrs["glyph"].strip() + return result + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + for cid, glyph in sorted(value.items()): + if glyph is not None and glyph != 0xFFFF: + xmlWriter.simpletag( + "CID", cid=cid, glyph=glyph) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + +class GlyphCIDMap(BaseConverter): + def read(self, reader, font, tableDict): + glyphOrder = font.getGlyphOrder() + count = reader.readUShort() + cids = reader.readUShortArray(count) + if count > len(glyphOrder): + log.warning("GlyphCIDMap has %d elements, " + "but the font has only %d glyphs; " + "ignoring the rest" % + (count, len(glyphOrder))) + result = {} + for glyphID in range(min(len(cids), len(glyphOrder))): + cid = cids[glyphID] + if cid != 0xFFFF: + result[glyphOrder[glyphID]] = cid + return result + + def write(self, writer, font, tableDict, value, repeatIndex=None): + items = {font.getGlyphID(g): cid + for g, cid in value.items() + if cid is 
not None and cid != 0xFFFF} + count = max(items) + 1 if items else 0 + writer.writeUShort(count) + for glyphID in range(count): + writer.writeUShort(items.get(glyphID, 0xFFFF)) + + def xmlRead(self, attrs, content, font): + result = {} + for eName, eAttrs, _eContent in filter(istuple, content): + if eName == "CID": + result[eAttrs["glyph"]] = \ + safeEval(eAttrs["value"]) + return result + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + for glyph, cid in sorted(value.items()): + if cid is not None and cid != 0xFFFF: + xmlWriter.simpletag( + "CID", glyph=glyph, value=cid) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + class DeltaValue(BaseConverter): def read(self, reader, font, tableDict): @@ -462,20 +1497,129 @@ return safeEval(attrs["value"]) +class VarIdxMapValue(BaseConverter): + + def read(self, reader, font, tableDict): + fmt = tableDict['EntryFormat'] + nItems = tableDict['MappingCount'] + + innerBits = 1 + (fmt & 0x000F) + innerMask = (1<> 4) + read = { + 1: reader.readUInt8, + 2: reader.readUShort, + 3: reader.readUInt24, + 4: reader.readULong, + }[entrySize] + + mapping = [] + for i in range(nItems): + raw = read() + idx = ((raw & outerMask) << outerShift) | (raw & innerMask) + mapping.append(idx) + + return mapping + + def write(self, writer, font, tableDict, value, repeatIndex=None): + fmt = tableDict['EntryFormat'] + mapping = value + writer['MappingCount'].setValue(len(mapping)) + + innerBits = 1 + (fmt & 0x000F) + innerMask = (1<> 4) + write = { + 1: writer.writeUInt8, + 2: writer.writeUShort, + 3: writer.writeUInt24, + 4: writer.writeULong, + }[entrySize] + + for idx in mapping: + raw = ((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask) + write(raw) + + +class VarDataValue(BaseConverter): + + def read(self, reader, font, tableDict): + values = [] + + regionCount = tableDict["VarRegionCount"] + shortCount = tableDict["NumShorts"] + + for i in 
range(min(regionCount, shortCount)): + values.append(reader.readShort()) + for i in range(min(regionCount, shortCount), regionCount): + values.append(reader.readInt8()) + for i in range(regionCount, shortCount): + reader.readInt8() + + return values + + def write(self, writer, font, tableDict, value, repeatIndex=None): + regionCount = tableDict["VarRegionCount"] + shortCount = tableDict["NumShorts"] + + for i in range(min(regionCount, shortCount)): + writer.writeShort(value[i]) + for i in range(min(regionCount, shortCount), regionCount): + writer.writeInt8(value[i]) + for i in range(regionCount, shortCount): + writer.writeInt8(0) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + return safeEval(attrs["value"]) + + converterMapping = { # type class - "int16": Short, + "int8": Int8, + "int16": Short, + "uint8": UInt8, + "uint8": UInt8, "uint16": UShort, "uint24": UInt24, "uint32": ULong, + "char64": Char64, + "Flags32": Flags32, "Version": Version, "Tag": Tag, "GlyphID": GlyphID, + "NameID": NameID, "DeciPoints": DeciPoints, "Fixed": Fixed, + "F2Dot14": F2Dot14, "struct": Struct, "Offset": Table, "LOffset": LTable, "ValueRecord": ValueRecord, "DeltaValue": DeltaValue, + "VarIdxMapValue": VarIdxMapValue, + "VarDataValue": VarDataValue, + + # AAT + "CIDGlyphMap": CIDGlyphMap, + "GlyphCIDMap": GlyphCIDMap, + "MortChain": StructWithLength, + "MortSubtable": StructWithLength, + "MorxChain": StructWithLength, + "MorxSubtable": MorxSubtableConverter, + + # "Template" types + "AATLookup": lambda C: partial(AATLookup, tableClass=C), + "AATLookupWithDataOffset": lambda C: partial(AATLookupWithDataOffset, tableClass=C), + "STXHeader": lambda C: partial(STXHeader, tableClass=C), + "OffsetTo": lambda C: partial(Table, tableClass=C), + "LOffsetTo": lambda C: partial(LTable, tableClass=C), } diff -Nru 
fonttools-3.0/Lib/fontTools/ttLib/tables/otData.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/otData.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/otData.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/otData.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,3 +1,4 @@ +# coding: utf-8 from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * @@ -59,23 +60,23 @@ ('FeatureParamsSize', [ ('DeciPoints', 'DesignSize', None, None, 'The design size in 720/inch units (decipoints).'), ('uint16', 'SubfamilyID', None, None, 'Serves as an identifier that associates fonts in a subfamily.'), - ('uint16', 'SubfamilyNameID', None, None, 'Subfamily NameID.'), + ('NameID', 'SubfamilyNameID', None, None, 'Subfamily NameID.'), ('DeciPoints', 'RangeStart', None, None, 'Small end of recommended usage range (exclusive) in 720/inch units.'), ('DeciPoints', 'RangeEnd', None, None, 'Large end of recommended usage range (inclusive) in 720/inch units.'), ]), ('FeatureParamsStylisticSet', [ ('uint16', 'Version', None, None, 'Set to 0.'), - ('uint16', 'UINameID', None, None, 'UI NameID.'), + ('NameID', 'UINameID', None, None, 'UI NameID.'), ]), ('FeatureParamsCharacterVariants', [ ('uint16', 'Format', None, None, 'Set to 0.'), - ('uint16', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'), - ('uint16', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'), - ('uint16', 'SampleTextNameID', None, None, 'Sample text NameID.'), + ('NameID', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'), + ('NameID', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'), + ('NameID', 'SampleTextNameID', None, None, 'Sample text NameID.'), ('uint16', 'NumNamedParameters', None, None, 'Number of named parameters.'), - ('uint16', 'FirstParamUILabelNameID', None, None, 'First NameID of UI feature parameters.'), + ('NameID', 'FirstParamUILabelNameID', None, None, 'First NameID of 
UI feature parameters.'), ('uint16', 'CharCount', None, None, 'Count of characters this feature provides glyph variants for.'), ('uint24', 'Character', 'CharCount', 0, 'Unicode characters for which this feature provides glyph variants.'), ]), @@ -134,7 +135,7 @@ ('uint16', 'StartSize', None, None, 'Smallest size to correct-in ppem'), ('uint16', 'EndSize', None, None, 'Largest size to correct-in ppem'), ('uint16', 'DeltaFormat', None, None, 'Format of DeltaValue array data: 1, 2, or 3'), - ('DeltaValue', 'DeltaValue', '', 0, 'Array of compressed data'), + ('DeltaValue', 'DeltaValue', '', 'DeltaFormat in (1,2,3)', 'Array of compressed data'), ]), @@ -143,10 +144,11 @@ # ('GPOS', [ - ('Version', 'Version', None, None, 'Version of the GPOS table-initially = 0x00010000'), + ('Version', 'Version', None, None, 'Version of the GPOS table- 0x00010000 or 0x00010001'), ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GPOS table'), ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GPOS table'), ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GPOS table'), + ('LOffset', 'FeatureVariations', None, 'Version >= 0x00010001', 'Offset to FeatureVariations table-from beginning of GPOS table'), ]), ('SinglePosFormat1', [ @@ -443,10 +445,11 @@ # ('GSUB', [ - ('Version', 'Version', None, None, 'Version of the GSUB table-initially set to 0x00010000'), + ('Version', 'Version', None, None, 'Version of the GSUB table- 0x00010000 or 0x00010001'), ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GSUB table'), ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GSUB table'), ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GSUB table'), + ('LOffset', 'FeatureVariations', None, 'Version >= 0x00010001', 'Offset to FeatureVariations table-from beginning of GSUB table'), ]), ('SingleSubstFormat1', 
[ @@ -639,12 +642,13 @@ # ('GDEF', [ - ('Version', 'Version', None, None, 'Version of the GDEF table-initially 0x00010000'), + ('Version', 'Version', None, None, 'Version of the GDEF table- 0x00010000, 0x00010002, or 0x00010003'), ('Offset', 'GlyphClassDef', None, None, 'Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)'), ('Offset', 'AttachList', None, None, 'Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)'), ('Offset', 'LigCaretList', None, None, 'Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)'), ('Offset', 'MarkAttachClassDef', None, None, 'Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)'), - ('Offset', 'MarkGlyphSetsDef', None, 'int(round(Version*0x10000)) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), + ('Offset', 'MarkGlyphSetsDef', None, 'Version >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), + ('LOffset', 'VarStore', None, 'Version >= 0x00010003', 'Offset to variation store (may be NULL)'), ]), ('AttachList', [ @@ -836,6 +840,193 @@ ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order'), ]), + + # + # STAT + # + ('STAT', [ + ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000, currently 0x00010002.'), + ('uint16', 'DesignAxisRecordSize', None, None, 'Size in bytes of each design axis record'), + ('uint16', 'DesignAxisCount', None, None, 'Number of design axis records'), + ('LOffsetTo(AxisRecordArray)', 'DesignAxisRecord', None, None, 'Offset in bytes from the beginning of the STAT table to the start of the design axes array'), + ('uint16', 'AxisValueCount', None, None, 'Number of axis value tables'), + ('LOffsetTo(AxisValueArray)', 
'AxisValueArray', None, None, 'Offset in bytes from the beginning of the STAT table to the start of the axes value offset array'), + ('NameID', 'ElidedFallbackNameID', None, 'Version >= 0x00010001', 'NameID to use when all style attributes are elided.'), + ]), + + ('AxisRecordArray', [ + ('AxisRecord', 'Axis', 'DesignAxisCount', 0, 'Axis records'), + ]), + + ('AxisRecord', [ + ('Tag', 'AxisTag', None, None, 'A tag identifying the axis of design variation'), + ('NameID', 'AxisNameID', None, None, 'The name ID for entries in the "name" table that provide a display string for this axis'), + ('uint16', 'AxisOrdering', None, None, 'A value that applications can use to determine primary sorting of face names, or for ordering of descriptors when composing family or face names'), + ('uint8', 'MoreBytes', 'DesignAxisRecordSize', -8, 'Extra bytes. Set to empty array.'), + ]), + + ('AxisValueArray', [ + ('Offset', 'AxisValue', 'AxisValueCount', 0, 'Axis values'), + ]), + + ('AxisValueFormat1', [ + ('uint16', 'Format', None, None, 'Format, = 1'), + ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'), + ('uint16', 'Flags', None, None, 'Flags.'), + ('NameID', 'ValueNameID', None, None, ''), + ('Fixed', 'Value', None, None, ''), + ]), + + ('AxisValueFormat2', [ + ('uint16', 'Format', None, None, 'Format, = 2'), + ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'), + ('uint16', 'Flags', None, None, 'Flags.'), + ('NameID', 'ValueNameID', None, None, ''), + ('Fixed', 'NominalValue', None, None, ''), + ('Fixed', 'RangeMinValue', None, None, ''), + ('Fixed', 'RangeMaxValue', None, None, ''), + ]), + + ('AxisValueFormat3', [ + ('uint16', 'Format', None, None, 'Format, = 3'), + ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation 
to which the axis value record applies.'), + ('uint16', 'Flags', None, None, 'Flags.'), + ('NameID', 'ValueNameID', None, None, ''), + ('Fixed', 'Value', None, None, ''), + ('Fixed', 'LinkedValue', None, None, ''), + ]), + + ('AxisValueFormat4', [ + ('uint16', 'Format', None, None, 'Format, = 4'), + ('uint16', 'AxisCount', None, None, 'The total number of axes contributing to this axis-values combination.'), + ('uint16', 'Flags', None, None, 'Flags.'), + ('NameID', 'ValueNameID', None, None, ''), + ('struct', 'AxisValueRecord', 'AxisCount', 0, 'Array of AxisValue records that provide the combination of axis values, one for each contributing axis. '), + ]), + + ('AxisValueRecord', [ + ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'), + ('Fixed', 'Value', None, None, 'A numeric value for this attribute value.'), + ]), + + + # + # Variation fonts + # + + # GSUB/GPOS FeatureVariations + + ('FeatureVariations', [ + ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000'), + ('uint32', 'FeatureVariationCount', None, None, 'Number of records in the FeatureVariationRecord array'), + ('struct', 'FeatureVariationRecord', 'FeatureVariationCount', 0, 'Array of FeatureVariationRecord'), + ]), + + ('FeatureVariationRecord', [ + ('LOffset', 'ConditionSet', None, None, 'Offset to a ConditionSet table, from beginning of the FeatureVariations table.'), + ('LOffset', 'FeatureTableSubstitution', None, None, 'Offset to a FeatureTableSubstitution table, from beginning of the FeatureVariations table'), + ]), + + ('ConditionSet', [ + ('uint16', 'ConditionCount', None, None, 'Number of condition tables in the ConditionTable array'), + ('LOffset', 'ConditionTable', 'ConditionCount', 0, 'Array of condition tables.'), + ]), + + ('ConditionTableFormat1', [ + ('uint16', 'Format', None, None, 'Format, = 1'), + ('uint16', 'AxisIndex', None, None, 'Index for the 
variation axis within the fvar table, base 0.'), + ('F2Dot14', 'FilterRangeMinValue', None, None, 'Minimum normalized axis value of the font variation instances that satisfy this condition.'), + ('F2Dot14', 'FilterRangeMaxValue', None, None, 'Maximum value that satisfies this condition.'), + ]), + + ('FeatureTableSubstitution', [ + ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000'), + ('uint16', 'SubstitutionCount', None, None, 'Number of records in the FeatureVariationRecords array'), + ('FeatureTableSubstitutionRecord', 'SubstitutionRecord', 'SubstitutionCount', 0, 'Array of FeatureTableSubstitutionRecord'), + ]), + + ('FeatureTableSubstitutionRecord', [ + ('uint16', 'FeatureIndex', None, None, 'The feature table index to match.'), + ('LOffset', 'Feature', None, None, 'Offset to an alternate feature table, from start of the FeatureTableSubstitution table.'), + ]), + + # VariationStore + + ('VarRegionAxis', [ + ('F2Dot14', 'StartCoord', None, None, ''), + ('F2Dot14', 'PeakCoord', None, None, ''), + ('F2Dot14', 'EndCoord', None, None, ''), + ]), + + ('VarRegion', [ + ('struct', 'VarRegionAxis', 'RegionAxisCount', 0, ''), + ]), + + ('VarRegionList', [ + ('uint16', 'RegionAxisCount', None, None, ''), + ('uint16', 'RegionCount', None, None, ''), + ('VarRegion', 'Region', 'RegionCount', 0, ''), + ]), + + ('VarData', [ + ('uint16', 'ItemCount', None, None, ''), + ('uint16', 'NumShorts', None, None, ''), # Automatically computed + ('uint16', 'VarRegionCount', None, None, ''), + ('uint16', 'VarRegionIndex', 'VarRegionCount', 0, ''), + ('VarDataValue', 'Item', 'ItemCount', 0, ''), + ]), + + ('VarStore', [ + ('uint16', 'Format', None, None, 'Set to 1.'), + ('LOffset', 'VarRegionList', None, None, ''), + ('uint16', 'VarDataCount', None, None, ''), + ('LOffset', 'VarData', 'VarDataCount', 0, ''), + ]), + + # Variation helpers + + ('VarIdxMap', [ + ('uint16', 'EntryFormat', None, None, ''), # Automatically computed + ('uint16', 
'MappingCount', None, None, ''), # Automatically computed + ('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'), + ]), + + # Glyph advance variations + + ('HVAR', [ + ('Version', 'Version', None, None, 'Version of the HVAR table-initially = 0x00010000'), + ('LOffset', 'VarStore', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'AdvWidthMap', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'LsbMap', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'RsbMap', None, None, ''), + ]), + ('VVAR', [ + ('Version', 'Version', None, None, 'Version of the VVAR table-initially = 0x00010000'), + ('LOffset', 'VarStore', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'AdvHeightMap', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'TsbMap', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'BsbMap', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'VOrgMap', None, None, 'Vertical origin mapping.'), + ]), + + # Font-wide metrics variations + + ('MetricsValueRecord', [ + ('Tag', 'ValueTag', None, None, '4-byte font-wide measure identifier'), + ('uint32', 'VarIdx', None, None, 'Combined outer-inner variation index'), + ('uint8', 'MoreBytes', 'ValueRecordSize', -8, 'Extra bytes. 
Set to empty array.'), + ]), + + ('MVAR', [ + ('Version', 'Version', None, None, 'Version of the MVAR table-initially = 0x00010000'), + ('uint16', 'Reserved', None, None, 'Set to 0'), + ('uint16', 'ValueRecordSize', None, None, ''), + ('uint16', 'ValueRecordCount', None, None, ''), + ('Offset', 'VarStore', None, None, ''), + ('MetricsValueRecord', 'ValueRecord', 'ValueRecordCount', 0, ''), + ]), + + # # math # @@ -989,6 +1180,96 @@ ## Apple Advanced Typography (AAT) tables ## + ('AATLookupSegment', [ + ('uint16', 'lastGlyph', None, None, 'Last glyph index in this segment.'), + ('uint16', 'firstGlyph', None, None, 'First glyph index in this segment.'), + ('uint16', 'value', None, None, 'A 16-bit offset from the start of the table to the data.'), + ]), + + + # + # ankr + # + + ('ankr', [ + ('struct', 'AnchorPoints', None, None, 'Anchor points table.'), + ]), + + ('AnchorPointsFormat0', [ + ('uint16', 'Format', None, None, 'Format of the anchor points table, = 0.'), + ('uint16', 'Flags', None, None, 'Flags. Currenty unused, set to zero.'), + ('AATLookupWithDataOffset(AnchorGlyphData)', 'Anchors', None, None, 'Table of with anchor overrides for each glyph.'), + ]), + + ('AnchorGlyphData', [ + ('uint32', 'AnchorPointCount', None, None, 'Number of anchor points for this glyph.'), + ('struct', 'AnchorPoint', 'AnchorPointCount', 0, 'Individual anchor points.'), + ]), + + ('AnchorPoint', [ + ('int16', 'XCoordinate', None, None, 'X coordinate of this anchor point.'), + ('int16', 'YCoordinate', None, None, 'Y coordinate of this anchor point.'), + ]), + + # + # bsln + # + + ('bsln', [ + ('Version', 'Version', None, None, 'Version number of the AAT baseline table (0x00010000 for the initial version).'), + ('struct', 'Baseline', None, None, 'Baseline table.'), + ]), + + ('BaselineFormat0', [ + ('uint16', 'Format', None, None, 'Format of the baseline table, = 0.'), + ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. 
This value can be from 0 through 31.'), + ('uint16', 'Delta', 32, 0, u'These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.'), + ]), + + ('BaselineFormat1', [ + ('uint16', 'Format', None, None, 'Format of the baseline table, = 1.'), + ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'), + ('uint16', 'Delta', 32, 0, u'These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.'), + ('AATLookup(uint16)', 'BaselineValues', None, None, 'Lookup table that maps glyphs to their baseline values.'), + ]), + + ('BaselineFormat2', [ + ('uint16', 'Format', None, None, 'Format of the baseline table, = 1.'), + ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'), + ('GlyphID', 'StandardGlyph', None, None, 'Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.'), + ('uint16', 'ControlPoint', 32, 0, 'Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.'), + ]), + + ('BaselineFormat3', [ + ('uint16', 'Format', None, None, 'Format of the baseline table, = 1.'), + ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'), + ('GlyphID', 'StandardGlyph', None, None, 'Glyph index of the glyph in this font to be used to set the baseline values. 
This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.'), + ('uint16', 'ControlPoint', 32, 0, 'Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.'), + ('AATLookup(uint16)', 'BaselineValues', None, None, 'Lookup table that maps glyphs to their baseline values.'), + ]), + + + # + # cidg + # + + ('cidg', [ + ('struct', 'CIDGlyphMapping', None, None, 'CID-to-glyph mapping table.'), + ]), + + ('CIDGlyphMappingFormat0', [ + ('uint16', 'Format', None, None, 'Format of the CID-to-glyph mapping table, = 0.'), + ('uint16', 'DataFormat', None, None, 'Currenty unused, set to zero.'), + ('uint32', 'StructLength', None, None, 'Size of the table in bytes.'), + ('uint16', 'Registry', None, None, 'The registry ID.'), + ('char64', 'RegistryName', None, None, 'The registry name in ASCII; unused bytes should be set to 0.'), + ('uint16', 'Order', None, None, 'The order ID.'), + ('char64', 'OrderName', None, None, 'The order name in ASCII; unused bytes should be set to 0.'), + ('uint16', 'SupplementVersion', None, None, 'The supplement version.'), + ('CIDGlyphMap', 'Mapping', None, None, 'A mapping from CIDs to the glyphs in the font, starting with CID 0. 
If a CID from the identified collection has no glyph in the font, 0xFFFF is used'), + ]), + + # # feat # @@ -1010,7 +1291,7 @@ ('uint16', 'SettingsCount', None, None, 'The number of records in the setting name array.'), ('LOffset', 'Settings', None, None, 'Offset to setting table for this feature.'), ('uint16', 'FeatureFlags', None, None, 'Single-bit flags associated with the feature type.'), - ('uint16', 'FeatureNameID', None, None, 'The name table index for the feature name.'), + ('NameID', 'FeatureNameID', None, None, 'The name table index for the feature name.'), ]), ('Settings', [ @@ -1019,7 +1300,217 @@ ('Setting', [ ('uint16', 'SettingValue', None, None, 'The setting.'), - ('uint16', 'SettingNameID', None, None, 'The name table index for the setting name.'), + ('NameID', 'SettingNameID', None, None, 'The name table index for the setting name.'), + ]), + + + # + # gcid + # + + ('gcid', [ + ('struct', 'GlyphCIDMapping', None, None, 'Glyph to CID mapping table.'), + ]), + + ('GlyphCIDMappingFormat0', [ + ('uint16', 'Format', None, None, 'Format of the glyph-to-CID mapping table, = 0.'), + ('uint16', 'DataFormat', None, None, 'Currenty unused, set to zero.'), + ('uint32', 'StructLength', None, None, 'Size of the table in bytes.'), + ('uint16', 'Registry', None, None, 'The registry ID.'), + ('char64', 'RegistryName', None, None, 'The registry name in ASCII; unused bytes should be set to 0.'), + ('uint16', 'Order', None, None, 'The order ID.'), + ('char64', 'OrderName', None, None, 'The order name in ASCII; unused bytes should be set to 0.'), + ('uint16', 'SupplementVersion', None, None, 'The supplement version.'), + ('GlyphCIDMap', 'Mapping', None, None, 'The CIDs for the glyphs in the font, starting with glyph 0. 
If a glyph does not correspond to a CID in the identified collection, 0xFFFF is used'), + ]), + + + # + # lcar + # + + ('lcar', [ + ('Version', 'Version', None, None, 'Version number of the ligature caret table (0x00010000 for the initial version).'), + ('struct', 'LigatureCarets', None, None, 'Ligature carets table.'), + ]), + + ('LigatureCaretsFormat0', [ + ('uint16', 'Format', None, None, 'Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.'), + ('AATLookup(LigCaretDistances)', 'Carets', None, None, 'Lookup table associating ligature glyphs with their caret positions, in font unit distances.'), + ]), + + ('LigatureCaretsFormat1', [ + ('uint16', 'Format', None, None, 'Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.'), + ('AATLookup(LigCaretPoints)', 'Carets', None, None, 'Lookup table associating ligature glyphs with their caret positions, as control points.'), + ]), + + ('LigCaretDistances', [ + ('uint16', 'DivsionPointCount', None, None, 'Number of division points.'), + ('int16', 'DivisionPoint', 'DivsionPointCount', 0, 'Distance in font units through which a subdivision is made orthogonally to the baseline.'), + ]), + + ('LigCaretPoints', [ + ('uint16', 'DivsionPointCount', None, None, 'Number of division points.'), + ('int16', 'DivisionPoint', 'DivsionPointCount', 0, 'The number of the control point through which a subdivision is made orthogonally to the baseline.'), + ]), + + + # + # mort + # + + ('mort', [ + ('Version', 'Version', None, None, 'Version of the mort table.'), + ('uint32', 'MorphChainCount', None, None, 'Number of metamorphosis chains.'), + ('MortChain', 'MorphChain', 'MorphChainCount', 0, 'Array of metamorphosis chains.'), + ]), + + ('MortChain', [ + ('Flags32', 'DefaultFlags', None, None, 'The default specification 
for subtables.'), + ('uint32', 'StructLength', None, None, 'Total byte count, including this header; must be a multiple of 4.'), + ('uint16', 'MorphFeatureCount', None, None, 'Number of metamorphosis feature entries.'), + ('uint16', 'MorphSubtableCount', None, None, 'The number of subtables in the chain.'), + ('struct', 'MorphFeature', 'MorphFeatureCount', 0, 'Array of metamorphosis features.'), + ('MortSubtable', 'MorphSubtable', 'MorphSubtableCount', 0, 'Array of metamorphosis subtables.'), + ]), + + ('MortSubtable', [ + ('uint16', 'StructLength', None, None, 'Total subtable length, including this header.'), + ('uint8', 'CoverageFlags', None, None, 'Most significant byte of coverage flags.'), + ('uint8', 'MorphType', None, None, 'Subtable type.'), + ('Flags32', 'SubFeatureFlags', None, None, 'The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).'), + ('SubStruct', 'SubStruct', None, None, 'SubTable.'), + ]), + + # + # morx + # + + ('morx', [ + ('uint16', 'Version', None, None, 'Version of the morx table.'), + ('uint16', 'Reserved', None, None, 'Reserved (set to zero).'), + ('uint32', 'MorphChainCount', None, None, 'Number of extended metamorphosis chains.'), + ('MorxChain', 'MorphChain', 'MorphChainCount', 0, 'Array of extended metamorphosis chains.'), + ]), + + ('MorxChain', [ + ('Flags32', 'DefaultFlags', None, None, 'The default specification for subtables.'), + ('uint32', 'StructLength', None, None, 'Total byte count, including this header; must be a multiple of 4.'), + ('uint32', 'MorphFeatureCount', None, None, 'Number of feature subtable entries.'), + ('uint32', 'MorphSubtableCount', None, None, 'The number of subtables in the chain.'), + ('MorphFeature', 'MorphFeature', 'MorphFeatureCount', 0, 'Array of metamorphosis features.'), + ('MorxSubtable', 'MorphSubtable', 'MorphSubtableCount', 0, 'Array of extended metamorphosis subtables.'), + ]), + + ('MorphFeature', 
[ + ('uint16', 'FeatureType', None, None, 'The type of feature.'), + ('uint16', 'FeatureSetting', None, None, "The feature's setting (aka selector)."), + ('Flags32', 'EnableFlags', None, None, 'Flags for the settings that this feature and setting enables.'), + ('Flags32', 'DisableFlags', None, None, 'Complement of flags for the settings that this feature and setting disable.'), + ]), + + # Apple TrueType Reference Manual, chapter “The ‘morx’ table”, + # section “Metamorphosis Subtables”. + # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html + ('MorxSubtable', [ + ('uint32', 'StructLength', None, None, 'Total subtable length, including this header.'), + ('uint8', 'CoverageFlags', None, None, 'Most significant byte of coverage flags.'), + ('uint16', 'Reserved', None, None, 'Unused.'), + ('uint8', 'MorphType', None, None, 'Subtable type.'), + ('Flags32', 'SubFeatureFlags', None, None, 'The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).'), + ('SubStruct', 'SubStruct', None, None, 'SubTable.'), + ]), + + ('StateHeader', [ + ('uint32', 'ClassCount', None, None, 'Number of classes, which is the number of 16-bit entry indices in a single line in the state array.'), + ('uint32', 'MorphClass', None, None, 'Offset from the start of this state table header to the start of the class table.'), + ('uint32', 'StateArrayOffset', None, None, 'Offset from the start of this state table header to the start of the state array.'), + ('uint32', 'EntryTableOffset', None, None, 'Offset from the start of this state table header to the start of the entry table.'), + ]), + + ('RearrangementMorph', [ + ('STXHeader(RearrangementMorphAction)', 'StateTable', None, None, 'Finite-state transducer table for indic rearrangement.'), + ]), + + ('ContextualMorph', [ + ('STXHeader(ContextualMorphAction)', 'StateTable', None, None, 'Finite-state transducer for contextual glyph 
substitution.'), + ]), + + ('LigatureMorph', [ + ('STXHeader(LigatureMorphAction)', 'StateTable', None, None, 'Finite-state transducer for ligature substitution.'), + ]), + + ('NoncontextualMorph', [ + ('AATLookup(GlyphID)', 'Substitution', None, None, 'The noncontextual glyph substitution table.'), + ]), + + ('InsertionMorph', [ + ('struct', 'StateHeader', None, None, 'Header.'), + # TODO: Add missing parts. + ]), + + ('MorphClass', [ + ('uint16', 'FirstGlyph', None, None, 'Glyph index of the first glyph in the class table.'), + #('uint16', 'GlyphCount', None, None, 'Number of glyphs in class table.'), + #('uint8', 'GlyphClass', 'GlyphCount', 0, 'The class codes (indexed by glyph index minus firstGlyph). Class codes range from 0 to the value of stateSize minus 1.'), + ]), + + # If the 'morx' table version is 3 or greater, then the last subtable in the chain is followed by a subtableGlyphCoverageArray, as described below. + # ('Offset', 'MarkGlyphSetsDef', None, 'round(Version*0x10000) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), + + + # + # prop + # + + ('prop', [ + ('Fixed', 'Version', None, None, 'Version number of the AAT glyphs property table. Version 1.0 is the initial table version. Version 2.0, which is recognized by macOS 8.5 and later, adds support for the “attaches on right” bit. Version 3.0, which gets recognized by macOS X and iOS, adds support for the additional directional properties defined in Unicode 3.0.'), + ('struct', 'GlyphProperties', None, None, 'Glyph properties.'), + ]), + + ('GlyphPropertiesFormat0', [ + ('uint16', 'Format', None, None, 'Format, = 0.'), + ('uint16', 'DefaultProperties', None, None, 'Default properties applied to a glyph. 
Since there is no lookup table in prop format 0, the default properties get applied to every glyph in the font.'), + ]), + + ('GlyphPropertiesFormat1', [ + ('uint16', 'Format', None, None, 'Format, = 1.'), + ('uint16', 'DefaultProperties', None, None, 'Default properties applied to a glyph if that glyph is not present in the Properties lookup table.'), + ('AATLookup(uint16)', 'Properties', None, None, 'Lookup data associating glyphs with their properties.'), + ]), + + + # + # opbd + # + + ('opbd', [ + ('Version', 'Version', None, None, 'Version number of the optical bounds table (0x00010000 for the initial version).'), + ('struct', 'OpticalBounds', None, None, 'Optical bounds table.'), + ]), + + ('OpticalBoundsFormat0', [ + ('uint16', 'Format', None, None, 'Format of the optical bounds table, = 0.'), + ('AATLookup(OpticalBoundsDeltas)', 'OpticalBoundsDeltas', None, None, 'Lookup table associating glyphs with their optical bounds, given as deltas in font units.'), + ]), + + ('OpticalBoundsFormat1', [ + ('uint16', 'Format', None, None, 'Format of the optical bounds table, = 1.'), + ('AATLookup(OpticalBoundsPoints)', 'OpticalBoundsPoints', None, None, 'Lookup table associating glyphs with their optical bounds, given as references to control points.'), + ]), + + ('OpticalBoundsDeltas', [ + ('int16', 'Left', None, None, 'Delta value for the left-side optical edge.'), + ('int16', 'Top', None, None, 'Delta value for the top-side optical edge.'), + ('int16', 'Right', None, None, 'Delta value for the right-side optical edge.'), + ('int16', 'Bottom', None, None, 'Delta value for the bottom-side optical edge.'), + ]), + + ('OpticalBoundsPoints', [ + ('int16', 'Left', None, None, 'Control point index for the left-side optical edge, or -1 if this glyph has none.'), + ('int16', 'Top', None, None, 'Control point index for the top-side optical edge, or -1 if this glyph has none.'), + ('int16', 'Right', None, None, 'Control point index for the right-side optical edge, or -1 if this 
glyph has none.'), + ('int16', 'Bottom', None, None, 'Control point index for the bottom-side optical edge, or -1 if this glyph has none.'), ]), ] diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/otTables.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/otTables.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/otTables.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/otTables.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,14 +1,421 @@ +# coding: utf-8 """fontTools.ttLib.tables.otTables -- A collection of classes representing the various OpenType subtables. Most are constructed upon import from data in otData.py, all are populated with converter objects from otConverters.py. """ -from __future__ import print_function, division, absolute_import +from __future__ import print_function, division, absolute_import, unicode_literals from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval from .otBase import BaseTable, FormatSwitchingBaseTable import operator -import warnings +import logging +import struct + + +log = logging.getLogger(__name__) + + +class AATStateTable(object): + def __init__(self): + self.GlyphClasses = {} # GlyphID --> GlyphClass + self.States = [] # List of AATState, indexed by state number + self.PerGlyphLookups = [] # [{GlyphID:GlyphID}, ...] 
+ + +class AATState(object): + def __init__(self): + self.Transitions = {} # GlyphClass --> AATAction + + +class AATAction(object): + _FLAGS = None + + def _writeFlagsToXML(self, xmlWriter): + flags = [f for f in self._FLAGS if self.__dict__[f]] + if flags: + xmlWriter.simpletag("Flags", value=",".join(flags)) + xmlWriter.newline() + if self.ReservedFlags != 0: + xmlWriter.simpletag( + "ReservedFlags", + value='0x%04X' % self.ReservedFlags) + xmlWriter.newline() + + def _setFlag(self, flag): + assert flag in self._FLAGS, "unsupported flag %s" % flag + self.__dict__[flag] = True + + +class RearrangementMorphAction(AATAction): + staticSize = 4 + _FLAGS = ["MarkFirst", "DontAdvance", "MarkLast"] + + _VERBS = { + 0: "no change", + 1: "Ax ⇒ xA", + 2: "xD ⇒ Dx", + 3: "AxD ⇒ DxA", + 4: "ABx ⇒ xAB", + 5: "ABx ⇒ xBA", + 6: "xCD ⇒ CDx", + 7: "xCD ⇒ DCx", + 8: "AxCD ⇒ CDxA", + 9: "AxCD ⇒ DCxA", + 10: "ABxD ⇒ DxAB", + 11: "ABxD ⇒ DxBA", + 12: "ABxCD ⇒ CDxAB", + 13: "ABxCD ⇒ CDxBA", + 14: "ABxCD ⇒ DCxAB", + 15: "ABxCD ⇒ DCxBA", + } + + def __init__(self): + self.NewState = 0 + self.Verb = 0 + self.MarkFirst = False + self.DontAdvance = False + self.MarkLast = False + self.ReservedFlags = 0 + + def compile(self, writer, font, actionIndex): + assert actionIndex is None + writer.writeUShort(self.NewState) + assert self.Verb >= 0 and self.Verb <= 15, self.Verb + flags = self.Verb | self.ReservedFlags + if self.MarkFirst: flags |= 0x8000 + if self.DontAdvance: flags |= 0x4000 + if self.MarkLast: flags |= 0x2000 + writer.writeUShort(flags) + + def decompile(self, reader, font, actionReader): + assert actionReader is None + self.NewState = reader.readUShort() + flags = reader.readUShort() + self.Verb = flags & 0xF + self.MarkFirst = bool(flags & 0x8000) + self.DontAdvance = bool(flags & 0x4000) + self.MarkLast = bool(flags & 0x2000) + self.ReservedFlags = flags & 0x1FF0 + + def toXML(self, xmlWriter, font, attrs, name): + xmlWriter.begintag(name, **attrs) + xmlWriter.newline() + 
xmlWriter.simpletag("NewState", value=self.NewState) + xmlWriter.newline() + self._writeFlagsToXML(xmlWriter) + xmlWriter.simpletag("Verb", value=self.Verb) + verbComment = self._VERBS.get(self.Verb) + if verbComment is not None: + xmlWriter.comment(verbComment) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + self.NewState = self.Verb = self.ReservedFlags = 0 + self.MarkFirst = self.DontAdvance = self.MarkLast = False + content = [t for t in content if isinstance(t, tuple)] + for eltName, eltAttrs, eltContent in content: + if eltName == "NewState": + self.NewState = safeEval(eltAttrs["value"]) + elif eltName == "Verb": + self.Verb = safeEval(eltAttrs["value"]) + elif eltName == "ReservedFlags": + self.ReservedFlags = safeEval(eltAttrs["value"]) + elif eltName == "Flags": + for flag in eltAttrs["value"].split(","): + self._setFlag(flag.strip()) + + +class ContextualMorphAction(AATAction): + staticSize = 8 + _FLAGS = ["SetMark", "DontAdvance"] + + def __init__(self): + self.NewState = 0 + self.SetMark, self.DontAdvance = False, False + self.ReservedFlags = 0 + self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF + + def compile(self, writer, font, actionIndex): + assert actionIndex is None + writer.writeUShort(self.NewState) + flags = self.ReservedFlags + if self.SetMark: flags |= 0x8000 + if self.DontAdvance: flags |= 0x4000 + writer.writeUShort(flags) + writer.writeUShort(self.MarkIndex) + writer.writeUShort(self.CurrentIndex) + + def decompile(self, reader, font, actionReader): + assert actionReader is None + self.NewState = reader.readUShort() + flags = reader.readUShort() + self.SetMark = bool(flags & 0x8000) + self.DontAdvance = bool(flags & 0x4000) + self.ReservedFlags = flags & 0x3FFF + self.MarkIndex = reader.readUShort() + self.CurrentIndex = reader.readUShort() + + def toXML(self, xmlWriter, font, attrs, name): + xmlWriter.begintag(name, **attrs) + xmlWriter.newline() + 
xmlWriter.simpletag("NewState", value=self.NewState) + xmlWriter.newline() + self._writeFlagsToXML(xmlWriter) + xmlWriter.simpletag("MarkIndex", value=self.MarkIndex) + xmlWriter.newline() + xmlWriter.simpletag("CurrentIndex", + value=self.CurrentIndex) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + self.NewState = self.ReservedFlags = 0 + self.SetMark = self.DontAdvance = False + self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF + content = [t for t in content if isinstance(t, tuple)] + for eltName, eltAttrs, eltContent in content: + if eltName == "NewState": + self.NewState = safeEval(eltAttrs["value"]) + elif eltName == "Flags": + for flag in eltAttrs["value"].split(","): + self._setFlag(flag.strip()) + elif eltName == "ReservedFlags": + self.ReservedFlags = safeEval(eltAttrs["value"]) + elif eltName == "MarkIndex": + self.MarkIndex = safeEval(eltAttrs["value"]) + elif eltName == "CurrentIndex": + self.CurrentIndex = safeEval(eltAttrs["value"]) + + +class LigAction(object): + def __init__(self): + self.Store = False + # GlyphIndexDelta is a (possibly negative) delta that gets + # added to the glyph ID at the top of the AAT runtime + # execution stack. It is *not* a byte offset into the + # morx table. The result of the addition, which is performed + # at run time by the shaping engine, is an index into + # the ligature components table. See 'morx' specification. + # In the AAT specification, this field is called Offset; + # but its meaning is quite different from other offsets + # in either AAT or OpenType, so we use a different name. 
+ self.GlyphIndexDelta = 0 + + +class LigatureMorphAction(AATAction): + staticSize = 6 + _FLAGS = ["SetComponent", "DontAdvance"] + + def __init__(self): + self.NewState = 0 + self.SetComponent, self.DontAdvance = False, False + self.ReservedFlags = 0 + self.Actions = [] + + def compile(self, writer, font, actionIndex): + assert actionIndex is not None + writer.writeUShort(self.NewState) + flags = self.ReservedFlags + if self.SetComponent: flags |= 0x8000 + if self.DontAdvance: flags |= 0x4000 + if len(self.Actions) > 0: flags |= 0x2000 + writer.writeUShort(flags) + if len(self.Actions) > 0: + actions = self.compileLigActions() + writer.writeUShort(actionIndex[actions]) + else: + writer.writeUShort(0) + + def decompile(self, reader, font, actionReader): + assert actionReader is not None + self.NewState = reader.readUShort() + flags = reader.readUShort() + self.SetComponent = bool(flags & 0x8000) + self.DontAdvance = bool(flags & 0x4000) + performAction = bool(flags & 0x2000) + # As of 2017-09-12, the 'morx' specification says that + # the reserved bitmask in ligature subtables is 0x3FFF. + # However, the specification also defines a flag 0x2000, + # so the reserved value should actually be 0x1FFF. + # TODO: Report this specification bug to Apple. 
+ self.ReservedFlags = flags & 0x1FFF + actionIndex = reader.readUShort() + if performAction: + self.Actions = self._decompileLigActions( + actionReader, actionIndex) + else: + self.Actions = [] + + def compileLigActions(self): + result = [] + for i, action in enumerate(self.Actions): + last = (i == len(self.Actions) - 1) + value = action.GlyphIndexDelta & 0x3FFFFFFF + value |= 0x80000000 if last else 0 + value |= 0x40000000 if action.Store else 0 + result.append(struct.pack(">L", value)) + return bytesjoin(result) + + def _decompileLigActions(self, actionReader, actionIndex): + actions = [] + last = False + reader = actionReader.getSubReader( + actionReader.pos + actionIndex * 4) + while not last: + value = reader.readULong() + last = bool(value & 0x80000000) + action = LigAction() + actions.append(action) + action.Store = bool(value & 0x40000000) + delta = value & 0x3FFFFFFF + if delta >= 0x20000000: # sign-extend 30-bit value + delta = -0x40000000 + delta + action.GlyphIndexDelta = delta + return actions + + def fromXML(self, name, attrs, content, font): + self.NewState = self.ReservedFlags = 0 + self.SetComponent = self.DontAdvance = False + self.ReservedFlags = 0 + self.Actions = [] + content = [t for t in content if isinstance(t, tuple)] + for eltName, eltAttrs, eltContent in content: + if eltName == "NewState": + self.NewState = safeEval(eltAttrs["value"]) + elif eltName == "Flags": + for flag in eltAttrs["value"].split(","): + self._setFlag(flag.strip()) + elif eltName == "ReservedFlags": + self.ReservedFlags = safeEval(eltAttrs["value"]) + elif eltName == "Action": + action = LigAction() + flags = eltAttrs.get("Flags", "").split(",") + flags = [f.strip() for f in flags] + action.Store = "Store" in flags + action.GlyphIndexDelta = safeEval( + eltAttrs["GlyphIndexDelta"]) + self.Actions.append(action) + + def toXML(self, xmlWriter, font, attrs, name): + xmlWriter.begintag(name, **attrs) + xmlWriter.newline() + xmlWriter.simpletag("NewState", 
value=self.NewState) + xmlWriter.newline() + self._writeFlagsToXML(xmlWriter) + for action in self.Actions: + attribs = [("GlyphIndexDelta", action.GlyphIndexDelta)] + if action.Store: + attribs.append(("Flags", "Store")) + xmlWriter.simpletag("Action", attribs) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + +class InsertionMorphAction(AATAction): + staticSize = 8 + + _FLAGS = ["SetMark", "DontAdvance", + "CurrentIsKashidaLike", "MarkedIsKashidaLike", + "CurrentInsertBefore", "MarkedInsertBefore"] + + def __init__(self): + self.NewState = 0 + for flag in self._FLAGS: + setattr(self, flag, False) + self.ReservedFlags = 0 + self.CurrentInsertionAction, self.MarkedInsertionAction = [], [] + + def compile(self, writer, font, actionIndex): + assert actionIndex is not None + writer.writeUShort(self.NewState) + flags = self.ReservedFlags + if self.SetMark: flags |= 0x8000 + if self.DontAdvance: flags |= 0x4000 + if self.CurrentIsKashidaLike: flags |= 0x2000 + if self.MarkedIsKashidaLike: flags |= 0x1000 + if self.CurrentInsertBefore: flags |= 0x0800 + if self.MarkedInsertBefore: flags |= 0x0400 + flags |= len(self.CurrentInsertionAction) << 5 + flags |= len(self.MarkedInsertionAction) + writer.writeUShort(flags) + if len(self.CurrentInsertionAction) > 0: + currentIndex = actionIndex[ + tuple(self.CurrentInsertionAction)] + else: + currentIndex = 0xFFFF + writer.writeUShort(currentIndex) + if len(self.MarkedInsertionAction) > 0: + markedIndex = actionIndex[ + tuple(self.MarkedInsertionAction)] + else: + markedIndex = 0xFFFF + writer.writeUShort(markedIndex) + + def decompile(self, reader, font, actionReader): + assert actionReader is not None + self.NewState = reader.readUShort() + flags = reader.readUShort() + self.SetMark = bool(flags & 0x8000) + self.DontAdvance = bool(flags & 0x4000) + self.CurrentIsKashidaLike = bool(flags & 0x2000) + self.MarkedIsKashidaLike = bool(flags & 0x1000) + self.CurrentInsertBefore = bool(flags & 0x0800) + 
self.MarkedInsertBefore = bool(flags & 0x0400) + self.CurrentInsertionAction = self._decompileInsertionAction( + actionReader, font, + index=reader.readUShort(), + count=((flags & 0x03E0) >> 5)) + self.MarkedInsertionAction = self._decompileInsertionAction( + actionReader, font, + index=reader.readUShort(), + count=(flags & 0x001F)) + + def _decompileInsertionAction(self, actionReader, font, index, count): + if index == 0xFFFF or count == 0: + return [] + reader = actionReader.getSubReader( + actionReader.pos + index * 2) + return [font.getGlyphName(glyphID) + for glyphID in reader.readUShortArray(count)] + + def toXML(self, xmlWriter, font, attrs, name): + xmlWriter.begintag(name, **attrs) + xmlWriter.newline() + xmlWriter.simpletag("NewState", value=self.NewState) + xmlWriter.newline() + self._writeFlagsToXML(xmlWriter) + for g in self.CurrentInsertionAction: + xmlWriter.simpletag("CurrentInsertionAction", glyph=g) + xmlWriter.newline() + for g in self.MarkedInsertionAction: + xmlWriter.simpletag("MarkedInsertionAction", glyph=g) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + self.__init__() + content = [t for t in content if isinstance(t, tuple)] + for eltName, eltAttrs, eltContent in content: + if eltName == "NewState": + self.NewState = safeEval(eltAttrs["value"]) + elif eltName == "Flags": + for flag in eltAttrs["value"].split(","): + self._setFlag(flag.strip()) + elif eltName == "CurrentInsertionAction": + self.CurrentInsertionAction.append( + eltAttrs["glyph"]) + elif eltName == "MarkedInsertionAction": + self.MarkedInsertionAction.append( + eltAttrs["glyph"]) + else: + assert False, eltName class FeatureParams(BaseTable): @@ -33,6 +440,10 @@ # manual implementation to get rid of glyphID dependencies + def populateDefaults(self, propagator=None): + if not hasattr(self, 'glyphs'): + self.glyphs = [] + def postRead(self, rawTable, font): if self.Format == 1: # TODO only allow glyphs 
that are valid? @@ -46,7 +457,7 @@ # this when writing font out. sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex) if ranges != sorted_ranges: - warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") + log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.") ranges = sorted_ranges del sorted_ranges for r in ranges: @@ -57,21 +468,21 @@ try: startID = font.getGlyphID(start, requireReal=True) except KeyError: - warnings.warn("Coverage table has start glyph ID out of range: %s." % start) + log.warning("Coverage table has start glyph ID out of range: %s.", start) continue try: endID = font.getGlyphID(end, requireReal=True) + 1 except KeyError: # Apparently some tools use 65535 to "match all" the range if end != 'glyph65535': - warnings.warn("Coverage table has end glyph ID out of range: %s." % end) + log.warning("Coverage table has end glyph ID out of range: %s.", end) # NOTE: We clobber out-of-range things here. There are legit uses for those, # but none that we have seen in the wild. 
endID = len(glyphOrder) glyphs.extend(glyphOrder[glyphID] for glyphID in range(startID, endID)) else: - assert 0, "unknown format: %s" % self.Format - del self.Format # Don't need this anymore + self.glyphs = [] + log.warning("Unknown Coverage format: %s" % self.Format) def preWrite(self, font): glyphs = getattr(self, "glyphs", None) @@ -107,7 +518,7 @@ ranges[i] = r index = index + end - start + 1 if brokenOrder: - warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") + log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.") ranges.sort(key=lambda a: a.StartID) for r in ranges: del r.StartID @@ -131,8 +542,79 @@ glyphs.append(attrs["value"]) +class VarIdxMap(BaseTable): + + def populateDefaults(self, propagator=None): + if not hasattr(self, 'mapping'): + self.mapping = [] + + def postRead(self, rawTable, font): + assert (rawTable['EntryFormat'] & 0xFFC0) == 0 + self.mapping = rawTable['mapping'] + + def preWrite(self, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = self.mapping = [] + rawTable = { 'mapping': mapping } + rawTable['MappingCount'] = len(mapping) + + # TODO Remove this abstraction/optimization and move it varLib.builder? 
+ + ored = 0 + for idx in mapping: + ored |= idx + + inner = ored & 0xFFFF + innerBits = 0 + while inner: + innerBits += 1 + inner >>= 1 + innerBits = max(innerBits, 1) + assert innerBits <= 16 + + ored = (ored >> (16-innerBits)) | (ored & ((1<> 16), + ('inner', value & 0xFFFF), + ) + xmlWriter.simpletag("Map", attrs) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = [] + self.mapping = mapping + outer = safeEval(attrs['outer']) + inner = safeEval(attrs['inner']) + assert inner <= 0xFFFF + mapping.append((outer << 16) | inner) + + class SingleSubst(FormatSwitchingBaseTable): + def populateDefaults(self, propagator=None): + if not hasattr(self, 'mapping'): + self.mapping = {} + def postRead(self, rawTable, font): mapping = {} input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) @@ -151,7 +633,6 @@ else: assert 0, "unknown format: %s" % self.Format self.mapping = mapping - del self.Format # Don't need this anymore def preWrite(self, font): mapping = getattr(self, "mapping", None) @@ -172,7 +653,11 @@ if (inID + delta) % 65536 != outID: break else: - format = 1 + if delta is None: + # the mapping is empty, better use format 2 + format = 2 + else: + format = 1 rawTable = {} self.Format = format @@ -203,8 +688,89 @@ mapping[attrs["in"]] = attrs["out"] +class MultipleSubst(FormatSwitchingBaseTable): + + def populateDefaults(self, propagator=None): + if not hasattr(self, 'mapping'): + self.mapping = {} + + def postRead(self, rawTable, font): + mapping = {} + if self.Format == 1: + glyphs = _getGlyphsFromCoverageTable(rawTable["Coverage"]) + subst = [s.Substitute for s in rawTable["Sequence"]] + mapping = dict(zip(glyphs, subst)) + else: + assert 0, "unknown format: %s" % self.Format + self.mapping = mapping + + def preWrite(self, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = self.mapping = {} + cov = Coverage() + cov.glyphs = 
sorted(list(mapping.keys()), key=font.getGlyphID) + self.Format = 1 + rawTable = { + "Coverage": cov, + "Sequence": [self.makeSequence_(mapping[glyph]) + for glyph in cov.glyphs], + } + return rawTable + + def toXML2(self, xmlWriter, font): + items = sorted(self.mapping.items()) + for inGlyph, outGlyphs in items: + out = ",".join(outGlyphs) + xmlWriter.simpletag("Substitution", + [("in", inGlyph), ("out", out)]) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = {} + self.mapping = mapping + + # TTX v3.0 and earlier. + if name == "Coverage": + self.old_coverage_ = [] + for element in content: + if not isinstance(element, tuple): + continue + element_name, element_attrs, _ = element + if element_name == "Glyph": + self.old_coverage_.append(element_attrs["value"]) + return + if name == "Sequence": + index = int(attrs.get("index", len(mapping))) + glyph = self.old_coverage_[index] + glyph_mapping = mapping[glyph] = [] + for element in content: + if not isinstance(element, tuple): + continue + element_name, element_attrs, _ = element + if element_name == "Substitute": + glyph_mapping.append(element_attrs["value"]) + return + + # TTX v3.1 and later. + outGlyphs = attrs["out"].split(",") if attrs["out"] else [] + mapping[attrs["in"]] = [g.strip() for g in outGlyphs] + + @staticmethod + def makeSequence_(g): + seq = Sequence() + seq.Substitute = g + return seq + + class ClassDef(FormatSwitchingBaseTable): + def populateDefaults(self, propagator=None): + if not hasattr(self, 'classDefs'): + self.classDefs = {} + def postRead(self, rawTable, font): classDefs = {} glyphOrder = font.getGlyphOrder() @@ -215,17 +781,19 @@ try: startID = font.getGlyphID(start, requireReal=True) except KeyError: - warnings.warn("ClassDef table has start glyph ID out of range: %s." 
% start) + log.warning("ClassDef table has start glyph ID out of range: %s.", start) startID = len(glyphOrder) endID = startID + len(classList) if endID > len(glyphOrder): - warnings.warn("ClassDef table has entries for out of range glyph IDs: %s,%s." % (start, len(classList))) + log.warning("ClassDef table has entries for out of range glyph IDs: %s,%s.", + start, len(classList)) # NOTE: We clobber out-of-range things here. There are legit uses for those, # but none that we have seen in the wild. endID = len(glyphOrder) for glyphID, cls in zip(range(startID, endID), classList): - classDefs[glyphOrder[glyphID]] = cls + if cls: + classDefs[glyphOrder[glyphID]] = cls elif self.Format == 2: records = rawTable["ClassRangeRecord"] @@ -236,37 +804,37 @@ try: startID = font.getGlyphID(start, requireReal=True) except KeyError: - warnings.warn("ClassDef table has start glyph ID out of range: %s." % start) + log.warning("ClassDef table has start glyph ID out of range: %s.", start) continue try: endID = font.getGlyphID(end, requireReal=True) + 1 except KeyError: # Apparently some tools use 65535 to "match all" the range if end != 'glyph65535': - warnings.warn("ClassDef table has end glyph ID out of range: %s." % end) + log.warning("ClassDef table has end glyph ID out of range: %s.", end) # NOTE: We clobber out-of-range things here. There are legit uses for those, # but none that we have seen in the wild. 
endID = len(glyphOrder) for glyphID in range(startID, endID): - classDefs[glyphOrder[glyphID]] = cls + if cls: + classDefs[glyphOrder[glyphID]] = cls else: assert 0, "unknown format: %s" % self.Format self.classDefs = classDefs - del self.Format # Don't need this anymore - def preWrite(self, font): + def _getClassRanges(self, font): classDefs = getattr(self, "classDefs", None) if classDefs is None: - classDefs = self.classDefs = {} - items = list(classDefs.items()) - format = 2 - rawTable = {"ClassRangeRecord": []} + self.classDefs = {} + return getGlyphID = font.getGlyphID - for i in range(len(items)): - glyphName, cls = items[i] - items[i] = getGlyphID(glyphName), glyphName, cls - items.sort() + items = [] + for glyphName, cls in classDefs.items(): + if not cls: + continue + items.append((getGlyphID(glyphName), glyphName, cls)) if items: + items.sort() last, lastName, lastCls = items[0] ranges = [[lastCls, last, lastName]] for glyphID, glyphName, cls in items[1:]: @@ -277,7 +845,13 @@ lastName = glyphName lastCls = cls ranges[-1].extend([last, lastName]) + return ranges + def preWrite(self, font): + format = 2 + rawTable = {"ClassRangeRecord": []} + ranges = self._getClassRanges(font) + if ranges: startGlyph = ranges[0][1] endGlyph = ranges[-1][3] glyphCount = endGlyph - startGlyph + 1 @@ -320,19 +894,21 @@ class AlternateSubst(FormatSwitchingBaseTable): + def populateDefaults(self, propagator=None): + if not hasattr(self, 'alternates'): + self.alternates = {} + def postRead(self, rawTable, font): alternates = {} if self.Format == 1: input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) alts = rawTable["AlternateSet"] - if len(input) != len(alts): - assert len(input) == len(alts) - for i in range(len(input)): - alternates[input[i]] = alts[i].Alternate + assert len(input) == len(alts) + for inp,alt in zip(input,alts): + alternates[inp] = alt.Alternate else: assert 0, "unknown format: %s" % self.Format self.alternates = alternates - del self.Format # Don't need 
this anymore def preWrite(self, font): self.Format = 1 @@ -348,7 +924,7 @@ cov.glyphs = [ item[1] for item in items] alternates = [] setList = [ item[-1] for item in items] - for set in setList: + for set in setList: alts = AlternateSet() alts.Alternate = set alternates.append(alts) @@ -388,6 +964,10 @@ class LigatureSubst(FormatSwitchingBaseTable): + def populateDefaults(self, propagator=None): + if not hasattr(self, 'ligatures'): + self.ligatures = {} + def postRead(self, rawTable, font): ligatures = {} if self.Format == 1: @@ -399,13 +979,27 @@ else: assert 0, "unknown format: %s" % self.Format self.ligatures = ligatures - del self.Format # Don't need this anymore def preWrite(self, font): self.Format = 1 ligatures = getattr(self, "ligatures", None) if ligatures is None: ligatures = self.ligatures = {} + + if ligatures and isinstance(next(iter(ligatures)), tuple): + # New high-level API in v3.1 and later. Note that we just support compiling this + # for now. We don't load to this API, and don't do XML with it. + + # ligatures is map from components-sequence to lig-glyph + newLigatures = dict() + for comps,lig in sorted(ligatures.items(), key=lambda item: (-len(item[0]), item[0])): + ligature = Ligature() + ligature.Component = comps[1:] + ligature.CompCount = len(comps) + ligature.LigGlyph = lig + newLigatures.setdefault(comps[0], []).append(ligature) + ligatures = newLigatures + items = list(ligatures.items()) for i in range(len(items)): glyphName, set = items[i] @@ -459,7 +1053,6 @@ ligs.append(lig) -# # For each subtable format there is a class. However, we don't really distinguish # between "field name" and "format name": often these are the same. Yet there's # a whole bunch of fields with different names. 
The following dict is a mapping @@ -542,6 +1135,7 @@ return ok lookup = lookups[lookupIndex] + lookup.LookupType = extType for si in range(len(lookup.SubTable)): subTable = lookup.SubTable[si] extSubTableClass = lookupTypes[overflowRecord.tableType][extType] @@ -609,6 +1203,73 @@ return ok +def splitPairPos(oldSubTable, newSubTable, overflowRecord): + st = oldSubTable + ok = False + newSubTable.Format = oldSubTable.Format + if oldSubTable.Format == 1 and len(oldSubTable.PairSet) > 1: + for name in 'ValueFormat1', 'ValueFormat2': + setattr(newSubTable, name, getattr(oldSubTable, name)) + + # Move top half of coverage to new subtable + + newSubTable.Coverage = oldSubTable.Coverage.__class__() + + coverage = oldSubTable.Coverage.glyphs + records = oldSubTable.PairSet + + oldCount = len(oldSubTable.PairSet) // 2 + + oldSubTable.Coverage.glyphs = coverage[:oldCount] + oldSubTable.PairSet = records[:oldCount] + + newSubTable.Coverage.glyphs = coverage[oldCount:] + newSubTable.PairSet = records[oldCount:] + + oldSubTable.PairSetCount = len(oldSubTable.PairSet) + newSubTable.PairSetCount = len(newSubTable.PairSet) + + ok = True + + elif oldSubTable.Format == 2 and len(oldSubTable.Class1Record) > 1: + if not hasattr(oldSubTable, 'Class2Count'): + oldSubTable.Class2Count = len(oldSubTable.Class1Record[0].Class2Record) + for name in 'Class2Count', 'ClassDef2', 'ValueFormat1', 'ValueFormat2': + setattr(newSubTable, name, getattr(oldSubTable, name)) + + # The two subtables will still have the same ClassDef2 and the table + # sharing will still cause the sharing to overflow. As such, disable + # sharing on the one that is serialized second (that's oldSubTable). 
+ oldSubTable.DontShare = True + + # Move top half of class numbers to new subtable + + newSubTable.Coverage = oldSubTable.Coverage.__class__() + newSubTable.ClassDef1 = oldSubTable.ClassDef1.__class__() + + coverage = oldSubTable.Coverage.glyphs + classDefs = oldSubTable.ClassDef1.classDefs + records = oldSubTable.Class1Record + + oldCount = len(oldSubTable.Class1Record) // 2 + newGlyphs = set(k for k,v in classDefs.items() if v >= oldCount) + + oldSubTable.Coverage.glyphs = [g for g in coverage if g not in newGlyphs] + oldSubTable.ClassDef1.classDefs = {k:v for k,v in classDefs.items() if v < oldCount} + oldSubTable.Class1Record = records[:oldCount] + + newSubTable.Coverage.glyphs = [g for g in coverage if g in newGlyphs] + newSubTable.ClassDef1.classDefs = {k:(v-oldCount) for k,v in classDefs.items() if v > oldCount} + newSubTable.Class1Record = records[oldCount:] + + oldSubTable.Class1Count = len(oldSubTable.Class1Record) + newSubTable.Class1Count = len(newSubTable.Class1Record) + + ok = True + + return ok + + splitTable = { 'GSUB': { # 1: splitSingleSubst, # 2: splitMultipleSubst, @@ -621,7 +1282,7 @@ }, 'GPOS': { # 1: splitSinglePos, -# 2: splitPairPos, + 2: splitPairPos, # 3: splitCursivePos, # 4: splitMarkBasePos, # 5: splitMarkLigPos, @@ -643,6 +1304,11 @@ subIndex = overflowRecord.SubTableIndex subtable = lookup.SubTable[subIndex] + # First, try not sharing anything for this subtable... + if not hasattr(subtable, "DontShare"): + subtable.DontShare = True + return True + if hasattr(subtable, 'ExtSubTable'): # We split the subtable of the Extension table, and add a new Extension table # to contain the new subtable. 
@@ -650,7 +1316,7 @@ subTableType = subtable.ExtSubTable.__class__.LookupType extSubTable = subtable subtable = extSubTable.ExtSubTable - newExtSubTableClass = lookupTypes[overflowRecord.tableType][subtable.__class__.LookupType] + newExtSubTableClass = lookupTypes[overflowRecord.tableType][extSubTable.__class__.LookupType] newExtSubTable = newExtSubTableClass() newExtSubTable.Format = extSubTable.Format lookup.SubTable.insert(subIndex + 1, newExtSubTable) @@ -696,12 +1362,14 @@ if name not in namespace: # the class doesn't exist yet, so the base implementation is used. cls = type(name, (baseClass,), {}) + if name in ('GSUB', 'GPOS'): + cls.DontShare = True namespace[name] = cls for base, alts in _equivalents.items(): base = namespace[base] for alt in alts: - namespace[alt] = type(alt, (base,), {}) + namespace[alt] = base global lookupTypes lookupTypes = { @@ -726,6 +1394,17 @@ 8: ChainContextPos, 9: ExtensionPos, }, + 'mort': { + 4: NoncontextualMorph, + }, + 'morx': { + 0: RearrangementMorph, + 1: ContextualMorph, + 2: LigatureMorph, + # 3: Reserved, + 4: NoncontextualMorph, + # 5: InsertionMorph, + }, } lookupTypes['JSTF'] = lookupTypes['GPOS'] # JSTF contains GPOS for lookupEnum in lookupTypes.values(): diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_p_o_s_t.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_p_o_s_t.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_p_o_s_t.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_p_o_s_t.py 2018-01-08 12:40:40.000000000 +0000 @@ -91,9 +91,7 @@ self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs) for glyphID in range(numGlyphs): index = indices[glyphID] - if index > 32767: # reserved for future use; ignore - name = "" - elif index > 257: + if index > 257: try: name = extraNames[index-258] except IndexError: @@ -156,7 +154,8 @@ assert len(glyphOrder) == numGlyphs indices = array.array("H") extraDict = {} - extraNames = self.extraNames + extraNames = self.extraNames = [ 
+ n for n in self.extraNames if n not in standardGlyphOrder] for i in range(len(extraNames)): extraDict[extraNames[i]] = i for glyphID in range(numGlyphs): @@ -171,7 +170,6 @@ index = standardGlyphOrder.index(psName) else: index = 258 + len(extraNames) - assert index < 32768, "Too many glyph names for 'post' table format 2" extraDict[psName] = len(extraNames) extraNames.append(psName) indices.append(index) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_p_r_o_p.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_p_r_o_p.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_p_r_o_p.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_p_r_o_p.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6prop.html +class table__p_r_o_p(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_s_b_i_x.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_s_b_i_x.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_s_b_i_x.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_s_b_i_x.py 2018-01-08 12:40:40.000000000 +0000 @@ -31,8 +31,9 @@ class table__s_b_i_x(DefaultTable.DefaultTable): - def __init__(self, tag): - self.tableTag = tag + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) self.version = 1 self.flags = 1 self.numStrikes = 0 diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/S__i_l_f.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/S__i_l_f.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/S__i_l_f.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/S__i_l_f.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,877 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from 
fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from itertools import * +from . import DefaultTable +from . import grUtils +from array import array +import struct, operator, warnings, re, sys + +Silf_hdr_format = ''' + > + version: 16.16F +''' + +Silf_hdr_format_3 = ''' + > + version: 16.16F + compilerVersion: L + numSilf: H + x + x +''' + +Silf_part1_format_v3 = ''' + > + ruleVersion: 16.16F + passOffset: H + pseudosOffset: H +''' + +Silf_part1_format = ''' + > + maxGlyphID: H + extraAscent: h + extraDescent: h + numPasses: B + iSubst: B + iPos: B + iJust: B + iBidi: B + flags: B + maxPreContext: B + maxPostContext: B + attrPseudo: B + attrBreakWeight: B + attrDirectionality: B + attrMirroring: B + attrSkipPasses: B + numJLevels: B +''' + +Silf_justify_format = ''' + > + attrStretch: B + attrShrink: B + attrStep: B + attrWeight: B + runto: B + x + x + x +''' + +Silf_part2_format = ''' + > + numLigComp: H + numUserDefn: B + maxCompPerLig: B + direction: B + attCollisions: B + x + x + x + numCritFeatures: B +''' + +Silf_pseudomap_format = ''' + > + unicode: L + nPseudo: H +''' + +Silf_classmap_format = ''' + > + numClass: H + numLinear: H +''' + +Silf_lookupclass_format = ''' + > + numIDs: H + searchRange: H + entrySelector: H + rangeShift: H +''' + +Silf_lookuppair_format = ''' + > + glyphId: H + index: H +''' + +Silf_pass_format = ''' + > + flags: B + maxRuleLoop: B + maxRuleContext: B + maxBackup: B + numRules: H + fsmOffset: H + pcCode: L + rcCode: L + aCode: L + oDebug: L + numRows: H + numTransitional: H + numSuccess: H + numColumns: H +''' + +aCode_info = ( + ("NOP", 0), + ("PUSH_BYTE", "b"), + ("PUSH_BYTE_U", "B"), + ("PUSH_SHORT", ">h"), + ("PUSH_SHORT_U", ">H"), + ("PUSH_LONG", ">L"), + ("ADD", 0), + ("SUB", 0), + ("MUL", 0), + ("DIV", 0), + ("MIN", 0), + ("MAX", 0), + ("NEG", 0), + ("TRUNC8", 0), + ("TRUNC16", 0), + ("COND", 0), + ("AND", 0), # x10 + ("OR", 0), + ("NOT", 0), + ("EQUAL", 0), + ("NOT_EQ", 0), + ("LESS", 0), + 
("GTR", 0), + ("LESS_EQ", 0), + ("GTR_EQ", 0), + ("NEXT", 0), + ("NEXT_N", "b"), + ("COPY_NEXT", 0), + ("PUT_GLYPH_8BIT_OBS", "B"), + ("PUT_SUBS_8BIT_OBS", "bBB"), + ("PUT_COPY", "b"), + ("INSERT", 0), + ("DELETE", 0), # x20 + ("ASSOC", -1), + ("CNTXT_ITEM", "bB"), + ("ATTR_SET", "B"), + ("ATTR_ADD", "B"), + ("ATTR_SUB", "B"), + ("ATTR_SET_SLOT", "B"), + ("IATTR_SET_SLOT", "BB"), + ("PUSH_SLOT_ATTR", "Bb"), + ("PUSH_GLYPH_ATTR_OBS", "Bb"), + ("PUSH_GLYPH_METRIC", "Bbb"), + ("PUSH_FEAT", "Bb"), + ("PUSH_ATT_TO_GATTR_OBS", "Bb"), + ("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"), + ("PUSH_ISLOT_ATTR", "Bbb"), + ("PUSH_IGLYPH_ATTR", "Bbb"), + ("POP_RET", 0), # x30 + ("RET_ZERO", 0), + ("RET_TRUE", 0), + ("IATTR_SET", "BB"), + ("IATTR_ADD", "BB"), + ("IATTR_SUB", "BB"), + ("PUSH_PROC_STATE", "B"), + ("PUSH_VERSION", 0), + ("PUT_SUBS", ">bHH"), + ("PUT_SUBS2", 0), + ("PUT_SUBS3", 0), + ("PUT_GLYPH", ">H"), + ("PUSH_GLYPH_ATTR", ">Hb"), + ("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"), + ("BITOR", 0), + ("BITAND", 0), + ("BITNOT", 0), # x40 + ("BITSET", ">HH"), + ("SET_FEAT", "Bb") +) +aCode_map = dict([(x[0], (i, x[1])) for i,x in enumerate(aCode_info)]) + +def disassemble(aCode): + codelen = len(aCode) + pc = 0 + res = [] + while pc < codelen: + opcode = byteord(aCode[pc:pc+1]) + if opcode > len(aCode_info): + instr = aCode_info[0] + else: + instr = aCode_info[opcode] + pc += 1 + if instr[1] != 0 and pc >= codelen : return res + if instr[1] == -1: + count = byteord(aCode[pc]) + fmt = "%dB" % count + pc += 1 + elif instr[1] == 0: + fmt = "" + else : + fmt = instr[1] + if fmt == "": + res.append(instr[0]) + continue + parms = struct.unpack_from(fmt, aCode[pc:]) + res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")") + pc += struct.calcsize(fmt) + return res + +instre = re.compile("^\s*([^(]+)\s*(?:\(([^)]+)\))?") +def assemble(instrs): + res = [] + for inst in instrs: + m = instre.match(inst) + if not m or not m.group(1) in aCode_map: + continue + opcode, parmfmt = aCode_map[m.group(1)] 
+ res.append(struct.pack("B", opcode)) + if m.group(2): + if parmfmt == 0: + continue + parms = [int(x) for x in re.split(",\s*", m.group(2))] + if parmfmt == -1: + l = len(parms) + res.append(struct.pack(("%dB" % (l+1)), l, *parms)) + else: + res.append(struct.pack(parmfmt, *parms)) + return b"".join(res) + +def writecode(tag, writer, instrs): + writer.begintag(tag) + writer.newline() + for l in disassemble(instrs): + writer.write(l) + writer.newline() + writer.endtag(tag) + writer.newline() + +def readcode(content): + res = [] + for e in content_string(content).split('\n'): + e = e.strip() + if not len(e): continue + res.append(e) + return assemble(res) + +attrs_info=('flags', 'extraAscent', 'extraDescent', 'maxGlyphID', + 'numLigComp', 'numUserDefn', 'maxCompPerLig', 'direction', 'lbGID') +attrs_passindexes = ('iSubst', 'iPos', 'iJust', 'iBidi') +attrs_contexts = ('maxPreContext', 'maxPostContext') +attrs_attributes = ('attrPseudo', 'attrBreakWeight', 'attrDirectionality', + 'attrMirroring', 'attrSkipPasses', 'attCollisions') +pass_attrs_info = ('flags', 'maxRuleLoop', 'maxRuleContext', 'maxBackup', + 'minRulePreContext', 'maxRulePreContext', 'collisionThreshold') +pass_attrs_fsm = ('numRows', 'numTransitional', 'numSuccess', 'numColumns') + +def writesimple(tag, self, writer, *attrkeys): + attrs = dict([(k, getattr(self, k)) for k in attrkeys]) + writer.simpletag(tag, **attrs) + writer.newline() + +def getSimple(self, attrs, *attr_list): + for k in attr_list: + if k in attrs: + setattr(self, k, int(safeEval(attrs[k]))) + +def content_string(contents): + res = "" + for element in contents: + if isinstance(element, tuple): continue + res += element + return res.strip() + +def wrapline(writer, dat, length=80): + currline = "" + for d in dat: + if len(currline) > length: + writer.write(currline[:-1]) + writer.newline() + currline = "" + currline += d + " " + if len(currline): + writer.write(currline[:-1]) + writer.newline() + +class _Object() : + pass + +class 
table_S__i_l_f(DefaultTable.DefaultTable): + '''Silf table support''' + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.silfs = [] + + def decompile(self, data, ttFont): + sstruct.unpack2(Silf_hdr_format, data, self) + if self.version >= 5.0: + (data, self.scheme) = grUtils.decompress(data) + sstruct.unpack2(Silf_hdr_format_3, data, self) + base = sstruct.calcsize(Silf_hdr_format_3) + elif self.version < 3.0: + self.numSilf = struct.unpack('>H', data[4:6]) + self.scheme = 0 + self.compilerVersion = 0 + base = 8 + else: + self.scheme = 0 + sstruct.unpack2(Silf_hdr_format_3, data, self) + base = sstruct.calcsize(Silf_hdr_format_3) + + silfoffsets = struct.unpack_from(('>%dL' % self.numSilf), data[base:]) + for offset in silfoffsets: + s = Silf() + self.silfs.append(s) + s.decompile(data[offset:], ttFont, self.version) + + def compile(self, ttFont): + self.numSilf = len(self.silfs) + if self.version < 3.0: + hdr = sstruct.pack(Silf_hdr_format, self) + hdr += struct.pack(">HH", self.numSilf, 0) + else: + hdr = sstruct.pack(Silf_hdr_format_3, self) + offset = len(hdr) + 4 * self.numSilf + data = "" + for s in self.silfs: + hdr += struct.pack(">L", offset) + subdata = s.compile(ttFont, self.version) + offset += len(subdata) + data += subdata + if self.version >= 5.0: + return grUtils.compress(self.scheme, hdr+data) + return hdr+data + + def toXML(self, writer, ttFont): + writer.comment('Attributes starting with _ are informative only') + writer.newline() + writer.simpletag('version', version=self.version, + compilerVersion=self.compilerVersion, compressionScheme=self.scheme) + writer.newline() + for s in self.silfs: + writer.begintag('silf') + writer.newline() + s.toXML(writer, ttFont, self.version) + writer.endtag('silf') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'version': + self.scheme=int(safeEval(attrs['compressionScheme'])) + self.version = float(safeEval(attrs['version'])) + 
self.compilerVersion = int(safeEval(attrs['compilerVersion'])) + return + if name == 'silf': + s = Silf() + self.silfs.append(s) + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + s.fromXML(tag, attrs, subcontent, ttFont, self.version) + +class Silf(object): + '''A particular Silf subtable''' + + def __init__(self): + self.passes = [] + self.scriptTags = [] + self.critFeatures = [] + self.jLevels = [] + self.pMap = {} + + def decompile(self, data, ttFont, version=2.0): + if version >= 3.0 : + _, data = sstruct.unpack2(Silf_part1_format_v3, data, self) + _, data = sstruct.unpack2(Silf_part1_format, data, self) + for jlevel in range(self.numJLevels): + j, data = sstruct.unpack2(Silf_justify_format, data, _Object()) + self.jLevels.append(j) + _, data = sstruct.unpack2(Silf_part2_format, data, self) + if self.numCritFeatures: + self.critFeatures = struct.unpack_from(('>%dH' % self.numCritFeatures), data) + data = data[self.numCritFeatures * 2 + 1:] + (numScriptTag,) = struct.unpack_from('B', data) + if numScriptTag: + self.scriptTags = [struct.unpack("4s", data[x:x+4])[0] for x in range(1, 1 + 4 * numScriptTag, 4)] + data = data[1 + 4 * numScriptTag:] + (self.lbGID,) = struct.unpack('>H', data[:2]) + if self.numPasses: + self.oPasses = struct.unpack(('>%dL' % (self.numPasses+1)), data[2:6+4*self.numPasses]) + data = data[6 + 4 * self.numPasses:] + (numPseudo,) = struct.unpack(">H", data[:2]) + for i in range(numPseudo): + if version >= 3.0: + pseudo = sstruct.unpack(Silf_pseudomap_format, data[8+6*i:14+6*i], _Object()) + else: + pseudo = struct.unpack('>HH', data[8+4*i:12+4*i], _Object()) + self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo) + data = data[8 + 6 * numPseudo:] + currpos = (sstruct.calcsize(Silf_part1_format) + + sstruct.calcsize(Silf_justify_format) * self.numJLevels + + sstruct.calcsize(Silf_part2_format) + 2 * self.numCritFeatures + + 1 + 1 + 4 * numScriptTag + 6 + 4 * self.numPasses 
+ 8 + 6 * numPseudo) + if version >= 3.0: + currpos += sstruct.calcsize(Silf_part1_format_v3) + self.classes = Classes() + self.classes.decompile(data, ttFont, version) + for i in range(self.numPasses): + p = Pass() + self.passes.append(p) + p.decompile(data[self.oPasses[i]-currpos:self.oPasses[i+1]-currpos], + ttFont, version) + + def compile(self, ttFont, version=2.0): + self.numPasses = len(self.passes) + self.numJLevels = len(self.jLevels) + self.numCritFeatures = len(self.critFeatures) + numPseudo = len(self.pMap) + data = "" + if version >= 3.0: + hdroffset = sstruct.calcsize(Silf_part1_format_v3) + else: + hdroffset = 0 + data += sstruct.pack(Silf_part1_format, self) + for j in self.jLevels: + data += sstruct.pack(Silf_justify_format, j) + data += sstruct.pack(Silf_part2_format, self) + if self.numCritFeatures: + data += struct.pack((">%dH" % self.numCritFeaturs), *self.critFeatures) + data += struct.pack("BB", 0, len(self.scriptTags)) + if len(self.scriptTags): + tdata = [struct.pack("4s", x) for x in self.scriptTags] + data += "".join(tdata) + data += struct.pack(">H", self.lbGID) + self.passOffset = len(data) + + data1 = grUtils.bininfo(numPseudo, 6) + currpos = hdroffset + len(data) + 4 * (self.numPasses + 1) + self.pseudosOffset = currpos + len(data1) + for u, p in sorted(self.pMap.items()): + data1 += struct.pack((">LH" if version >= 3.0 else ">HH"), + u, ttFont.getGlyphID(p)) + data1 += self.classes.compile(ttFont, version) + currpos += len(data1) + data2 = "" + datao = "" + for i, p in enumerate(self.passes): + base = currpos + len(data2) + datao += struct.pack(">L", base) + data2 += p.compile(ttFont, base, version) + datao += struct.pack(">L", currpos + len(data2)) + + if version >= 3.0: + data3 = sstruct.pack(Silf_part1_format_v3, self) + else: + data3 = "" + return data3 + data + datao + data1 + data2 + + + def toXML(self, writer, ttFont, version=2.0): + if version >= 3.0: + writer.simpletag('version', ruleVersion=self.ruleVersion) + 
writer.newline() + writesimple('info', self, writer, *attrs_info) + writesimple('passindexes', self, writer, *attrs_passindexes) + writesimple('contexts', self, writer, *attrs_contexts) + writesimple('attributes', self, writer, *attrs_attributes) + if len(self.jLevels): + writer.begintag('justifications') + writer.newline() + jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format) + for i, j in enumerate(self.jLevels): + attrs = dict([(k, getattr(j, k)) for k in jnames]) + writer.simpletag('justify', **attrs) + writer.newline() + writer.endtag('justifications') + writer.newline() + if len(self.critFeatures): + writer.begintag('critFeatures') + writer.newline() + writer.write(" ".join(map(str, self.critFeatures))) + writer.newline() + writer.endtag('critFeatures') + writer.newline() + if len(self.scriptTags): + writer.begintag('scriptTags') + writer.newline() + writer.write(" ".join(self.scriptTags)) + writer.newline() + writer.endtag('scriptTags') + writer.newline() + if self.pMap: + writer.begintag('pseudoMap') + writer.newline() + for k, v in sorted(self.pMap.items()): + writer.simpletag('pseudo', unicode=hex(k), pseudo=v) + writer.newline() + writer.endtag('pseudoMap') + writer.newline() + self.classes.toXML(writer, ttFont, version) + if len(self.passes): + writer.begintag('passes') + writer.newline() + for i, p in enumerate(self.passes): + writer.begintag('pass', _index=i) + writer.newline() + p.toXML(writer, ttFont, version) + writer.endtag('pass') + writer.newline() + writer.endtag('passes') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont, version=2.0): + if name == 'version': + self.ruleVersion = float(safeEval(attrs.get('ruleVersion', "0"))) + if name == 'info': + getSimple(self, attrs, *attrs_info) + elif name == 'passindexes': + getSimple(self, attrs, *attrs_passindexes) + elif name == 'contexts': + getSimple(self, attrs, *attrs_contexts) + elif name == 'attributes': + getSimple(self, attrs, *attrs_attributes) + elif name == 
'justifications': + for element in content: + if not isinstance(element, tuple): continue + (tag, attrs, subcontent) = element + if tag == 'justify': + j = _Object() + for k, v in attrs.items(): + setattr(j, k, int(v)) + self.jLevels.append(j) + elif name == 'critFeatures': + self.critFeatures = [] + element = content_string(content) + self.critFeatures.extend(map(int, element.split())) + elif name == 'scriptTags': + self.scriptTags = [] + element = content_string(content) + for n in element.split(): + self.scriptTags.append(n) + elif name == 'pseudoMap': + self.pMap = {} + for element in content: + if not isinstance(element, tuple): continue + (tag, attrs, subcontent) = element + if tag == 'pseudo': + k = int(attrs['unicode'], 16) + v = attrs['pseudo'] + self.pMap[k] = v + elif name == 'classes': + self.classes = Classes() + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + self.classes.fromXML(tag, attrs, subcontent, ttFont, version) + elif name == 'passes': + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + if tag == 'pass': + p = Pass() + for e in subcontent: + if not isinstance(e, tuple): continue + p.fromXML(e[0], e[1], e[2], ttFont, version) + self.passes.append(p) + + +class Classes(object): + + def __init__(self): + self.linear = [] + self.nonLinear = [] + + def decompile(self, data, ttFont, version=2.0): + sstruct.unpack2(Silf_classmap_format, data, self) + if version >= 4.0 : + oClasses = struct.unpack((">%dL" % (self.numClass+1)), + data[4:8+4*self.numClass]) + else: + oClasses = struct.unpack((">%dH" % (self.numClass+1)), + data[4:6+2*self.numClass]) + for s,e in zip(oClasses[:self.numLinear], oClasses[1:self.numLinear+1]): + self.linear.append(map(ttFont.getGlyphName, + struct.unpack((">%dH" % ((e-s)/2)), data[s:e]))) + for s,e in zip(oClasses[self.numLinear:self.numClass], + oClasses[self.numLinear+1:self.numClass+1]): + nonLinids = 
[struct.unpack(">HH", data[x:x+4]) for x in range(s+8, e, 4)] + nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids]) + self.nonLinear.append(nonLin) + + def compile(self, ttFont, version=2.0): + data = "" + oClasses = [] + if version >= 4.0: + offset = 8 + 4 * (len(self.linear) + len(self.nonLinear)) + else: + offset = 6 + 2 * (len(self.linear) + len(self.nonLinear)) + for l in self.linear: + oClasses.append(len(data) + offset) + gs = map(ttFont.getGlyphID, l) + data += struct.pack((">%dH" % len(l)), *gs) + for l in self.nonLinear: + oClasses.append(len(data) + offset) + gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()] + data += grUtils.bininfo(len(gs)) + data += "".join([struct.pack(">HH", *x) for x in sorted(gs)]) + oClasses.append(len(data) + offset) + self.numClass = len(oClasses) - 1 + self.numLinear = len(self.linear) + return sstruct.pack(Silf_classmap_format, self) + \ + struct.pack(((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)), + *oClasses) + data + + def toXML(self, writer, ttFont, version=2.0): + writer.begintag('classes') + writer.newline() + writer.begintag('linearClasses') + writer.newline() + for i,l in enumerate(self.linear): + writer.begintag('linear', _index=i) + writer.newline() + wrapline(writer, l) + writer.endtag('linear') + writer.newline() + writer.endtag('linearClasses') + writer.newline() + writer.begintag('nonLinearClasses') + writer.newline() + for i, l in enumerate(self.nonLinear): + writer.begintag('nonLinear', _index=i + self.numLinear) + writer.newline() + for inp, ind in l.items(): + writer.simpletag('map', glyph=inp, index=ind) + writer.newline() + writer.endtag('nonLinear') + writer.newline() + writer.endtag('nonLinearClasses') + writer.newline() + writer.endtag('classes') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont, version=2.0): + if name == 'linearClasses': + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + 
if tag == 'linear': + l = content_string(subcontent).split() + self.linear.append(l) + elif name == 'nonLinearClasses': + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + if tag =='nonLinear': + l = {} + for e in subcontent: + if not isinstance(e, tuple): continue + tag, attrs, subsubcontent = e + if tag == 'map': + l[attrs['glyph']] = int(safeEval(attrs['index'])) + self.nonLinear.append(l) + +class Pass(object): + + def __init__(self): + self.colMap = {} + self.rules = [] + self.rulePreContexts = [] + self.ruleSortKeys = [] + self.ruleConstraints = [] + self.passConstraints = "" + self.actions = [] + self.stateTrans = [] + self.startStates = [] + + def decompile(self, data, ttFont, version=2.0): + _, data = sstruct.unpack2(Silf_pass_format, data, self) + (numRange, _, _, _) = struct.unpack(">4H", data[:8]) + data = data[8:] + for i in range(numRange): + (first, last, col) = struct.unpack(">3H", data[6*i:6*i+6]) + for g in range(first, last+1): + self.colMap[ttFont.getGlyphName(g)] = col + data = data[6*numRange:] + oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data) + data = data[2+2*self.numSuccess:] + rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data) + self.rules = [rules[s:e] for (s,e) in zip(oRuleMap, oRuleMap[1:])] + data = data[2*oRuleMap[-1]:] + (self.minRulePreContext, self.maxRulePreContext) = struct.unpack('BB', data[:2]) + numStartStates = self.maxRulePreContext - self.minRulePreContext + 1 + self.startStates = struct.unpack((">%dH" % numStartStates), + data[2:2 + numStartStates * 2]) + data = data[2+numStartStates*2:] + self.ruleSortKeys = struct.unpack((">%dH" % self.numRules), data[:2 * self.numRules]) + data = data[2*self.numRules:] + self.rulePreContexts = struct.unpack(("%dB" % self.numRules), data[:self.numRules]) + data = data[self.numRules:] + (self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3]) + oConstraints = list(struct.unpack((">%dH" % 
(self.numRules + 1)), + data[3:5 + self.numRules * 2])) + data = data[5 + self.numRules * 2:] + oActions = list(struct.unpack((">%dH" % (self.numRules + 1)), + data[:2 + self.numRules * 2])) + data = data[2 * self.numRules + 2:] + for i in range(self.numTransitional): + a = array("H", data[i*self.numColumns*2:(i+1)*self.numColumns*2]) + a.byteswap() + self.stateTrans.append(a) + data = data[self.numTransitional * self.numColumns * 2 + 1:] + self.passConstraints = data[:pConstraint] + data = data[pConstraint:] + for i in range(len(oConstraints)-2,-1,-1): + if oConstraints[i] == 0 : + oConstraints[i] = oConstraints[i+1] + self.ruleConstraints = [(data[s:e] if (e-s > 1) else "") for (s,e) in zip(oConstraints, oConstraints[1:])] + data = data[oConstraints[-1]:] + self.actions = [(data[s:e] if (e-s > 1) else "") for (s,e) in zip(oActions, oActions[1:])] + data = data[oActions[-1]:] + # not using debug + + def compile(self, ttFont, base, version=2.0): + # build it all up backwards + oActions = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.actions + [""], (0, []))[1] + oConstraints = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.ruleConstraints + [""], (1, []))[1] + constraintCode = "\000" + "".join(self.ruleConstraints) + transes = [] + for t in self.stateTrans: + t.byteswap() + transes.append(t.tostring()) + t.byteswap() + if not len(transes): + self.startStates = [0] + oRuleMap = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.rules+[[]], (0, []))[1] + passRanges = [] + gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()]) + for e in grUtils.entries(gidcolmap, sameval = True): + if e[1]: + passRanges.append((e[0], e[0]+e[1]-1, e[2][0])) + self.numRules = len(self.actions) + self.fsmOffset = (sstruct.calcsize(Silf_pass_format) + 8 + len(passRanges) * 6 + + len(oRuleMap) * 2 + 2 * oRuleMap[-1] + 2 + + 2 * len(self.startStates) + 3 * self.numRules + 3 + + 4 * self.numRules + 4) + self.pcCode = self.fsmOffset + 
2*self.numTransitional*self.numColumns + 1 + base + self.rcCode = self.pcCode + len(self.passConstraints) + self.aCode = self.rcCode + len(constraintCode) + self.oDebug = 0 + # now generate output + data = sstruct.pack(Silf_pass_format, self) + data += grUtils.bininfo(len(passRanges), 6) + data += "".join(struct.pack(">3H", *p) for p in passRanges) + data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap) + flatrules = reduce(lambda a,x: a+x, self.rules, []) + data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules) + data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext) + data += struct.pack((">%dH" % len(self.startStates)), *self.startStates) + data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys) + data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts) + data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints)) + data += struct.pack((">%dH" % (self.numRules+1)), *oConstraints) + data += struct.pack((">%dH" % (self.numRules+1)), *oActions) + return data + "".join(transes) + struct.pack("B", 0) + \ + self.passConstraints + constraintCode + "".join(self.actions) + + def toXML(self, writer, ttFont, version=2.0): + writesimple('info', self, writer, *pass_attrs_info) + writesimple('fsminfo', self, writer, *pass_attrs_fsm) + writer.begintag('colmap') + writer.newline() + wrapline(writer, ["{}={}".format(*x) for x in sorted(self.colMap.items(), + key=lambda x:ttFont.getGlyphID(x[0]))]) + writer.endtag('colmap') + writer.newline() + writer.begintag('staterulemap') + writer.newline() + for i, r in enumerate(self.rules): + writer.simpletag('state', number = self.numRows - self.numSuccess + i, + rules = " ".join(map(str, r))) + writer.newline() + writer.endtag('staterulemap') + writer.newline() + writer.begintag('rules') + writer.newline() + for i in range(len(self.actions)): + writer.begintag('rule', index=i, precontext=self.rulePreContexts[i], + sortkey=self.ruleSortKeys[i]) + writer.newline() + if 
len(self.ruleConstraints[i]): + writecode('constraint', writer, self.ruleConstraints[i]) + writecode('action', writer, self.actions[i]) + writer.endtag('rule') + writer.newline() + writer.endtag('rules') + writer.newline() + if len(self.passConstraints): + writecode('passConstraint', writer, self.passConstraints) + if len(self.stateTrans): + writer.begintag('fsm') + writer.newline() + writer.begintag('starts') + writer.write(" ".join(map(str, self.startStates))) + writer.endtag('starts') + writer.newline() + for i, s in enumerate(self.stateTrans): + writer.begintag('row', _i=i) + # no newlines here + writer.write(" ".join(map(str, s))) + writer.endtag('row') + writer.newline() + writer.endtag('fsm') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont, version=2.0): + if name == 'info': + getSimple(self, attrs, *pass_attrs_info) + elif name == 'fsminfo': + getSimple(self, attrs, *pass_attrs_fsm) + elif name == 'colmap': + e = content_string(content) + for w in e.split(): + x = w.split('=') + if len(x) != 2 or x[0] == '' or x[1] == '': continue + self.colMap[x[0]] = int(x[1]) + elif name == 'staterulemap': + for e in content: + if not isinstance(e, tuple): continue + tag, a, c = e + if tag == 'state': + self.rules.append(map(int, a['rules'].split(" "))) + elif name == 'rules': + for element in content: + if not isinstance(element, tuple): continue + tag, a, c = element + if tag != 'rule': continue + self.rulePreContexts.append(int(a['precontext'])) + self.ruleSortKeys.append(int(a['sortkey'])) + con = "" + act = "" + for e in c: + if not isinstance(e, tuple): continue + tag, a, subc = e + if tag == 'constraint': + con = readcode(subc) + elif tag == 'action': + act = readcode(subc) + self.actions.append(act) + self.ruleConstraints.append(con) + elif name == 'passConstraint': + self.passConstraints = readcode(content) + elif name == 'fsm': + for element in content: + if not isinstance(element, tuple): continue + tag, a, c = element + if tag == 'row': 
+ s = array('H') + e = content_string(c) + s.extend(map(int, e.split())) + self.stateTrans.append(s) + elif tag == 'starts': + s = [] + e = content_string(c) + s.extend(map(int, e.split())) + self.startStates = s + diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/S__i_l_l.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/S__i_l_l.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/S__i_l_l.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/S__i_l_l.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,79 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable +from . import grUtils +import struct + +Sill_hdr = ''' + > + version: 16.16F +''' + +class table_S__i_l_l(DefaultTable.DefaultTable): + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.langs = {} + + def decompile(self, data, ttFont): + (_, data) = sstruct.unpack2(Sill_hdr, data, self) + numLangs, = struct.unpack('>H', data[:2]) + data = data[8:] + maxsetting = 0 + langinfo = [] + for i in range(numLangs): + (langcode, numsettings, offset) = struct.unpack(">4sHH", + data[i * 8:(i+1) * 8]) + offset = int(offset / 8) - (numLangs + 1) + langcode = langcode.replace(b'\000', b'') + langinfo.append((langcode, numsettings, offset)) + maxsetting = max(maxsetting, offset + numsettings) + data = data[numLangs * 8:] + finfo = [] + for i in range(maxsetting): + (fid, val, _) = struct.unpack(">LHH", data[i * 8:(i+1) * 8]) + finfo.append((fid, val)) + self.langs = {} + for c, n, o in langinfo: + self.langs[c] = [] + for i in range(o, o+n): + self.langs[c].append(finfo[i]) + + def compile(self, ttFont): + ldat = "" + fdat = "" + offset = 0 + for c, inf in sorted(self.langs.items()): + ldat += struct.pack(">4sHH", c.encode('utf8'), len(inf), 8 * (offset + len(self.langs) + 1)) + for fid, val in inf: + 
fdat += struct.pack(">LHH", fid, val, 0) + offset += len(inf) + return sstruct.pack(Sill_hdr, self) + grUtils.bininfo(len(self.langs)) + \ + ldat + fdat + + def toXML(self, writer, ttFont): + writer.simpletag('version', version=self.version) + writer.newline() + for c, inf in sorted(self.langs.items()): + writer.begintag('lang', name=c) + writer.newline() + for fid, val in inf: + writer.simpletag('feature', fid=grUtils.num2tag(fid), val=val) + writer.newline() + writer.endtag('lang') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'version': + self.version = float(safeEval(attrs['version'])) + elif name == 'lang': + c = attrs['name'] + self.langs[c] = [] + for element in content: + if not isinstance(element, tuple): continue + tag, a, subcontent = element + if tag == 'feature': + self.langs[c].append((grUtils.tag2num(a['fid']), + int(safeEval(a['val'])))) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/S_T_A_T_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/S_T_A_T_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/S_T_A_T_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/S_T_A_T_.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_S_T_A_T_(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/S_V_G_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/S_V_G_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/S_V_G_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/S_V_G_.py 2018-01-08 12:40:40.000000000 +0000 @@ -8,6 +8,11 @@ import xml.etree.ElementTree as ET import struct import re +import logging + + +log = logging.getLogger(__name__) + __doc__=""" Compiles/decompiles version 0 and 1 SVG tables from/to XML. 
@@ -94,6 +99,10 @@ class table_S_V_G_(DefaultTable.DefaultTable): + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.colorPalettes = None + def decompile(self, data, ttFont): self.docList = None self.colorPalettes = None @@ -101,10 +110,15 @@ self.version = struct.unpack(">H", data[pos:pos+2])[0] if self.version == 1: + # This is pre-standardization version of the table; and obsolete. But we decompile it for now. + # https://wiki.mozilla.org/SVGOpenTypeFonts self.decompile_format_1(data, ttFont) else: if self.version != 0: - print("Unknown SVG table version '%s'. Decompiling as version 0." % (self.version)) + log.warning( + "Unknown SVG table version '%s'. Decompiling as version 0.", self.version) + # This is the standardized version of the table; and current. + # https://www.microsoft.com/typography/otspec/svg.htm self.decompile_format_0(data, ttFont) def decompile_format_0(self, data, ttFont): @@ -141,10 +155,8 @@ pos += 4 def decompile_format_1(self, data, ttFont): - pos = 2 - self.numEntries = struct.unpack(">H", data[pos:pos+2])[0] - pos += 2 - self.decompileEntryList(data, pos) + self.offsetToSVGDocIndex = 2 + self.decompileEntryList(data) def decompileEntryList(self, data): # data starts with the first entry of the entry list. 
@@ -273,7 +285,7 @@ writer.newline() for uiNameID in self.colorPalettes.colorParamUINameIDs: writer.begintag("colorParamUINameID") - writer.writeraw(str(uiNameID)) + writer._writeraw(str(uiNameID)) writer.endtag("colorParamUINameID") writer.newline() for colorPalette in self.colorPalettes.colorPaletteList: @@ -294,10 +306,6 @@ writer.endtag("colorPalettes") writer.newline() - else: - writer.begintag("colorPalettes") - writer.endtag("colorPalettes") - writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "svgDoc": @@ -314,7 +322,7 @@ if self.colorPalettes.numColorParams == 0: self.colorPalettes = None else: - print("Unknown", name, content) + log.warning("Unknown %s %s", name, content) class DocumentIndexEntry(object): def __init__(self): @@ -335,7 +343,7 @@ def fromXML(self, name, attrs, content, ttFont): for element in content: - if isinstance(element, type("")): + if not isinstance(element, tuple): continue name, attrib, content = element if name == "colorParamUINameID": @@ -344,7 +352,7 @@ elif name == "colorPalette": colorPalette = ColorPalette() self.colorPaletteList.append(colorPalette) - colorPalette.fromXML((name, attrib, content), ttFont) + colorPalette.fromXML(name, attrib, content, ttFont) self.numColorParams = len(self.colorParamUINameIDs) self.numColorPalettes = len(self.colorPaletteList) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_t_r_a_k.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_t_r_a_k.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_t_r_a_k.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/_t_r_a_k.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,314 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from fontTools.misc.textTools import safeEval +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import struct +try: + from collections.abc import MutableMapping +except ImportError: + from UserDict import DictMixin as MutableMapping + + +# Apple's documentation of 'trak': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6trak.html + +TRAK_HEADER_FORMAT = """ + > # big endian + version: 16.16F + format: H + horizOffset: H + vertOffset: H + reserved: H +""" + +TRAK_HEADER_FORMAT_SIZE = sstruct.calcsize(TRAK_HEADER_FORMAT) + + +TRACK_DATA_FORMAT = """ + > # big endian + nTracks: H + nSizes: H + sizeTableOffset: L +""" + +TRACK_DATA_FORMAT_SIZE = sstruct.calcsize(TRACK_DATA_FORMAT) + + +TRACK_TABLE_ENTRY_FORMAT = """ + > # big endian + track: 16.16F + nameIndex: H + offset: H +""" + +TRACK_TABLE_ENTRY_FORMAT_SIZE = sstruct.calcsize(TRACK_TABLE_ENTRY_FORMAT) + + +# size values are actually '16.16F' fixed-point values, but here I do the +# fixedToFloat conversion manually instead of relying on sstruct +SIZE_VALUE_FORMAT = ">l" +SIZE_VALUE_FORMAT_SIZE = struct.calcsize(SIZE_VALUE_FORMAT) + +# per-Size values are in 'FUnits', i.e. 
16-bit signed integers +PER_SIZE_VALUE_FORMAT = ">h" +PER_SIZE_VALUE_FORMAT_SIZE = struct.calcsize(PER_SIZE_VALUE_FORMAT) + + +class table__t_r_a_k(DefaultTable.DefaultTable): + dependencies = ['name'] + + def compile(self, ttFont): + dataList = [] + offset = TRAK_HEADER_FORMAT_SIZE + for direction in ('horiz', 'vert'): + trackData = getattr(self, direction + 'Data', TrackData()) + offsetName = direction + 'Offset' + # set offset to 0 if None or empty + if not trackData: + setattr(self, offsetName, 0) + continue + # TrackData table format must be longword aligned + alignedOffset = (offset + 3) & ~3 + padding, offset = b"\x00"*(alignedOffset - offset), alignedOffset + setattr(self, offsetName, offset) + + data = trackData.compile(offset) + offset += len(data) + dataList.append(padding + data) + + self.reserved = 0 + tableData = bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + dataList) + return tableData + + def decompile(self, data, ttFont): + sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self) + for direction in ('horiz', 'vert'): + trackData = TrackData() + offset = getattr(self, direction + 'Offset') + if offset != 0: + trackData.decompile(data, offset) + setattr(self, direction + 'Data', trackData) + + def toXML(self, writer, ttFont, progress=None): + writer.simpletag('version', value=self.version) + writer.newline() + writer.simpletag('format', value=self.format) + writer.newline() + for direction in ('horiz', 'vert'): + dataName = direction + 'Data' + writer.begintag(dataName) + writer.newline() + trackData = getattr(self, dataName, TrackData()) + trackData.toXML(writer, ttFont) + writer.endtag(dataName) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'version': + self.version = safeEval(attrs['value']) + elif name == 'format': + self.format = safeEval(attrs['value']) + elif name in ('horizData', 'vertData'): + trackData = TrackData() + setattr(self, name, trackData) + for element in content: + if 
not isinstance(element, tuple): + continue + name, attrs, content_ = element + trackData.fromXML(name, attrs, content_, ttFont) + + +class TrackData(MutableMapping): + + def __init__(self, initialdata={}): + self._map = dict(initialdata) + + def compile(self, offset): + nTracks = len(self) + sizes = self.sizes() + nSizes = len(sizes) + + # offset to the start of the size subtable + offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE*nTracks + trackDataHeader = sstruct.pack( + TRACK_DATA_FORMAT, + {'nTracks': nTracks, 'nSizes': nSizes, 'sizeTableOffset': offset}) + + entryDataList = [] + perSizeDataList = [] + # offset to per-size tracking values + offset += SIZE_VALUE_FORMAT_SIZE*nSizes + # sort track table entries by track value + for track, entry in sorted(self.items()): + assert entry.nameIndex is not None + entry.track = track + entry.offset = offset + entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)] + # sort per-size values by size + for size, value in sorted(entry.items()): + perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)] + offset += PER_SIZE_VALUE_FORMAT_SIZE*nSizes + # sort size values + sizeDataList = [struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes)] + + data = bytesjoin([trackDataHeader] + entryDataList + sizeDataList + perSizeDataList) + return data + + def decompile(self, data, offset): + # initial offset is from the start of trak table to the current TrackData + trackDataHeader = data[offset:offset+TRACK_DATA_FORMAT_SIZE] + if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE: + raise TTLibError('not enough data to decompile TrackData header') + sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self) + offset += TRACK_DATA_FORMAT_SIZE + + nSizes = self.nSizes + sizeTableOffset = self.sizeTableOffset + sizeTable = [] + for i in range(nSizes): + sizeValueData = data[sizeTableOffset:sizeTableOffset+SIZE_VALUE_FORMAT_SIZE] + if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE: + raise 
TTLibError('not enough data to decompile TrackData size subtable') + sizeValue, = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData) + sizeTable.append(fi2fl(sizeValue, 16)) + sizeTableOffset += SIZE_VALUE_FORMAT_SIZE + + for i in range(self.nTracks): + entry = TrackTableEntry() + entryData = data[offset:offset+TRACK_TABLE_ENTRY_FORMAT_SIZE] + if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE: + raise TTLibError('not enough data to decompile TrackTableEntry record') + sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry) + perSizeOffset = entry.offset + for j in range(nSizes): + size = sizeTable[j] + perSizeValueData = data[perSizeOffset:perSizeOffset+PER_SIZE_VALUE_FORMAT_SIZE] + if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE: + raise TTLibError('not enough data to decompile per-size track values') + perSizeValue, = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData) + entry[size] = perSizeValue + perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE + self[entry.track] = entry + offset += TRACK_TABLE_ENTRY_FORMAT_SIZE + + def toXML(self, writer, ttFont, progress=None): + nTracks = len(self) + nSizes = len(self.sizes()) + writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes)) + writer.newline() + for track, entry in sorted(self.items()): + assert entry.nameIndex is not None + entry.track = track + entry.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name != 'trackEntry': + return + entry = TrackTableEntry() + entry.fromXML(name, attrs, content, ttFont) + self[entry.track] = entry + + def sizes(self): + if not self: + return frozenset() + tracks = list(self.tracks()) + sizes = self[tracks.pop(0)].sizes() + for track in tracks: + entrySizes = self[track].sizes() + if sizes != entrySizes: + raise TTLibError( + "'trak' table entries must specify the same sizes: " + "%s != %s" % (sorted(sizes), sorted(entrySizes))) + return frozenset(sizes) + + def __getitem__(self, track): + return self._map[track] + + def __delitem__(self, 
track): + del self._map[track] + + def __setitem__(self, track, entry): + self._map[track] = entry + + def __len__(self): + return len(self._map) + + def __iter__(self): + return iter(self._map) + + def keys(self): + return self._map.keys() + + tracks = keys + + def __repr__(self): + return "TrackData({})".format(self._map if self else "") + + +class TrackTableEntry(MutableMapping): + + def __init__(self, values={}, nameIndex=None): + self.nameIndex = nameIndex + self._map = dict(values) + + def toXML(self, writer, ttFont, progress=None): + name = ttFont["name"].getDebugName(self.nameIndex) + writer.begintag( + "trackEntry", + (('value', self.track), ('nameIndex', self.nameIndex))) + writer.newline() + if name: + writer.comment(name) + writer.newline() + for size, perSizeValue in sorted(self.items()): + writer.simpletag("track", size=size, value=perSizeValue) + writer.newline() + writer.endtag("trackEntry") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.track = safeEval(attrs['value']) + self.nameIndex = safeEval(attrs['nameIndex']) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, _ = element + if name != 'track': + continue + size = safeEval(attrs['size']) + self[size] = safeEval(attrs['value']) + + def __getitem__(self, size): + return self._map[size] + + def __delitem__(self, size): + del self._map[size] + + def __setitem__(self, size, value): + self._map[size] = value + + def __len__(self): + return len(self._map) + + def __iter__(self): + return iter(self._map) + + def keys(self): + return self._map.keys() + + sizes = keys + + def __repr__(self): + return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.nameIndex == other.nameIndex and dict(self) == dict(other) + + def __ne__(self, other): + result = self.__eq__(other) + return result if result is 
NotImplemented else not result diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__0.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I__0.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__0.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I__0.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,9 +1,16 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI0 is the index table containing the lengths and offsets for the glyph +programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained +in the TSI1 table. +""" from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from . import DefaultTable import struct -tsi0Format = '>HHl' +tsi0Format = '>HHL' def fixlongs(glyphID, textLength, textOffset): return int(glyphID), int(textLength), textOffset @@ -22,7 +29,7 @@ indices.append((glyphID, textLength, textOffset)) data = data[size:] assert len(data) == 0 - assert indices[-5] == (0XFFFE, 0, -1409540300), "bad magic number" # 0xABFC1F34 + assert indices[-5] == (0XFFFE, 0, 0xABFC1F34), "bad magic number" self.indices = indices[:-5] self.extra_indices = indices[-4:] @@ -30,11 +37,11 @@ if not hasattr(self, "indices"): # We have no corresponding table (TSI1 or TSI3); let's return # no data, which effectively means "ignore us". 
- return "" + return b"" data = b"" for index, textLength, textOffset in self.indices: data = data + struct.pack(tsi0Format, index, textLength, textOffset) - data = data + struct.pack(tsi0Format, 0XFFFE, 0, -1409540300) # 0xABFC1F34 + data = data + struct.pack(tsi0Format, 0XFFFE, 0, 0xABFC1F34) for index, textLength, textOffset in self.extra_indices: data = data + struct.pack(tsi0Format, index, textLength, textOffset) return data diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__1.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I__1.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__1.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I__1.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,40 +1,81 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI1 contains the text of the glyph programs in the form of low-level assembly +code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'. +""" from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from . import DefaultTable +from fontTools.misc.loggingTools import LogMixin + -class table_T_S_I__1(DefaultTable.DefaultTable): +class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable): extras = {0xfffa: "ppgm", 0xfffb: "cvt", 0xfffc: "reserved", 0xfffd: "fpgm"} indextable = "TSI0" def decompile(self, data, ttFont): + totalLength = len(data) indextable = ttFont[self.indextable] - self.glyphPrograms = {} - for i in range(len(indextable.indices)): - glyphID, textLength, textOffset = indextable.indices[i] - if textLength == 0x8000: - # Ugh. Hi Beat! - textLength = indextable.indices[i+1][1] - if textLength > 0x8000: - pass # XXX Hmmm. 
- text = data[textOffset:textOffset+textLength] - assert len(text) == textLength - if text: - self.glyphPrograms[ttFont.getGlyphName(glyphID)] = text - - self.extraPrograms = {} - for i in range(len(indextable.extra_indices)): - extraCode, textLength, textOffset = indextable.extra_indices[i] - if textLength == 0x8000: - if self.extras[extraCode] == "fpgm": # this is the last one - textLength = len(data) - textOffset + for indices, isExtra in zip( + (indextable.indices, indextable.extra_indices), (False, True)): + programs = {} + for i, (glyphID, textLength, textOffset) in enumerate(indices): + if isExtra: + name = self.extras[glyphID] else: - textLength = indextable.extra_indices[i+1][1] - text = data[textOffset:textOffset+textLength] - assert len(text) == textLength - if text: - self.extraPrograms[self.extras[extraCode]] = text + name = ttFont.getGlyphName(glyphID) + if textOffset > totalLength: + self.log.warning("textOffset > totalLength; %r skipped" % name) + continue + if textLength < 0x8000: + # If the length stored in the record is less than 32768, then use + # that as the length of the record. + pass + elif textLength == 0x8000: + # If the length is 32768, compute the actual length as follows: + isLast = i == (len(indices)-1) + if isLast: + if isExtra: + # For the last "extra" record (the very last record of the + # table), the length is the difference between the total + # length of the TSI1 table and the textOffset of the final + # record. + nextTextOffset = totalLength + else: + # For the last "normal" record (the last record just prior + # to the record containing the "magic number"), the length + # is the difference between the textOffset of the record + # following the "magic number" (0xFFFE) record (i.e. the + # first "extra" record), and the textOffset of the last + # "normal" record. 
+ nextTextOffset = indextable.extra_indices[0][2] + else: + # For all other records with a length of 0x8000, the length is + # the difference between the textOffset of the record in + # question and the textOffset of the next record. + nextTextOffset = indices[i+1][2] + assert nextTextOffset >= textOffset, "entries not sorted by offset" + if nextTextOffset > totalLength: + self.log.warning( + "nextTextOffset > totalLength; %r truncated" % name) + nextTextOffset = totalLength + textLength = nextTextOffset - textOffset + else: + from fontTools import ttLib + raise ttLib.TTLibError( + "%r textLength (%d) must not be > 32768" % (name, textLength)) + text = data[textOffset:textOffset+textLength] + assert len(text) == textLength + text = tounicode(text, encoding='utf-8') + if text: + programs[name] = text + if isExtra: + self.extraPrograms = programs + else: + self.glyphPrograms = programs def compile(self, ttFont): if not hasattr(self, "glyphPrograms"): @@ -50,12 +91,12 @@ data = data + b"\015" # align on 2-byte boundaries, fill with return chars. Yum. name = glyphNames[i] if name in self.glyphPrograms: - text = tobytes(self.glyphPrograms[name]) + text = tobytes(self.glyphPrograms[name], encoding="utf-8") else: text = b"" textLength = len(text) if textLength >= 0x8000: - textLength = 0x8000 # XXX ??? + textLength = 0x8000 indices.append((i, textLength, len(data))) data = data + text @@ -66,12 +107,12 @@ data = data + b"\015" # align on 2-byte boundaries, fill with return chars. code, name = codes[i] if name in self.extraPrograms: - text = tobytes(self.extraPrograms[name]) + text = tobytes(self.extraPrograms[name], encoding="utf-8") else: text = b"" textLength = len(text) if textLength >= 0x8000: - textLength = 0x8000 # XXX ??? 
+ textLength = 0x8000 extra_indices.append((code, textLength, len(data))) data = data + text indextable.set(indices, extra_indices) @@ -86,7 +127,7 @@ continue writer.begintag("glyphProgram", name=name) writer.newline() - writer.write_noindent(text.replace(b"\r", b"\n")) + writer.write_noindent(text.replace("\r", "\n")) writer.newline() writer.endtag("glyphProgram") writer.newline() @@ -98,7 +139,7 @@ continue writer.begintag("extraProgram", name=name) writer.newline() - writer.write_noindent(text.replace(b"\r", b"\n")) + writer.write_noindent(text.replace("\r", "\n")) writer.newline() writer.endtag("extraProgram") writer.newline() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__2.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I__2.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__2.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I__2.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,3 +1,10 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI2 is the index table containing the lengths and offsets for the glyph +programs that are contained in the TSI3 table. It uses the same format as +the TSI0 table. +""" from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools import ttLib diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__3.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I__3.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__3.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I__3.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,3 +1,8 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI3 contains the text of the glyph programs in the form of 'VTTTalk' code. 
+""" from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools import ttLib diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__5.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I__5.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I__5.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I__5.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,3 +1,8 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI5 contains the VTT character groups. +""" from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools.misc.textTools import safeEval @@ -23,7 +28,7 @@ glyphNames = ttFont.getGlyphOrder() a = array.array("H") for i in range(len(glyphNames)): - a.append(self.glyphGrouping[glyphNames[i]]) + a.append(self.glyphGrouping.get(glyphNames[i], 0)) if sys.byteorder != "big": a.byteswap() return a.tostring() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_B_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_B_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_B_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_B_.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from . 
import asciiTable +from .T_S_I_V_ import table_T_S_I_V_ -class table_T_S_I_B_(asciiTable.asciiTable): +class table_T_S_I_B_(table_T_S_I_V_): pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_D_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_D_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_D_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_D_.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from . import asciiTable +from .T_S_I_V_ import table_T_S_I_V_ -class table_T_S_I_D_(asciiTable.asciiTable): +class table_T_S_I_D_(table_T_S_I_V_): pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_J_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_J_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_J_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_J_.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from . import asciiTable +from .T_S_I_V_ import table_T_S_I_V_ -class table_T_S_I_J_(asciiTable.asciiTable): +class table_T_S_I_J_(table_T_S_I_V_): pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_P_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_P_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_P_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_P_.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from . 
import asciiTable +from .T_S_I_V_ import table_T_S_I_V_ -class table_T_S_I_P_(asciiTable.asciiTable): +class table_T_S_I_P_(table_T_S_I_V_): pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_S_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_S_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_S_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_S_.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from . import asciiTable +from .T_S_I_V_ import table_T_S_I_V_ -class table_T_S_I_S_(asciiTable.asciiTable): +class table_T_S_I_S_(table_T_S_I_V_): pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_V_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_V_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_S_I_V_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_S_I_V_.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,4 +3,19 @@ from . import asciiTable class table_T_S_I_V_(asciiTable.asciiTable): - pass + + def toXML(self, writer, ttFont): + data = tostr(self.data) + # removing null bytes. XXX needed?? 
+ data = data.split('\0') + data = strjoin(data) + writer.begintag("source") + writer.newline() + writer.write_noindent(data.replace("\r", "\n")) + writer.newline() + writer.endtag("source") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + lines = strjoin(content).split("\n") + self.data = tobytes("\r".join(lines[1:-1])) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/T_T_F_A_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_T_F_A_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/T_T_F_A_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/T_T_F_A_.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable + +class table_T_T_F_A_(asciiTable.asciiTable): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/ttProgram.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/ttProgram.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/ttProgram.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/ttProgram.py 2018-01-08 12:40:40.000000000 +0000 @@ -5,6 +5,10 @@ from fontTools.misc.textTools import num2binary, binary2num, readHex import array import re +import logging + + +log = logging.getLogger(__name__) # first, the list of instructions that eat bytes or words from the instruction stream @@ -190,6 +194,8 @@ _pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/") +_indentRE = re.compile("^FDEF|IF|ELSE\[ \]\t.+") +_unindentRE = re.compile("^ELSE|ENDF|EIF\[ \]\t.+") def _skipWhite(data, pos): m = _whiteRE.match(data, pos) @@ -218,43 +224,74 @@ self._assemble() return self.bytecode.tostring() - def getAssembly(self, preserve=False): + def getAssembly(self, preserve=True): if not hasattr(self, "assembly"): self._disassemble(preserve=preserve) return self.assembly def toXML(self, writer, ttFont): if not hasattr (ttFont, 
"disassembleInstructions") or ttFont.disassembleInstructions: - assembly = self.getAssembly() - writer.begintag("assembly") - writer.newline() - i = 0 - nInstr = len(assembly) - while i < nInstr: - instr = assembly[i] - writer.write(instr) + try: + assembly = self.getAssembly() + except: + import traceback + tmp = StringIO() + traceback.print_exc(file=tmp) + msg = "An exception occurred during the decompilation of glyph program:\n\n" + msg += tmp.getvalue() + log.error(msg) + writer.begintag("bytecode") writer.newline() - m = _pushCountPat.match(instr) - i = i + 1 - if m: - nValues = int(m.group(1)) - line = [] - j = 0 - for j in range(nValues): - if j and not (j % 25): - writer.write(' '.join(line)) - writer.newline() - line = [] - line.append(assembly[i+j]) - writer.write(' '.join(line)) + writer.comment(msg.strip()) + writer.newline() + writer.dumphex(self.getBytecode()) + writer.endtag("bytecode") + writer.newline() + else: + if not assembly: + return + writer.begintag("assembly") + writer.newline() + i = 0 + indent = 0 + nInstr = len(assembly) + while i < nInstr: + instr = assembly[i] + if _unindentRE.match(instr): + indent -= 1 + writer.write(writer.indentwhite * indent) + writer.write(instr) writer.newline() - i = i + j + 1 - writer.endtag("assembly") + m = _pushCountPat.match(instr) + i = i + 1 + if m: + nValues = int(m.group(1)) + line = [] + j = 0 + for j in range(nValues): + if j and not (j % 25): + writer.write(writer.indentwhite * indent) + writer.write(' '.join(line)) + writer.newline() + line = [] + line.append(assembly[i+j]) + writer.write(writer.indentwhite * indent) + writer.write(' '.join(line)) + writer.newline() + i = i + j + 1 + if _indentRE.match(instr): + indent += 1 + writer.endtag("assembly") + writer.newline() else: + bytecode = self.getBytecode() + if not bytecode: + return writer.begintag("bytecode") writer.newline() - writer.dumphex(self.getBytecode()) + writer.dumphex(bytecode) writer.endtag("bytecode") + writer.newline() def 
fromXML(self, name, attrs, content, ttFont): if name == "assembly": @@ -266,7 +303,7 @@ self.fromBytecode(readHex(content)) def _assemble(self): - assembly = self.assembly + assembly = getattr(self, 'assembly', []) if isinstance(assembly, type([])): assembly = ' '.join(assembly) bytecode = [] @@ -391,7 +428,7 @@ def _disassemble(self, preserve=False): assembly = [] i = 0 - bytecode = self.bytecode + bytecode = getattr(self, 'bytecode', []) numBytecode = len(bytecode) while i < numBytecode: op = bytecode[i] @@ -477,6 +514,15 @@ __nonzero__ = __bool__ + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result + def _test(): """ diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/TupleVariation.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/TupleVariation.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/TupleVariation.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/TupleVariation.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,623 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval +import array +import io +import logging +import struct +import sys + + +# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm + +EMBEDDED_PEAK_TUPLE = 0x8000 +INTERMEDIATE_REGION = 0x4000 +PRIVATE_POINT_NUMBERS = 0x2000 + +DELTAS_ARE_ZERO = 0x80 +DELTAS_ARE_WORDS = 0x40 +DELTA_RUN_COUNT_MASK = 0x3f + +POINTS_ARE_WORDS = 0x80 +POINT_RUN_COUNT_MASK = 0x7f + +TUPLES_SHARE_POINT_NUMBERS = 0x8000 +TUPLE_COUNT_MASK = 0x0fff +TUPLE_INDEX_MASK = 0x0fff + +log = logging.getLogger(__name__) + + +class TupleVariation(object): + def __init__(self, axes, coordinates): + self.axes = axes.copy() + 
self.coordinates = coordinates[:] + + def __repr__(self): + axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])) + return "" % (axes, self.coordinates) + + def __eq__(self, other): + return self.coordinates == other.coordinates and self.axes == other.axes + + def getUsedPoints(self): + result = set() + for i, point in enumerate(self.coordinates): + if point is not None: + result.add(i) + return result + + def hasImpact(self): + """Returns True if this TupleVariation has any visible impact. + + If the result is False, the TupleVariation can be omitted from the font + without making any visible difference. + """ + for c in self.coordinates: + if c is not None: + return True + return False + + def toXML(self, writer, axisTags): + writer.begintag("tuple") + writer.newline() + for axis in axisTags: + value = self.axes.get(axis) + if value is not None: + minValue, value, maxValue = (float(v) for v in value) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + if minValue == defaultMinValue and maxValue == defaultMaxValue: + writer.simpletag("coord", axis=axis, value=value) + else: + writer.simpletag("coord", axis=axis, value=value, min=minValue, max=maxValue) + writer.newline() + wrote_any_deltas = False + for i, delta in enumerate(self.coordinates): + if type(delta) == tuple and len(delta) == 2: + writer.simpletag("delta", pt=i, x=delta[0], y=delta[1]) + writer.newline() + wrote_any_deltas = True + elif type(delta) == int: + writer.simpletag("delta", cvt=i, value=delta) + writer.newline() + wrote_any_deltas = True + elif delta is not None: + log.error("bad delta format") + writer.comment("bad delta #%d" % i) + writer.newline() + wrote_any_deltas = True + if not wrote_any_deltas: + writer.comment("no deltas") + writer.newline() + writer.endtag("tuple") + writer.newline() + + def fromXML(self, name, attrs, _content): + if name == "coord": + axis = 
attrs["axis"] + value = float(attrs["value"]) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + minValue = float(attrs.get("min", defaultMinValue)) + maxValue = float(attrs.get("max", defaultMaxValue)) + self.axes[axis] = (minValue, value, maxValue) + elif name == "delta": + if "pt" in attrs: + point = safeEval(attrs["pt"]) + x = safeEval(attrs["x"]) + y = safeEval(attrs["y"]) + self.coordinates[point] = (x, y) + elif "cvt" in attrs: + cvt = safeEval(attrs["cvt"]) + value = safeEval(attrs["value"]) + self.coordinates[cvt] = value + else: + log.warning("bad delta format: %s" % + ", ".join(sorted(attrs.keys()))) + + def compile(self, axisTags, sharedCoordIndices, sharedPoints): + tupleData = [] + + assert all(tag in axisTags for tag in self.axes.keys()), ("Unknown axis tag found.", self.axes.keys(), axisTags) + + coord = self.compileCoord(axisTags) + if coord in sharedCoordIndices: + flags = sharedCoordIndices[coord] + else: + flags = EMBEDDED_PEAK_TUPLE + tupleData.append(coord) + + intermediateCoord = self.compileIntermediateCoord(axisTags) + if intermediateCoord is not None: + flags |= INTERMEDIATE_REGION + tupleData.append(intermediateCoord) + + points = self.getUsedPoints() + if sharedPoints == points: + # Only use the shared points if they are identical to the actually used points + auxData = self.compileDeltas(sharedPoints) + usesSharedPoints = True + else: + flags |= PRIVATE_POINT_NUMBERS + numPointsInGlyph = len(self.coordinates) + auxData = self.compilePoints(points, numPointsInGlyph) + self.compileDeltas(points) + usesSharedPoints = False + + tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(tupleData) + return (tupleData, auxData, usesSharedPoints) + + def compileCoord(self, axisTags): + result = [] + for axis in axisTags: + _minValue, value, _maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + result.append(struct.pack(">h", floatToFixed(value, 14))) + return 
bytesjoin(result) + + def compileIntermediateCoord(self, axisTags): + needed = False + for axis in axisTags: + minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + if (minValue != defaultMinValue) or (maxValue != defaultMaxValue): + needed = True + break + if not needed: + return None + minCoords = [] + maxCoords = [] + for axis in axisTags: + minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + minCoords.append(struct.pack(">h", floatToFixed(minValue, 14))) + maxCoords.append(struct.pack(">h", floatToFixed(maxValue, 14))) + return bytesjoin(minCoords + maxCoords) + + @staticmethod + def decompileCoord_(axisTags, data, offset): + coord = {} + pos = offset + for axis in axisTags: + coord[axis] = fixedToFloat(struct.unpack(">h", data[pos:pos+2])[0], 14) + pos += 2 + return coord, pos + + @staticmethod + def compilePoints(points, numPointsInGlyph): + # If the set consists of all points in the glyph, it gets encoded with + # a special encoding: a single zero byte. + if len(points) == numPointsInGlyph: + return b"\0" + + # In the 'gvar' table, the packing of point numbers is a little surprising. + # It consists of multiple runs, each being a delta-encoded list of integers. + # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as + # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1. + # There are two types of runs, with values being either 8 or 16 bit unsigned + # integers. + points = list(points) + points.sort() + numPoints = len(points) + + # The binary representation starts with the total number of points in the set, + # encoded into one or two bytes depending on the value. 
+ if numPoints < 0x80: + result = [bytechr(numPoints)] + else: + result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)] + + MAX_RUN_LENGTH = 127 + pos = 0 + lastValue = 0 + while pos < numPoints: + run = io.BytesIO() + runLength = 0 + useByteEncoding = None + while pos < numPoints and runLength <= MAX_RUN_LENGTH: + curValue = points[pos] + delta = curValue - lastValue + if useByteEncoding is None: + useByteEncoding = 0 <= delta <= 0xff + if useByteEncoding and (delta > 0xff or delta < 0): + # we need to start a new run (which will not use byte encoding) + break + # TODO This never switches back to a byte-encoding from a short-encoding. + # That's suboptimal. + if useByteEncoding: + run.write(bytechr(delta)) + else: + run.write(bytechr(delta >> 8)) + run.write(bytechr(delta & 0xff)) + lastValue = curValue + pos += 1 + runLength += 1 + if useByteEncoding: + runHeader = bytechr(runLength - 1) + else: + runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS) + result.append(runHeader) + result.append(run.getvalue()) + + return bytesjoin(result) + + @staticmethod + def decompilePoints_(numPoints, data, offset, tableTag): + """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)""" + assert tableTag in ('cvar', 'gvar') + pos = offset + numPointsInData = byteord(data[pos]) + pos += 1 + if (numPointsInData & POINTS_ARE_WORDS) != 0: + numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | byteord(data[pos]) + pos += 1 + if numPointsInData == 0: + return (range(numPoints), pos) + + result = [] + while len(result) < numPointsInData: + runHeader = byteord(data[pos]) + pos += 1 + numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1 + point = 0 + if (runHeader & POINTS_ARE_WORDS) != 0: + points = array.array("H") + pointsSize = numPointsInRun * 2 + else: + points = array.array("B") + pointsSize = numPointsInRun + points.fromstring(data[pos:pos+pointsSize]) + if sys.byteorder != "big": + points.byteswap() + + assert len(points) 
== numPointsInRun + pos += pointsSize + + result.extend(points) + + # Convert relative to absolute + absolute = [] + current = 0 + for delta in result: + current += delta + absolute.append(current) + result = absolute + del absolute + + badPoints = {str(p) for p in result if p < 0 or p >= numPoints} + if badPoints: + log.warning("point %s out of range in '%s' table" % + (",".join(sorted(badPoints)), tableTag)) + return (result, pos) + + def compileDeltas(self, points): + deltaX = [] + deltaY = [] + for p in sorted(list(points)): + c = self.coordinates[p] + if type(c) is tuple and len(c) == 2: + deltaX.append(c[0]) + deltaY.append(c[1]) + elif type(c) is int: + deltaX.append(c) + elif c is not None: + raise ValueError("invalid type of delta: %s" % type(c)) + return self.compileDeltaValues_(deltaX) + self.compileDeltaValues_(deltaY) + + @staticmethod + def compileDeltaValues_(deltas): + """[value1, value2, value3, ...] --> bytestring + + Emits a sequence of runs. Each run starts with a + byte-sized header whose 6 least significant bits + (header & 0x3F) indicate how many values are encoded + in this run. The stored length is the actual length + minus one; run lengths are thus in the range [1..64]. + If the header byte has its most significant bit (0x80) + set, all values in this run are zero, and no data + follows. Otherwise, the header byte is followed by + ((header & 0x3F) + 1) signed values. If (header & + 0x40) is clear, the delta values are stored as signed + bytes; if (header & 0x40) is set, the delta values are + signed 16-bit integers. + """ # Explaining the format because the 'gvar' spec is hard to understand. 
+ stream = io.BytesIO() + pos = 0 + while pos < len(deltas): + value = deltas[pos] + if value == 0: + pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, stream) + elif value >= -128 and value <= 127: + pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, stream) + else: + pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, stream) + return stream.getvalue() + + @staticmethod + def encodeDeltaRunAsZeroes_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64 and deltas[pos] == 0: + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(DELTAS_ARE_ZERO | (runLength - 1))) + return pos + + @staticmethod + def encodeDeltaRunAsBytes_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64: + value = deltas[pos] + if value < -128 or value > 127: + break + # Within a byte-encoded run of deltas, a single zero + # is best stored literally as 0x00 value. However, + # if are two or more zeroes in a sequence, it is + # better to start a new run. For example, the sequence + # of deltas [15, 15, 0, 15, 15] becomes 6 bytes + # (04 0F 0F 00 0F 0F) when storing the zero value + # literally, but 7 bytes (01 0F 0F 80 01 0F 0F) + # when starting a new run. + if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0: + break + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(runLength - 1)) + for i in range(offset, pos): + stream.write(struct.pack('b', round(deltas[i]))) + return pos + + @staticmethod + def encodeDeltaRunAsWords_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64: + value = deltas[pos] + # Within a word-encoded run of deltas, it is easiest + # to start a new run (with a different encoding) + # whenever we encounter a zero value. 
For example, + # the sequence [0x6666, 0, 0x7777] needs 7 bytes when + # storing the zero literally (42 66 66 00 00 77 77), + # and equally 7 bytes when starting a new run + # (40 66 66 80 40 77 77). + if value == 0: + break + + # Within a word-encoded run of deltas, a single value + # in the range (-128..127) should be encoded literally + # because it is more compact. For example, the sequence + # [0x6666, 2, 0x7777] becomes 7 bytes when storing + # the value literally (42 66 66 00 02 77 77), but 8 bytes + # when starting a new run (40 66 66 00 02 40 77 77). + isByteEncodable = lambda value: value >= -128 and value <= 127 + if isByteEncodable(value) and pos+1 < numDeltas and isByteEncodable(deltas[pos+1]): + break + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(DELTAS_ARE_WORDS | (runLength - 1))) + for i in range(offset, pos): + stream.write(struct.pack('>h', round(deltas[i]))) + return pos + + @staticmethod + def decompileDeltas_(numDeltas, data, offset): + """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)""" + result = [] + pos = offset + while len(result) < numDeltas: + runHeader = byteord(data[pos]) + pos += 1 + numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1 + if (runHeader & DELTAS_ARE_ZERO) != 0: + result.extend([0] * numDeltasInRun) + else: + if (runHeader & DELTAS_ARE_WORDS) != 0: + deltas = array.array("h") + deltasSize = numDeltasInRun * 2 + else: + deltas = array.array("b") + deltasSize = numDeltasInRun + deltas.fromstring(data[pos:pos+deltasSize]) + if sys.byteorder != "big": + deltas.byteswap() + assert len(deltas) == numDeltasInRun + pos += deltasSize + result.extend(deltas) + assert len(result) == numDeltas + return (result, pos) + + @staticmethod + def getTupleSize_(flags, axisCount): + size = 4 + if (flags & EMBEDDED_PEAK_TUPLE) != 0: + size += axisCount * 2 + if (flags & INTERMEDIATE_REGION) != 0: + size += axisCount * 4 + return size + + +def decompileSharedTuples(axisTags, 
sharedTupleCount, data, offset): + result = [] + for _ in range(sharedTupleCount): + t, offset = TupleVariation.decompileCoord_(axisTags, data, offset) + result.append(t) + return result + + +def compileSharedTuples(axisTags, variations): + coordCount = {} + for var in variations: + coord = var.compileCoord(axisTags) + coordCount[coord] = coordCount.get(coord, 0) + 1 + sharedCoords = [(count, coord) + for (coord, count) in coordCount.items() if count > 1] + sharedCoords.sort(reverse=True) + MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1 + sharedCoords = sharedCoords[:MAX_NUM_SHARED_COORDS] + return [c[1] for c in sharedCoords] # Strip off counts. + + +def compileTupleVariationStore(variations, pointCount, + axisTags, sharedTupleIndices, + useSharedPoints=True): + variations = [v for v in variations if v.hasImpact()] + if len(variations) == 0: + return (0, b"", b"") + + # Each glyph variation tuples modifies a set of control points. To + # indicate which exact points are getting modified, a single tuple + # can either refer to a shared set of points, or the tuple can + # supply its private point numbers. Because the impact of sharing + # can be positive (no need for a private point list) or negative + # (need to supply 0,0 deltas for unused points), it is not obvious + # how to determine which tuples should take their points from the + # shared pool versus have their own. Perhaps we should resort to + # brute force, and try all combinations? However, if a glyph has n + # variation tuples, we would need to try 2^n combinations (because + # each tuple may or may not be part of the shared set). How many + # variations tuples do glyphs have? + # + # Skia.ttf: {3: 1, 5: 11, 6: 41, 7: 62, 8: 387, 13: 1, 14: 3} + # JamRegular.ttf: {3: 13, 4: 122, 5: 1, 7: 4, 8: 1, 9: 1, 10: 1} + # BuffaloGalRegular.ttf: {1: 16, 2: 13, 4: 2, 5: 4, 6: 19, 7: 1, 8: 3, 9: 8} + # (Reading example: In Skia.ttf, 41 glyphs have 6 variation tuples). + # + + # Is this even worth optimizing? 
If we never use a shared point + # list, the private lists will consume 112K for Skia, 5K for + # BuffaloGalRegular, and 15K for JamRegular. If we always use a + # shared point list, the shared lists will consume 16K for Skia, + # 3K for BuffaloGalRegular, and 10K for JamRegular. However, in + # the latter case the delta arrays will become larger, but I + # haven't yet measured by how much. From gut feeling (which may be + # wrong), the optimum is to share some but not all points; + # however, then we would need to try all combinations. + # + # For the time being, we try two variants and then pick the better one: + # (a) each tuple supplies its own private set of points; + # (b) all tuples refer to a shared set of points, which consists of + # "every control point in the glyph that has explicit deltas". + usedPoints = set() + for v in variations: + usedPoints |= v.getUsedPoints() + tuples = [] + data = [] + someTuplesSharePoints = False + sharedPointVariation = None # To keep track of a variation that uses shared points + for v in variations: + privateTuple, privateData, _ = v.compile( + axisTags, sharedTupleIndices, sharedPoints=None) + sharedTuple, sharedData, usesSharedPoints = v.compile( + axisTags, sharedTupleIndices, sharedPoints=usedPoints) + if useSharedPoints and (len(sharedTuple) + len(sharedData)) < (len(privateTuple) + len(privateData)): + tuples.append(sharedTuple) + data.append(sharedData) + someTuplesSharePoints |= usesSharedPoints + sharedPointVariation = v + else: + tuples.append(privateTuple) + data.append(privateData) + if someTuplesSharePoints: + # Use the last of the variations that share points for compiling the packed point data + data = sharedPointVariation.compilePoints(usedPoints, len(sharedPointVariation.coordinates)) + bytesjoin(data) + tupleVariationCount = TUPLES_SHARE_POINT_NUMBERS | len(tuples) + else: + data = bytesjoin(data) + tupleVariationCount = len(tuples) + tuples = bytesjoin(tuples) + return tupleVariationCount, tuples, data 
+ + +def decompileTupleVariationStore(tableTag, axisTags, + tupleVariationCount, pointCount, sharedTuples, + data, pos, dataPos): + numAxes = len(axisTags) + result = [] + if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0: + sharedPoints, dataPos = TupleVariation.decompilePoints_( + pointCount, data, dataPos, tableTag) + else: + sharedPoints = [] + for _ in range(tupleVariationCount & TUPLE_COUNT_MASK): + dataSize, flags = struct.unpack(">HH", data[pos:pos+4]) + tupleSize = TupleVariation.getTupleSize_(flags, numAxes) + tupleData = data[pos : pos + tupleSize] + pointDeltaData = data[dataPos : dataPos + dataSize] + result.append(decompileTupleVariation_( + pointCount, sharedTuples, sharedPoints, + tableTag, axisTags, tupleData, pointDeltaData)) + pos += tupleSize + dataPos += dataSize + return result + + +def decompileTupleVariation_(pointCount, sharedTuples, sharedPoints, + tableTag, axisTags, data, tupleData): + assert tableTag in ("cvar", "gvar"), tableTag + flags = struct.unpack(">H", data[2:4])[0] + pos = 4 + if (flags & EMBEDDED_PEAK_TUPLE) == 0: + peak = sharedTuples[flags & TUPLE_INDEX_MASK] + else: + peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos) + if (flags & INTERMEDIATE_REGION) != 0: + start, pos = TupleVariation.decompileCoord_(axisTags, data, pos) + end, pos = TupleVariation.decompileCoord_(axisTags, data, pos) + else: + start, end = inferRegion_(peak) + axes = {} + for axis in axisTags: + region = start[axis], peak[axis], end[axis] + if region != (0.0, 0.0, 0.0): + axes[axis] = region + pos = 0 + if (flags & PRIVATE_POINT_NUMBERS) != 0: + points, pos = TupleVariation.decompilePoints_( + pointCount, tupleData, pos, tableTag) + else: + points = sharedPoints + + deltas = [None] * pointCount + + if tableTag == "cvar": + deltas_cvt, pos = TupleVariation.decompileDeltas_( + len(points), tupleData, pos) + for p, delta in zip(points, deltas_cvt): + if 0 <= p < pointCount: + deltas[p] = delta + + elif tableTag == "gvar": + deltas_x, 
pos = TupleVariation.decompileDeltas_( + len(points), tupleData, pos) + deltas_y, pos = TupleVariation.decompileDeltas_( + len(points), tupleData, pos) + for p, x, y in zip(points, deltas_x, deltas_y): + if 0 <= p < pointCount: + deltas[p] = (x, y) + + return TupleVariation(axes, deltas) + + +def inferRegion_(peak): + """Infer start and end for a (non-intermediate) region + + This helper function computes the applicability region for + variation tuples whose INTERMEDIATE_REGION flag is not set in the + TupleVariationHeader structure. Variation tuples apply only to + certain regions of the variation space; outside that region, the + tuple has no effect. To make the binary encoding more compact, + TupleVariationHeaders can omit the intermediateStartTuple and + intermediateEndTuple fields. + """ + start, end = {}, {} + for (axis, value) in peak.items(): + start[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + end[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + return (start, end) diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/V_D_M_X_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/V_D_M_X_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/V_D_M_X_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/V_D_M_X_.py 2018-01-08 12:40:40.000000000 +0000 @@ -176,10 +176,10 @@ writer.comment("recs=%d, startsz=%d, endsz=%d" % (recs, startsz, endsz)) writer.newline() - for yPelHeight in group.keys(): - yMax, yMin = group[yPelHeight] + for yPelHeight, (yMax, yMin) in sorted(group.items()): writer.simpletag( - "record", yPelHeight=yPelHeight, yMax=yMax, yMin=yMin) + "record", + [('yPelHeight', yPelHeight), ('yMax', yMax), ('yMin', yMin)]) writer.newline() writer.endtag("group") writer.newline() diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/_v_h_e_a.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/_v_h_e_a.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/_v_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 +++ 
fonttools-3.21.2/Lib/fontTools/ttLib/tables/_v_h_e_a.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,11 +2,15 @@ from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval +from fontTools.misc.fixedTools import ( + ensureVersionIsLong as fi2ve, versionToFixed as ve2fi) from . import DefaultTable +import math + vheaFormat = """ > # big endian - tableVersion: 16.16F + tableVersion: L ascent: h descent: h lineGap: h @@ -16,7 +20,7 @@ yMaxExtent: h caretSlopeRise: h caretSlopeRun: h - reserved0: h + caretOffset: h reserved1: h reserved2: h reserved3: h @@ -29,29 +33,26 @@ # Note: Keep in sync with table__h_h_e_a - dependencies = ['vmtx', 'glyf'] + dependencies = ['vmtx', 'glyf', 'CFF '] def decompile(self, data, ttFont): sstruct.unpack(vheaFormat, data, self) def compile(self, ttFont): - if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')): self.recalc(ttFont) + self.tableVersion = fi2ve(self.tableVersion) return sstruct.pack(vheaFormat, self) def recalc(self, ttFont): - vtmxTable = ttFont['vmtx'] + if 'vmtx' in ttFont: + vmtxTable = ttFont['vmtx'] + self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values()) + + boundsHeightDict = {} if 'glyf' in ttFont: glyfTable = ttFont['glyf'] - INFINITY = 100000 - advanceHeightMax = 0 - minTopSideBearing = +INFINITY # arbitrary big number - minBottomSideBearing = +INFINITY # arbitrary big number - yMaxExtent = -INFINITY # arbitrary big negative number - for name in ttFont.getGlyphOrder(): - height, tsb = vtmxTable[name] - advanceHeightMax = max(advanceHeightMax, height) g = glyfTable[name] if g.numberOfContours == 0: continue @@ -59,32 +60,57 @@ # Composite glyph without extents set. # Calculate those. 
g.recalcBounds(glyfTable) + boundsHeightDict[name] = g.yMax - g.yMin + elif 'CFF ' in ttFont: + topDict = ttFont['CFF '].cff.topDictIndex[0] + for name in ttFont.getGlyphOrder(): + cs = topDict.CharStrings[name] + bounds = cs.calcBounds() + if bounds is not None: + boundsHeightDict[name] = int( + math.ceil(bounds[3]) - math.floor(bounds[1])) + + if boundsHeightDict: + minTopSideBearing = float('inf') + minBottomSideBearing = float('inf') + yMaxExtent = -float('inf') + for name, boundsHeight in boundsHeightDict.items(): + advanceHeight, tsb = vmtxTable[name] + bsb = advanceHeight - tsb - boundsHeight + extent = tsb + boundsHeight minTopSideBearing = min(minTopSideBearing, tsb) - bsb = height - tsb - (g.yMax - g.yMin) minBottomSideBearing = min(minBottomSideBearing, bsb) - extent = tsb + (g.yMax - g.yMin) yMaxExtent = max(yMaxExtent, extent) - - if yMaxExtent == -INFINITY: - # No glyph has outlines. - minTopSideBearing = 0 - minBottomSideBearing = 0 - yMaxExtent = 0 - - self.advanceHeightMax = advanceHeightMax self.minTopSideBearing = minTopSideBearing self.minBottomSideBearing = minBottomSideBearing self.yMaxExtent = yMaxExtent - else: - # XXX CFF recalc... - pass + + else: # No glyph has outlines. 
+ self.minTopSideBearing = 0 + self.minBottomSideBearing = 0 + self.yMaxExtent = 0 def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(vheaFormat) for name in names: value = getattr(self, name) + if name == "tableVersion": + value = fi2ve(value) + value = "0x%08x" % value writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): + if name == "tableVersion": + setattr(self, name, ve2fi(attrs["value"])) + return setattr(self, name, safeEval(attrs["value"])) + + # reserved0 is caretOffset for legacy reasons + @property + def reserved0(self): + return self.caretOffset + + @reserved0.setter + def reserved0(self, value): + self.caretOffset = value diff -Nru fonttools-3.0/Lib/fontTools/ttLib/tables/V_V_A_R_.py fonttools-3.21.2/Lib/fontTools/ttLib/tables/V_V_A_R_.py --- fonttools-3.0/Lib/fontTools/ttLib/tables/V_V_A_R_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/tables/V_V_A_R_.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_V_V_A_R_(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Lib/fontTools/ttLib/testdata/TestOTF-Regular.otx fonttools-3.21.2/Lib/fontTools/ttLib/testdata/TestOTF-Regular.otx --- fonttools-3.0/Lib/fontTools/ttLib/testdata/TestOTF-Regular.otx 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/testdata/TestOTF-Regular.otx 1970-01-01 00:00:00.000000000 +0000 @@ -1,519 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Copyright (c) 2015 by FontTools. No rights reserved. 
- - - Test OTF - - - Regular - - - FontTools: Test OTF: 2015 - - - Test OTF - - - Version 1.000 - - - TestOTF-Regular - - - Test OTF is not a trademark of FontTools. - - - FontTools - - - FontTools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - - - Test TTF - - - Copyright (c) 2015 by FontTools. No rights reserved. - - - Test OTF - - - Regular - - - FontTools: Test OTF: 2015 - - - Test OTF - - - Version 1.000 - - - TestOTF-Regular - - - Test OTF is not a trademark of FontTools. - - - FontTools - - - FontTools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 131 122 -131 hlineto - return - - - - - - 500 450 hmoveto - 750 -400 -750 vlineto - 50 50 rmoveto - 650 300 -650 vlineto - endchar - - - 0 endchar - - - 250 endchar - - - 723 55 hmoveto - -107 callsubr - 241 -122 rmoveto - -107 callsubr - 241 -122 rmoveto - -107 callsubr - endchar - - - 241 55 hmoveto - -107 callsubr - endchar - - - 250 endchar - - - - - - - - - - - - - - - - - - - - - - - - diff -Nru fonttools-3.0/Lib/fontTools/ttLib/testdata/TestTTF-Regular.ttx fonttools-3.21.2/Lib/fontTools/ttLib/testdata/TestTTF-Regular.ttx --- fonttools-3.0/Lib/fontTools/ttLib/testdata/TestTTF-Regular.ttx 2015-08-31 17:57:15.000000000 +0000 +++ 
fonttools-3.21.2/Lib/fontTools/ttLib/testdata/TestTTF-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 @@ -1,553 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - SVTCA[0] /* SetFPVectorToAxis */ - - - - - - SVTCA[0] /* SetFPVectorToAxis */ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - SVTCA[0] /* SetFPVectorToAxis */ - SVTCA[1] /* SetFPVectorToAxis */ - - - - - - - - - - - - - SVTCA[0] /* SetFPVectorToAxis */ - SVTCA[1] /* SetFPVectorToAxis */ - - - - - - - - - - - - SVTCA[0] /* SetFPVectorToAxis */ - SVTCA[1] /* SetFPVectorToAxis */ - - - - - - - - - - Copyright (c) 2015 by FontTools. No rights reserved. - - - Test TTF - - - Regular - - - FontTools: Test TTF: 2015 - - - Test TTF - - - Version 1.000 - - - TestTTF-Regular - - - Test TTF is not a trademark of FontTools. - - - FontTools - - - FontTools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - - - Test TTF - - - Copyright (c) 2015 by FontTools. No rights reserved. - - - Test TTF - - - Regular - - - FontTools: Test TTF: 2015 - - - Test TTF - - - Version 1.000 - - - TestTTF-Regular - - - Test TTF is not a trademark of FontTools. 
- - - FontTools - - - FontTools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff -Nru fonttools-3.0/Lib/fontTools/ttLib/testdata/test_woff2_metadata.xml fonttools-3.21.2/Lib/fontTools/ttLib/testdata/test_woff2_metadata.xml --- fonttools-3.0/Lib/fontTools/ttLib/testdata/test_woff2_metadata.xml 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/testdata/test_woff2_metadata.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,103 +0,0 @@ - - - - - - - - - - - Description without language. - - - Description with "en" language. - - - Description with "fr" language. - - - - - License without language. - - - License with "en" language. - - - License with "fr" language. - - - - - Copyright without language. - - - Copyright with "en" language. - - - Copyright with "fr" language. - - - - - Trademark without language. - - - Trademark with "en" language. - - - Trademark with "fr" language. 
- - - - - Extension 1 - Name Without Language - Extension 1 - Name With "en" Language - Extension 1 - Name With "fr" Language - - Extension 1 - Item 1 - Name Without Language - Extension 1 - Item 1 - Name With "en" Language - Extension 1 - Item 1 - Name With "fr" Language - Extension 1 - Item 1 - Value Without Language - Extension 1 - Item 1 - Value With "en" Language - Extension 1 - Item 1 - Value With "fr" Language - - - Extension 1 - Item 2 - Name Without Language - Extension 1 - Item 2 - Name With "en" Language - Extension 1 - Item 2 - Name With "fr" Language - Extension 1 - Item 2 - Value Without Language - Extension 1 - Item 2 - Value With "en" Language - Extension 1 - Item 2 - Value With "fr" Language - - - - Extension 2 - Name Without Language - Extension 2 - Name With "en" Language - Extension 2 - Name With "fr" Language - - Extension 2 - Item 1 - Name Without Language - Extension 2 - Item 1 - Name With "en" Language - Extension 2 - Item 1 - Name With "fr" Language - Extension 2 - Item 1 - Value Without Language - Extension 2 - Item 1 - Value With "en" Language - Extension 2 - Item 1 - Value With "fr" Language - - - Extension 2 - Item 2 - Name Without Language - Extension 2 - Item 2 - Name With "en" Language - Extension 2 - Item 2 - Name With "fr" Language - Extension 2 - Item 2 - Value Without Language - Extension 2 - Item 2 - Value With "en" Language - Extension 2 - Item 2 - Value With "fr" Language - - - Extension 2 - Item 3 - Name Without Language - Extension 2 - Item 3 - Name With "en" Language - Extension 2 - Item 3 - Name With "fr" Language - Extension 2 - Item 3 - Value Without Language - Extension 2 - Item 3 - Value With "en" Language - - - diff -Nru fonttools-3.0/Lib/fontTools/ttLib/woff2.py fonttools-3.21.2/Lib/fontTools/ttLib/woff2.py --- fonttools-3.0/Lib/fontTools/ttLib/woff2.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/woff2.py 2018-01-08 12:40:40.000000000 +0000 @@ -13,6 +13,10 @@ WOFFFlavorData, 
sfntDirectoryFormat, sfntDirectorySize, SFNTDirectoryEntry, sfntDirectoryEntrySize, calcChecksum) from fontTools.ttLib.tables import ttProgram +import logging + + +log = logging.getLogger(__name__) haveBrotli = False try: @@ -28,8 +32,9 @@ def __init__(self, file, checkChecksums=1, fontNumber=-1): if not haveBrotli: - print('The WOFF2 decoder requires the Brotli Python extension, available at:\n' - 'https://github.com/google/brotli', file=sys.stderr) + log.error( + 'The WOFF2 decoder requires the Brotli Python extension, available at: ' + 'https://github.com/google/brotli') raise ImportError("No module named brotli") self.file = file @@ -106,7 +111,8 @@ self.ttFont['loca'] = WOFF2LocaTable() glyfTable = self.ttFont['glyf'] = WOFF2GlyfTable() glyfTable.reconstruct(data, self.ttFont) - glyfTable.padding = padding + if padding: + glyfTable.padding = padding data = glyfTable.compile(self.ttFont) return data @@ -132,8 +138,9 @@ def __init__(self, file, numTables, sfntVersion="\000\001\000\000", flavor=None, flavorData=None): if not haveBrotli: - print('The WOFF2 encoder requires the Brotli Python extension, available at:\n' - 'https://github.com/google/brotli', file=sys.stderr) + log.error( + 'The WOFF2 encoder requires the Brotli Python extension, available at: ' + 'https://github.com/google/brotli') raise ImportError("No module named brotli") self.file = file @@ -226,7 +233,14 @@ """ if self.sfntVersion == "OTTO": return - for tag in ('maxp', 'head', 'loca', 'glyf'): + + # make up glyph names required to decompile glyf table + self._decompileTable('maxp') + numGlyphs = self.ttFont['maxp'].numGlyphs + glyphOrder = ['.notdef'] + ["glyph%.5d" % i for i in range(1, numGlyphs)] + self.ttFont.setGlyphOrder(glyphOrder) + + for tag in ('head', 'loca', 'glyf'): self._decompileTable(tag) self.ttFont['glyf'].padding = padding for tag in ('glyf', 'loca'): diff -Nru fonttools-3.0/Lib/fontTools/ttLib/woff2_test.py fonttools-3.21.2/Lib/fontTools/ttLib/woff2_test.py --- 
fonttools-3.0/Lib/fontTools/ttLib/woff2_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttLib/woff2_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,747 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools import ttLib -from .woff2 import (WOFF2Reader, woff2DirectorySize, woff2DirectoryFormat, - woff2FlagsSize, woff2UnknownTagSize, woff2Base128MaxSize, WOFF2DirectoryEntry, - getKnownTagIndex, packBase128, base128Size, woff2UnknownTagIndex, - WOFF2FlavorData, woff2TransformedTableTags, WOFF2GlyfTable, WOFF2LocaTable, - WOFF2Writer) -import unittest -import sstruct -import os -import random -import copy -from collections import OrderedDict - -haveBrotli = False -try: - import brotli - haveBrotli = True -except ImportError: - pass - - -# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires -# deprecation warnings if a program uses the old name. -if not hasattr(unittest.TestCase, 'assertRaisesRegex'): - unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - -current_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) -data_dir = os.path.join(current_dir, 'testdata') -TTX = os.path.join(data_dir, 'TestTTF-Regular.ttx') -OTX = os.path.join(data_dir, 'TestOTF-Regular.otx') -METADATA = os.path.join(data_dir, 'test_woff2_metadata.xml') - -TT_WOFF2 = BytesIO() -CFF_WOFF2 = BytesIO() - - -def setUpModule(): - if not haveBrotli: - raise unittest.SkipTest("No module named brotli") - assert os.path.exists(TTX) - assert os.path.exists(OTX) - # import TT-flavoured test font and save it as WOFF2 - ttf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - ttf.importXML(TTX, quiet=True) - ttf.flavor = "woff2" - ttf.save(TT_WOFF2, reorderTables=None) - # import CFF-flavoured test font and save it as WOFF2 - otf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - otf.importXML(OTX, quiet=True) - 
otf.flavor = "woff2" - otf.save(CFF_WOFF2, reorderTables=None) - - -class WOFF2ReaderTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.file = BytesIO(CFF_WOFF2.getvalue()) - cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - cls.font.importXML(OTX, quiet=True) - - def setUp(self): - self.file.seek(0) - - def test_bad_signature(self): - with self.assertRaisesRegex(ttLib.TTLibError, 'bad signature'): - WOFF2Reader(BytesIO(b"wOFF")) - - def test_not_enough_data_header(self): - incomplete_header = self.file.read(woff2DirectorySize - 1) - with self.assertRaisesRegex(ttLib.TTLibError, 'not enough data'): - WOFF2Reader(BytesIO(incomplete_header)) - - def test_incorrect_compressed_size(self): - data = self.file.read(woff2DirectorySize) - header = sstruct.unpack(woff2DirectoryFormat, data) - header['totalCompressedSize'] = 0 - data = sstruct.pack(woff2DirectoryFormat, header) - with self.assertRaises(brotli.error): - WOFF2Reader(BytesIO(data + self.file.read())) - - def test_incorrect_uncompressed_size(self): - decompress_backup = brotli.decompress - brotli.decompress = lambda data: b"" # return empty byte string - with self.assertRaisesRegex(ttLib.TTLibError, 'unexpected size for decompressed'): - WOFF2Reader(self.file) - brotli.decompress = decompress_backup - - def test_incorrect_file_size(self): - data = self.file.read(woff2DirectorySize) - header = sstruct.unpack(woff2DirectoryFormat, data) - header['length'] -= 1 - data = sstruct.pack(woff2DirectoryFormat, header) - with self.assertRaisesRegex( - ttLib.TTLibError, "doesn't match the actual file size"): - WOFF2Reader(BytesIO(data + self.file.read())) - - def test_num_tables(self): - tags = [t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')] - data = self.file.read(woff2DirectorySize) - header = sstruct.unpack(woff2DirectoryFormat, data) - self.assertEqual(header['numTables'], len(tags)) - - def test_table_tags(self): - tags = set([t for t in self.font.keys() if t not 
in ('GlyphOrder', 'DSIG')]) - reader = WOFF2Reader(self.file) - self.assertEqual(set(reader.keys()), tags) - - def test_get_normal_tables(self): - woff2Reader = WOFF2Reader(self.file) - specialTags = woff2TransformedTableTags + ('head', 'GlyphOrder', 'DSIG') - for tag in [t for t in self.font.keys() if t not in specialTags]: - origData = self.font.getTableData(tag) - decompressedData = woff2Reader[tag] - self.assertEqual(origData, decompressedData) - - def test_reconstruct_unknown(self): - reader = WOFF2Reader(self.file) - with self.assertRaisesRegex(ttLib.TTLibError, 'transform for table .* unknown'): - reader.reconstructTable('ZZZZ') - - -class WOFF2ReaderTTFTest(WOFF2ReaderTest): - """ Tests specific to TT-flavored fonts. """ - - @classmethod - def setUpClass(cls): - cls.file = BytesIO(TT_WOFF2.getvalue()) - cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - cls.font.importXML(TTX, quiet=True) - - def setUp(self): - self.file.seek(0) - - def test_reconstruct_glyf(self): - woff2Reader = WOFF2Reader(self.file) - reconstructedData = woff2Reader['glyf'] - self.assertEqual(self.font.getTableData('glyf'), reconstructedData) - - def test_reconstruct_loca(self): - woff2Reader = WOFF2Reader(self.file) - reconstructedData = woff2Reader['loca'] - self.assertEqual(self.font.getTableData('loca'), reconstructedData) - self.assertTrue(hasattr(woff2Reader.tables['glyf'], 'data')) - - def test_reconstruct_loca_not_match_orig_size(self): - reader = WOFF2Reader(self.file) - reader.tables['loca'].origLength -= 1 - with self.assertRaisesRegex( - ttLib.TTLibError, "'loca' table doesn't match original size"): - reader.reconstructTable('loca') - - -def normalise_table(font, tag, padding=4): - """ Return normalised table data. Keep 'font' instance unmodified. 
""" - assert tag in ('glyf', 'loca', 'head') - assert tag in font - if tag == 'head': - origHeadFlags = font['head'].flags - font['head'].flags |= (1 << 11) - tableData = font['head'].compile(font) - if font.sfntVersion in ("\x00\x01\x00\x00", "true"): - assert {'glyf', 'loca', 'head'}.issubset(font.keys()) - origIndexFormat = font['head'].indexToLocFormat - if hasattr(font['loca'], 'locations'): - origLocations = font['loca'].locations[:] - else: - origLocations = [] - glyfTable = ttLib.getTableClass('glyf')() - glyfTable.decompile(font.getTableData('glyf'), font) - glyfTable.padding = padding - if tag == 'glyf': - tableData = glyfTable.compile(font) - elif tag == 'loca': - glyfTable.compile(font) - tableData = font['loca'].compile(font) - if tag == 'head': - glyfTable.compile(font) - font['loca'].compile(font) - tableData = font['head'].compile(font) - font['head'].indexToLocFormat = origIndexFormat - font['loca'].set(origLocations) - if tag == 'head': - font['head'].flags = origHeadFlags - return tableData - - -def normalise_font(font, padding=4): - """ Return normalised font data. Keep 'font' instance unmodified. 
""" - # drop DSIG but keep a copy - DSIG_copy = copy.deepcopy(font['DSIG']) - del font['DSIG'] - # ovverride TTFont attributes - origFlavor = font.flavor - origRecalcBBoxes = font.recalcBBoxes - origRecalcTimestamp = font.recalcTimestamp - origLazy = font.lazy - font.flavor = None - font.recalcBBoxes = False - font.recalcTimestamp = False - font.lazy = True - # save font to temporary stream - infile = BytesIO() - font.save(infile) - infile.seek(0) - # reorder tables alphabetically - outfile = BytesIO() - reader = ttLib.sfnt.SFNTReader(infile) - writer = ttLib.sfnt.SFNTWriter( - outfile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) - for tag in sorted(reader.keys()): - if tag in woff2TransformedTableTags + ('head',): - writer[tag] = normalise_table(font, tag, padding) - else: - writer[tag] = reader[tag] - writer.close() - # restore font attributes - font['DSIG'] = DSIG_copy - font.flavor = origFlavor - font.recalcBBoxes = origRecalcBBoxes - font.recalcTimestamp = origRecalcTimestamp - font.lazy = origLazy - return outfile.getvalue() - - -class WOFF2DirectoryEntryTest(unittest.TestCase): - - def setUp(self): - self.entry = WOFF2DirectoryEntry() - - def test_not_enough_data_table_flags(self): - with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'flags'"): - self.entry.fromString(b"") - - def test_not_enough_data_table_tag(self): - incompleteData = bytearray([0x3F, 0, 0, 0]) - with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'tag'"): - self.entry.fromString(bytes(incompleteData)) - - def test_table_reserved_flags(self): - with self.assertRaisesRegex(ttLib.TTLibError, "bits 6-7 are reserved"): - self.entry.fromString(bytechr(0xC0)) - - def test_loca_zero_transformLength(self): - data = bytechr(getKnownTagIndex('loca')) # flags - data += packBase128(random.randint(1, 100)) # origLength - data += packBase128(1) # non-zero transformLength - with self.assertRaisesRegex( - ttLib.TTLibError, "transformLength of the 
'loca' table must be 0"): - self.entry.fromString(data) - - def test_fromFile(self): - unknownTag = Tag('ZZZZ') - data = bytechr(getKnownTagIndex(unknownTag)) - data += unknownTag.tobytes() - data += packBase128(random.randint(1, 100)) - expectedPos = len(data) - f = BytesIO(data + b'\0'*100) - self.entry.fromFile(f) - self.assertEqual(f.tell(), expectedPos) - - def test_transformed_toString(self): - self.entry.tag = Tag('glyf') - self.entry.flags = getKnownTagIndex(self.entry.tag) - self.entry.origLength = random.randint(101, 200) - self.entry.length = random.randint(1, 100) - expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength) + - base128Size(self.entry.length)) - data = self.entry.toString() - self.assertEqual(len(data), expectedSize) - - def test_known_toString(self): - self.entry.tag = Tag('head') - self.entry.flags = getKnownTagIndex(self.entry.tag) - self.entry.origLength = 54 - expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength)) - data = self.entry.toString() - self.assertEqual(len(data), expectedSize) - - def test_unknown_toString(self): - self.entry.tag = Tag('ZZZZ') - self.entry.flags = woff2UnknownTagIndex - self.entry.origLength = random.randint(1, 100) - expectedSize = (woff2FlagsSize + woff2UnknownTagSize + - base128Size(self.entry.origLength)) - data = self.entry.toString() - self.assertEqual(len(data), expectedSize) - - -class DummyReader(WOFF2Reader): - - def __init__(self, file, checkChecksums=1, fontNumber=-1): - self.file = file - for attr in ('majorVersion', 'minorVersion', 'metaOffset', 'metaLength', - 'metaOrigLength', 'privLength', 'privOffset'): - setattr(self, attr, 0) - - -class WOFF2FlavorDataTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - assert os.path.exists(METADATA) - with open(METADATA, 'rb') as f: - cls.xml_metadata = f.read() - cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) - # make random byte strings; font data must be 4-byte aligned - 
cls.fontdata = bytes(bytearray(random.sample(range(0, 256), 80))) - cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) - - def setUp(self): - self.file = BytesIO(self.fontdata) - self.file.seek(0, 2) - - def test_get_metaData_no_privData(self): - self.file.write(self.compressed_metadata) - reader = DummyReader(self.file) - reader.metaOffset = len(self.fontdata) - reader.metaLength = len(self.compressed_metadata) - reader.metaOrigLength = len(self.xml_metadata) - flavorData = WOFF2FlavorData(reader) - self.assertEqual(self.xml_metadata, flavorData.metaData) - - def test_get_privData_no_metaData(self): - self.file.write(self.privData) - reader = DummyReader(self.file) - reader.privOffset = len(self.fontdata) - reader.privLength = len(self.privData) - flavorData = WOFF2FlavorData(reader) - self.assertEqual(self.privData, flavorData.privData) - - def test_get_metaData_and_privData(self): - self.file.write(self.compressed_metadata + self.privData) - reader = DummyReader(self.file) - reader.metaOffset = len(self.fontdata) - reader.metaLength = len(self.compressed_metadata) - reader.metaOrigLength = len(self.xml_metadata) - reader.privOffset = reader.metaOffset + reader.metaLength - reader.privLength = len(self.privData) - flavorData = WOFF2FlavorData(reader) - self.assertEqual(self.xml_metadata, flavorData.metaData) - self.assertEqual(self.privData, flavorData.privData) - - def test_get_major_minorVersion(self): - reader = DummyReader(self.file) - reader.majorVersion = reader.minorVersion = 1 - flavorData = WOFF2FlavorData(reader) - self.assertEqual(flavorData.majorVersion, 1) - self.assertEqual(flavorData.minorVersion, 1) - - -class WOFF2WriterTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") - cls.font.importXML(OTX, quiet=True) - cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] - cls.numTables = len(cls.tags) - cls.file = 
BytesIO(CFF_WOFF2.getvalue()) - cls.file.seek(0, 2) - cls.length = (cls.file.tell() + 3) & ~3 - cls.setUpFlavorData() - - @classmethod - def setUpFlavorData(cls): - assert os.path.exists(METADATA) - with open(METADATA, 'rb') as f: - cls.xml_metadata = f.read() - cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) - cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) - - def setUp(self): - self.file.seek(0) - self.writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion) - - def test_DSIG_dropped(self): - self.writer['DSIG'] = b"\0" - self.assertEqual(len(self.writer.tables), 0) - self.assertEqual(self.writer.numTables, self.numTables-1) - - def test_no_rewrite_table(self): - self.writer['ZZZZ'] = b"\0" - with self.assertRaisesRegex(ttLib.TTLibError, "cannot rewrite"): - self.writer['ZZZZ'] = b"\0" - - def test_num_tables(self): - self.writer['ABCD'] = b"\0" - with self.assertRaisesRegex(ttLib.TTLibError, "wrong number of tables"): - self.writer.close() - - def test_required_tables(self): - font = ttLib.TTFont(flavor="woff2") - with self.assertRaisesRegex(ttLib.TTLibError, "missing required table"): - font.save(BytesIO()) - - def test_head_transform_flag(self): - headData = self.font.getTableData('head') - origFlags = byteord(headData[16]) - woff2font = ttLib.TTFont(self.file) - newHeadData = woff2font.getTableData('head') - modifiedFlags = byteord(newHeadData[16]) - self.assertNotEqual(origFlags, modifiedFlags) - restoredFlags = modifiedFlags & ~0x08 # turn off bit 11 - self.assertEqual(origFlags, restoredFlags) - - def test_tables_sorted_alphabetically(self): - expected = sorted([t for t in self.tags if t != 'DSIG']) - woff2font = ttLib.TTFont(self.file) - self.assertEqual(expected, list(woff2font.reader.keys())) - - def test_checksums(self): - normFile = BytesIO(normalise_font(self.font, padding=4)) - normFile.seek(0) - normFont = ttLib.TTFont(normFile, checkChecksums=2) - w2font = ttLib.TTFont(self.file) 
- # force reconstructing glyf table using 4-byte padding - w2font.reader.padding = 4 - for tag in [t for t in self.tags if t != 'DSIG']: - w2data = w2font.reader[tag] - normData = normFont.reader[tag] - if tag == "head": - w2data = w2data[:8] + b'\0\0\0\0' + w2data[12:] - normData = normData[:8] + b'\0\0\0\0' + normData[12:] - w2CheckSum = ttLib.sfnt.calcChecksum(w2data) - normCheckSum = ttLib.sfnt.calcChecksum(normData) - self.assertEqual(w2CheckSum, normCheckSum) - normCheckSumAdjustment = normFont['head'].checkSumAdjustment - self.assertEqual(normCheckSumAdjustment, w2font['head'].checkSumAdjustment) - - def test_calcSFNTChecksumsLengthsAndOffsets(self): - normFont = ttLib.TTFont(BytesIO(normalise_font(self.font, padding=4))) - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer._normaliseGlyfAndLoca(padding=4) - self.writer._setHeadTransformFlag() - self.writer.tables = OrderedDict(sorted(self.writer.tables.items())) - self.writer._calcSFNTChecksumsLengthsAndOffsets() - for tag, entry in normFont.reader.tables.items(): - self.assertEqual(entry.offset, self.writer.tables[tag].origOffset) - self.assertEqual(entry.length, self.writer.tables[tag].origLength) - self.assertEqual(entry.checkSum, self.writer.tables[tag].checkSum) - - def test_bad_sfntVersion(self): - for i in range(self.numTables): - self.writer[bytechr(65 + i)*4] = b"\0" - self.writer.sfntVersion = 'ZZZZ' - with self.assertRaisesRegex(ttLib.TTLibError, "bad sfntVersion"): - self.writer.close() - - def test_calcTotalSize_no_flavorData(self): - expected = self.length - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_calcTotalSize_with_metaData(self): - expected = self.length + len(self.compressed_metadata) - flavorData = self.writer.flavorData = WOFF2FlavorData() - 
flavorData.metaData = self.xml_metadata - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_calcTotalSize_with_privData(self): - expected = self.length + len(self.privData) - flavorData = self.writer.flavorData = WOFF2FlavorData() - flavorData.privData = self.privData - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_calcTotalSize_with_metaData_and_privData(self): - metaDataLength = (len(self.compressed_metadata) + 3) & ~3 - expected = self.length + metaDataLength + len(self.privData) - flavorData = self.writer.flavorData = WOFF2FlavorData() - flavorData.metaData = self.xml_metadata - flavorData.privData = self.privData - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_getVersion(self): - # no version - self.assertEqual((0, 0), self.writer._getVersion()) - # version from head.fontRevision - fontRevision = self.font['head'].fontRevision - versionTuple = tuple(int(i) for i in str(fontRevision).split(".")) - entry = self.writer.tables['head'] = ttLib.getTableClass('head')() - entry.data = self.font.getTableData('head') - self.assertEqual(versionTuple, self.writer._getVersion()) - # version from writer.flavorData - flavorData = self.writer.flavorData = WOFF2FlavorData() - flavorData.majorVersion, flavorData.minorVersion = (10, 11) - self.assertEqual((10, 11), self.writer._getVersion()) - - -class WOFF2WriterTTFTest(WOFF2WriterTest): - - @classmethod - def setUpClass(cls): - cls.font = 
ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") - cls.font.importXML(TTX, quiet=True) - cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] - cls.numTables = len(cls.tags) - cls.file = BytesIO(TT_WOFF2.getvalue()) - cls.file.seek(0, 2) - cls.length = (cls.file.tell() + 3) & ~3 - cls.setUpFlavorData() - - def test_normaliseGlyfAndLoca(self): - normTables = {} - for tag in ('head', 'loca', 'glyf'): - normTables[tag] = normalise_table(self.font, tag, padding=4) - for tag in self.tags: - tableData = self.font.getTableData(tag) - self.writer[tag] = tableData - if tag in normTables: - self.assertNotEqual(tableData, normTables[tag]) - self.writer._normaliseGlyfAndLoca(padding=4) - self.writer._setHeadTransformFlag() - for tag in normTables: - self.assertEqual(self.writer.tables[tag].data, normTables[tag]) - - -class WOFF2LocaTableTest(unittest.TestCase): - - def setUp(self): - self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - font['head'] = ttLib.getTableClass('head') - font['loca'] = WOFF2LocaTable() - font['glyf'] = WOFF2GlyfTable() - - def test_compile_short_loca(self): - locaTable = self.font['loca'] - locaTable.set(list(range(0, 0x20000, 2))) - self.font['glyf'].indexFormat = 0 - locaData = locaTable.compile(self.font) - self.assertEqual(len(locaData), 0x20000) - - def test_compile_short_loca_overflow(self): - locaTable = self.font['loca'] - locaTable.set(list(range(0x20000 + 1))) - self.font['glyf'].indexFormat = 0 - with self.assertRaisesRegex( - ttLib.TTLibError, "indexFormat is 0 but local offsets > 0x20000"): - locaTable.compile(self.font) - - def test_compile_short_loca_not_multiples_of_2(self): - locaTable = self.font['loca'] - locaTable.set([1, 3, 5, 7]) - self.font['glyf'].indexFormat = 0 - with self.assertRaisesRegex(ttLib.TTLibError, "offsets not multiples of 2"): - locaTable.compile(self.font) - - def test_compile_long_loca(self): - locaTable = self.font['loca'] - 
locaTable.set(list(range(0x20001))) - self.font['glyf'].indexFormat = 1 - locaData = locaTable.compile(self.font) - self.assertEqual(len(locaData), 0x20001 * 4) - - def test_compile_set_indexToLocFormat_0(self): - locaTable = self.font['loca'] - # offsets are all multiples of 2 and max length is < 0x10000 - locaTable.set(list(range(0, 0x20000, 2))) - locaTable.compile(self.font) - newIndexFormat = self.font['head'].indexToLocFormat - self.assertEqual(0, newIndexFormat) - - def test_compile_set_indexToLocFormat_1(self): - locaTable = self.font['loca'] - # offsets are not multiples of 2 - locaTable.set(list(range(10))) - locaTable.compile(self.font) - newIndexFormat = self.font['head'].indexToLocFormat - self.assertEqual(1, newIndexFormat) - # max length is >= 0x10000 - locaTable.set(list(range(0, 0x20000 + 1, 2))) - locaTable.compile(self.font) - newIndexFormat = self.font['head'].indexToLocFormat - self.assertEqual(1, newIndexFormat) - - -class WOFF2GlyfTableTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - font.importXML(TTX, quiet=True) - cls.tables = {} - cls.transformedTags = ('maxp', 'head', 'loca', 'glyf') - for tag in reversed(cls.transformedTags): # compile in inverse order - cls.tables[tag] = font.getTableData(tag) - infile = BytesIO(TT_WOFF2.getvalue()) - reader = WOFF2Reader(infile) - cls.transformedGlyfData = reader.tables['glyf'].loadData( - reader.transformBuffer) - - def setUp(self): - self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - font['head'] = ttLib.getTableClass('head')() - font['maxp'] = ttLib.getTableClass('maxp')() - font['loca'] = WOFF2LocaTable() - font['glyf'] = WOFF2GlyfTable() - for tag in self.transformedTags: - font[tag].decompile(self.tables[tag], font) - - def test_reconstruct_glyf_padded_4(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.padding = 4 - data = 
glyfTable.compile(self.font) - normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) - self.assertEqual(normGlyfData, data) - - def test_reconstruct_glyf_padded_2(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.padding = 2 - data = glyfTable.compile(self.font) - normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) - self.assertEqual(normGlyfData, data) - - def test_reconstruct_glyf_unpadded(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - data = glyfTable.compile(self.font) - self.assertEqual(self.tables['glyf'], data) - - def test_reconstruct_glyf_incorrect_glyphOrder(self): - glyfTable = WOFF2GlyfTable() - badGlyphOrder = self.font.getGlyphOrder()[:-1] - self.font.setGlyphOrder(badGlyphOrder) - with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): - glyfTable.reconstruct(self.transformedGlyfData, self.font) - - def test_reconstruct_glyf_missing_glyphOrder(self): - glyfTable = WOFF2GlyfTable() - del self.font.glyphOrder - numGlyphs = self.font['maxp'].numGlyphs - del self.font['maxp'] - glyfTable.reconstruct(self.transformedGlyfData, self.font) - expected = [".notdef"] - expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)]) - self.assertEqual(expected, glyfTable.glyphOrder) - - def test_reconstruct_loca_padded_4(self): - locaTable = self.font['loca'] = WOFF2LocaTable() - glyfTable = self.font['glyf'] = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.padding = 4 - glyfTable.compile(self.font) - data = locaTable.compile(self.font) - normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) - self.assertEqual(normLocaData, data) - - def test_reconstruct_loca_padded_2(self): - locaTable = self.font['loca'] = WOFF2LocaTable() - glyfTable = self.font['glyf'] = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - 
glyfTable.padding = 2 - glyfTable.compile(self.font) - data = locaTable.compile(self.font) - normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) - self.assertEqual(normLocaData, data) - - def test_reconstruct_loca_unpadded(self): - locaTable = self.font['loca'] = WOFF2LocaTable() - glyfTable = self.font['glyf'] = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.compile(self.font) - data = locaTable.compile(self.font) - self.assertEqual(self.tables['loca'], data) - - def test_reconstruct_glyf_header_not_enough_data(self): - with self.assertRaisesRegex(ttLib.TTLibError, "not enough 'glyf' data"): - WOFF2GlyfTable().reconstruct(b"", self.font) - - def test_reconstruct_glyf_table_incorrect_size(self): - msg = "incorrect size of transformed 'glyf'" - with self.assertRaisesRegex(ttLib.TTLibError, msg): - WOFF2GlyfTable().reconstruct(self.transformedGlyfData + b"\x00", self.font) - with self.assertRaisesRegex(ttLib.TTLibError, msg): - WOFF2GlyfTable().reconstruct(self.transformedGlyfData[:-1], self.font) - - def test_transform_glyf(self): - glyfTable = self.font['glyf'] - data = glyfTable.transform(self.font) - self.assertEqual(self.transformedGlyfData, data) - - def test_transform_glyf_incorrect_glyphOrder(self): - glyfTable = self.font['glyf'] - badGlyphOrder = self.font.getGlyphOrder()[:-1] - del glyfTable.glyphOrder - self.font.setGlyphOrder(badGlyphOrder) - with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): - glyfTable.transform(self.font) - glyfTable.glyphOrder = badGlyphOrder - with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): - glyfTable.transform(self.font) - - def test_transform_glyf_missing_glyphOrder(self): - glyfTable = self.font['glyf'] - del glyfTable.glyphOrder - del self.font.glyphOrder - numGlyphs = self.font['maxp'].numGlyphs - del self.font['maxp'] - glyfTable.transform(self.font) - expected = [".notdef"] - expected.extend(["glyph%.5d" % i for i in 
range(1, numGlyphs)]) - self.assertEqual(expected, glyfTable.glyphOrder) - - def test_roundtrip_glyf_reconstruct_and_transform(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - data = glyfTable.transform(self.font) - self.assertEqual(self.transformedGlyfData, data) - - def test_roundtrip_glyf_transform_and_reconstruct(self): - glyfTable = self.font['glyf'] - transformedData = glyfTable.transform(self.font) - newGlyfTable = WOFF2GlyfTable() - newGlyfTable.reconstruct(transformedData, self.font) - newGlyfTable.padding = 4 - reconstructedData = newGlyfTable.compile(self.font) - normGlyfData = normalise_table(self.font, 'glyf', newGlyfTable.padding) - self.assertEqual(normGlyfData, reconstructedData) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Lib/fontTools/ttx.py fonttools-3.21.2/Lib/fontTools/ttx.py --- fonttools-3.0/Lib/fontTools/ttx.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/ttx.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,22 +1,23 @@ """\ usage: ttx [options] inputfile1 [... inputfileN] - TTX %s -- From OpenType To XML And Back + TTX -- From OpenType To XML And Back If an input file is a TrueType or OpenType font file, it will be - dumped to an TTX file (an XML-based text format). - If an input file is a TTX file, it will be compiled to a TrueType - or OpenType font file. + decompiled to a TTX file (an XML-based text format). + If an input file is a TTX file, it will be compiled to whatever + format the data is in, a TrueType or OpenType/CFF font file. Output files are created so they are unique: an existing file is never overwritten. General options: - -h Help: print this message + -h Help: print this message. + --version: show version and exit. -d Specify a directory where the output files are to be created. -o Specify a file to write the output to. A special - value of of - would use the standard output. + value of - would use the standard output. 
-f Overwrite existing output file(s), ie. don't append numbers. -v Verbose: more messages will be written to stdout about what is being done. @@ -56,10 +57,13 @@ If no export format is specified 'raw' format is used. -e Don't ignore decompilation errors, but show a full traceback and abort. - -y Select font number for TrueType Collection, + -y Select font number for TrueType Collection (.ttc/.otc), starting from 0. --unicodedata Use custom database file to write character names in the comments of the cmap TTX output. + --newline Control how line endings are written in the XML + file. It can be 'LF', 'CR', or 'CRLF'. If not specified, the + default platform-specific line endings are used. Compile options: -m Merge with TrueType-input-file: specify a TrueType or OpenType @@ -69,6 +73,11 @@ file as-is. --recalc-timestamp Set font 'modified' timestamp to current time. By default, the modification time of the TTX file will be used. + --flavor Specify flavor of output font file. May be 'woff' + or 'woff2'. Note that WOFF2 requires the Brotli Python extension, + available at https://github.com/google/brotli + --with-zopfli Use Zopfli instead of Zlib to compress WOFF. 
The Python + extension is available at https://pypi.python.org/pypi/zopfli """ @@ -78,34 +87,19 @@ from fontTools.misc.macCreatorType import getMacCreatorAndType from fontTools.unicode import setUnicodeData from fontTools.misc.timeTools import timestampSinceEpoch +from fontTools.misc.loggingTools import Timer +from fontTools.misc.cliTools import makeOutputFileName import os import sys import getopt import re +import logging -def usage(): - from fontTools import version - print(__doc__ % version) - sys.exit(2) +log = logging.getLogger("fontTools.ttx") -numberAddedRE = re.compile("#\d+$") opentypeheaderRE = re.compile('''sfntVersion=['"]OTTO["']''') -def makeOutputFileName(input, outputDir, extension, overWrite=False): - dirName, fileName = os.path.split(input) - fileName, ext = os.path.splitext(fileName) - if outputDir: - dirName = outputDir - fileName = numberAddedRE.split(fileName)[0] - output = os.path.join(dirName, fileName + extension) - n = 1 - if not overWrite: - while os.path.exists(output): - output = os.path.join(dirName, fileName + "#" + repr(n) + extension) - n = n + 1 - return output - class Options(object): @@ -123,7 +117,10 @@ ignoreDecompileErrors = True bitmapGlyphDataFormat = 'raw' unicodedata = None + newlinestr = None recalcTimestamp = False + flavor = None + useZopfli = False def __init__(self, rawOptions, numFiles): self.onlyTables = [] @@ -132,13 +129,15 @@ for option, value in rawOptions: # general options if option == "-h": + print(__doc__) + sys.exit(0) + elif option == "--version": from fontTools import version - print(__doc__ % version) + print(version) sys.exit(0) elif option == "-d": if not os.path.isdir(value): - print("The -d option value must be an existing directory") - sys.exit(2) + raise getopt.GetoptError("The -d option value must be an existing directory") self.outputDir = value elif option == "-o": self.outputFile = value @@ -152,8 +151,12 @@ elif option == "-l": self.listTables = True elif option == "-t": + # pad with space if 
table tag length is less than 4 + value = value.ljust(4) self.onlyTables.append(value) elif option == "-x": + # pad with space if table tag length is less than 4 + value = value.ljust(4) self.skipTables.append(value) elif option == "-s": self.splitTables = True @@ -162,8 +165,8 @@ elif option == "-z": validOptions = ('raw', 'row', 'bitwise', 'extfile') if value not in validOptions: - print("-z does not allow %s as a format. Use %s" % (option, validOptions)) - sys.exit(2) + raise getopt.GetoptError( + "-z does not allow %s as a format. Use %s" % (option, validOptions)) self.bitmapGlyphDataFormat = value elif option == "-y": self.fontNumber = int(value) @@ -178,14 +181,41 @@ self.ignoreDecompileErrors = False elif option == "--unicodedata": self.unicodedata = value + elif option == "--newline": + validOptions = ('LF', 'CR', 'CRLF') + if value == "LF": + self.newlinestr = "\n" + elif value == "CR": + self.newlinestr = "\r" + elif value == "CRLF": + self.newlinestr = "\r\n" + else: + raise getopt.GetoptError( + "Invalid choice for --newline: %r (choose from %s)" + % (value, ", ".join(map(repr, validOptions)))) elif option == "--recalc-timestamp": self.recalcTimestamp = True - if self.onlyTables and self.skipTables: - print("-t and -x options are mutually exclusive") + elif option == "--flavor": + self.flavor = value + elif option == "--with-zopfli": + self.useZopfli = True + if self.verbose and self.quiet: + raise getopt.GetoptError("-q and -v options are mutually exclusive") + if self.verbose: + self.logLevel = logging.DEBUG + elif self.quiet: + self.logLevel = logging.WARNING + else: + self.logLevel = logging.INFO + if self.mergeFile and self.flavor: + raise getopt.GetoptError("-m and --flavor options are mutually exclusive") sys.exit(2) + if self.onlyTables and self.skipTables: + raise getopt.GetoptError("-t and -x options are mutually exclusive") if self.mergeFile and numFiles > 1: - print("Must specify exactly one TTX source file when using -m") - sys.exit(2) + 
raise getopt.GetoptError("Must specify exactly one TTX source file when using -m") + if self.flavor != 'woff' and self.useZopfli: + raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'") def ttList(input, output, options): @@ -213,45 +243,43 @@ ttf.close() +@Timer(log, 'Done dumping TTX in %(time).3f seconds') def ttDump(input, output, options): - if not options.quiet: - print('Dumping "%s" to "%s"...' % (input, output)) + log.info('Dumping "%s" to "%s"...', input, output) if options.unicodedata: setUnicodeData(options.unicodedata) - ttf = TTFont(input, 0, verbose=options.verbose, allowVID=options.allowVID, - quiet=options.quiet, + ttf = TTFont(input, 0, allowVID=options.allowVID, ignoreDecompileErrors=options.ignoreDecompileErrors, fontNumber=options.fontNumber) ttf.saveXML(output, - quiet=options.quiet, tables=options.onlyTables, skipTables=options.skipTables, splitTables=options.splitTables, disassembleInstructions=options.disassembleInstructions, - bitmapGlyphDataFormat=options.bitmapGlyphDataFormat) + bitmapGlyphDataFormat=options.bitmapGlyphDataFormat, + newlinestr=options.newlinestr) ttf.close() +@Timer(log, 'Done compiling TTX in %(time).3f seconds') def ttCompile(input, output, options): - if not options.quiet: - print('Compiling "%s" to "%s"...' % (input, output)) - ttf = TTFont(options.mergeFile, + log.info('Compiling "%s" to "%s"...' 
% (input, output)) + if options.useZopfli: + from fontTools.ttLib import sfnt + sfnt.USE_ZOPFLI = True + ttf = TTFont(options.mergeFile, flavor=options.flavor, recalcBBoxes=options.recalcBBoxes, recalcTimestamp=options.recalcTimestamp, - verbose=options.verbose, allowVID=options.allowVID) - ttf.importXML(input, quiet=options.quiet) + allowVID=options.allowVID) + ttf.importXML(input) - if not options.recalcTimestamp: + if not options.recalcTimestamp and 'head' in ttf: # use TTX file modification time for head "modified" timestamp mtime = os.path.getmtime(input) ttf['head'].modified = timestampSinceEpoch(mtime) ttf.save(output) - if options.verbose: - import time - print("finished at", time.strftime("%H:%M:%S", time.localtime(time.time()))) - def guessFileType(fileName): base, ext = os.path.splitext(fileName) @@ -259,12 +287,15 @@ f = open(fileName, "rb") except IOError: return None + header = f.read(256) + f.close() + if header.startswith(b'\xef\xbb\xbf>> script("a") + 'Latn' + >>> script(",") + 'Zyyy' + >>> script(unichr(0x10FFFF)) + 'Zzzz' + """ + code = byteord(char) + # 'bisect_right(a, x, lo=0, hi=len(a))' returns an insertion point which + # comes after (to the right of) any existing entries of x in a, and it + # partitions array a into two halves so that, for the left side + # all(val <= x for val in a[lo:i]), and for the right side + # all(val > x for val in a[i:hi]). + # Our 'SCRIPT_RANGES' is a sorted list of ranges (only their starting + # breakpoints); we want to use `bisect_right` to look up the range that + # contains the given codepoint: i.e. whose start is less than or equal + # to the codepoint. Thus, we subtract -1 from the index returned. + i = bisect_right(Scripts.RANGES, code) + return Scripts.VALUES[i-1] + + +def script_extension(char): + """ Return the script extension property assigned to the Unicode character + 'char' as a set of string. 
+ + >>> script_extension("a") == {'Latn'} + True + >>> script_extension(unichr(0x060C)) == {'Arab', 'Syrc', 'Thaa'} + True + >>> script_extension(unichr(0x10FFFF)) == {'Zzzz'} + True + """ + code = byteord(char) + i = bisect_right(ScriptExtensions.RANGES, code) + value = ScriptExtensions.VALUES[i-1] + if value is None: + # code points not explicitly listed for Script Extensions + # have as their value the corresponding Script property value + return {script(char)} + return value + + +def script_name(code, default=KeyError): + """ Return the long, human-readable script name given a four-letter + Unicode script code. + + If no matching name is found, a KeyError is raised by default. + + You can use the 'default' argument to return a fallback value (e.g. + 'Unknown' or None) instead of throwing an error. + """ + try: + return str(Scripts.NAMES[code].replace("_", " ")) + except KeyError: + if isinstance(default, type) and issubclass(default, KeyError): + raise + return default + + +_normalize_re = re.compile(r"[-_ ]+") + + +def _normalize_property_name(string): + """Remove case, strip space, '-' and '_' for loose matching.""" + return _normalize_re.sub("", string).lower() + + +_SCRIPT_CODES = {_normalize_property_name(v): k + for k, v in Scripts.NAMES.items()} + + +def script_code(script_name, default=KeyError): + """Returns the four-letter Unicode script code from its long name + + If no matching script code is found, a KeyError is raised by default. + + You can use the 'default' argument to return a fallback string (e.g. + 'Zzzz' or None) instead of throwing an error. + """ + normalized_name = _normalize_property_name(script_name) + try: + return _SCRIPT_CODES[normalized_name] + except KeyError: + if isinstance(default, type) and issubclass(default, KeyError): + raise + return default + + +def block(char): + """ Return the block property assigned to the Unicode character 'char' + as a string. 
+ + >>> block("a") + 'Basic Latin' + >>> block(unichr(0x060C)) + 'Arabic' + >>> block(unichr(0xEFFFF)) + 'No_Block' + """ + code = byteord(char) + i = bisect_right(Blocks.RANGES, code) + return Blocks.VALUES[i-1] diff -Nru fonttools-3.0/Lib/fontTools/unicode.py fonttools-3.21.2/Lib/fontTools/unicode.py --- fonttools-3.0/Lib/fontTools/unicode.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/unicode.py 2018-01-08 12:40:40.000000000 +0000 @@ -30,7 +30,12 @@ class _UnicodeBuiltin(object): def __getitem__(self, charCode): - import unicodedata + try: + # use unicodedata backport to python2, if available: + # https://github.com/mikekap/unicodedata2 + import unicodedata2 as unicodedata + except ImportError: + import unicodedata try: return unicodedata.name(unichr(charCode)) except ValueError: diff -Nru fonttools-3.0/Lib/fontTools/varLib/builder.py fonttools-3.21.2/Lib/fontTools/varLib/builder.py --- fonttools-3.0/Lib/fontTools/varLib/builder.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/builder.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,98 @@ +from __future__ import print_function, division, absolute_import +from fontTools import ttLib +from fontTools.ttLib.tables import otTables as ot + +# VariationStore + +def buildVarRegionAxis(axisSupport): + self = ot.VarRegionAxis() + self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport] + return self + +def buildVarRegion(support, axisTags): + assert all(tag in axisTags for tag in support.keys()), ("Unknown axis tag found.", support, axisTags) + self = ot.VarRegion() + self.VarRegionAxis = [] + for tag in axisTags: + self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0,0,0)))) + self.VarRegionAxisCount = len(self.VarRegionAxis) + return self + +def buildVarRegionList(supports, axisTags): + self = ot.VarRegionList() + self.RegionAxisCount = len(axisTags) + self.Region = [] + for support in supports: + 
self.Region.append(buildVarRegion(support, axisTags)) + self.RegionCount = len(self.Region) + return self + + +def _reorderItem(lst, narrows): + out = [] + count = len(lst) + for i in range(count): + if i not in narrows: + out.append(lst[i]) + for i in range(count): + if i in narrows: + out.append(lst[i]) + return out + +def varDataCalculateNumShorts(self, optimize=True): + count = self.VarRegionCount + items = self.Item + narrows = set(range(count)) + for item in items: + wides = [i for i in narrows if not (-128 <= item[i] <= 127)] + narrows.difference_update(wides) + if not narrows: + break + if optimize: + # Reorder columns such that all SHORT columns come before UINT8 + self.VarRegionIndex = _reorderItem(self.VarRegionIndex, narrows) + for i in range(self.ItemCount): + items[i] = _reorderItem(items[i], narrows) + self.NumShorts = count - len(narrows) + else: + wides = set(range(count)) - narrows + self.NumShorts = 1+max(wides) if wides else 0 + return self + +def buildVarData(varRegionIndices, items, optimize=True): + self = ot.VarData() + self.VarRegionIndex = list(varRegionIndices) + regionCount = self.VarRegionCount = len(self.VarRegionIndex) + records = self.Item = [] + if items: + for item in items: + assert len(item) == regionCount + records.append(list(item)) + self.ItemCount = len(self.Item) + varDataCalculateNumShorts(self, optimize=optimize) + return self + + +def buildVarStore(varRegionList, varDataList): + self = ot.VarStore() + self.Format = 1 + self.VarRegionList = varRegionList + self.VarData = list(varDataList) + self.VarDataCount = len(self.VarData) + return self + + +# Variation helpers + +def buildVarIdxMap(varIdxes): + # TODO Change VarIdxMap mapping to hold separate outer,inner indices + self = ot.VarIdxMap() + self.mapping = list(varIdxes) + return self + +def buildVarDevTable(varIdx): + self = ot.Device() + self.DeltaFormat = 0x8000 + self.StartSize = varIdx >> 16 + self.EndSize = varIdx & 0xFFFF + return self diff -Nru 
fonttools-3.0/Lib/fontTools/varLib/designspace.py fonttools-3.21.2/Lib/fontTools/varLib/designspace.py --- fonttools-3.0/Lib/fontTools/varLib/designspace.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/designspace.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,113 @@ +"""Rudimentary support for loading MutatorMath .designspace files.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +try: + import xml.etree.cElementTree as ET +except ImportError: + import xml.etree.ElementTree as ET + +__all__ = ['load', 'loads'] + +namespaces = {'xml': '{http://www.w3.org/XML/1998/namespace}'} + + +def _xml_parse_location(et): + loc = {} + for dim in et.find('location'): + assert dim.tag == 'dimension' + name = dim.attrib['name'] + value = float(dim.attrib['xvalue']) + assert name not in loc + loc[name] = value + return loc + + +def _load_item(et): + item = dict(et.attrib) + for element in et: + if element.tag == 'location': + value = _xml_parse_location(et) + else: + value = {} + if 'copy' in element.attrib: + value['copy'] = bool(int(element.attrib['copy'])) + # TODO load more?! 
+ item[element.tag] = value + return item + + +def _xml_parse_axis_or_map(element): + dic = {} + for name in element.attrib: + if name in ['name', 'tag']: + dic[name] = element.attrib[name] + else: + dic[name] = float(element.attrib[name]) + return dic + + +def _load_axis(et): + item = _xml_parse_axis_or_map(et) + maps = [] + labelnames = {} + for element in et: + assert element.tag in ['labelname', 'map'] + if element.tag == 'labelname': + lang = element.attrib["{0}lang".format(namespaces['xml'])] + labelnames[lang] = element.text + elif element.tag == 'map': + maps.append(_xml_parse_axis_or_map(element)) + if labelnames: + item['labelname'] = labelnames + if maps: + item['map'] = maps + return item + + +def _load(et): + designspace = {} + ds = et.getroot() + + axes_element = ds.find('axes') + if axes_element is not None: + axes = [] + for et in axes_element: + axes.append(_load_axis(et)) + designspace['axes'] = axes + + sources_element = ds.find('sources') + if sources_element is not None: + sources = [] + for et in sources_element: + sources.append(_load_item(et)) + designspace['sources'] = sources + + instances_element = ds.find('instances') + if instances_element is not None: + instances = [] + for et in instances_element: + instances.append(_load_item(et)) + designspace['instances'] = instances + + return designspace + + +def load(filename): + """Load designspace from a file name or object. 
+ Returns a dictionary containing three (optional) items: + - list of "axes" + - list of "sources" (aka masters) + - list of "instances" + """ + return _load(ET.parse(filename)) + + +def loads(string): + """Load designspace from a string.""" + return _load(ET.fromstring(string)) + +if __name__ == '__main__': + import sys + from pprint import pprint + for f in sys.argv[1:]: + pprint(load(f)) diff -Nru fonttools-3.0/Lib/fontTools/varLib/__init__.py fonttools-3.21.2/Lib/fontTools/varLib/__init__.py --- fonttools-3.0/Lib/fontTools/varLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,754 @@ +""" +Module for dealing with 'gvar'-style font variations, also known as run-time +interpolation. + +The ideas here are very similar to MutatorMath. There is even code to read +MutatorMath .designspace files in the varLib.designspace module. + +For now, if you run this file on a designspace file, it tries to find +ttf-interpolatable files for the masters and build a variable-font from +them. Such ttf-interpolatable and designspace files can be generated from +a Glyphs source, eg., using noto-source as an example: + + $ fontmake -o ttf-interpolatable -g NotoSansArabic-MM.glyphs + +Then you can make a variable-font this way: + + $ fonttools varLib master_ufo/NotoSansArabic.designspace + +API *will* change in near future. 
+""" +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.arrayTools import Vector +from fontTools.ttLib import TTFont, newTable +from fontTools.ttLib.tables._n_a_m_e import NameRecord +from fontTools.ttLib.tables._f_v_a_r import Axis, NamedInstance +from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates +from fontTools.ttLib.tables.ttProgram import Program +from fontTools.ttLib.tables.TupleVariation import TupleVariation +from fontTools.ttLib.tables import otTables as ot +from fontTools.varLib import builder, designspace, models, varStore +from fontTools.varLib.merger import VariationMerger, _all_equal +from fontTools.varLib.mvar import MVAR_ENTRIES +from fontTools.varLib.iup import iup_delta_optimize +from collections import OrderedDict +import os.path +import logging +from pprint import pformat + +log = logging.getLogger("fontTools.varLib") + + +class VarLibError(Exception): + pass + +# +# Creation routines +# + +def _add_fvar(font, axes, instances): + """ + Add 'fvar' table to font. + + axes is an ordered dictionary of DesignspaceAxis objects. + + instances is list of dictionary objects with 'location', 'stylename', + and possibly 'postscriptfontname' entries. + """ + + assert axes + assert isinstance(axes, OrderedDict) + + log.info("Generating fvar") + + fvar = newTable('fvar') + nameTable = font['name'] + + for a in axes.values(): + axis = Axis() + axis.axisTag = Tag(a.tag) + # TODO Skip axes that have no variation. 
+ axis.minValue, axis.defaultValue, axis.maxValue = a.minimum, a.default, a.maximum + axis.axisNameID = nameTable.addName(tounicode(a.labelname['en'])) + # TODO: + # Replace previous line with the following when the following issues are resolved: + # https://github.com/fonttools/fonttools/issues/930 + # https://github.com/fonttools/fonttools/issues/931 + # axis.axisNameID = nameTable.addMultilingualName(a.labelname, font) + fvar.axes.append(axis) + + for instance in instances: + coordinates = instance['location'] + name = tounicode(instance['stylename']) + psname = instance.get('postscriptfontname') + + inst = NamedInstance() + inst.subfamilyNameID = nameTable.addName(name) + if psname is not None: + psname = tounicode(psname) + inst.postscriptNameID = nameTable.addName(psname) + inst.coordinates = {axes[k].tag:axes[k].map_backward(v) for k,v in coordinates.items()} + #inst.coordinates = {axes[k].tag:v for k,v in coordinates.items()} + fvar.instances.append(inst) + + assert "fvar" not in font + font['fvar'] = fvar + + return fvar + +def _add_avar(font, axes): + """ + Add 'avar' table to font. + + axes is an ordered dictionary of DesignspaceAxis objects. + """ + + assert axes + assert isinstance(axes, OrderedDict) + + log.info("Generating avar") + + avar = newTable('avar') + + interesting = False + for axis in axes.values(): + # Currently, some rasterizers require that the default value maps + # (-1 to -1, 0 to 0, and 1 to 1) be present for all the segment + # maps, even when the default normalization mapping for the axis + # was not modified. + # https://github.com/googlei18n/fontmake/issues/295 + # https://github.com/fonttools/fonttools/issues/1011 + # TODO(anthrotype) revert this (and 19c4b37) when issue is fixed + curve = avar.segments[axis.tag] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} + if not axis.map: + continue + + items = sorted(axis.map.items()) + keys = [item[0] for item in items] + vals = [item[1] for item in items] + + # Current avar requirements. 
We don't have to enforce + # these on the designer and can deduce some ourselves, + # but for now just enforce them. + assert axis.minimum == min(keys) + assert axis.maximum == max(keys) + assert axis.default in keys + # No duplicates + assert len(set(keys)) == len(keys) + assert len(set(vals)) == len(vals) + # Ascending values + assert sorted(vals) == vals + + keys_triple = (axis.minimum, axis.default, axis.maximum) + vals_triple = tuple(axis.map_forward(v) for v in keys_triple) + + keys = [models.normalizeValue(v, keys_triple) for v in keys] + vals = [models.normalizeValue(v, vals_triple) for v in vals] + + if all(k == v for k, v in zip(keys, vals)): + continue + interesting = True + + curve.update(zip(keys, vals)) + + assert 0.0 in curve and curve[0.0] == 0.0 + assert -1.0 not in curve or curve[-1.0] == -1.0 + assert +1.0 not in curve or curve[+1.0] == +1.0 + # curve.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}) + + assert "avar" not in font + if not interesting: + log.info("No need for avar") + avar = None + else: + font['avar'] = avar + + return avar + +def _add_stat(font, axes): + + nameTable = font['name'] + + assert "STAT" not in font + STAT = font["STAT"] = newTable('STAT') + stat = STAT.table = ot.STAT() + stat.Version = 0x00010000 + + axisRecords = [] + for i,a in enumerate(axes.values()): + axis = ot.AxisRecord() + axis.AxisTag = Tag(a.tag) + # Meh. Reuse fvar nameID! 
+ axis.AxisNameID = nameTable.addName(tounicode(a.labelname['en'])) + axis.AxisOrdering = i + axisRecords.append(axis) + + axisRecordArray = ot.AxisRecordArray() + axisRecordArray.Axis = axisRecords + # XXX these should not be hard-coded but computed automatically + stat.DesignAxisRecordSize = 8 + stat.DesignAxisCount = len(axisRecords) + stat.DesignAxisRecord = axisRecordArray + +# TODO Move to glyf or gvar table proper +def _GetCoordinates(font, glyphName): + """font, glyphName --> glyph coordinates as expected by "gvar" table + + The result includes four "phantom points" for the glyph metrics, + as mandated by the "gvar" spec. + """ + glyf = font["glyf"] + if glyphName not in glyf.glyphs: return None + glyph = glyf[glyphName] + if glyph.isComposite(): + coord = GlyphCoordinates([(getattr(c, 'x', 0),getattr(c, 'y', 0)) for c in glyph.components]) + control = (glyph.numberOfContours,[c.glyphName for c in glyph.components]) + else: + allData = glyph.getCoordinates(glyf) + coord = allData[0] + control = (glyph.numberOfContours,)+allData[1:] + + # Add phantom points for (left, right, top, bottom) positions. + horizontalAdvanceWidth, leftSideBearing = font["hmtx"].metrics[glyphName] + if not hasattr(glyph, 'xMin'): + glyph.recalcBounds(glyf) + leftSideX = glyph.xMin - leftSideBearing + rightSideX = leftSideX + horizontalAdvanceWidth + # XXX these are incorrect. Load vmtx and fix. + topSideY = glyph.yMax + bottomSideY = -glyph.yMin + coord = coord.copy() + coord.extend([(leftSideX, 0), + (rightSideX, 0), + (0, topSideY), + (0, bottomSideY)]) + + return coord, control + +# TODO Move to glyf or gvar table proper +def _SetCoordinates(font, glyphName, coord): + glyf = font["glyf"] + assert glyphName in glyf.glyphs + glyph = glyf[glyphName] + + # Handle phantom points for (left, right, top, bottom) positions. 
+ assert len(coord) >= 4 + if not hasattr(glyph, 'xMin'): + glyph.recalcBounds(glyf) + leftSideX = coord[-4][0] + rightSideX = coord[-3][0] + topSideY = coord[-2][1] + bottomSideY = coord[-1][1] + + for _ in range(4): + del coord[-1] + + if glyph.isComposite(): + assert len(coord) == len(glyph.components) + for p,comp in zip(coord, glyph.components): + if hasattr(comp, 'x'): + comp.x,comp.y = p + elif glyph.numberOfContours is 0: + assert len(coord) == 0 + else: + assert len(coord) == len(glyph.coordinates) + glyph.coordinates = coord + + glyph.recalcBounds(glyf) + + horizontalAdvanceWidth = round(rightSideX - leftSideX) + leftSideBearing = round(glyph.xMin - leftSideX) + # XXX Handle vertical + font["hmtx"].metrics[glyphName] = horizontalAdvanceWidth, leftSideBearing + +def _add_gvar(font, model, master_ttfs, tolerance=0.5, optimize=True): + + assert tolerance >= 0 + + log.info("Generating gvar") + assert "gvar" not in font + gvar = font["gvar"] = newTable('gvar') + gvar.version = 1 + gvar.reserved = 0 + gvar.variations = {} + + for glyph in font.getGlyphOrder(): + + allData = [_GetCoordinates(m, glyph) for m in master_ttfs] + allCoords = [d[0] for d in allData] + allControls = [d[1] for d in allData] + control = allControls[0] + if (any(c != control for c in allControls)): + log.warning("glyph %s has incompatible masters; skipping" % glyph) + continue + del allControls + + # Update gvar + gvar.variations[glyph] = [] + deltas = model.getDeltas(allCoords) + supports = model.supports + assert len(deltas) == len(supports) + + # Prepare for IUP optimization + origCoords = deltas[0] + endPts = control[1] if control[0] >= 1 else list(range(len(control[1]))) + + for i,(delta,support) in enumerate(zip(deltas[1:], supports[1:])): + if all(abs(v) <= tolerance for v in delta.array): + continue + var = TupleVariation(support, delta) + if optimize: + delta_opt = iup_delta_optimize(delta, origCoords, endPts, tolerance=tolerance) + + if None in delta_opt: + # Use "optimized" 
version only if smaller... + var_opt = TupleVariation(support, delta_opt) + + axis_tags = sorted(support.keys()) # Shouldn't matter that this is different from fvar...? + tupleData, auxData, _ = var.compile(axis_tags, [], None) + unoptimized_len = len(tupleData) + len(auxData) + tupleData, auxData, _ = var_opt.compile(axis_tags, [], None) + optimized_len = len(tupleData) + len(auxData) + + if optimized_len < unoptimized_len: + var = var_opt + + gvar.variations[glyph].append(var) + +def _remove_TTHinting(font): + for tag in ("cvar", "cvt ", "fpgm", "prep"): + if tag in font: + del font[tag] + for attr in ("maxTwilightPoints", "maxStorage", "maxFunctionDefs", "maxInstructionDefs", "maxStackElements", "maxSizeOfInstructions"): + setattr(font["maxp"], attr, 0) + font["maxp"].maxZones = 1 + font["glyf"].removeHinting() + # TODO: Modify gasp table to deactivate gridfitting for all ranges? + +def _merge_TTHinting(font, model, master_ttfs, tolerance=0.5): + + log.info("Merging TT hinting") + assert "cvar" not in font + + # Check that the existing hinting is compatible + + # fpgm and prep table + + for tag in ("fpgm", "prep"): + all_pgms = [m[tag].program for m in master_ttfs if tag in m] + if len(all_pgms) == 0: + continue + if tag in font: + font_pgm = font[tag].program + else: + font_pgm = Program() + if any(pgm != font_pgm for pgm in all_pgms): + log.warning("Masters have incompatible %s tables, hinting is discarded." % tag) + _remove_TTHinting(font) + return + + # glyf table + + for name, glyph in font["glyf"].glyphs.items(): + all_pgms = [ + m["glyf"][name].program + for m in master_ttfs + if hasattr(m["glyf"][name], "program") + ] + if not any(all_pgms): + continue + glyph.expand(font["glyf"]) + if hasattr(glyph, "program"): + font_pgm = glyph.program + else: + font_pgm = Program() + if any(pgm != font_pgm for pgm in all_pgms if pgm): + log.warning("Masters have incompatible glyph programs in glyph '%s', hinting is discarded." 
% name) + _remove_TTHinting(font) + return + + # cvt table + + all_cvs = [Vector(m["cvt "].values) for m in master_ttfs if "cvt " in m] + + if len(all_cvs) == 0: + # There is no cvt table to make a cvar table from, we're done here. + return + + if len(all_cvs) != len(master_ttfs): + log.warning("Some masters have no cvt table, hinting is discarded.") + _remove_TTHinting(font) + return + + num_cvt0 = len(all_cvs[0]) + if (any(len(c) != num_cvt0 for c in all_cvs)): + log.warning("Masters have incompatible cvt tables, hinting is discarded.") + _remove_TTHinting(font) + return + + # We can build the cvar table now. + + cvar = font["cvar"] = newTable('cvar') + cvar.version = 1 + cvar.variations = [] + + deltas = model.getDeltas(all_cvs) + supports = model.supports + for i,(delta,support) in enumerate(zip(deltas[1:], supports[1:])): + delta = [round(d) for d in delta] + if all(abs(v) <= tolerance for v in delta): + continue + var = TupleVariation(support, delta) + cvar.variations.append(var) + +def _add_HVAR(font, model, master_ttfs, axisTags): + + log.info("Generating HVAR") + + hAdvanceDeltas = {} + metricses = [m["hmtx"].metrics for m in master_ttfs] + for glyph in font.getGlyphOrder(): + hAdvances = [metrics[glyph][0] for metrics in metricses] + # TODO move round somewhere else? + hAdvanceDeltas[glyph] = tuple(round(d) for d in model.getDeltas(hAdvances)[1:]) + + # We only support the direct mapping right now. 
+ + supports = model.supports[1:] + varTupleList = builder.buildVarRegionList(supports, axisTags) + varTupleIndexes = list(range(len(supports))) + n = len(supports) + items = [] + zeroes = [0]*n + for glyphName in font.getGlyphOrder(): + items.append(hAdvanceDeltas.get(glyphName, zeroes)) + while items and items[-1] is zeroes: + del items[-1] + + advanceMapping = None + # Add indirect mapping to save on duplicates + uniq = set(items) + # TODO Improve heuristic + if (len(items) - len(uniq)) * len(varTupleIndexes) > len(items): + newItems = sorted(uniq) + mapper = {v:i for i,v in enumerate(newItems)} + mapping = [mapper[item] for item in items] + while len(mapping) > 1 and mapping[-1] == mapping[-2]: + del mapping[-1] + advanceMapping = builder.buildVarIdxMap(mapping) + items = newItems + del mapper, mapping, newItems + del uniq + + varData = builder.buildVarData(varTupleIndexes, items) + varstore = builder.buildVarStore(varTupleList, [varData]) + + assert "HVAR" not in font + HVAR = font["HVAR"] = newTable('HVAR') + hvar = HVAR.table = ot.HVAR() + hvar.Version = 0x00010000 + hvar.VarStore = varstore + hvar.AdvWidthMap = advanceMapping + hvar.LsbMap = hvar.RsbMap = None + +def _add_MVAR(font, model, master_ttfs, axisTags): + + log.info("Generating MVAR") + + store_builder = varStore.OnlineVarStoreBuilder(axisTags) + store_builder.setModel(model) + + records = [] + lastTableTag = None + fontTable = None + tables = None + for tag, (tableTag, itemName) in sorted(MVAR_ENTRIES.items(), key=lambda kv: kv[1]): + if tableTag != lastTableTag: + tables = fontTable = None + if tableTag in font: + # TODO Check all masters have same table set? 
+ fontTable = font[tableTag] + tables = [master[tableTag] for master in master_ttfs] + lastTableTag = tableTag + if tables is None: + continue + + # TODO support gasp entries + + master_values = [getattr(table, itemName) for table in tables] + if _all_equal(master_values): + base, varIdx = master_values[0], None + else: + base, varIdx = store_builder.storeMasters(master_values) + setattr(fontTable, itemName, base) + + if varIdx is None: + continue + log.info(' %s: %s.%s %s', tag, tableTag, itemName, master_values) + rec = ot.MetricsValueRecord() + rec.ValueTag = tag + rec.VarIdx = varIdx + records.append(rec) + + assert "MVAR" not in font + if records: + MVAR = font["MVAR"] = newTable('MVAR') + mvar = MVAR.table = ot.MVAR() + mvar.Version = 0x00010000 + mvar.Reserved = 0 + mvar.VarStore = store_builder.finish() + # XXX these should not be hard-coded but computed automatically + mvar.ValueRecordSize = 8 + mvar.ValueRecordCount = len(records) + mvar.ValueRecord = sorted(records, key=lambda r: r.ValueTag) + + +def _merge_OTL(font, model, master_fonts, axisTags): + + log.info("Merging OpenType Layout tables") + merger = VariationMerger(model, axisTags, font) + + merger.mergeTables(font, master_fonts, ['GPOS']) + store = merger.store_builder.finish() + try: + GDEF = font['GDEF'].table + assert GDEF.Version <= 0x00010002 + except KeyError: + font['GDEF']= newTable('GDEF') + GDEFTable = font["GDEF"] = newTable('GDEF') + GDEF = GDEFTable.table = ot.GDEF() + GDEF.Version = 0x00010003 + GDEF.VarStore = store + + + +# Pretty much all of this file should be redesigned and moved inot submodules... +# Such a mess right now, but kludging along... 
+class _DesignspaceAxis(object): + + def __repr__(self): + return repr(self.__dict__) + + @staticmethod + def _map(v, map): + keys = map.keys() + if not keys: + return v + if v in keys: + return map[v] + k = min(keys) + if v < k: + return v + map[k] - k + k = max(keys) + if v > k: + return v + map[k] - k + # Interpolate + a = max(k for k in keys if k < v) + b = min(k for k in keys if k > v) + va = map[a] + vb = map[b] + return va + (vb - va) * (v - a) / (b - a) + + def map_forward(self, v): + if self.map is None: return v + return self._map(v, self.map) + + def map_backward(self, v): + if self.map is None: return v + map = {v:k for k,v in self.map.items()} + return self._map(v, map) + + +def load_designspace(designspace_filename): + + ds = designspace.load(designspace_filename) + axes = ds.get('axes') + masters = ds.get('sources') + if not masters: + raise VarLibError("no sources found in .designspace") + instances = ds.get('instances', []) + + standard_axis_map = OrderedDict([ + ('weight', ('wght', {'en':'Weight'})), + ('width', ('wdth', {'en':'Width'})), + ('slant', ('slnt', {'en':'Slant'})), + ('optical', ('opsz', {'en':'Optical Size'})), + ]) + + + # Setup axes + axis_objects = OrderedDict() + if axes is not None: + for axis_dict in axes: + axis_name = axis_dict.get('name') + if not axis_name: + axis_name = axis_dict['name'] = axis_dict['tag'] + if 'map' not in axis_dict: + axis_dict['map'] = None + else: + axis_dict['map'] = {m['input']:m['output'] for m in axis_dict['map']} + + if axis_name in standard_axis_map: + if 'tag' not in axis_dict: + axis_dict['tag'] = standard_axis_map[axis_name][0] + if 'labelname' not in axis_dict: + axis_dict['labelname'] = standard_axis_map[axis_name][1].copy() + + axis = _DesignspaceAxis() + for item in ['name', 'tag', 'minimum', 'default', 'maximum', 'map']: + assert item in axis_dict, 'Axis does not have "%s"' % item + if 'labelname' not in axis_dict: + axis_dict['labelname'] = {'en': axis_name} + axis.__dict__ = axis_dict + 
axis_objects[axis_name] = axis + else: + # No element. Guess things... + base_idx = None + for i,m in enumerate(masters): + if 'info' in m and m['info']['copy']: + assert base_idx is None + base_idx = i + assert base_idx is not None, "Cannot find 'base' master; Either add element to .designspace document, or add element to one of the sources in the .designspace document." + + master_locs = [o['location'] for o in masters] + base_loc = master_locs[base_idx] + axis_names = set(base_loc.keys()) + assert all(name in standard_axis_map for name in axis_names), "Non-standard axis found and there exist no element." + + for name,(tag,labelname) in standard_axis_map.items(): + if name not in axis_names: + continue + + axis = _DesignspaceAxis() + axis.name = name + axis.tag = tag + axis.labelname = labelname.copy() + axis.default = base_loc[name] + axis.minimum = min(m[name] for m in master_locs if name in m) + axis.maximum = max(m[name] for m in master_locs if name in m) + axis.map = None + # TODO Fill in weight / width mapping from OS/2 table? Need loading fonts... + axis_objects[name] = axis + del base_idx, base_loc, axis_names, master_locs + axes = axis_objects + del axis_objects + log.info("Axes:\n%s", pformat(axes)) + + + # Check all master and instance locations are valid and fill in defaults + for obj in masters+instances: + obj_name = obj.get('name', obj.get('stylename', '')) + loc = obj['location'] + for axis_name in loc.keys(): + assert axis_name in axes, "Location axis '%s' unknown for '%s'." 
% (axis_name, obj_name) + for axis_name,axis in axes.items(): + if axis_name not in loc: + loc[axis_name] = axis.default + else: + v = axis.map_backward(loc[axis_name]) + assert axis.minimum <= v <= axis.maximum, "Location for axis '%s' (mapped to %s) out of range for '%s' [%s..%s]" % (axis_name, v, obj_name, axis.minimum, axis.maximum) + + + # Normalize master locations + + normalized_master_locs = [o['location'] for o in masters] + log.info("Internal master locations:\n%s", pformat(normalized_master_locs)) + + # TODO This mapping should ideally be moved closer to logic in _add_fvar/avar + internal_axis_supports = {} + for axis in axes.values(): + triple = (axis.minimum, axis.default, axis.maximum) + internal_axis_supports[axis.name] = [axis.map_forward(v) for v in triple] + log.info("Internal axis supports:\n%s", pformat(internal_axis_supports)) + + normalized_master_locs = [models.normalizeLocation(m, internal_axis_supports) for m in normalized_master_locs] + log.info("Normalized master locations:\n%s", pformat(normalized_master_locs)) + + + # Find base master + base_idx = None + for i,m in enumerate(normalized_master_locs): + if all(v == 0 for v in m.values()): + assert base_idx is None + base_idx = i + assert base_idx is not None, "Base master not found; no master at default location?" + log.info("Index of base master: %s", base_idx) + + return axes, internal_axis_supports, base_idx, normalized_master_locs, masters, instances + + +def build(designspace_filename, master_finder=lambda s:s): + """ + Build variation font from a designspace file. + + If master_finder is set, it should be a callable that takes master + filename as found in designspace file and map it to master font + binary as to be opened (eg. .ttf or .otf). 
+ """ + + axes, internal_axis_supports, base_idx, normalized_master_locs, masters, instances = load_designspace(designspace_filename) + + log.info("Building variable font") + log.info("Loading master fonts") + basedir = os.path.dirname(designspace_filename) + master_ttfs = [master_finder(os.path.join(basedir, m['filename'])) for m in masters] + master_fonts = [TTFont(ttf_path) for ttf_path in master_ttfs] + # Reload base font as target font + vf = TTFont(master_ttfs[base_idx]) + + # TODO append masters as named-instances as well; needs .designspace change. + fvar = _add_fvar(vf, axes, instances) + _add_stat(vf, axes) + _add_avar(vf, axes) + del instances + + # Map from axis names to axis tags... + normalized_master_locs = [{axes[k].tag:v for k,v in loc.items()} for loc in normalized_master_locs] + #del axes + # From here on, we use fvar axes only + axisTags = [axis.axisTag for axis in fvar.axes] + + # Assume single-model for now. + model = models.VariationModel(normalized_master_locs, axisOrder=axisTags) + assert 0 == model.mapping[base_idx] + + log.info("Building variations tables") + _add_MVAR(vf, model, master_fonts, axisTags) + _add_HVAR(vf, model, master_fonts, axisTags) + _merge_OTL(vf, model, master_fonts, axisTags) + if 'glyf' in vf: + _add_gvar(vf, model, master_fonts) + _merge_TTHinting(vf, model, master_fonts) + + return vf, model, master_ttfs + + +def main(args=None): + from argparse import ArgumentParser + from fontTools import configLogger + + parser = ArgumentParser(prog='varLib') + parser.add_argument('designspace') + options = parser.parse_args(args) + + # TODO: allow user to configure logging via command-line options + configLogger(level="INFO") + + designspace_filename = options.designspace + finder = lambda s: s.replace('master_ufo', 'master_ttf_interpolatable').replace('.ufo', '.ttf') + outfile = os.path.splitext(designspace_filename)[0] + '-VF.ttf' + + vf, model, master_ttfs = build(designspace_filename, finder) + + log.info("Saving variation 
font %s", outfile) + vf.save(outfile) + + +if __name__ == "__main__": + import sys + if len(sys.argv) > 1: + sys.exit(main()) + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/varLib/interpolatable.py fonttools-3.21.2/Lib/fontTools/varLib/interpolatable.py --- fonttools-3.0/Lib/fontTools/varLib/interpolatable.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/interpolatable.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,181 @@ +""" +Tool to find wrong contour order between different masters, and +other interpolatability (or lack thereof) issues. + +Call as: +$ fonttools varLib.interpolatable font1 font2 ... +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +from fontTools.pens.basePen import AbstractPen, BasePen +from fontTools.pens.recordingPen import RecordingPen +from fontTools.pens.statisticsPen import StatisticsPen +import itertools + + +class PerContourPen(BasePen): + def __init__(self, Pen, glyphset=None): + BasePen.__init__(self, glyphset) + self._glyphset = glyphset + self._Pen = Pen + self._pen = None + self.value = [] + def _moveTo(self, p0): + self._newItem() + self._pen.moveTo(p0) + def _lineTo(self, p1): + self._pen.lineTo(p1) + def _qCurveToOne(self, p1, p2): + self._pen.qCurveTo(p1, p2) + def _curveToOne(self, p1, p2, p3): + self._pen.curveTo(p1, p2, p3) + def _closePath(self): + self._pen.closePath() + self._pen = None + def _endPath(self): + self._pen.endPath() + self._pen = None + + def _newItem(self): + self._pen = pen = self._Pen() + self.value.append(pen) + +class PerContourOrComponentPen(PerContourPen): + + def addComponent(self, glyphName, transformation): + self._newItem() + self.value[-1].addComponent(glyphName, transformation) + + +def _vdiff(v0, v1): + return tuple(b-a for a,b in zip(v0,v1)) +def _vlen(vec): + v = 0 + for x in vec: + v += x*x + return v + +def _matching_cost(G, matching): + return 
sum(G[i][j] for i,j in enumerate(matching)) + +def min_cost_perfect_bipartite_matching(G): + n = len(G) + try: + from scipy.optimize import linear_sum_assignment + rows, cols = linear_sum_assignment(G) + assert (rows == list(range(n))).all() + return list(cols), _matching_cost(G, cols) + except ImportError: + pass + + try: + from munkres import Munkres + cols = [None] * n + for row,col in Munkres().compute(G): + cols[row] = col + return cols, _matching_cost(G, cols) + except ImportError: + pass + + if n > 6: + raise Exception("Install Python module 'munkres' or 'scipy >= 0.17.0'") + + # Otherwise just brute-force + permutations = itertools.permutations(range(n)) + best = list(next(permutations)) + best_cost = _matching_cost(G, best) + for p in permutations: + cost = _matching_cost(G, p) + if cost < best_cost: + best, best_cost = list(p), cost + return best, best_cost + + +def test(glyphsets, glyphs=None, names=None): + + if names is None: + names = glyphsets + if glyphs is None: + glyphs = glyphsets[0].keys() + + hist = [] + for glyph_name in glyphs: + #print() + #print(glyph_name) + + try: + allVectors = [] + for glyphset,name in zip(glyphsets, names): + #print('.', end='') + glyph = glyphset[glyph_name] + + perContourPen = PerContourOrComponentPen(RecordingPen, glyphset=glyphset) + glyph.draw(perContourPen) + contourPens = perContourPen.value + del perContourPen + + contourVectors = [] + allVectors.append(contourVectors) + for contour in contourPens: + stats = StatisticsPen(glyphset=glyphset) + contour.replay(stats) + size = abs(stats.area) ** .5 * .5 + vector = ( + int(size), + int(stats.meanX), + int(stats.meanY), + int(stats.stddevX * 2), + int(stats.stddevY * 2), + int(stats.correlation * size), + ) + contourVectors.append(vector) + #print(vector) + + # Check each master against the next one in the list. + for i,(m0,m1) in enumerate(zip(allVectors[:-1],allVectors[1:])): + if len(m0) != len(m1): + print('%s: %s+%s: Glyphs not compatible!!!!!' 
% (glyph_name, names[i], names[i+1])) + continue + if not m0: + continue + costs = [[_vlen(_vdiff(v0,v1)) for v1 in m1] for v0 in m0] + matching, matching_cost = min_cost_perfect_bipartite_matching(costs) + if matching != list(range(len(m0))): + print('%s: %s+%s: Glyph has wrong contour/component order: %s' % (glyph_name, names[i], names[i+1], matching)) #, m0, m1) + break + upem = 2048 + item_cost = round((matching_cost / len(m0) / len(m0[0])) ** .5 / upem * 100) + hist.append(item_cost) + threshold = 7 + if item_cost >= threshold: + print('%s: %s+%s: Glyph has very high cost: %d%%' % (glyph_name, names[i], names[i+1], item_cost)) + + + except ValueError as e: + print('%s: %s: math error %s; skipping glyph.' % (glyph_name, name, e)) + print(contour.value) + #raise + #for x in hist: + # print(x) + +def main(args): + filenames = args + glyphs = None + #glyphs = ['uni08DB', 'uniFD76'] + #glyphs = ['uni08DE', 'uni0034'] + #glyphs = ['uni08DE', 'uni0034', 'uni0751', 'uni0753', 'uni0754', 'uni08A4', 'uni08A4.fina', 'uni08A5.fina'] + + from os.path import basename + names = [basename(filename).rsplit('.', 1)[0] for filename in filenames] + + from fontTools.ttLib import TTFont + fonts = [TTFont(filename) for filename in filenames] + + glyphsets = [font.getGlyphSet() for font in fonts] + test(glyphsets, glyphs=glyphs, names=names) + +if __name__ == '__main__': + import sys + main(sys.argv[1:]) diff -Nru fonttools-3.0/Lib/fontTools/varLib/interpolate_layout.py fonttools-3.21.2/Lib/fontTools/varLib/interpolate_layout.py --- fonttools-3.0/Lib/fontTools/varLib/interpolate_layout.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/interpolate_layout.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,91 @@ +""" +Interpolate OpenType Layout tables (GDEF / GPOS / GSUB). 
+""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.varLib import models, VarLibError, load_designspace +from fontTools.varLib.merger import InstancerMerger +import os.path +import logging +from pprint import pformat + +log = logging.getLogger("fontTools.varLib.interpolate_layout") + + +def interpolate_layout(designspace_filename, loc, master_finder=lambda s:s, mapped=False): + """ + Interpolate GPOS from a designspace file and location. + + If master_finder is set, it should be a callable that takes master + filename as found in designspace file and map it to master font + binary as to be opened (eg. .ttf or .otf). + + If mapped is False (default), then location is mapped using the + map element of the axes in designspace file. If mapped is True, + it is assumed that location is in designspace's internal space and + no mapping is performed. + """ + + axes, internal_axis_supports, base_idx, normalized_master_locs, masters, instances = load_designspace(designspace_filename) + + + log.info("Building interpolated font") + log.info("Loading master fonts") + basedir = os.path.dirname(designspace_filename) + master_ttfs = [master_finder(os.path.join(basedir, m['filename'])) for m in masters] + master_fonts = [TTFont(ttf_path) for ttf_path in master_ttfs] + + #font = master_fonts[base_idx] + font = TTFont(master_ttfs[base_idx]) + + log.info("Location: %s", pformat(loc)) + if not mapped: + loc = {name:axes[name].map_forward(v) for name,v in loc.items()} + log.info("Internal location: %s", pformat(loc)) + loc = models.normalizeLocation(loc, internal_axis_supports) + log.info("Normalized location: %s", pformat(loc)) + + # Assume single-model for now. 
+ model = models.VariationModel(normalized_master_locs) + assert 0 == model.mapping[base_idx] + + merger = InstancerMerger(font, model, loc) + + log.info("Building interpolated tables") + merger.mergeTables(font, master_fonts, ['GPOS']) + return font + + +def main(args=None): + from fontTools import configLogger + + import sys + if args is None: + args = sys.argv[1:] + + designspace_filename = args[0] + locargs = args[1:] + outfile = os.path.splitext(designspace_filename)[0] + '-instance.ttf' + + # TODO: allow user to configure logging via command-line options + configLogger(level="INFO") + + finder = lambda s: s.replace('master_ufo', 'master_ttf_interpolatable').replace('.ufo', '.ttf') + + loc = {} + for arg in locargs: + tag,val = arg.split('=') + loc[tag] = float(val) + + font = interpolate_layout(designspace_filename, loc, finder) + log.info("Saving font %s", outfile) + font.save(outfile) + + +if __name__ == "__main__": + import sys + if len(sys.argv) > 1: + sys.exit(main()) + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/varLib/iup.py fonttools-3.21.2/Lib/fontTools/varLib/iup.py --- fonttools-3.0/Lib/fontTools/varLib/iup.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/iup.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,305 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * + + +def iup_segment(coords, rc1, rd1, rc2, rd2): + # rc1 = reference coord 1 + # rd1 = reference delta 1 + out_arrays = [None, None] + for j in 0,1: + out_arrays[j] = out = [] + x1, x2, d1, d2 = rc1[j], rc2[j], rd1[j], rd2[j] + + + if x1 == x2: + n = len(coords) + if d1 == d2: + out.extend([d1]*n) + else: + out.extend([0]*n) + continue + + if x1 > x2: + x1, x2 = x2, x1 + d1, d2 = d2, d1 + + # x1 < x2 + scale = (d2 - d1) / (x2 - x1) + for pair in coords: + x = pair[j] + + if x <= x1: + d = d1 + elif x >= x2: + d = d2 + 
else: + # Interpolate + d = d1 + (x - x1) * scale + + out.append(d) + + return zip(*out_arrays) + +def iup_contour(delta, coords): + assert len(delta) == len(coords) + if None not in delta: + return delta + + n = len(delta) + # indices of points with explicit deltas + indices = [i for i,v in enumerate(delta) if v is not None] + if not indices: + # All deltas are None. Return 0,0 for all. + return [(0,0)]*n + + out = [] + it = iter(indices) + start = next(it) + if start != 0: + # Initial segment that wraps around + i1, i2, ri1, ri2 = 0, start, start, indices[-1] + out.extend(iup_segment(coords[i1:i2], coords[ri1], delta[ri1], coords[ri2], delta[ri2])) + out.append(delta[start]) + for end in it: + if end - start > 1: + i1, i2, ri1, ri2 = start+1, end, start, end + out.extend(iup_segment(coords[i1:i2], coords[ri1], delta[ri1], coords[ri2], delta[ri2])) + out.append(delta[end]) + start = end + if start != n-1: + # Final segment that wraps around + i1, i2, ri1, ri2 = start+1, n, start, indices[0] + out.extend(iup_segment(coords[i1:i2], coords[ri1], delta[ri1], coords[ri2], delta[ri2])) + + assert len(delta) == len(out), (len(delta), len(out)) + return out + +def iup_delta(delta, coords, ends): + assert sorted(ends) == ends and len(coords) == (ends[-1]+1 if ends else 0) + 4 + n = len(coords) + ends = ends + [n-4, n-3, n-2, n-1] + out = [] + start = 0 + for end in ends: + end += 1 + contour = iup_contour(delta[start:end], coords[start:end]) + out.extend(contour) + start = end + + return out + +# Optimizer + +def can_iup_in_between(deltas, coords, i, j, tolerance): + assert j - i >= 2 + interp = list(iup_segment(coords[i+1:j], coords[i], deltas[i], coords[j], deltas[j])) + deltas = deltas[i+1:j] + + assert len(deltas) == len(interp) + + return all(abs(complex(x-p, y-q)) <= tolerance for (x,y),(p,q) in zip(deltas, interp)) + +def _iup_contour_bound_forced_set(delta, coords, tolerance=0): + """The forced set is a conservative set of points on the contour that must be encoded 
+ explicitly (ie. cannot be interpolated). Calculating this set allows for significantly + speeding up the dynamic-programming, as well as resolve circularity in DP. + + The set is precise; that is, if an index is in the returned set, then there is no way + that IUP can generate delta for that point, given coords and delta. + """ + assert len(delta) == len(coords) + + forced = set() + # Track "last" and "next" points on the contour as we sweep. + nd, nc = delta[0], coords[0] + ld, lc = delta[-1], coords[-1] + for i in range(len(delta)-1, -1, -1): + d, c = ld, lc + ld, lc = delta[i-1], coords[i-1] + + for j in (0,1): # For X and for Y + cj = c[j] + dj = d[j] + lcj = lc[j] + ldj = ld[j] + ncj = nc[j] + ndj = nd[j] + + if lcj <= ncj: + c1, c2 = lcj, ncj + d1, d2 = ldj, ndj + else: + c1, c2 = ncj, lcj + d1, d2 = ndj, ldj + + # If coordinate for current point is between coordinate of adjacent + # points on the two sides, but the delta for current point is NOT + # between delta for those adjacent points (considering tolerance + # allowance), then there is no way that current point can be IUP-ed. + # Mark it forced. + force = False + if c1 <= cj <= c2: + if not (min(d1,d2)-tolerance <= dj <= max(d1,d2)+tolerance): + force = True + else: # cj < c1 or c2 < cj + if c1 == c2: + if d1 == d2: + if abs(dj - d1) > tolerance: + force = True + else: + if abs(dj) > tolerance: + # Disabled the following because the "d1 == d2" does + # check does not take tolerance into consideration... + pass # force = True + elif d1 != d2: + if cj < c1: + if dj != d1 and ((dj-tolerance < d1) != (d1 < d2)): + force = True + else: # c2 < cj + if d2 != dj and ((d2 < dj+tolerance) != (d1 < d2)): + force = True + + if force: + forced.add(i) + break + + nd, nc = d, c + + return forced + +def _iup_contour_optimize_dp(delta, coords, forced={}, tolerance=0, lookback=None): + """Straightforward Dynamic-Programming. 
For each index i, find least-costly encoding of + points i to n-1 where i is explicitly encoded. We find this by considering all next + explicit points j and check whether interpolation can fill points between i and j. + + Note that solution always encodes last point explicitly. Higher-level is responsible + for removing that restriction. + + As major speedup, we stop looking further whenever we see a "forced" point.""" + + n = len(delta) + if lookback is None: + lookback = n + costs = {-1:0} + chain = {-1:None} + for i in range(0, n): + best_cost = costs[i-1] + 1 + + costs[i] = best_cost + chain[i] = i - 1 + + if i - 1 in forced: + continue + + for j in range(i-2, max(i-lookback, -2), -1): + + cost = costs[j] + 1 + + if cost < best_cost and can_iup_in_between(delta, coords, j, i, tolerance): + costs[i] = best_cost = cost + chain[i] = j + + if j in forced: + break + + return chain, costs + +def _rot_list(l, k): + """Rotate list by k items forward. Ie. item at position 0 will be + at position k in returned list. Negative k is allowed.""" + n = len(l) + k %= n + if not k: return l + return l[n-k:] + l[:n-k] + +def _rot_set(s, k, n): + k %= n + if not k: return s + return {(v + k) % n for v in s} + +def iup_contour_optimize(delta, coords, tolerance=0.): + n = len(delta) + + # Get the easy cases out of the way: + + # If all are within tolerance distance of 0, encode nothing: + if all(abs(complex(*p)) <= tolerance for p in delta): + return [None] * n + + # If there's exactly one point, return it: + if n == 1: + return delta + + # If all deltas are exactly the same, return just one (the first one): + d0 = delta[0] + if all(d0 == d for d in delta): + return [d0] + [None] * (n-1) + + # Else, solve the general problem using Dynamic Programming. + + forced = _iup_contour_bound_forced_set(delta, coords, tolerance) + # The _iup_contour_optimize_dp() routine returns the optimal encoding + # solution given the constraint that the last point is always encoded. 
+ # To remove this constraint, we use two different methods, depending on + # whether forced set is non-empty or not: + + if forced: + # Forced set is non-empty: rotate the contour start point + # such that the last point in the list is a forced point. + k = (n-1) - max(forced) + assert k >= 0 + + delta = _rot_list(delta, k) + coords = _rot_list(coords, k) + forced = _rot_set(forced, k, n) + + chain, costs = _iup_contour_optimize_dp(delta, coords, forced, tolerance) + + # Assemble solution. + solution = set() + i = n - 1 + while i is not None: + solution.add(i) + i = chain[i] + assert forced <= solution, (forced, solution) + delta = [delta[i] if i in solution else None for i in range(n)] + + delta = _rot_list(delta, -k) + else: + # Repeat the contour an extra time, solve the 2*n case, then look for solutions of the + # circular n-length problem in the solution for 2*n linear case. I cannot prove that + # this always produces the optimal solution... + chain, costs = _iup_contour_optimize_dp(delta+delta, coords+coords, forced, tolerance, n) + best_sol, best_cost = None, n+1 + + for start in range(n-1, 2*n-1): + # Assemble solution. 
+ solution = set() + i = start + while i > start - n: + solution.add(i % n) + i = chain[i] + if i == start - n: + cost = costs[start] - costs[start - n] + if cost <= best_cost: + best_sol, best_cost = solution, cost + + delta = [delta[i] if i in best_sol else None for i in range(n)] + + + return delta + +def iup_delta_optimize(delta, coords, ends, tolerance=0.): + assert sorted(ends) == ends and len(coords) == (ends[-1]+1 if ends else 0) + 4 + n = len(coords) + ends = ends + [n-4, n-3, n-2, n-1] + out = [] + start = 0 + for end in ends: + contour = iup_contour_optimize(delta[start:end+1], coords[start:end+1], tolerance) + assert len(contour) == end - start + 1 + out.extend(contour) + start = end+1 + + return out diff -Nru fonttools-3.0/Lib/fontTools/varLib/__main__.py fonttools-3.21.2/Lib/fontTools/varLib/__main__.py --- fonttools-3.0/Lib/fontTools/varLib/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/__main__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +from fontTools.varLib import main + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Lib/fontTools/varLib/merger.py fonttools-3.21.2/Lib/fontTools/varLib/merger.py --- fonttools-3.0/Lib/fontTools/varLib/merger.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/merger.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,829 @@ +""" +Merge OpenType Layout tables (GDEF / GPOS / GSUB). 
+""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import classifyTools +from fontTools.ttLib.tables import otTables as ot +from fontTools.ttLib.tables import otBase as otBase +from fontTools.ttLib.tables.DefaultTable import DefaultTable +from fontTools.varLib import builder, varStore +from fontTools.varLib.varStore import VarStoreInstancer +from functools import reduce + + +class Merger(object): + + def __init__(self, font=None): + self.font = font + + @classmethod + def merger(celf, clazzes, attrs=(None,)): + assert celf != Merger, 'Subclass Merger instead.' + if 'mergers' not in celf.__dict__: + celf.mergers = {} + if type(clazzes) == type: + clazzes = (clazzes,) + if type(attrs) == str: + attrs = (attrs,) + def wrapper(method): + assert method.__name__ == 'merge' + done = [] + for clazz in clazzes: + if clazz in done: continue # Support multiple names of a clazz + done.append(clazz) + mergers = celf.mergers.setdefault(clazz, {}) + for attr in attrs: + assert attr not in mergers, \ + "Oops, class '%s' has merge function for '%s' defined already." 
% (clazz.__name__, attr) + mergers[attr] = method + return None + return wrapper + + @classmethod + def mergersFor(celf, thing, _default={}): + typ = type(thing) + + for celf in celf.mro(): + + mergers = getattr(celf, 'mergers', None) + if mergers is None: + break; + + m = celf.mergers.get(typ, None) + if m is not None: + return m + + return _default + + def mergeObjects(self, out, lst, exclude=()): + keys = sorted(vars(out).keys()) + assert all(keys == sorted(vars(v).keys()) for v in lst), \ + (keys, [sorted(vars(v).keys()) for v in lst]) + mergers = self.mergersFor(out) + defaultMerger = mergers.get('*', self.__class__.mergeThings) + try: + for key in keys: + if key in exclude: continue + value = getattr(out, key) + values = [getattr(table, key) for table in lst] + mergerFunc = mergers.get(key, defaultMerger) + mergerFunc(self, value, values) + except Exception as e: + e.args = e.args + ('.'+key,) + raise + + def mergeLists(self, out, lst): + count = len(out) + assert all(count == len(v) for v in lst), (count, [len(v) for v in lst]) + for i,(value,values) in enumerate(zip(out, zip(*lst))): + try: + self.mergeThings(value, values) + except Exception as e: + e.args = e.args + ('[%d]' % i,) + raise + + def mergeThings(self, out, lst): + clazz = type(out) + try: + assert all(type(item) == clazz for item in lst), (out, lst) + mergerFunc = self.mergersFor(out).get(None, None) + if mergerFunc is not None: + mergerFunc(self, out, lst) + elif hasattr(out, '__dict__'): + self.mergeObjects(out, lst) + elif isinstance(out, list): + self.mergeLists(out, lst) + else: + assert all(out == v for v in lst), (out, lst) + except Exception as e: + e.args = e.args + (clazz.__name__,) + raise + + def mergeTables(self, font, master_ttfs, tables): + + for tag in tables: + if tag not in font: continue + self.mergeThings(font[tag], [m[tag] for m in master_ttfs]) + +# +# Aligning merger +# +class AligningMerger(Merger): + pass + +def _SinglePosUpgradeToFormat2(self): + if self.Format == 2: 
return self + + ret = ot.SinglePos() + ret.Format = 2 + ret.Coverage = self.Coverage + ret.ValueFormat = self.ValueFormat + ret.Value = [self.Value for g in ret.Coverage.glyphs] + ret.ValueCount = len(ret.Value) + + return ret + +def _merge_GlyphOrders(font, lst, values_lst=None, default=None): + """Takes font and list of glyph lists (must be sorted by glyph id), and returns + two things: + - Combined glyph list, + - If values_lst is None, return input glyph lists, but padded with None when a glyph + was missing in a list. Otherwise, return values_lst list-of-list, padded with None + to match combined glyph lists. + """ + if values_lst is None: + dict_sets = [set(l) for l in lst] + else: + dict_sets = [{g:v for g,v in zip(l,vs)} for l,vs in zip(lst,values_lst)] + combined = set() + combined.update(*dict_sets) + + sortKey = font.getReverseGlyphMap().__getitem__ + order = sorted(combined, key=sortKey) + # Make sure all input glyphsets were in proper order + assert all(sorted(vs, key=sortKey) == vs for vs in lst) + del combined + + paddedValues = None + if values_lst is None: + padded = [[glyph if glyph in dict_set else default + for glyph in order] + for dict_set in dict_sets] + else: + assert len(lst) == len(values_lst) + padded = [[dict_set[glyph] if glyph in dict_set else default + for glyph in order] + for dict_set in dict_sets] + return order, padded + +def _Lookup_SinglePos_get_effective_value(subtables, glyph): + for self in subtables: + if self is None or \ + type(self) != ot.SinglePos or \ + self.Coverage is None or \ + glyph not in self.Coverage.glyphs: + continue + if self.Format == 1: + return self.Value + elif self.Format == 2: + return self.Value[self.Coverage.glyphs.index(glyph)] + else: + assert 0 + return None + +def _Lookup_PairPos_get_effective_value_pair(subtables, firstGlyph, secondGlyph): + for self in subtables: + if self is None or \ + type(self) != ot.PairPos or \ + self.Coverage is None or \ + firstGlyph not in self.Coverage.glyphs: + 
continue + if self.Format == 1: + ps = self.PairSet[self.Coverage.glyphs.index(firstGlyph)] + pvr = ps.PairValueRecord + for rec in pvr: # TODO Speed up + if rec.SecondGlyph == secondGlyph: + return rec + continue + elif self.Format == 2: + klass1 = self.ClassDef1.classDefs.get(firstGlyph, 0) + klass2 = self.ClassDef2.classDefs.get(secondGlyph, 0) + return self.Class1Record[klass1].Class2Record[klass2] + else: + assert 0 + return None + +@AligningMerger.merger(ot.SinglePos) +def merge(merger, self, lst): + self.ValueFormat = valueFormat = reduce(int.__or__, [l.ValueFormat for l in lst], 0) + assert len(lst) == 1 or (valueFormat & ~0xF == 0), valueFormat + + # If all have same coverage table and all are format 1, + if all(v.Format == 1 for v in lst) and all(self.Coverage.glyphs == v.Coverage.glyphs for v in lst): + self.Value = otBase.ValueRecord(valueFormat) + merger.mergeThings(self.Value, [v.Value for v in lst]) + self.ValueFormat = self.Value.getFormat() + return + + # Upgrade everything to Format=2 + self.Format = 2 + lst = [_SinglePosUpgradeToFormat2(v) for v in lst] + + # Align them + glyphs, padded = _merge_GlyphOrders(merger.font, + [v.Coverage.glyphs for v in lst], + [v.Value for v in lst]) + + self.Coverage.glyphs = glyphs + self.Value = [otBase.ValueRecord(valueFormat) for g in glyphs] + self.ValueCount = len(self.Value) + + for i,values in enumerate(padded): + for j,glyph in enumerate(glyphs): + if values[j] is not None: continue + # Fill in value from other subtables + # Note!!! This *might* result in behavior change if ValueFormat2-zeroedness + # is different between used subtable and current subtable! + # TODO(behdad) Check and warn if that happens? + v = _Lookup_SinglePos_get_effective_value(merger.lookup_subtables[i], glyph) + if v is None: + v = otBase.ValueRecord(valueFormat) + values[j] = v + + merger.mergeLists(self.Value, padded) + + # Merge everything else; though, there shouldn't be anything else. 
:) + merger.mergeObjects(self, lst, + exclude=('Format', 'Coverage', 'Value', 'ValueCount')) + self.ValueFormat = reduce(int.__or__, [v.getFormat() for v in self.Value], 0) + +@AligningMerger.merger(ot.PairSet) +def merge(merger, self, lst): + # Align them + glyphs, padded = _merge_GlyphOrders(merger.font, + [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst], + [vs.PairValueRecord for vs in lst]) + + self.PairValueRecord = pvrs = [] + for glyph in glyphs: + pvr = ot.PairValueRecord() + pvr.SecondGlyph = glyph + pvr.Value1 = otBase.ValueRecord(merger.valueFormat1) if merger.valueFormat1 else None + pvr.Value2 = otBase.ValueRecord(merger.valueFormat2) if merger.valueFormat2 else None + pvrs.append(pvr) + self.PairValueCount = len(self.PairValueRecord) + + for i,values in enumerate(padded): + for j,glyph in enumerate(glyphs): + # Fill in value from other subtables + v = ot.PairValueRecord() + v.SecondGlyph = glyph + if values[j] is not None: + vpair = values[j] + else: + vpair = _Lookup_PairPos_get_effective_value_pair(merger.lookup_subtables[i], self._firstGlyph, glyph) + if vpair is None: + v1, v2 = None, None + else: + v1, v2 = vpair.Value1, vpair.Value2 + v.Value1 = otBase.ValueRecord(merger.valueFormat1, src=v1) if merger.valueFormat1 else None + v.Value2 = otBase.ValueRecord(merger.valueFormat2, src=v2) if merger.valueFormat2 else None + values[j] = v + del self._firstGlyph + + merger.mergeLists(self.PairValueRecord, padded) + +def _PairPosFormat1_merge(self, lst, merger): + assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.PairSet]), "Report bug against fonttools." + + # Merge everything else; makes sure Format is the same. 
+ merger.mergeObjects(self, lst, + exclude=('Coverage', + 'PairSet', 'PairSetCount', + 'ValueFormat1', 'ValueFormat2')) + + empty = ot.PairSet() + empty.PairValueRecord = [] + empty.PairValueCount = 0 + + # Align them + glyphs, padded = _merge_GlyphOrders(merger.font, + [v.Coverage.glyphs for v in lst], + [v.PairSet for v in lst], + default=empty) + + self.Coverage.glyphs = glyphs + self.PairSet = [ot.PairSet() for g in glyphs] + self.PairSetCount = len(self.PairSet) + for glyph, ps in zip(glyphs, self.PairSet): + ps._firstGlyph = glyph + + merger.mergeLists(self.PairSet, padded) + +def _ClassDef_invert(self, allGlyphs=None): + + if isinstance(self, dict): + classDefs = self + else: + classDefs = self.classDefs if self and self.classDefs else {} + m = max(classDefs.values()) if classDefs else 0 + + ret = [] + for _ in range(m + 1): + ret.append(set()) + + for k,v in classDefs.items(): + ret[v].add(k) + + # Class-0 is special. It's "everything else". + if allGlyphs is None: + ret[0] = None + else: + # Limit all classes to glyphs in allGlyphs. + # Collect anything without a non-zero class into class=zero. 
+ ret[0] = class0 = set(allGlyphs) + for s in ret[1:]: + s.intersection_update(class0) + class0.difference_update(s) + + return ret + +def _ClassDef_merge_classify(lst, allGlyphs=None): + self = ot.ClassDef() + self.classDefs = classDefs = {} + + classifier = classifyTools.Classifier() + for l in lst: + sets = _ClassDef_invert(l, allGlyphs=allGlyphs) + if allGlyphs is None: + sets = sets[1:] + classifier.update(sets) + classes = classifier.getClasses() + + if allGlyphs is None: + classes.insert(0, set()) + + for i,classSet in enumerate(classes): + if i == 0: + continue + for g in classSet: + classDefs[g] = i + + return self, classes + +def _ClassDef_calculate_Format(self, font): + fmt = 2 + ranges = self._getClassRanges(font) + if ranges: + startGlyph = ranges[0][1] + endGlyph = ranges[-1][3] + glyphCount = endGlyph - startGlyph + 1 + if len(ranges) * 3 >= glyphCount + 1: + # Format 1 is more compact + fmt = 1 + self.Format = fmt + +def _PairPosFormat2_align_matrices(self, lst, font, transparent=False): + + matrices = [l.Class1Record for l in lst] + + # Align first classes + self.ClassDef1, classes = _ClassDef_merge_classify([l.ClassDef1 for l in lst], allGlyphs=set(self.Coverage.glyphs)) + _ClassDef_calculate_Format(self.ClassDef1, font) + self.Class1Count = len(classes) + new_matrices = [] + for l,matrix in zip(lst, matrices): + nullRow = None + coverage = set(l.Coverage.glyphs) + classDef1 = l.ClassDef1.classDefs + class1Records = [] + for classSet in classes: + exemplarGlyph = next(iter(classSet)) + if exemplarGlyph not in coverage: + if nullRow is None: + nullRow = ot.Class1Record() + class2records = nullRow.Class2Record = [] + # TODO: When merger becomes selfless, revert e6125b353e1f54a0280ded5434b8e40d042de69f + for _ in range(l.Class2Count): + if transparent: + rec2 = None + else: + rec2 = ot.Class2Record() + rec2.Value1 = otBase.ValueRecord(self.ValueFormat1) if self.ValueFormat1 else None + rec2.Value2 = otBase.ValueRecord(self.ValueFormat2) if 
self.ValueFormat2 else None + class2records.append(rec2) + rec1 = nullRow + else: + klass = classDef1.get(exemplarGlyph, 0) + rec1 = matrix[klass] # TODO handle out-of-range? + class1Records.append(rec1) + new_matrices.append(class1Records) + matrices = new_matrices + del new_matrices + + # Align second classes + self.ClassDef2, classes = _ClassDef_merge_classify([l.ClassDef2 for l in lst]) + _ClassDef_calculate_Format(self.ClassDef2, font) + self.Class2Count = len(classes) + new_matrices = [] + for l,matrix in zip(lst, matrices): + classDef2 = l.ClassDef2.classDefs + class1Records = [] + for rec1old in matrix: + oldClass2Records = rec1old.Class2Record + rec1new = ot.Class1Record() + class2Records = rec1new.Class2Record = [] + for classSet in classes: + if not classSet: # class=0 + rec2 = oldClass2Records[0] + else: + exemplarGlyph = next(iter(classSet)) + klass = classDef2.get(exemplarGlyph, 0) + rec2 = oldClass2Records[klass] + class2Records.append(rec2) + class1Records.append(rec1new) + new_matrices.append(class1Records) + matrices = new_matrices + del new_matrices + + return matrices + +def _PairPosFormat2_merge(self, lst, merger): + assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.Class1Record]), "Report bug against fonttools." + + merger.mergeObjects(self, lst, + exclude=('Coverage', + 'ClassDef1', 'Class1Count', + 'ClassDef2', 'Class2Count', + 'Class1Record', + 'ValueFormat1', 'ValueFormat2')) + + # Align coverages + glyphs, _ = _merge_GlyphOrders(merger.font, + [v.Coverage.glyphs for v in lst]) + self.Coverage.glyphs = glyphs + + # Currently, if the coverage of PairPosFormat2 subtables are different, + # we do NOT bother walking down the subtable list when filling in new + # rows for alignment. As such, this is only correct if current subtable + # is the last subtable in the lookup. Ensure that. + # + # Note that our canonicalization process merges trailing PairPosFormat2's, + # so in reality this is rare. 
+ for l,subtables in zip(lst,merger.lookup_subtables): + if l.Coverage.glyphs != glyphs: + assert l == subtables[-1] + + matrices = _PairPosFormat2_align_matrices(self, lst, merger.font) + + self.Class1Record = list(matrices[0]) # TODO move merger to be selfless + merger.mergeLists(self.Class1Record, matrices) + +@AligningMerger.merger(ot.PairPos) +def merge(merger, self, lst): + merger.valueFormat1 = self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0) + merger.valueFormat2 = self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0) + + if self.Format == 1: + _PairPosFormat1_merge(self, lst, merger) + elif self.Format == 2: + _PairPosFormat2_merge(self, lst, merger) + else: + assert False + + del merger.valueFormat1, merger.valueFormat2 + + # Now examine the list of value records, and update to the union of format values, + # as merge might have created new values. + vf1 = 0 + vf2 = 0 + if self.Format == 1: + for pairSet in self.PairSet: + for pairValueRecord in pairSet.PairValueRecord: + pv1 = pairValueRecord.Value1 + if pv1 is not None: + vf1 |= pv1.getFormat() + pv2 = pairValueRecord.Value2 + if pv2 is not None: + vf2 |= pv2.getFormat() + elif self.Format == 2: + for class1Record in self.Class1Record: + for class2Record in class1Record.Class2Record: + pv1 = class2Record.Value1 + if pv1 is not None: + vf1 |= pv1.getFormat() + pv2 = class2Record.Value2 + if pv2 is not None: + vf2 |= pv2.getFormat() + self.ValueFormat1 = vf1 + self.ValueFormat2 = vf2 + + +def _PairSet_flatten(lst, font): + self = ot.PairSet() + self.Coverage = ot.Coverage() + self.Coverage.Format = 1 + + # Align them + glyphs, padded = _merge_GlyphOrders(font, + [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst], + [vs.PairValueRecord for vs in lst]) + + self.Coverage.glyphs = glyphs + self.PairValueRecord = pvrs = [] + for values in zip(*padded): + for v in values: + if v is not None: + pvrs.append(v) + break + else: + assert False + self.PairValueCount 
= len(self.PairValueRecord) + + return self + +def _Lookup_PairPosFormat1_subtables_flatten(lst, font): + assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.PairSet]), "Report bug against fonttools." + + self = ot.PairPos() + self.Format = 1 + self.Coverage = ot.Coverage() + self.Coverage.Format = 1 + self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0) + self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0) + + # Align them + glyphs, padded = _merge_GlyphOrders(font, + [v.Coverage.glyphs for v in lst], + [v.PairSet for v in lst]) + + self.Coverage.glyphs = glyphs + self.PairSet = [_PairSet_flatten([v for v in values if v is not None], font) + for values in zip(*padded)] + self.PairSetCount = len(self.PairSet) + return self + +def _Lookup_PairPosFormat2_subtables_flatten(lst, font): + assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.Class1Record]), "Report bug against fonttools." + + self = ot.PairPos() + self.Format = 2 + self.Coverage = ot.Coverage() + self.Coverage.Format = 1 + self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0) + self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0) + + # Align them + glyphs, _ = _merge_GlyphOrders(font, + [v.Coverage.glyphs for v in lst]) + self.Coverage.glyphs = glyphs + + matrices = _PairPosFormat2_align_matrices(self, lst, font, transparent=True) + + matrix = self.Class1Record = [] + for rows in zip(*matrices): + row = ot.Class1Record() + matrix.append(row) + row.Class2Record = [] + row = row.Class2Record + for cols in zip(*list(r.Class2Record for r in rows)): + col = next(iter(c for c in cols if c is not None)) + row.append(col) + + return self + +def _Lookup_PairPos_subtables_canonicalize(lst, font): + """Merge multiple Format1 subtables at the beginning of lst, + and merge multiple consecutive Format2 subtables that have the same + Class2 (ie. were split because of offset overflows). 
Returns new list.""" + lst = list(lst) + + l = len(lst) + i = 0 + while i < l and lst[i].Format == 1: + i += 1 + lst[:i] = [_Lookup_PairPosFormat1_subtables_flatten(lst[:i], font)] + + l = len(lst) + i = l + while i > 0 and lst[i - 1].Format == 2: + i -= 1 + lst[i:] = [_Lookup_PairPosFormat2_subtables_flatten(lst[i:], font)] + + return lst + +@AligningMerger.merger(ot.Lookup) +def merge(merger, self, lst): + subtables = merger.lookup_subtables = [l.SubTable for l in lst] + + # Remove Extension subtables + for l,sts in list(zip(lst,subtables))+[(self,self.SubTable)]: + if not sts: + continue + if sts[0].__class__.__name__.startswith('Extension'): + assert _all_equal([st.__class__ for st in sts]) + assert _all_equal([st.ExtensionLookupType for st in sts]) + l.LookupType = sts[0].ExtensionLookupType + new_sts = [st.ExtSubTable for st in sts] + del sts[:] + sts.extend(new_sts) + + isPairPos = self.SubTable and isinstance(self.SubTable[0], ot.PairPos) + + if isPairPos: + + # AFDKO and feaLib sometimes generate two Format1 subtables instead of one. + # Merge those before continuing. + # https://github.com/fonttools/fonttools/issues/719 + self.SubTable = _Lookup_PairPos_subtables_canonicalize(self.SubTable, merger.font) + subtables = merger.lookup_subtables = [_Lookup_PairPos_subtables_canonicalize(st, merger.font) for st in subtables] + + merger.mergeLists(self.SubTable, subtables) + self.SubTableCount = len(self.SubTable) + + if isPairPos: + # If format-1 subtable created during canonicalization is empty, remove it. + assert len(self.SubTable) >= 1 and self.SubTable[0].Format == 1 + if not self.SubTable[0].Coverage.glyphs: + self.SubTable.pop(0) + self.SubTableCount -= 1 + + # If format-2 subtable created during canonicalization is empty, remove it. 
+ assert len(self.SubTable) >= 1 and self.SubTable[-1].Format == 2 + if not self.SubTable[-1].Coverage.glyphs: + self.SubTable.pop(-1) + self.SubTableCount -= 1 + + merger.mergeObjects(self, lst, exclude=['SubTable', 'SubTableCount']) + + del merger.lookup_subtables + + +# +# InstancerMerger +# + +class InstancerMerger(AligningMerger): + """A merger that takes multiple master fonts, and instantiates + an instance.""" + + def __init__(self, font, model, location): + Merger.__init__(self, font) + self.model = model + self.location = location + self.scalars = model.getScalars(location) + +@InstancerMerger.merger(ot.Anchor) +def merge(merger, self, lst): + XCoords = [a.XCoordinate for a in lst] + YCoords = [a.YCoordinate for a in lst] + model = merger.model + scalars = merger.scalars + self.XCoordinate = round(model.interpolateFromMastersAndScalars(XCoords, scalars)) + self.YCoordinate = round(model.interpolateFromMastersAndScalars(YCoords, scalars)) + +@InstancerMerger.merger(otBase.ValueRecord) +def merge(merger, self, lst): + model = merger.model + scalars = merger.scalars + # TODO Handle differing valueformats + for name, tableName in [('XAdvance','XAdvDevice'), + ('YAdvance','YAdvDevice'), + ('XPlacement','XPlaDevice'), + ('YPlacement','YPlaDevice')]: + + assert not hasattr(self, tableName) + + if hasattr(self, name): + values = [getattr(a, name, 0) for a in lst] + value = round(model.interpolateFromMastersAndScalars(values, scalars)) + setattr(self, name, value) + + +# +# MutatorMerger +# + +class MutatorMerger(AligningMerger): + """A merger that takes a variable font, and instantiates + an instance.""" + + def __init__(self, font, location): + Merger.__init__(self, font) + self.location = location + + store = None + if 'GDEF' in font: + gdef = font['GDEF'].table + if gdef.Version >= 0x00010003: + store = gdef.VarStore + + self.instancer = VarStoreInstancer(store, font['fvar'].axes, location) + + def instantiate(self): + font = self.font + + 
self.mergeTables(font, [font], ['GPOS']) + + if 'GDEF' in font: + gdef = font['GDEF'].table + if gdef.Version >= 0x00010003: + del gdef.VarStore + gdef.Version = 0x00010002 + if gdef.MarkGlyphSetsDef is None: + del gdef.MarkGlyphSetsDef + gdef.Version = 0x00010000 + if not (gdef.LigCaretList or + gdef.MarkAttachClassDef or + gdef.GlyphClassDef or + gdef.AttachList or + (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)): + del font['GDEF'] + +@MutatorMerger.merger(ot.Anchor) +def merge(merger, self, lst): + if self.Format != 3: + return + + instancer = merger.instancer + for v in "XY": + tableName = v+'DeviceTable' + if not hasattr(self, tableName): + continue + dev = getattr(self, tableName) + delattr(self, tableName) + if dev is None: + continue + + assert dev.DeltaFormat == 0x8000 + varidx = (dev.StartSize << 16) + dev.EndSize + delta = round(instancer[varidx]) + + attr = v+'Coordinate' + setattr(self, attr, getattr(self, attr) + delta) + + self.Format = 1 + +@MutatorMerger.merger(otBase.ValueRecord) +def merge(merger, self, lst): + + # All other structs are merged with self pointing to a copy of base font, + # except for ValueRecords which are sometimes created later and initialized + # to have 0/None members. Hence the copy. 
+ self.__dict__ = lst[0].__dict__.copy() + + instancer = merger.instancer + # TODO Handle differing valueformats + for name, tableName in [('XAdvance','XAdvDevice'), + ('YAdvance','YAdvDevice'), + ('XPlacement','XPlaDevice'), + ('YPlacement','YPlaDevice')]: + + if not hasattr(self, tableName): + continue + dev = getattr(self, tableName) + delattr(self, tableName) + if dev is None: + continue + + assert dev.DeltaFormat == 0x8000 + varidx = (dev.StartSize << 16) + dev.EndSize + delta = round(instancer[varidx]) + + setattr(self, name, getattr(self, name) + delta) + + +# +# VariationMerger +# + +class VariationMerger(AligningMerger): + """A merger that takes multiple master fonts, and builds a + variable font.""" + + def __init__(self, model, axisTags, font): + Merger.__init__(self, font) + self.model = model + self.store_builder = varStore.OnlineVarStoreBuilder(axisTags) + self.store_builder.setModel(model) + +def _all_equal(lst): + if not lst: + return True + it = iter(lst) + v0 = next(it) + for v in it: + if v0 != v: + return False + return True + +def buildVarDevTable(store_builder, master_values): + if _all_equal(master_values): + return master_values[0], None + base, varIdx = store_builder.storeMasters(master_values) + return base, builder.buildVarDevTable(varIdx) + +@VariationMerger.merger(ot.Anchor) +def merge(merger, self, lst): + assert self.Format == 1 + self.XCoordinate, XDeviceTable = buildVarDevTable(merger.store_builder, [a.XCoordinate for a in lst]) + self.YCoordinate, YDeviceTable = buildVarDevTable(merger.store_builder, [a.YCoordinate for a in lst]) + if XDeviceTable or YDeviceTable: + self.Format = 3 + self.XDeviceTable = XDeviceTable + self.YDeviceTable = YDeviceTable + +@VariationMerger.merger(otBase.ValueRecord) +def merge(merger, self, lst): + for name, tableName in [('XAdvance','XAdvDevice'), + ('YAdvance','YAdvDevice'), + ('XPlacement','XPlaDevice'), + ('YPlacement','YPlaDevice')]: + + if hasattr(self, name): + value, deviceTable = 
buildVarDevTable(merger.store_builder, + [getattr(a, name, 0) for a in lst]) + setattr(self, name, value) + if deviceTable: + setattr(self, tableName, deviceTable) diff -Nru fonttools-3.0/Lib/fontTools/varLib/models.py fonttools-3.21.2/Lib/fontTools/varLib/models.py --- fonttools-3.0/Lib/fontTools/varLib/models.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/models.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,316 @@ +"""Variation fonts interpolation models.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = ['normalizeValue', 'normalizeLocation', 'supportScalar', 'VariationModel'] + +def normalizeValue(v, triple): + """Normalizes value based on a min/default/max triple. + >>> normalizeValue(400, (100, 400, 900)) + 0.0 + >>> normalizeValue(100, (100, 400, 900)) + -1.0 + >>> normalizeValue(650, (100, 400, 900)) + 0.5 + """ + lower, default, upper = triple + assert lower <= default <= upper, "invalid axis values: %3.3f, %3.3f %3.3f"%(lower, default, upper) + v = max(min(v, upper), lower) + if v == default: + v = 0. + elif v < default: + v = (v - default) / (default - lower) + else: + v = (v - default) / (upper - default) + return v + +def normalizeLocation(location, axes): + """Normalizes location based on axis min/default/max values from axes. 
+ >>> axes = {"wght": (100, 400, 900)} + >>> normalizeLocation({"wght": 400}, axes) + {'wght': 0.0} + >>> normalizeLocation({"wght": 100}, axes) + {'wght': -1.0} + >>> normalizeLocation({"wght": 900}, axes) + {'wght': 1.0} + >>> normalizeLocation({"wght": 650}, axes) + {'wght': 0.5} + >>> normalizeLocation({"wght": 1000}, axes) + {'wght': 1.0} + >>> normalizeLocation({"wght": 0}, axes) + {'wght': -1.0} + >>> axes = {"wght": (0, 0, 1000)} + >>> normalizeLocation({"wght": 0}, axes) + {'wght': 0.0} + >>> normalizeLocation({"wght": -1}, axes) + {'wght': 0.0} + >>> normalizeLocation({"wght": 1000}, axes) + {'wght': 1.0} + >>> normalizeLocation({"wght": 500}, axes) + {'wght': 0.5} + >>> normalizeLocation({"wght": 1001}, axes) + {'wght': 1.0} + >>> axes = {"wght": (0, 1000, 1000)} + >>> normalizeLocation({"wght": 0}, axes) + {'wght': -1.0} + >>> normalizeLocation({"wght": -1}, axes) + {'wght': -1.0} + >>> normalizeLocation({"wght": 500}, axes) + {'wght': -0.5} + >>> normalizeLocation({"wght": 1000}, axes) + {'wght': 0.0} + >>> normalizeLocation({"wght": 1001}, axes) + {'wght': 0.0} + """ + out = {} + for tag,triple in axes.items(): + v = location.get(tag, triple[1]) + out[tag] = normalizeValue(v, triple) + return out + +def supportScalar(location, support, ot=True): + """Returns the scalar multiplier at location, for a master + with support. If ot is True, then a peak value of zero + for support of an axis means "axis does not participate". That + is how OpenType Variation Font technology works. 
+ >>> supportScalar({}, {}) + 1.0 + >>> supportScalar({'wght':.2}, {}) + 1.0 + >>> supportScalar({'wght':.2}, {'wght':(0,2,3)}) + 0.1 + >>> supportScalar({'wght':2.5}, {'wght':(0,2,4)}) + 0.75 + >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}) + 0.75 + >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False) + 0.375 + >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}) + 0.75 + >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}) + 0.75 + """ + scalar = 1. + for axis,(lower,peak,upper) in support.items(): + if ot: + # OpenType-specific case handling + if peak == 0.: + continue + if lower > peak or peak > upper: + continue + if lower < 0. and upper > 0.: + continue + v = location.get(axis, 0.) + else: + assert axis in location + v = location[axis] + if v == peak: + continue + if v <= lower or upper <= v: + scalar = 0. + break; + if v < peak: + scalar *= (v - lower) / (peak - lower) + else: # v > peak + scalar *= (v - upper) / (peak - upper) + return scalar + + +class VariationModel(object): + + """ + Locations must be in normalized space. Ie. base master + is at origin (0). 
+ >>> from pprint import pprint + >>> locations = [ \ + {'wght':100}, \ + {'wght':-100}, \ + {'wght':-180}, \ + {'wdth':+.3}, \ + {'wght':+120,'wdth':.3}, \ + {'wght':+120,'wdth':.2}, \ + {}, \ + {'wght':+180,'wdth':.3}, \ + {'wght':+180}, \ + ] + >>> model = VariationModel(locations, axisOrder=['wght']) + >>> pprint(model.locations) + [{}, + {'wght': -100}, + {'wght': -180}, + {'wght': 100}, + {'wght': 180}, + {'wdth': 0.3}, + {'wdth': 0.3, 'wght': 180}, + {'wdth': 0.3, 'wght': 120}, + {'wdth': 0.2, 'wght': 120}] + >>> pprint(model.deltaWeights) + [{}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0, 4: 1.0, 5: 1.0}, + {0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.25}, + {0: 1.0, + 3: 0.75, + 4: 0.25, + 5: 0.6666666666666667, + 6: 0.16666666666666669, + 7: 0.6666666666666667}] + """ + + def __init__(self, locations, axisOrder=[]): + locations = [{k:v for k,v in loc.items() if v != 0.} for loc in locations] + keyFunc = self.getMasterLocationsSortKeyFunc(locations, axisOrder=axisOrder) + axisPoints = keyFunc.axisPoints + self.locations = sorted(locations, key=keyFunc) + # TODO Assert that locations are unique. + self.mapping = [self.locations.index(l) for l in locations] # Mapping from user's master order to our master order + self.reverseMapping = [locations.index(l) for l in self.locations] # Reverse of above + + self._computeMasterSupports(axisPoints) + + @staticmethod + def getMasterLocationsSortKeyFunc(locations, axisOrder=[]): + assert {} in locations, "Base master not found." 
+ axisPoints = {} + for loc in locations: + if len(loc) != 1: + continue + axis = next(iter(loc)) + value = loc[axis] + if axis not in axisPoints: + axisPoints[axis] = {0.} + assert value not in axisPoints[axis] + axisPoints[axis].add(value) + + def getKey(axisPoints, axisOrder): + def sign(v): + return -1 if v < 0 else +1 if v > 0 else 0 + def key(loc): + rank = len(loc) + onPointAxes = [axis for axis,value in loc.items() if value in axisPoints[axis]] + orderedAxes = [axis for axis in axisOrder if axis in loc] + orderedAxes.extend([axis for axis in sorted(loc.keys()) if axis not in axisOrder]) + return ( + rank, # First, order by increasing rank + -len(onPointAxes), # Next, by decreasing number of onPoint axes + tuple(axisOrder.index(axis) if axis in axisOrder else 0x10000 for axis in orderedAxes), # Next, by known axes + tuple(orderedAxes), # Next, by all axes + tuple(sign(loc[axis]) for axis in orderedAxes), # Next, by signs of axis values + tuple(abs(loc[axis]) for axis in orderedAxes), # Next, by absolute value of axis values + ) + return key + + ret = getKey(axisPoints, axisOrder) + ret.axisPoints = axisPoints + return ret + + @staticmethod + def lowerBound(value, lst): + if any(v < value for v in lst): + return max(v for v in lst if v < value) + else: + return value + @staticmethod + def upperBound(value, lst): + if any(v > value for v in lst): + return min(v for v in lst if v > value) + else: + return value + + def _computeMasterSupports(self, axisPoints): + supports = [] + deltaWeights = [] + locations = self.locations + for i,loc in enumerate(locations): + box = {} + + # Account for axisPoints first + for axis,values in axisPoints.items(): + if not axis in loc: + continue + locV = loc[axis] + box[axis] = (self.lowerBound(locV, values), locV, self.upperBound(locV, values)) + + locAxes = set(loc.keys()) + # Walk over previous masters now + for j,m in enumerate(locations[:i]): + # Master with extra axes do not participte + if not 
set(m.keys()).issubset(locAxes): + continue + # If it's NOT in the current box, it does not participate + relevant = True + for axis, (lower,_,upper) in box.items(): + if axis in m and not (lower < m[axis] < upper): + relevant = False + break + if not relevant: + continue + # Split the box for new master + for axis,val in m.items(): + assert axis in box + lower,locV,upper = box[axis] + if val < locV: + lower = val + elif locV < val: + upper = val + box[axis] = (lower,locV,upper) + supports.append(box) + + deltaWeight = {} + # Walk over previous masters now, populate deltaWeight + for j,m in enumerate(locations[:i]): + scalar = supportScalar(loc, supports[j]) + if scalar: + deltaWeight[j] = scalar + deltaWeights.append(deltaWeight) + + self.supports = supports + self.deltaWeights = deltaWeights + + def getDeltas(self, masterValues): + assert len(masterValues) == len(self.deltaWeights) + mapping = self.reverseMapping + out = [] + for i,weights in enumerate(self.deltaWeights): + delta = masterValues[mapping[i]] + for j,weight in weights.items(): + delta -= out[j] * weight + out.append(delta) + return out + + def getScalars(self, loc): + return [supportScalar(loc, support) for support in self.supports] + + @staticmethod + def interpolateFromDeltasAndScalars(deltas, scalars): + v = None + assert len(deltas) == len(scalars) + for i,(delta,scalar) in enumerate(zip(deltas, scalars)): + if not scalar: continue + contribution = delta * scalar + if v is None: + v = contribution + else: + v += contribution + return v + + def interpolateFromDeltas(self, loc, deltas): + scalars = self.getScalars(loc) + return self.interpolateFromDeltasAndScalars(deltas, scalars) + + def interpolateFromMasters(self, loc, masterValues): + deltas = self.getDeltas(masterValues) + return self.interpolateFromDeltas(loc, deltas) + + def interpolateFromMastersAndScalars(self, masterValues, scalars): + deltas = self.getDeltas(masterValues) + return self.interpolateFromDeltasAndScalars(deltas, scalars) + 
+ +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/varLib/mutator.py fonttools-3.21.2/Lib/fontTools/varLib/mutator.py --- fonttools-3.0/Lib/fontTools/varLib/mutator.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/mutator.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,158 @@ +""" +Instantiate a variation font. Run, eg: + +$ python mutator.py ./NotoSansArabic-VF.ttf wght=140 wdth=85 +""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import floatToFixedToFloat +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates +from fontTools.varLib import _GetCoordinates, _SetCoordinates, _DesignspaceAxis +from fontTools.varLib.models import supportScalar, normalizeLocation +from fontTools.varLib.merger import MutatorMerger +from fontTools.varLib.varStore import VarStoreInstancer +from fontTools.varLib.mvar import MVAR_ENTRIES +from fontTools.varLib.iup import iup_delta +import os.path +import logging + + +log = logging.getLogger("fontTools.varlib.mutator") + + +def instantiateVariableFont(varfont, location, inplace=False): + """ Generate a static instance from a variable TTFont and a dictionary + defining the desired location along the variable font's axes. + The location values must be specified as user-space coordinates, e.g.: + + {'wght': 400, 'wdth': 100} + + By default, a new TTFont object is returned. If ``inplace`` is True, the + input varfont is modified and reduced to a static font. 
+ """ + if not inplace: + # make a copy to leave input varfont unmodified + stream = BytesIO() + varfont.save(stream) + stream.seek(0) + varfont = TTFont(stream) + + fvar = varfont['fvar'] + axes = {a.axisTag:(a.minValue,a.defaultValue,a.maxValue) for a in fvar.axes} + loc = normalizeLocation(location, axes) + if 'avar' in varfont: + maps = varfont['avar'].segments + loc = {k:_DesignspaceAxis._map(v, maps[k]) for k,v in loc.items()} + # Quantize to F2Dot14, to avoid surprise interpolations. + loc = {k:floatToFixedToFloat(v, 14) for k,v in loc.items()} + # Location is normalized now + log.info("Normalized location: %s", loc) + + log.info("Mutating glyf/gvar tables") + gvar = varfont['gvar'] + glyf = varfont['glyf'] + # get list of glyph names in gvar sorted by component depth + glyphnames = sorted( + gvar.variations.keys(), + key=lambda name: ( + glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth + if glyf[name].isComposite() else 0, + name)) + for glyphname in glyphnames: + variations = gvar.variations[glyphname] + coordinates,_ = _GetCoordinates(varfont, glyphname) + origCoords, endPts = None, None + for var in variations: + scalar = supportScalar(loc, var.axes) + if not scalar: continue + delta = var.coordinates + if None in delta: + if origCoords is None: + origCoords,control = _GetCoordinates(varfont, glyphname) + endPts = control[1] if control[0] >= 1 else list(range(len(control[1]))) + delta = iup_delta(delta, origCoords, endPts) + coordinates += GlyphCoordinates(delta) * scalar + _SetCoordinates(varfont, glyphname, coordinates) + + if 'cvar' in varfont: + log.info("Mutating cvt/cvar tables") + cvar = varfont['cvar'] + cvt = varfont['cvt '] + deltas = {} + for var in cvar.variations: + scalar = supportScalar(loc, var.axes) + if not scalar: continue + for i, c in enumerate(var.coordinates): + if c is not None: + deltas[i] = deltas.get(i, 0) + scalar * c + for i, delta in deltas.items(): + cvt[i] += round(delta) + + if 'MVAR' in varfont: + 
log.info("Mutating MVAR table") + mvar = varfont['MVAR'].table + varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc) + records = mvar.ValueRecord + for rec in records: + mvarTag = rec.ValueTag + if mvarTag not in MVAR_ENTRIES: + continue + tableTag, itemName = MVAR_ENTRIES[mvarTag] + delta = round(varStoreInstancer[rec.VarIdx]) + if not delta: + continue + setattr(varfont[tableTag], itemName, + getattr(varfont[tableTag], itemName) + delta) + + if 'GDEF' in varfont: + log.info("Mutating GDEF/GPOS/GSUB tables") + merger = MutatorMerger(varfont, loc) + + log.info("Building interpolated tables") + merger.instantiate() + + log.info("Removing variable tables") + for tag in ('avar','cvar','fvar','gvar','HVAR','MVAR','VVAR','STAT'): + if tag in varfont: + del varfont[tag] + + return varfont + + +def main(args=None): + from fontTools import configLogger + + if args is None: + import sys + args = sys.argv[1:] + + varfilename = args[0] + locargs = args[1:] + outfile = os.path.splitext(varfilename)[0] + '-instance.ttf' + + # TODO Allow to specify logging verbosity as command line option + configLogger(level=logging.INFO) + + loc = {} + for arg in locargs: + tag,val = arg.split('=') + assert len(tag) <= 4 + loc[tag.ljust(4)] = float(val) + log.info("Location: %s", loc) + + log.info("Loading variable font") + varfont = TTFont(varfilename) + + instantiateVariableFont(varfont, loc, inplace=True) + + log.info("Saving instance font %s", outfile) + varfont.save(outfile) + + +if __name__ == "__main__": + import sys + if len(sys.argv) > 1: + sys.exit(main()) + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Lib/fontTools/varLib/mvar.py fonttools-3.21.2/Lib/fontTools/varLib/mvar.py --- fonttools-3.0/Lib/fontTools/varLib/mvar.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/mvar.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,44 @@ +from __future__ import print_function, division, absolute_import +from 
__future__ import unicode_literals +from fontTools.misc.py23 import * + +MVAR_ENTRIES = { + 'hasc': ('OS/2', 'sTypoAscender'), # horizontal ascender + 'hdsc': ('OS/2', 'sTypoDescender'), # horizontal descender + 'hlgp': ('OS/2', 'sTypoLineGap'), # horizontal line gap + 'hcla': ('OS/2', 'usWinAscent'), # horizontal clipping ascent + 'hcld': ('OS/2', 'usWinDescent'), # horizontal clipping descent + 'vasc': ('vhea', 'ascent'), # vertical ascender + 'vdsc': ('vhea', 'descent'), # vertical descender + 'vlgp': ('vhea', 'lineGap'), # vertical line gap + 'hcrs': ('hhea', 'caretSlopeRise'), # horizontal caret rise + 'hcrn': ('hhea', 'caretSlopeRun'), # horizontal caret run + 'hcof': ('hhea', 'caretOffset'), # horizontal caret offset + 'vcrs': ('vhea', 'caretSlopeRise'), # vertical caret rise + 'vcrn': ('vhea', 'caretSlopeRun'), # vertical caret run + 'vcof': ('vhea', 'caretOffset'), # vertical caret offset + 'xhgt': ('OS/2', 'sxHeight'), # x height + 'cpht': ('OS/2', 'sCapHeight'), # cap height + 'sbxs': ('OS/2', 'ySubscriptXSize'), # subscript em x size + 'sbys': ('OS/2', 'ySubscriptYSize'), # subscript em y size + 'sbxo': ('OS/2', 'ySubscriptXOffset'), # subscript em x offset + 'sbyo': ('OS/2', 'ySubscriptYOffset'), # subscript em y offset + 'spxs': ('OS/2', 'ySuperscriptXSize'), # superscript em x size + 'spys': ('OS/2', 'ySuperscriptYSize'), # superscript em y size + 'spxo': ('OS/2', 'ySuperscriptXOffset'), # superscript em x offset + 'spyo': ('OS/2', 'ySuperscriptYOffset'), # superscript em y offset + 'strs': ('OS/2', 'yStrikeoutSize'), # strikeout size + 'stro': ('OS/2', 'yStrikeoutPosition'), # strikeout offset + 'unds': ('post', 'underlineThickness'), # underline size + 'undo': ('post', 'underlinePosition'), # underline offset + #'gsp0': ('gasp', 'gaspRange[0].rangeMaxPPEM'), # gaspRange[0] + #'gsp1': ('gasp', 'gaspRange[1].rangeMaxPPEM'), # gaspRange[1] + #'gsp2': ('gasp', 'gaspRange[2].rangeMaxPPEM'), # gaspRange[2] + #'gsp3': ('gasp', 
'gaspRange[3].rangeMaxPPEM'), # gaspRange[3] + #'gsp4': ('gasp', 'gaspRange[4].rangeMaxPPEM'), # gaspRange[4] + #'gsp5': ('gasp', 'gaspRange[5].rangeMaxPPEM'), # gaspRange[5] + #'gsp6': ('gasp', 'gaspRange[6].rangeMaxPPEM'), # gaspRange[6] + #'gsp7': ('gasp', 'gaspRange[7].rangeMaxPPEM'), # gaspRange[7] + #'gsp8': ('gasp', 'gaspRange[8].rangeMaxPPEM'), # gaspRange[8] + #'gsp9': ('gasp', 'gaspRange[9].rangeMaxPPEM'), # gaspRange[9] +} diff -Nru fonttools-3.0/Lib/fontTools/varLib/varStore.py fonttools-3.21.2/Lib/fontTools/varLib/varStore.py --- fonttools-3.0/Lib/fontTools/varLib/varStore.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/varLib/varStore.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,100 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.varLib.models import supportScalar +from fontTools.varLib.builder import (buildVarRegionList, buildVarStore, + buildVarRegion, buildVarData, + varDataCalculateNumShorts) + + +def _getLocationKey(loc): + return tuple(sorted(loc.items(), key=lambda kv: kv[0])) + + +class OnlineVarStoreBuilder(object): + + def __init__(self, axisTags): + self._axisTags = axisTags + self._regionMap = {} + self._regionList = buildVarRegionList([], axisTags) + self._store = buildVarStore(self._regionList, []) + + def setModel(self, model): + self._model = model + + regionMap = self._regionMap + regionList = self._regionList + + regions = model.supports[1:] + regionIndices = [] + for region in regions: + key = _getLocationKey(region) + idx = regionMap.get(key) + if idx is None: + varRegion = buildVarRegion(region, self._axisTags) + idx = regionMap[key] = len(regionList.Region) + regionList.Region.append(varRegion) + regionIndices.append(idx) + + data = self._data = buildVarData(regionIndices, [], optimize=False) + self._outer = len(self._store.VarData) + self._store.VarData.append(data) + + def finish(self, 
optimize=True): + self._regionList.RegionCount = len(self._regionList.Region) + self._store.VarDataCount = len(self._store.VarData) + for data in self._store.VarData: + data.ItemCount = len(data.Item) + varDataCalculateNumShorts(data, optimize) + return self._store + + def storeMasters(self, master_values): + deltas = [round(d) for d in self._model.getDeltas(master_values)] + base = deltas.pop(0) + inner = len(self._data.Item) + self._data.Item.append(deltas) + # TODO Check for full data array? + return base, (self._outer << 16) + inner + + +def VarRegion_get_support(self, fvar_axes): + return {fvar_axes[i].axisTag: (reg.StartCoord,reg.PeakCoord,reg.EndCoord) + for i,reg in enumerate(self.VarRegionAxis)} + +class VarStoreInstancer(object): + + def __init__(self, varstore, fvar_axes, location={}): + self.fvar_axes = fvar_axes + assert varstore is None or varstore.Format == 1 + self._varData = varstore.VarData if varstore else [] + self._regions = varstore.VarRegionList.Region if varstore else [] + self.setLocation(location) + + def setLocation(self, location): + self.location = dict(location) + self._clearCaches() + + def _clearCaches(self): + self._scalars = {} + + def _getScalar(self, regionIdx): + scalar = self._scalars.get(regionIdx) + if scalar is None: + support = VarRegion_get_support(self._regions[regionIdx], self.fvar_axes) + scalar = supportScalar(self.location, support) + self._scalars[regionIdx] = scalar + return scalar + + def __getitem__(self, varidx): + + major, minor = varidx >> 16, varidx & 0xFFFF + + varData = self._varData + scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex] + + deltas = varData[major].Item[minor] + delta = 0. 
+ for d,s in zip(deltas, scalars): + delta += d * s + return delta + diff -Nru fonttools-3.0/Lib/fontTools/voltLib/ast.py fonttools-3.21.2/Lib/fontTools/voltLib/ast.py --- fonttools-3.0/Lib/fontTools/voltLib/ast.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/voltLib/ast.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,257 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.voltLib.error import VoltLibError + + +class Statement(object): + def __init__(self, location): + self.location = location + + def build(self, builder): + pass + + +class Expression(object): + def __init__(self, location): + self.location = location + + def build(self, builder): + pass + + +class Block(Statement): + def __init__(self, location): + Statement.__init__(self, location) + self.statements = [] + + def build(self, builder): + for s in self.statements: + s.build(builder) + + +class VoltFile(Block): + def __init__(self): + Block.__init__(self, location=None) + + +class LookupBlock(Block): + def __init__(self, location, name): + Block.__init__(self, location) + self.name = name + + def build(self, builder): + builder.start_lookup_block(self.location, self.name) + Block.build(self, builder) + builder.end_lookup_block() + + +class GlyphDefinition(Statement): + def __init__(self, location, name, gid, gunicode, gtype, components): + Statement.__init__(self, location) + self.name = name + self.id = gid + self.unicode = gunicode + self.type = gtype + self.components = components + + +class GroupDefinition(Statement): + def __init__(self, location, name, enum): + Statement.__init__(self, location) + self.name = name + self.enum = enum + self.glyphs_ = None + + def glyphSet(self, groups=None): + if groups is not None and self.name in groups: + raise VoltLibError( + 'Group "%s" contains itself.' 
% (self.name), + self.location) + if self.glyphs_ is None: + if groups is None: + groups = set({self.name}) + else: + groups.add(self.name) + self.glyphs_ = self.enum.glyphSet(groups) + return self.glyphs_ + + +class GlyphName(Expression): + """A single glyph name, such as cedilla.""" + def __init__(self, location, glyph): + Expression.__init__(self, location) + self.glyph = glyph + + def glyphSet(self): + return frozenset((self.glyph,)) + + +class Enum(Expression): + """An enum""" + def __init__(self, location, enum): + Expression.__init__(self, location) + self.enum = enum + + def __iter__(self): + for e in self.glyphSet(): + yield e + + def glyphSet(self, groups=None): + glyphs = set() + for element in self.enum: + if isinstance(element, (GroupName, Enum)): + glyphs = glyphs.union(element.glyphSet(groups)) + else: + glyphs = glyphs.union(element.glyphSet()) + return frozenset(glyphs) + + +class GroupName(Expression): + """A glyph group""" + def __init__(self, location, group, parser): + Expression.__init__(self, location) + self.group = group + self.parser_ = parser + + def glyphSet(self, groups=None): + group = self.parser_.resolve_group(self.group) + if group is not None: + self.glyphs_ = group.glyphSet(groups) + return self.glyphs_ + else: + raise VoltLibError( + 'Group "%s" is used but undefined.' 
% (self.group), + self.location) + + +class Range(Expression): + """A glyph range""" + def __init__(self, location, start, end, parser): + Expression.__init__(self, location) + self.start = start + self.end = end + self.parser = parser + + def glyphSet(self): + glyphs = self.parser.glyph_range(self.start, self.end) + return frozenset(glyphs) + + +class ScriptDefinition(Statement): + def __init__(self, location, name, tag, langs): + Statement.__init__(self, location) + self.name = name + self.tag = tag + self.langs = langs + + +class LangSysDefinition(Statement): + def __init__(self, location, name, tag, features): + Statement.__init__(self, location) + self.name = name + self.tag = tag + self.features = features + + +class FeatureDefinition(Statement): + def __init__(self, location, name, tag, lookups): + Statement.__init__(self, location) + self.name = name + self.tag = tag + self.lookups = lookups + + +class LookupDefinition(Statement): + def __init__(self, location, name, process_base, process_marks, direction, + reversal, comments, context, sub, pos): + Statement.__init__(self, location) + self.name = name + self.process_base = process_base + self.process_marks = process_marks + self.direction = direction + self.reversal = reversal + self.comments = comments + self.context = context + self.sub = sub + self.pos = pos + + +class SubstitutionDefinition(Statement): + def __init__(self, location, mapping): + Statement.__init__(self, location) + self.mapping = mapping + + +class SubstitutionSingleDefinition(SubstitutionDefinition): + def __init__(self, location, mapping): + SubstitutionDefinition.__init__(self, location, mapping) + + +class SubstitutionMultipleDefinition(SubstitutionDefinition): + def __init__(self, location, mapping): + SubstitutionDefinition.__init__(self, location, mapping) + + +class SubstitutionLigatureDefinition(SubstitutionDefinition): + def __init__(self, location, mapping): + SubstitutionDefinition.__init__(self, location, mapping) + + 
+class SubstitutionReverseChainingSingleDefinition(SubstitutionDefinition): + def __init__(self, location, mapping): + SubstitutionDefinition.__init__(self, location, mapping) + + +class PositionAttachDefinition(Statement): + def __init__(self, location, coverage, coverage_to): + Statement.__init__(self, location) + self.coverage = coverage + self.coverage_to = coverage_to + + +class PositionAttachCursiveDefinition(Statement): + def __init__(self, location, coverages_exit, coverages_enter): + Statement.__init__(self, location) + self.coverages_exit = coverages_exit + self.coverages_enter = coverages_enter + + +class PositionAdjustPairDefinition(Statement): + def __init__(self, location, coverages_1, coverages_2, adjust_pair): + Statement.__init__(self, location) + self.coverages_1 = coverages_1 + self.coverages_2 = coverages_2 + self.adjust_pair = adjust_pair + + +class PositionAdjustSingleDefinition(Statement): + def __init__(self, location, adjust_single): + Statement.__init__(self, location) + self.adjust_single = adjust_single + + +class ContextDefinition(Statement): + def __init__(self, location, ex_or_in, left=[], right=[]): + Statement.__init__(self, location) + self.ex_or_in = ex_or_in + self.left = left + self.right = right + + +class AnchorDefinition(Statement): + def __init__(self, location, name, gid, glyph_name, component, locked, + pos): + Statement.__init__(self, location) + self.name = name + self.gid = gid + self.glyph_name = glyph_name + self.component = component + self.locked = locked + self.pos = pos + + +class SettingDefinition(Statement): + def __init__(self, location, name, value): + Statement.__init__(self, location) + self.name = name + self.value = value diff -Nru fonttools-3.0/Lib/fontTools/voltLib/error.py fonttools-3.21.2/Lib/fontTools/voltLib/error.py --- fonttools-3.0/Lib/fontTools/voltLib/error.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/voltLib/error.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 
@@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals + + +class VoltLibError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + path, line, column = self.location + return "%s:%d:%d: %s" % (path, line, column, message) + else: + return message diff -Nru fonttools-3.0/Lib/fontTools/voltLib/__init__.py fonttools-3.21.2/Lib/fontTools/voltLib/__init__.py --- fonttools-3.0/Lib/fontTools/voltLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/voltLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +"""fontTools.voltLib -- a package for dealing with Visual OpenType Layout Tool +(VOLT) files.""" + +# See +# http://www.microsoft.com/typography/VOLT.mspx diff -Nru fonttools-3.0/Lib/fontTools/voltLib/lexer.py fonttools-3.21.2/Lib/fontTools/voltLib/lexer.py --- fonttools-3.0/Lib/fontTools/voltLib/lexer.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/voltLib/lexer.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,98 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.voltLib.error import VoltLibError + +class Lexer(object): + NUMBER = "NUMBER" + STRING = "STRING" + NAME = "NAME" + NEWLINE = "NEWLINE" + + CHAR_WHITESPACE_ = " \t" + CHAR_NEWLINE_ = "\r\n" + CHAR_DIGIT_ = "0123456789" + CHAR_UC_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + CHAR_LC_LETTER_ = "abcdefghijklmnopqrstuvwxyz" + CHAR_UNDERSCORE_ = "_" + CHAR_PERIOD_ = "." 
+ CHAR_NAME_START_ = CHAR_UC_LETTER_ + CHAR_LC_LETTER_ + CHAR_PERIOD_ + \ + CHAR_UNDERSCORE_ + CHAR_NAME_CONTINUATION_ = CHAR_NAME_START_ + CHAR_DIGIT_ + + def __init__(self, text, filename): + self.filename_ = filename + self.line_ = 1 + self.pos_ = 0 + self.line_start_ = 0 + self.text_ = text + self.text_length_ = len(text) + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # Python 3 + while True: + token_type, token, location = self.next_() + if token_type not in {Lexer.NEWLINE}: + return (token_type, token, location) + + def next_(self): + self.scan_over_(Lexer.CHAR_WHITESPACE_) + column = self.pos_ - self.line_start_ + 1 + location = (self.filename_, self.line_, column) + start = self.pos_ + text = self.text_ + limit = len(text) + if start >= limit: + raise StopIteration() + cur_char = text[start] + next_char = text[start + 1] if start + 1 < limit else None + + if cur_char == "\n": + self.pos_ += 1 + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "\r": + self.pos_ += (2 if next_char == "\n" else 1) + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == '"': + self.pos_ += 1 + self.scan_until_('"\r\n') + if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': + self.pos_ += 1 + return (Lexer.STRING, text[start + 1:self.pos_ - 1], location) + else: + raise VoltLibError("Expected '\"' to terminate string", + location) + if cur_char in Lexer.CHAR_NAME_START_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + token = text[start:self.pos_] + return (Lexer.NAME, token, location) + if cur_char in Lexer.CHAR_DIGIT_: + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.NUMBER, 
int(text[start:self.pos_], 10), location) + raise VoltLibError("Unexpected character: '%s'" % cur_char, + location) + + def scan_over_(self, valid): + p = self.pos_ + while p < self.text_length_ and self.text_[p] in valid: + p += 1 + self.pos_ = p + + def scan_until_(self, stop_at): + p = self.pos_ + while p < self.text_length_ and self.text_[p] not in stop_at: + p += 1 + self.pos_ = p diff -Nru fonttools-3.0/Lib/fontTools/voltLib/parser.py fonttools-3.21.2/Lib/fontTools/voltLib/parser.py --- fonttools-3.0/Lib/fontTools/voltLib/parser.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Lib/fontTools/voltLib/parser.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,644 @@ +from __future__ import ( + print_function, division, absolute_import, unicode_literals) +from collections import OrderedDict +import fontTools.voltLib.ast as ast +from fontTools.voltLib.lexer import Lexer +from fontTools.voltLib.error import VoltLibError +from io import open + +PARSE_FUNCS = { + "DEF_GLYPH": "parse_def_glyph_", + "DEF_GROUP": "parse_def_group_", + "DEF_SCRIPT": "parse_def_script_", + "DEF_LOOKUP": "parse_def_lookup_", + "DEF_ANCHOR": "parse_def_anchor_", + "GRID_PPEM": "parse_ppem_", + "PRESENTATION_PPEM": "parse_ppem_", + "PPOSITIONING_PPEM": "parse_ppem_", + "COMPILER_USEEXTENSIONLOOKUPS": "parse_compiler_flag_", + "COMPILER_USEPAIRPOSFORMAT2": "parse_compiler_flag_", + "CMAP_FORMAT": "parse_cmap_format", +} + + +class Parser(object): + def __init__(self, path): + self.doc_ = ast.VoltFile() + self.glyphs_ = OrderedSymbolTable() + self.groups_ = SymbolTable() + self.anchors_ = {} # dictionary of SymbolTable() keyed by glyph + self.scripts_ = SymbolTable() + self.langs_ = SymbolTable() + self.lookups_ = SymbolTable() + self.next_token_type_, self.next_token_ = (None, None) + self.next_token_location_ = None + with open(path, "r") as f: + self.lexer_ = Lexer(f.read(), path) + self.advance_lexer_() + + def parse(self): + statements = self.doc_.statements + while 
self.next_token_type_ is not None: + self.advance_lexer_() + if self.cur_token_ in PARSE_FUNCS.keys(): + func = getattr(self, PARSE_FUNCS[self.cur_token_]) + statements.append(func()) + elif self.is_cur_keyword_("END"): + if self.next_token_type_ is not None: + raise VoltLibError("Expected the end of the file", + self.cur_token_location_) + return self.doc_ + else: + raise VoltLibError( + "Expected " + ", ".join(sorted(PARSE_FUNCS.keys())), + self.cur_token_location_) + return self.doc_ + + def parse_def_glyph_(self): + assert self.is_cur_keyword_("DEF_GLYPH") + location = self.cur_token_location_ + name = self.expect_string_() + self.expect_keyword_("ID") + gid = self.expect_number_() + if gid < 0: + raise VoltLibError("Invalid glyph ID", self.cur_token_location_) + gunicode = None + if self.next_token_ == "UNICODE": + self.expect_keyword_("UNICODE") + gunicode = [self.expect_number_()] + if gunicode[0] < 0: + raise VoltLibError("Invalid glyph UNICODE", + self.cur_token_location_) + elif self.next_token_ == "UNICODEVALUES": + self.expect_keyword_("UNICODEVALUES") + gunicode = self.parse_unicode_values_() + gtype = None + if self.next_token_ == "TYPE": + self.expect_keyword_("TYPE") + gtype = self.expect_name_() + assert gtype in ("BASE", "LIGATURE", "MARK") + components = None + if self.next_token_ == "COMPONENTS": + self.expect_keyword_("COMPONENTS") + components = self.expect_number_() + self.expect_keyword_("END_GLYPH") + if self.glyphs_.resolve(name) is not None: + raise VoltLibError( + 'Glyph "%s" (gid %i) already defined' % (name, gid), + location + ) + def_glyph = ast.GlyphDefinition(location, name, gid, + gunicode, gtype, components) + self.glyphs_.define(name, def_glyph) + return def_glyph + + def parse_def_group_(self): + assert self.is_cur_keyword_("DEF_GROUP") + location = self.cur_token_location_ + name = self.expect_string_() + enum = None + if self.next_token_ == "ENUM": + self.expect_keyword_("ENUM") + enum = self.parse_enum_() + 
self.expect_keyword_("END_GROUP") + if self.groups_.resolve(name) is not None: + raise VoltLibError( + 'Glyph group "%s" already defined, ' + 'group names are case insensitive' % name, + location + ) + def_group = ast.GroupDefinition(location, name, enum) + self.groups_.define(name, def_group) + return def_group + + def parse_def_script_(self): + assert self.is_cur_keyword_("DEF_SCRIPT") + location = self.cur_token_location_ + name = None + if self.next_token_ == "NAME": + self.expect_keyword_("NAME") + name = self.expect_string_() + self.expect_keyword_("TAG") + tag = self.expect_string_() + if self.scripts_.resolve(tag) is not None: + raise VoltLibError( + 'Script "%s" already defined, ' + 'script tags are case insensitive' % tag, + location + ) + self.langs_.enter_scope() + langs = [] + while self.next_token_ != "END_SCRIPT": + self.advance_lexer_() + lang = self.parse_langsys_() + self.expect_keyword_("END_LANGSYS") + if self.langs_.resolve(lang.tag) is not None: + raise VoltLibError( + 'Language "%s" already defined in script "%s", ' + 'language tags are case insensitive' % (lang.tag, tag), + location + ) + self.langs_.define(lang.tag, lang) + langs.append(lang) + self.expect_keyword_("END_SCRIPT") + self.langs_.exit_scope() + def_script = ast.ScriptDefinition(location, name, tag, langs) + self.scripts_.define(tag, def_script) + return def_script + + def parse_langsys_(self): + assert self.is_cur_keyword_("DEF_LANGSYS") + location = self.cur_token_location_ + name = None + if self.next_token_ == "NAME": + self.expect_keyword_("NAME") + name = self.expect_string_() + self.expect_keyword_("TAG") + tag = self.expect_string_() + features = [] + while self.next_token_ != "END_LANGSYS": + self.advance_lexer_() + feature = self.parse_feature_() + self.expect_keyword_("END_FEATURE") + features.append(feature) + def_langsys = ast.LangSysDefinition(location, name, tag, features) + return def_langsys + + def parse_feature_(self): + assert 
self.is_cur_keyword_("DEF_FEATURE") + location = self.cur_token_location_ + self.expect_keyword_("NAME") + name = self.expect_string_() + self.expect_keyword_("TAG") + tag = self.expect_string_() + lookups = [] + while self.next_token_ != "END_FEATURE": + # self.advance_lexer_() + self.expect_keyword_("LOOKUP") + lookup = self.expect_string_() + lookups.append(lookup) + feature = ast.FeatureDefinition(location, name, tag, lookups) + return feature + + def parse_def_lookup_(self): + assert self.is_cur_keyword_("DEF_LOOKUP") + location = self.cur_token_location_ + name = self.expect_string_() + if not name[0].isalpha(): + raise VoltLibError( + 'Lookup name "%s" must start with a letter' % name, + location + ) + if self.lookups_.resolve(name) is not None: + raise VoltLibError( + 'Lookup "%s" already defined, ' + 'lookup names are case insensitive' % name, + location + ) + process_base = True + if self.next_token_ == "PROCESS_BASE": + self.advance_lexer_() + elif self.next_token_ == "SKIP_BASE": + self.advance_lexer_() + process_base = False + process_marks = True + if self.next_token_ == "PROCESS_MARKS": + self.advance_lexer_() + if self.next_token_ == "MARK_GLYPH_SET": + self.advance_lexer_() + process_marks = self.expect_string_() + elif self.next_token_type_ == Lexer.STRING: + process_marks = self.expect_string_() + elif self.next_token_ == "ALL": + self.advance_lexer_() + else: + raise VoltLibError( + "Expected ALL, MARK_GLYPH_SET or an ID. 
" + "Got %s" % (self.next_token_type_), + location) + elif self.next_token_ == "SKIP_MARKS": + self.advance_lexer_() + process_marks = False + direction = None + if self.next_token_ == "DIRECTION": + self.expect_keyword_("DIRECTION") + direction = self.expect_name_() + assert direction in ("LTR", "RTL") + reversal = None + if self.next_token_ == "REVERSAL": + self.expect_keyword_("REVERSAL") + reversal = True + comments = None + if self.next_token_ == "COMMENTS": + self.expect_keyword_("COMMENTS") + comments = self.expect_string_() + context = [] + while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"): + context = self.parse_context_() + as_pos_or_sub = self.expect_name_() + sub = None + pos = None + if as_pos_or_sub == "AS_SUBSTITUTION": + sub = self.parse_substitution_(reversal) + elif as_pos_or_sub == "AS_POSITION": + pos = self.parse_position_() + else: + raise VoltLibError( + "Expected AS_SUBSTITUTION or AS_POSITION. " + "Got %s" % (as_pos_or_sub), + location) + def_lookup = ast.LookupDefinition( + location, name, process_base, process_marks, direction, reversal, + comments, context, sub, pos) + self.lookups_.define(name, def_lookup) + return def_lookup + + def parse_context_(self): + location = self.cur_token_location_ + contexts = [] + while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"): + side = None + coverage = None + ex_or_in = self.expect_name_() + # side_contexts = [] # XXX + if self.next_token_ != "END_CONTEXT": + left = [] + right = [] + while self.next_token_ in ("LEFT", "RIGHT"): + side = self.expect_name_() + coverage = self.parse_coverage_() + if side == "LEFT": + left.append(coverage) + else: + right.append(coverage) + self.expect_keyword_("END_CONTEXT") + context = ast.ContextDefinition(location, ex_or_in, left, + right) + contexts.append(context) + else: + self.expect_keyword_("END_CONTEXT") + return contexts + + def parse_substitution_(self, reversal): + assert self.is_cur_keyword_("AS_SUBSTITUTION") + location = 
self.cur_token_location_ + src = [] + dest = [] + if self.next_token_ != "SUB": + raise VoltLibError("Expected SUB", location) + while self.next_token_ == "SUB": + self.expect_keyword_("SUB") + src.append(self.parse_coverage_()) + self.expect_keyword_("WITH") + dest.append(self.parse_coverage_()) + self.expect_keyword_("END_SUB") + self.expect_keyword_("END_SUBSTITUTION") + max_src = max([len(cov) for cov in src]) + max_dest = max([len(cov) for cov in dest]) + # many to many or mixed is invalid + if ((max_src > 1 and max_dest > 1) or + (reversal and (max_src > 1 or max_dest > 1))): + raise VoltLibError( + "Invalid substitution type", + location) + mapping = OrderedDict(zip(tuple(src), tuple(dest))) + if max_src == 1 and max_dest == 1: + if reversal: + sub = ast.SubstitutionReverseChainingSingleDefinition( + location, mapping) + else: + sub = ast.SubstitutionSingleDefinition(location, mapping) + elif max_src == 1 and max_dest > 1: + sub = ast.SubstitutionMultipleDefinition(location, mapping) + elif max_src > 1 and max_dest == 1: + sub = ast.SubstitutionLigatureDefinition(location, mapping) + return sub + + def parse_position_(self): + assert self.is_cur_keyword_("AS_POSITION") + location = self.cur_token_location_ + pos_type = self.expect_name_() + if pos_type not in ( + "ATTACH", "ATTACH_CURSIVE", "ADJUST_PAIR", "ADJUST_SINGLE"): + raise VoltLibError( + "Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE", + location) + if pos_type == "ATTACH": + position = self.parse_attach_() + elif pos_type == "ATTACH_CURSIVE": + position = self.parse_attach_cursive_() + elif pos_type == "ADJUST_PAIR": + position = self.parse_adjust_pair_() + elif pos_type == "ADJUST_SINGLE": + position = self.parse_adjust_single_() + self.expect_keyword_("END_POSITION") + return position + + def parse_attach_(self): + assert self.is_cur_keyword_("ATTACH") + location = self.cur_token_location_ + coverage = self.parse_coverage_() + coverage_to = [] + self.expect_keyword_("TO") + while 
self.next_token_ != "END_ATTACH": + cov = self.parse_coverage_() + self.expect_keyword_("AT") + self.expect_keyword_("ANCHOR") + anchor_name = self.expect_string_() + coverage_to.append((cov, anchor_name)) + self.expect_keyword_("END_ATTACH") + position = ast.PositionAttachDefinition( + location, coverage, coverage_to) + return position + + def parse_attach_cursive_(self): + assert self.is_cur_keyword_("ATTACH_CURSIVE") + location = self.cur_token_location_ + coverages_exit = [] + coverages_enter = [] + while self.next_token_ != "ENTER": + self.expect_keyword_("EXIT") + coverages_exit.append(self.parse_coverage_()) + while self.next_token_ != "END_ATTACH": + self.expect_keyword_("ENTER") + coverages_enter.append(self.parse_coverage_()) + self.expect_keyword_("END_ATTACH") + position = ast.PositionAttachCursiveDefinition( + location, coverages_exit, coverages_enter) + return position + + def parse_adjust_pair_(self): + assert self.is_cur_keyword_("ADJUST_PAIR") + location = self.cur_token_location_ + coverages_1 = [] + coverages_2 = [] + adjust_pair = {} + while self.next_token_ == "FIRST": + self.advance_lexer_() + coverage_1 = self.parse_coverage_() + coverages_1.append(coverage_1) + while self.next_token_ == "SECOND": + self.advance_lexer_() + coverage_2 = self.parse_coverage_() + coverages_2.append(coverage_2) + while self.next_token_ != "END_ADJUST": + id_1 = self.expect_number_() + id_2 = self.expect_number_() + self.expect_keyword_("BY") + pos_1 = self.parse_pos_() + pos_2 = self.parse_pos_() + adjust_pair[(id_1, id_2)] = (pos_1, pos_2) + self.expect_keyword_("END_ADJUST") + position = ast.PositionAdjustPairDefinition( + location, coverages_1, coverages_2, adjust_pair) + return position + + def parse_adjust_single_(self): + assert self.is_cur_keyword_("ADJUST_SINGLE") + location = self.cur_token_location_ + adjust_single = [] + while self.next_token_ != "END_ADJUST": + coverages = self.parse_coverage_() + self.expect_keyword_("BY") + pos = self.parse_pos_() + 
adjust_single.append((coverages, pos)) + self.expect_keyword_("END_ADJUST") + position = ast.PositionAdjustSingleDefinition( + location, adjust_single) + return position + + def parse_def_anchor_(self): + assert self.is_cur_keyword_("DEF_ANCHOR") + location = self.cur_token_location_ + name = self.expect_string_() + self.expect_keyword_("ON") + gid = self.expect_number_() + self.expect_keyword_("GLYPH") + glyph_name = self.expect_name_() + # check for duplicate anchor names on this glyph + if (glyph_name in self.anchors_ + and self.anchors_[glyph_name].resolve(name) is not None): + raise VoltLibError( + 'Anchor "%s" already defined, ' + 'anchor names are case insensitive' % name, + location + ) + self.expect_keyword_("COMPONENT") + component = self.expect_number_() + if self.next_token_ == "LOCKED": + locked = True + self.advance_lexer_() + else: + locked = False + self.expect_keyword_("AT") + pos = self.parse_pos_() + self.expect_keyword_("END_ANCHOR") + anchor = ast.AnchorDefinition(location, name, gid, glyph_name, + component, locked, pos) + if glyph_name not in self.anchors_: + self.anchors_[glyph_name] = SymbolTable() + self.anchors_[glyph_name].define(name, anchor) + return anchor + + def parse_adjust_by_(self): + self.advance_lexer_() + assert self.is_cur_keyword_("ADJUST_BY") + adjustment = self.expect_number_() + self.expect_keyword_("AT") + size = self.expect_number_() + return adjustment, size + + def parse_pos_(self): + # VOLT syntax doesn't seem to take device Y advance + self.advance_lexer_() + location = self.cur_token_location_ + assert self.is_cur_keyword_("POS"), location + adv = None + dx = None + dy = None + adv_adjust_by = {} + dx_adjust_by = {} + dy_adjust_by = {} + if self.next_token_ == "ADV": + self.advance_lexer_() + adv = self.expect_number_() + while self.next_token_ == "ADJUST_BY": + adjustment, size = self.parse_adjust_by_() + adv_adjust_by[size] = adjustment + if self.next_token_ == "DX": + self.advance_lexer_() + dx = 
self.expect_number_() + while self.next_token_ == "ADJUST_BY": + adjustment, size = self.parse_adjust_by_() + dx_adjust_by[size] = adjustment + if self.next_token_ == "DY": + self.advance_lexer_() + dy = self.expect_number_() + while self.next_token_ == "ADJUST_BY": + adjustment, size = self.parse_adjust_by_() + dy_adjust_by[size] = adjustment + self.expect_keyword_("END_POS") + return (adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by) + + def parse_unicode_values_(self): + location = self.cur_token_location_ + try: + unicode_values = self.expect_string_().split(",") + unicode_values = [ + int(uni[2:], 16) + for uni in unicode_values if uni != ""] + except ValueError as err: + raise VoltLibError(str(err), location) + return unicode_values if unicode_values != [] else None + + def parse_enum_(self): + assert self.is_cur_keyword_("ENUM") + location = self.cur_token_location_ + enum = self.parse_coverage_() + self.expect_keyword_("END_ENUM") + return enum + + def parse_coverage_(self): + coverage = [] + location = self.cur_token_location_ + while self.next_token_ in ("GLYPH", "GROUP", "RANGE", "ENUM"): + if self.next_token_ == "ENUM": + self.advance_lexer_() + enum = self.parse_enum_() + coverage.append(enum) + elif self.next_token_ == "GLYPH": + self.expect_keyword_("GLYPH") + name = self.expect_string_() + coverage.append(name) + elif self.next_token_ == "GROUP": + self.expect_keyword_("GROUP") + name = self.expect_string_() + # resolved_group = self.groups_.resolve(name) + group = (name,) + coverage.append(group) + # if resolved_group is not None: + # coverage.extend(resolved_group.enum) + # # TODO: check that group exists after all groups are defined + # else: + # group = (name,) + # coverage.append(group) + # # raise VoltLibError( + # # 'Glyph group "%s" is not defined' % name, + # # location) + elif self.next_token_ == "RANGE": + self.expect_keyword_("RANGE") + start = self.expect_string_() + self.expect_keyword_("TO") + end = self.expect_string_() + 
coverage.append((start, end)) + return tuple(coverage) + + def resolve_group(self, group_name): + return self.groups_.resolve(group_name) + + def glyph_range(self, start, end): + rng = self.glyphs_.range(start, end) + return frozenset(rng) + + def parse_ppem_(self): + location = self.cur_token_location_ + ppem_name = self.cur_token_ + value = self.expect_number_() + setting = ast.SettingDefinition(location, ppem_name, value) + return setting + + def parse_compiler_flag_(self): + location = self.cur_token_location_ + flag_name = self.cur_token_ + value = True + setting = ast.SettingDefinition(location, flag_name, value) + return setting + + def parse_cmap_format(self): + location = self.cur_token_location_ + name = self.cur_token_ + value = (self.expect_number_(), self.expect_number_(), + self.expect_number_()) + setting = ast.SettingDefinition(location, name, value) + return setting + + def is_cur_keyword_(self, k): + return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k) + + def expect_string_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.STRING: + raise VoltLibError("Expected a string", self.cur_token_location_) + return self.cur_token_ + + def expect_keyword_(self, keyword): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword: + return self.cur_token_ + raise VoltLibError("Expected \"%s\"" % keyword, + self.cur_token_location_) + + def expect_name_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + return self.cur_token_ + raise VoltLibError("Expected a name", self.cur_token_location_) + + def expect_number_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.NUMBER: + raise VoltLibError("Expected a number", self.cur_token_location_) + return self.cur_token_ + + def advance_lexer_(self): + self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( + self.next_token_type_, self.next_token_, self.next_token_location_) + try: + 
(self.next_token_type_, self.next_token_, + self.next_token_location_) = self.lexer_.next() + except StopIteration: + self.next_token_type_, self.next_token_ = (None, None) + + +class SymbolTable(object): + def __init__(self): + self.scopes_ = [{}] + + def enter_scope(self): + self.scopes_.append({}) + + def exit_scope(self): + self.scopes_.pop() + + def define(self, name, item): + self.scopes_[-1][name] = item + + def resolve(self, name, case_insensitive=True): + for scope in reversed(self.scopes_): + item = scope.get(name) + if item: + return item + if case_insensitive: + for key in scope: + if key.lower() == name.lower(): + return scope[key] + return None + + +class OrderedSymbolTable(SymbolTable): + def __init__(self): + self.scopes_ = [OrderedDict()] + + def enter_scope(self): + self.scopes_.append(OrderedDict()) + + def resolve(self, name, case_insensitive=False): + SymbolTable.resolve(self, name, case_insensitive=case_insensitive) + + def range(self, start, end): + for scope in reversed(self.scopes_): + if start in scope and end in scope: + start_idx = list(scope.keys()).index(start) + end_idx = list(scope.keys()).index(end) + return list(scope.keys())[start_idx:end_idx + 1] + return None diff -Nru fonttools-3.0/Lib/sstruct.py fonttools-3.21.2/Lib/sstruct.py --- fonttools-3.0/Lib/sstruct.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/sstruct.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -# Added here for backward compatibility - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -from fontTools.misc.sstruct import * -from fontTools.misc.sstruct import __doc__ diff -Nru fonttools-3.0/Lib/xmlWriter.py fonttools-3.21.2/Lib/xmlWriter.py --- fonttools-3.0/Lib/xmlWriter.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Lib/xmlWriter.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -# Added back here for backward compatibility - -from __future__ import print_function, 
division, absolute_import -from fontTools.misc.py23 import * - -from fontTools.misc.xmlWriter import * -from fontTools.misc.xmlWriter import __doc__ diff -Nru fonttools-3.0/LICENSE fonttools-3.21.2/LICENSE --- fonttools-3.0/LICENSE 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/LICENSE 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Just van Rossum + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff -Nru fonttools-3.0/LICENSE.external fonttools-3.21.2/LICENSE.external --- fonttools-3.0/LICENSE.external 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/LICENSE.external 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,148 @@ +FontTools includes the following font projects for testing purposes, which are +under SIL Open Font License, Version 1.1: + +Lobster + Copyright (c) 2010, Pablo Impallari (www.impallari.com|impallari@gmail.com), + with Reserved Font Name Lobster. 
+ This Font Software is licensed under the SIL Open Font License, Version 1.1. + +Noto Fonts + This Font Software is licensed under the SIL Open Font License, Version 1.1. + +XITS font project + Copyright (c) 2001-2010 by the STI Pub Companies, consisting of the American + Institute of Physics, the American Chemical Society, the American + Mathematical Society, the American Physical Society, Elsevier, Inc., and The + Institute of Electrical and Electronic Engineers, Inc. (www.stixfonts.org), + with Reserved Font Name STIX Fonts, STIX Fonts (TM) is a trademark of The + Institute of Electrical and Electronics Engineers, Inc. + + Portions copyright (c) 1998-2003 by MicroPress, Inc. + (www.micropress-inc.com), with Reserved Font Name TM Math. To obtain + additional mathematical fonts, please contact MicroPress, Inc., 68-30 Harrow + Street, Forest Hills, NY 11375, USA, Phone: (718) 575-1816. + + Portions copyright (c) 1990 by Elsevier, Inc. + + This Font Software is licensed under the SIL Open Font License, Version 1.1. + +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font +creation efforts of academic and linguistic communities, and to +provide a free and open framework in which fonts may be shared and +improved in partnership with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. 
The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply to +any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software +components as distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, +deleting, or substituting -- in part or in whole -- any of the +components of the Original Version, by changing formats or by porting +the Font Software to a new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, +modify, redistribute, and sell modified and unmodified copies of the +Font Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, in +Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. 
+ +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the +corresponding Copyright Holder. This restriction only applies to the +primary font name as presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created using +the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. + +===== + +FontTools includes Adobe AGL & AGLFN, which is under 3-clauses BSD license: + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. 
+ +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +Neither the name of Adobe Systems Incorporated nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff -Nru fonttools-3.0/LICENSE.txt fonttools-3.21.2/LICENSE.txt --- fonttools-3.0/LICENSE.txt 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/LICENSE.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -Copyright 1999-2004 -by Just van Rossum, Letterror, The Netherlands. - - All Rights Reserved - -Permission to use, copy, modify, and distribute this software and -its documentation for any purpose and without fee is hereby granted, -provided that the above copyright notice appear in all copies and -that both that copyright notice and this permission notice appear -in supporting documentation, and that the names of Just van Rossum -or Letterror not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior -permission. 
- -JUST VAN ROSSUM AND LETTERROR DISCLAIM ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL JUST VAN ROSSUM OR -LETTERROR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL -DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR -PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - - -just@letterror.com diff -Nru fonttools-3.0/Makefile fonttools-3.21.2/Makefile --- fonttools-3.0/Makefile 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Makefile 2018-01-08 12:40:40.000000000 +0000 @@ -1,14 +1,22 @@ all: - ./setup.py bdist + ./setup.py build dist: - ./setup.py sdist + ./setup.py sdist bdist_wheel install: - ./setup.py install + pip install --ignore-installed . install-user: - ./setup.py install --user + pip install --ignore-installed --user . + +uninstall: + pip uninstall --yes fonttools check: all ./run-tests.sh + +clean: + ./setup.py clean --all + +.PHONY: all dist install install-user uninstall check clean diff -Nru fonttools-3.0/MANIFEST.in fonttools-3.21.2/MANIFEST.in --- fonttools-3.0/MANIFEST.in 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/MANIFEST.in 2018-01-08 12:40:40.000000000 +0000 @@ -1,13 +1,34 @@ -include LICENSE.txt -include MANIFEST.in -include Doc/ttx.1 -include Doc/ChangeLog -include Doc/*.txt -include Doc/*.html +include README.rst +include LICENSE +include LICENSE.external +include NEWS.rst +include Makefile +include fonttools +include Snippets/*.py +include Snippets/README.md include MetaTools/*.py -include Windows/mcmillan.bat -include Windows/ttx.ico -include Windows/README.TXT -include Windows/fonttools-win-setup.iss -include Windows/fonttools-win-setup.txt include Lib/fontTools/ttLib/tables/table_API_readme.txt + +include *requirements.txt +include tox.ini +include run-tests.sh + +include .appveyor.yml +include 
.codecov.yml +include .coveragerc +include .travis.yml +recursive-include .travis *.sh + +include Doc/Makefile +include Doc/make.bat +recursive-include Doc/man/man1 *.1 +recursive-include Doc/source *.py *.rst + +recursive-include Tests *.py *.ttx *.otx *.fea *.feax +recursive-include Tests *.ttc *.ttf *.dfont *.woff *.woff2 +recursive-include Tests *.otf *.ttx.* +recursive-include Tests *.glif *.plist +recursive-include Tests *.txt README +recursive-include Tests *.lwfn *.pfa *.pfb +recursive-include Tests *.xml *.designspace *.bin +recursive-include Tests *.afm diff -Nru fonttools-3.0/MetaTools/buildChangeLog.py fonttools-3.21.2/MetaTools/buildChangeLog.py --- fonttools-3.0/MetaTools/buildChangeLog.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/MetaTools/buildChangeLog.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,10 +0,0 @@ -#! /usr/bin/env python - -import os, sys - -fontToolsDir = os.path.dirname(os.path.dirname(os.path.normpath( - os.path.join(os.getcwd(), sys.argv[0])))) - -os.chdir(fontToolsDir) -os.system("git2cl > Doc/ChangeLog") -print("done.") diff -Nru fonttools-3.0/MetaTools/buildTableList.py fonttools-3.21.2/MetaTools/buildTableList.py --- fonttools-3.0/MetaTools/buildTableList.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/MetaTools/buildTableList.py 2018-01-08 12:40:40.000000000 +0000 @@ -4,13 +4,14 @@ import os import glob from fontTools.ttLib import identifierToTag +import textwrap fontToolsDir = os.path.dirname(os.path.dirname(os.path.join(os.getcwd(), sys.argv[0]))) fontToolsDir= os.path.normpath(fontToolsDir) tablesDir = os.path.join(fontToolsDir, "Lib", "fontTools", "ttLib", "tables") -docFile = os.path.join(fontToolsDir, "Doc", "documentation.html") +docFile = os.path.join(fontToolsDir, "README.rst") names = glob.glob1(tablesDir, "*.py") @@ -56,14 +57,17 @@ file.close() -begin = "" -end = "" +begin = ".. begin table list\n.. code::\n" +end = ".. 
end table list" doc = open(docFile).read() beginPos = doc.find(begin) assert beginPos > 0 beginPos = beginPos + len(begin) + 1 endPos = doc.find(end) -doc = doc[:beginPos] + ", ".join(tables[:-1]) + " and " + tables[-1] + "\n" + doc[endPos:] +lines = textwrap.wrap(", ".join(tables[:-1]) + " and " + tables[-1], 66) +blockquote = "\n".join(" "*4 + line for line in lines) + "\n" + +doc = doc[:beginPos] + blockquote + doc[endPos:] open(docFile, "w").write(doc) diff -Nru fonttools-3.0/MetaTools/buildUCD.py fonttools-3.21.2/MetaTools/buildUCD.py --- fonttools-3.0/MetaTools/buildUCD.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/MetaTools/buildUCD.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,293 @@ +#!/usr/bin/env python +""" +Tools to parse data files from the Unicode Character Database. +""" + +from __future__ import print_function, absolute_import, division +from __future__ import unicode_literals + +try: + from urllib.request import urlopen +except ImportError: + from urllib2 import urlopen +from contextlib import closing, contextmanager +import re +from codecs import iterdecode +import logging +import os +from io import open +from os.path import abspath, dirname, join as pjoin, pardir, sep + + +try: # pragma: no cover + unicode +except NameError: + unicode = str + + +UNIDATA_URL = "https://unicode.org/Public/UNIDATA/" +UNIDATA_LICENSE_URL = "http://unicode.org/copyright.html#License" + +# by default save output files to ../Lib/fontTools/unicodedata/ +UNIDATA_PATH = pjoin(abspath(dirname(__file__)), pardir, + "Lib", "fontTools", "unicodedata") + sep + +SRC_ENCODING = "# -*- coding: utf-8 -*-\n" + +NOTICE = "# NOTE: This file was auto-generated with MetaTools/buildUCD.py.\n" + +MAX_UNICODE = 0x10FFFF + +log = logging.getLogger() + + +@contextmanager +def open_unidata_file(filename): + """Open a text file from https://unicode.org/Public/UNIDATA/""" + url = UNIDATA_URL + filename + with closing(urlopen(url)) as response: + yield iterdecode(response, 
encoding="utf-8") + + +def parse_unidata_header(infile): + """Read the top header of data files, until the first line + that does not start with '#'. + """ + header = [] + line = next(infile) + while line.startswith("#"): + header.append(line) + line = next(infile) + return "".join(header) + + +def parse_range_properties(infile, default=None, is_set=False): + """Parse a Unicode data file containing a column with one character or + a range of characters, and another column containing a property value + separated by a semicolon. Comments after '#' are ignored. + + If the ranges defined in the data file are not continuous, assign the + 'default' property to the unassigned codepoints. + + Return a list of (start, end, property_name) tuples. + """ + ranges = [] + line_regex = re.compile( + r"^" + r"([0-9A-F]{4,6})" # first character code + r"(?:\.\.([0-9A-F]{4,6}))?" # optional second character code + r"\s*;\s*" + r"([^#]+)") # everything up to the potential comment + for line in infile: + match = line_regex.match(line) + if not match: + continue + + first, last, data = match.groups() + if last is None: + last = first + + first = int(first, 16) + last = int(last, 16) + data = str(data.rstrip()) + + ranges.append((first, last, data)) + + ranges.sort() + + if isinstance(default, unicode): + default = str(default) + + # fill the gaps between explicitly defined ranges + last_start, last_end = -1, -1 + full_ranges = [] + for start, end, value in ranges: + assert last_end < start + assert start <= end + if start - last_end > 1: + full_ranges.append((last_end+1, start-1, default)) + if is_set: + value = set(value.split()) + full_ranges.append((start, end, value)) + last_start, last_end = start, end + if last_end != MAX_UNICODE: + full_ranges.append((last_end+1, MAX_UNICODE, default)) + + # reduce total number of ranges by combining continuous ones + last_start, last_end, last_value = full_ranges.pop(0) + merged_ranges = [] + for start, end, value in full_ranges: + if value == 
last_value: + continue + else: + merged_ranges.append((last_start, start-1, last_value)) + last_start, line_end, last_value = start, end, value + merged_ranges.append((last_start, MAX_UNICODE, last_value)) + + # make sure that the ranges cover the full unicode repertoire + assert merged_ranges[0][0] == 0 + for (cs, ce, cv), (ns, ne, nv) in zip(merged_ranges, merged_ranges[1:]): + assert ce+1 == ns + assert merged_ranges[-1][1] == MAX_UNICODE + + return merged_ranges + + +def parse_semicolon_separated_data(infile): + """Parse a Unicode data file where each line contains a lists of values + separated by a semicolon (e.g. "PropertyValueAliases.txt"). + The number of the values on different lines may be different. + + Returns a list of lists each containing the values as strings. + """ + data = [] + for line in infile: + line = line.split('#', 1)[0].strip() # remove the comment + if not line: + continue + fields = [str(field.strip()) for field in line.split(';')] + data.append(fields) + return data + + +def _set_repr(value): + return 'None' if value is None else "{{{}}}".format( + ", ".join(repr(v) for v in sorted(value))) + + +def build_ranges(filename, local_ucd=None, output_path=None, + default=None, is_set=False, aliases=None): + """Fetch 'filename' UCD data file from Unicode official website, parse + the property ranges and values and write them as two Python lists + to 'fontTools.unicodedata..py'. + + 'aliases' is an optional mapping of property codes (short names) to long + name aliases (list of strings, with the first item being the preferred + alias). When this is provided, the property values are written using the + short notation, and an additional 'NAMES' dict with the aliases is + written to the output module. + + To load the data file from a local directory, you can use the + 'local_ucd' argument. 
+ """ + modname = os.path.splitext(filename)[0] + ".py" + if not output_path: + output_path = UNIDATA_PATH + modname + + if local_ucd: + log.info("loading '%s' from local directory '%s'", filename, local_ucd) + cm = open(pjoin(local_ucd, filename), "r", encoding="utf-8") + else: + log.info("downloading '%s' from '%s'", filename, UNIDATA_URL) + cm = open_unidata_file(filename) + + with cm as f: + header = parse_unidata_header(f) + ranges = parse_range_properties(f, default=default, is_set=is_set) + + if aliases: + reversed_aliases = {normalize(v[0]): k for k, v in aliases.items()} + max_value_length = 6 # 4-letter tags plus two quotes for repr + else: + max_value_length = min(56, max(len(repr(v)) for _, _, v in ranges)) + + with open(output_path, "w", encoding="utf-8") as f: + f.write(SRC_ENCODING) + f.write("#\n") + f.write(NOTICE) + f.write("# Source: {}{}\n".format(UNIDATA_URL, filename)) + f.write("# License: {}\n".format(UNIDATA_LICENSE_URL)) + f.write("#\n") + f.write(header+"\n\n") + + f.write("RANGES = [\n") + for first, last, value in ranges: + f.write(" 0x{:0>4X}, # .. 
0x{:0>4X} ; {}\n".format( + first, last, _set_repr(value) if is_set else value)) + f.write("]\n") + + f.write("\n") + f.write("VALUES = [\n") + for first, last, value in ranges: + comment = "# {:0>4X}..{:0>4X}".format(first, last) + if is_set: + value_repr = "{},".format(_set_repr(value)) + else: + if aliases: + # append long name to comment and use the short code + comment += " ; {}".format(value) + value = reversed_aliases[normalize(value)] + value_repr = "{!r},".format(value) + f.write(" {} {}\n".format( + value_repr.ljust(max_value_length+1), comment)) + f.write("]\n") + + if aliases: + f.write("\n") + f.write("NAMES = {\n") + for value, names in sorted(aliases.items()): + # we only write the first preferred alias + f.write(" {!r}: {!r},\n".format(value, names[0])) + f.write("}\n") + + log.info("saved new file: '%s'", os.path.normpath(output_path)) + + +_normalize_re = re.compile(r"[-_ ]+") + +def normalize(string): + """Remove case, strip space, '-' and '_' for loose matching.""" + return _normalize_re.sub("", string).lower() + + +def parse_property_value_aliases(property_tag, local_ucd=None): + """Fetch the current 'PropertyValueAliases.txt' from the Unicode website, + parse the values for the specified 'property_tag' and return a dictionary + of name aliases (list of strings) keyed by short value codes (strings). + + To load the data file from a local directory, you can use the + 'local_ucd' argument. 
+ """ + filename = "PropertyValueAliases.txt" + if local_ucd: + log.info("loading '%s' from local directory '%s'", filename, local_ucd) + cm = open(pjoin(local_ucd, filename), "r", encoding="utf-8") + else: + log.info("downloading '%s' from '%s'", filename, UNIDATA_URL) + cm = open_unidata_file(filename) + + with cm as f: + header = parse_unidata_header(f) + data = parse_semicolon_separated_data(f) + + aliases = {item[1]: item[2:] for item in data + if item[0] == property_tag} + + return aliases + + +def main(): + import argparse + + parser = argparse.ArgumentParser( + description="Generate fontTools.unicodedata from UCD data files") + parser.add_argument( + '--ucd-path', help="Path to local folder containing UCD data files") + parser.add_argument('-q', '--quiet', action="store_true") + options = parser.parse_args() + + level = "WARNING" if options.quiet else "INFO" + logging.basicConfig(level=level, format="%(message)s") + + build_ranges("Blocks.txt", local_ucd=options.ucd_path, default="No_Block") + + script_aliases = parse_property_value_aliases("sc", options.ucd_path) + build_ranges("Scripts.txt", local_ucd=options.ucd_path, default="Unknown", + aliases=script_aliases) + build_ranges("ScriptExtensions.txt", local_ucd=options.ucd_path, + is_set=True) + + +if __name__ == "__main__": + import sys + sys.exit(main()) diff -Nru fonttools-3.0/NEWS.rst fonttools-3.21.2/NEWS.rst --- fonttools-3.0/NEWS.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/NEWS.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,775 @@ +3.21.2 (released 2018-01-08) +---------------------------- + +- [varLib] Fixed merging PairPos Format1/2 with missing subtables (#1125). + +3.21.1 (released 2018-01-03) +---------------------------- + +- [feaLib] Allow mixed single/multiple substitutions (#612) +- Added missing ``*.afm`` test assets to MAINFEST.in (#1137). +- Fixed dumping ``SVG`` tables containing color palettes (#1124). 
+ +3.21.0 (released 2017-12-18) +---------------------------- + +- [cmap] when compiling format6 subtable, don't assume gid0 is always called + '.notdef' (1e42224). +- [ot] Allow decompiling fonts with bad Coverage format number (1aafae8). +- Change FontTools licence to MIT (#1127). +- [post] Prune extra names already in standard Mac set (df1e8c7). +- [subset] Delete empty SubrsIndex after subsetting (#994, #1118). +- [varLib] Don't share points in cvar by default, as it currently fails on + some browsers (#1113). +- [afmLib] Make poor old afmLib work on python3. + +3.20.1 (released 2017-11-22) +---------------------------- + +- [unicodedata] Fixed issue with ``script`` and ``script_extension`` functions + returning inconsistent short vs long names. They both return the short four- + letter script codes now. Added ``script_name`` and ``script_code`` functions + to look up the long human-readable script name from the script code, and + viceversa (#1109, #1111). + +3.20.0 (released 2017-11-21) +---------------------------- + +- [unicodedata] Addded new module ``fontTools.unicodedata`` which exports the + same interface as the built-in ``unicodedata`` module, with the addition of + a few functions that are missing from the latter, such as ``script``, + ``script_extension`` and ``block``. Added a ``MetaTools/buildUCD.py`` script + to download and parse data files from the Unicode Character Database and + generate python modules containing lists of ranges and property values. +- [feaLib] Added ``__str__`` method to all ``ast`` elements (delegates to the + ``asFea`` method). +- [feaLib] ``Parser`` constructor now accepts a ``glyphNames`` iterable + instead of ``glyphMap`` dict. The latter still works but with a pending + deprecation warning (#1104). +- [bezierTools] Added arc length calculation functions originally from + ``pens.perimeterPen`` module (#1101). +- [varLib] Started generating STAT table (8af4309). 
Right now it just reflects + the axes, and even that with certain limitations: + * AxisOrdering is set to the order axes are defined, + * Name-table entries are not shared with fvar. +- [py23] Added backports for ``redirect_stdout`` and ``redirect_stderr`` + context managers (#1097). +- [Graphite] Fixed some round-trip bugs (#1093). + +3.19.0 (released 2017-11-06) +---------------------------- + +- [varLib] Try set of used points instead of all points when testing whether to + share points between tuples (#1090). +- [CFF2] Fixed issue with reading/writing PrivateDict BlueValues to TTX file. + Read the commit message 8b02b5a and issue #1030 for more details. + NOTE: this change invalidates all the TTX files containing CFF2 tables + that where dumped with previous verisons of fonttools. + CFF2 Subr items can have values on the stack after the last operator, thus + a ``CFF2Subr`` class was added to accommodate this (#1091). +- [_k_e_r_n] Fixed compilation of AAT kern version=1.0 tables (#1089, #1094) +- [ttLib] Added getBestCmap() convenience method to TTFont class and cmap table + class that returns a preferred Unicode cmap subtable given a list of options + (#1092). +- [morx] Emit more meaningful subtable flags. Implement InsertionMorphAction + +3.18.0 (released 2017-10-30) +---------------------------- + +- [feaLib] Fixed writing back nested glyph classes (#1086). +- [TupleVariation] Reactivated shared points logic, bugfixes (#1009). +- [AAT] Implemented ``morx`` ligature subtables (#1082). +- [reverseContourPen] Keep duplicate lineTo following a moveTo (#1080, + https://github.com/googlei18n/cu2qu/issues/51). +- [varLib.mutator] Suport instantiation of GPOS, GDEF and MVAR (#1079). +- [sstruct] Fixed issue with ``unicode_literals`` and ``struct`` module in + old versions of python 2.7 (#993). + +3.17.0 (released 2017-10-16) +---------------------------- + +- [svgPathPen] Added an ``SVGPathPen`` that translates segment pen commands + into SVG path descriptions. 
Copied from Tal Leming's ``ufo2svg.svgPathPen`` + https://github.com/typesupply/ufo2svg/blob/d69f992/Lib/ufo2svg/svgPathPen.py +- [reverseContourPen] Added ``ReverseContourPen``, a filter pen that draws + contours with the winding direction reversed, while keeping the starting + point (#1071). +- [filterPen] Added ``ContourFilterPen`` to manipulate contours as a whole + rather than segment by segment. +- [arrayTools] Added ``Vector`` class to apply math operations on an array + of numbers, and ``pairwise`` function to loop over pairs of items in an + iterable. +- [varLib] Added support for building and interpolation of ``cvar`` table + (f874cf6, a25a401). + +3.16.0 (released 2017-10-03) +---------------------------- + +- [head] Try using ``SOURCE_DATE_EPOCH`` environment variable when setting + the ``head`` modified timestamp to ensure reproducible builds (#1063). + See https://reproducible-builds.org/specs/source-date-epoch/ +- [VTT] Decode VTT's ``TSI*`` tables text as UTF-8 (#1060). +- Added support for Graphite font tables: Feat, Glat, Gloc, Silf and Sill. + Thanks @mhosken! (#1054). +- [varLib] Default to using axis "name" attribute if "labelname" element + is missing (588f524). +- [merge] Added support for merging Script records. Remove unused features + and lookups after merge (d802580, 556508b). +- Added ``fontTools.svgLib`` package. Includes a parser for SVG Paths that + supports the Pen protocol (#1051). Also, added a snippet to convert SVG + outlines to UFO GLIF (#1053). +- [AAT] Added support for ``ankr``, ``bsln``, ``mort``, ``morx``, ``gcid``, + and ``cidg``. +- [subset] Implemented subsetting of ``prop``, ``opbd``, ``bsln``, ``lcar``. + +3.15.1 (released 2017-08-18) +---------------------------- + +- [otConverters] Implemented ``__add__`` and ``__radd__`` methods on + ``otConverters._LazyList`` that decompile a lazy list before adding + it to another list or ``_LazyList`` instance. 
Fixes an ``AttributeError`` + in the ``subset`` module when attempting to sum ``_LazyList`` objects + (6ef48bd2, 1aef1683). +- [AAT] Support the `opbd` table with optical bounds (a47f6588). +- [AAT] Support `prop` table with glyph properties (d05617b4). + + +3.15.0 (released 2017-08-17) +---------------------------- + +- [AAT] Added support for AAT lookups. The ``lcar`` table can be decompiled + and recompiled; futher work needed to handle ``morx`` table (#1025). +- [subset] Keep (empty) DefaultLangSys for Script 'DFLT' (6eb807b5). +- [subset] Support GSUB/GPOS.FeatureVariations (fe01d87b). +- [varLib] In ``models.supportScalars``, ignore an axis when its peak value + is 0 (fixes #1020). +- [varLib] Add default mappings to all axes in avar to fix rendering issue + in some rasterizers (19c4b377, 04eacf13). +- [varLib] Flatten multiple tail PairPosFormat2 subtables before merging + (c55ef525). +- [ttLib] Added support for recalculating font bounding box in ``CFF`` and + ``head`` tables, and min/max values in ``hhea`` and ``vhea`` tables (#970). + +3.14.0 (released 2017-07-31) +---------------------------- + +- [varLib.merger] Remove Extensions subtables before merging (f7c20cf8). +- [varLib] Initialize the avar segment map with required default entries + (#1014). +- [varLib] Implemented optimal IUP optmiziation (#1019). +- [otData] Add ``AxisValueFormat4`` for STAT table v1.2 from OT v1.8.2 + (#1015). +- [name] Fixed BCP46 language tag for Mac langID=9: 'si' -> 'sl'. +- [subset] Return value from ``_DehintingT2Decompiler.op_hintmask`` + (c0d672ba). +- [cffLib] Allow to get TopDict by index as well as by name (dca96c9c). +- [cffLib] Removed global ``isCFF2`` state; use one set of classes for + both CFF and CFF2, maintaining backward compatibility existing code (#1007). +- [cffLib] Deprecated maxstack operator, per OpenType spec update 1.8.1. +- [cffLib] Added missing default (-100) for UnderlinePosition (#983). 
+- [feaLib] Enable setting nameIDs greater than 255 (#1003). +- [varLib] Recalculate ValueFormat when merging SinglePos (#996). +- [varLib] Do not emit MVAR if there are no entries in the variation store + (#987). +- [ttx] For ``-x`` option, pad with space if table tag length is < 4. + +3.13.1 (released 2017-05-30) +---------------------------- + +- [feaLib.builder] Removed duplicate lookups optimization. The original + lookup order and semantics of the feature file are preserved (#976). + +3.13.0 (released 2017-05-24) +---------------------------- + +- [varLib.mutator] Implement IUP optimization (#969). +- [_g_l_y_f.GlyphCoordinates] Changed ``__bool__()`` semantics to match those + of other iterables (e46f949). Removed ``__abs__()`` (3db5be2). +- [varLib.interpolate_layout] Added ``mapped`` keyword argument to + ``interpolate_layout`` to allow disabling avar mapping: if False (default), + the location is mapped using the map element of the axes in designspace file; + if True, it is assumed that location is in designspace's internal space and + no mapping is performed (#950, #975). +- [varLib.interpolate_layout] Import designspace-loading logic from varLib. +- [varLib] Fixed bug with recombining PairPosClass2 subtables (81498e5, #914). +- [cffLib.specializer] When copying iterables, cast to list (462b7f86). + +3.12.1 (released 2017-05-18) +---------------------------- + +- [pens.t2CharStringPen] Fixed AttributeError when calling addComponent in + T2CharStringPen (#965). + +3.12.0 (released 2017-05-17) +---------------------------- + +- [cffLib.specializer] Added new ``specializer`` module to optimize CFF + charstrings, used by the T2CharStringPen (#948). +- [varLib.mutator] Sort glyphs by component depth before calculating composite + glyphs' bounding boxes to ensure deltas are correctly caclulated (#945). +- [_g_l_y_f] Fixed loss of precision in GlyphCoordinates by using 'd' (double) + instead of 'f' (float) as ``array.array`` typecode (#963, #964). 
+ +3.11.0 (released 2017-05-03) +---------------------------- + +- [t2CharStringPen] Initial support for specialized Type2 path operators: + vmoveto, hmoveto, vlineto, hlineto, vvcurveto, hhcurveto, vhcurveto and + hvcurveto. This should produce more compact charstrings (#940, #403). +- [Doc] Added Sphinx sources for the documentation. Thanks @gferreira (#935). +- [fvar] Expose flags in XML (#932) +- [name] Add helper function for building multi-lingual names (#921) +- [varLib] Fixed kern merging when a PairPosFormat2 has ClassDef1 with glyphs + that are NOT present in the Coverage (1b5e1c4, #939). +- [varLib] Fixed non-deterministic ClassDef order with PY3 (f056c12, #927). +- [feLib] Throw an error when the same glyph is defined in multiple mark + classes within the same lookup (3e3ff00, #453). + +3.10.0 (released 2017-04-14) +---------------------------- + +- [varLib] Added support for building ``avar`` table, using the designspace + ```` elements. +- [varLib] Removed unused ``build(..., axisMap)`` argument. Axis map should + be specified in designspace file now. We do not accept nonstandard axes + if ```` element is not present. +- [varLib] Removed "custom" axis from the ``standard_axis_map``. This was + added before when glyphsLib was always exporting the (unused) custom axis. +- [varLib] Added partial support for building ``MVAR`` table; does not + implement ``gasp`` table variations yet. +- [pens] Added FilterPen base class, for pens that control another pen; + factored out ``addComponent`` method from BasePen into a separate abstract + DecomposingPen class; added DecomposingRecordingPen, which records + components decomposed as regular contours. +- [TSI1] Fixed computation of the textLength of VTT private tables (#913). +- [loggingTools] Added ``LogMixin`` class providing a ``log`` property to + subclasses, which returns a ``logging.Logger`` named after the latter. +- [loggingTools] Added ``assertRegex`` method to ``CapturingLogHandler``. 
+- [py23] Added backport for python 3's ``types.SimpleNamespace`` class. +- [EBLC] Fixed issue with python 3 ``zip`` iterator. + +3.9.2 (released 2017-04-08) +--------------------------- + +- [pens] Added pen to draw glyphs using WxPython ``GraphicsPath`` class: + https://wxpython.org/docs/api/wx.GraphicsPath-class.html +- [varLib.merger] Fixed issue with recombining multiple PairPosFormat2 + subtables (#888) +- [varLib] Do not encode gvar deltas that are all zeroes, or if all values + are smaller than tolerance. +- [ttLib] _TTGlyphSet glyphs now also have ``height`` and ``tsb`` (top + side bearing) attributes from the ``vmtx`` table, if present. +- [glyf] In ``GlyphCoordintes`` class, added ``__bool__`` / ``__nonzero__`` + methods, and ``array`` property to get raw array. +- [ttx] Support reading TTX files with BOM (#896) +- [CFF2] Fixed the reporting of the number of regions in the font. + +3.9.1 (released 2017-03-20) +--------------------------- + +- [varLib.merger] Fixed issue while recombining multiple PairPosFormat2 + subtables if they were split because of offset overflows (9798c30). +- [varLib.merger] Only merge multiple PairPosFormat1 subtables if there is + at least one of the fonts with a non-empty Format1 subtable (0f5a46b). +- [varLib.merger] Fixed IndexError with empty ClassDef1 in PairPosFormat2 + (aad0d46). +- [varLib.merger] Avoid reusing Class2Record (mutable) objects (e6125b3). +- [varLib.merger] Calculate ClassDef1 and ClassDef2's Format when merging + PairPosFormat2 (23511fd). +- [macUtils] Added missing ttLib import (b05f203). + +3.9.0 (released 2017-03-13) +--------------------------- + +- [feaLib] Added (partial) support for parsing feature file comments ``# ...`` + appearing in between statements (#879). +- [feaLib] Cleaned up syntax tree for FeatureNames. +- [ttLib] Added support for reading/writing ``CFF2`` table (thanks to + @readroberts at Adobe), and ``TTFA`` (ttfautohint) table. 
+- [varLib] Fixed regression introduced with 3.8.0 in the calculation of + ``NumShorts``, i.e. the number of deltas in ItemVariationData's delta sets + that use a 16-bit representation (b2825ff). + +3.8.0 (released 2017-03-05) +--------------------------- + +- New pens: MomentsPen, StatisticsPen, RecordingPen, and TeePen. +- [misc] Added new ``fontTools.misc.symfont`` module, for symbolic font + statistical analysis; requires ``sympy`` (http://www.sympy.org/en/index.html) +- [varLib] Added experimental ``fontTools.varLib.interpolatable`` module for + finding wrong contour order between different masters +- [varLib] designspace.load() now returns a dictionary, instead of a tuple, + and supports element (#864); the 'masters' item was renamed 'sources', + like the element in the designspace document +- [ttLib] Fixed issue with recalculating ``head`` modified timestamp when + saving CFF fonts +- [ttLib] In TupleVariation, round deltas before compiling (#861, fixed #592) +- [feaLib] Ignore duplicate glyphs in classes used as MarkFilteringSet and + MarkAttachmentType (#863) +- [merge] Changed the ``gasp`` table merge logic so that only the one from + the first font is retained, similar to other hinting tables (#862) +- [Tests] Added tests for the ``varLib`` package, as well as test fonts + from the "Annotated OpenType Specification" (AOTS) to exercise ``ttLib``'s + table readers/writers () + +3.7.2 (released 2017-02-17) +--------------------------- + +- [subset] Keep advance widths when stripping ".notdef" glyph outline in + CID-keyed CFF fonts (#845) +- [feaLib] Zero values now produce the same results as makeotf (#633, #848) +- [feaLib] More compact encoding for “Contextual positioning with in-line + single positioning rules” (#514) + +3.7.1 (released 2017-02-15) +--------------------------- + +- [subset] Fixed issue with ``--no-hinting`` option whereby advance widths in + Type 2 charstrings were also being stripped (#709, #343) +- [feaLib] include statements now 
resolve relative paths like makeotf (#838) +- [feaLib] table ``name`` now handles Unicode codepoints beyond the Basic + Multilingual Plane, also supports old-style MacOS platform encodings (#842) +- [feaLib] correctly escape string literals when emitting feature syntax (#780) + +3.7.0 (released 2017-02-11) +--------------------------- + +- [ttx, mtiLib] Preserve ordering of glyph alternates in GSUB type 3 (#833). +- [feaLib] Glyph names can have dashes, as per new AFDKO syntax v1.20 (#559). +- [feaLib] feaLib.Parser now needs the font's glyph map for parsing. +- [varLib] Fix regression where GPOS values were stored as 0. +- [varLib] Allow merging of class-based kerning when ClassDefs are different + +3.6.3 (released 2017-02-06) +--------------------------- + +- [varLib] Fix building variation of PairPosFormat2 (b5c34ce). +- Populate defaults even for otTables that have postRead (e45297b). +- Fix compiling of MultipleSubstFormat1 with zero 'out' glyphs (b887860). + +3.6.2 (released 2017-01-30) +--------------------------- + +- [varLib.merger] Fixed "TypeError: reduce() of empty sequence with no + initial value" (3717dc6). + +3.6.1 (released 2017-01-28) +--------------------------- + +- [py23] Fixed unhandled exception occurring at interpreter shutdown in + the "last resort" logging handler (972b3e6). +- [agl] Ensure all glyph names are of native 'str' type; avoid mixing + 'str' and 'unicode' in TTFont.glyphOrder (d8c4058). +- Fixed inconsistent title levels in README.rst that caused PyPI to + incorrectly render the reStructuredText page. + +3.6.0 (released 2017-01-26) +--------------------------- + +- [varLib] Refactored and improved the variation-font-building process. +- Assembly code in the fpgm, prep, and glyf tables is now indented in + XML output for improved readability. The ``instruction`` element is + written as a simple tag if empty (#819). 
+- [ttx] Fixed 'I/O operation on closed file' error when dumping + multiple TTXs to standard output with the '-o -' option. +- The unit test modules (``*_test.py``) have been moved outside of the + fontTools package to the Tests folder, thus they are no longer + installed (#811). + +3.5.0 (released 2017-01-14) +--------------------------- + +- Font tables read from XML can now be written back to XML with no + loss. +- GSUB/GPOS LookupType is written out in XML as an element, not + comment. (#792) +- When parsing cmap table, do not store items mapped to glyph id 0. + (#790) +- [otlLib] Make ClassDef sorting deterministic. Fixes #766 (7d1ddb2) +- [mtiLib] Added unit tests (#787) +- [cvar] Implemented cvar table +- [gvar] Renamed GlyphVariation to TupleVariation to match OpenType + terminology. +- [otTables] Handle gracefully empty VarData.Item array when compiling + XML. (#797) +- [varLib] Re-enabled generation of ``HVAR`` table for fonts with + TrueType outlines; removed ``--build-HVAR`` command-line option. +- [feaLib] The parser can now be extended to support non-standard + statements in FEA code by using a customized Abstract Syntax Tree. + See, for example, ``feaLib.builder_test.test_extensions`` and + baseClass.feax (#794, fixes #773). +- [feaLib] Added ``feaLib`` command to the 'fonttools' command-line + tool; applies a feature file to a font. ``fonttools feaLib -h`` for + help. +- [pens] The ``T2CharStringPen`` now takes an optional + ``roundTolerance`` argument to control the rounding of coordinates + (#804, fixes #769). +- [ci] Measure test coverage on all supported python versions and OSes, + combine coverage data and upload to + https://codecov.io/gh/fonttools/fonttools (#786) +- [ci] Configured Travis and Appveyor for running tests on Python 3.6 + (#785, 55c03bc) +- The manual pages installation directory can be customized through + ``FONTTOOLS_MANPATH`` environment variable (#799, fixes #84). 
+- [Snippets] Added otf2ttf.py, for converting fonts from CFF to + TrueType using the googlei18n/cu2qu module (#802) + +3.4.0 (released 2016-12-21) +--------------------------- + +- [feaLib] Added support for generating FEA text from abstract syntax + tree (AST) objects (#776). Thanks @mhosken +- Added ``agl.toUnicode`` function to convert AGL-compliant glyph names + to Unicode strings (#774) +- Implemented MVAR table (b4d5381) + +3.3.1 (released 2016-12-15) +--------------------------- + +- [setup] We no longer use versioneer.py to compute fonttools version + from git metadata, as this has caused issues for some users (#767). + Now we bump the version strings manually with a custom ``release`` + command of setup.py script. + +3.3.0 (released 2016-12-06) +--------------------------- + +- [ttLib] Implemented STAT table from OpenType 1.8 (#758) +- [cffLib] Fixed decompilation of CFF fonts containing non-standard + key/value pairs in FontDict (issue #740; PR #744) +- [py23] minor: in ``round3`` function, allow the second argument to be + ``None`` (#757) +- The standalone ``sstruct`` and ``xmlWriter`` modules, deprecated + since vesion 3.2.0, have been removed. They can be imported from the + ``fontTools.misc`` package. + +3.2.3 (released 2016-12-02) +--------------------------- + +- [py23] optimized performance of round3 function; added backport for + py35 math.isclose() (9d8dacb) +- [subset] fixed issue with 'narrow' (UCS-2) Python 2 builds and + ``--text``/``--text-file`` options containing non-BMP chararcters + (16d0e5e) +- [varLib] fixed issuewhen normalizing location values (8fa2ee1, #749) +- [inspect] Made it compatible with both python2 and python3 (167ee60, + #748). Thanks @pnemade + +3.2.2 (released 2016-11-24) +--------------------------- + +- [varLib] Do not emit null axes in fvar (1bebcec). 
Thanks @robmck-ms +- [varLib] Handle fonts without GPOS (7915a45) +- [merge] Ignore LangSys if None (a11bc56) +- [subset] Fix subsetting MathVariants (78d3cbe) +- [OS/2] Fix "Private Use (plane 15)" range (08a0d55). Thanks @mashabow + +3.2.1 (released 2016-11-03) +--------------------------- + +- [OS/2] fix checking ``fsSelection`` bits matching ``head.macStyle`` + bits +- [varLib] added ``--build-HVAR`` option to generate ``HVAR`` table for + fonts with TrueType outlines. For ``CFF2``, it is enabled by default. + +3.2.0 (released 2016-11-02) +--------------------------- + +- [varLib] Improve support for OpenType 1.8 Variable Fonts: +- Implement GDEF's VariationStore +- Implement HVAR/VVAR tables +- Partial support for loading MutatorMath .designspace files with + varLib.designspace module +- Add varLib.models with Variation fonts interpolation models +- Implement GSUB/GPOS FeatureVariations +- Initial support for interpolating and merging OpenType Layout tables + (see ``varLib.interpolate_layout`` and ``varLib.merger`` modules) +- [API change] Change version to be an integer instead of a float in + XML output for GSUB, GPOS, GDEF, MATH, BASE, JSTF, HVAR, VVAR, feat, + hhea and vhea tables. Scripts that set the Version for those to 1.0 + or other float values also need fixing. A warning is emitted when + code or XML needs fix. +- several bug fixes to the cffLib module, contributed by Adobe's + @readroberts +- The XML output for CFF table now has a 'major' and 'minor' elements + for specifying whether it's version 1.0 or 2.0 (support for CFF2 is + coming soon) +- [setup.py] remove undocumented/deprecated ``extra_path`` Distutils + argument. This means that we no longer create a "FontTools" subfolder + in site-packages containing the actual fontTools package, as well as + the standalone xmlWriter and sstruct modules. The latter modules are + also deprecated, and scheduled for removal in upcoming releases. 
+ Please change your import statements to point to from fontTools.misc + import xmlWriter and from fontTools.misc import sstruct. +- [scripts] Add a 'fonttools' command-line tool that simply runs + ``fontTools.*`` sub-modules: e.g. ``fonttools ttx``, + ``fonttools subset``, etc. +- [hmtx/vmts] Read advance width/heights as unsigned short (uint16); + automatically round float values to integers. +- [ttLib/xmlWriter] add 'newlinestr=None' keyword argument to + ``TTFont.saveXML`` for overriding os-specific line endings (passed on + to ``XMLWriter`` instances). +- [versioning] Use versioneer instead of ``setuptools_scm`` to + dynamically load version info from a git checkout at import time. +- [feaLib] Support backslash-prefixed glyph names. + +3.1.2 (released 2016-09-27) +--------------------------- + +- restore Makefile as an alternative way to build/check/install +- README.md: update instructions for installing package from source, + and for running test suite +- NEWS: Change log was out of sync with tagged release + +3.1.1 (released 2016-09-27) +--------------------------- + +- Fix ``ttLibVersion`` attribute in TTX files still showing '3.0' + instead of '3.1'. +- Use ``setuptools_scm`` to manage package versions. + +3.1.0 (released 2016-09-26) +--------------------------- + +- [feaLib] New library to parse and compile Adobe FDK OpenType Feature + files. +- [mtiLib] New library to parse and compile Monotype 'FontDame' + OpenType Layout Tables files. +- [voltLib] New library to parse Microsoft VOLT project files. +- [otlLib] New library to work with OpenType Layout tables. +- [varLib] New library to work with OpenType Font Variations. +- [pens] Add ttGlyphPen to draw to TrueType glyphs, and t2CharStringPen + to draw to Type 2 Charstrings (CFF); add areaPen and perimeterPen. +- [ttLib.tables] Implement 'meta' and 'trak' tables. +- [ttx] Add --flavor option for compiling to 'woff' or 'woff2'; add + ``--with-zopfli`` option to use Zopfli to compress WOFF 1.0 fonts. 
+- [subset] Support subsetting 'COLR'/'CPAL' and 'CBDT'/'CBLC' color + fonts tables, and 'gvar' table for variation fonts. +- [Snippets] Add ``symfont.py``, for symbolic font statistics analysis; + interpolatable.py, a preliminary script for detecting interpolation + errors; ``{merge,dump}_woff_metadata.py``. +- [classifyTools] Helpers to classify things into classes. +- [CI] Run tests on Windows, Linux and macOS using Appveyor and Travis + CI; check unit test coverage with Coverage.py/Coveralls; automatic + deployment to PyPI on tags. +- [loggingTools] Use Python built-in logging module to print messages. +- [py23] Make round() behave like Python 3 built-in round(); define + round2() and round3(). + +3.0 (released 2015-09-01) +------------------------- + +- Add Snippet scripts for cmap subtable format conversion, printing + GSUB/GPOS features, building a GX font from two masters +- TTX WOFF2 support and a ``-f`` option to overwrite output file(s) +- Support GX tables: ``avar``, ``gvar``, ``fvar``, ``meta`` +- Support ``feat`` and gzip-compressed SVG tables +- Upgrade Mac East Asian encodings to native implementation if + available +- Add Roman Croatian and Romanian encodings, codecs for mac-extended + East Asian encodings +- Implement optimal GLYF glyph outline packing; disabled by default + +2.5 (released 2014-09-24) +------------------------- + +- Add a Qt pen +- Add VDMX table converter +- Load all OpenType sub-structures lazily +- Add support for cmap format 13. 
+- Add pyftmerge tool +- Update to Unicode 6.3.0d3 +- Add pyftinspect tool +- Add support for Google CBLC/CBDT color bitmaps, standard EBLC/EBDT + embedded bitmaps, and ``SVG`` table (thanks to Read Roberts at Adobe) +- Add support for loading, saving and ttx'ing WOFF file format +- Add support for Microsoft COLR/CPAL layered color glyphs +- Support PyPy +- Support Jython, by replacing numpy with array/lists modules and + removed it, pure-Python StringIO, not cStringIO +- Add pyftsubset and Subsetter object, supporting CFF and TTF +- Add to ttx args for -q for quiet mode, -z to choose a bitmap dump + format + +2.4 (released 2013-06-22) +------------------------- + +- Option to write to arbitrary files +- Better dump format for DSIG +- Better detection of OTF XML +- Fix issue with Apple's kern table format +- Fix mangling of TT glyph programs +- Fix issues related to mona.ttf +- Fix Windows Installer instructions +- Fix some modern MacOS issues +- Fix minor issues and typos + +2.3 (released 2009-11-08) +------------------------- + +- TrueType Collection (TTC) support +- Python 2.6 support +- Update Unicode data to 5.2.0 +- Couple of bug fixes + +2.2 (released 2008-05-18) +------------------------- + +- ClearType support +- cmap format 1 support +- PFA font support +- Switched from Numeric to numpy +- Update Unicode data to 5.1.0 +- Update AGLFN data to 1.6 +- Many bug fixes + +2.1 (released 2008-01-28) +------------------------- + +- Many years worth of fixes and features + +2.0b2 (released 2002-??-??) +--------------------------- + +- Be "forgiving" when interpreting the maxp table version field: + interpret any value as 1.0 if it's not 0.5. Fixes dumping of these + GPL fonts: http://www.freebsd.org/cgi/pds.cgi?ports/chinese/wangttf +- Fixed ttx -l: it turned out this part of the code didn't work with + Python 2.2.1 and earlier. 
My bad to do most of my testing with a + different version than I shipped TTX with :-( +- Fixed bug in ClassDef format 1 subtable (Andreas Seidel bumped into + this one). + +2.0b1 (released 2002-09-10) +--------------------------- + +- Fixed embarrassing bug: the master checksum in the head table is now + calculated correctly even on little-endian platforms (such as Intel). +- Made the cmap format 4 compiler smarter: the binary data it creates + is now more or less as compact as possible. TTX now makes more + compact data than in any shipping font I've tested it with. +- Dump glyph names as a separate "GlyphOrder" pseudo table as opposed + to as part of the glyf table (obviously needed for CFF-OTF's). +- Added proper support for the CFF table. +- Don't barf on empty tables (questionable, but "there are font out + there...") +- When writing TT glyf data, align glyphs on 4-byte boundaries. This + seems to be the current recommendation by MS. Also: don't barf on + fonts which are already 4-byte aligned. +- Windows installer contributed bu Adam Twardoch! Yay! +- Changed the command line interface again, now by creating one new + tool replacing the old ones: ttx It dumps and compiles, depending on + input file types. The options have changed somewhat. +- The -d option is back (output dir) +- ttcompile's -i options is now called -m (as in "merge"), to avoid + clash with dump's -i. +- The -s option ("split tables") no longer creates a directory, but + instead outputs a small .ttx file containing references to the + individual table files. This is not a true link, it's a simple file + name, and the referenced file should be in the same directory so + ttcompile can find them. +- compile no longer accepts a directory as input argument. Instead it + can parse the new "mini-ttx" format as output by "ttx -s". +- all arguments are input files +- Renamed the command line programs and moved them to the Tools + subdirectory. They are now installed by the setup.py install script. 
+- Added OpenType support. BASE, GDEF, GPOS, GSUB and JSTF are (almost) + fully supported. The XML output is not yet final, as I'm still + considering to output certain subtables in a more human-friendly + manner. +- Fixed 'kern' table to correctly accept subtables it doesn't know + about, as well as interpreting Apple's definition of the 'kern' table + headers correctly. +- Fixed bug where glyphnames were not calculated from 'cmap' if it was + (one of the) first tables to be decompiled. More specifically: it + cmap was the first to ask for a glyphID -> glyphName mapping. +- Switched XML parsers: use expat instead of xmlproc. Should be faster. +- Removed my UnicodeString object: I now require Python 2.0 or up, + which has unicode support built in. +- Removed assert in glyf table: redundant data at the end of the table + is now ignored instead of raising an error. Should become a warning. +- Fixed bug in hmtx/vmtx code that only occured if all advances were + equal. +- Fixed subtle bug in TT instruction disassembler. +- Couple of fixes to the 'post' table. +- Updated OS/2 table to latest spec. + +1.0b1 (released 2001-08-10) +--------------------------- + +- Reorganized the command line interface for ttDump.py and + ttCompile.py, they now behave more like "normal" command line tool, + in that they accept multiple input files for batch processing. +- ttDump.py and ttCompile.py don't silently override files anymore, but + ask before doing so. Can be overridden by -f. +- Added -d option to both ttDump.py and ttCompile.py. +- Installation is now done with distutils. (Needs work for environments + without compilers.) +- Updated installation instructions. +- Added some workarounds so as to handle certain buggy fonts more + gracefully. +- Updated Unicode table to Unicode 3.0 (Thanks Antoine!) +- Included a Python script by Adam Twardoch that adds some useful stuff + to the Windows registry. +- Moved the project to SourceForge. 
+ +1.0a6 (released 2000-03-15) +--------------------------- + +- Big reorganization: made ttLib a subpackage of the new fontTools + package, changed several module names. Called the entire suite + "FontTools" +- Added several submodules to fontTools, some new, some older. +- Added experimental CFF/GPOS/GSUB support to ttLib, read-only (but XML + dumping of GPOS/GSUB is for now disabled) +- Fixed hdmx endian bug +- Added -b option to ttCompile.py, it disables recalculation of + bounding boxes, as requested by Werner Lemberg. +- Renamed tt2xml.pt to ttDump.py and xml2tt.py to ttCompile.py +- Use ".ttx" as file extension instead of ".xml". +- TTX is now the name of the XML-based *format* for TT fonts, and not + just an application. + +1.0a5 +----- + +Never released + +- More tables supported: hdmx, vhea, vmtx + +1.0a3 & 1.0a4 +------------- + +Never released + +- fixed most portability issues +- retracted the "Euro_or_currency" change from 1.0a2: it was + nonsense! + +1.0a2 (released 1999-05-02) +--------------------------- + +- binary release for MacOS +- genenates full FOND resources: including width table, PS font name + info and kern table if applicable. +- added cmap format 4 support. Extra: dumps Unicode char names as XML + comments! +- added cmap format 6 support +- now accepts true type files starting with "true" (instead of just + 0x00010000 and "OTTO") +- 'glyf' table support is now complete: I added support for composite + scale, xy-scale and two-by-two for the 'glyf' table. For now, + component offset scale behaviour defaults to Apple-style. This only + affects the (re)calculation of the glyph bounding box. +- changed "Euro" to "Euro_or_currency" in the Standard Apple Glyph + order list, since we cannot tell from the 'post' table which is + meant. I should probably doublecheck with a Unicode encoding if + available. (This does not affect the output!) 
+ +Fixed bugs: - 'hhea' table is now recalculated correctly - fixed wrong +assumption about sfnt resource names + +1.0a1 (released 1999-04-27) +--------------------------- + +- initial binary release for MacOS diff -Nru fonttools-3.0/README.md fonttools-3.21.2/README.md --- fonttools-3.0/README.md 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/README.md 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -[![Build Status](https://travis-ci.org/behdad/fonttools.svg)](https://travis-ci.org/behdad/fonttools) -[![Health](https://landscape.io/github/behdad/fonttools/master/landscape.svg?style=flat)](https://landscape.io/github/behdad/fonttools/master) -[![Coverage Status](https://img.shields.io/coveralls/behdad/fonttools.svg)](https://coveralls.io/r/behdad/fonttools) - -### What it is ? - -Quoting from [TTX/FontTools Sourceforge Project](http://sourceforge.net/projects/fonttools/) -> a tool to convert OpenType and TrueType fonts to and from XML. FontTools is a library for manipulating fonts, written in Python. It supports TrueType, OpenType, AFM and to an extent Type 1 and some Mac-specific formats. - -### Quick start - -```python setup.py install``` - -From your command line type the above command to get fontools installed on your system. FontTools requires Python 2.7, or Python 3.3 or later. - -### Installation - -See [install.txt](https://github.com/behdad/fonttools/blob/master/Doc/install.txt) in the 'Doc' subdirectory for instructions on how to build and install TTX/FontTools from the sources. - - -### Documentation - -#### What is TTX ? - -See [documentation.html](https://rawgit.com/behdad/fonttools/master/Doc/documentation.html) in the "Doc" subdirectory for TTX usage instructions and information about the TTX file format. - -#### History - -The fontTools project was started by Just van Rossum in 1999, and was maintained as an open source project at . In 2008, Paul Wise (pabs3) began helping Just with stability maintenance. 
In 2013 Behdad Esfahbod began a friendly fork, thoroughly reviewing the codebase and making changes at to add new features and support for new font formats. - -### Community - -* https://groups.google.com/d/forum/fonttools - -### License - -See "LICENSE.txt" for licensing information. - - - -Have fun! - -Just van Rossum diff -Nru fonttools-3.0/README.rst fonttools-3.21.2/README.rst --- fonttools-3.0/README.rst 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/README.rst 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,362 @@ +|Travis Build Status| |Appveyor Build status| |Health| |Coverage Status| +|PyPI| + +What is this? +~~~~~~~~~~~~~ + +| fontTools is a library for manipulating fonts, written in Python. The + project includes the TTX tool, that can convert TrueType and OpenType + fonts to and from an XML text format, which is also called TTX. It + supports TrueType, OpenType, AFM and to an extent Type 1 and some + Mac-specific formats. The project has a `MIT open-source + licence `__. +| Among other things this means you can use it free of charge. + +Installation +~~~~~~~~~~~~ + +FontTools requires `Python `__ 2.7, 3.4 +or later. + +The package is listed in the Python Package Index (PyPI), so you can +install it with `pip `__: + +.. code:: sh + + pip install fonttools + +If you would like to contribute to its development, you can clone the +repository from Github, install the package in 'editable' mode and +modify the source code in place. We recommend creating a virtual +environment, using `virtualenv `__ or +Python 3 `venv `__ module. + +.. code:: sh + + # download the source code to 'fonttools' folder + git clone https://github.com/fonttools/fonttools.git + cd fonttools + + # create new virtual environment called e.g. 'fonttools-venv', or anything you like + python -m virtualenv fonttools-venv + + # source the `activate` shell script to enter the environment (Un\*x); to exit, just type `deactivate` + . 
fonttools-venv/bin/activate + + # to activate the virtual environment in Windows `cmd.exe`, do + fonttools-venv\Scripts\activate.bat + + # install in 'editable' mode + pip install -e . + +TTX – From OpenType and TrueType to XML and Back +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Once installed you can use the ``ttx`` command to convert binary font +files (``.otf``, ``.ttf``, etc) to the TTX xml format, edit them, and +convert them back to binary format. TTX files have a .ttx file +extension. + +.. code:: sh + + ttx /path/to/font.otf + ttx /path/to/font.ttx + +The TTX application works can be used in two ways, depending on what +platform you run it on: + +- As a command line tool (Windows/DOS, Unix, MacOSX) +- By dropping files onto the application (Windows, MacOS) + +TTX detects what kind of files it is fed: it will output a ``.ttx`` file +when it sees a ``.ttf`` or ``.otf``, and it will compile a ``.ttf`` or +``.otf`` when the input file is a ``.ttx`` file. By default, the output +file is created in the same folder as the input file, and will have the +same name as the input file but with a different extension. TTX will +*never* overwrite existing files, but if necessary will append a unique +number to the output filename (before the extension) such as +``Arial#1.ttf`` + +When using TTX from the command line there are a bunch of extra options, +these are explained in the help text, as displayed when typing +``ttx -h`` at the command prompt. These additional options include: + +- specifying the folder where the output files are created +- specifying which tables to dump or which tables to exclude +- merging partial ``.ttx`` files with existing ``.ttf`` or ``.otf`` + files +- listing brief table info instead of dumping to ``.ttx`` +- splitting tables to separate ``.ttx`` files +- disabling TrueType instruction disassembly + +The TTX file format +------------------- + +The following tables are currently supported: + +.. begin table list +.. 
code:: + + BASE, CBDT, CBLC, CFF, CFF2, COLR, CPAL, DSIG, EBDT, EBLC, FFTM, + Feat, GDEF, GMAP, GPKG, GPOS, GSUB, Glat, Gloc, HVAR, JSTF, LTSH, + MATH, META, MVAR, OS/2, SING, STAT, SVG, Silf, Sill, TSI0, TSI1, + TSI2, TSI3, TSI5, TSIB, TSID, TSIJ, TSIP, TSIS, TSIV, TTFA, VDMX, + VORG, VVAR, ankr, avar, bsln, cidg, cmap, cvar, cvt, feat, fpgm, + fvar, gasp, gcid, glyf, gvar, hdmx, head, hhea, hmtx, kern, lcar, + loca, ltag, maxp, meta, mort, morx, name, opbd, post, prep, prop, + sbix, trak, vhea and vmtx +.. end table list + +Other tables are dumped as hexadecimal data. + +TrueType fonts use glyph indices (GlyphIDs) to refer to glyphs in most +places. While this is fine in binary form, it is really hard to work +with for humans. Therefore we use names instead. + +The glyph names are either extracted from the ``CFF`` table or the +``post`` table, or are derived from a Unicode ``cmap`` table. In the +latter case the Adobe Glyph List is used to calculate names based on +Unicode values. If all of these methods fail, names are invented based +on GlyphID (eg ``glyph00142``) + +It is possible that different glyphs use the same name. If this happens, +we force the names to be unique by appending ``#n`` to the name (``n`` +being an integer number.) The original names are being kept, so this has +no influence on a "round tripped" font. + +Because the order in which glyphs are stored inside the binary font is +important, we maintain an ordered list of glyph names in the font. + +Other Tools +~~~~~~~~~~~ + +Commands for inspecting, merging and subsetting fonts are also +available: + +.. code:: sh + + pyftinspect + pyftmerge + pyftsubset + +fontTools Python Module +~~~~~~~~~~~~~~~~~~~~~~~ + +The fontTools python module provides a convenient way to +programmatically edit font files. + +.. code:: py + + >>> from fontTools.ttLib import TTFont + >>> font = TTFont('/path/to/font.ttf') + >>> font + + >>> + +A selection of sample python programs is in the +`Snippets `__ +directory. 
+ +Optional Requirements +--------------------- + +The ``fontTools`` package currently has no (required) external dependencies +besides the modules included in the Python Standard Library. +However, a few extra dependencies are required by some of its modules, which +are needed to unlock optional features. + +- ``Lib/fontTools/ttLib/woff2.py`` + + Module to compress/decompress WOFF 2.0 web fonts; it requires: + + - `brotli `__: Python bindings of + the Brotli compression library. + +- ``Lib/fontTools/ttLib/sfnt.py`` + + To better compress WOFF 1.0 web fonts, the following module can be used + instead of the built-in ``zlib`` library: + + - `zopfli `__: Python bindings of + the Zopfli compression library. + +- ``Lib/fontTools/unicode.py`` + + To display the Unicode character names when dumping the ``cmap`` table + with ``ttx`` we use the ``unicodedata`` module in the Standard Library. + The version included in there varies between different Python versions. + To use the latest available data, you can install: + + - `unicodedata2 `__: + ``unicodedata`` backport for Python 2.7 and 3.5 updated to the latest + Unicode version 9.0. Note this is not necessary if you use Python 3.6 + as the latter already comes with an up-to-date ``unicodedata``. + +- ``Lib/fontTools/varLib/interpolatable.py`` + + Module for finding wrong contour/component order between different masters. + It requires one of the following packages in order to solve the so-called + "minimum weight perfect matching problem in bipartite graphs", or + the Assignment problem: + + * `scipy `__: the Scientific Library + for Python, which internally uses `NumPy `__ + arrays and hence is very fast; + * `munkres `__: a pure-Python + module that implements the Hungarian or Kuhn-Munkres algorithm. + +- ``Lib/fontTools/misc/symfont.py`` + + Advanced module for symbolic font statistics analysis; it requires: + + * `sympy `__: the Python library for + symbolic mathematics. 
+ +- ``Lib/fontTools/t1Lib.py`` + + To get the file creator and type of Macintosh PostScript Type 1 fonts + on Python 3 you need to install the following module, as the old ``MacOS`` + module is no longer included in Mac Python: + + * `xattr `__: Python wrapper for + extended filesystem attributes (macOS platform only). + +- ``Lib/fontTools/pens/cocoaPen.py`` + + Pen for drawing glyphs with Cocoa ``NSBezierPath``, requires: + + * `PyObjC `__: the bridge between + Python and the Objective-C runtime (macOS platform only). + +- ``Lib/fontTools/pens/qtPen.py`` + + Pen for drawing glyphs with Qt's ``QPainterPath``, requires: + + * `PyQt5 `__: Python bindings for + the Qt cross platform UI and application toolkit. + +- ``Lib/fontTools/pens/reportLabPen.py`` + + Pen to drawing glyphs as PNG images, requires: + + * `reportlab `__: Python toolkit + for generating PDFs and graphics. + +- ``Lib/fontTools/inspect.py`` + + A GUI font inspector, requires one of the following packages: + + * `PyGTK `__: Python bindings for + GTK  2.x (only works with Python 2). + * `PyGObject `__ : + Python bindings for GTK 3.x and gobject-introspection libraries (also + compatible with Python 3). + +Testing +~~~~~~~ + +To run the test suite, you can do: + +.. code:: sh + + python setup.py test + +If you have `pytest `__, you can run +the ``pytest`` command directly. The tests will run against the +installed ``fontTools`` package, or the first one found in the +``PYTHONPATH``. + +You can also use `tox `__ to +automatically run tests on different Python versions in isolated virtual +environments. + +.. code:: sh + + pip install tox + tox + +Note that when you run ``tox`` without arguments, the tests are executed +for all the environments listed in tox.ini's ``envlist``. In our case, +this includes Python 2.7 and 3.6, so for this to work the ``python2.7`` +and ``python3.6`` executables must be available in your ``PATH``. 
+ +You can specify an alternative environment list via the ``-e`` option, +or the ``TOXENV`` environment variable: + +.. code:: sh + + tox -e py27-nocov + TOXENV="py36-cov,htmlcov" tox + +Development Community +~~~~~~~~~~~~~~~~~~~~~ + +TTX/FontTools development is ongoing in an active community of +developers, that includes professional developers employed at major +software corporations and type foundries as well as hobbyists. + +Feature requests and bug reports are always welcome at +https://github.com/fonttools/fonttools/issues/ + +The best place for discussions about TTX from an end-user perspective as +well as TTX/FontTools development is the +https://groups.google.com/d/forum/fonttools mailing list. There is also +a development https://groups.google.com/d/forum/fonttools-dev mailing +list for continuous integration notifications. You can also email Behdad +privately at behdad@behdad.org + +History +~~~~~~~ + +The fontTools project was started by Just van Rossum in 1999, and was +maintained as an open source project at +http://sourceforge.net/projects/fonttools/. In 2008, Paul Wise (pabs3) +began helping Just with stability maintenance. In 2013 Behdad Esfahbod +began a friendly fork, thoroughly reviewing the codebase and making +changes at https://github.com/behdad/fonttools to add new features and +support for new font formats. 
+ +Acknowledgements +~~~~~~~~~~~~~~~~ + +In alphabetical order: + +Olivier Berten, Samyak Bhuta, Erik van Blokland, Petr van Blokland, +Jelle Bosma, Sascha Brawer, Tom Byrer, Frédéric Coiffier, Vincent +Connare, Dave Crossland, Simon Daniels, Behdad Esfahbod, Behnam +Esfahbod, Hannes Famira, Sam Fishman, Matt Fontaine, Yannis Haralambous, +Greg Hitchcock, Jeremie Hornus, Khaled Hosny, John Hudson, Denis Moyogo +Jacquerye, Jack Jansen, Tom Kacvinsky, Jens Kutilek, Antoine Leca, +Werner Lemberg, Tal Leming, Peter Lofting, Cosimo Lupo, Masaya Nakamura, +Dave Opstad, Laurence Penney, Roozbeh Pournader, Garret Rieger, Read +Roberts, Guido van Rossum, Just van Rossum, Andreas Seidel, Georg +Seifert, Miguel Sousa, Adam Twardoch, Adrien Tétar, Vitaly Volkov, Paul +Wise. + +Copyrights +~~~~~~~~~~ + +| Copyright (c) 1999-2004 Just van Rossum, LettError + (just@letterror.com) +| See `LICENSE `__ for the full license. + +Copyright (c) 2000 BeOpen.com. All Rights Reserved. + +Copyright (c) 1995-2001 Corporation for National Research Initiatives. +All Rights Reserved. + +Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. All +Rights Reserved. + +Have fun! + +.. |Travis Build Status| image:: https://travis-ci.org/fonttools/fonttools.svg + :target: https://travis-ci.org/fonttools/fonttools +.. |Appveyor Build status| image:: https://ci.appveyor.com/api/projects/status/0f7fmee9as744sl7/branch/master?svg=true + :target: https://ci.appveyor.com/project/fonttools/fonttools/branch/master +.. |Health| image:: https://landscape.io/github/behdad/fonttools/master/landscape.svg?style=flat + :target: https://landscape.io/github/behdad/fonttools/master +.. |Coverage Status| image:: https://codecov.io/gh/fonttools/fonttools/branch/master/graph/badge.svg + :target: https://codecov.io/gh/fonttools/fonttools +.. 
|PyPI| image:: https://img.shields.io/pypi/v/fonttools.svg + :target: https://pypi.org/project/FontTools diff -Nru fonttools-3.0/requirements.txt fonttools-3.21.2/requirements.txt --- fonttools-3.0/requirements.txt 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/requirements.txt 2018-01-08 12:40:40.000000000 +0000 @@ -1 +1,6 @@ -git+https://github.com/google/brotli@v0.1.0#egg=Brotli \ No newline at end of file +# we use the official Brotli module on CPython and the CFFI-based +# extension 'brotlipy' on PyPy +brotli==1.0.1; platform_python_implementation != "PyPy" +brotlipy==0.7.0; platform_python_implementation == "PyPy" +unicodedata2==10.0.0; python_version < '3.7' and platform_python_implementation != "PyPy" +munkres==1.0.10 diff -Nru fonttools-3.0/run-tests.sh fonttools-3.21.2/run-tests.sh --- fonttools-3.0/run-tests.sh 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/run-tests.sh 2018-01-08 12:40:40.000000000 +0000 @@ -1,5 +1,8 @@ #!/bin/sh +# exit if any subcommand return non-zero status +set -e + # Choose python version if test "x$1" = x-3; then PYTHON=python3 @@ -9,42 +12,17 @@ shift fi test "x$PYTHON" = x && PYTHON=python -echo "$(which $PYTHON) --version" -$PYTHON --version 2>&1 -echo - -# Setup environment -DIR=`dirname "$0"` -cd "$DIR/Lib" -PYTHONPATH=".:$PYTHONPATH" -export PYTHONPATH # Find tests -FILTER= +FILTERS= for arg in "$@"; do - test "x$FILTER" != x && FILTER="$FILTER|" - FILTER="$FILTER$arg" + test "x$FILTERS" != x && FILTERS="$FILTERS or " + FILTERS="$FILTERS$arg" done -test "x$FILTER" = "x" && FILTER=. -TESTS=`grep -r --include='*.py' -l -e doctest -e unittest * | grep -E "$FILTER"` - -ret=0 -FAILS= -for test in $TESTS; do - echo "Running tests in $test" - test=`echo "$test" | sed 's@[/\\]@.@g;s@[.]py$@@'` - if ! $PYTHON -m $test -v; then - ret=$((ret+1)) - FAILS="$FAILS -$test" - fi -done - echo - echo "SUMMARY:" -if test $ret = 0; then - echo "All tests passed." 
+# Run tests +if [ -z "$FILTERS" ]; then + $PYTHON setup.py test else - echo "$ret source file(s) had tests failing:$FAILS" >&2 + $PYTHON setup.py test --addopts="-k \"$FILTERS\"" fi -exit $ret diff -Nru fonttools-3.0/setup.cfg fonttools-3.21.2/setup.cfg --- fonttools-3.0/setup.cfg 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/setup.cfg 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,54 @@ +[bumpversion] +current_version = 3.21.2 +commit = True +tag = False +tag_name = {new_version} +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\.(?P[a-z]+)(?P\d+))? +serialize = + {major}.{minor}.{patch}.{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = final +values = + dev + final + +[bumpversion:part:dev] + +[bumpversion:file:Lib/fontTools/__init__.py] +search = __version__ = "{current_version}" +replace = __version__ = "{new_version}" + +[bumpversion:file:setup.py] +search = version="{current_version}" +replace = version="{new_version}" + +[wheel] +universal = 1 + +[sdist] +formats = zip + +[aliases] +test = pytest + +[metadata] +license_file = LICENSE + +[tool:pytest] +minversion = 3.0 +testpaths = + Tests + fontTools +python_files = + *_test.py +python_classes = + *Test +addopts = + -v + -r a + --doctest-modules + --doctest-ignore-import-errors + --pyargs + diff -Nru fonttools-3.0/setup.py fonttools-3.21.2/setup.py --- fonttools-3.0/setup.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/setup.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,24 +1,17 @@ #! 
/usr/bin/env python from __future__ import print_function -import os, sys - -# if setuptools is not installed, fall back to distutils -try: - from setuptools import setup -except ImportError: - from distutils.core import setup - distutils_scripts = [ - "Tools/ttx", "Tools/pyftsubset", "Tools/pyftinspect", "Tools/pyftmerge"] -else: - distutils_scripts = [] - -try: - import xml.parsers.expat -except ImportError: - print("*** Warning: FontTools needs PyXML, see:") - print(" http://sourceforge.net/projects/pyxml/") - +import io +import sys +import os +from os.path import isfile, join as pjoin +from glob import glob +from setuptools import setup, find_packages, Command +from distutils import log +from distutils.util import convert_path +import subprocess as sp +import contextlib +import re # Force distutils to use py_compile.compile() function with 'doraise' argument # set to True, in order to raise an exception on compilation errors @@ -26,67 +19,326 @@ orig_py_compile = py_compile.compile def doraise_py_compile(file, cfile=None, dfile=None, doraise=False): - orig_py_compile(file, cfile=cfile, dfile=dfile, doraise=True) + orig_py_compile(file, cfile=cfile, dfile=dfile, doraise=True) py_compile.compile = doraise_py_compile +needs_pytest = {'pytest', 'test'}.intersection(sys.argv) +pytest_runner = ['pytest_runner'] if needs_pytest else [] +needs_wheel = {'bdist_wheel'}.intersection(sys.argv) +wheel = ['wheel'] if needs_wheel else [] +needs_bumpversion = {'release'}.intersection(sys.argv) +bumpversion = ['bump2version'] if needs_bumpversion else [] # Trove classifiers for PyPI classifiers = {"classifiers": [ - "Development Status :: 4 - Beta", + "Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: Other Environment", "Intended Audience :: Developers", "Intended Audience :: End Users/Desktop", - "License :: OSI Approved :: BSD License", + "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS 
Independent", "Programming Language :: Python", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 3", + "Topic :: Text Processing :: Fonts", "Topic :: Multimedia :: Graphics", "Topic :: Multimedia :: Graphics :: Graphics Conversion", ]} -long_description = """\ -FontTools/TTX is a library to manipulate font files from Python. -It supports reading and writing of TrueType/OpenType fonts, reading -and writing of AFM files, reading (and partially writing) of PS Type 1 -fonts. The package also contains a tool called "TTX" which converts -TrueType/OpenType fonts to and from an XML-based format. -""" + +# concatenate README.rst and NEWS.rest into long_description so they are +# displayed on the FontTols project page on PyPI +with io.open("README.rst", "r", encoding="utf-8") as readme: + long_description = readme.read() +long_description += "\nChangelog\n~~~~~~~~~\n\n" +with io.open("NEWS.rst", "r", encoding="utf-8") as changelog: + long_description += changelog.read() + + +@contextlib.contextmanager +def capture_logger(name): + """ Context manager to capture a logger output with a StringIO stream. + """ + import logging + + logger = logging.getLogger(name) + try: + import StringIO + stream = StringIO.StringIO() + except ImportError: + stream = io.StringIO() + handler = logging.StreamHandler(stream) + logger.addHandler(handler) + try: + yield stream + finally: + logger.removeHandler(handler) + + +class release(Command): + """ + Tag a new release with a single command, using the 'bumpversion' tool + to update all the version strings in the source code. + The version scheme conforms to 'SemVer' and PEP 440 specifications. + + Firstly, the pre-release '.devN' suffix is dropped to signal that this is + a stable release. If '--major' or '--minor' options are passed, the + the first or second 'semver' digit is also incremented. 
Major is usually + for backward-incompatible API changes, while minor is used when adding + new backward-compatible functionalities. No options imply 'patch' or bug-fix + release. + + A new header is also added to the changelog file ("NEWS.rst"), containing + the new version string and the current 'YYYY-MM-DD' date. + + All changes are committed, and an annotated git tag is generated. With the + --sign option, the tag is GPG-signed with the user's default key. + + Finally, the 'patch' part of the version string is bumped again, and a + pre-release suffix '.dev0' is appended to mark the opening of a new + development cycle. + + Links: + - http://semver.org/ + - https://www.python.org/dev/peps/pep-0440/ + - https://github.com/c4urself/bump2version + """ + + description = "update version strings for release" + + user_options = [ + ("major", None, "bump the first digit (incompatible API changes)"), + ("minor", None, "bump the second digit (new backward-compatible features)"), + ("sign", "s", "make a GPG-signed tag, using the default key"), + ("allow-dirty", None, "don't abort if working directory is dirty"), + ] + + changelog_name = "NEWS.rst" + version_RE = re.compile("^[0-9]+\.[0-9]+") + date_fmt = u"%Y-%m-%d" + header_fmt = u"%s (released %s)" + commit_message = "Release {new_version}" + tag_name = "{new_version}" + version_files = [ + "setup.cfg", + "setup.py", + "Lib/fontTools/__init__.py", + ] + + def initialize_options(self): + self.minor = False + self.major = False + self.sign = False + self.allow_dirty = False + + def finalize_options(self): + if all([self.major, self.minor]): + from distutils.errors import DistutilsOptionError + raise DistutilsOptionError("--major/--minor are mutually exclusive") + self.part = "major" if self.major else "minor" if self.minor else None + + def run(self): + if self.part is not None: + log.info("bumping '%s' version" % self.part) + self.bumpversion(self.part, commit=False) + release_version = self.bumpversion( + "release", 
commit=False, allow_dirty=True) + else: + log.info("stripping pre-release suffix") + release_version = self.bumpversion("release") + log.info(" version = %s" % release_version) + + changes = self.format_changelog(release_version) + + self.git_commit(release_version) + self.git_tag(release_version, changes, self.sign) + + log.info("bumping 'patch' version and pre-release suffix") + next_dev_version = self.bumpversion('patch', commit=True) + log.info(" version = %s" % next_dev_version) + + def git_commit(self, version): + """ Stage and commit all relevant version files, and format the commit + message with specified 'version' string. + """ + files = self.version_files + [self.changelog_name] + + log.info("committing changes") + for f in files: + log.info(" %s" % f) + if self.dry_run: + return + sp.check_call(["git", "add"] + files) + msg = self.commit_message.format(new_version=version) + sp.check_call(["git", "commit", "-m", msg], stdout=sp.PIPE) + + def git_tag(self, version, message, sign=False): + """ Create annotated git tag with given 'version' and 'message'. + Optionally 'sign' the tag with the user's GPG key. + """ + log.info("creating %s git tag '%s'" % ( + "signed" if sign else "annotated", version)) + if self.dry_run: + return + # create an annotated (or signed) tag from the new version + tag_opt = "-s" if sign else "-a" + tag_name = self.tag_name.format(new_version=version) + proc = sp.Popen( + ["git", "tag", tag_opt, "-F", "-", tag_name], stdin=sp.PIPE) + # use the latest changes from the changelog file as the tag message + tag_message = u"%s\n\n%s" % (tag_name, message) + proc.communicate(tag_message.encode('utf-8')) + if proc.returncode != 0: + sys.exit(proc.returncode) + + def bumpversion(self, part, commit=False, message=None, allow_dirty=None): + """ Run bumpversion.main() with the specified arguments, and return the + new computed version string (cf. 
'bumpversion --help' for more info) + """ + import bumpversion + + args = ( + (['--verbose'] if self.verbose > 1 else []) + + (['--dry-run'] if self.dry_run else []) + + (['--allow-dirty'] if (allow_dirty or self.allow_dirty) else []) + + (['--commit'] if commit else ['--no-commit']) + + (['--message', message] if message is not None else []) + + ['--list', part] + ) + log.debug("$ bumpversion %s" % " ".join(a.replace(" ", "\\ ") for a in args)) + + with capture_logger("bumpversion.list") as out: + bumpversion.main(args) + + last_line = out.getvalue().splitlines()[-1] + new_version = last_line.replace("new_version=", "") + return new_version + + def format_changelog(self, version): + """ Write new header at beginning of changelog file with the specified + 'version' and the current date. + Return the changelog content for the current release. + """ + from datetime import datetime + + log.info("formatting changelog") + + changes = [] + with io.open(self.changelog_name, "r+", encoding="utf-8") as f: + for ln in f: + if self.version_RE.match(ln): + break + else: + changes.append(ln) + if not self.dry_run: + f.seek(0) + content = f.read() + date = datetime.today().strftime(self.date_fmt) + f.seek(0) + header = self.header_fmt % (version, date) + f.write(header + u"\n" + u"-"*len(header) + u"\n\n" + content) + + return u"".join(changes) + + +class PassCommand(Command): + """ This is used with Travis `dpl` tool so that it skips creating sdist + and wheel packages, but simply uploads to PyPI the files found in ./dist + folder, that were previously built inside the tox 'bdist' environment. + This ensures that the same files are uploaded to Github Releases and PyPI. + """ + + description = "do nothing" + user_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + pass + + +def find_data_files(manpath="share/man"): + """ Find FontTools's data_files (just man pages at this point). 
+ + By default, we install man pages to "share/man" directory relative to the + base installation directory for data_files. The latter can be changed with + the --install-data option of 'setup.py install' sub-command. + + E.g., if the data files installation directory is "/usr", the default man + page installation directory will be "/usr/share/man". + + You can override this via the $FONTTOOLS_MANPATH environment variable. + + E.g., on some BSD systems man pages are installed to 'man' instead of + 'share/man'; you can export $FONTTOOLS_MANPATH variable just before + installing: + + $ FONTTOOLS_MANPATH="man" pip install -v . + [...] + running install_data + copying Doc/man/ttx.1 -> /usr/man/man1 + + When installing from PyPI, for this variable to have effect you need to + force pip to install from the source distribution instead of the wheel + package (otherwise setup.py is not run), by using the --no-binary option: + + $ FONTTOOLS_MANPATH="man" pip install --no-binary=fonttools fonttools + + Note that you can only override the base man path, i.e. without the + section number (man1, man3, etc.). The latter is always implied to be 1, + for "general commands". 
+ """ + + # get base installation directory for man pages + manpagebase = os.environ.get('FONTTOOLS_MANPATH', convert_path(manpath)) + # all our man pages go to section 1 + manpagedir = pjoin(manpagebase, 'man1') + + manpages = [f for f in glob(pjoin('Doc', 'man', 'man1', '*.1')) if isfile(f)] + + data_files = [(manpagedir, manpages)] + return data_files + setup( - name = "fonttools", - version = "3.0", - description = "Tools to manipulate font files", - author = "Just van Rossum", - author_email = "just@letterror.com", - maintainer = "Behdad Esfahbod", - maintainer_email = "behdad@behdad.org", - url = "http://github.com/behdad/fonttools", - license = "OpenSource, BSD-style", - platforms = ["Any"], - long_description = long_description, - - packages = [ - "fontTools", - "fontTools.encodings", - "fontTools.misc", - "fontTools.pens", - "fontTools.ttLib", - "fontTools.ttLib.tables", - ], - py_modules = ['sstruct', 'xmlWriter'], - package_dir = {'': 'Lib'}, - extra_path = 'FontTools', - data_files = [('share/man/man1', ["Doc/ttx.1"])], - scripts = distutils_scripts, - entry_points = { - 'console_scripts': [ - "ttx = fontTools.ttx:main", - "pyftsubset = fontTools.subset:main", - "pyftmerge = fontTools.merge:main", - "pyftinspect = fontTools.inspect:main" - ] - }, - **classifiers - ) + name="fonttools", + version="3.21.2", + description="Tools to manipulate font files", + author="Just van Rossum", + author_email="just@letterror.com", + maintainer="Behdad Esfahbod", + maintainer_email="behdad@behdad.org", + url="http://github.com/fonttools/fonttools", + license="MIT", + platforms=["Any"], + long_description=long_description, + package_dir={'': 'Lib'}, + packages=find_packages("Lib"), + include_package_data=True, + data_files=find_data_files(), + setup_requires=pytest_runner + wheel + bumpversion, + tests_require=[ + 'pytest>=3.0', + ], + entry_points={ + 'console_scripts': [ + "fonttools = fontTools.__main__:main", + "ttx = fontTools.ttx:main", + "pyftsubset = 
fontTools.subset:main", + "pyftmerge = fontTools.merge:main", + "pyftinspect = fontTools.inspect:main" + ] + }, + cmdclass={ + "release": release, + 'pass': PassCommand, + }, + **classifiers +) diff -Nru fonttools-3.0/Snippets/dump_woff_metadata.py fonttools-3.21.2/Snippets/dump_woff_metadata.py --- fonttools-3.0/Snippets/dump_woff_metadata.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/dump_woff_metadata.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ +from __future__ import print_function +import sys +from fontTools.ttx import makeOutputFileName +from fontTools.ttLib import TTFont + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + if len(args) < 1: + print("usage: dump_woff_metadata.py " + "INPUT.woff [OUTPUT.xml]", file=sys.stderr) + return 1 + + infile = args[0] + if len(args) > 1: + outfile = args[1] + else: + outfile = makeOutputFileName(infile, None, ".xml") + + font = TTFont(infile) + + if not font.flavorData or not font.flavorData.metaData: + print("No WOFF metadata") + return 1 + + with open(outfile, "wb") as f: + f.write(font.flavorData.metaData) + + +if __name__ == "__main__": + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fix-dflt-langsys.py fonttools-3.21.2/Snippets/fix-dflt-langsys.py --- fonttools-3.0/Snippets/fix-dflt-langsys.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fix-dflt-langsys.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,85 @@ +#!/usr/bin/env python + +import argparse +import logging +import os +import sys + +from fontTools.ttLib import TTFont + + +def ProcessTable(table): + found = set() + + for rec in table.ScriptList.ScriptRecord: + if rec.ScriptTag == "DFLT" and rec.Script.LangSysCount != 0: + tags = [r.LangSysTag for r in rec.Script.LangSysRecord] + logging.info("Removing %d extraneous LangSys records: %s", + rec.Script.LangSysCount, " ".join(tags)) + rec.Script.LangSysRecord = [] + rec.Script.LangSysCount = 0 + found.update(tags) + + if not found: + 
logging.info("All fine") + return False + else: + for rec in table.ScriptList.ScriptRecord: + tags = set([r.LangSysTag for r in rec.Script.LangSysRecord]) + found -= tags + + if found: + logging.warning("Records are missing from non-DFLT scripts: %s", + " ".join(found)) + return True + + +def ProcessFont(font): + found = False + for tag in ("GSUB", "GPOS"): + if tag in font: + logging.info("Processing %s table", tag) + if ProcessTable(font[tag].table): + found = True + else: + # Unmark the table as loaded so that it is read from disk when + # writing the font, to avoid any unnecessary changes caused by + # decompiling then recompiling again. + del font.tables[tag] + + return found + + +def ProcessFiles(filenames): + for filename in filenames: + logging.info("Processing %s", filename) + font = TTFont(filename) + name, ext = os.path.splitext(filename) + fixedname = name + ".fixed" + ext + if ProcessFont(font): + logging.info("Saving fixed font to %s\n", fixedname) + font.save(fixedname) + else: + logging.info("Font file is fine, nothing to fix\n") + + +def main(): + parser = argparse.ArgumentParser( + description="Fix LangSys records for DFLT script") + parser.add_argument("files", metavar="FILE", type=str, nargs="+", + help="input font to process") + parser.add_argument("-s", "--silent", action='store_true', + help="suppress normal messages") + + args = parser.parse_args() + + logformat = "%(levelname)s: %(message)s" + if args.silent: + logging.basicConfig(format=logformat, level=logging.DEBUG) + else: + logging.basicConfig(format=logformat, level=logging.INFO) + + ProcessFiles(args.files) + +if __name__ == "__main__": + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fontTools/afmLib.py fonttools-3.21.2/Snippets/fontTools/afmLib.py --- fonttools-3.0/Snippets/fontTools/afmLib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/afmLib.py 2018-01-08 12:40:40.000000000 +0000 @@ -337,22 +337,13 @@ def readlines(path): - f = open(path, 'rb') 
- data = f.read() - f.close() - # read any text file, regardless whether it's formatted for Mac, Unix or Dos - sep = "" - if '\r' in data: - sep = sep + '\r' # mac or dos - if '\n' in data: - sep = sep + '\n' # unix or dos - return data.split(sep) + with open(path, "r", encoding="ascii") as f: + data = f.read() + return data.splitlines() def writelines(path, lines, sep='\r'): - f = open(path, 'wb') - for line in lines: - f.write(line + sep) - f.close() + with open(path, "w", encoding="ascii", newline=sep) as f: + f.write("\n".join(lines) + "\n") if __name__ == "__main__": diff -Nru fonttools-3.0/Snippets/fontTools/agl.py fonttools-3.21.2/Snippets/fontTools/agl.py --- fonttools-3.0/Snippets/fontTools/agl.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/agl.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,8 +1,12 @@ +# -*- coding: utf-8 -*- # The table below is taken from # http://www.adobe.com/devnet/opentype/archives/aglfn.txt -from __future__ import print_function, division, absolute_import +from __future__ import (print_function, division, absolute_import, + unicode_literals) from fontTools.misc.py23 import * +import re + _aglText = """\ # ----------------------------------------------------------- @@ -727,7 +731,7 @@ unicode = m.group(1) assert len(unicode) == 4 unicode = int(unicode, 16) - glyphName = m.group(2) + glyphName = tostr(m.group(2)) if glyphName in AGL2UV: # the above table contains identical duplicates assert AGL2UV[glyphName] == unicode @@ -736,3 +740,136 @@ UV2AGL[unicode] = glyphName _builddicts() + + +def toUnicode(glyph, isZapfDingbats=False): + """Convert glyph names to Unicode, such as 'longs_t.oldstyle' --> u'ſt' + + If isZapfDingbats is True, the implementation recognizes additional + glyph names (as required by the AGL specification). + """ + # https://github.com/adobe-type-tools/agl-specification#2-the-mapping + # + # 1. 
Drop all the characters from the glyph name starting with + # the first occurrence of a period (U+002E; FULL STOP), if any. + glyph = glyph.split(".", 1)[0] + + # 2. Split the remaining string into a sequence of components, + # using underscore (U+005F; LOW LINE) as the delimiter. + components = glyph.split("_") + + # 3. Map each component to a character string according to the + # procedure below, and concatenate those strings; the result + # is the character string to which the glyph name is mapped. + result = [_glyphComponentToUnicode(c, isZapfDingbats) + for c in components] + return "".join(result) + + +def _glyphComponentToUnicode(component, isZapfDingbats): + # If the font is Zapf Dingbats (PostScript FontName: ZapfDingbats), + # and the component is in the ITC Zapf Dingbats Glyph List, then + # map it to the corresponding character in that list. + dingbat = _zapfDingbatsToUnicode(component) if isZapfDingbats else None + if dingbat: + return dingbat + + # Otherwise, if the component is in AGL, then map it + # to the corresponding character in that list. + # + # TODO: We currently use the AGLFN (Adobe glyph list for new fonts), + # although the spec actually mandates the legacy AGL which is + # a superset of the AGLFN. + # https://github.com/fonttools/fonttools/issues/775 + uchar = AGL2UV.get(component) + if uchar: + return unichr(uchar) + + # Otherwise, if the component is of the form "uni" (U+0075, + # U+006E, and U+0069) followed by a sequence of uppercase + # hexadecimal digits (0–9 and A–F, meaning U+0030 through + # U+0039 and U+0041 through U+0046), if the length of that + # sequence is a multiple of four, and if each group of four + # digits represents a value in the ranges 0000 through D7FF + # or E000 through FFFF, then interpret each as a Unicode scalar + # value and map the component to the string made of those + # scalar values. 
Note that the range and digit-length + # restrictions mean that the "uni" glyph name prefix can be + # used only with UVs in the Basic Multilingual Plane (BMP). + uni = _uniToUnicode(component) + if uni: + return uni + + # Otherwise, if the component is of the form "u" (U+0075) + # followed by a sequence of four to six uppercase hexadecimal + # digits (0–9 and A–F, meaning U+0030 through U+0039 and + # U+0041 through U+0046), and those digits represents a value + # in the ranges 0000 through D7FF or E000 through 10FFFF, then + # interpret it as a Unicode scalar value and map the component + # to the string made of this scalar value. + uni = _uToUnicode(component) + if uni: + return uni + + # Otherwise, map the component to an empty string. + return '' + + +# https://github.com/adobe-type-tools/agl-aglfn/blob/master/zapfdingbats.txt +_AGL_ZAPF_DINGBATS = ( + " ✁✂✄☎✆✝✞✟✠✡☛☞✌✍✎✏✑✒✓✔✕✖✗✘✙✚✛✜✢✣✤✥✦✧★✩✪✫✬✭✮✯✰✱✲✳✴✵✶✷✸✹✺✻✼✽✾✿❀" + "❁❂❃❄❅❆❇❈❉❊❋●❍■❏❑▲▼◆❖ ◗❘❙❚❯❱❲❳❨❩❬❭❪❫❴❵❛❜❝❞❡❢❣❤✐❥❦❧♠♥♦♣ ✉✈✇" + "①②③④⑤⑥⑦⑧⑨⑩❶❷❸❹❺❻❼❽❾❿➀➁➂➃➄➅➆➇➈➉➊➋➌➍➎➏➐➑➒➓➔→➣↔" + "↕➙➛➜➝➞➟➠➡➢➤➥➦➧➨➩➫➭➯➲➳➵➸➺➻➼➽➾➚➪➶➹➘➴➷➬➮➱✃❐❒❮❰") + + +def _zapfDingbatsToUnicode(glyph): + """Helper for toUnicode().""" + if len(glyph) < 2 or glyph[0] != 'a': + return None + try: + gid = int(glyph[1:]) + except ValueError: + return None + if gid < 0 or gid >= len(_AGL_ZAPF_DINGBATS): + return None + uchar = _AGL_ZAPF_DINGBATS[gid] + return uchar if uchar != ' ' else None + + +_re_uni = re.compile("^uni([0-9A-F]+)$") + + +def _uniToUnicode(component): + """Helper for toUnicode() to handle "uniABCD" components.""" + match = _re_uni.match(component) + if match is None: + return None + digits = match.group(1) + if len(digits) % 4 != 0: + return None + chars = [int(digits[i : i + 4], 16) + for i in range(0, len(digits), 4)] + if any(c >= 0xD800 and c <= 0xDFFF for c in chars): + # The AGL specification explicitly excluded surrogate pairs. 
+ return None + return ''.join([unichr(c) for c in chars]) + + +_re_u = re.compile("^u([0-9A-F]{4,6})$") + + +def _uToUnicode(component): + """Helper for toUnicode() to handle "u1ABCD" components.""" + match = _re_u.match(component) + if match is None: + return None + digits = match.group(1) + try: + value = int(digits, 16) + except ValueError: + return None + if ((value >= 0x0000 and value <= 0xD7FF) or + (value >= 0xE000 and value <= 0x10FFFF)): + return unichr(value) + return None diff -Nru fonttools-3.0/Snippets/fontTools/cffLib/__init__.py fonttools-3.21.2/Snippets/fontTools/cffLib/__init__.py --- fonttools-3.0/Snippets/fontTools/cffLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/cffLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,2639 @@ +"""cffLib.py -- read/write tools for Adobe CFF fonts.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc import psCharStrings +from fontTools.misc.arrayTools import unionRect, intRect +from fontTools.misc.textTools import safeEval +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables.otBase import OTTableWriter +from fontTools.ttLib.tables.otBase import OTTableReader +from fontTools.ttLib.tables import otTables as ot +import struct +import logging +import re + +# mute cffLib debug messages when running ttx in verbose mode +DEBUG = logging.DEBUG - 1 +log = logging.getLogger(__name__) + +cffHeaderFormat = """ + major: B + minor: B + hdrSize: B +""" + +maxStackLimit = 513 +# maxstack operator has been deprecated. max stack is now always 513. 
+ + +class CFFFontSet(object): + + def decompile(self, file, otFont, isCFF2=None): + self.otFont = otFont + sstruct.unpack(cffHeaderFormat, file.read(3), self) + if isCFF2 is not None: + # called from ttLib: assert 'major' as read from file matches the + # expected version + expected_major = (2 if isCFF2 else 1) + if self.major != expected_major: + raise ValueError( + "Invalid CFF 'major' version: expected %d, found %d" % + (expected_major, self.major)) + else: + # use 'major' version from file to determine if isCFF2 + assert self.major in (1, 2), "Unknown CFF format" + isCFF2 = self.major == 2 + if not isCFF2: + self.offSize = struct.unpack("B", file.read(1))[0] + file.seek(self.hdrSize) + self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2)) + self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2) + self.strings = IndexedStrings(file) + else: # isCFF2 + self.topDictSize = struct.unpack(">H", file.read(2))[0] + file.seek(self.hdrSize) + self.fontNames = ["CFF2Font"] + cff2GetGlyphOrder = otFont.getGlyphOrder + # in CFF2, offsetSize is the size of the TopDict data. + self.topDictIndex = TopDictIndex( + file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2) + self.strings = None + self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2) + self.topDictIndex.strings = self.strings + self.topDictIndex.GlobalSubrs = self.GlobalSubrs + + def __len__(self): + return len(self.fontNames) + + def keys(self): + return list(self.fontNames) + + def values(self): + return self.topDictIndex + + def __getitem__(self, nameOrIndex): + """ Return TopDict instance identified by name (str) or index (int + or any object that implements `__index__`). 
+ """ + if hasattr(nameOrIndex, "__index__"): + index = nameOrIndex.__index__() + elif isinstance(nameOrIndex, basestring): + name = nameOrIndex + try: + index = self.fontNames.index(name) + except ValueError: + raise KeyError(nameOrIndex) + else: + raise TypeError(nameOrIndex) + return self.topDictIndex[index] + + def compile(self, file, otFont, isCFF2=None): + self.otFont = otFont + if isCFF2 is not None: + # called from ttLib: assert 'major' value matches expected version + expected_major = (2 if isCFF2 else 1) + if self.major != expected_major: + raise ValueError( + "Invalid CFF 'major' version: expected %d, found %d" % + (expected_major, self.major)) + else: + # use current 'major' value to determine output format + assert self.major in (1, 2), "Unknown CFF format" + isCFF2 = self.major == 2 + + if otFont.recalcBBoxes and not isCFF2: + for topDict in self.topDictIndex: + topDict.recalcFontBBox() + + if not isCFF2: + strings = IndexedStrings() + else: + strings = None + writer = CFFWriter(isCFF2) + topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2) + if isCFF2: + self.hdrSize = 5 + writer.add(sstruct.pack(cffHeaderFormat, self)) + # Note: topDictSize will most likely change in CFFWriter.toFile(). + self.topDictSize = topCompiler.getDataLength() + writer.add(struct.pack(">H", self.topDictSize)) + else: + self.hdrSize = 4 + self.offSize = 4 # will most likely change in CFFWriter.toFile(). 
+ writer.add(sstruct.pack(cffHeaderFormat, self)) + writer.add(struct.pack("B", self.offSize)) + if not isCFF2: + fontNames = Index() + for name in self.fontNames: + fontNames.append(name) + writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2)) + writer.add(topCompiler) + if not isCFF2: + writer.add(strings.getCompiler()) + writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2)) + + for topDict in self.topDictIndex: + if not hasattr(topDict, "charset") or topDict.charset is None: + charset = otFont.getGlyphOrder() + topDict.charset = charset + children = topCompiler.getChildren(strings) + for child in children: + writer.add(child) + + writer.toFile(file) + + def toXML(self, xmlWriter, progress=None): + xmlWriter.simpletag("major", value=self.major) + xmlWriter.newline() + xmlWriter.simpletag("minor", value=self.minor) + xmlWriter.newline() + for fontName in self.fontNames: + xmlWriter.begintag("CFFFont", name=tostr(fontName)) + xmlWriter.newline() + font = self[fontName] + font.toXML(xmlWriter, progress) + xmlWriter.endtag("CFFFont") + xmlWriter.newline() + xmlWriter.newline() + xmlWriter.begintag("GlobalSubrs") + xmlWriter.newline() + self.GlobalSubrs.toXML(xmlWriter, progress) + xmlWriter.endtag("GlobalSubrs") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, otFont=None): + self.otFont = otFont + + # set defaults. These will be replaced if there are entries for them + # in the XML file. + if not hasattr(self, "major"): + self.major = 1 + if not hasattr(self, "minor"): + self.minor = 0 + + if name == "CFFFont": + if self.major == 1: + if not hasattr(self, "offSize"): + # this will be recalculated when the cff is compiled. 
+ self.offSize = 4 + if not hasattr(self, "hdrSize"): + self.hdrSize = 4 + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + if not hasattr(self, "fontNames"): + self.fontNames = [] + self.topDictIndex = TopDictIndex() + fontName = attrs["name"] + self.fontNames.append(fontName) + topDict = TopDict(GlobalSubrs=self.GlobalSubrs) + topDict.charset = None # gets filled in later + elif self.major == 2: + if not hasattr(self, "hdrSize"): + self.hdrSize = 5 + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + if not hasattr(self, "fontNames"): + self.fontNames = ["CFF2Font"] + cff2GetGlyphOrder = self.otFont.getGlyphOrder + topDict = TopDict( + GlobalSubrs=self.GlobalSubrs, + cff2GetGlyphOrder=cff2GetGlyphOrder) + self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None) + self.topDictIndex.append(topDict) + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + topDict.fromXML(name, attrs, content) + elif name == "GlobalSubrs": + subrCharStringClass = psCharStrings.T2CharString + if not hasattr(self, "GlobalSubrs"): + self.GlobalSubrs = GlobalSubrsIndex() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + subr = subrCharStringClass() + subr.fromXML(name, attrs, content) + self.GlobalSubrs.append(subr) + elif name == "major": + self.major = int(attrs['value']) + elif name == "minor": + self.minor = int(attrs['value']) + + def convertCFFToCFF2(self, otFont): + # This assumes a decompiled CFF table. 
+ self.major = 2 + cff2GetGlyphOrder = self.otFont.getGlyphOrder + topDictData = TopDictIndex(None, cff2GetGlyphOrder, None) + topDictData.items = self.topDictIndex.items + self.topDictIndex = topDictData + topDict = topDictData[0] + if hasattr(topDict, 'Private'): + privateDict = topDict.Private + else: + privateDict = None + opOrder = buildOrder(topDictOperators2) + topDict.order = opOrder + topDict.cff2GetGlyphOrder = cff2GetGlyphOrder + for entry in topDictOperators: + key = entry[1] + if key not in opOrder: + if key in topDict.rawDict: + del topDict.rawDict[key] + if hasattr(topDict, key): + exec("del topDict.%s" % (key)) + + if not hasattr(topDict, "FDArray"): + fdArray = topDict.FDArray = FDArrayIndex() + fdArray.strings = None + fdArray.GlobalSubrs = topDict.GlobalSubrs + topDict.GlobalSubrs.fdArray = fdArray + charStrings = topDict.CharStrings + if charStrings.charStringsAreIndexed: + charStrings.charStringsIndex.fdArray = fdArray + else: + charStrings.fdArray = fdArray + fontDict = FontDict() + fdArray.append(fontDict) + fontDict.Private = privateDict + privateOpOrder = buildOrder(privateDictOperators2) + for entry in privateDictOperators: + key = entry[1] + if key not in privateOpOrder: + if key in privateDict.rawDict: + # print "Removing private dict", key + del privateDict.rawDict[key] + if hasattr(privateDict, key): + exec("del privateDict.%s" % (key)) + # print "Removing privateDict attr", key + else: + # clean up the PrivateDicts in the fdArray + privateOpOrder = buildOrder(privateDictOperators2) + for fontDict in fdArray: + privateDict = fontDict.Private + for entry in privateDictOperators: + key = entry[1] + if key not in privateOpOrder: + if key in privateDict.rawDict: + # print "Removing private dict", key + del privateDict.rawDict[key] + if hasattr(privateDict, key): + exec("del privateDict.%s" % (key)) + # print "Removing privateDict attr", key + # At this point, the Subrs and Charstrings are all still T2Charstring class + # easiest to fix 
this by compiling, then decompiling again + file = BytesIO() + self.compile(file, otFont, isCFF2=True) + file.seek(0) + self.decompile(file, otFont, isCFF2=True) + + +class CFFWriter(object): + + def __init__(self, isCFF2): + self.data = [] + self.isCFF2 = isCFF2 + + def add(self, table): + self.data.append(table) + + def toFile(self, file): + lastPosList = None + count = 1 + while True: + log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count) + count = count + 1 + pos = 0 + posList = [pos] + for item in self.data: + if hasattr(item, "getDataLength"): + endPos = pos + item.getDataLength() + if isinstance(item, TopDictIndexCompiler) and item.isCFF2: + self.topDictSize = item.getDataLength() + else: + endPos = pos + len(item) + if hasattr(item, "setPos"): + item.setPos(pos, endPos) + pos = endPos + posList.append(pos) + if posList == lastPosList: + break + lastPosList = posList + log.log(DEBUG, "CFFWriter.toFile() writing to file.") + begin = file.tell() + if self.isCFF2: + self.data[1] = struct.pack(">H", self.topDictSize) + else: + self.offSize = calcOffSize(lastPosList[-1]) + self.data[1] = struct.pack("B", self.offSize) + posList = [0] + for item in self.data: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + posList.append(file.tell() - begin) + assert posList == lastPosList + + +def calcOffSize(largestOffset): + if largestOffset < 0x100: + offSize = 1 + elif largestOffset < 0x10000: + offSize = 2 + elif largestOffset < 0x1000000: + offSize = 3 + else: + offSize = 4 + return offSize + + +class IndexCompiler(object): + + def __init__(self, items, strings, parent, isCFF2=None): + if isCFF2 is None and hasattr(parent, "isCFF2"): + isCFF2 = parent.isCFF2 + assert isCFF2 is not None + self.isCFF2 = isCFF2 + self.items = self.getItems(items, strings) + self.parent = parent + + def getItems(self, items, strings): + return items + + def getOffsets(self): + # An empty INDEX contains only the count field. 
+ if self.items: + pos = 1 + offsets = [pos] + for item in self.items: + if hasattr(item, "getDataLength"): + pos = pos + item.getDataLength() + else: + pos = pos + len(item) + offsets.append(pos) + else: + offsets = [] + return offsets + + def getDataLength(self): + if self.isCFF2: + countSize = 4 + else: + countSize = 2 + + if self.items: + lastOffset = self.getOffsets()[-1] + offSize = calcOffSize(lastOffset) + dataLength = ( + countSize + # count + 1 + # offSize + (len(self.items) + 1) * offSize + # the offsets + lastOffset - 1 # size of object data + ) + else: + # count. For empty INDEX tables, this is the only entry. + dataLength = countSize + + return dataLength + + def toFile(self, file): + offsets = self.getOffsets() + if self.isCFF2: + writeCard32(file, len(self.items)) + else: + writeCard16(file, len(self.items)) + # An empty INDEX contains only the count field. + if self.items: + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) == -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + data = tobytes(item, encoding="latin1") + file.write(data) + + +class IndexedStringsCompiler(IndexCompiler): + + def getItems(self, items, strings): + return items.strings + + +class TopDictIndexCompiler(IndexCompiler): + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for topDict in self.items: + children.extend(topDict.getChildren(strings)) + return children + + def getOffsets(self): + if self.isCFF2: + offsets = [0, self.items[0].getDataLength()] + return offsets + else: + return super(TopDictIndexCompiler, self).getOffsets() + + def getDataLength(self): + if self.isCFF2: + dataLength = self.items[0].getDataLength() + return 
dataLength + else: + return super(TopDictIndexCompiler, self).getDataLength() + + def toFile(self, file): + if self.isCFF2: + self.items[0].toFile(file) + else: + super(TopDictIndexCompiler, self).toFile(file) + + +class FDArrayIndexCompiler(IndexCompiler): + + def getItems(self, items, strings): + out = [] + for item in items: + out.append(item.getCompiler(strings, self)) + return out + + def getChildren(self, strings): + children = [] + for fontDict in self.items: + children.extend(fontDict.getChildren(strings)) + return children + + def toFile(self, file): + offsets = self.getOffsets() + if self.isCFF2: + writeCard32(file, len(self.items)) + else: + writeCard16(file, len(self.items)) + offSize = calcOffSize(offsets[-1]) + writeCard8(file, offSize) + offSize = -offSize + pack = struct.pack + for offset in offsets: + binOffset = pack(">l", offset)[offSize:] + assert len(binOffset) == -offSize + file.write(binOffset) + for item in self.items: + if hasattr(item, "toFile"): + item.toFile(file) + else: + file.write(item) + + def setPos(self, pos, endPos): + self.parent.rawDict["FDArray"] = pos + + +class GlobalSubrsCompiler(IndexCompiler): + + def getItems(self, items, strings): + out = [] + for cs in items: + cs.compile(self.isCFF2) + out.append(cs.bytecode) + return out + + +class SubrsCompiler(GlobalSubrsCompiler): + + def setPos(self, pos, endPos): + offset = pos - self.parent.pos + self.parent.rawDict["Subrs"] = offset + + +class CharStringsCompiler(GlobalSubrsCompiler): + + def getItems(self, items, strings): + out = [] + for cs in items: + cs.compile(self.isCFF2) + out.append(cs.bytecode) + return out + + def setPos(self, pos, endPos): + self.parent.rawDict["CharStrings"] = pos + + +class Index(object): + + """This class represents what the CFF spec calls an INDEX.""" + + compilerClass = IndexCompiler + + def __init__(self, file=None, isCFF2=None): + assert (isCFF2 is None) == (file is None) + self.items = [] + name = self.__class__.__name__ + if file is None: 
+ return + self._isCFF2 = isCFF2 + log.log(DEBUG, "loading %s at %s", name, file.tell()) + self.file = file + if isCFF2: + count = readCard32(file) + else: + count = readCard16(file) + if count == 0: + return + self.items = [None] * count + offSize = readCard8(file) + log.log(DEBUG, " index count: %s offSize: %s", count, offSize) + assert offSize <= 4, "offSize too large: %s" % offSize + self.offsets = offsets = [] + pad = b'\0' * (4 - offSize) + for index in range(count + 1): + chunk = file.read(offSize) + chunk = pad + chunk + offset, = struct.unpack(">L", chunk) + offsets.append(int(offset)) + self.offsetBase = file.tell() - 1 + file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot + log.log(DEBUG, " end of %s at %s", name, file.tell()) + + def __len__(self): + return len(self.items) + + def __getitem__(self, index): + item = self.items[index] + if item is not None: + return item + offset = self.offsets[index] + self.offsetBase + size = self.offsets[index + 1] - self.offsets[index] + file = self.file + file.seek(offset) + data = file.read(size) + assert len(data) == size + item = self.produceItem(index, data, file, offset) + self.items[index] = item + return item + + def __setitem__(self, index, item): + self.items[index] = item + + def produceItem(self, index, data, file, offset): + return data + + def append(self, item): + self.items.append(item) + + def getCompiler(self, strings, parent, isCFF2=None): + return self.compilerClass(self, strings, parent, isCFF2=isCFF2) + + +class GlobalSubrsIndex(Index): + + compilerClass = GlobalSubrsCompiler + subrClass = psCharStrings.T2CharString + charStringClass = psCharStrings.T2CharString + + def __init__(self, file=None, globalSubrs=None, private=None, + fdSelect=None, fdArray=None, isCFF2=None): + super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2) + self.globalSubrs = globalSubrs + self.private = private + if fdSelect: + self.fdSelect = fdSelect + if fdArray: + self.fdArray = fdArray + if 
isCFF2: + # CFF2Subr's can have numeric arguments on the stack after the last operator. + self.subrClass = psCharStrings.CFF2Subr + self.charStringClass = psCharStrings.CFF2Subr + + + def produceItem(self, index, data, file, offset): + if self.private is not None: + private = self.private + elif hasattr(self, 'fdArray') and self.fdArray is not None: + if hasattr(self, 'fdSelect') and self.fdSelect is not None: + fdIndex = self.fdSelect[index] + else: + fdIndex = 0 + private = self.fdArray[fdIndex].Private + else: + private = None + return self.subrClass(data, private=private, globalSubrs=self.globalSubrs) + + def toXML(self, xmlWriter, progress): + xmlWriter.comment( + "The 'index' attribute is only for humans; " + "it is ignored when parsed.") + xmlWriter.newline() + for i in range(len(self)): + subr = self[i] + if subr.needsDecompilation(): + xmlWriter.begintag("CharString", index=i, raw=1) + else: + xmlWriter.begintag("CharString", index=i) + xmlWriter.newline() + subr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + if name != "CharString": + return + subr = self.subrClass() + subr.fromXML(name, attrs, content) + self.append(subr) + + def getItemAndSelector(self, index): + sel = None + if hasattr(self, 'fdSelect'): + sel = self.fdSelect[index] + return self[index], sel + + +class SubrsIndex(GlobalSubrsIndex): + compilerClass = SubrsCompiler + + +class TopDictIndex(Index): + + compilerClass = TopDictIndexCompiler + + def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0, + isCFF2=None): + assert (isCFF2 is None) == (file is None) + self.cff2GetGlyphOrder = cff2GetGlyphOrder + if file is not None and isCFF2: + self._isCFF2 = isCFF2 + self.items = [] + name = self.__class__.__name__ + log.log(DEBUG, "loading %s at %s", name, file.tell()) + self.file = file + count = 1 + self.items = [None] * count + self.offsets = [0, topSize] + self.offsetBase = file.tell() + # pretend we've read the 
whole lot + file.seek(self.offsetBase + topSize) + log.log(DEBUG, " end of %s at %s", name, file.tell()) + else: + super(TopDictIndex, self).__init__(file, isCFF2=isCFF2) + + def produceItem(self, index, data, file, offset): + top = TopDict( + self.strings, file, offset, self.GlobalSubrs, + self.cff2GetGlyphOrder, isCFF2=self._isCFF2) + top.decompile(data) + return top + + def toXML(self, xmlWriter, progress): + for i in range(len(self)): + xmlWriter.begintag("FontDict", index=i) + xmlWriter.newline() + self[i].toXML(xmlWriter, progress) + xmlWriter.endtag("FontDict") + xmlWriter.newline() + + +class FDArrayIndex(Index): + + compilerClass = FDArrayIndexCompiler + + def toXML(self, xmlWriter, progress): + for i in range(len(self)): + xmlWriter.begintag("FontDict", index=i) + xmlWriter.newline() + self[i].toXML(xmlWriter, progress) + xmlWriter.endtag("FontDict") + xmlWriter.newline() + + def produceItem(self, index, data, file, offset): + fontDict = FontDict( + self.strings, file, offset, self.GlobalSubrs, isCFF2=self._isCFF2, + vstore=self.vstore) + fontDict.decompile(data) + return fontDict + + def fromXML(self, name, attrs, content): + if name != "FontDict": + return + fontDict = FontDict() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + fontDict.fromXML(name, attrs, content) + self.append(fontDict) + + +class VarStoreData(object): + + def __init__(self, file=None, otVarStore=None): + self.file = file + self.data = None + self.otVarStore = otVarStore + self.font = TTFont() # dummy font for the decompile function. + + def decompile(self): + if self.file: + class GlobalState(object): + def __init__(self, tableType, cachingStats): + self.tableType = tableType + self.cachingStats = cachingStats + globalState = GlobalState(tableType="VarStore", cachingStats={}) + # read data in from file. Assume position is correct. 
+ length = readCard16(self.file) + self.data = self.file.read(length) + globalState = {} + reader = OTTableReader(self.data, globalState) + self.otVarStore = ot.VarStore() + self.otVarStore.decompile(reader, self.font) + return self + + def compile(self): + writer = OTTableWriter() + self.otVarStore.compile(writer, self.font) + # Note that this omits the initial Card16 length from the CFF2 + # VarStore data block + self.data = writer.getAllData() + + def writeXML(self, xmlWriter, name): + self.otVarStore.toXML(xmlWriter, self.font) + + def xmlRead(self, name, attrs, content, parent): + self.otVarStore = ot.VarStore() + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + self.otVarStore.fromXML(name, attrs, content, self.font) + else: + pass + return None + + def __len__(self): + return len(self.data) + + def getNumRegions(self, vsIndex): + varData = self.otVarStore.VarData[vsIndex] + numRegions = varData.VarRegionCount + return numRegions + + +class FDSelect(object): + + def __init__(self, file=None, numGlyphs=None, format=None): + if file: + # read data in from file + self.format = readCard8(file) + if self.format == 0: + from array import array + self.gidArray = array("B", file.read(numGlyphs)).tolist() + elif self.format == 3: + gidArray = [None] * numGlyphs + nRanges = readCard16(file) + fd = None + prev = None + for i in range(nRanges): + first = readCard16(file) + if prev is not None: + for glyphID in range(prev, first): + gidArray[glyphID] = fd + prev = first + fd = readCard8(file) + if prev is not None: + first = readCard16(file) + for glyphID in range(prev, first): + gidArray[glyphID] = fd + self.gidArray = gidArray + else: + assert False, "unsupported FDSelect format: %s" % format + else: + # reading from XML. Make empty gidArray, and leave format as passed in. + # format is None will result in the smallest representation being used. 
+ self.format = format + self.gidArray = [] + + def __len__(self): + return len(self.gidArray) + + def __getitem__(self, index): + return self.gidArray[index] + + def __setitem__(self, index, fdSelectValue): + self.gidArray[index] = fdSelectValue + + def append(self, fdSelectValue): + self.gidArray.append(fdSelectValue) + + +class CharStrings(object): + + def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray, + isCFF2=None): + self.globalSubrs = globalSubrs + if file is not None: + self.charStringsIndex = SubrsIndex( + file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2) + self.charStrings = charStrings = {} + for i in range(len(charset)): + charStrings[charset[i]] = i + # read from OTF file: charStrings.values() are indices into + # charStringsIndex. + self.charStringsAreIndexed = 1 + else: + self.charStrings = {} + # read from ttx file: charStrings.values() are actual charstrings + self.charStringsAreIndexed = 0 + self.private = private + if fdSelect is not None: + self.fdSelect = fdSelect + if fdArray is not None: + self.fdArray = fdArray + + def keys(self): + return list(self.charStrings.keys()) + + def values(self): + if self.charStringsAreIndexed: + return self.charStringsIndex + else: + return list(self.charStrings.values()) + + def has_key(self, name): + return name in self.charStrings + + __contains__ = has_key + + def __len__(self): + return len(self.charStrings) + + def __getitem__(self, name): + charString = self.charStrings[name] + if self.charStringsAreIndexed: + charString = self.charStringsIndex[charString] + return charString + + def __setitem__(self, name, charString): + if self.charStringsAreIndexed: + index = self.charStrings[name] + self.charStringsIndex[index] = charString + else: + self.charStrings[name] = charString + + def getItemAndSelector(self, name): + if self.charStringsAreIndexed: + index = self.charStrings[name] + return self.charStringsIndex.getItemAndSelector(index) + else: + if hasattr(self, 
'fdArray'): + if hasattr(self, 'fdSelect'): + sel = self.charStrings[name].fdSelectIndex + else: + sel = 0 + else: + sel = None + return self.charStrings[name], sel + + def toXML(self, xmlWriter, progress): + names = sorted(self.keys()) + i = 0 + step = 10 + numGlyphs = len(names) + for name in names: + charStr, fdSelectIndex = self.getItemAndSelector(name) + if charStr.needsDecompilation(): + raw = [("raw", 1)] + else: + raw = [] + if fdSelectIndex is None: + xmlWriter.begintag("CharString", [('name', name)] + raw) + else: + xmlWriter.begintag( + "CharString", + [('name', name), ('fdSelectIndex', fdSelectIndex)] + raw) + xmlWriter.newline() + charStr.toXML(xmlWriter) + xmlWriter.endtag("CharString") + xmlWriter.newline() + if not i % step and progress is not None: + progress.setLabel("Dumping 'CFF ' table... (%s)" % name) + progress.increment(step / numGlyphs) + i = i + 1 + + def fromXML(self, name, attrs, content): + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + if name != "CharString": + continue + fdID = -1 + if hasattr(self, "fdArray"): + try: + fdID = safeEval(attrs["fdSelectIndex"]) + except KeyError: + fdID = 0 + private = self.fdArray[fdID].Private + else: + private = self.private + + glyphName = attrs["name"] + charStringClass = psCharStrings.T2CharString + charString = charStringClass( + private=private, + globalSubrs=self.globalSubrs) + charString.fromXML(name, attrs, content) + if fdID >= 0: + charString.fdSelectIndex = fdID + self[glyphName] = charString + + +def readCard8(file): + return byteord(file.read(1)) + + +def readCard16(file): + value, = struct.unpack(">H", file.read(2)) + return value + + +def readCard32(file): + value, = struct.unpack(">L", file.read(4)) + return value + + +def writeCard8(file, value): + file.write(bytechr(value)) + + +def writeCard16(file, value): + file.write(struct.pack(">H", value)) + + +def writeCard32(file, value): + file.write(struct.pack(">L", value)) + 
+ +def packCard8(value): + return bytechr(value) + + +def packCard16(value): + return struct.pack(">H", value) + + +def buildOperatorDict(table): + d = {} + for op, name, arg, default, conv in table: + d[op] = (name, arg) + return d + + +def buildOpcodeDict(table): + d = {} + for op, name, arg, default, conv in table: + if isinstance(op, tuple): + op = bytechr(op[0]) + bytechr(op[1]) + else: + op = bytechr(op) + d[name] = (op, arg) + return d + + +def buildOrder(table): + l = [] + for op, name, arg, default, conv in table: + l.append(name) + return l + + +def buildDefaults(table): + d = {} + for op, name, arg, default, conv in table: + if default is not None: + d[name] = default + return d + + +def buildConverters(table): + d = {} + for op, name, arg, default, conv in table: + d[name] = conv + return d + + +class SimpleConverter(object): + + def read(self, parent, value): + return value + + def write(self, parent, value): + return value + + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return attrs["value"] + + +class ASCIIConverter(SimpleConverter): + + def read(self, parent, value): + return tostr(value, encoding='ascii') + + def write(self, parent, value): + return tobytes(value, encoding='ascii') + + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, value=tounicode(value, encoding="ascii")) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("ascii")) + + +class Latin1Converter(SimpleConverter): + + def read(self, parent, value): + return tostr(value, encoding='latin1') + + def write(self, parent, value): + return tobytes(value, encoding='latin1') + + def xmlWrite(self, xmlWriter, name, value, progress): + value = tounicode(value, encoding="latin1") + if name in ['Notice', 'Copyright']: + value = re.sub(r"[\r\n]\s+", " ", value) + 
xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return tobytes(attrs["value"], encoding=("latin1")) + + +def parseNum(s): + try: + value = int(s) + except: + value = float(s) + return value + + +def parseBlendList(s): + valueList = [] + for element in s: + if isinstance(element, basestring): + continue + name, attrs, content = element + blendList = attrs["value"].split() + blendList = [eval(val) for val in blendList] + valueList.append(blendList) + if len(valueList) == 1: + valueList = valueList[0] + return valueList + + +class NumberConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value, progress): + if isinstance(value, list): + xmlWriter.begintag(name) + xmlWriter.newline() + xmlWriter.indent() + blendValue = " ".join([str(val) for val in value]) + xmlWriter.simpletag(kBlendDictOpName, value=blendValue) + xmlWriter.newline() + xmlWriter.dedent() + xmlWriter.endtag(name) + xmlWriter.newline() + else: + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + valueString = attrs.get("value", None) + if valueString is None: + value = parseBlendList(content) + else: + value = parseNum(attrs["value"]) + return value + + +class ArrayConverter(SimpleConverter): + def xmlWrite(self, xmlWriter, name, value, progress): + if value and isinstance(value[0], list): + xmlWriter.begintag(name) + xmlWriter.newline() + xmlWriter.indent() + for valueList in value: + blendValue = " ".join([str(val) for val in valueList]) + xmlWriter.simpletag(kBlendDictOpName, value=blendValue) + xmlWriter.newline() + xmlWriter.dedent() + xmlWriter.endtag(name) + xmlWriter.newline() + else: + value = " ".join([str(val) for val in value]) + xmlWriter.simpletag(name, value=value) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + valueString = attrs.get("value", None) + if valueString is None: + valueList = parseBlendList(content) + 
else: + values = valueString.split() + valueList = [parseNum(value) for value in values] + return valueList + + +class TableConverter(SimpleConverter): + + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.begintag(name) + xmlWriter.newline() + value.toXML(xmlWriter, progress) + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + ob = self.getClass()() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + ob.fromXML(name, attrs, content) + return ob + + +class PrivateDictConverter(TableConverter): + + def getClass(self): + return PrivateDict + + def read(self, parent, value): + size, offset = value + file = parent.file + isCFF2 = parent._isCFF2 + try: + vstore = parent.vstore + except AttributeError: + vstore = None + priv = PrivateDict( + parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore) + file.seek(offset) + data = file.read(size) + assert len(data) == size + priv.decompile(data) + return priv + + def write(self, parent, value): + return (0, 0) # dummy value + + +class SubrsConverter(TableConverter): + + def getClass(self): + return SubrsIndex + + def read(self, parent, value): + file = parent.file + isCFF2 = parent._isCFF2 + file.seek(parent.offset + value) # Offset(self) + return SubrsIndex(file, isCFF2=isCFF2) + + def write(self, parent, value): + return 0 # dummy value + + +class CharStringsConverter(TableConverter): + + def read(self, parent, value): + file = parent.file + isCFF2 = parent._isCFF2 + charset = parent.charset + globalSubrs = parent.GlobalSubrs + if hasattr(parent, "FDArray"): + fdArray = parent.FDArray + if hasattr(parent, "FDSelect"): + fdSelect = parent.FDSelect + else: + fdSelect = None + private = None + else: + fdSelect, fdArray = None, None + private = parent.Private + file.seek(value) # Offset(0) + charStrings = CharStrings( + file, charset, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2) + return 
charStrings + + def write(self, parent, value): + return 0 # dummy value + + def xmlRead(self, name, attrs, content, parent): + if hasattr(parent, "FDArray"): + # if it is a CID-keyed font, then the private Dict is extracted from the + # parent.FDArray + fdArray = parent.FDArray + if hasattr(parent, "FDSelect"): + fdSelect = parent.FDSelect + else: + fdSelect = None + private = None + else: + # if it is a name-keyed font, then the private dict is in the top dict, + # and + # there is no fdArray. + private, fdSelect, fdArray = parent.Private, None, None + charStrings = CharStrings( + None, None, parent.GlobalSubrs, private, fdSelect, fdArray) + charStrings.fromXML(name, attrs, content) + return charStrings + + +class CharsetConverter(object): + def read(self, parent, value): + isCID = hasattr(parent, "ROS") + if value > 2: + numGlyphs = parent.numGlyphs + file = parent.file + file.seek(value) + log.log(DEBUG, "loading charset at %s", value) + format = readCard8(file) + if format == 0: + charset = parseCharset0(numGlyphs, file, parent.strings, isCID) + elif format == 1 or format == 2: + charset = parseCharset(numGlyphs, file, parent.strings, isCID, format) + else: + raise NotImplementedError + assert len(charset) == numGlyphs + log.log(DEBUG, " charset end at %s", file.tell()) + else: # offset == 0 -> no charset data. + if isCID or "CharStrings" not in parent.rawDict: + # We get here only when processing fontDicts from the FDArray of + # CFF-CID fonts. Only the real topDict references the chrset. 
+ assert value == 0 + charset = None + elif value == 0: + charset = cffISOAdobeStrings + elif value == 1: + charset = cffIExpertStrings + elif value == 2: + charset = cffExpertSubsetStrings + if charset and (len(charset) != parent.numGlyphs): + charset = charset[:parent.numGlyphs] + return charset + + def write(self, parent, value): + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value, progress): + # XXX only write charset when not in OT/TTX context, where we + # dump charset as a separate "GlyphOrder" table. + # # xmlWriter.simpletag("charset") + xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + pass + + +class CharsetCompiler(object): + + def __init__(self, strings, charset, parent): + assert charset[0] == '.notdef' + isCID = hasattr(parent.dictObj, "ROS") + data0 = packCharset0(charset, isCID, strings) + data = packCharset(charset, isCID, strings) + if len(data) < len(data0): + self.data = data + else: + self.data = data0 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["charset"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +def getStdCharSet(charset): + # check to see if we can use a predefined charset value. 
+ predefinedCharSetVal = None + predefinedCharSets = [ + (cffISOAdobeStringCount, cffISOAdobeStrings, 0), + (cffExpertStringCount, cffIExpertStrings, 1), + (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2)] + lcs = len(charset) + for cnt, pcs, csv in predefinedCharSets: + if predefinedCharSetVal is not None: + break + if lcs > cnt: + continue + predefinedCharSetVal = csv + for i in range(lcs): + if charset[i] != pcs[i]: + predefinedCharSetVal = None + break + return predefinedCharSetVal + + +def getCIDfromName(name, strings): + return int(name[3:]) + + +def getSIDfromName(name, strings): + return strings.getSID(name) + + +def packCharset0(charset, isCID, strings): + fmt = 0 + data = [packCard8(fmt)] + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + data.append(packCard16(getNameID(name, strings))) + return bytesjoin(data) + + +def packCharset(charset, isCID, strings): + fmt = 1 + ranges = [] + first = None + end = 0 + if isCID: + getNameID = getCIDfromName + else: + getNameID = getSIDfromName + + for name in charset[1:]: + SID = getNameID(name, strings) + if first is None: + first = SID + elif end + 1 != SID: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + first = SID + end = SID + if end: + nLeft = end - first + if nLeft > 255: + fmt = 2 + ranges.append((first, nLeft)) + + data = [packCard8(fmt)] + if fmt == 1: + nLeftFunc = packCard8 + else: + nLeftFunc = packCard16 + for first, nLeft in ranges: + data.append(packCard16(first) + nLeftFunc(nLeft)) + return bytesjoin(data) + + +def parseCharset0(numGlyphs, file, strings, isCID): + charset = [".notdef"] + if isCID: + for i in range(numGlyphs - 1): + CID = readCard16(file) + charset.append("cid" + str(CID).zfill(5)) + else: + for i in range(numGlyphs - 1): + SID = readCard16(file) + charset.append(strings[SID]) + return charset + + +def parseCharset(numGlyphs, file, strings, isCID, fmt): + charset = ['.notdef'] + 
count = 1 + if fmt == 1: + nLeftFunc = readCard8 + else: + nLeftFunc = readCard16 + while count < numGlyphs: + first = readCard16(file) + nLeft = nLeftFunc(file) + if isCID: + for CID in range(first, first + nLeft + 1): + charset.append("cid" + str(CID).zfill(5)) + else: + for SID in range(first, first + nLeft + 1): + charset.append(strings[SID]) + count = count + nLeft + 1 + return charset + + +class EncodingCompiler(object): + + def __init__(self, strings, encoding, parent): + assert not isinstance(encoding, basestring) + data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings) + data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings) + if len(data0) < len(data1): + self.data = data0 + else: + self.data = data1 + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["Encoding"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class EncodingConverter(SimpleConverter): + + def read(self, parent, value): + if value == 0: + return "StandardEncoding" + elif value == 1: + return "ExpertEncoding" + else: + assert value > 1 + file = parent.file + file.seek(value) + log.log(DEBUG, "loading Encoding at %s", value) + fmt = readCard8(file) + haveSupplement = fmt & 0x80 + if haveSupplement: + raise NotImplementedError("Encoding supplements are not yet supported") + fmt = fmt & 0x7f + if fmt == 0: + encoding = parseEncoding0(parent.charset, file, haveSupplement, + parent.strings) + elif fmt == 1: + encoding = parseEncoding1(parent.charset, file, haveSupplement, + parent.strings) + return encoding + + def write(self, parent, value): + if value == "StandardEncoding": + return 0 + elif value == "ExpertEncoding": + return 1 + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value, progress): + if value in ("StandardEncoding", "ExpertEncoding"): + xmlWriter.simpletag(name, name=value) + xmlWriter.newline() + return + xmlWriter.begintag(name) + 
xmlWriter.newline() + for code in range(len(value)): + glyphName = value[code] + if glyphName != ".notdef": + xmlWriter.simpletag("map", code=hex(code), name=glyphName) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + if "name" in attrs: + return attrs["name"] + encoding = [".notdef"] * 256 + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + code = safeEval(attrs["code"]) + glyphName = attrs["name"] + encoding[code] = glyphName + return encoding + + +def parseEncoding0(charset, file, haveSupplement, strings): + nCodes = readCard8(file) + encoding = [".notdef"] * 256 + for glyphID in range(1, nCodes + 1): + code = readCard8(file) + if code != 0: + encoding[code] = charset[glyphID] + return encoding + + +def parseEncoding1(charset, file, haveSupplement, strings): + nRanges = readCard8(file) + encoding = [".notdef"] * 256 + glyphID = 1 + for i in range(nRanges): + code = readCard8(file) + nLeft = readCard8(file) + for glyphID in range(glyphID, glyphID + nLeft + 1): + encoding[code] = charset[glyphID] + code = code + 1 + glyphID = glyphID + 1 + return encoding + + +def packEncoding0(charset, encoding, strings): + fmt = 0 + m = {} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + codes = [] + for name in charset[1:]: + code = m.get(name) + codes.append(code) + + while codes and codes[-1] is None: + codes.pop() + + data = [packCard8(fmt), packCard8(len(codes))] + for code in codes: + if code is None: + code = 0 + data.append(packCard8(code)) + return bytesjoin(data) + + +def packEncoding1(charset, encoding, strings): + fmt = 1 + m = {} + for code in range(len(encoding)): + name = encoding[code] + if name != ".notdef": + m[name] = code + ranges = [] + first = None + end = 0 + for name in charset[1:]: + code = m.get(name, -1) + if first is None: + first = code + elif end + 1 != code: + 
nLeft = end - first + ranges.append((first, nLeft)) + first = code + end = code + nLeft = end - first + ranges.append((first, nLeft)) + + # remove unencoded glyphs at the end. + while ranges and ranges[-1][0] == -1: + ranges.pop() + + data = [packCard8(fmt), packCard8(len(ranges))] + for first, nLeft in ranges: + if first == -1: # unencoded + first = 0 + data.append(packCard8(first) + packCard8(nLeft)) + return bytesjoin(data) + + +class FDArrayConverter(TableConverter): + + def read(self, parent, value): + try: + vstore = parent.VarStore + except AttributeError: + vstore = None + file = parent.file + isCFF2 = parent._isCFF2 + file.seek(value) + fdArray = FDArrayIndex(file, isCFF2=isCFF2) + fdArray.vstore = vstore + fdArray.strings = parent.strings + fdArray.GlobalSubrs = parent.GlobalSubrs + return fdArray + + def write(self, parent, value): + return 0 # dummy value + + def xmlRead(self, name, attrs, content, parent): + fdArray = FDArrayIndex() + for element in content: + if isinstance(element, basestring): + continue + name, attrs, content = element + fdArray.fromXML(name, attrs, content) + return fdArray + + +class FDSelectConverter(object): + + def read(self, parent, value): + file = parent.file + file.seek(value) + fdSelect = FDSelect(file, parent.numGlyphs) + return fdSelect + + def write(self, parent, value): + return 0 # dummy value + + # The FDSelect glyph data is written out to XML in the charstring keys, + # so we write out only the format selector + def xmlWrite(self, xmlWriter, name, value, progress): + xmlWriter.simpletag(name, [('format', value.format)]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + fmt = safeEval(attrs["format"]) + file = None + numGlyphs = None + fdSelect = FDSelect(file, numGlyphs, fmt) + return fdSelect + + +class VarStoreConverter(SimpleConverter): + + def read(self, parent, value): + file = parent.file + file.seek(value) + varStore = VarStoreData(file) + varStore.decompile() + return varStore + + 
def write(self, parent, value): + return 0 # dummy value + + def xmlWrite(self, xmlWriter, name, value, progress): + value.writeXML(xmlWriter, name) + + def xmlRead(self, name, attrs, content, parent): + varStore = VarStoreData() + varStore.xmlRead(name, attrs, content, parent) + return varStore + + +def packFDSelect0(fdSelectArray): + fmt = 0 + data = [packCard8(fmt)] + for index in fdSelectArray: + data.append(packCard8(index)) + return bytesjoin(data) + + +def packFDSelect3(fdSelectArray): + fmt = 3 + fdRanges = [] + lenArray = len(fdSelectArray) + lastFDIndex = -1 + for i in range(lenArray): + fdIndex = fdSelectArray[i] + if lastFDIndex != fdIndex: + fdRanges.append([i, fdIndex]) + lastFDIndex = fdIndex + sentinelGID = i + 1 + + data = [packCard8(fmt)] + data.append(packCard16(len(fdRanges))) + for fdRange in fdRanges: + data.append(packCard16(fdRange[0])) + data.append(packCard8(fdRange[1])) + data.append(packCard16(sentinelGID)) + return bytesjoin(data) + + +class FDSelectCompiler(object): + + def __init__(self, fdSelect, parent): + fmt = fdSelect.format + fdSelectArray = fdSelect.gidArray + if fmt == 0: + self.data = packFDSelect0(fdSelectArray) + elif fmt == 3: + self.data = packFDSelect3(fdSelectArray) + else: + # choose smaller of the two formats + data0 = packFDSelect0(fdSelectArray) + data3 = packFDSelect3(fdSelectArray) + if len(data0) < len(data3): + self.data = data0 + fdSelect.format = 0 + else: + self.data = data3 + fdSelect.format = 3 + + self.parent = parent + + def setPos(self, pos, endPos): + self.parent.rawDict["FDSelect"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class VarStoreCompiler(object): + + def __init__(self, varStoreData, parent): + self.parent = parent + if not varStoreData.data: + varStoreData.compile() + data = [ + packCard16(len(varStoreData.data)), + varStoreData.data + ] + self.data = bytesjoin(data) + + def setPos(self, pos, endPos): + 
self.parent.rawDict["VarStore"] = pos + + def getDataLength(self): + return len(self.data) + + def toFile(self, file): + file.write(self.data) + + +class ROSConverter(SimpleConverter): + + def xmlWrite(self, xmlWriter, name, value, progress): + registry, order, supplement = value + xmlWriter.simpletag( + name, + [ + ('Registry', tostr(registry)), + ('Order', tostr(order)), + ('Supplement', supplement) + ]) + xmlWriter.newline() + + def xmlRead(self, name, attrs, content, parent): + return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement'])) + +topDictOperators = [ +# opcode name argument type default converter + (25, 'maxstack', 'number', None, None), + ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()), + ((12, 20), 'SyntheticBase', 'number', None, None), + (0, 'version', 'SID', None, None), + (1, 'Notice', 'SID', None, Latin1Converter()), + ((12, 0), 'Copyright', 'SID', None, Latin1Converter()), + (2, 'FullName', 'SID', None, None), + ((12, 38), 'FontName', 'SID', None, None), + (3, 'FamilyName', 'SID', None, None), + (4, 'Weight', 'SID', None, None), + ((12, 1), 'isFixedPitch', 'number', 0, None), + ((12, 2), 'ItalicAngle', 'number', 0, None), + ((12, 3), 'UnderlinePosition', 'number', -100, None), + ((12, 4), 'UnderlineThickness', 'number', 50, None), + ((12, 5), 'PaintType', 'number', 0, None), + ((12, 6), 'CharstringType', 'number', 2, None), + ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), + (13, 'UniqueID', 'number', None, None), + (5, 'FontBBox', 'array', [0, 0, 0, 0], None), + ((12, 8), 'StrokeWidth', 'number', 0, None), + (14, 'XUID', 'array', None, None), + ((12, 21), 'PostScript', 'SID', None, None), + ((12, 22), 'BaseFontName', 'SID', None, None), + ((12, 23), 'BaseFontBlend', 'delta', None, None), + ((12, 31), 'CIDFontVersion', 'number', 0, None), + ((12, 32), 'CIDFontRevision', 'number', 0, None), + ((12, 33), 'CIDFontType', 'number', 0, None), + ((12, 34), 'CIDCount', 'number', 8720, None), + (15, 
'charset', 'number', None, CharsetConverter()), + ((12, 35), 'UIDBase', 'number', None, None), + (16, 'Encoding', 'number', 0, EncodingConverter()), + (18, 'Private', ('number', 'number'), None, PrivateDictConverter()), + ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), + ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), + (17, 'CharStrings', 'number', None, CharStringsConverter()), + (24, 'VarStore', 'number', None, VarStoreConverter()), +] + +topDictOperators2 = [ +# opcode name argument type default converter + (25, 'maxstack', 'number', None, None), + ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), + ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), + ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), + (17, 'CharStrings', 'number', None, CharStringsConverter()), + (24, 'VarStore', 'number', None, VarStoreConverter()), +] + +# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, +# in order for the font to compile back from xml. + +kBlendDictOpName = "blend" +blendOp = 23 + +privateDictOperators = [ +# opcode name argument type default converter + (22, "vsindex", 'number', None, None), + (blendOp, kBlendDictOpName, 'blendList', None, None), # This is for reading to/from XML: it not written to CFF. 
+ (6, 'BlueValues', 'delta', None, None), + (7, 'OtherBlues', 'delta', None, None), + (8, 'FamilyBlues', 'delta', None, None), + (9, 'FamilyOtherBlues', 'delta', None, None), + ((12, 9), 'BlueScale', 'number', 0.039625, None), + ((12, 10), 'BlueShift', 'number', 7, None), + ((12, 11), 'BlueFuzz', 'number', 1, None), + (10, 'StdHW', 'number', None, None), + (11, 'StdVW', 'number', None, None), + ((12, 12), 'StemSnapH', 'delta', None, None), + ((12, 13), 'StemSnapV', 'delta', None, None), + ((12, 14), 'ForceBold', 'number', 0, None), + ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated + ((12, 16), 'lenIV', 'number', None, None), # deprecated + ((12, 17), 'LanguageGroup', 'number', 0, None), + ((12, 18), 'ExpansionFactor', 'number', 0.06, None), + ((12, 19), 'initialRandomSeed', 'number', 0, None), + (20, 'defaultWidthX', 'number', 0, None), + (21, 'nominalWidthX', 'number', 0, None), + (19, 'Subrs', 'number', None, SubrsConverter()), +] + +privateDictOperators2 = [ +# opcode name argument type default converter + (22, "vsindex", 'number', None, None), + (blendOp, kBlendDictOpName, 'blendList', None, None), # This is for reading to/from XML: it not written to CFF. 
+ (6, 'BlueValues', 'delta', None, None), + (7, 'OtherBlues', 'delta', None, None), + (8, 'FamilyBlues', 'delta', None, None), + (9, 'FamilyOtherBlues', 'delta', None, None), + ((12, 9), 'BlueScale', 'number', 0.039625, None), + ((12, 10), 'BlueShift', 'number', 7, None), + ((12, 11), 'BlueFuzz', 'number', 1, None), + (10, 'StdHW', 'number', None, None), + (11, 'StdVW', 'number', None, None), + ((12, 12), 'StemSnapH', 'delta', None, None), + ((12, 13), 'StemSnapV', 'delta', None, None), + (19, 'Subrs', 'number', None, SubrsConverter()), +] + + +def addConverters(table): + for i in range(len(table)): + op, name, arg, default, conv = table[i] + if conv is not None: + continue + if arg in ("delta", "array"): + conv = ArrayConverter() + elif arg == "number": + conv = NumberConverter() + elif arg == "SID": + conv = ASCIIConverter() + elif arg == 'blendList': + conv = None + else: + assert False + table[i] = op, name, arg, default, conv + + +addConverters(privateDictOperators) +addConverters(topDictOperators) + + +class TopDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(topDictOperators) + + +class PrivateDictDecompiler(psCharStrings.DictDecompiler): + operators = buildOperatorDict(privateDictOperators) + + +class DictCompiler(object): + maxBlendStack = 0 + + def __init__(self, dictObj, strings, parent, isCFF2=None): + if strings: + assert isinstance(strings, IndexedStrings) + if isCFF2 is None and hasattr(parent, "isCFF2"): + isCFF2 = parent.isCFF2 + assert isCFF2 is not None + self.isCFF2 = isCFF2 + self.dictObj = dictObj + self.strings = strings + self.parent = parent + rawDict = {} + for name in dictObj.order: + value = getattr(dictObj, name, None) + if value is None: + continue + conv = dictObj.converters[name] + value = conv.write(dictObj, value) + if value == dictObj.defaults.get(name): + continue + rawDict[name] = value + self.rawDict = rawDict + + def setPos(self, pos, endPos): + pass + + def getDataLength(self): + return 
len(self.compile("getDataLength")) + + def compile(self, reason): + log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason) + rawDict = self.rawDict + data = [] + for name in self.dictObj.order: + value = rawDict.get(name) + if value is None: + continue + op, argType = self.opcodes[name] + if isinstance(argType, tuple): + l = len(argType) + assert len(value) == l, "value doesn't match arg type" + for i in range(l): + arg = argType[i] + v = value[i] + arghandler = getattr(self, "arg_" + arg) + data.append(arghandler(v)) + else: + arghandler = getattr(self, "arg_" + argType) + data.append(arghandler(value)) + data.append(op) + data = bytesjoin(data) + return data + + def toFile(self, file): + data = self.compile("toFile") + file.write(data) + + def arg_number(self, num): + if isinstance(num, list): + data = [encodeNumber(val) for val in num] + data.append(encodeNumber(1)) + data.append(bytechr(blendOp)) + datum = bytesjoin(data) + else: + datum = encodeNumber(num) + return datum + + def arg_SID(self, s): + return psCharStrings.encodeIntCFF(self.strings.getSID(s)) + + def arg_array(self, value): + data = [] + for num in value: + data.append(self.arg_number(num)) + return bytesjoin(data) + + def arg_delta(self, value): + if not value: + return b"" + val0 = value[0] + if isinstance(val0, list): + data = self.arg_delta_blend(value) + else: + out = [] + last = 0 + for v in value: + out.append(v - last) + last = v + data = [] + for num in out: + data.append(encodeNumber(num)) + return bytesjoin(data) + + + def arg_delta_blend(self, value): + """ A delta list with blend lists has to be *all* blend lists. + The value is a list is arranged as follows. + [ + [V0, d0..dn] + [V1, d0..dn] + ... + [Vm, d0..dn] + ] + V is the absolute coordinate value from the default font, and d0-dn are + the delta values from the n regions. Each V is an absolute coordinate + from the default font. + We want to return a list: + [ + [v0, v1..vm] + [d0..dn] + ... 
+ [d0..dn] + numBlends + blendOp + ] + where each v is relative to the previous default font value. + """ + numMasters = len(value[0]) + numBlends = len(value) + numStack = (numBlends * numMasters) + 1 + if numStack > self.maxBlendStack: + # Figure out the max number of value we can blend + # and divide this list up into chunks of that size. + + numBlendValues = int((self.maxBlendStack - 1) / numMasters) + out = [] + while True: + numVal = min(len(value), numBlendValues) + if numVal == 0: + break + valList = value[0:numVal] + out1 = self.arg_delta_blend(valList) + out.extend(out1) + value = value[numVal:] + else: + firstList = [0] * numBlends + deltaList = [None] * numBlends + i = 0 + prevVal = 0 + while i < numBlends: + # For PrivateDict BlueValues, the default font + # values are absolute, not relative. + # Must convert these back to relative coordinates + # befor writing to CFF2. + defaultValue = value[i][0] + firstList[i] = defaultValue - prevVal + prevVal = defaultValue + deltaList[i] = value[i][1:] + i += 1 + + relValueList = firstList + for blendList in deltaList: + relValueList.extend(blendList) + out = [encodeNumber(val) for val in relValueList] + out.append(encodeNumber(numBlends)) + out.append(bytechr(blendOp)) + return out + + +def encodeNumber(num): + if isinstance(num, float): + return psCharStrings.encodeFloat(num) + else: + return psCharStrings.encodeIntCFF(num) + + +class TopDictCompiler(DictCompiler): + + opcodes = buildOpcodeDict(topDictOperators) + + def getChildren(self, strings): + isCFF2 = self.isCFF2 + children = [] + if self.dictObj.cff2GetGlyphOrder is None: + if hasattr(self.dictObj, "charset") and self.dictObj.charset: + if hasattr(self.dictObj, "ROS"): # aka isCID + charsetCode = None + else: + charsetCode = getStdCharSet(self.dictObj.charset) + if charsetCode is None: + children.append(CharsetCompiler(strings, self.dictObj.charset, self)) + else: + self.rawDict["charset"] = charsetCode + if hasattr(self.dictObj, "Encoding") and 
self.dictObj.Encoding: + encoding = self.dictObj.Encoding + if not isinstance(encoding, basestring): + children.append(EncodingCompiler(strings, encoding, self)) + else: + if hasattr(self.dictObj, "VarStore"): + varStoreData = self.dictObj.VarStore + varStoreComp = VarStoreCompiler(varStoreData, self) + children.append(varStoreComp) + if hasattr(self.dictObj, "FDSelect"): + # I have not yet supported merging a ttx CFF-CID font, as there are + # interesting issues about merging the FDArrays. Here I assume that + # either the font was read from XML, and the FDSelect indices are all + # in the charstring data, or the FDSelect array is already fully defined. + fdSelect = self.dictObj.FDSelect + # probably read in from XML; assume fdIndex in CharString data + if len(fdSelect) == 0: + charStrings = self.dictObj.CharStrings + for name in self.dictObj.charset: + fdSelect.append(charStrings[name].fdSelectIndex) + fdSelectComp = FDSelectCompiler(fdSelect, self) + children.append(fdSelectComp) + if hasattr(self.dictObj, "CharStrings"): + items = [] + charStrings = self.dictObj.CharStrings + for name in self.dictObj.charset: + items.append(charStrings[name]) + charStringsComp = CharStringsCompiler( + items, strings, self, isCFF2=isCFF2) + children.append(charStringsComp) + if hasattr(self.dictObj, "FDArray"): + # I have not yet supported merging a ttx CFF-CID font, as there are + # interesting issues about merging the FDArrays. Here I assume that the + # FDArray info is correct and complete. 
+ fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self) + children.append(fdArrayIndexComp) + children.extend(fdArrayIndexComp.getChildren(strings)) + if hasattr(self.dictObj, "Private"): + privComp = self.dictObj.Private.getCompiler(strings, self) + children.append(privComp) + children.extend(privComp.getChildren(strings)) + return children + + +class FontDictCompiler(DictCompiler): + opcodes = buildOpcodeDict(topDictOperators) + + def __init__(self, dictObj, strings, parent, isCFF2=None): + super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2) + # + # We now take some effort to detect if there were any key/value pairs + # supplied that were ignored in the FontDict context, and issue a warning + # for those cases. + # + ignoredNames = [] + dictObj = self.dictObj + for name in sorted(set(dictObj.converters) - set(dictObj.order)): + if name in dictObj.rawDict: + # The font was directly read from binary. In this + # case, we want to report *all* "useless" key/value + # pairs that are in the font, not just the ones that + # are different from the default. + ignoredNames.append(name) + else: + # The font was probably read from a TTX file. We only + # warn about keys whos value is not the default. The + # ones that have the default value will not be written + # to binary anyway. 
+ default = dictObj.defaults.get(name) + if default is not None: + conv = dictObj.converters[name] + default = conv.read(dictObj, default) + if getattr(dictObj, name, None) != default: + ignoredNames.append(name) + if ignoredNames: + log.warning( + "Some CFF FDArray/FontDict keys were ignored upon compile: " + + " ".join(sorted(ignoredNames))) + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "Private"): + privComp = self.dictObj.Private.getCompiler(strings, self) + children.append(privComp) + children.extend(privComp.getChildren(strings)) + return children + + +class PrivateDictCompiler(DictCompiler): + + maxBlendStack = maxStackLimit + opcodes = buildOpcodeDict(privateDictOperators) + + def setPos(self, pos, endPos): + size = endPos - pos + self.parent.rawDict["Private"] = size, pos + self.pos = pos + + def getChildren(self, strings): + children = [] + if hasattr(self.dictObj, "Subrs"): + children.append(self.dictObj.Subrs.getCompiler(strings, self)) + return children + + +class BaseDict(object): + + def __init__(self, strings=None, file=None, offset=None, isCFF2=None): + assert (isCFF2 is None) == (file is None) + self.rawDict = {} + self.skipNames = [] + self.strings = strings + if file is None: + return + self._isCFF2 = isCFF2 + self.file = file + if offset is not None: + log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset) + self.offset = offset + + def decompile(self, data): + log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data)) + dec = self.decompilerClass(self.strings, self) + dec.decompile(data) + self.rawDict = dec.getDict() + self.postDecompile() + + def postDecompile(self): + pass + + def getCompiler(self, strings, parent, isCFF2=None): + return self.compilerClass(self, strings, parent, isCFF2=isCFF2) + + def __getattr__(self, name): + value = self.rawDict.get(name, None) + if value is None: + value = self.defaults.get(name) + if value is None: + raise AttributeError(name) + conv = 
self.converters[name] + value = conv.read(self, value) + setattr(self, name, value) + return value + + def toXML(self, xmlWriter, progress): + for name in self.order: + if name in self.skipNames: + continue + value = getattr(self, name, None) + # XXX For "charset" we never skip calling xmlWrite even if the + # value is None, so we always write the following XML comment: + # + # + # + # Charset is None when 'CFF ' table is imported from XML into an + # empty TTFont(). By writing this comment all the time, we obtain + # the same XML output whether roundtripping XML-to-XML or + # dumping binary-to-XML + if value is None and name != "charset": + continue + conv = self.converters[name] + conv.xmlWrite(xmlWriter, name, value, progress) + ignoredNames = set(self.rawDict) - set(self.order) + if ignoredNames: + xmlWriter.comment( + "some keys were ignored: %s" % " ".join(sorted(ignoredNames))) + xmlWriter.newline() + + def fromXML(self, name, attrs, content): + conv = self.converters[name] + value = conv.xmlRead(name, attrs, content, self) + setattr(self, name, value) + + +class TopDict(BaseDict): + + defaults = buildDefaults(topDictOperators) + converters = buildConverters(topDictOperators) + compilerClass = TopDictCompiler + order = buildOrder(topDictOperators) + decompilerClass = TopDictDecompiler + + def __init__(self, strings=None, file=None, offset=None, + GlobalSubrs=None, cff2GetGlyphOrder=None, isCFF2=None): + super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.cff2GetGlyphOrder = cff2GetGlyphOrder + self.GlobalSubrs = GlobalSubrs + if isCFF2: + self.defaults = buildDefaults(topDictOperators2) + self.charset = cff2GetGlyphOrder() + self.order = buildOrder(topDictOperators2) + else: + self.defaults = buildDefaults(topDictOperators) + self.order = buildOrder(topDictOperators) + + def getGlyphOrder(self): + return self.charset + + def postDecompile(self): + offset = self.rawDict.get("CharStrings") + if offset is None: + return + # get the number 
of glyphs beforehand. + self.file.seek(offset) + if self._isCFF2: + self.numGlyphs = readCard32(self.file) + else: + self.numGlyphs = readCard16(self.file) + + def toXML(self, xmlWriter, progress): + if hasattr(self, "CharStrings"): + self.decompileAllCharStrings(progress) + if hasattr(self, "ROS"): + self.skipNames = ['Encoding'] + if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"): + # these values have default values, but I only want them to show up + # in CID fonts. + self.skipNames = [ + 'CIDFontVersion', 'CIDFontRevision', 'CIDFontType', 'CIDCount'] + BaseDict.toXML(self, xmlWriter, progress) + + def decompileAllCharStrings(self, progress): + # Make sure that all the Private Dicts have been instantiated. + i = 0 + for charString in self.CharStrings.values(): + try: + charString.decompile() + except: + log.error("Error in charstring %s", i) + raise + if not i % 30 and progress: + progress.increment(0) # update + i = i + 1 + + def recalcFontBBox(self): + fontBBox = None + for charString in self.CharStrings.values(): + bounds = charString.calcBounds() + if bounds is not None: + if fontBBox is not None: + fontBBox = unionRect(fontBBox, bounds) + else: + fontBBox = bounds + + if fontBBox is None: + self.FontBBox = self.defaults['FontBBox'][:] + else: + self.FontBBox = list(intRect(fontBBox)) + + +class FontDict(BaseDict): + # + # Since fonttools used to pass a lot of fields that are not relevant in the FDArray + # FontDict, there are 'ttx' files in the wild that contain all these. These got in + # the ttx files because fonttools writes explicit values for all the TopDict default + # values. These are not actually illegal in the context of an FDArray FontDict - you + # can legally, per spec, put any arbitrary key/value pair in a FontDict - but are + # useless since current major company CFF interpreters ignore anything but the set + # listed in this file. So, we just silently skip them. 
An exception is Weight: this + # is not used by any interpreter, but some foundries have asked that this be + # supported in FDArray FontDicts just to preserve information about the design when + # the font is being inspected. + # + # On top of that, there are fonts out there that contain such useless FontDict values. + # + # By subclassing TopDict, we *allow* all key/values from TopDict, both when reading + # from binary or when reading from XML, but by overriding `order` with a limited + # list of names, we ensure that only the useful names ever get exported to XML and + # ever get compiled into the binary font. + # + # We override compilerClass so we can warn about "useless" key/value pairs, either + # from the original binary font or from TTX input. + # + # See: + # - https://github.com/fonttools/fonttools/issues/740 + # - https://github.com/fonttools/fonttools/issues/601 + # - https://github.com/adobe-type-tools/afdko/issues/137 + # + defaults = {} + converters = buildConverters(topDictOperators) + compilerClass = FontDictCompiler + order = ['FontName', 'FontMatrix', 'Weight', 'Private'] + decompilerClass = TopDictDecompiler + + def __init__(self, strings=None, file=None, offset=None, + GlobalSubrs=None, isCFF2=None, vstore=None): + super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.vstore = vstore + + +class PrivateDict(BaseDict): + defaults = buildDefaults(privateDictOperators) + converters = buildConverters(privateDictOperators) + order = buildOrder(privateDictOperators) + decompilerClass = PrivateDictDecompiler + compilerClass = PrivateDictCompiler + + def __init__(self, strings=None, file=None, offset=None, isCFF2=None, + vstore=None): + super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2) + self.vstore = vstore + if isCFF2: + self.defaults = buildDefaults(privateDictOperators2) + self.order = buildOrder(privateDictOperators2) + else: + self.defaults = buildDefaults(privateDictOperators) + self.order = 
buildOrder(privateDictOperators) + + def getNumRegions(self, vi=None): # called from misc/psCharStrings.py + # if getNumRegions is being called, we can assume that VarStore exists. + if vi is None: + if hasattr(self, 'vsindex'): + vi = self.vsindex + else: + vi = 0 + numRegions = self.vstore.getNumRegions(vi) + return numRegions + + +class IndexedStrings(object): + + """SID -> string mapping.""" + + def __init__(self, file=None): + if file is None: + strings = [] + else: + strings = [ + tostr(s, encoding="latin1") + for s in Index(file, isCFF2=False) + ] + self.strings = strings + + def getCompiler(self): + return IndexedStringsCompiler(self, None, self, isCFF2=False) + + def __len__(self): + return len(self.strings) + + def __getitem__(self, SID): + if SID < cffStandardStringCount: + return cffStandardStrings[SID] + else: + return self.strings[SID - cffStandardStringCount] + + def getSID(self, s): + if not hasattr(self, "stringMapping"): + self.buildStringMapping() + s = tostr(s, encoding="latin1") + if s in cffStandardStringMapping: + SID = cffStandardStringMapping[s] + elif s in self.stringMapping: + SID = self.stringMapping[s] + else: + SID = len(self.strings) + cffStandardStringCount + self.strings.append(s) + self.stringMapping[s] = SID + return SID + + def getStrings(self): + return self.strings + + def buildStringMapping(self): + self.stringMapping = {} + for index in range(len(self.strings)): + self.stringMapping[self.strings[index]] = index + cffStandardStringCount + + +# The 391 Standard Strings as used in the CFF format. 
+# from Adobe Technical None #5176, version 1.0, 18 March 1998 + +cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', + 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', + 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', + 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', + 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', + 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', + 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', + 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', + 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', + 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', + 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', + 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', + 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', + 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', + 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', + 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', + 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', + 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', + 'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', + 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', + 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', + 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', + 'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', + 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', + 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', + 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 
'Scaron', 'Uacute', + 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', + 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', + 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', + 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', + 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', + 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', + 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', + 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', + 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle', + 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', + 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', + 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', + 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', + 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', + 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', + 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', + 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', + 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', + 'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', + 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', + 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', + 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', + 'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', + 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', + 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', + 'oneinferior', 'twoinferior', 
'threeinferior', 'fourinferior', 'fiveinferior', + 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', + 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', + 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', + 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', + 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', + 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', + 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', + 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', + 'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002', + '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', + 'Semibold' +] + +cffStandardStringCount = 391 +assert len(cffStandardStrings) == cffStandardStringCount +# build reverse mapping +cffStandardStringMapping = {} +for _i in range(cffStandardStringCount): + cffStandardStringMapping[cffStandardStrings[_i]] = _i + +cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign", +"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", +"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", +"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", +"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", +"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", +"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", +"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", +"k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", +"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", +"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", +"quotedblleft", "guillemotleft", 
"guilsinglleft", "guilsinglright", "fi", "fl", +"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", +"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", +"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", +"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", +"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE", +"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", +"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", +"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn", +"threequarters", "twosuperior", "registered", "minus", "eth", "multiply", +"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", +"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", +"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", +"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", +"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute", +"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", +"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis", +"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde", +"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", +"zcaron"] + +cffISOAdobeStringCount = 229 +assert len(cffISOAdobeStrings) == cffISOAdobeStringCount + +cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall", +"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", +"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", +"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle", +"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", +"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon", 
+"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall", +"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", +"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", +"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", +"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", +"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", +"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", +"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", +"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall", +"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", +"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", +"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", +"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth", +"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", +"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior", +"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", +"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", +"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", +"centinferior", "dollarinferior", "periodinferior", "commainferior", +"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall", +"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", +"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", +"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", +"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", +"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", +"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", +"Ydieresissmall"] + 
+cffExpertStringCount = 166 +assert len(cffIExpertStrings) == cffExpertStringCount + +cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle", +"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader", +"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle", +"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", +"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", +"semicolon", "commasuperior", "threequartersemdash", "periodsuperior", +"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", +"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", +"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", +"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah", +"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf", +"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", +"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior", +"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", +"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", +"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", +"eightinferior", "nineinferior", "centinferior", "dollarinferior", +"periodinferior", "commainferior"] + +cffExpertSubsetStringCount = 87 +assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount diff -Nru fonttools-3.0/Snippets/fontTools/cffLib/specializer.py fonttools-3.21.2/Snippets/fontTools/cffLib/specializer.py --- fonttools-3.0/Snippets/fontTools/cffLib/specializer.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/cffLib/specializer.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,551 @@ +# -*- coding: utf-8 -*- + +"""T2CharString operator specializer and generalizer.""" + +from __future__ import print_function, division, 
absolute_import +from fontTools.misc.py23 import * + + +def stringToProgram(string): + if isinstance(string, basestring): + string = string.split() + program = [] + for token in string: + try: + token = int(token) + except ValueError: + try: + token = float(token) + except ValueError: + pass + program.append(token) + return program + +def programToString(program): + return ' '.join(str(x) for x in program) + + +def programToCommands(program): + """Takes a T2CharString program list and returns list of commands. + Each command is a two-tuple of commandname,arg-list. The commandname might + be empty string if no commandname shall be emitted (used for glyph width, + hintmask/cntrmask argument, as well as stray arguments at the end of the + program (¯\_(ツ)_/¯).""" + + width = None + commands = [] + stack = [] + it = iter(program) + for token in it: + if not isinstance(token, basestring): + stack.append(token) + continue + + if width is None and token in {'hstem', 'hstemhm', 'vstem', 'vstemhm', + 'cntrmask', 'hintmask', + 'hmoveto', 'vmoveto', 'rmoveto', + 'endchar'}: + parity = token in {'hmoveto', 'vmoveto'} + if stack and (len(stack) % 2) ^ parity: + width = stack.pop(0) + commands.append(('', [width])) + + if token in {'hintmask', 'cntrmask'}: + if stack: + commands.append(('', stack)) + commands.append((token, [])) + commands.append(('', [next(it)])) + else: + commands.append((token,stack)) + stack = [] + if stack: + commands.append(('', stack)) + return commands + +def commandsToProgram(commands): + """Takes a commands list as returned by programToCommands() and converts + it back to a T2CharString program list.""" + program = [] + for op,args in commands: + program.extend(args) + if op: + program.append(op) + return program + + +def _everyN(el, n): + """Group the list el into groups of size n""" + if len(el) % n != 0: raise ValueError(el) + for i in range(0, len(el), n): + yield el[i:i+n] + + +class _GeneralizerDecombinerCommandsMap(object): + + @staticmethod + 
def rmoveto(args): + if len(args) != 2: raise ValueError(args) + yield ('rmoveto', args) + @staticmethod + def hmoveto(args): + if len(args) != 1: raise ValueError(args) + yield ('rmoveto', [args[0], 0]) + @staticmethod + def vmoveto(args): + if len(args) != 1: raise ValueError(args) + yield ('rmoveto', [0, args[0]]) + + @staticmethod + def rlineto(args): + if not args: raise ValueError(args) + for args in _everyN(args, 2): + yield ('rlineto', args) + @staticmethod + def hlineto(args): + if not args: raise ValueError(args) + it = iter(args) + try: + while True: + yield ('rlineto', [next(it), 0]) + yield ('rlineto', [0, next(it)]) + except StopIteration: + pass + @staticmethod + def vlineto(args): + if not args: raise ValueError(args) + it = iter(args) + try: + while True: + yield ('rlineto', [0, next(it)]) + yield ('rlineto', [next(it), 0]) + except StopIteration: + pass + @staticmethod + def rrcurveto(args): + if not args: raise ValueError(args) + for args in _everyN(args, 6): + yield ('rrcurveto', args) + @staticmethod + def hhcurveto(args): + if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args) + if len(args) % 2 == 1: + yield ('rrcurveto', [args[1], args[0], args[2], args[3], args[4], 0]) + args = args[5:] + for args in _everyN(args, 4): + yield ('rrcurveto', [args[0], 0, args[1], args[2], args[3], 0]) + @staticmethod + def vvcurveto(args): + if len(args) < 4 or len(args) % 4 > 1: raise ValueError(args) + if len(args) % 2 == 1: + yield ('rrcurveto', [args[0], args[1], args[2], args[3], 0, args[4]]) + args = args[5:] + for args in _everyN(args, 4): + yield ('rrcurveto', [0, args[0], args[1], args[2], 0, args[3]]) + @staticmethod + def hvcurveto(args): + if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args) + last_args = None + if len(args) % 2 == 1: + lastStraight = len(args) % 8 == 5 + args, last_args = args[:-5], args[-5:] + it = _everyN(args, 4) + try: + while True: + args = next(it) + yield ('rrcurveto', [args[0], 0, args[1], 
args[2], 0, args[3]]) + args = next(it) + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0]) + except StopIteration: + pass + if last_args: + args = last_args + if lastStraight: + yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]]) + else: + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]]) + @staticmethod + def vhcurveto(args): + if len(args) < 4 or len(args) % 8 not in {0,1,4,5}: raise ValueError(args) + last_args = None + if len(args) % 2 == 1: + lastStraight = len(args) % 8 == 5 + args, last_args = args[:-5], args[-5:] + it = _everyN(args, 4) + try: + while True: + args = next(it) + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], 0]) + args = next(it) + yield ('rrcurveto', [args[0], 0, args[1], args[2], 0, args[3]]) + except StopIteration: + pass + if last_args: + args = last_args + if lastStraight: + yield ('rrcurveto', [0, args[0], args[1], args[2], args[3], args[4]]) + else: + yield ('rrcurveto', [args[0], 0, args[1], args[2], args[4], args[3]]) + + @staticmethod + def rcurveline(args): + if len(args) < 8 or len(args) % 6 != 2: raise ValueError(args) + args, last_args = args[:-2], args[-2:] + for args in _everyN(args, 6): + yield ('rrcurveto', args) + yield ('rlineto', last_args) + @staticmethod + def rlinecurve(args): + if len(args) < 8 or len(args) % 2 != 0: raise ValueError(args) + args, last_args = args[:-6], args[-6:] + for args in _everyN(args, 2): + yield ('rlineto', args) + yield ('rrcurveto', last_args) + + +def generalizeCommands(commands, ignoreErrors=False): + result = [] + mapping = _GeneralizerDecombinerCommandsMap + for op,args in commands: + func = getattr(mapping, op, None) + if not func: + result.append((op,args)) + continue + try: + for command in func(args): + result.append(command) + except ValueError: + if ignoreErrors: + # Store op as data, such that consumers of commands do not have to + # deal with incorrect number of arguments. 
+ result.append(('', args)) + result.append(('', [op])) + else: + raise + return result + +def generalizeProgram(program, **kwargs): + return commandsToProgram(generalizeCommands(programToCommands(program), **kwargs)) + + +def _categorizeVector(v): + """ + Takes X,Y vector v and returns one of r, h, v, or 0 depending on which + of X and/or Y are zero, plus tuple of nonzero ones. If both are zero, + it returns a single zero still. + + >>> _categorizeVector((0,0)) + ('0', (0,)) + >>> _categorizeVector((1,0)) + ('h', (1,)) + >>> _categorizeVector((0,2)) + ('v', (2,)) + >>> _categorizeVector((1,2)) + ('r', (1, 2)) + """ + if not v[0]: + if not v[1]: + return '0', v[:1] + else: + return 'v', v[1:] + else: + if not v[1]: + return 'h', v[:1] + else: + return 'r', v + +def _mergeCategories(a, b): + if a == '0': return b + if b == '0': return a + if a == b: return a + return None + +def _negateCategory(a): + if a == 'h': return 'v' + if a == 'v': return 'h' + assert a in '0r' + return a + +def specializeCommands(commands, + ignoreErrors=False, + generalizeFirst=True, + preserveTopology=False, + maxstack=48): + + # We perform several rounds of optimizations. They are carefully ordered and are: + # + # 0. Generalize commands. + # This ensures that they are in our expected simple form, with each line/curve only + # having arguments for one segment, and using the generic form (rlineto/rrcurveto). + # If caller is sure the input is in this form, they can turn off generalization to + # save time. + # + # 1. Combine successive rmoveto operations. + # + # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. + # We specialize into some, made-up, variants as well, which simplifies following + # passes. + # + # 3. Merge or delete redundant operations, to the extent requested. + # OpenType spec declares point numbers in CFF undefined. As such, we happily + # change topology. 
If client relies on point numbers (in GPOS anchors, or for + # hinting purposes(what?)) they can turn this off. + # + # 4. Peephole optimization to revert back some of the h/v variants back into their + # original "relative" operator (rline/rrcurveto) if that saves a byte. + # + # 5. Combine adjacent operators when possible, minding not to go over max stack size. + # + # 6. Resolve any remaining made-up operators into real operators. + # + # I have convinced myself that this produces optimal bytecode (except for, possibly + # one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-) + # A dynamic-programming approach can do the same but would be significantly slower. + + + # 0. Generalize commands. + if generalizeFirst: + commands = generalizeCommands(commands, ignoreErrors=ignoreErrors) + else: + commands = list(commands) # Make copy since we modify in-place later. + + # 1. Combine successive rmoveto operations. + for i in range(len(commands)-1, 0, -1): + if 'rmoveto' == commands[i][0] == commands[i-1][0]: + v1, v2 = commands[i-1][1], commands[i][1] + commands[i-1] = ('rmoveto', [v1[0]+v2[0], v1[1]+v2[1]]) + del commands[i] + + # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants. + # + # We, in fact, specialize into more, made-up, variants that special-case when both + # X and Y components are zero. This simplifies the following optimization passes. + # This case is rare, but OCD does not let me skip it. + # + # After this round, we will have four variants that use the following mnemonics: + # + # - 'r' for relative, ie. non-zero X and non-zero Y, + # - 'h' for horizontal, ie. zero X and non-zero Y, + # - 'v' for vertical, ie. non-zero X and zero Y, + # - '0' for zeros, ie. zero X and zero Y. + # + # The '0' pseudo-operators are not part of the spec, but help simplify the following + # optimization rounds. We resolve them at the end. 
So, after this, we will have four + # moveto and four lineto variants: + # + # - 0moveto, 0lineto + # - hmoveto, hlineto + # - vmoveto, vlineto + # - rmoveto, rlineto + # + # and sixteen curveto variants. For example, a '0hcurveto' operator means a curve + # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3. + # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3. + # + # There are nine different variants of curves without the '0'. Those nine map exactly + # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto, + # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of + # arguments and one without. Eg. an hhcurveto with an extra argument (odd number of + # arguments) is in fact an rhcurveto. The operators in the spec are designed such that + # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve. + # + # Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest + # of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be + # thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always + # encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute + # the '0' with either 'h' or 'v' and it works. + # + # When we get to curve splines however, things become more complicated... XXX finish this. + # There's one more complexity with splines. If one side of the spline is not horizontal or + # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode. + # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and + # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'. + # This limits our merge opportunities later. 
+ # + for i in range(len(commands)): + op,args = commands[i] + + if op in {'rmoveto', 'rlineto'}: + c, args = _categorizeVector(args) + commands[i] = c+op[1:], args + continue + + if op == 'rrcurveto': + c1, args1 = _categorizeVector(args[:2]) + c2, args2 = _categorizeVector(args[-2:]) + commands[i] = c1+c2+'curveto', args1+args[2:4]+args2 + continue + + # 3. Merge or delete redundant operations, to the extent requested. + # + # TODO + # A 0moveto that comes before all other path operations can be removed. + # though I find conflicting evidence for this. + # + # TODO + # "If hstem and vstem hints are both declared at the beginning of a + # CharString, and this sequence is followed directly by the hintmask or + # cntrmask operators, then the vstem hint operator (or, if applicable, + # the vstemhm operator) need not be included." + # + # "The sequence and form of a CFF2 CharString program may be represented as: + # {hs* vs* cm* hm* mt subpath}? {mt subpath}*" + # + # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1 + # + # For Type2 CharStrings the sequence is: + # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar" + + + # Some other redundancies change topology (point numbers). + if not preserveTopology: + for i in range(len(commands)-1, -1, -1): + op, args = commands[i] + + # A 00curveto is demoted to a (specialized) lineto. + if op == '00curveto': + assert len(args) == 4 + c, args = _categorizeVector(args[1:3]) + op = c+'lineto' + commands[i] = op, args + # and then... + + # A 0lineto can be deleted. + if op == '0lineto': + del commands[i] + continue + + # Merge adjacent hlineto's and vlineto's. + if i and op in {'hlineto', 'vlineto'} and op == commands[i-1][0]: + _, other_args = commands[i-1] + assert len(args) == 1 and len(other_args) == 1 + commands[i-1] = (op, [other_args[0]+args[0]]) + del commands[i] + continue + + # 4. 
Peephole optimization to revert back some of the h/v variants back into their + # original "relative" operator (rline/rrcurveto) if that saves a byte. + for i in range(1, len(commands)-1): + op,args = commands[i] + prv,nxt = commands[i-1][0], commands[i+1][0] + + if op in {'0lineto', 'hlineto', 'vlineto'} and prv == nxt == 'rlineto': + assert len(args) == 1 + args = [0, args[0]] if op[0] == 'v' else [args[0], 0] + commands[i] = ('rlineto', args) + continue + + if op[2:] == 'curveto' and len(args) == 5 and prv == nxt == 'rrcurveto': + assert (op[0] == 'r') ^ (op[1] == 'r') + args = list(args) + if op[0] == 'v': + pos = 0 + elif op[0] != 'r': + pos = 1 + elif op[1] == 'v': + pos = 4 + else: + pos = 5 + args.insert(pos, 0) + commands[i] = ('rrcurveto', args) + continue + + # 5. Combine adjacent operators when possible, minding not to go over max stack size. + for i in range(len(commands)-1, 0, -1): + op1,args1 = commands[i-1] + op2,args2 = commands[i] + new_op = None + + # Merge logic... + if {op1, op2} <= {'rlineto', 'rrcurveto'}: + if op1 == op2: + new_op = op1 + else: + if op2 == 'rrcurveto' and len(args2) == 6: + new_op = 'rlinecurve' + elif len(args2) == 2: + new_op = 'rcurveline' + + elif (op1, op2) in {('rlineto', 'rlinecurve'), ('rrcurveto', 'rcurveline')}: + new_op = op2 + + elif {op1, op2} == {'vlineto', 'hlineto'}: + new_op = op1 + + elif 'curveto' == op1[2:] == op2[2:]: + d0, d1 = op1[:2] + d2, d3 = op2[:2] + + if d1 == 'r' or d2 == 'r' or d0 == d3 == 'r': + continue + + d = _mergeCategories(d1, d2) + if d is None: continue + if d0 == 'r': + d = _mergeCategories(d, d3) + if d is None: continue + new_op = 'r'+d+'curveto' + elif d3 == 'r': + d0 = _mergeCategories(d0, _negateCategory(d)) + if d0 is None: continue + new_op = d0+'r'+'curveto' + else: + d0 = _mergeCategories(d0, d3) + if d0 is None: continue + new_op = d0+d+'curveto' + + if new_op and len(args1) + len(args2) <= maxstack: + commands[i-1] = (new_op, args1+args2) + del commands[i] + + # 6. 
Resolve any remaining made-up operators into real operators. + for i in range(len(commands)): + op,args = commands[i] + + if op in {'0moveto', '0lineto'}: + commands[i] = 'h'+op[1:], args + continue + + if op[2:] == 'curveto' and op[:2] not in {'rr', 'hh', 'vv', 'vh', 'hv'}: + op0, op1 = op[:2] + if (op0 == 'r') ^ (op1 == 'r'): + assert len(args) % 2 == 1 + if op0 == '0': op0 = 'h' + if op1 == '0': op1 = 'h' + if op0 == 'r': op0 = op1 + if op1 == 'r': op1 = _negateCategory(op0) + assert {op0,op1} <= {'h','v'}, (op0, op1) + + if len(args) % 2: + if op0 != op1: # vhcurveto / hvcurveto + if (op0 == 'h') ^ (len(args) % 8 == 1): + # Swap last two args order + args = args[:-2]+args[-1:]+args[-2:-1] + else: # hhcurveto / vvcurveto + if op0 == 'h': # hhcurveto + # Swap first two args order + args = args[1:2]+args[:1]+args[2:] + + commands[i] = op0+op1+'curveto', args + continue + + return commands + +def specializeProgram(program, **kwargs): + return commandsToProgram(specializeCommands(programToCommands(program), **kwargs)) + + +if __name__ == '__main__': + import sys + if len(sys.argv) == 1: + import doctest + sys.exit(doctest.testmod().failed) + program = stringToProgram(sys.argv[1:]) + print("Program:"); print(programToString(program)) + commands = programToCommands(program) + print("Commands:"); print(commands) + program2 = commandsToProgram(commands) + print("Program from commands:"); print(programToString(program2)) + assert program == program2 + print("Generalized program:"); print(programToString(generalizeProgram(program))) + print("Specialized program:"); print(programToString(specializeProgram(program))) + diff -Nru fonttools-3.0/Snippets/fontTools/cffLib.py fonttools-3.21.2/Snippets/fontTools/cffLib.py --- fonttools-3.0/Snippets/fontTools/cffLib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/cffLib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1810 +0,0 @@ -"""cffLib.py -- read/write tools for Adobe CFF fonts.""" - -from 
__future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc import psCharStrings -from fontTools.misc.textTools import safeEval -import struct - -DEBUG = 0 - - -cffHeaderFormat = """ - major: B - minor: B - hdrSize: B - offSize: B -""" - -class CFFFontSet(object): - - def __init__(self): - pass - - def decompile(self, file, otFont): - sstruct.unpack(cffHeaderFormat, file.read(4), self) - assert self.major == 1 and self.minor == 0, \ - "unknown CFF format: %d.%d" % (self.major, self.minor) - - file.seek(self.hdrSize) - self.fontNames = list(Index(file)) - self.topDictIndex = TopDictIndex(file) - self.strings = IndexedStrings(file) - self.GlobalSubrs = GlobalSubrsIndex(file) - self.topDictIndex.strings = self.strings - self.topDictIndex.GlobalSubrs = self.GlobalSubrs - - def __len__(self): - return len(self.fontNames) - - def keys(self): - return list(self.fontNames) - - def values(self): - return self.topDictIndex - - def __getitem__(self, name): - try: - index = self.fontNames.index(name) - except ValueError: - raise KeyError(name) - return self.topDictIndex[index] - - def compile(self, file, otFont): - strings = IndexedStrings() - writer = CFFWriter() - writer.add(sstruct.pack(cffHeaderFormat, self)) - fontNames = Index() - for name in self.fontNames: - fontNames.append(name) - writer.add(fontNames.getCompiler(strings, None)) - topCompiler = self.topDictIndex.getCompiler(strings, None) - writer.add(topCompiler) - writer.add(strings.getCompiler()) - writer.add(self.GlobalSubrs.getCompiler(strings, None)) - - for topDict in self.topDictIndex: - if not hasattr(topDict, "charset") or topDict.charset is None: - charset = otFont.getGlyphOrder() - topDict.charset = charset - - for child in topCompiler.getChildren(strings): - writer.add(child) - - writer.toFile(file) - - def toXML(self, xmlWriter, progress=None): - for fontName in self.fontNames: - xmlWriter.begintag("CFFFont", 
name=tostr(fontName)) - xmlWriter.newline() - font = self[fontName] - font.toXML(xmlWriter, progress) - xmlWriter.endtag("CFFFont") - xmlWriter.newline() - xmlWriter.newline() - xmlWriter.begintag("GlobalSubrs") - xmlWriter.newline() - self.GlobalSubrs.toXML(xmlWriter, progress) - xmlWriter.endtag("GlobalSubrs") - xmlWriter.newline() - - def fromXML(self, name, attrs, content): - if not hasattr(self, "GlobalSubrs"): - self.GlobalSubrs = GlobalSubrsIndex() - self.major = 1 - self.minor = 0 - self.hdrSize = 4 - self.offSize = 4 # XXX ?? - if name == "CFFFont": - if not hasattr(self, "fontNames"): - self.fontNames = [] - self.topDictIndex = TopDictIndex() - fontName = attrs["name"] - topDict = TopDict(GlobalSubrs=self.GlobalSubrs) - topDict.charset = None # gets filled in later - self.fontNames.append(fontName) - self.topDictIndex.append(topDict) - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - topDict.fromXML(name, attrs, content) - elif name == "GlobalSubrs": - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - subr = psCharStrings.T2CharString() - subr.fromXML(name, attrs, content) - self.GlobalSubrs.append(subr) - - -class CFFWriter(object): - - def __init__(self): - self.data = [] - - def add(self, table): - self.data.append(table) - - def toFile(self, file): - lastPosList = None - count = 1 - while True: - if DEBUG: - print("CFFWriter.toFile() iteration:", count) - count = count + 1 - pos = 0 - posList = [pos] - for item in self.data: - if hasattr(item, "getDataLength"): - endPos = pos + item.getDataLength() - else: - endPos = pos + len(item) - if hasattr(item, "setPos"): - item.setPos(pos, endPos) - pos = endPos - posList.append(pos) - if posList == lastPosList: - break - lastPosList = posList - if DEBUG: - print("CFFWriter.toFile() writing to file.") - begin = file.tell() - posList = [0] - for item in self.data: - if hasattr(item, "toFile"): - 
item.toFile(file) - else: - file.write(item) - posList.append(file.tell() - begin) - assert posList == lastPosList - - -def calcOffSize(largestOffset): - if largestOffset < 0x100: - offSize = 1 - elif largestOffset < 0x10000: - offSize = 2 - elif largestOffset < 0x1000000: - offSize = 3 - else: - offSize = 4 - return offSize - - -class IndexCompiler(object): - - def __init__(self, items, strings, parent): - self.items = self.getItems(items, strings) - self.parent = parent - - def getItems(self, items, strings): - return items - - def getOffsets(self): - pos = 1 - offsets = [pos] - for item in self.items: - if hasattr(item, "getDataLength"): - pos = pos + item.getDataLength() - else: - pos = pos + len(item) - offsets.append(pos) - return offsets - - def getDataLength(self): - lastOffset = self.getOffsets()[-1] - offSize = calcOffSize(lastOffset) - dataLength = ( - 2 + # count - 1 + # offSize - (len(self.items) + 1) * offSize + # the offsets - lastOffset - 1 # size of object data - ) - return dataLength - - def toFile(self, file): - offsets = self.getOffsets() - writeCard16(file, len(self.items)) - offSize = calcOffSize(offsets[-1]) - writeCard8(file, offSize) - offSize = -offSize - pack = struct.pack - for offset in offsets: - binOffset = pack(">l", offset)[offSize:] - assert len(binOffset) == -offSize - file.write(binOffset) - for item in self.items: - if hasattr(item, "toFile"): - item.toFile(file) - else: - file.write(tobytes(item, encoding="latin1")) - - -class IndexedStringsCompiler(IndexCompiler): - - def getItems(self, items, strings): - return items.strings - - -class TopDictIndexCompiler(IndexCompiler): - - def getItems(self, items, strings): - out = [] - for item in items: - out.append(item.getCompiler(strings, self)) - return out - - def getChildren(self, strings): - children = [] - for topDict in self.items: - children.extend(topDict.getChildren(strings)) - return children - - -class FDArrayIndexCompiler(IndexCompiler): - - def getItems(self, items, 
strings): - out = [] - for item in items: - out.append(item.getCompiler(strings, self)) - return out - - def getChildren(self, strings): - children = [] - for fontDict in self.items: - children.extend(fontDict.getChildren(strings)) - return children - - def toFile(self, file): - offsets = self.getOffsets() - writeCard16(file, len(self.items)) - offSize = calcOffSize(offsets[-1]) - writeCard8(file, offSize) - offSize = -offSize - pack = struct.pack - for offset in offsets: - binOffset = pack(">l", offset)[offSize:] - assert len(binOffset) == -offSize - file.write(binOffset) - for item in self.items: - if hasattr(item, "toFile"): - item.toFile(file) - else: - file.write(item) - - def setPos(self, pos, endPos): - self.parent.rawDict["FDArray"] = pos - - -class GlobalSubrsCompiler(IndexCompiler): - def getItems(self, items, strings): - out = [] - for cs in items: - cs.compile() - out.append(cs.bytecode) - return out - -class SubrsCompiler(GlobalSubrsCompiler): - def setPos(self, pos, endPos): - offset = pos - self.parent.pos - self.parent.rawDict["Subrs"] = offset - -class CharStringsCompiler(GlobalSubrsCompiler): - def setPos(self, pos, endPos): - self.parent.rawDict["CharStrings"] = pos - - -class Index(object): - - """This class represents what the CFF spec calls an INDEX.""" - - compilerClass = IndexCompiler - - def __init__(self, file=None): - self.items = [] - name = self.__class__.__name__ - if file is None: - return - if DEBUG: - print("loading %s at %s" % (name, file.tell())) - self.file = file - count = readCard16(file) - if count == 0: - return - self.items = [None] * count - offSize = readCard8(file) - if DEBUG: - print(" index count: %s offSize: %s" % (count, offSize)) - assert offSize <= 4, "offSize too large: %s" % offSize - self.offsets = offsets = [] - pad = b'\0' * (4 - offSize) - for index in range(count+1): - chunk = file.read(offSize) - chunk = pad + chunk - offset, = struct.unpack(">L", chunk) - offsets.append(int(offset)) - self.offsetBase = 
file.tell() - 1 - file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot - if DEBUG: - print(" end of %s at %s" % (name, file.tell())) - - def __len__(self): - return len(self.items) - - def __getitem__(self, index): - item = self.items[index] - if item is not None: - return item - offset = self.offsets[index] + self.offsetBase - size = self.offsets[index+1] - self.offsets[index] - file = self.file - file.seek(offset) - data = file.read(size) - assert len(data) == size - item = self.produceItem(index, data, file, offset, size) - self.items[index] = item - return item - - def produceItem(self, index, data, file, offset, size): - return data - - def append(self, item): - self.items.append(item) - - def getCompiler(self, strings, parent): - return self.compilerClass(self, strings, parent) - - -class GlobalSubrsIndex(Index): - - compilerClass = GlobalSubrsCompiler - - def __init__(self, file=None, globalSubrs=None, private=None, fdSelect=None, fdArray=None): - Index.__init__(self, file) - self.globalSubrs = globalSubrs - self.private = private - if fdSelect: - self.fdSelect = fdSelect - if fdArray: - self.fdArray = fdArray - - def produceItem(self, index, data, file, offset, size): - if self.private is not None: - private = self.private - elif hasattr(self, 'fdArray') and self.fdArray is not None: - private = self.fdArray[self.fdSelect[index]].Private - else: - private = None - return psCharStrings.T2CharString(data, private=private, globalSubrs=self.globalSubrs) - - def toXML(self, xmlWriter, progress): - xmlWriter.comment("The 'index' attribute is only for humans; it is ignored when parsed.") - xmlWriter.newline() - for i in range(len(self)): - subr = self[i] - if subr.needsDecompilation(): - xmlWriter.begintag("CharString", index=i, raw=1) - else: - xmlWriter.begintag("CharString", index=i) - xmlWriter.newline() - subr.toXML(xmlWriter) - xmlWriter.endtag("CharString") - xmlWriter.newline() - - def fromXML(self, name, attrs, content): - if name 
!= "CharString": - return - subr = psCharStrings.T2CharString() - subr.fromXML(name, attrs, content) - self.append(subr) - - def getItemAndSelector(self, index): - sel = None - if hasattr(self, 'fdSelect'): - sel = self.fdSelect[index] - return self[index], sel - - -class SubrsIndex(GlobalSubrsIndex): - compilerClass = SubrsCompiler - - -class TopDictIndex(Index): - - compilerClass = TopDictIndexCompiler - - def produceItem(self, index, data, file, offset, size): - top = TopDict(self.strings, file, offset, self.GlobalSubrs) - top.decompile(data) - return top - - def toXML(self, xmlWriter, progress): - for i in range(len(self)): - xmlWriter.begintag("FontDict", index=i) - xmlWriter.newline() - self[i].toXML(xmlWriter, progress) - xmlWriter.endtag("FontDict") - xmlWriter.newline() - - -class FDArrayIndex(TopDictIndex): - - compilerClass = FDArrayIndexCompiler - - def fromXML(self, name, attrs, content): - if name != "FontDict": - return - fontDict = FontDict() - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - fontDict.fromXML(name, attrs, content) - self.append(fontDict) - - -class FDSelect: - def __init__(self, file=None, numGlyphs=None, format=None): - if file: - # read data in from file - self.format = readCard8(file) - if self.format == 0: - from array import array - self.gidArray = array("B", file.read(numGlyphs)).tolist() - elif self.format == 3: - gidArray = [None] * numGlyphs - nRanges = readCard16(file) - fd = None - prev = None - for i in range(nRanges): - first = readCard16(file) - if prev is not None: - for glyphID in range(prev, first): - gidArray[glyphID] = fd - prev = first - fd = readCard8(file) - if prev is not None: - first = readCard16(file) - for glyphID in range(prev, first): - gidArray[glyphID] = fd - self.gidArray = gidArray - else: - assert False, "unsupported FDSelect format: %s" % format - else: - # reading from XML. Make empty gidArray,, and leave format as passed in. 
- # format is None will result in the smallest representation being used. - self.format = format - self.gidArray = [] - - def __len__(self): - return len(self.gidArray) - - def __getitem__(self, index): - return self.gidArray[index] - - def __setitem__(self, index, fdSelectValue): - self.gidArray[index] = fdSelectValue - - def append(self, fdSelectValue): - self.gidArray.append(fdSelectValue) - - -class CharStrings(object): - - def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray): - if file is not None: - self.charStringsIndex = SubrsIndex(file, globalSubrs, private, fdSelect, fdArray) - self.charStrings = charStrings = {} - for i in range(len(charset)): - charStrings[charset[i]] = i - self.charStringsAreIndexed = 1 - else: - self.charStrings = {} - self.charStringsAreIndexed = 0 - self.globalSubrs = globalSubrs - self.private = private - if fdSelect is not None: - self.fdSelect = fdSelect - if fdArray is not None: - self.fdArray = fdArray - - def keys(self): - return list(self.charStrings.keys()) - - def values(self): - if self.charStringsAreIndexed: - return self.charStringsIndex - else: - return list(self.charStrings.values()) - - def has_key(self, name): - return name in self.charStrings - - __contains__ = has_key - - def __len__(self): - return len(self.charStrings) - - def __getitem__(self, name): - charString = self.charStrings[name] - if self.charStringsAreIndexed: - charString = self.charStringsIndex[charString] - return charString - - def __setitem__(self, name, charString): - if self.charStringsAreIndexed: - index = self.charStrings[name] - self.charStringsIndex[index] = charString - else: - self.charStrings[name] = charString - - def getItemAndSelector(self, name): - if self.charStringsAreIndexed: - index = self.charStrings[name] - return self.charStringsIndex.getItemAndSelector(index) - else: - if hasattr(self, 'fdSelect'): - sel = self.fdSelect[index] # index is not defined at this point. Read R. ? 
- else: - raise KeyError("fdSelect array not yet defined.") - return self.charStrings[name], sel - - def toXML(self, xmlWriter, progress): - names = sorted(self.keys()) - i = 0 - step = 10 - numGlyphs = len(names) - for name in names: - charStr, fdSelectIndex = self.getItemAndSelector(name) - if charStr.needsDecompilation(): - raw = [("raw", 1)] - else: - raw = [] - if fdSelectIndex is None: - xmlWriter.begintag("CharString", [('name', name)] + raw) - else: - xmlWriter.begintag("CharString", - [('name', name), ('fdSelectIndex', fdSelectIndex)] + raw) - xmlWriter.newline() - charStr.toXML(xmlWriter) - xmlWriter.endtag("CharString") - xmlWriter.newline() - if not i % step and progress is not None: - progress.setLabel("Dumping 'CFF ' table... (%s)" % name) - progress.increment(step / numGlyphs) - i = i + 1 - - def fromXML(self, name, attrs, content): - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - if name != "CharString": - continue - fdID = -1 - if hasattr(self, "fdArray"): - fdID = safeEval(attrs["fdSelectIndex"]) - private = self.fdArray[fdID].Private - else: - private = self.private - - glyphName = attrs["name"] - charString = psCharStrings.T2CharString( - private=private, - globalSubrs=self.globalSubrs) - charString.fromXML(name, attrs, content) - if fdID >= 0: - charString.fdSelectIndex = fdID - self[glyphName] = charString - - -def readCard8(file): - return byteord(file.read(1)) - -def readCard16(file): - value, = struct.unpack(">H", file.read(2)) - return value - -def writeCard8(file, value): - file.write(bytechr(value)) - -def writeCard16(file, value): - file.write(struct.pack(">H", value)) - -def packCard8(value): - return bytechr(value) - -def packCard16(value): - return struct.pack(">H", value) - -def buildOperatorDict(table): - d = {} - for op, name, arg, default, conv in table: - d[op] = (name, arg) - return d - -def buildOpcodeDict(table): - d = {} - for op, name, arg, default, conv in table: 
- if isinstance(op, tuple): - op = bytechr(op[0]) + bytechr(op[1]) - else: - op = bytechr(op) - d[name] = (op, arg) - return d - -def buildOrder(table): - l = [] - for op, name, arg, default, conv in table: - l.append(name) - return l - -def buildDefaults(table): - d = {} - for op, name, arg, default, conv in table: - if default is not None: - d[name] = default - return d - -def buildConverters(table): - d = {} - for op, name, arg, default, conv in table: - d[name] = conv - return d - - -class SimpleConverter(object): - def read(self, parent, value): - return value - def write(self, parent, value): - return value - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, value=value) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - return attrs["value"] - -class ASCIIConverter(SimpleConverter): - def read(self, parent, value): - return tostr(value, encoding='ascii') - def write(self, parent, value): - return tobytes(value, encoding='ascii') - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, value=tounicode(value, encoding="ascii")) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - return tobytes(attrs["value"], encoding=("ascii")) - -class Latin1Converter(SimpleConverter): - def read(self, parent, value): - return tostr(value, encoding='latin1') - def write(self, parent, value): - return tobytes(value, encoding='latin1') - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, value=tounicode(value, encoding="latin1")) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - return tobytes(attrs["value"], encoding=("latin1")) - - -def parseNum(s): - try: - value = int(s) - except: - value = float(s) - return value - -class NumberConverter(SimpleConverter): - def xmlRead(self, name, attrs, content, parent): - return parseNum(attrs["value"]) - -class ArrayConverter(SimpleConverter): - def xmlWrite(self, xmlWriter, 
name, value, progress): - value = " ".join(map(str, value)) - xmlWriter.simpletag(name, value=value) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - values = attrs["value"].split() - return [parseNum(value) for value in values] - -class TableConverter(SimpleConverter): - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.begintag(name) - xmlWriter.newline() - value.toXML(xmlWriter, progress) - xmlWriter.endtag(name) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - ob = self.getClass()() - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - ob.fromXML(name, attrs, content) - return ob - -class PrivateDictConverter(TableConverter): - def getClass(self): - return PrivateDict - def read(self, parent, value): - size, offset = value - file = parent.file - priv = PrivateDict(parent.strings, file, offset) - file.seek(offset) - data = file.read(size) - assert len(data) == size - priv.decompile(data) - return priv - def write(self, parent, value): - return (0, 0) # dummy value - -class SubrsConverter(TableConverter): - def getClass(self): - return SubrsIndex - def read(self, parent, value): - file = parent.file - file.seek(parent.offset + value) # Offset(self) - return SubrsIndex(file) - def write(self, parent, value): - return 0 # dummy value - -class CharStringsConverter(TableConverter): - def read(self, parent, value): - file = parent.file - charset = parent.charset - globalSubrs = parent.GlobalSubrs - if hasattr(parent, "ROS"): - fdSelect, fdArray = parent.FDSelect, parent.FDArray - private = None - else: - fdSelect, fdArray = None, None - private = parent.Private - file.seek(value) # Offset(0) - return CharStrings(file, charset, globalSubrs, private, fdSelect, fdArray) - def write(self, parent, value): - return 0 # dummy value - def xmlRead(self, name, attrs, content, parent): - if hasattr(parent, "ROS"): - # if it is a CID-keyed font, then the 
private Dict is extracted from the parent.FDArray - private, fdSelect, fdArray = None, parent.FDSelect, parent.FDArray - else: - # if it is a name-keyed font, then the private dict is in the top dict, and there is no fdArray. - private, fdSelect, fdArray = parent.Private, None, None - charStrings = CharStrings(None, None, parent.GlobalSubrs, private, fdSelect, fdArray) - charStrings.fromXML(name, attrs, content) - return charStrings - -class CharsetConverter(object): - def read(self, parent, value): - isCID = hasattr(parent, "ROS") - if value > 2: - numGlyphs = parent.numGlyphs - file = parent.file - file.seek(value) - if DEBUG: - print("loading charset at %s" % value) - format = readCard8(file) - if format == 0: - charset = parseCharset0(numGlyphs, file, parent.strings, isCID) - elif format == 1 or format == 2: - charset = parseCharset(numGlyphs, file, parent.strings, isCID, format) - else: - raise NotImplementedError - assert len(charset) == numGlyphs - if DEBUG: - print(" charset end at %s" % file.tell()) - else: # offset == 0 -> no charset data. - if isCID or "CharStrings" not in parent.rawDict: - assert value == 0 # We get here only when processing fontDicts from the FDArray of CFF-CID fonts. Only the real topDict references the chrset. - charset = None - elif value == 0: - charset = cffISOAdobeStrings - elif value == 1: - charset = cffIExpertStrings - elif value == 2: - charset = cffExpertSubsetStrings - return charset - - def write(self, parent, value): - return 0 # dummy value - def xmlWrite(self, xmlWriter, name, value, progress): - # XXX only write charset when not in OT/TTX context, where we - # dump charset as a separate "GlyphOrder" table. 
- ##xmlWriter.simpletag("charset") - xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - if 0: - return safeEval(attrs["value"]) - - -class CharsetCompiler(object): - - def __init__(self, strings, charset, parent): - assert charset[0] == '.notdef' - isCID = hasattr(parent.dictObj, "ROS") - data0 = packCharset0(charset, isCID, strings) - data = packCharset(charset, isCID, strings) - if len(data) < len(data0): - self.data = data - else: - self.data = data0 - self.parent = parent - - def setPos(self, pos, endPos): - self.parent.rawDict["charset"] = pos - - def getDataLength(self): - return len(self.data) - - def toFile(self, file): - file.write(self.data) - - -def getCIDfromName(name, strings): - return int(name[3:]) - -def getSIDfromName(name, strings): - return strings.getSID(name) - -def packCharset0(charset, isCID, strings): - fmt = 0 - data = [packCard8(fmt)] - if isCID: - getNameID = getCIDfromName - else: - getNameID = getSIDfromName - - for name in charset[1:]: - data.append(packCard16(getNameID(name,strings))) - return bytesjoin(data) - - -def packCharset(charset, isCID, strings): - fmt = 1 - ranges = [] - first = None - end = 0 - if isCID: - getNameID = getCIDfromName - else: - getNameID = getSIDfromName - - for name in charset[1:]: - SID = getNameID(name, strings) - if first is None: - first = SID - elif end + 1 != SID: - nLeft = end - first - if nLeft > 255: - fmt = 2 - ranges.append((first, nLeft)) - first = SID - end = SID - if end: - nLeft = end - first - if nLeft > 255: - fmt = 2 - ranges.append((first, nLeft)) - - data = [packCard8(fmt)] - if fmt == 1: - nLeftFunc = packCard8 - else: - nLeftFunc = packCard16 - for first, nLeft in ranges: - data.append(packCard16(first) + nLeftFunc(nLeft)) - return bytesjoin(data) - -def parseCharset0(numGlyphs, file, strings, isCID): - charset = [".notdef"] - if isCID: - for i in range(numGlyphs - 1): - CID = 
readCard16(file) - charset.append("cid" + str(CID).zfill(5)) - else: - for i in range(numGlyphs - 1): - SID = readCard16(file) - charset.append(strings[SID]) - return charset - -def parseCharset(numGlyphs, file, strings, isCID, fmt): - charset = ['.notdef'] - count = 1 - if fmt == 1: - nLeftFunc = readCard8 - else: - nLeftFunc = readCard16 - while count < numGlyphs: - first = readCard16(file) - nLeft = nLeftFunc(file) - if isCID: - for CID in range(first, first+nLeft+1): - charset.append("cid" + str(CID).zfill(5)) - else: - for SID in range(first, first+nLeft+1): - charset.append(strings[SID]) - count = count + nLeft + 1 - return charset - - -class EncodingCompiler(object): - - def __init__(self, strings, encoding, parent): - assert not isinstance(encoding, basestring) - data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings) - data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings) - if len(data0) < len(data1): - self.data = data0 - else: - self.data = data1 - self.parent = parent - - def setPos(self, pos, endPos): - self.parent.rawDict["Encoding"] = pos - - def getDataLength(self): - return len(self.data) - - def toFile(self, file): - file.write(self.data) - - -class EncodingConverter(SimpleConverter): - - def read(self, parent, value): - if value == 0: - return "StandardEncoding" - elif value == 1: - return "ExpertEncoding" - else: - assert value > 1 - file = parent.file - file.seek(value) - if DEBUG: - print("loading Encoding at %s" % value) - fmt = readCard8(file) - haveSupplement = fmt & 0x80 - if haveSupplement: - raise NotImplementedError("Encoding supplements are not yet supported") - fmt = fmt & 0x7f - if fmt == 0: - encoding = parseEncoding0(parent.charset, file, haveSupplement, - parent.strings) - elif fmt == 1: - encoding = parseEncoding1(parent.charset, file, haveSupplement, - parent.strings) - return encoding - - def write(self, parent, value): - if value == "StandardEncoding": - return 0 - elif value == 
"ExpertEncoding": - return 1 - return 0 # dummy value - - def xmlWrite(self, xmlWriter, name, value, progress): - if value in ("StandardEncoding", "ExpertEncoding"): - xmlWriter.simpletag(name, name=value) - xmlWriter.newline() - return - xmlWriter.begintag(name) - xmlWriter.newline() - for code in range(len(value)): - glyphName = value[code] - if glyphName != ".notdef": - xmlWriter.simpletag("map", code=hex(code), name=glyphName) - xmlWriter.newline() - xmlWriter.endtag(name) - xmlWriter.newline() - - def xmlRead(self, name, attrs, content, parent): - if "name" in attrs: - return attrs["name"] - encoding = [".notdef"] * 256 - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - code = safeEval(attrs["code"]) - glyphName = attrs["name"] - encoding[code] = glyphName - return encoding - - -def parseEncoding0(charset, file, haveSupplement, strings): - nCodes = readCard8(file) - encoding = [".notdef"] * 256 - for glyphID in range(1, nCodes + 1): - code = readCard8(file) - if code != 0: - encoding[code] = charset[glyphID] - return encoding - -def parseEncoding1(charset, file, haveSupplement, strings): - nRanges = readCard8(file) - encoding = [".notdef"] * 256 - glyphID = 1 - for i in range(nRanges): - code = readCard8(file) - nLeft = readCard8(file) - for glyphID in range(glyphID, glyphID + nLeft + 1): - encoding[code] = charset[glyphID] - code = code + 1 - glyphID = glyphID + 1 - return encoding - -def packEncoding0(charset, encoding, strings): - fmt = 0 - m = {} - for code in range(len(encoding)): - name = encoding[code] - if name != ".notdef": - m[name] = code - codes = [] - for name in charset[1:]: - code = m.get(name) - codes.append(code) - - while codes and codes[-1] is None: - codes.pop() - - data = [packCard8(fmt), packCard8(len(codes))] - for code in codes: - if code is None: - code = 0 - data.append(packCard8(code)) - return bytesjoin(data) - -def packEncoding1(charset, encoding, strings): - fmt = 1 - m = 
{} - for code in range(len(encoding)): - name = encoding[code] - if name != ".notdef": - m[name] = code - ranges = [] - first = None - end = 0 - for name in charset[1:]: - code = m.get(name, -1) - if first is None: - first = code - elif end + 1 != code: - nLeft = end - first - ranges.append((first, nLeft)) - first = code - end = code - nLeft = end - first - ranges.append((first, nLeft)) - - # remove unencoded glyphs at the end. - while ranges and ranges[-1][0] == -1: - ranges.pop() - - data = [packCard8(fmt), packCard8(len(ranges))] - for first, nLeft in ranges: - if first == -1: # unencoded - first = 0 - data.append(packCard8(first) + packCard8(nLeft)) - return bytesjoin(data) - - -class FDArrayConverter(TableConverter): - - def read(self, parent, value): - file = parent.file - file.seek(value) - fdArray = FDArrayIndex(file) - fdArray.strings = parent.strings - fdArray.GlobalSubrs = parent.GlobalSubrs - return fdArray - - def write(self, parent, value): - return 0 # dummy value - - def xmlRead(self, name, attrs, content, parent): - fdArray = FDArrayIndex() - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - fdArray.fromXML(name, attrs, content) - return fdArray - - -class FDSelectConverter(object): - - def read(self, parent, value): - file = parent.file - file.seek(value) - fdSelect = FDSelect(file, parent.numGlyphs) - return fdSelect - - def write(self, parent, value): - return 0 # dummy value - - # The FDSelect glyph data is written out to XML in the charstring keys, - # so we write out only the format selector - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, [('format', value.format)]) - xmlWriter.newline() - - def xmlRead(self, name, attrs, content, parent): - fmt = safeEval(attrs["format"]) - file = None - numGlyphs = None - fdSelect = FDSelect(file, numGlyphs, fmt) - return fdSelect - - -def packFDSelect0(fdSelectArray): - fmt = 0 - data = [packCard8(fmt)] - for 
index in fdSelectArray: - data.append(packCard8(index)) - return bytesjoin(data) - - -def packFDSelect3(fdSelectArray): - fmt = 3 - fdRanges = [] - first = None - end = 0 - lenArray = len(fdSelectArray) - lastFDIndex = -1 - for i in range(lenArray): - fdIndex = fdSelectArray[i] - if lastFDIndex != fdIndex: - fdRanges.append([i, fdIndex]) - lastFDIndex = fdIndex - sentinelGID = i + 1 - - data = [packCard8(fmt)] - data.append(packCard16( len(fdRanges) )) - for fdRange in fdRanges: - data.append(packCard16(fdRange[0])) - data.append(packCard8(fdRange[1])) - data.append(packCard16(sentinelGID)) - return bytesjoin(data) - - -class FDSelectCompiler(object): - - def __init__(self, fdSelect, parent): - fmt = fdSelect.format - fdSelectArray = fdSelect.gidArray - if fmt == 0: - self.data = packFDSelect0(fdSelectArray) - elif fmt == 3: - self.data = packFDSelect3(fdSelectArray) - else: - # choose smaller of the two formats - data0 = packFDSelect0(fdSelectArray) - data3 = packFDSelect3(fdSelectArray) - if len(data0) < len(data3): - self.data = data0 - fdSelect.format = 0 - else: - self.data = data3 - fdSelect.format = 3 - - self.parent = parent - - def setPos(self, pos, endPos): - self.parent.rawDict["FDSelect"] = pos - - def getDataLength(self): - return len(self.data) - - def toFile(self, file): - file.write(self.data) - - -class ROSConverter(SimpleConverter): - - def xmlWrite(self, xmlWriter, name, value, progress): - registry, order, supplement = value - xmlWriter.simpletag(name, [('Registry', tostr(registry)), ('Order', tostr(order)), - ('Supplement', supplement)]) - xmlWriter.newline() - - def xmlRead(self, name, attrs, content, parent): - return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement'])) - - -topDictOperators = [ -# opcode name argument type default converter - ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()), - ((12, 20), 'SyntheticBase', 'number', None, None), - (0, 'version', 'SID', None, None), - (1, 'Notice', 'SID', None, 
Latin1Converter()), - ((12, 0), 'Copyright', 'SID', None, Latin1Converter()), - (2, 'FullName', 'SID', None, None), - ((12, 38), 'FontName', 'SID', None, None), - (3, 'FamilyName', 'SID', None, None), - (4, 'Weight', 'SID', None, None), - ((12, 1), 'isFixedPitch', 'number', 0, None), - ((12, 2), 'ItalicAngle', 'number', 0, None), - ((12, 3), 'UnderlinePosition', 'number', None, None), - ((12, 4), 'UnderlineThickness', 'number', 50, None), - ((12, 5), 'PaintType', 'number', 0, None), - ((12, 6), 'CharstringType', 'number', 2, None), - ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), - (13, 'UniqueID', 'number', None, None), - (5, 'FontBBox', 'array', [0, 0, 0, 0], None), - ((12, 8), 'StrokeWidth', 'number', 0, None), - (14, 'XUID', 'array', None, None), - ((12, 21), 'PostScript', 'SID', None, None), - ((12, 22), 'BaseFontName', 'SID', None, None), - ((12, 23), 'BaseFontBlend', 'delta', None, None), - ((12, 31), 'CIDFontVersion', 'number', 0, None), - ((12, 32), 'CIDFontRevision', 'number', 0, None), - ((12, 33), 'CIDFontType', 'number', 0, None), - ((12, 34), 'CIDCount', 'number', 8720, None), - (15, 'charset', 'number', 0, CharsetConverter()), - ((12, 35), 'UIDBase', 'number', None, None), - (16, 'Encoding', 'number', 0, EncodingConverter()), - (18, 'Private', ('number', 'number'), None, PrivateDictConverter()), - ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), - ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), - (17, 'CharStrings', 'number', None, CharStringsConverter()), -] - -# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, -# in order for the font to compile back from xml. 
- - -privateDictOperators = [ -# opcode name argument type default converter - (6, 'BlueValues', 'delta', None, None), - (7, 'OtherBlues', 'delta', None, None), - (8, 'FamilyBlues', 'delta', None, None), - (9, 'FamilyOtherBlues', 'delta', None, None), - ((12, 9), 'BlueScale', 'number', 0.039625, None), - ((12, 10), 'BlueShift', 'number', 7, None), - ((12, 11), 'BlueFuzz', 'number', 1, None), - (10, 'StdHW', 'number', None, None), - (11, 'StdVW', 'number', None, None), - ((12, 12), 'StemSnapH', 'delta', None, None), - ((12, 13), 'StemSnapV', 'delta', None, None), - ((12, 14), 'ForceBold', 'number', 0, None), - ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated - ((12, 16), 'lenIV', 'number', None, None), # deprecated - ((12, 17), 'LanguageGroup', 'number', 0, None), - ((12, 18), 'ExpansionFactor', 'number', 0.06, None), - ((12, 19), 'initialRandomSeed', 'number', 0, None), - (20, 'defaultWidthX', 'number', 0, None), - (21, 'nominalWidthX', 'number', 0, None), - (19, 'Subrs', 'number', None, SubrsConverter()), -] - -def addConverters(table): - for i in range(len(table)): - op, name, arg, default, conv = table[i] - if conv is not None: - continue - if arg in ("delta", "array"): - conv = ArrayConverter() - elif arg == "number": - conv = NumberConverter() - elif arg == "SID": - conv = ASCIIConverter() - else: - assert False - table[i] = op, name, arg, default, conv - -addConverters(privateDictOperators) -addConverters(topDictOperators) - - -class TopDictDecompiler(psCharStrings.DictDecompiler): - operators = buildOperatorDict(topDictOperators) - - -class PrivateDictDecompiler(psCharStrings.DictDecompiler): - operators = buildOperatorDict(privateDictOperators) - - -class DictCompiler(object): - - def __init__(self, dictObj, strings, parent): - assert isinstance(strings, IndexedStrings) - self.dictObj = dictObj - self.strings = strings - self.parent = parent - rawDict = {} - for name in dictObj.order: - value = getattr(dictObj, name, None) - if value is 
None: - continue - conv = dictObj.converters[name] - value = conv.write(dictObj, value) - if value == dictObj.defaults.get(name): - continue - rawDict[name] = value - self.rawDict = rawDict - - def setPos(self, pos, endPos): - pass - - def getDataLength(self): - return len(self.compile("getDataLength")) - - def compile(self, reason): - if DEBUG: - print("-- compiling %s for %s" % (self.__class__.__name__, reason)) - print("in baseDict: ", self) - rawDict = self.rawDict - data = [] - for name in self.dictObj.order: - value = rawDict.get(name) - if value is None: - continue - op, argType = self.opcodes[name] - if isinstance(argType, tuple): - l = len(argType) - assert len(value) == l, "value doesn't match arg type" - for i in range(l): - arg = argType[i] - v = value[i] - arghandler = getattr(self, "arg_" + arg) - data.append(arghandler(v)) - else: - arghandler = getattr(self, "arg_" + argType) - data.append(arghandler(value)) - data.append(op) - return bytesjoin(data) - - def toFile(self, file): - file.write(self.compile("toFile")) - - def arg_number(self, num): - return encodeNumber(num) - def arg_SID(self, s): - return psCharStrings.encodeIntCFF(self.strings.getSID(s)) - def arg_array(self, value): - data = [] - for num in value: - data.append(encodeNumber(num)) - return bytesjoin(data) - def arg_delta(self, value): - out = [] - last = 0 - for v in value: - out.append(v - last) - last = v - data = [] - for num in out: - data.append(encodeNumber(num)) - return bytesjoin(data) - - -def encodeNumber(num): - if isinstance(num, float): - return psCharStrings.encodeFloat(num) - else: - return psCharStrings.encodeIntCFF(num) - - -class TopDictCompiler(DictCompiler): - - opcodes = buildOpcodeDict(topDictOperators) - - def getChildren(self, strings): - children = [] - if hasattr(self.dictObj, "charset") and self.dictObj.charset: - children.append(CharsetCompiler(strings, self.dictObj.charset, self)) - if hasattr(self.dictObj, "Encoding"): - encoding = self.dictObj.Encoding 
- if not isinstance(encoding, basestring): - children.append(EncodingCompiler(strings, encoding, self)) - if hasattr(self.dictObj, "FDSelect"): - # I have not yet supported merging a ttx CFF-CID font, as there are interesting - # issues about merging the FDArrays. Here I assume that - # either the font was read from XML, and teh FDSelect indices are all - # in the charstring data, or the FDSelect array is already fully defined. - fdSelect = self.dictObj.FDSelect - if len(fdSelect) == 0: # probably read in from XML; assume fdIndex in CharString data - charStrings = self.dictObj.CharStrings - for name in self.dictObj.charset: - fdSelect.append(charStrings[name].fdSelectIndex) - fdSelectComp = FDSelectCompiler(fdSelect, self) - children.append(fdSelectComp) - if hasattr(self.dictObj, "CharStrings"): - items = [] - charStrings = self.dictObj.CharStrings - for name in self.dictObj.charset: - items.append(charStrings[name]) - charStringsComp = CharStringsCompiler(items, strings, self) - children.append(charStringsComp) - if hasattr(self.dictObj, "FDArray"): - # I have not yet supported merging a ttx CFF-CID font, as there are interesting - # issues about merging the FDArrays. Here I assume that the FDArray info is correct - # and complete. 
- fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self) - children.append(fdArrayIndexComp) - children.extend(fdArrayIndexComp.getChildren(strings)) - if hasattr(self.dictObj, "Private"): - privComp = self.dictObj.Private.getCompiler(strings, self) - children.append(privComp) - children.extend(privComp.getChildren(strings)) - return children - - -class FontDictCompiler(DictCompiler): - - opcodes = buildOpcodeDict(topDictOperators) - - def getChildren(self, strings): - children = [] - if hasattr(self.dictObj, "Private"): - privComp = self.dictObj.Private.getCompiler(strings, self) - children.append(privComp) - children.extend(privComp.getChildren(strings)) - return children - - -class PrivateDictCompiler(DictCompiler): - - opcodes = buildOpcodeDict(privateDictOperators) - - def setPos(self, pos, endPos): - size = endPos - pos - self.parent.rawDict["Private"] = size, pos - self.pos = pos - - def getChildren(self, strings): - children = [] - if hasattr(self.dictObj, "Subrs"): - children.append(self.dictObj.Subrs.getCompiler(strings, self)) - return children - - -class BaseDict(object): - - def __init__(self, strings=None, file=None, offset=None): - self.rawDict = {} - if DEBUG: - print("loading %s at %s" % (self.__class__.__name__, offset)) - self.file = file - self.offset = offset - self.strings = strings - self.skipNames = [] - - def decompile(self, data): - if DEBUG: - print(" length %s is %s" % (self.__class__.__name__, len(data))) - dec = self.decompilerClass(self.strings) - dec.decompile(data) - self.rawDict = dec.getDict() - self.postDecompile() - - def postDecompile(self): - pass - - def getCompiler(self, strings, parent): - return self.compilerClass(self, strings, parent) - - def __getattr__(self, name): - value = self.rawDict.get(name) - if value is None: - value = self.defaults.get(name) - if value is None: - raise AttributeError(name) - conv = self.converters[name] - value = conv.read(self, value) - setattr(self, name, value) - return value - 
- def toXML(self, xmlWriter, progress): - for name in self.order: - if name in self.skipNames: - continue - value = getattr(self, name, None) - if value is None: - continue - conv = self.converters[name] - conv.xmlWrite(xmlWriter, name, value, progress) - - def fromXML(self, name, attrs, content): - conv = self.converters[name] - value = conv.xmlRead(name, attrs, content, self) - setattr(self, name, value) - - -class TopDict(BaseDict): - - defaults = buildDefaults(topDictOperators) - converters = buildConverters(topDictOperators) - order = buildOrder(topDictOperators) - decompilerClass = TopDictDecompiler - compilerClass = TopDictCompiler - - def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): - BaseDict.__init__(self, strings, file, offset) - self.GlobalSubrs = GlobalSubrs - - def getGlyphOrder(self): - return self.charset - - def postDecompile(self): - offset = self.rawDict.get("CharStrings") - if offset is None: - return - # get the number of glyphs beforehand. - self.file.seek(offset) - self.numGlyphs = readCard16(self.file) - - def toXML(self, xmlWriter, progress): - if hasattr(self, "CharStrings"): - self.decompileAllCharStrings(progress) - if hasattr(self, "ROS"): - self.skipNames = ['Encoding'] - if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"): - # these values have default values, but I only want them to show up - # in CID fonts. - self.skipNames = ['CIDFontVersion', 'CIDFontRevision', 'CIDFontType', - 'CIDCount'] - BaseDict.toXML(self, xmlWriter, progress) - - def decompileAllCharStrings(self, progress): - # XXX only when doing ttdump -i? 
- i = 0 - for charString in self.CharStrings.values(): - try: - charString.decompile() - except: - print("Error in charstring ", i) - import sys - typ, value = sys.exc_info()[0:2] - raise typ(value) - if not i % 30 and progress: - progress.increment(0) # update - i = i + 1 - - -class FontDict(BaseDict): - - defaults = buildDefaults(topDictOperators) - converters = buildConverters(topDictOperators) - order = buildOrder(topDictOperators) - decompilerClass = None - compilerClass = FontDictCompiler - - def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): - BaseDict.__init__(self, strings, file, offset) - self.GlobalSubrs = GlobalSubrs - - def getGlyphOrder(self): - return self.charset - - def toXML(self, xmlWriter, progress): - self.skipNames = ['Encoding'] - BaseDict.toXML(self, xmlWriter, progress) - - -class PrivateDict(BaseDict): - defaults = buildDefaults(privateDictOperators) - converters = buildConverters(privateDictOperators) - order = buildOrder(privateDictOperators) - decompilerClass = PrivateDictDecompiler - compilerClass = PrivateDictCompiler - - -class IndexedStrings(object): - - """SID -> string mapping.""" - - def __init__(self, file=None): - if file is None: - strings = [] - else: - strings = [tostr(s, encoding="latin1") for s in Index(file)] - self.strings = strings - - def getCompiler(self): - return IndexedStringsCompiler(self, None, None) - - def __len__(self): - return len(self.strings) - - def __getitem__(self, SID): - if SID < cffStandardStringCount: - return cffStandardStrings[SID] - else: - return self.strings[SID - cffStandardStringCount] - - def getSID(self, s): - if not hasattr(self, "stringMapping"): - self.buildStringMapping() - if s in cffStandardStringMapping: - SID = cffStandardStringMapping[s] - elif s in self.stringMapping: - SID = self.stringMapping[s] - else: - SID = len(self.strings) + cffStandardStringCount - self.strings.append(s) - self.stringMapping[s] = SID - return SID - - def getStrings(self): - return 
self.strings - - def buildStringMapping(self): - self.stringMapping = {} - for index in range(len(self.strings)): - self.stringMapping[self.strings[index]] = index + cffStandardStringCount - - -# The 391 Standard Strings as used in the CFF format. -# from Adobe Technical None #5176, version 1.0, 18 March 1998 - -cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', - 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', - 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', - 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', - 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', - 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', - 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', - 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', - 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', - 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', - 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', - 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', - 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', - 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', - 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', - 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', - 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', - 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', - 'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', - 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', - 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', - 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', - 'copyright', 
'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', - 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', - 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', - 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute', - 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', - 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', - 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', - 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', - 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', - 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', - 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', - 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', - 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle', - 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', - 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', - 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', - 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', - 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', - 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', - 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', - 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', - 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', - 'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', - 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', - 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', - 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', - 'questiondownsmall', 
'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', - 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', - 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', - 'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior', - 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', - 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', - 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', - 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', - 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', - 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', - 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', - 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', - 'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002', - '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', - 'Semibold' -] - -cffStandardStringCount = 391 -assert len(cffStandardStrings) == cffStandardStringCount -# build reverse mapping -cffStandardStringMapping = {} -for _i in range(cffStandardStringCount): - cffStandardStringMapping[cffStandardStrings[_i]] = _i - -cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign", -"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", -"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", -"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", -"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", -"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", -"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", -"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", -"k", "l", "m", "n", "o", "p", 
"q", "r", "s", "t", "u", "v", "w", "x", "y", "z", -"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", -"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", -"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl", -"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", -"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", -"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", -"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", -"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE", -"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", -"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", -"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn", -"threequarters", "twosuperior", "registered", "minus", "eth", "multiply", -"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", -"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", -"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", -"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", -"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute", -"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", -"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis", -"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde", -"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", -"zcaron"] - -cffISOAdobeStringCount = 229 -assert len(cffISOAdobeStrings) == cffISOAdobeStringCount - -cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall", -"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", -"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", 
-"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle", -"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", -"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon", -"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall", -"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", -"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", -"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", -"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", -"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", -"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", -"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", -"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall", -"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", -"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", -"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", -"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth", -"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", -"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior", -"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", -"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", -"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", -"centinferior", "dollarinferior", "periodinferior", "commainferior", -"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall", -"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", -"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", -"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", 
-"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", -"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", -"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", -"Ydieresissmall"] - -cffExpertStringCount = 166 -assert len(cffIExpertStrings) == cffExpertStringCount - -cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle", -"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader", -"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle", -"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", -"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", -"semicolon", "commasuperior", "threequartersemdash", "periodsuperior", -"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", -"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", -"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", -"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah", -"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf", -"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", -"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior", -"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", -"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", -"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", -"eightinferior", "nineinferior", "centinferior", "dollarinferior", -"periodinferior", "commainferior"] - -cffExpertSubsetStringCount = 87 -assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount diff -Nru fonttools-3.0/Snippets/fontTools/encodings/codecs_test.py fonttools-3.21.2/Snippets/fontTools/encodings/codecs_test.py --- fonttools-3.0/Snippets/fontTools/encodings/codecs_test.py 2015-08-31 17:57:15.000000000 +0000 +++ 
fonttools-3.21.2/Snippets/fontTools/encodings/codecs_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -import unittest -import fontTools.encodings.codecs # Not to be confused with "import codecs" - -class ExtendedCodecsTest(unittest.TestCase): - - def test_decode_mac_japanese(self): - self.assertEqual(b'x\xfe\xfdy'.decode("x_mac_japanese_ttx"), - unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)) - - def test_encode_mac_japanese(self): - self.assertEqual(b'x\xfe\xfdy', - (unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)).encode("x_mac_japanese_ttx")) - - def test_decode_mac_trad_chinese(self): - self.assertEqual(b'\x80'.decode("x_mac_trad_chinese_ttx"), - unichr(0x5C)) - - def test_decode_mac_romanian(self): - self.assertEqual(b'x\xfb'.decode("mac_romanian"), - unichr(0x78)+unichr(0x02DA)) - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/ast.py fonttools-3.21.2/Snippets/fontTools/feaLib/ast.py --- fonttools-3.0/Snippets/fontTools/feaLib/ast.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/ast.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,98 +1,1198 @@ from __future__ import print_function, division, absolute_import from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.feaLib.error import FeatureLibError +from fontTools.misc.encodingTools import getEncoding +from collections import OrderedDict +import itertools + +SHIFT = " " * 4 + + +def deviceToString(device): + if device is None: + return "" + else: + return "" % ", ".join("%d %d" % t for t in device) + + +fea_keywords = set([ + "anchor", "anchordef", "anon", "anonymous", + "by", + "contour", "cursive", + "device", + "enum", "enumerate", "excludedflt", "exclude_dflt", + "feature", "from", + "ignore", "ignorebaseglyphs", "ignoreligatures", 
"ignoremarks", + "include", "includedflt", "include_dflt", + "language", "languagesystem", "lookup", "lookupflag", + "mark", "markattachmenttype", "markclass", + "nameid", "null", + "parameters", "pos", "position", + "required", "righttoleft", "reversesub", "rsub", + "script", "sub", "substitute", "subtable", + "table", + "usemarkfilteringset", "useextension", "valuerecorddef"] +) + + +def asFea(g): + if hasattr(g, 'asFea'): + return g.asFea() + elif isinstance(g, tuple) and len(g) == 2: + return asFea(g[0]) + "-" + asFea(g[1]) # a range + elif g.lower() in fea_keywords: + return "\\" + g + else: + return g -class FeatureFile(object): - def __init__(self): +class Element(object): + + def __init__(self, location): + self.location = location + + def build(self, builder): + pass + + def asFea(self, indent=""): + raise NotImplementedError + + def __str__(self): + return self.asFea() + + +class Statement(Element): + pass + + +class Expression(Element): + pass + + +class Comment(Element): + def __init__(self, location, text): + super(Comment, self).__init__(location) + self.text = text + + def asFea(self, indent=""): + return self.text + + +class GlyphName(Expression): + """A single glyph name, such as cedilla.""" + def __init__(self, location, glyph): + Expression.__init__(self, location) + self.glyph = glyph + + def glyphSet(self): + return (self.glyph,) + + def asFea(self, indent=""): + return str(self.glyph) + + +class GlyphClass(Expression): + """A glyph class, such as [acute cedilla grave].""" + def __init__(self, location, glyphs=None): + Expression.__init__(self, location) + self.glyphs = glyphs if glyphs is not None else [] + self.original = [] + self.curr = 0 + + def glyphSet(self): + return tuple(self.glyphs) + + def asFea(self, indent=""): + if len(self.original): + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr:]) + self.curr = len(self.glyphs) + return "[" + " ".join(map(asFea, self.original)) + "]" + else: + return "[" + " 
".join(map(asFea, self.glyphs)) + "]" + + def extend(self, glyphs): + self.glyphs.extend(glyphs) + + def append(self, glyph): + self.glyphs.append(glyph) + + def add_range(self, start, end, glyphs): + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr:]) + self.original.append((start, end)) + self.glyphs.extend(glyphs) + self.curr = len(self.glyphs) + + def add_cid_range(self, start, end, glyphs): + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr:]) + self.original.append(("cid{:05d}".format(start), "cid{:05d}".format(end))) + self.glyphs.extend(glyphs) + self.curr = len(self.glyphs) + + def add_class(self, gc): + if self.curr < len(self.glyphs): + self.original.extend(self.glyphs[self.curr:]) + self.original.append(gc) + self.glyphs.extend(gc.glyphSet()) + self.curr = len(self.glyphs) + + +class GlyphClassName(Expression): + """A glyph class name, such as @FRENCH_MARKS.""" + def __init__(self, location, glyphclass): + Expression.__init__(self, location) + assert isinstance(glyphclass, GlyphClassDefinition) + self.glyphclass = glyphclass + + def glyphSet(self): + return tuple(self.glyphclass.glyphSet()) + + def asFea(self, indent=""): + return "@" + self.glyphclass.name + + +class MarkClassName(Expression): + """A mark class name, such as @FRENCH_MARKS defined with markClass.""" + def __init__(self, location, markClass): + Expression.__init__(self, location) + assert isinstance(markClass, MarkClass) + self.markClass = markClass + + def glyphSet(self): + return self.markClass.glyphSet() + + def asFea(self, indent=""): + return "@" + self.markClass.name + + +class AnonymousBlock(Statement): + def __init__(self, tag, content, location): + Statement.__init__(self, location) + self.tag, self.content = tag, content + + def asFea(self, indent=""): + res = "anon {} {{\n".format(self.tag) + res += self.content + res += "}} {};\n\n".format(self.tag) + return res + + +class Block(Statement): + def __init__(self, 
location): + Statement.__init__(self, location) self.statements = [] + def build(self, builder): + for s in self.statements: + s.build(builder) + + def asFea(self, indent=""): + indent += SHIFT + return indent + ("\n" + indent).join( + [s.asFea(indent=indent) for s in self.statements]) + "\n" + + +class FeatureFile(Block): + def __init__(self): + Block.__init__(self, location=None) + self.markClasses = {} # name --> ast.MarkClass + + def asFea(self, indent=""): + return "\n".join(s.asFea(indent=indent) for s in self.statements) + -class FeatureBlock(object): +class FeatureBlock(Block): def __init__(self, location, name, use_extension): - self.location = location + Block.__init__(self, location) self.name, self.use_extension = name, use_extension - self.statements = [] + def build(self, builder): + # TODO(sascha): Handle use_extension. + builder.start_feature(self.location, self.name) + # language exclude_dflt statements modify builder.features_ + # limit them to this block with temporary builder.features_ + features = builder.features_ + builder.features_ = {} + Block.build(self, builder) + for key, value in builder.features_.items(): + features.setdefault(key, []).extend(value) + builder.features_ = features + builder.end_feature() + + def asFea(self, indent=""): + res = indent + "feature %s {\n" % self.name.strip() + res += Block.asFea(self, indent=indent) + res += indent + "} %s;\n" % self.name.strip() + return res + + +class FeatureNamesBlock(Block): + def __init__(self, location): + Block.__init__(self, location) + + def asFea(self, indent=""): + res = indent + "featureNames {\n" + res += Block.asFea(self, indent=indent) + res += indent + "};\n" + return res -class LookupBlock(object): + +class LookupBlock(Block): def __init__(self, location, name, use_extension): - self.location = location + Block.__init__(self, location) self.name, self.use_extension = name, use_extension - self.statements = [] + def build(self, builder): + # TODO(sascha): Handle 
use_extension. + builder.start_lookup_block(self.location, self.name) + Block.build(self, builder) + builder.end_lookup_block() + + def asFea(self, indent=""): + res = "lookup {} {{\n".format(self.name) + res += Block.asFea(self, indent=indent) + res += "{}}} {};\n".format(indent, self.name) + return res + + +class TableBlock(Block): + def __init__(self, location, name): + Block.__init__(self, location) + self.name = name -class GlyphClassDefinition(object): + def asFea(self, indent=""): + res = "table {} {{\n".format(self.name.strip()) + res += super(TableBlock, self).asFea(indent=indent) + res += "}} {};\n".format(self.name.strip()) + return res + + +class GlyphClassDefinition(Statement): + """Example: @UPPERCASE = [A-Z];""" def __init__(self, location, name, glyphs): - self.location = location + Statement.__init__(self, location) self.name = name self.glyphs = glyphs + def glyphSet(self): + return tuple(self.glyphs.glyphSet()) -class AlternateSubstitution(object): - def __init__(self, location, glyph, from_class): - self.location = location - self.glyph, self.from_class = (glyph, from_class) + def asFea(self, indent=""): + return "@" + self.name + " = " + self.glyphs.asFea() + ";" + + +class GlyphClassDefStatement(Statement): + """Example: GlyphClassDef @UPPERCASE, [B], [C], [D];""" + def __init__(self, location, baseGlyphs, markGlyphs, + ligatureGlyphs, componentGlyphs): + Statement.__init__(self, location) + self.baseGlyphs, self.markGlyphs = (baseGlyphs, markGlyphs) + self.ligatureGlyphs = ligatureGlyphs + self.componentGlyphs = componentGlyphs + + def build(self, builder): + base = self.baseGlyphs.glyphSet() if self.baseGlyphs else tuple() + liga = self.ligatureGlyphs.glyphSet() \ + if self.ligatureGlyphs else tuple() + mark = self.markGlyphs.glyphSet() if self.markGlyphs else tuple() + comp = (self.componentGlyphs.glyphSet() + if self.componentGlyphs else tuple()) + builder.add_glyphClassDef(self.location, base, liga, mark, comp) + + def asFea(self, 
indent=""): + return "GlyphClassDef {}, {}, {}, {};".format( + self.baseGlyphs.asFea() if self.baseGlyphs else "", + self.ligatureGlyphs.asFea() if self.ligatureGlyphs else "", + self.markGlyphs.asFea() if self.markGlyphs else "", + self.componentGlyphs.asFea() if self.componentGlyphs else "") + + +# While glyph classes can be defined only once, the feature file format +# allows expanding mark classes with multiple definitions, each using +# different glyphs and anchors. The following are two MarkClassDefinitions +# for the same MarkClass: +# markClass [acute grave] @FRENCH_ACCENTS; +# markClass [cedilla] @FRENCH_ACCENTS; +class MarkClass(object): + def __init__(self, name): + self.name = name + self.definitions = [] + self.glyphs = OrderedDict() # glyph --> ast.MarkClassDefinitions + def addDefinition(self, definition): + assert isinstance(definition, MarkClassDefinition) + self.definitions.append(definition) + for glyph in definition.glyphSet(): + if glyph in self.glyphs: + otherLoc = self.glyphs[glyph].location + raise FeatureLibError( + "Glyph %s already defined at %s:%d:%d" % ( + glyph, otherLoc[0], otherLoc[1], otherLoc[2]), + definition.location) + self.glyphs[glyph] = definition + + def glyphSet(self): + return tuple(self.glyphs.keys()) + + def asFea(self, indent=""): + res = "\n".join(d.asFea(indent=indent) for d in self.definitions) + return res + + +class MarkClassDefinition(Statement): + def __init__(self, location, markClass, anchor, glyphs): + Statement.__init__(self, location) + assert isinstance(markClass, MarkClass) + assert isinstance(anchor, Anchor) and isinstance(glyphs, Expression) + self.markClass, self.anchor, self.glyphs = markClass, anchor, glyphs + + def glyphSet(self): + return self.glyphs.glyphSet() + + def asFea(self, indent=""): + return "{}markClass {} {} @{};".format( + indent, self.glyphs.asFea(), self.anchor.asFea(), + self.markClass.name) + + +class AlternateSubstStatement(Statement): + def __init__(self, location, prefix, glyph, 
suffix, replacement): + Statement.__init__(self, location) + self.prefix, self.glyph, self.suffix = (prefix, glyph, suffix) + self.replacement = replacement + + def build(self, builder): + glyph = self.glyph.glyphSet() + assert len(glyph) == 1, glyph + glyph = list(glyph)[0] + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + replacement = self.replacement.glyphSet() + builder.add_alternate_subst(self.location, prefix, glyph, suffix, + replacement) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix): + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += asFea(self.glyph) + "'" # even though we really only use 1 + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += asFea(self.glyph) + res += " from " + res += asFea(self.replacement) + res += ";" + return res + + +class Anchor(Expression): + def __init__(self, location, name, x, y, contourpoint, + xDeviceTable, yDeviceTable): + Expression.__init__(self, location) + self.name = name + self.x, self.y, self.contourpoint = x, y, contourpoint + self.xDeviceTable, self.yDeviceTable = xDeviceTable, yDeviceTable + + def asFea(self, indent=""): + if self.name is not None: + return "".format(self.name) + res = "" + exit = self.exitAnchor.asFea() if self.exitAnchor else "" + return "pos cursive {} {} {};".format(self.glyphclass.asFea(), entry, exit) + + +class FeatureReferenceStatement(Statement): + """Example: feature salt;""" + def __init__(self, location, featureName): + Statement.__init__(self, location) + self.location, self.featureName = (location, featureName) + + def build(self, builder): + builder.add_feature_reference(self.location, self.featureName) + + def asFea(self, indent=""): + return "feature {};".format(self.featureName) + + +class IgnorePosStatement(Statement): + def __init__(self, location, chainContexts): + Statement.__init__(self, location) + self.chainContexts = 
chainContexts + + def build(self, builder): + for prefix, glyphs, suffix in self.chainContexts: + prefix = [p.glyphSet() for p in prefix] + glyphs = [g.glyphSet() for g in glyphs] + suffix = [s.glyphSet() for s in suffix] + builder.add_chain_context_pos( + self.location, prefix, glyphs, suffix, []) + + def asFea(self, indent=""): + contexts = [] + for prefix, glyphs, suffix in self.chainContexts: + res = "" + if len(prefix) or len(suffix): + if len(prefix): + res += " ".join(map(asFea, prefix)) + " " + res += " ".join(g.asFea() + "'" for g in glyphs) + if len(suffix): + res += " " + " ".join(map(asFea, suffix)) + else: + res += " ".join(map(asFea, glyphs)) + contexts.append(res) + return "ignore pos " + ", ".join(contexts) + ";" + + +class IgnoreSubstStatement(Statement): + def __init__(self, location, chainContexts): + Statement.__init__(self, location) + self.chainContexts = chainContexts + + def build(self, builder): + for prefix, glyphs, suffix in self.chainContexts: + prefix = [p.glyphSet() for p in prefix] + glyphs = [g.glyphSet() for g in glyphs] + suffix = [s.glyphSet() for s in suffix] + builder.add_chain_context_subst( + self.location, prefix, glyphs, suffix, []) + + def asFea(self, indent=""): + contexts = [] + for prefix, glyphs, suffix in self.chainContexts: + res = "" + if len(prefix) or len(suffix): + if len(prefix): + res += " ".join(map(asFea, prefix)) + " " + res += " ".join(g.asFea() + "'" for g in glyphs) + if len(suffix): + res += " " + " ".join(map(asFea, suffix)) + else: + res += " ".join(map(asFea, glyphs)) + contexts.append(res) + return "ignore sub " + ", ".join(contexts) + ";" -class LanguageStatement(object): + +class LanguageStatement(Statement): def __init__(self, location, language, include_default, required): - self.location = location + Statement.__init__(self, location) + assert(len(language) == 4) self.language = language self.include_default = include_default self.required = required + def build(self, builder): + 
builder.set_language(location=self.location, language=self.language, + include_default=self.include_default, + required=self.required) + + def asFea(self, indent=""): + res = "language {}".format(self.language.strip()) + if not self.include_default: + res += " exclude_dflt" + if self.required: + res += " required" + res += ";" + return res -class LanguageSystemStatement(object): + +class LanguageSystemStatement(Statement): def __init__(self, location, script, language): - self.location = location + Statement.__init__(self, location) self.script, self.language = (script, language) + def build(self, builder): + builder.add_language_system(self.location, self.script, self.language) -class IgnoreSubstitutionRule(object): - def __init__(self, location, prefix, glyphs, suffix): - self.location = location + def asFea(self, indent=""): + return "languagesystem {} {};".format(self.script, self.language.strip()) + + +class FontRevisionStatement(Statement): + def __init__(self, location, revision): + Statement.__init__(self, location) + self.revision = revision + + def build(self, builder): + builder.set_font_revision(self.location, self.revision) + + def asFea(self, indent=""): + return "FontRevision {:.3f};".format(self.revision) + + +class LigatureCaretByIndexStatement(Statement): + def __init__(self, location, glyphs, carets): + Statement.__init__(self, location) + self.glyphs, self.carets = (glyphs, carets) + + def build(self, builder): + glyphs = self.glyphs.glyphSet() + builder.add_ligatureCaretByIndex_(self.location, glyphs, set(self.carets)) + + def asFea(self, indent=""): + return "LigatureCaretByIndex {} {};".format( + self.glyphs.asFea(), " ".join(str(x) for x in self.carets)) + + +class LigatureCaretByPosStatement(Statement): + def __init__(self, location, glyphs, carets): + Statement.__init__(self, location) + self.glyphs, self.carets = (glyphs, carets) + + def build(self, builder): + glyphs = self.glyphs.glyphSet() + 
builder.add_ligatureCaretByPos_(self.location, glyphs, set(self.carets)) + + def asFea(self, indent=""): + return "LigatureCaretByPos {} {};".format( + self.glyphs.asFea(), " ".join(str(x) for x in self.carets)) + + +class LigatureSubstStatement(Statement): + def __init__(self, location, prefix, glyphs, suffix, replacement, + forceChain): + Statement.__init__(self, location) self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix) + self.replacement, self.forceChain = replacement, forceChain + + def build(self, builder): + prefix = [p.glyphSet() for p in self.prefix] + glyphs = [g.glyphSet() for g in self.glyphs] + suffix = [s.glyphSet() for s in self.suffix] + builder.add_ligature_subst( + self.location, prefix, glyphs, suffix, self.replacement, + self.forceChain) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(g.asFea() for g in self.prefix) + " " + res += " ".join(g.asFea() + "'" for g in self.glyphs) + if len(self.suffix): + res += " " + " ".join(g.asFea() for g in self.suffix) + else: + res += " ".join(g.asFea() for g in self.glyphs) + res += " by " + res += asFea(self.replacement) + res += ";" + return res + + +class LookupFlagStatement(Statement): + def __init__(self, location, value, markAttachment, markFilteringSet): + Statement.__init__(self, location) + self.value = value + self.markAttachment = markAttachment + self.markFilteringSet = markFilteringSet + + def build(self, builder): + markAttach = None + if self.markAttachment is not None: + markAttach = self.markAttachment.glyphSet() + markFilter = None + if self.markFilteringSet is not None: + markFilter = self.markFilteringSet.glyphSet() + builder.set_lookup_flag(self.location, self.value, + markAttach, markFilter) + + def asFea(self, indent=""): + res = "lookupflag" + flags = ["RightToLeft", "IgnoreBaseGlyphs", "IgnoreLigatures", "IgnoreMarks"] + curr = 1 + for i in range(len(flags)): + if 
self.value & curr != 0: + res += " " + flags[i] + curr = curr << 1 + if self.markAttachment is not None: + res += " MarkAttachmentType {}".format(self.markAttachment.asFea()) + if self.markFilteringSet is not None: + res += " UseMarkFilteringSet {}".format(self.markFilteringSet.asFea()) + res += ";" + return res -class LookupReferenceStatement(object): +class LookupReferenceStatement(Statement): def __init__(self, location, lookup): + Statement.__init__(self, location) self.location, self.lookup = (location, lookup) + def build(self, builder): + builder.add_lookup_call(self.lookup.name) -class ScriptStatement(object): + def asFea(self, indent=""): + return "lookup {};".format(self.lookup.name) + + +class MarkBasePosStatement(Statement): + def __init__(self, location, base, marks): + Statement.__init__(self, location) + self.base, self.marks = base, marks + + def build(self, builder): + builder.add_mark_base_pos(self.location, self.base.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos base {}".format(self.base.asFea()) + for a, m in self.marks: + res += " {} mark @{}".format(a.asFea(), m.name) + res += ";" + return res + + +class MarkLigPosStatement(Statement): + def __init__(self, location, ligatures, marks): + Statement.__init__(self, location) + self.ligatures, self.marks = ligatures, marks + + def build(self, builder): + builder.add_mark_lig_pos(self.location, self.ligatures.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos ligature {}".format(self.ligatures.asFea()) + ligs = [] + for l in self.marks: + temp = "" + if l is None or not len(l): + temp = " " + else: + for a, m in l: + temp += " {} mark @{}".format(a.asFea(), m.name) + ligs.append(temp) + res += ("\n" + indent + SHIFT + "ligComponent").join(ligs) + res += ";" + return res + + +class MarkMarkPosStatement(Statement): + def __init__(self, location, baseMarks, marks): + Statement.__init__(self, location) + self.baseMarks, self.marks = baseMarks, marks + + def 
build(self, builder): + builder.add_mark_mark_pos(self.location, self.baseMarks.glyphSet(), self.marks) + + def asFea(self, indent=""): + res = "pos mark {}".format(self.baseMarks.asFea()) + for a, m in self.marks: + res += " {} mark @{}".format(a.asFea(), m.name) + res += ";" + return res + + +class MultipleSubstStatement(Statement): + def __init__(self, location, prefix, glyph, suffix, replacement): + Statement.__init__(self, location) + self.prefix, self.glyph, self.suffix = prefix, glyph, suffix + self.replacement = replacement + + def build(self, builder): + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + builder.add_multiple_subst( + self.location, prefix, self.glyph, suffix, self.replacement) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix): + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += asFea(self.glyph) + "'" + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += asFea(self.glyph) + res += " by " + res += " ".join(map(asFea, self.replacement)) + res += ";" + return res + + +class PairPosStatement(Statement): + def __init__(self, location, enumerated, + glyphs1, valuerecord1, glyphs2, valuerecord2): + Statement.__init__(self, location) + self.enumerated = enumerated + self.glyphs1, self.valuerecord1 = glyphs1, valuerecord1 + self.glyphs2, self.valuerecord2 = glyphs2, valuerecord2 + + def build(self, builder): + if self.enumerated: + g = [self.glyphs1.glyphSet(), self.glyphs2.glyphSet()] + for glyph1, glyph2 in itertools.product(*g): + builder.add_specific_pair_pos( + self.location, glyph1, self.valuerecord1, + glyph2, self.valuerecord2) + return + + is_specific = (isinstance(self.glyphs1, GlyphName) and + isinstance(self.glyphs2, GlyphName)) + if is_specific: + builder.add_specific_pair_pos( + self.location, self.glyphs1.glyph, self.valuerecord1, + self.glyphs2.glyph, self.valuerecord2) + else: + 
builder.add_class_pair_pos( + self.location, self.glyphs1.glyphSet(), self.valuerecord1, + self.glyphs2.glyphSet(), self.valuerecord2) + + def asFea(self, indent=""): + res = "enum " if self.enumerated else "" + if self.valuerecord2: + res += "pos {} {} {} {};".format( + self.glyphs1.asFea(), self.valuerecord1.makeString(), + self.glyphs2.asFea(), self.valuerecord2.makeString()) + else: + res += "pos {} {} {};".format( + self.glyphs1.asFea(), self.glyphs2.asFea(), + self.valuerecord1.makeString()) + return res + + +class ReverseChainSingleSubstStatement(Statement): + def __init__(self, location, old_prefix, old_suffix, glyphs, replacements): + Statement.__init__(self, location) + self.old_prefix, self.old_suffix = old_prefix, old_suffix + self.glyphs = glyphs + self.replacements = replacements + + def build(self, builder): + prefix = [p.glyphSet() for p in self.old_prefix] + suffix = [s.glyphSet() for s in self.old_suffix] + originals = self.glyphs[0].glyphSet() + replaces = self.replacements[0].glyphSet() + if len(replaces) == 1: + replaces = replaces * len(originals) + builder.add_reverse_chain_single_subst( + self.location, prefix, suffix, dict(zip(originals, replaces))) + + def asFea(self, indent=""): + res = "rsub " + if len(self.old_prefix) or len(self.old_suffix): + if len(self.old_prefix): + res += " ".join(asFea(g) for g in self.old_prefix) + " " + res += " ".join(asFea(g) + "'" for g in self.glyphs) + if len(self.old_suffix): + res += " " + " ".join(asFea(g) for g in self.old_suffix) + else: + res += " ".join(map(asFea, self.glyphs)) + res += " by {};".format(" ".join(asFea(g) for g in self.replacements)) + return res + + +class SingleSubstStatement(Statement): + def __init__(self, location, glyphs, replace, prefix, suffix, forceChain): + Statement.__init__(self, location) + self.prefix, self.suffix = prefix, suffix + self.forceChain = forceChain + self.glyphs = glyphs + self.replacements = replace + + def build(self, builder): + prefix = [p.glyphSet() 
for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + originals = self.glyphs[0].glyphSet() + replaces = self.replacements[0].glyphSet() + if len(replaces) == 1: + replaces = replaces * len(originals) + builder.add_single_subst(self.location, prefix, suffix, + OrderedDict(zip(originals, replaces)), + self.forceChain) + + def asFea(self, indent=""): + res = "sub " + if len(self.prefix) or len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(asFea(g) for g in self.prefix) + " " + res += " ".join(asFea(g) + "'" for g in self.glyphs) + if len(self.suffix): + res += " " + " ".join(asFea(g) for g in self.suffix) + else: + res += " ".join(asFea(g) for g in self.glyphs) + res += " by {};".format(" ".join(asFea(g) for g in self.replacements)) + return res + + +class ScriptStatement(Statement): def __init__(self, location, script): - self.location = location + Statement.__init__(self, location) self.script = script + def build(self, builder): + builder.set_script(self.location, self.script) -class SubtableStatement(object): - def __init__(self, location): - self.location = location + def asFea(self, indent=""): + return "script {};".format(self.script.strip()) -class SubstitutionRule(object): - def __init__(self, location, old, new): - self.location, self.old, self.new = (location, old, new) - self.old_prefix = [] - self.old_suffix = [] - self.lookups = [None] * len(old) +class SinglePosStatement(Statement): + def __init__(self, location, pos, prefix, suffix, forceChain): + Statement.__init__(self, location) + self.pos, self.prefix, self.suffix = pos, prefix, suffix + self.forceChain = forceChain + + def build(self, builder): + prefix = [p.glyphSet() for p in self.prefix] + suffix = [s.glyphSet() for s in self.suffix] + pos = [(g.glyphSet(), value) for g, value in self.pos] + builder.add_single_pos(self.location, prefix, suffix, + pos, self.forceChain) + + def asFea(self, indent=""): + res = "pos " + if len(self.prefix) or 
len(self.suffix) or self.forceChain: + if len(self.prefix): + res += " ".join(map(asFea, self.prefix)) + " " + res += " ".join([asFea(x[0]) + "'" + ( + (" " + x[1].makeString()) if x[1] else "") for x in self.pos]) + if len(self.suffix): + res += " " + " ".join(map(asFea, self.suffix)) + else: + res += " ".join([asFea(x[0]) + " " + + (x[1].makeString() if x[1] else "") for x in self.pos]) + res += ";" + return res -class ValueRecord(object): - def __init__(self, location, xPlacement, yPlacement, xAdvance, yAdvance): - self.location = location +class SubtableStatement(Statement): + def __init__(self, location): + Statement.__init__(self, location) + + +class ValueRecord(Expression): + def __init__(self, location, vertical, + xPlacement, yPlacement, xAdvance, yAdvance, + xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice): + Expression.__init__(self, location) self.xPlacement, self.yPlacement = (xPlacement, yPlacement) self.xAdvance, self.yAdvance = (xAdvance, yAdvance) + self.xPlaDevice, self.yPlaDevice = (xPlaDevice, yPlaDevice) + self.xAdvDevice, self.yAdvDevice = (xAdvDevice, yAdvDevice) + self.vertical = vertical + + def __eq__(self, other): + return (self.xPlacement == other.xPlacement and + self.yPlacement == other.yPlacement and + self.xAdvance == other.xAdvance and + self.yAdvance == other.yAdvance and + self.xPlaDevice == other.xPlaDevice and + self.xAdvDevice == other.xAdvDevice) + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return (hash(self.xPlacement) ^ hash(self.yPlacement) ^ + hash(self.xAdvance) ^ hash(self.yAdvance) ^ + hash(self.xPlaDevice) ^ hash(self.yPlaDevice) ^ + hash(self.xAdvDevice) ^ hash(self.yAdvDevice)) + + def makeString(self, vertical=None): + x, y = self.xPlacement, self.yPlacement + xAdvance, yAdvance = self.xAdvance, self.yAdvance + xPlaDevice, yPlaDevice = self.xPlaDevice, self.yPlaDevice + xAdvDevice, yAdvDevice = self.xAdvDevice, self.yAdvDevice + if vertical is None: + vertical = 
self.vertical + + # Try format A, if possible. + if x is None and y is None: + if xAdvance is None and vertical: + return str(yAdvance) + elif yAdvance is None and not vertical: + return str(xAdvance) + + # Try format B, if possible. + if (xPlaDevice is None and yPlaDevice is None and + xAdvDevice is None and yAdvDevice is None): + return "<%s %s %s %s>" % (x, y, xAdvance, yAdvance) + + # Last resort is format C. + return "<%s %s %s %s %s %s %s %s>" % ( + x, y, xAdvance, yAdvance, + deviceToString(xPlaDevice), deviceToString(yPlaDevice), + deviceToString(xAdvDevice), deviceToString(yAdvDevice)) -class ValueRecordDefinition(object): +class ValueRecordDefinition(Statement): def __init__(self, location, name, value): - self.location = location + Statement.__init__(self, location) self.name = name self.value = value + + def asFea(self, indent=""): + return "valueRecordDef {} {};".format(self.value.asFea(), self.name) + + +def simplify_name_attributes(pid, eid, lid): + if pid == 3 and eid == 1 and lid == 1033: + return "" + elif pid == 1 and eid == 0 and lid == 0: + return "1" + else: + return "{} {} {}".format(pid, eid, lid) + + +class NameRecord(Statement): + def __init__(self, location, nameID, platformID, + platEncID, langID, string): + Statement.__init__(self, location) + self.nameID = nameID + self.platformID = platformID + self.platEncID = platEncID + self.langID = langID + self.string = string + + def build(self, builder): + builder.add_name_record( + self.location, self.nameID, self.platformID, + self.platEncID, self.langID, self.string) + + def asFea(self, indent=""): + def escape(c, escape_pattern): + # Also escape U+0022 QUOTATION MARK and U+005C REVERSE SOLIDUS + if c >= 0x20 and c <= 0x7E and c not in (0x22, 0x5C): + return unichr(c) + else: + return escape_pattern % c + encoding = getEncoding(self.platformID, self.platEncID, self.langID) + if encoding is None: + raise FeatureLibError("Unsupported encoding", self.location) + s = tobytes(self.string, 
encoding=encoding) + if encoding == "utf_16_be": + escaped_string = "".join([ + escape(byteord(s[i]) * 256 + byteord(s[i + 1]), r"\%04x") + for i in range(0, len(s), 2)]) + else: + escaped_string = "".join([escape(byteord(b), r"\%02x") for b in s]) + plat = simplify_name_attributes( + self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return "nameid {} {}\"{}\";".format(self.nameID, plat, escaped_string) + + +class FeatureNameStatement(NameRecord): + def build(self, builder): + NameRecord.build(self, builder) + builder.add_featureName(self.location, self.nameID) + + def asFea(self, indent=""): + if self.nameID == "size": + tag = "sizemenuname" + else: + tag = "name" + plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID) + if plat != "": + plat += " " + return "{} {}\"{}\";".format(tag, plat, self.string) + + +class SizeParameters(Statement): + def __init__(self, location, DesignSize, SubfamilyID, RangeStart, + RangeEnd): + Statement.__init__(self, location) + self.DesignSize = DesignSize + self.SubfamilyID = SubfamilyID + self.RangeStart = RangeStart + self.RangeEnd = RangeEnd + + def build(self, builder): + builder.set_size_parameters(self.location, self.DesignSize, + self.SubfamilyID, self.RangeStart, self.RangeEnd) + + def asFea(self, indent=""): + res = "parameters {:.1f} {}".format(self.DesignSize, self.SubfamilyID) + if self.RangeStart != 0 or self.RangeEnd != 0: + res += " {} {}".format(int(self.RangeStart * 10), int(self.RangeEnd * 10)) + return res + ";" + + +class BaseAxis(Statement): + def __init__(self, location, bases, scripts, vertical): + Statement.__init__(self, location) + self.bases = bases + self.scripts = scripts + self.vertical = vertical + + def build(self, builder): + builder.set_base_axis(self.bases, self.scripts, self.vertical) + + def asFea(self, indent=""): + direction = "Vert" if self.vertical else "Horiz" + scripts = ["{} {} {}".format(a[0], a[1], " ".join(map(str, a[2]))) for a in 
self.scripts] + return "{}Axis.BaseTagList {};\n{}{}Axis.BaseScriptList {};".format( + direction, " ".join(self.bases), indent, direction, ", ".join(scripts)) + + +class OS2Field(Statement): + def __init__(self, location, key, value): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + builder.add_os2_field(self.key, self.value) + + def asFea(self, indent=""): + def intarr2str(x): + return " ".join(map(str, x)) + numbers = ("FSType", "TypoAscender", "TypoDescender", "TypoLineGap", + "winAscent", "winDescent", "XHeight", "CapHeight", + "WeightClass", "WidthClass", "LowerOpSize", "UpperOpSize") + ranges = ("UnicodeRange", "CodePageRange") + keywords = dict([(x.lower(), [x, str]) for x in numbers]) + keywords.update([(x.lower(), [x, intarr2str]) for x in ranges]) + keywords["panose"] = ["Panose", intarr2str] + keywords["vendor"] = ["Vendor", lambda y: '"{}"'.format(y)] + if self.key in keywords: + return "{} {};".format(keywords[self.key][0], keywords[self.key][1](self.value)) + return "" # should raise exception + + +class HheaField(Statement): + def __init__(self, location, key, value): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + builder.add_hhea_field(self.key, self.value) + + def asFea(self, indent=""): + fields = ("CaretOffset", "Ascender", "Descender", "LineGap") + keywords = dict([(x.lower(), x) for x in fields]) + return "{} {};".format(keywords[self.key], self.value) + + +class VheaField(Statement): + def __init__(self, location, key, value): + Statement.__init__(self, location) + self.key = key + self.value = value + + def build(self, builder): + builder.add_vhea_field(self.key, self.value) + + def asFea(self, indent=""): + fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap") + keywords = dict([(x.lower(), x) for x in fields]) + return "{} {};".format(keywords[self.key], self.value) diff -Nru 
fonttools-3.0/Snippets/fontTools/feaLib/builder.py fonttools-3.21.2/Snippets/fontTools/feaLib/builder.py --- fonttools-3.0/Snippets/fontTools/feaLib/builder.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/builder.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1503 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import binary2num, safeEval +from fontTools.feaLib.error import FeatureLibError +from fontTools.feaLib.parser import Parser +from fontTools.otlLib import builder as otl +from fontTools.ttLib import newTable, getTableModule +from fontTools.ttLib.tables import otBase, otTables +import itertools + + +def addOpenTypeFeatures(font, featurefile): + builder = Builder(font, featurefile) + builder.build() + + +def addOpenTypeFeaturesFromString(font, features, filename=None): + featurefile = UnicodeIO(tounicode(features)) + if filename: + # the directory containing 'filename' is used as the root of relative + # include paths; if None is provided, the current directory is assumed + featurefile.name = filename + addOpenTypeFeatures(font, featurefile) + + +class Builder(object): + def __init__(self, font, featurefile): + self.font = font + self.file = featurefile + self.glyphMap = font.getReverseGlyphMap() + self.default_language_systems_ = set() + self.script_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + self.language_systems = set() + self.named_lookups_ = {} + self.cur_lookup_ = None + self.cur_lookup_name_ = None + self.cur_feature_name_ = None + self.lookups_ = [] + self.features_ = {} # ('latn', 'DEU ', 'smcp') --> [LookupBuilder*] + self.parseTree = None + self.required_features_ = {} # ('latn', 'DEU ') --> 'scmp' + # for feature 'aalt' + self.aalt_features_ = [] # [(location, featureName)*], for 'aalt' + self.aalt_location_ = None + 
self.aalt_alternates_ = {} + # for 'featureNames' + self.featureNames_ = [] + self.featureNames_ids_ = {} + # for feature 'size' + self.size_parameters_ = None + # for table 'head' + self.fontRevision_ = None # 2.71 + # for table 'name' + self.names_ = [] + # for table 'BASE' + self.base_horiz_axis_ = None + self.base_vert_axis_ = None + # for table 'GDEF' + self.attachPoints_ = {} # "a" --> {3, 7} + self.ligCaretCoords_ = {} # "f_f_i" --> {300, 600} + self.ligCaretPoints_ = {} # "f_f_i" --> {3, 7} + self.glyphClassDefs_ = {} # "fi" --> (2, (file, line, column)) + self.markAttach_ = {} # "acute" --> (4, (file, line, column)) + self.markAttachClassID_ = {} # frozenset({"acute", "grave"}) --> 4 + self.markFilterSets_ = {} # frozenset({"acute", "grave"}) --> 4 + # for table 'OS/2' + self.os2_ = {} + # for table 'hhea' + self.hhea_ = {} + # for table 'vhea' + self.vhea_ = {} + + def build(self): + self.parseTree = Parser(self.file, self.glyphMap).parse() + self.parseTree.build(self) + self.build_feature_aalt_() + self.build_head() + self.build_hhea() + self.build_vhea() + self.build_name() + self.build_OS_2() + for tag in ('GPOS', 'GSUB'): + table = self.makeTable(tag) + if (table.ScriptList.ScriptCount > 0 or + table.FeatureList.FeatureCount > 0 or + table.LookupList.LookupCount > 0): + fontTable = self.font[tag] = newTable(tag) + fontTable.table = table + elif tag in self.font: + del self.font[tag] + gdef = self.buildGDEF() + if gdef: + self.font["GDEF"] = gdef + elif "GDEF" in self.font: + del self.font["GDEF"] + base = self.buildBASE() + if base: + self.font["BASE"] = base + elif "BASE" in self.font: + del self.font["BASE"] + + def get_chained_lookup_(self, location, builder_class): + result = builder_class(self.font, location) + result.lookupflag = self.lookupflag_ + result.markFilterSet = self.lookupflag_markFilterSet_ + self.lookups_.append(result) + return result + + def add_lookup_to_feature_(self, lookup, feature_name): + for script, lang in 
self.language_systems: + key = (script, lang, feature_name) + self.features_.setdefault(key, []).append(lookup) + + def get_lookup_(self, location, builder_class): + if (self.cur_lookup_ and + type(self.cur_lookup_) == builder_class and + self.cur_lookup_.lookupflag == self.lookupflag_ and + self.cur_lookup_.markFilterSet == + self.lookupflag_markFilterSet_): + return self.cur_lookup_ + if self.cur_lookup_name_ and self.cur_lookup_: + raise FeatureLibError( + "Within a named lookup block, all rules must be of " + "the same lookup type and flag", location) + self.cur_lookup_ = builder_class(self.font, location) + self.cur_lookup_.lookupflag = self.lookupflag_ + self.cur_lookup_.markFilterSet = self.lookupflag_markFilterSet_ + self.lookups_.append(self.cur_lookup_) + if self.cur_lookup_name_: + # We are starting a lookup rule inside a named lookup block. + self.named_lookups_[self.cur_lookup_name_] = self.cur_lookup_ + if self.cur_feature_name_: + # We are starting a lookup rule inside a feature. This includes + # lookup rules inside named lookups inside features. + self.add_lookup_to_feature_(self.cur_lookup_, + self.cur_feature_name_) + return self.cur_lookup_ + + def build_feature_aalt_(self): + if not self.aalt_features_ and not self.aalt_alternates_: + return + alternates = {g: set(a) for g, a in self.aalt_alternates_.items()} + for location, name in self.aalt_features_ + [(None, "aalt")]: + feature = [(script, lang, feature, lookups) + for (script, lang, feature), lookups + in self.features_.items() + if feature == name] + # "aalt" does not have to specify its own lookups, but it might. 
+ if not feature and name != "aalt": + raise FeatureLibError("Feature %s has not been defined" % name, + location) + for script, lang, feature, lookups in feature: + for lookup in lookups: + for glyph, alts in lookup.getAlternateGlyphs().items(): + alternates.setdefault(glyph, set()).update(alts) + single = {glyph: list(repl)[0] for glyph, repl in alternates.items() + if len(repl) == 1} + # TODO: Figure out the glyph alternate ordering used by makeotf. + # https://github.com/fonttools/fonttools/issues/836 + multi = {glyph: sorted(repl, key=self.font.getGlyphID) + for glyph, repl in alternates.items() + if len(repl) > 1} + if not single and not multi: + return + self.features_ = {(script, lang, feature): lookups + for (script, lang, feature), lookups + in self.features_.items() + if feature != "aalt"} + old_lookups = self.lookups_ + self.lookups_ = [] + self.start_feature(self.aalt_location_, "aalt") + if single: + single_lookup = self.get_lookup_(location, SingleSubstBuilder) + single_lookup.mapping = single + if multi: + multi_lookup = self.get_lookup_(location, AlternateSubstBuilder) + multi_lookup.alternates = multi + self.end_feature() + self.lookups_.extend(old_lookups) + + def build_head(self): + if not self.fontRevision_: + return + table = self.font.get("head") + if not table: # this only happens for unit tests + table = self.font["head"] = newTable("head") + table.decompile(b"\0" * 54, self.font) + table.tableVersion = 1.0 + table.created = table.modified = 3406620153 # 2011-12-13 11:22:33 + table.fontRevision = self.fontRevision_ + + def build_hhea(self): + if not self.hhea_: + return + table = self.font.get("hhea") + if not table: # this only happens for unit tests + table = self.font["hhea"] = newTable("hhea") + table.decompile(b"\0" * 36, self.font) + table.tableVersion = 0x00010000 + if "caretoffset" in self.hhea_: + table.caretOffset = self.hhea_["caretoffset"] + if "ascender" in self.hhea_: + table.ascent = self.hhea_["ascender"] + if "descender" in 
self.hhea_: + table.descent = self.hhea_["descender"] + if "linegap" in self.hhea_: + table.lineGap = self.hhea_["linegap"] + + def build_vhea(self): + if not self.vhea_: + return + table = self.font.get("vhea") + if not table: # this only happens for unit tests + table = self.font["vhea"] = newTable("vhea") + table.decompile(b"\0" * 36, self.font) + table.tableVersion = 0x00011000 + if "verttypoascender" in self.vhea_: + table.ascent = self.vhea_["verttypoascender"] + if "verttypodescender" in self.vhea_: + table.descent = self.vhea_["verttypodescender"] + if "verttypolinegap" in self.vhea_: + table.lineGap = self.vhea_["verttypolinegap"] + + def get_user_name_id(self, table): + # Try to find first unused font-specific name id + nameIDs = [name.nameID for name in table.names] + for user_name_id in range(256, 32767): + if user_name_id not in nameIDs: + return user_name_id + + def buildFeatureParams(self, tag): + params = None + if tag == "size": + params = otTables.FeatureParamsSize() + params.DesignSize, params.SubfamilyID, params.RangeStart, \ + params.RangeEnd = self.size_parameters_ + if tag in self.featureNames_ids_: + params.SubfamilyNameID = self.featureNames_ids_[tag] + else: + params.SubfamilyNameID = 0 + elif tag in self.featureNames_: + assert tag in self.featureNames_ids_ + params = otTables.FeatureParamsStylisticSet() + params.Version = 0 + params.UINameID = self.featureNames_ids_[tag] + return params + + def build_name(self): + if not self.names_: + return + table = self.font.get("name") + if not table: # this only happens for unit tests + table = self.font["name"] = newTable("name") + table.names = [] + for name in self.names_: + nameID, platformID, platEncID, langID, string = name + if not isinstance(nameID, int): + # A featureNames name and nameID is actually the tag + tag = nameID + if tag not in self.featureNames_ids_: + self.featureNames_ids_[tag] = self.get_user_name_id(table) + assert self.featureNames_ids_[tag] is not None + nameID = 
self.featureNames_ids_[tag] + table.setName(string, nameID, platformID, platEncID, langID) + + def build_OS_2(self): + if not self.os2_: + return + table = self.font.get("OS/2") + if not table: # this only happens for unit tests + table = self.font["OS/2"] = newTable("OS/2") + data = b"\0" * sstruct.calcsize(getTableModule("OS/2").OS2_format_0) + table.decompile(data, self.font) + version = 0 + if "fstype" in self.os2_: + table.fsType = self.os2_["fstype"] + if "panose" in self.os2_: + panose = getTableModule("OS/2").Panose() + panose.bFamilyType, panose.bSerifStyle, panose.bWeight,\ + panose.bProportion, panose.bContrast, panose.bStrokeVariation,\ + panose.bArmStyle, panose.bLetterForm, panose.bMidline, \ + panose.bXHeight = self.os2_["panose"] + table.panose = panose + if "typoascender" in self.os2_: + table.sTypoAscender = self.os2_["typoascender"] + if "typodescender" in self.os2_: + table.sTypoDescender = self.os2_["typodescender"] + if "typolinegap" in self.os2_: + table.sTypoLineGap = self.os2_["typolinegap"] + if "winascent" in self.os2_: + table.usWinAscent = self.os2_["winascent"] + if "windescent" in self.os2_: + table.usWinDescent = self.os2_["windescent"] + if "vendor" in self.os2_: + table.achVendID = safeEval("'''" + self.os2_["vendor"] + "'''") + if "weightclass" in self.os2_: + table.usWeightClass = self.os2_["weightclass"] + if "widthclass" in self.os2_: + table.usWidthClass = self.os2_["widthclass"] + if "unicoderange" in self.os2_: + table.setUnicodeRanges(self.os2_["unicoderange"]) + if "codepagerange" in self.os2_: + pages = self.build_codepages_(self.os2_["codepagerange"]) + table.ulCodePageRange1, table.ulCodePageRange2 = pages + version = 1 + if "xheight" in self.os2_: + table.sxHeight = self.os2_["xheight"] + version = 2 + if "capheight" in self.os2_: + table.sCapHeight = self.os2_["capheight"] + version = 2 + if "loweropsize" in self.os2_: + table.usLowerOpticalPointSize = self.os2_["loweropsize"] + version = 5 + if "upperopsize" in 
self.os2_: + table.usUpperOpticalPointSize = self.os2_["upperopsize"] + version = 5 + def checkattr(table, attrs): + for attr in attrs: + if not hasattr(table, attr): + setattr(table, attr, 0) + table.version = max(version, table.version) + # this only happens for unit tests + if version >= 1: + checkattr(table, ("ulCodePageRange1", "ulCodePageRange2")) + if version >= 2: + checkattr(table, ("sxHeight", "sCapHeight", "usDefaultChar", + "usBreakChar", "usMaxContext")) + if version >= 5: + checkattr(table, ("usLowerOpticalPointSize", + "usUpperOpticalPointSize")) + + def build_codepages_(self, pages): + pages2bits = { + 1252: 0, 1250: 1, 1251: 2, 1253: 3, 1254: 4, 1255: 5, 1256: 6, + 1257: 7, 1258: 8, 874: 16, 932: 17, 936: 18, 949: 19, 950: 20, + 1361: 21, 869: 48, 866: 49, 865: 50, 864: 51, 863: 52, 862: 53, + 861: 54, 860: 55, 857: 56, 855: 57, 852: 58, 775: 59, 737: 60, + 708: 61, 850: 62, 437: 63, + } + bits = [pages2bits[p] for p in pages if p in pages2bits] + pages = [] + for i in range(2): + pages.append("") + for j in range(i * 32, (i + 1) * 32): + if j in bits: + pages[i] += "1" + else: + pages[i] += "0" + return [binary2num(p[::-1]) for p in pages] + + def buildBASE(self): + if not self.base_horiz_axis_ and not self.base_vert_axis_: + return None + base = otTables.BASE() + base.Version = 0x00010000 + base.HorizAxis = self.buildBASEAxis(self.base_horiz_axis_) + base.VertAxis = self.buildBASEAxis(self.base_vert_axis_) + + result = newTable("BASE") + result.table = base + return result + + def buildBASEAxis(self, axis): + if not axis: + return + bases, scripts = axis + axis = otTables.Axis() + axis.BaseTagList = otTables.BaseTagList() + axis.BaseTagList.BaselineTag = bases + axis.BaseTagList.BaseTagCount = len(bases) + axis.BaseScriptList = otTables.BaseScriptList() + axis.BaseScriptList.BaseScriptRecord = [] + axis.BaseScriptList.BaseScriptCount = len(scripts) + for script in sorted(scripts): + record = otTables.BaseScriptRecord() + record.BaseScriptTag = 
script[0] + record.BaseScript = otTables.BaseScript() + record.BaseScript.BaseLangSysCount = 0 + record.BaseScript.BaseValues = otTables.BaseValues() + record.BaseScript.BaseValues.DefaultIndex = bases.index(script[1]) + record.BaseScript.BaseValues.BaseCoord = [] + record.BaseScript.BaseValues.BaseCoordCount = len(script[2]) + for c in script[2]: + coord = otTables.BaseCoord() + coord.Format = 1 + coord.Coordinate = c + record.BaseScript.BaseValues.BaseCoord.append(coord) + axis.BaseScriptList.BaseScriptRecord.append(record) + return axis + + def buildGDEF(self): + gdef = otTables.GDEF() + gdef.GlyphClassDef = self.buildGDEFGlyphClassDef_() + gdef.AttachList = \ + otl.buildAttachList(self.attachPoints_, self.glyphMap) + gdef.LigCaretList = \ + otl.buildLigCaretList(self.ligCaretCoords_, self.ligCaretPoints_, + self.glyphMap) + gdef.MarkAttachClassDef = self.buildGDEFMarkAttachClassDef_() + gdef.MarkGlyphSetsDef = self.buildGDEFMarkGlyphSetsDef_() + gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef else 0x00010000 + if any((gdef.GlyphClassDef, gdef.AttachList, gdef.LigCaretList, + gdef.MarkAttachClassDef, gdef.MarkGlyphSetsDef)): + result = newTable("GDEF") + result.table = gdef + return result + else: + return None + + def buildGDEFGlyphClassDef_(self): + if self.glyphClassDefs_: + classes = {g: c for (g, (c, _)) in self.glyphClassDefs_.items()} + else: + classes = {} + for lookup in self.lookups_: + classes.update(lookup.inferGlyphClasses()) + for markClass in self.parseTree.markClasses.values(): + for markClassDef in markClass.definitions: + for glyph in markClassDef.glyphSet(): + classes[glyph] = 3 + if classes: + result = otTables.GlyphClassDef() + result.classDefs = classes + return result + else: + return None + + def buildGDEFMarkAttachClassDef_(self): + classDefs = {g: c for g, (c, _) in self.markAttach_.items()} + if not classDefs: + return None + result = otTables.MarkAttachClassDef() + result.classDefs = classDefs + return result + + def 
buildGDEFMarkGlyphSetsDef_(self): + sets = [] + for glyphs, id_ in sorted(self.markFilterSets_.items(), + key=lambda item: item[1]): + sets.append(glyphs) + return otl.buildMarkGlyphSetsDef(sets, self.glyphMap) + + def buildLookups_(self, tag): + assert tag in ('GPOS', 'GSUB'), tag + for lookup in self.lookups_: + lookup.lookup_index = None + lookups = [] + for lookup in self.lookups_: + if lookup.table != tag: + continue + lookup.lookup_index = len(lookups) + lookups.append(lookup) + return [l.build() for l in lookups] + + def makeTable(self, tag): + table = getattr(otTables, tag, None)() + table.Version = 0x00010000 + table.ScriptList = otTables.ScriptList() + table.ScriptList.ScriptRecord = [] + table.FeatureList = otTables.FeatureList() + table.FeatureList.FeatureRecord = [] + table.LookupList = otTables.LookupList() + table.LookupList.Lookup = self.buildLookups_(tag) + + # Build a table for mapping (tag, lookup_indices) to feature_index. + # For example, ('liga', (2,3,7)) --> 23. + feature_indices = {} + required_feature_indices = {} # ('latn', 'DEU') --> 23 + scripts = {} # 'latn' --> {'DEU': [23, 24]} for feature #23,24 + # Sort the feature table by feature tag: + # https://github.com/behdad/fonttools/issues/568 + sortFeatureTag = lambda f: (f[0][2], f[0][1], f[0][0], f[1]) + for key, lookups in sorted(self.features_.items(), key=sortFeatureTag): + script, lang, feature_tag = key + # l.lookup_index will be None when a lookup is not needed + # for the table under construction. For example, substitution + # rules will have no lookup_index while building GPOS tables. 
+ lookup_indices = tuple([l.lookup_index for l in lookups + if l.lookup_index is not None]) + + size_feature = (tag == "GPOS" and feature_tag == "size") + if len(lookup_indices) == 0 and not size_feature: + continue + + feature_key = (feature_tag, lookup_indices) + feature_index = feature_indices.get(feature_key) + if feature_index is None: + feature_index = len(table.FeatureList.FeatureRecord) + frec = otTables.FeatureRecord() + frec.FeatureTag = feature_tag + frec.Feature = otTables.Feature() + frec.Feature.FeatureParams = self.buildFeatureParams( + feature_tag) + frec.Feature.LookupListIndex = lookup_indices + frec.Feature.LookupCount = len(lookup_indices) + table.FeatureList.FeatureRecord.append(frec) + feature_indices[feature_key] = feature_index + scripts.setdefault(script, {}).setdefault(lang, []).append( + feature_index) + if self.required_features_.get((script, lang)) == feature_tag: + required_feature_indices[(script, lang)] = feature_index + + # Build ScriptList. + for script, lang_features in sorted(scripts.items()): + srec = otTables.ScriptRecord() + srec.ScriptTag = script + srec.Script = otTables.Script() + srec.Script.DefaultLangSys = None + srec.Script.LangSysRecord = [] + for lang, feature_indices in sorted(lang_features.items()): + langrec = otTables.LangSysRecord() + langrec.LangSys = otTables.LangSys() + langrec.LangSys.LookupOrder = None + + req_feature_index = \ + required_feature_indices.get((script, lang)) + if req_feature_index is None: + langrec.LangSys.ReqFeatureIndex = 0xFFFF + else: + langrec.LangSys.ReqFeatureIndex = req_feature_index + + langrec.LangSys.FeatureIndex = [i for i in feature_indices + if i != req_feature_index] + langrec.LangSys.FeatureCount = \ + len(langrec.LangSys.FeatureIndex) + + if lang == "dflt": + srec.Script.DefaultLangSys = langrec.LangSys + else: + langrec.LangSysTag = lang + srec.Script.LangSysRecord.append(langrec) + srec.Script.LangSysCount = len(srec.Script.LangSysRecord) + 
table.ScriptList.ScriptRecord.append(srec) + + table.ScriptList.ScriptCount = len(table.ScriptList.ScriptRecord) + table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord) + table.LookupList.LookupCount = len(table.LookupList.Lookup) + return table + + def add_language_system(self, location, script, language): + # OpenType Feature File Specification, section 4.b.i + if (script == "DFLT" and language == "dflt" and + self.default_language_systems_): + raise FeatureLibError( + 'If "languagesystem DFLT dflt" is present, it must be ' + 'the first of the languagesystem statements', location) + if (script, language) in self.default_language_systems_: + raise FeatureLibError( + '"languagesystem %s %s" has already been specified' % + (script.strip(), language.strip()), location) + self.default_language_systems_.add((script, language)) + + def get_default_language_systems_(self): + # OpenType Feature File specification, 4.b.i. languagesystem: + # If no "languagesystem" statement is present, then the + # implementation must behave exactly as though the following + # statement were present at the beginning of the feature file: + # languagesystem DFLT dflt; + if self.default_language_systems_: + return frozenset(self.default_language_systems_) + else: + return frozenset({('DFLT', 'dflt')}) + + def start_feature(self, location, name): + self.language_systems = self.get_default_language_systems_() + self.script_ = 'DFLT' + self.cur_lookup_ = None + self.cur_feature_name_ = name + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + if name == "aalt": + self.aalt_location_ = location + + def end_feature(self): + assert self.cur_feature_name_ is not None + self.cur_feature_name_ = None + self.language_systems = None + self.cur_lookup_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def start_lookup_block(self, location, name): + if name in self.named_lookups_: + raise FeatureLibError( + 'Lookup "%s" has already been defined' % name, 
location) + if self.cur_feature_name_ == "aalt": + raise FeatureLibError( + "Lookup blocks cannot be placed inside 'aalt' features; " + "move it out, and then refer to it with a lookup statement", + location) + self.cur_lookup_name_ = name + self.named_lookups_[name] = None + self.cur_lookup_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def end_lookup_block(self): + assert self.cur_lookup_name_ is not None + self.cur_lookup_name_ = None + self.cur_lookup_ = None + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + + def add_lookup_call(self, lookup_name): + assert lookup_name in self.named_lookups_, lookup_name + self.cur_lookup_ = None + lookup = self.named_lookups_[lookup_name] + self.add_lookup_to_feature_(lookup, self.cur_feature_name_) + + def set_font_revision(self, location, revision): + self.fontRevision_ = revision + + def set_language(self, location, language, include_default, required): + assert(len(language) == 4) + if self.cur_feature_name_ in ('aalt', 'size'): + raise FeatureLibError( + "Language statements are not allowed " + "within \"feature %s\"" % self.cur_feature_name_, location) + if language != 'dflt' and self.script_ == 'DFLT': + raise FeatureLibError("Need non-DFLT script when using non-dflt " + "language (was: \"%s\")" % language, location) + self.cur_lookup_ = None + + key = (self.script_, language, self.cur_feature_name_) + if not include_default: + # don't include any lookups added by script DFLT in this feature + self.features_[key] = [] + elif language != 'dflt': + # add rules defined between script statement and its first following + # language statement to each of its explicitly specified languages: + # http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html#4.b.ii + lookups = self.features_.get((key[0], 'dflt', key[2])) + dflt_lookups = self.features_.get(('DFLT', 'dflt', key[2]), []) + if lookups: + if key[:2] in self.get_default_language_systems_(): + lookups = [l for l in 
lookups if l not in dflt_lookups] + self.features_.setdefault(key, []).extend(lookups) + if self.script_ == 'DFLT': + langsys = set(self.get_default_language_systems_()) + else: + langsys = set() + langsys.add((self.script_, language)) + self.language_systems = frozenset(langsys) + + if required: + key = (self.script_, language) + if key in self.required_features_: + raise FeatureLibError( + "Language %s (script %s) has already " + "specified feature %s as its required feature" % ( + language.strip(), self.script_.strip(), + self.required_features_[key].strip()), + location) + self.required_features_[key] = self.cur_feature_name_ + + def getMarkAttachClass_(self, location, glyphs): + glyphs = frozenset(glyphs) + id_ = self.markAttachClassID_.get(glyphs) + if id_ is not None: + return id_ + id_ = len(self.markAttachClassID_) + 1 + self.markAttachClassID_[glyphs] = id_ + for glyph in glyphs: + if glyph in self.markAttach_: + _, loc = self.markAttach_[glyph] + raise FeatureLibError( + "Glyph %s already has been assigned " + "a MarkAttachmentType at %s:%d:%d" % ( + glyph, loc[0], loc[1], loc[2]), + location) + self.markAttach_[glyph] = (id_, location) + return id_ + + def getMarkFilterSet_(self, location, glyphs): + glyphs = frozenset(glyphs) + id_ = self.markFilterSets_.get(glyphs) + if id_ is not None: + return id_ + id_ = len(self.markFilterSets_) + self.markFilterSets_[glyphs] = id_ + return id_ + + def set_lookup_flag(self, location, value, markAttach, markFilter): + value = value & 0xFF + if markAttach: + markAttachClass = self.getMarkAttachClass_(location, markAttach) + value = value | (markAttachClass << 8) + if markFilter: + markFilterSet = self.getMarkFilterSet_(location, markFilter) + value = value | 0x10 + self.lookupflag_markFilterSet_ = markFilterSet + else: + self.lookupflag_markFilterSet_ = None + self.lookupflag_ = value + + def set_script(self, location, script): + if self.cur_feature_name_ in ('aalt', 'size'): + raise FeatureLibError( + "Script 
statements are not allowed " + "within \"feature %s\"" % self.cur_feature_name_, location) + self.cur_lookup_ = None + self.script_ = script + self.lookupflag_ = 0 + self.lookupflag_markFilterSet_ = None + self.set_language(location, "dflt", + include_default=True, required=False) + + def find_lookup_builders_(self, lookups): + """Helper for building chain contextual substitutions + + Given a list of lookup names, finds the LookupBuilder for each name. + If an input name is None, it gets mapped to a None LookupBuilder. + """ + lookup_builders = [] + for lookup in lookups: + if lookup is not None: + lookup_builders.append(self.named_lookups_.get(lookup.name)) + else: + lookup_builders.append(None) + return lookup_builders + + def add_attach_points(self, location, glyphs, contourPoints): + for glyph in glyphs: + self.attachPoints_.setdefault(glyph, set()).update(contourPoints) + + def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups): + lookup = self.get_lookup_(location, ChainContextPosBuilder) + lookup.rules.append((prefix, glyphs, suffix, + self.find_lookup_builders_(lookups))) + + def add_chain_context_subst(self, location, + prefix, glyphs, suffix, lookups): + lookup = self.get_lookup_(location, ChainContextSubstBuilder) + lookup.substitutions.append((prefix, glyphs, suffix, + self.find_lookup_builders_(lookups))) + + def add_alternate_subst(self, location, + prefix, glyph, suffix, replacement): + if self.cur_feature_name_ == "aalt": + alts = self.aalt_alternates_.setdefault(glyph, set()) + alts.update(replacement) + return + if prefix or suffix: + chain = self.get_lookup_(location, ChainContextSubstBuilder) + lookup = self.get_chained_lookup_(location, AlternateSubstBuilder) + chain.substitutions.append((prefix, [glyph], suffix, [lookup])) + else: + lookup = self.get_lookup_(location, AlternateSubstBuilder) + if glyph in lookup.alternates: + raise FeatureLibError( + 'Already defined alternates for glyph "%s"' % glyph, + location) + 
lookup.alternates[glyph] = replacement + + def add_feature_reference(self, location, featureName): + if self.cur_feature_name_ != "aalt": + raise FeatureLibError( + 'Feature references are only allowed inside "feature aalt"', + location) + self.aalt_features_.append((location, featureName)) + + def add_featureName(self, location, tag): + self.featureNames_.append(tag) + + def set_base_axis(self, bases, scripts, vertical): + if vertical: + self.base_vert_axis_ = (bases, scripts) + else: + self.base_horiz_axis_ = (bases, scripts) + + def set_size_parameters(self, location, DesignSize, SubfamilyID, + RangeStart, RangeEnd): + if self.cur_feature_name_ != 'size': + raise FeatureLibError( + "Parameters statements are not allowed " + "within \"feature %s\"" % self.cur_feature_name_, location) + self.size_parameters_ = [DesignSize, SubfamilyID, RangeStart, RangeEnd] + for script, lang in self.language_systems: + key = (script, lang, self.cur_feature_name_) + self.features_.setdefault(key, []) + + def add_ligature_subst(self, location, + prefix, glyphs, suffix, replacement, forceChain): + if prefix or suffix or forceChain: + chain = self.get_lookup_(location, ChainContextSubstBuilder) + lookup = self.get_chained_lookup_(location, LigatureSubstBuilder) + chain.substitutions.append((prefix, glyphs, suffix, [lookup])) + else: + lookup = self.get_lookup_(location, LigatureSubstBuilder) + + # OpenType feature file syntax, section 5.d, "Ligature substitution": + # "Since the OpenType specification does not allow ligature + # substitutions to be specified on target sequences that contain + # glyph classes, the implementation software will enumerate + # all specific glyph sequences if glyph classes are detected" + for g in sorted(itertools.product(*glyphs)): + lookup.ligatures[g] = replacement + + def add_multiple_subst(self, location, + prefix, glyph, suffix, replacements): + if prefix or suffix: + chain = self.get_lookup_(location, ChainContextSubstBuilder) + sub = 
self.get_chained_lookup_(location, MultipleSubstBuilder) + sub.mapping[glyph] = replacements + chain.substitutions.append((prefix, [{glyph}], suffix, [sub])) + return + lookup = self.get_lookup_(location, MultipleSubstBuilder) + if glyph in lookup.mapping: + raise FeatureLibError( + 'Already defined substitution for glyph "%s"' % glyph, + location) + lookup.mapping[glyph] = replacements + + def add_reverse_chain_single_subst(self, location, old_prefix, + old_suffix, mapping): + lookup = self.get_lookup_(location, ReverseChainSingleSubstBuilder) + lookup.substitutions.append((old_prefix, old_suffix, mapping)) + + def add_single_subst(self, location, prefix, suffix, mapping, forceChain): + if self.cur_feature_name_ == "aalt": + for (from_glyph, to_glyph) in mapping.items(): + alts = self.aalt_alternates_.setdefault(from_glyph, set()) + alts.add(to_glyph) + return + if prefix or suffix or forceChain: + self.add_single_subst_chained_(location, prefix, suffix, mapping) + return + lookup = self.get_lookup_(location, SingleSubstBuilder) + for (from_glyph, to_glyph) in mapping.items(): + if from_glyph in lookup.mapping: + raise FeatureLibError( + 'Already defined rule for replacing glyph "%s" by "%s"' % + (from_glyph, lookup.mapping[from_glyph]), + location) + lookup.mapping[from_glyph] = to_glyph + + def find_chainable_SingleSubst_(self, chain, glyphs): + """Helper for add_single_subst_chained_()""" + for _, _, _, substitutions in chain.substitutions: + for sub in substitutions: + if (isinstance(sub, SingleSubstBuilder) and + not any(g in glyphs for g in sub.mapping.keys())): + return sub + return None + + def add_single_subst_chained_(self, location, prefix, suffix, mapping): + # https://github.com/behdad/fonttools/issues/512 + chain = self.get_lookup_(location, ChainContextSubstBuilder) + sub = self.find_chainable_SingleSubst_(chain, set(mapping.keys())) + if sub is None: + sub = self.get_chained_lookup_(location, SingleSubstBuilder) + sub.mapping.update(mapping) + 
chain.substitutions.append((prefix, [mapping.keys()], suffix, [sub])) + + def add_cursive_pos(self, location, glyphclass, entryAnchor, exitAnchor): + lookup = self.get_lookup_(location, CursivePosBuilder) + lookup.add_attachment( + location, glyphclass, + makeOpenTypeAnchor(entryAnchor), + makeOpenTypeAnchor(exitAnchor)) + + def add_marks_(self, location, lookupBuilder, marks): + """Helper for add_mark_{base,liga,mark}_pos.""" + for _, markClass in marks: + for markClassDef in markClass.definitions: + for mark in markClassDef.glyphs.glyphSet(): + if mark not in lookupBuilder.marks: + otMarkAnchor = makeOpenTypeAnchor(markClassDef.anchor) + lookupBuilder.marks[mark] = ( + markClass.name, otMarkAnchor) + else: + existingMarkClass = lookupBuilder.marks[mark][0] + if markClass.name != existingMarkClass: + raise FeatureLibError( + "Glyph %s cannot be in both @%s and @%s" % ( + mark, existingMarkClass, markClass.name), + location) + + def add_mark_base_pos(self, location, bases, marks): + builder = self.get_lookup_(location, MarkBasePosBuilder) + self.add_marks_(location, builder, marks) + for baseAnchor, markClass in marks: + otBaseAnchor = makeOpenTypeAnchor(baseAnchor) + for base in bases: + builder.bases.setdefault(base, {})[markClass.name] = ( + otBaseAnchor) + + def add_mark_lig_pos(self, location, ligatures, components): + builder = self.get_lookup_(location, MarkLigPosBuilder) + componentAnchors = [] + for marks in components: + anchors = {} + self.add_marks_(location, builder, marks) + for ligAnchor, markClass in marks: + anchors[markClass.name] = makeOpenTypeAnchor(ligAnchor) + componentAnchors.append(anchors) + for glyph in ligatures: + builder.ligatures[glyph] = componentAnchors + + def add_mark_mark_pos(self, location, baseMarks, marks): + builder = self.get_lookup_(location, MarkMarkPosBuilder) + self.add_marks_(location, builder, marks) + for baseAnchor, markClass in marks: + otBaseAnchor = makeOpenTypeAnchor(baseAnchor) + for baseMark in baseMarks: + 
builder.baseMarks.setdefault(baseMark, {})[markClass.name] = ( + otBaseAnchor) + + def add_class_pair_pos(self, location, glyphclass1, value1, + glyphclass2, value2): + lookup = self.get_lookup_(location, PairPosBuilder) + lookup.addClassPair(location, glyphclass1, value1, glyphclass2, value2) + + def add_specific_pair_pos(self, location, glyph1, value1, glyph2, value2): + lookup = self.get_lookup_(location, PairPosBuilder) + lookup.addGlyphPair(location, glyph1, value1, glyph2, value2) + + def add_single_pos(self, location, prefix, suffix, pos, forceChain): + if prefix or suffix or forceChain: + self.add_single_pos_chained_(location, prefix, suffix, pos) + else: + lookup = self.get_lookup_(location, SinglePosBuilder) + for glyphs, value in pos: + for glyph in glyphs: + lookup.add_pos(location, glyph, value) + + def find_chainable_SinglePos_(self, lookups, glyphs, value): + """Helper for add_single_pos_chained_()""" + for look in lookups: + if all(look.can_add(glyph, value) for glyph in glyphs): + return look + return None + + def add_single_pos_chained_(self, location, prefix, suffix, pos): + # https://github.com/fonttools/fonttools/issues/514 + chain = self.get_lookup_(location, ChainContextPosBuilder) + targets = [] + for _, _, _, lookups in chain.rules: + for lookup in lookups: + if isinstance(lookup, SinglePosBuilder): + targets.append(lookup) + subs = [] + for glyphs, value in pos: + if value is None: + subs.append(None) + continue + otValue, _ = makeOpenTypeValueRecord(value, pairPosContext=False) + sub = self.find_chainable_SinglePos_(targets, glyphs, otValue) + if sub is None: + sub = self.get_chained_lookup_(location, SinglePosBuilder) + targets.append(sub) + for glyph in glyphs: + sub.add_pos(location, glyph, value) + subs.append(sub) + assert len(pos) == len(subs), (pos, subs) + chain.rules.append( + (prefix, [g for g, v in pos], suffix, subs)) + + def setGlyphClass_(self, location, glyph, glyphClass): + oldClass, oldLocation = 
self.glyphClassDefs_.get(glyph, (None, None)) + if oldClass and oldClass != glyphClass: + raise FeatureLibError( + "Glyph %s was assigned to a different class at %s:%s:%s" % + (glyph, oldLocation[0], oldLocation[1], oldLocation[2]), + location) + self.glyphClassDefs_[glyph] = (glyphClass, location) + + def add_glyphClassDef(self, location, baseGlyphs, ligatureGlyphs, + markGlyphs, componentGlyphs): + for glyph in baseGlyphs: + self.setGlyphClass_(location, glyph, 1) + for glyph in ligatureGlyphs: + self.setGlyphClass_(location, glyph, 2) + for glyph in markGlyphs: + self.setGlyphClass_(location, glyph, 3) + for glyph in componentGlyphs: + self.setGlyphClass_(location, glyph, 4) + + def add_ligatureCaretByIndex_(self, location, glyphs, carets): + for glyph in glyphs: + self.ligCaretPoints_.setdefault(glyph, set()).update(carets) + + def add_ligatureCaretByPos_(self, location, glyphs, carets): + for glyph in glyphs: + self.ligCaretCoords_.setdefault(glyph, set()).update(carets) + + def add_name_record(self, location, nameID, platformID, platEncID, + langID, string): + self.names_.append([nameID, platformID, platEncID, langID, string]) + + def add_os2_field(self, key, value): + self.os2_[key] = value + + def add_hhea_field(self, key, value): + self.hhea_[key] = value + + def add_vhea_field(self, key, value): + self.vhea_[key] = value + + +def makeOpenTypeAnchor(anchor): + """ast.Anchor --> otTables.Anchor""" + if anchor is None: + return None + deviceX, deviceY = None, None + if anchor.xDeviceTable is not None: + deviceX = otl.buildDevice(dict(anchor.xDeviceTable)) + if anchor.yDeviceTable is not None: + deviceY = otl.buildDevice(dict(anchor.yDeviceTable)) + return otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, + deviceX, deviceY) + + +_VALUEREC_ATTRS = { + name[0].lower() + name[1:]: (name, isDevice) + for _, name, isDevice, _ in otBase.valueRecordFormat + if not name.startswith("Reserved") +} + + +def makeOpenTypeValueRecord(v, pairPosContext): + 
"""ast.ValueRecord --> (otBase.ValueRecord, int ValueFormat)""" + if v is None: + return None, 0 + + vr = {} + for astName, (otName, isDevice) in _VALUEREC_ATTRS.items(): + val = getattr(v, astName, None) + if val: + vr[otName] = otl.buildDevice(dict(val)) if isDevice else val + if pairPosContext and not vr: + vr = {"YAdvance": 0} if v.vertical else {"XAdvance": 0} + valRec = otl.buildValue(vr) + return valRec, valRec.getFormat() + + +class LookupBuilder(object): + def __init__(self, font, location, table, lookup_type): + self.font = font + self.glyphMap = font.getReverseGlyphMap() + self.location = location + self.table, self.lookup_type = table, lookup_type + self.lookupflag = 0 + self.markFilterSet = None + self.lookup_index = None # assigned when making final tables + assert table in ('GPOS', 'GSUB') + + def equals(self, other): + return (isinstance(other, self.__class__) and + self.table == other.table and + self.lookupflag == other.lookupflag and + self.markFilterSet == other.markFilterSet) + + def inferGlyphClasses(self): + """Infers glyph glasses for the GDEF table, such as {"cedilla":3}.""" + return {} + + def getAlternateGlyphs(self): + """Helper for building 'aalt' features.""" + return {} + + def buildLookup_(self, subtables): + return otl.buildLookup(subtables, self.lookupflag, self.markFilterSet) + + def buildMarkClasses_(self, marks): + """{"cedilla": ("BOTTOM", ast.Anchor), ...} --> {"BOTTOM":0, "TOP":1} + + Helper for MarkBasePostBuilder, MarkLigPosBuilder, and + MarkMarkPosBuilder. Seems to return the same numeric IDs + for mark classes as the AFDKO makeotf tool. 
+ """ + ids = {} + for mark in sorted(marks.keys(), key=self.font.getGlyphID): + markClassName, _markAnchor = marks[mark] + if markClassName not in ids: + ids[markClassName] = len(ids) + return ids + + def setBacktrackCoverage_(self, prefix, subtable): + subtable.BacktrackGlyphCount = len(prefix) + subtable.BacktrackCoverage = [] + for p in reversed(prefix): + coverage = otl.buildCoverage(p, self.glyphMap) + subtable.BacktrackCoverage.append(coverage) + + def setLookAheadCoverage_(self, suffix, subtable): + subtable.LookAheadGlyphCount = len(suffix) + subtable.LookAheadCoverage = [] + for s in suffix: + coverage = otl.buildCoverage(s, self.glyphMap) + subtable.LookAheadCoverage.append(coverage) + + def setInputCoverage_(self, glyphs, subtable): + subtable.InputGlyphCount = len(glyphs) + subtable.InputCoverage = [] + for g in glyphs: + coverage = otl.buildCoverage(g, self.glyphMap) + subtable.InputCoverage.append(coverage) + + +class AlternateSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 3) + self.alternates = {} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.alternates == other.alternates) + + def build(self): + subtable = otl.buildAlternateSubstSubtable(self.alternates) + return self.buildLookup_([subtable]) + + def getAlternateGlyphs(self): + return self.alternates + + +class ChainContextPosBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 8) + self.rules = [] # (prefix, input, suffix, lookups) + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.rules == other.rules) + + def build(self): + subtables = [] + for (prefix, glyphs, suffix, lookups) in self.rules: + st = otTables.ChainContextPos() + subtables.append(st) + st.Format = 3 + self.setBacktrackCoverage_(prefix, st) + self.setLookAheadCoverage_(suffix, st) + self.setInputCoverage_(glyphs, st) + + 
st.PosCount = len([l for l in lookups if l is not None]) + st.PosLookupRecord = [] + for sequenceIndex, l in enumerate(lookups): + if l is not None: + rec = otTables.PosLookupRecord() + rec.SequenceIndex = sequenceIndex + rec.LookupListIndex = l.lookup_index + st.PosLookupRecord.append(rec) + return self.buildLookup_(subtables) + + +class ChainContextSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 6) + self.substitutions = [] # (prefix, input, suffix, lookups) + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.substitutions == other.substitutions) + + def build(self): + subtables = [] + for (prefix, input, suffix, lookups) in self.substitutions: + st = otTables.ChainContextSubst() + subtables.append(st) + st.Format = 3 + self.setBacktrackCoverage_(prefix, st) + self.setLookAheadCoverage_(suffix, st) + self.setInputCoverage_(input, st) + + st.SubstCount = len([l for l in lookups if l is not None]) + st.SubstLookupRecord = [] + for sequenceIndex, l in enumerate(lookups): + if l is not None: + rec = otTables.SubstLookupRecord() + rec.SequenceIndex = sequenceIndex + rec.LookupListIndex = l.lookup_index + st.SubstLookupRecord.append(rec) + return self.buildLookup_(subtables) + + def getAlternateGlyphs(self): + result = {} + for (_prefix, _input, _suffix, lookups) in self.substitutions: + for lookup in lookups: + alts = lookup.getAlternateGlyphs() + for glyph, replacements in alts.items(): + result.setdefault(glyph, set()).update(replacements) + return result + + +class LigatureSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 4) + self.ligatures = {} # {('f','f','i'): 'f_f_i'} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.ligatures == other.ligatures) + + def build(self): + subtable = otl.buildLigatureSubstSubtable(self.ligatures) + return 
self.buildLookup_([subtable]) + + +class MultipleSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 2) + self.mapping = {} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.mapping == other.mapping) + + def build(self): + subtable = otl.buildMultipleSubstSubtable(self.mapping) + return self.buildLookup_([subtable]) + + +class CursivePosBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 3) + self.attachments = {} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.attachments == other.attachments) + + def add_attachment(self, location, glyphs, entryAnchor, exitAnchor): + for glyph in glyphs: + self.attachments[glyph] = (entryAnchor, exitAnchor) + + def build(self): + st = otl.buildCursivePosSubtable(self.attachments, self.glyphMap) + return self.buildLookup_([st]) + + +class MarkBasePosBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 4) + self.marks = {} # glyphName -> (markClassName, anchor) + self.bases = {} # glyphName -> {markClassName: anchor} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.marks == other.marks and + self.bases == other.bases) + + def inferGlyphClasses(self): + result = {glyph: 1 for glyph in self.bases} + result.update({glyph: 3 for glyph in self.marks}) + return result + + def build(self): + markClasses = self.buildMarkClasses_(self.marks) + marks = {mark: (markClasses[mc], anchor) + for mark, (mc, anchor) in self.marks.items()} + bases = {} + for glyph, anchors in self.bases.items(): + bases[glyph] = {markClasses[mc]: anchor + for (mc, anchor) in anchors.items()} + subtables = otl.buildMarkBasePos(marks, bases, self.glyphMap) + return self.buildLookup_(subtables) + + +class MarkLigPosBuilder(LookupBuilder): + def __init__(self, font, 
location): + LookupBuilder.__init__(self, font, location, 'GPOS', 5) + self.marks = {} # glyphName -> (markClassName, anchor) + self.ligatures = {} # glyphName -> [{markClassName: anchor}, ...] + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.marks == other.marks and + self.ligatures == other.ligatures) + + def inferGlyphClasses(self): + result = {glyph: 2 for glyph in self.ligatures} + result.update({glyph: 3 for glyph in self.marks}) + return result + + def build(self): + markClasses = self.buildMarkClasses_(self.marks) + marks = {mark: (markClasses[mc], anchor) + for mark, (mc, anchor) in self.marks.items()} + ligs = {} + for lig, components in self.ligatures.items(): + ligs[lig] = [] + for c in components: + ligs[lig].append({markClasses[mc]: a for mc, a in c.items()}) + subtables = otl.buildMarkLigPos(marks, ligs, self.glyphMap) + return self.buildLookup_(subtables) + + +class MarkMarkPosBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 6) + self.marks = {} # glyphName -> (markClassName, anchor) + self.baseMarks = {} # glyphName -> {markClassName: anchor} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.marks == other.marks and + self.baseMarks == other.baseMarks) + + def inferGlyphClasses(self): + result = {glyph: 3 for glyph in self.baseMarks} + result.update({glyph: 3 for glyph in self.marks}) + return result + + def build(self): + markClasses = self.buildMarkClasses_(self.marks) + markClassList = sorted(markClasses.keys(), key=markClasses.get) + marks = {mark: (markClasses[mc], anchor) + for mark, (mc, anchor) in self.marks.items()} + + st = otTables.MarkMarkPos() + st.Format = 1 + st.ClassCount = len(markClasses) + st.Mark1Coverage = otl.buildCoverage(marks, self.glyphMap) + st.Mark2Coverage = otl.buildCoverage(self.baseMarks, self.glyphMap) + st.Mark1Array = otl.buildMarkArray(marks, self.glyphMap) + st.Mark2Array = 
otTables.Mark2Array() + st.Mark2Array.Mark2Count = len(st.Mark2Coverage.glyphs) + st.Mark2Array.Mark2Record = [] + for base in st.Mark2Coverage.glyphs: + anchors = [self.baseMarks[base].get(mc) for mc in markClassList] + st.Mark2Array.Mark2Record.append(otl.buildMark2Record(anchors)) + return self.buildLookup_([st]) + + +class ReverseChainSingleSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 8) + self.substitutions = [] # (prefix, suffix, mapping) + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.substitutions == other.substitutions) + + def build(self): + subtables = [] + for prefix, suffix, mapping in self.substitutions: + st = otTables.ReverseChainSingleSubst() + st.Format = 1 + self.setBacktrackCoverage_(prefix, st) + self.setLookAheadCoverage_(suffix, st) + st.Coverage = otl.buildCoverage(mapping.keys(), self.glyphMap) + st.GlyphCount = len(mapping) + st.Substitute = [mapping[g] for g in st.Coverage.glyphs] + subtables.append(st) + return self.buildLookup_(subtables) + + +class SingleSubstBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GSUB', 1) + self.mapping = {} + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.mapping == other.mapping) + + def build(self): + subtable = otl.buildSingleSubstSubtable(self.mapping) + return self.buildLookup_([subtable]) + + def getAlternateGlyphs(self): + return {glyph: set([repl]) for glyph, repl in self.mapping.items()} + + +class ClassPairPosSubtableBuilder(object): + def __init__(self, builder, valueFormat1, valueFormat2): + self.builder_ = builder + self.classDef1_, self.classDef2_ = None, None + self.values_ = {} # (glyphclass1, glyphclass2) --> (value1, value2) + self.valueFormat1_, self.valueFormat2_ = valueFormat1, valueFormat2 + self.forceSubtableBreak_ = False + self.subtables_ = [] + + def addPair(self, gc1, 
value1, gc2, value2): + mergeable = (not self.forceSubtableBreak_ and + self.classDef1_ is not None and + self.classDef1_.canAdd(gc1) and + self.classDef2_ is not None and + self.classDef2_.canAdd(gc2)) + if not mergeable: + self.flush_() + self.classDef1_ = otl.ClassDefBuilder(useClass0=True) + self.classDef2_ = otl.ClassDefBuilder(useClass0=False) + self.values_ = {} + self.classDef1_.add(gc1) + self.classDef2_.add(gc2) + self.values_[(gc1, gc2)] = (value1, value2) + + def addSubtableBreak(self): + self.forceSubtableBreak_ = True + + def subtables(self): + self.flush_() + return self.subtables_ + + def flush_(self): + if self.classDef1_ is None or self.classDef2_ is None: + return + st = otl.buildPairPosClassesSubtable(self.values_, + self.builder_.glyphMap) + self.subtables_.append(st) + + +class PairPosBuilder(LookupBuilder): + SUBTABLE_BREAK_ = "SUBTABLE_BREAK" + + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 2) + self.pairs = [] # [(gc1, value1, gc2, value2)*] + self.glyphPairs = {} # (glyph1, glyph2) --> (value1, value2) + self.locations = {} # (gc1, gc2) --> (filepath, line, column) + + def addClassPair(self, location, glyphclass1, value1, glyphclass2, value2): + self.pairs.append((glyphclass1, value1, glyphclass2, value2)) + + def addGlyphPair(self, location, glyph1, value1, glyph2, value2): + key = (glyph1, glyph2) + oldValue = self.glyphPairs.get(key, None) + if oldValue is not None: + otherLoc = self.locations[key] + raise FeatureLibError( + 'Already defined position for pair %s %s at %s:%d:%d' + % (glyph1, glyph2, otherLoc[0], otherLoc[1], otherLoc[2]), + location) + val1, _ = makeOpenTypeValueRecord(value1, pairPosContext=True) + val2, _ = makeOpenTypeValueRecord(value2, pairPosContext=True) + self.glyphPairs[key] = (val1, val2) + self.locations[key] = location + + def add_subtable_break(self, location): + self.pairs.append((self.SUBTABLE_BREAK_, self.SUBTABLE_BREAK_, + self.SUBTABLE_BREAK_, 
self.SUBTABLE_BREAK_)) + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + self.glyphPairs == other.glyphPairs and + self.pairs == other.pairs) + + def build(self): + builders = {} + builder = None + for glyphclass1, value1, glyphclass2, value2 in self.pairs: + if glyphclass1 is self.SUBTABLE_BREAK_: + if builder is not None: + builder.addSubtableBreak() + continue + val1, valFormat1 = makeOpenTypeValueRecord( + value1, pairPosContext=True) + val2, valFormat2 = makeOpenTypeValueRecord( + value2, pairPosContext=True) + builder = builders.get((valFormat1, valFormat2)) + if builder is None: + builder = ClassPairPosSubtableBuilder( + self, valFormat1, valFormat2) + builders[(valFormat1, valFormat2)] = builder + builder.addPair(glyphclass1, val1, glyphclass2, val2) + subtables = [] + if self.glyphPairs: + subtables.extend( + otl.buildPairPosGlyphs(self.glyphPairs, self.glyphMap)) + for key in sorted(builders.keys()): + subtables.extend(builders[key].subtables()) + return self.buildLookup_(subtables) + + +class SinglePosBuilder(LookupBuilder): + def __init__(self, font, location): + LookupBuilder.__init__(self, font, location, 'GPOS', 1) + self.locations = {} # glyph -> (filename, line, column) + self.mapping = {} # glyph -> otTables.ValueRecord + + def add_pos(self, location, glyph, valueRecord): + otValueRecord, _ = makeOpenTypeValueRecord( + valueRecord, pairPosContext=False) + if not self.can_add(glyph, otValueRecord): + otherLoc = self.locations[glyph] + raise FeatureLibError( + 'Already defined different position for glyph "%s" at %s:%d:%d' + % (glyph, otherLoc[0], otherLoc[1], otherLoc[2]), + location) + if otValueRecord: + self.mapping[glyph] = otValueRecord + self.locations[glyph] = location + + def can_add(self, glyph, value): + assert isinstance(value, otl.ValueRecord) + curValue = self.mapping.get(glyph) + return curValue is None or curValue == value + + def equals(self, other): + return (LookupBuilder.equals(self, other) and + 
self.mapping == other.mapping) + + def build(self): + subtables = otl.buildSinglePos(self.mapping, self.glyphMap) + return self.buildLookup_(subtables) diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/error.py fonttools-3.21.2/Snippets/fontTools/feaLib/error.py --- fonttools-3.0/Snippets/fontTools/feaLib/error.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/error.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals + + +class FeatureLibError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + path, line, column = self.location + return "%s:%d:%d: %s" % (path, line, column, message) + else: + return message diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/lexer.py fonttools-3.21.2/Snippets/fontTools/feaLib/lexer.py --- fonttools-3.0/Snippets/fontTools/feaLib/lexer.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/lexer.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,25 +1,14 @@ from __future__ import print_function, division, absolute_import from __future__ import unicode_literals -import codecs +from fontTools.misc.py23 import * +from fontTools.feaLib.error import FeatureLibError +import re import os -class LexerError(Exception): - def __init__(self, message, location): - Exception.__init__(self, message) - self.location = location - - def __str__(self): - message = Exception.__str__(self) - if self.location: - path, line, column = self.location - return "%s:%d:%d: %s" % (path, line, column, message) - else: - return message - - class Lexer(object): NUMBER = "NUMBER" + FLOAT = "FLOAT" STRING = "STRING" NAME = "NAME" FILENAME = "FILENAME" @@ -28,15 +17,18 @@ SYMBOL = "SYMBOL" COMMENT = "COMMENT" NEWLINE = "NEWLINE" + ANONYMOUS_BLOCK = 
"ANONYMOUS_BLOCK" CHAR_WHITESPACE_ = " \t" CHAR_NEWLINE_ = "\r\n" - CHAR_SYMBOL_ = ";:-+'{}[]<>()=" + CHAR_SYMBOL_ = ",;:-+'{}[]<>()=" CHAR_DIGIT_ = "0123456789" CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - CHAR_NAME_START_ = CHAR_LETTER_ + "_.\\" - CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_." + CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\" + CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-" + + RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.]+$") MODE_NORMAL_ = "NORMAL" MODE_FILENAME_ = "FILENAME" @@ -59,13 +51,16 @@ def __next__(self): # Python 3 while True: token_type, token, location = self.next_() - if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}: + if token_type != Lexer.NEWLINE: return (token_type, token, location) + def location_(self): + column = self.pos_ - self.line_start_ + 1 + return (self.filename_, self.line_, column) + def next_(self): self.scan_over_(Lexer.CHAR_WHITESPACE_) - column = self.pos_ - self.line_start_ + 1 - location = (self.filename_, self.line_, column) + location = self.location_() start = self.pos_ text = self.text_ limit = len(text) @@ -90,11 +85,13 @@ if self.mode_ is Lexer.MODE_FILENAME_: if cur_char != "(": - raise LexerError("Expected '(' before file name", location) + raise FeatureLibError("Expected '(' before file name", + location) self.scan_until_(")") cur_char = text[self.pos_] if self.pos_ < limit else None if cur_char != ")": - raise LexerError("Expected ')' after file name", location) + raise FeatureLibError("Expected ')' after file name", + location) self.pos_ += 1 self.mode_ = Lexer.MODE_NORMAL_ return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location) @@ -108,11 +105,15 @@ self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) glyphclass = text[start + 1:self.pos_] if len(glyphclass) < 1: - raise LexerError("Expected glyph class name", location) - if len(glyphclass) > 30: - raise LexerError( - "Glyph class names must 
not be longer than 30 characters", + raise FeatureLibError("Expected glyph class name", location) + if len(glyphclass) > 63: + raise FeatureLibError( + "Glyph class names must not be longer than 63 characters", location) + if not Lexer.RE_GLYPHCLASS.match(glyphclass): + raise FeatureLibError( + "Glyph class names must consist of letters, digits, " + "underscore, or period", location) return (Lexer.GLYPHCLASS, glyphclass, location) if cur_char in Lexer.CHAR_NAME_START_: self.pos_ += 1 @@ -127,23 +128,35 @@ return (Lexer.NUMBER, int(text[start:self.pos_], 16), location) if cur_char in Lexer.CHAR_DIGIT_: self.scan_over_(Lexer.CHAR_DIGIT_) - return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if self.pos_ >= limit or text[self.pos_] != ".": + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + self.scan_over_(".") + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.FLOAT, float(text[start:self.pos_]), location) if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: self.pos_ += 1 self.scan_over_(Lexer.CHAR_DIGIT_) - return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if self.pos_ >= limit or text[self.pos_] != ".": + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + self.scan_over_(".") + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.FLOAT, float(text[start:self.pos_]), location) if cur_char in Lexer.CHAR_SYMBOL_: self.pos_ += 1 return (Lexer.SYMBOL, cur_char, location) if cur_char == '"': self.pos_ += 1 - self.scan_until_('"\r\n') + self.scan_until_('"') if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': self.pos_ += 1 - return (Lexer.STRING, text[start + 1:self.pos_ - 1], location) + # strip newlines embedded within a string + string = re.sub("[\r\n]", "", text[start + 1:self.pos_ - 1]) + return (Lexer.STRING, string, location) else: - raise LexerError("Expected '\"' to terminate string", location) - raise LexerError("Unexpected character: '%s'" % cur_char, location) + raise 
FeatureLibError("Expected '\"' to terminate string", + location) + raise FeatureLibError("Unexpected character: %r" % cur_char, + location) def scan_over_(self, valid): p = self.pos_ @@ -157,10 +170,25 @@ p += 1 self.pos_ = p + def scan_anonymous_block(self, tag): + location = self.location_() + tag = tag.strip() + self.scan_until_(Lexer.CHAR_NEWLINE_) + self.scan_over_(Lexer.CHAR_NEWLINE_) + regexp = r'}\s*' + tag + r'\s*;' + split = re.split(regexp, self.text_[self.pos_:], maxsplit=1) + if len(split) != 2: + raise FeatureLibError( + "Expected '} %s;' to terminate anonymous block" % tag, + location) + self.pos_ += len(split[0]) + return (Lexer.ANONYMOUS_BLOCK, split[0], location) + class IncludingLexer(object): - def __init__(self, filename): - self.lexers_ = [self.make_lexer_(filename, (filename, 0, 0))] + def __init__(self, featurefile): + self.lexers_ = [self.make_lexer_(featurefile)] + self.featurefilepath = self.lexers_[0].filename_ def __iter__(self): return self @@ -172,22 +200,22 @@ while self.lexers_: lexer = self.lexers_[-1] try: - token_type, token, location = lexer.next() + token_type, token, location = next(lexer) except StopIteration: self.lexers_.pop() continue if token_type is Lexer.NAME and token == "include": fname_type, fname_token, fname_location = lexer.next() if fname_type is not Lexer.FILENAME: - raise LexerError("Expected file name", fname_location) - semi_type, semi_token, semi_location = lexer.next() - if semi_type is not Lexer.SYMBOL or semi_token != ";": - raise LexerError("Expected ';'", semi_location) - curpath, _ = os.path.split(lexer.filename_) + raise FeatureLibError("Expected file name", fname_location) + #semi_type, semi_token, semi_location = lexer.next() + #if semi_type is not Lexer.SYMBOL or semi_token != ";": + # raise FeatureLibError("Expected ';'", semi_location) + curpath = os.path.dirname(self.featurefilepath) path = os.path.join(curpath, fname_token) if len(self.lexers_) >= 5: - raise LexerError("Too many recursive 
includes", - fname_location) + raise FeatureLibError("Too many recursive includes", + fname_location) self.lexers_.append(self.make_lexer_(path, fname_location)) continue else: @@ -195,9 +223,20 @@ raise StopIteration() @staticmethod - def make_lexer_(filename, location): - try: - with codecs.open(filename, "rb", "utf-8") as f: - return Lexer(f.read(), filename) - except IOError as err: - raise LexerError(str(err), location) + def make_lexer_(file_or_path, location=None): + if hasattr(file_or_path, "read"): + fileobj, closing = file_or_path, False + else: + filename, closing = file_or_path, True + try: + fileobj = open(filename, "r", encoding="utf-8") + except IOError as err: + raise FeatureLibError(str(err), location) + data = fileobj.read() + filename = fileobj.name if hasattr(fileobj, "name") else "" + if closing: + fileobj.close() + return Lexer(data, filename) + + def scan_anonymous_block(self, tag): + return self.lexers_[-1].scan_anonymous_block(tag) diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/lexer_test.py fonttools-3.21.2/Snippets/fontTools/feaLib/lexer_test.py --- fonttools-3.0/Snippets/fontTools/feaLib/lexer_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/lexer_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,160 +0,0 @@ -from __future__ import print_function, division, absolute_import -from __future__ import unicode_literals -from fontTools.feaLib.lexer import IncludingLexer, Lexer, LexerError -import os -import unittest - - -def lex(s): - return [(typ, tok) for (typ, tok, _) in Lexer(s, "test.fea")] - - -class LexerErrorTest(unittest.TestCase): - def test_str(self): - err = LexerError("Squeak!", ("foo.fea", 23, 42)) - self.assertEqual(str(err), "foo.fea:23:42: Squeak!") - - def test_str_nolocation(self): - err = LexerError("Squeak!", None) - self.assertEqual(str(err), "Squeak!") - - -class LexerTest(unittest.TestCase): - def __init__(self, methodName): - unittest.TestCase.__init__(self, methodName) - # 
Python 3 renamed assertRaisesRegexp to assertRaisesRegex, - # and fires deprecation warnings if a program uses the old name. - if not hasattr(self, "assertRaisesRegex"): - self.assertRaisesRegex = self.assertRaisesRegexp - - def test_empty(self): - self.assertEqual(lex(""), []) - self.assertEqual(lex(" \t "), []) - - def test_name(self): - self.assertEqual(lex("a17"), [(Lexer.NAME, "a17")]) - self.assertEqual(lex(".notdef"), [(Lexer.NAME, ".notdef")]) - self.assertEqual(lex("two.oldstyle"), [(Lexer.NAME, "two.oldstyle")]) - self.assertEqual(lex("_"), [(Lexer.NAME, "_")]) - self.assertEqual(lex("\\table"), [(Lexer.NAME, "\\table")]) - - def test_cid(self): - self.assertEqual(lex("\\0 \\987"), [(Lexer.CID, 0), (Lexer.CID, 987)]) - - def test_glyphclass(self): - self.assertEqual(lex("@Vowel.sc"), [(Lexer.GLYPHCLASS, "Vowel.sc")]) - self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@(a)") - self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@ A") - self.assertRaisesRegex(LexerError, "not be longer than 30 characters", - lex, "@a123456789.a123456789.a123456789.x") - - def test_include(self): - self.assertEqual(lex("include (~/foo/bar baz.fea);"), [ - (Lexer.NAME, "include"), - (Lexer.FILENAME, "~/foo/bar baz.fea"), - (Lexer.SYMBOL, ";") - ]) - self.assertEqual(lex("include # Comment\n (foo) \n;"), [ - (Lexer.NAME, "include"), - (Lexer.FILENAME, "foo"), - (Lexer.SYMBOL, ";") - ]) - self.assertRaises(LexerError, lex, "include blah") - self.assertRaises(LexerError, lex, "include (blah") - - def test_number(self): - self.assertEqual(lex("123 -456"), - [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)]) - self.assertEqual(lex("0xCAFED00D"), [(Lexer.NUMBER, 0xCAFED00D)]) - self.assertEqual(lex("0xcafed00d"), [(Lexer.NUMBER, 0xCAFED00D)]) - - def test_symbol(self): - self.assertEqual(lex("a'"), [(Lexer.NAME, "a"), (Lexer.SYMBOL, "'")]) - self.assertEqual( - lex("foo - -2"), - [(Lexer.NAME, "foo"), (Lexer.SYMBOL, "-"), (Lexer.NUMBER, -2)]) - - def 
test_comment(self): - self.assertEqual(lex("# Comment\n#"), []) - - def test_string(self): - self.assertEqual(lex('"foo" "bar"'), - [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")]) - self.assertRaises(LexerError, lambda: lex('"foo\n bar"')) - - def test_bad_character(self): - self.assertRaises(LexerError, lambda: lex("123 \u0001")) - - def test_newline(self): - lines = lambda s: [loc[1] for (_, _, loc) in Lexer(s, "test.fea")] - self.assertEqual(lines("FOO\n\nBAR\nBAZ"), [1, 3, 4]) # Unix - self.assertEqual(lines("FOO\r\rBAR\rBAZ"), [1, 3, 4]) # Macintosh - self.assertEqual(lines("FOO\r\n\r\n BAR\r\nBAZ"), [1, 3, 4]) # Windows - self.assertEqual(lines("FOO\n\rBAR\r\nBAZ"), [1, 3, 4]) # mixed - - def test_location(self): - locs = lambda s: ["%s:%d:%d" % loc - for (_, _, loc) in Lexer(s, "test.fea")] - self.assertEqual(locs("a b # Comment\n12 @x"), [ - "test.fea:1:1", "test.fea:1:3", "test.fea:2:1", - "test.fea:2:4" - ]) - - def test_scan_over_(self): - lexer = Lexer("abbacabba12", "test.fea") - self.assertEqual(lexer.pos_, 0) - lexer.scan_over_("xyz") - self.assertEqual(lexer.pos_, 0) - lexer.scan_over_("abc") - self.assertEqual(lexer.pos_, 9) - lexer.scan_over_("abc") - self.assertEqual(lexer.pos_, 9) - lexer.scan_over_("0123456789") - self.assertEqual(lexer.pos_, 11) - - def test_scan_until_(self): - lexer = Lexer("foo'bar", "test.fea") - self.assertEqual(lexer.pos_, 0) - lexer.scan_until_("'") - self.assertEqual(lexer.pos_, 3) - lexer.scan_until_("'") - self.assertEqual(lexer.pos_, 3) - - -class IncludingLexerTest(unittest.TestCase): - @staticmethod - def getpath(filename): - path, _ = os.path.split(__file__) - return os.path.join(path, "testdata", filename) - - def test_include(self): - lexer = IncludingLexer(self.getpath("include4.fea")) - result = ['%s %s:%d' % (token, os.path.split(loc[0])[1], loc[1]) - for _, token, loc in lexer] - self.assertEqual(result, [ - "I4a include4.fea:1", - "I3a include3.fea:1", - "I2a include2.fea:1", - "I1a include1.fea:1", - "I0 
include0.fea:1", - "I1b include1.fea:3", - "I2b include2.fea:3", - "I3b include3.fea:3", - "I4b include4.fea:3" - ]) - - def test_include_limit(self): - lexer = IncludingLexer(self.getpath("include6.fea")) - self.assertRaises(LexerError, lambda: list(lexer)) - - def test_include_self(self): - lexer = IncludingLexer(self.getpath("includeself.fea")) - self.assertRaises(LexerError, lambda: list(lexer)) - - def test_include_missing_file(self): - lexer = IncludingLexer(self.getpath("includemissingfile.fea")) - self.assertRaises(LexerError, lambda: list(lexer)) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/__main__.py fonttools-3.21.2/Snippets/fontTools/feaLib/__main__.py --- fonttools-3.0/Snippets/fontTools/feaLib/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/__main__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,42 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.feaLib.builder import addOpenTypeFeatures +from fontTools import configLogger +from fontTools.misc.cliTools import makeOutputFileName +import sys +import argparse +import logging + + +log = logging.getLogger("fontTools.feaLib") + + +def main(args=None): + parser = argparse.ArgumentParser( + description="Use fontTools to compile OpenType feature files (*.fea).") + parser.add_argument( + "input_fea", metavar="FEATURES", help="Path to the feature file") + parser.add_argument( + "input_font", metavar="INPUT_FONT", help="Path to the input font") + parser.add_argument( + "-o", "--output", dest="output_font", metavar="OUTPUT_FONT", + help="Path to the output font.") + parser.add_argument( + "-v", "--verbose", help="increase the logger verbosity. 
Multiple -v " + "options are allowed.", action="count", default=0) + options = parser.parse_args(args) + + levels = ["WARNING", "INFO", "DEBUG"] + configLogger(level=levels[min(len(levels) - 1, options.verbose)]) + + output_font = options.output_font or makeOutputFileName(options.input_font) + log.info("Compiling features to '%s'" % (output_font)) + + font = TTFont(options.input_font) + addOpenTypeFeatures(font, options.input_fea) + font.save(output_font) + + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/parser.py fonttools-3.21.2/Snippets/fontTools/feaLib/parser.py --- fonttools-3.0/Snippets/fontTools/feaLib/parser.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/parser.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,64 +1,137 @@ from __future__ import print_function, division, absolute_import from __future__ import unicode_literals +from fontTools.feaLib.error import FeatureLibError from fontTools.feaLib.lexer import Lexer, IncludingLexer +from fontTools.misc.encodingTools import getEncoding +from fontTools.misc.py23 import * import fontTools.feaLib.ast as ast +import logging import os import re -class ParserError(Exception): - def __init__(self, message, location): - Exception.__init__(self, message) - self.location = location - - def __str__(self): - message = Exception.__str__(self) - if self.location: - path, line, column = self.location - return "%s:%d:%d: %s" % (path, line, column, message) - else: - return message +log = logging.getLogger(__name__) class Parser(object): - def __init__(self, path): - self.doc_ = ast.FeatureFile() + extensions = {} + ast = ast + + def __init__(self, featurefile, glyphNames=(), **kwargs): + if "glyphMap" in kwargs: + from fontTools.misc.loggingTools import deprecateArgument + deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead") + if glyphNames: + raise TypeError("'glyphNames' and (deprecated) 'glyphMap' are " + "mutually 
exclusive") + glyphNames = kwargs.pop("glyphMap") + if kwargs: + raise TypeError("unsupported keyword argument%s: %s" + % ("" if len(kwargs) == 1 else "s", + ", ".join(repr(k) for k in kwargs))) + + self.glyphNames_ = set(glyphNames) + self.doc_ = self.ast.FeatureFile() self.anchors_ = SymbolTable() self.glyphclasses_ = SymbolTable() self.lookups_ = SymbolTable() self.valuerecords_ = SymbolTable() self.symbol_tables_ = { - self.anchors_, self.glyphclasses_, - self.lookups_, self.valuerecords_ + self.anchors_, self.valuerecords_ } self.next_token_type_, self.next_token_ = (None, None) + self.cur_comments_ = [] self.next_token_location_ = None - self.lexer_ = IncludingLexer(path) - self.advance_lexer_() + self.lexer_ = IncludingLexer(featurefile) + self.advance_lexer_(comments=True) def parse(self): statements = self.doc_.statements while self.next_token_type_ is not None: - self.advance_lexer_() - if self.cur_token_type_ is Lexer.GLYPHCLASS: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.cur_token_type_ is Lexer.GLYPHCLASS: statements.append(self.parse_glyphclass_definition_()) + elif self.is_cur_keyword_(("anon", "anonymous")): + statements.append(self.parse_anonymous_()) elif self.is_cur_keyword_("anchorDef"): statements.append(self.parse_anchordef_()) elif self.is_cur_keyword_("languagesystem"): statements.append(self.parse_languagesystem_()) elif self.is_cur_keyword_("lookup"): statements.append(self.parse_lookup_(vertical=False)) + elif self.is_cur_keyword_("markClass"): + statements.append(self.parse_markClass_()) elif self.is_cur_keyword_("feature"): statements.append(self.parse_feature_block_()) + elif self.is_cur_keyword_("table"): + statements.append(self.parse_table_()) elif self.is_cur_keyword_("valueRecordDef"): statements.append( self.parse_valuerecord_definition_(vertical=False)) + elif self.cur_token_type_ is Lexer.NAME and 
self.cur_token_ in self.extensions: + statements.append(self.extensions[self.cur_token_](self)) + elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";": + continue else: - raise ParserError("Expected feature, languagesystem, " - "lookup, or glyph class definition", - self.cur_token_location_) + raise FeatureLibError( + "Expected feature, languagesystem, lookup, markClass, " + "table, or glyph class definition, got {} \"{}\"".format(self.cur_token_type_, self.cur_token_), + self.cur_token_location_) return self.doc_ + def parse_anchor_(self): + self.expect_symbol_("<") + self.expect_keyword_("anchor") + location = self.cur_token_location_ + + if self.next_token_ == "NULL": + self.expect_keyword_("NULL") + self.expect_symbol_(">") + return None + + if self.next_token_type_ == Lexer.NAME: + name = self.expect_name_() + anchordef = self.anchors_.resolve(name) + if anchordef is None: + raise FeatureLibError( + 'Unknown anchor "%s"' % name, + self.cur_token_location_) + self.expect_symbol_(">") + return self.ast.Anchor(location, name, anchordef.x, anchordef.y, + anchordef.contourpoint, + xDeviceTable=None, yDeviceTable=None) + + x, y = self.expect_number_(), self.expect_number_() + + contourpoint = None + if self.next_token_ == "contourpoint": + self.expect_keyword_("contourpoint") + contourpoint = self.expect_number_() + + if self.next_token_ == "<": + xDeviceTable = self.parse_device_() + yDeviceTable = self.parse_device_() + else: + xDeviceTable, yDeviceTable = None, None + + self.expect_symbol_(">") + return self.ast.Anchor(location, None, x, y, contourpoint, + xDeviceTable, yDeviceTable) + + def parse_anchor_marks_(self): + """Parses a sequence of [ mark @MARKCLASS]*.""" + anchorMarks = [] # [(self.ast.Anchor, markClassName)*] + while self.next_token_ == "<": + anchor = self.parse_anchor_() + if anchor is None and self.next_token_ != "mark": + continue # without mark, eg. 
in GPOS type 5 + self.expect_keyword_("mark") + markClass = self.expect_markClass_reference_() + anchorMarks.append((anchor, markClass)) + return anchorMarks + def parse_anchordef_(self): assert self.is_cur_keyword_("anchorDef") location = self.cur_token_location_ @@ -69,113 +142,288 @@ contourpoint = self.expect_number_() name = self.expect_name_() self.expect_symbol_(";") - anchordef = ast.AnchorDefinition(location, name, x, y, contourpoint) + anchordef = self.ast.AnchorDefinition(location, name, x, y, contourpoint) self.anchors_.define(name, anchordef) return anchordef + def parse_anonymous_(self): + assert self.is_cur_keyword_(("anon", "anonymous")) + tag = self.expect_tag_() + _, content, location = self.lexer_.scan_anonymous_block(tag) + self.advance_lexer_() + self.expect_symbol_('}') + end_tag = self.expect_tag_() + assert tag == end_tag, "bad splitting in Lexer.scan_anonymous_block()" + self.expect_symbol_(';') + return self.ast.AnonymousBlock(tag, content, location) + + def parse_attach_(self): + assert self.is_cur_keyword_("Attach") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + contourPoints = {self.expect_number_()} + while self.next_token_ != ";": + contourPoints.add(self.expect_number_()) + self.expect_symbol_(";") + return self.ast.AttachStatement(location, glyphs, contourPoints) + + def parse_enumerate_(self, vertical): + assert self.cur_token_ in {"enumerate", "enum"} + self.advance_lexer_() + return self.parse_position_(enumerated=True, vertical=vertical) + + def parse_GlyphClassDef_(self): + """Parses 'GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENTS;'""" + assert self.is_cur_keyword_("GlyphClassDef") + location = self.cur_token_location_ + if self.next_token_ != ",": + baseGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + baseGlyphs = None + self.expect_symbol_(",") + if self.next_token_ != ",": + ligatureGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + 
ligatureGlyphs = None + self.expect_symbol_(",") + if self.next_token_ != ",": + markGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + markGlyphs = None + self.expect_symbol_(",") + if self.next_token_ != ";": + componentGlyphs = self.parse_glyphclass_(accept_glyphname=False) + else: + componentGlyphs = None + self.expect_symbol_(";") + return self.ast.GlyphClassDefStatement(location, baseGlyphs, markGlyphs, + ligatureGlyphs, componentGlyphs) + def parse_glyphclass_definition_(self): + """Parses glyph class definitions such as '@UPPERCASE = [A-Z];'""" location, name = self.cur_token_location_, self.cur_token_ self.expect_symbol_("=") glyphs = self.parse_glyphclass_(accept_glyphname=False) self.expect_symbol_(";") - if self.glyphclasses_.resolve(name) is not None: - raise ParserError("Glyph class @%s already defined" % name, - location) - glyphclass = ast.GlyphClassDefinition(location, name, glyphs) + glyphclass = self.ast.GlyphClassDefinition(location, name, glyphs) self.glyphclasses_.define(name, glyphclass) return glyphclass + def split_glyph_range_(self, name, location): + # Since v1.20, the OpenType Feature File specification allows + # for dashes in glyph names. A sequence like "a-b-c-d" could + # therefore mean a single glyph whose name happens to be + # "a-b-c-d", or it could mean a range from glyph "a" to glyph + # "b-c-d", or a range from glyph "a-b" to glyph "c-d", or a + # range from glyph "a-b-c" to glyph "d".Technically, this + # example could be resolved because the (pretty complex) + # definition of glyph ranges renders most of these splits + # invalid. But the specification does not say that a compiler + # should try to apply such fancy heuristics. To encourage + # unambiguous feature files, we therefore try all possible + # splits and reject the feature file if there are multiple + # splits possible. It is intentional that we don't just emit a + # warning; warnings tend to get ignored. 
To fix the problem, + # font designers can trivially add spaces around the intended + # split point, and we emit a compiler error that suggests + # how exactly the source should be rewritten to make things + # unambiguous. + parts = name.split("-") + solutions = [] + for i in range(len(parts)): + start, limit = "-".join(parts[0:i]), "-".join(parts[i:]) + if start in self.glyphNames_ and limit in self.glyphNames_: + solutions.append((start, limit)) + if len(solutions) == 1: + start, limit = solutions[0] + return start, limit + elif len(solutions) == 0: + raise FeatureLibError( + "\"%s\" is not a glyph in the font, and it can not be split " + "into a range of known glyphs" % name, location) + else: + ranges = " or ".join(["\"%s - %s\"" % (s, l) for s, l in solutions]) + raise FeatureLibError( + "Ambiguous glyph range \"%s\"; " + "please use %s to clarify what you mean" % (name, ranges), + location) + def parse_glyphclass_(self, accept_glyphname): - result = set() - if accept_glyphname and self.next_token_type_ is Lexer.NAME: - result.add(self.expect_name_()) - return result + if (accept_glyphname and + self.next_token_type_ in (Lexer.NAME, Lexer.CID)): + glyph = self.expect_glyph_() + return self.ast.GlyphName(self.cur_token_location_, glyph) if self.next_token_type_ is Lexer.GLYPHCLASS: self.advance_lexer_() gc = self.glyphclasses_.resolve(self.cur_token_) if gc is None: - raise ParserError("Unknown glyph class @%s" % self.cur_token_, - self.cur_token_location_) - result.update(gc.glyphs) - return result + raise FeatureLibError( + "Unknown glyph class @%s" % self.cur_token_, + self.cur_token_location_) + if isinstance(gc, self.ast.MarkClass): + return self.ast.MarkClassName(self.cur_token_location_, gc) + else: + return self.ast.GlyphClassName(self.cur_token_location_, gc) self.expect_symbol_("[") + location = self.cur_token_location_ + glyphs = self.ast.GlyphClass(location) while self.next_token_ != "]": - self.advance_lexer_() - if self.cur_token_type_ is 
Lexer.NAME: + if self.next_token_type_ is Lexer.NAME: + glyph = self.expect_glyph_() + location = self.cur_token_location_ + if '-' in glyph and glyph not in self.glyphNames_: + start, limit = self.split_glyph_range_(glyph, location) + glyphs.add_range( + start, limit, + self.make_glyph_range_(location, start, limit)) + elif self.next_token_ == "-": + start = glyph + self.expect_symbol_("-") + limit = self.expect_glyph_() + glyphs.add_range( + start, limit, + self.make_glyph_range_(location, start, limit)) + else: + glyphs.append(glyph) + elif self.next_token_type_ is Lexer.CID: + glyph = self.expect_glyph_() if self.next_token_ == "-": - range_location_ = self.cur_token_location_ + range_location = self.cur_token_location_ range_start = self.cur_token_ self.expect_symbol_("-") - range_end = self.expect_name_() - result.update(self.make_glyph_range_(range_location_, - range_start, - range_end)) + range_end = self.expect_cid_() + glyphs.add_cid_range(range_start, range_end, + self.make_cid_range_(range_location, + range_start, range_end)) else: - result.add(self.cur_token_) - elif self.cur_token_type_ is Lexer.GLYPHCLASS: + glyphs.append("cid%05d" % self.cur_token_) + elif self.next_token_type_ is Lexer.GLYPHCLASS: + self.advance_lexer_() gc = self.glyphclasses_.resolve(self.cur_token_) if gc is None: - raise ParserError( + raise FeatureLibError( "Unknown glyph class @%s" % self.cur_token_, self.cur_token_location_) - result.update(gc.glyphs) + if isinstance(gc, self.ast.MarkClass): + gc = self.ast.MarkClassName(self.cur_token_location_, gc) + else: + gc = self.ast.GlyphClassName(self.cur_token_location_, gc) + glyphs.add_class(gc) else: - raise ParserError( + raise FeatureLibError( "Expected glyph name, glyph range, " "or glyph class reference", - self.cur_token_location_) + self.next_token_location_) self.expect_symbol_("]") - return result + return glyphs - def parse_glyph_pattern_(self): - prefix, glyphs, lookups, suffix = ([], [], [], []) - while 
self.next_token_ not in {"by", "from", ";"}: + def parse_class_name_(self): + name = self.expect_class_name_() + gc = self.glyphclasses_.resolve(name) + if gc is None: + raise FeatureLibError( + "Unknown glyph class @%s" % name, + self.cur_token_location_) + if isinstance(gc, self.ast.MarkClass): + return self.ast.MarkClassName(self.cur_token_location_, gc) + else: + return self.ast.GlyphClassName(self.cur_token_location_, gc) + + def parse_glyph_pattern_(self, vertical): + prefix, glyphs, lookups, values, suffix = ([], [], [], [], []) + hasMarks = False + while self.next_token_ not in {"by", "from", ";", ","}: gc = self.parse_glyphclass_(accept_glyphname=True) marked = False if self.next_token_ == "'": self.expect_symbol_("'") - marked = True + hasMarks = marked = True if marked: + if suffix: + # makeotf also reports this as an error, while FontForge + # silently inserts ' in all the intervening glyphs. + # https://github.com/fonttools/fonttools/pull/1096 + raise FeatureLibError( + "Unsupported contextual target sequence: at most " + "one run of marked (') glyph/class names allowed", + self.cur_token_location_) glyphs.append(gc) elif glyphs: suffix.append(gc) else: prefix.append(gc) + if self.is_next_value_(): + values.append(self.parse_valuerecord_(vertical)) + else: + values.append(None) + lookup = None if self.next_token_ == "lookup": self.expect_keyword_("lookup") if not marked: - raise ParserError("Lookups can only follow marked glyphs", - self.cur_token_location_) + raise FeatureLibError( + "Lookups can only follow marked glyphs", + self.cur_token_location_) lookup_name = self.expect_name_() lookup = self.lookups_.resolve(lookup_name) if lookup is None: - raise ParserError('Unknown lookup "%s"' % lookup_name, - self.cur_token_location_) + raise FeatureLibError( + 'Unknown lookup "%s"' % lookup_name, + self.cur_token_location_) if marked: lookups.append(lookup) if not glyphs and not suffix: # eg., "sub f f i by" assert lookups == [] - return ([], prefix, 
[None] * len(prefix), []) + return ([], prefix, [None] * len(prefix), values, [], hasMarks) else: - return (prefix, glyphs, lookups, suffix) + assert not any(values[:len(prefix)]), values + values = values[len(prefix):][:len(glyphs)] + return (prefix, glyphs, lookups, values, suffix, hasMarks) + + def parse_chain_context_(self): + location = self.cur_token_location_ + prefix, glyphs, lookups, values, suffix, hasMarks = \ + self.parse_glyph_pattern_(vertical=False) + chainContext = [(prefix, glyphs, suffix)] + hasLookups = any(lookups) + while self.next_token_ == ",": + self.expect_symbol_(",") + prefix, glyphs, lookups, values, suffix, hasMarks = \ + self.parse_glyph_pattern_(vertical=False) + chainContext.append((prefix, glyphs, suffix)) + hasLookups = hasLookups or any(lookups) + self.expect_symbol_(";") + return chainContext, hasLookups def parse_ignore_(self): assert self.is_cur_keyword_("ignore") location = self.cur_token_location_ self.advance_lexer_() if self.cur_token_ in ["substitute", "sub"]: - prefix, glyphs, lookups, suffix = self.parse_glyph_pattern_() - self.expect_symbol_(";") - return ast.IgnoreSubstitutionRule(location, prefix, glyphs, suffix) - raise ParserError("Expected \"substitute\"", self.next_token_location_) + chainContext, hasLookups = self.parse_chain_context_() + if hasLookups: + raise FeatureLibError( + "No lookups can be specified for \"ignore sub\"", + location) + return self.ast.IgnoreSubstStatement(location, chainContext) + if self.cur_token_ in ["position", "pos"]: + chainContext, hasLookups = self.parse_chain_context_() + if hasLookups: + raise FeatureLibError( + "No lookups can be specified for \"ignore pos\"", + location) + return self.ast.IgnorePosStatement(location, chainContext) + raise FeatureLibError( + "Expected \"substitute\" or \"position\"", + self.cur_token_location_) def parse_language_(self): assert self.is_cur_keyword_("language") - location, language = self.cur_token_location_, self.expect_tag_() + location = 
self.cur_token_location_ + language = self.expect_language_tag_() include_default, required = (True, False) if self.next_token_ in {"exclude_dflt", "include_dflt"}: include_default = (self.expect_name_() == "include_dflt") @@ -183,8 +431,28 @@ self.expect_keyword_("required") required = True self.expect_symbol_(";") - return ast.LanguageStatement(location, language.strip(), - include_default, required) + return self.ast.LanguageStatement(location, language, + include_default, required) + + def parse_ligatureCaretByIndex_(self): + assert self.is_cur_keyword_("LigatureCaretByIndex") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + carets = [self.expect_number_()] + while self.next_token_ != ";": + carets.append(self.expect_number_()) + self.expect_symbol_(";") + return self.ast.LigatureCaretByIndexStatement(location, glyphs, carets) + + def parse_ligatureCaretByPos_(self): + assert self.is_cur_keyword_("LigatureCaretByPos") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + carets = [self.expect_number_()] + while self.next_token_ != ";": + carets.append(self.expect_number_()) + self.expect_symbol_(";") + return self.ast.LigatureCaretByPosStatement(location, glyphs, carets) def parse_lookup_(self, vertical): assert self.is_cur_keyword_("lookup") @@ -193,37 +461,194 @@ if self.next_token_ == ";": lookup = self.lookups_.resolve(name) if lookup is None: - raise ParserError("Unknown lookup \"%s\"" % name, - self.cur_token_location_) + raise FeatureLibError("Unknown lookup \"%s\"" % name, + self.cur_token_location_) self.expect_symbol_(";") - return ast.LookupReferenceStatement(location, lookup) + return self.ast.LookupReferenceStatement(location, lookup) use_extension = False if self.next_token_ == "useExtension": self.expect_keyword_("useExtension") use_extension = True - block = ast.LookupBlock(location, name, use_extension) + block = self.ast.LookupBlock(location, name, 
use_extension) self.parse_block_(block, vertical) self.lookups_.define(name, block) return block + def parse_lookupflag_(self): + assert self.is_cur_keyword_("lookupflag") + location = self.cur_token_location_ + + # format B: "lookupflag 6;" + if self.next_token_type_ == Lexer.NUMBER: + value = self.expect_number_() + self.expect_symbol_(";") + return self.ast.LookupFlagStatement(location, value, None, None) + + # format A: "lookupflag RightToLeft MarkAttachmentType @M;" + value, markAttachment, markFilteringSet = 0, None, None + flags = { + "RightToLeft": 1, "IgnoreBaseGlyphs": 2, + "IgnoreLigatures": 4, "IgnoreMarks": 8 + } + seen = set() + while self.next_token_ != ";": + if self.next_token_ in seen: + raise FeatureLibError( + "%s can be specified only once" % self.next_token_, + self.next_token_location_) + seen.add(self.next_token_) + if self.next_token_ == "MarkAttachmentType": + self.expect_keyword_("MarkAttachmentType") + markAttachment = self.parse_class_name_() + elif self.next_token_ == "UseMarkFilteringSet": + self.expect_keyword_("UseMarkFilteringSet") + markFilteringSet = self.parse_class_name_() + elif self.next_token_ in flags: + value = value | flags[self.expect_name_()] + else: + raise FeatureLibError( + '"%s" is not a recognized lookupflag' % self.next_token_, + self.next_token_location_) + self.expect_symbol_(";") + return self.ast.LookupFlagStatement(location, value, + markAttachment, markFilteringSet) + + def parse_markClass_(self): + assert self.is_cur_keyword_("markClass") + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + anchor = self.parse_anchor_() + name = self.expect_class_name_() + self.expect_symbol_(";") + markClass = self.doc_.markClasses.get(name) + if markClass is None: + markClass = self.ast.MarkClass(name) + self.doc_.markClasses[name] = markClass + self.glyphclasses_.define(name, markClass) + mcdef = self.ast.MarkClassDefinition(location, markClass, anchor, glyphs) + 
markClass.addDefinition(mcdef) + return mcdef + + def parse_position_(self, enumerated, vertical): + assert self.cur_token_ in {"position", "pos"} + if self.next_token_ == "cursive": # GPOS type 3 + return self.parse_position_cursive_(enumerated, vertical) + elif self.next_token_ == "base": # GPOS type 4 + return self.parse_position_base_(enumerated, vertical) + elif self.next_token_ == "ligature": # GPOS type 5 + return self.parse_position_ligature_(enumerated, vertical) + elif self.next_token_ == "mark": # GPOS type 6 + return self.parse_position_mark_(enumerated, vertical) + + location = self.cur_token_location_ + prefix, glyphs, lookups, values, suffix, hasMarks = \ + self.parse_glyph_pattern_(vertical) + self.expect_symbol_(";") + + if any(lookups): + # GPOS type 8: Chaining contextual positioning; explicit lookups + if any(values): + raise FeatureLibError( + "If \"lookup\" is present, no values must be specified", + location) + return self.ast.ChainContextPosStatement( + location, prefix, glyphs, suffix, lookups) + + # Pair positioning, format A: "pos V 10 A -10;" + # Pair positioning, format B: "pos V A -20;" + if not prefix and not suffix and len(glyphs) == 2 and not hasMarks: + if values[0] is None: # Format B: "pos V A -20;" + values.reverse() + return self.ast.PairPosStatement( + location, enumerated, + glyphs[0], values[0], glyphs[1], values[1]) + + if enumerated: + raise FeatureLibError( + '"enumerate" is only allowed with pair positionings', location) + return self.ast.SinglePosStatement(location, list(zip(glyphs, values)), + prefix, suffix, forceChain=hasMarks) + + def parse_position_cursive_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("cursive") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + 'cursive attachment positioning', + location) + glyphclass = self.parse_glyphclass_(accept_glyphname=True) + entryAnchor = self.parse_anchor_() + exitAnchor = self.parse_anchor_() + 
self.expect_symbol_(";") + return self.ast.CursivePosStatement( + location, glyphclass, entryAnchor, exitAnchor) + + def parse_position_base_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("base") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + 'mark-to-base attachment positioning', + location) + base = self.parse_glyphclass_(accept_glyphname=True) + marks = self.parse_anchor_marks_() + self.expect_symbol_(";") + return self.ast.MarkBasePosStatement(location, base, marks) + + def parse_position_ligature_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("ligature") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + 'mark-to-ligature attachment positioning', + location) + ligatures = self.parse_glyphclass_(accept_glyphname=True) + marks = [self.parse_anchor_marks_()] + while self.next_token_ == "ligComponent": + self.expect_keyword_("ligComponent") + marks.append(self.parse_anchor_marks_()) + self.expect_symbol_(";") + return self.ast.MarkLigPosStatement(location, ligatures, marks) + + def parse_position_mark_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("mark") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + 'mark-to-mark attachment positioning', + location) + baseMarks = self.parse_glyphclass_(accept_glyphname=True) + marks = self.parse_anchor_marks_() + self.expect_symbol_(";") + return self.ast.MarkMarkPosStatement(location, baseMarks, marks) + def parse_script_(self): assert self.is_cur_keyword_("script") - location, script = self.cur_token_location_, self.expect_tag_() + location, script = self.cur_token_location_, self.expect_script_tag_() self.expect_symbol_(";") - return ast.ScriptStatement(location, script) + return self.ast.ScriptStatement(location, script) def parse_substitute_(self): - assert self.cur_token_ in {"substitute", "sub"} 
+ assert self.cur_token_ in {"substitute", "sub", "reversesub", "rsub"} location = self.cur_token_location_ - old_prefix, old, lookups, old_suffix = self.parse_glyph_pattern_() - + reverse = self.cur_token_ in {"reversesub", "rsub"} + old_prefix, old, lookups, values, old_suffix, hasMarks = \ + self.parse_glyph_pattern_(vertical=False) + if any(values): + raise FeatureLibError( + "Substitution statements cannot contain values", location) new = [] if self.next_token_ == "by": keyword = self.expect_keyword_("by") while self.next_token_ != ";": - new.append(self.parse_glyphclass_(accept_glyphname=True)) + gc = self.parse_glyphclass_(accept_glyphname=True) + new.append(gc) elif self.next_token_ == "from": keyword = self.expect_keyword_("from") new = [self.parse_glyphclass_(accept_glyphname=False)] @@ -231,46 +656,443 @@ keyword = None self.expect_symbol_(";") if len(new) is 0 and not any(lookups): - raise ParserError( + raise FeatureLibError( 'Expected "by", "from" or explicit lookup references', self.cur_token_location_) + # GSUB lookup type 3: Alternate substitution. 
+ # Format: "substitute a from [a.1 a.2 a.3];" if keyword == "from": - if len(old) != 1 or len(old[0]) != 1: - raise ParserError('Expected a single glyph before "from"', - location) + if reverse: + raise FeatureLibError( + 'Reverse chaining substitutions do not support "from"', + location) + if len(old) != 1 or len(old[0].glyphSet()) != 1: + raise FeatureLibError( + 'Expected a single glyph before "from"', + location) if len(new) != 1: - raise ParserError('Expected a single glyphclass after "from"', - location) - return ast.AlternateSubstitution(location, list(old[0])[0], new[0]) - - rule = ast.SubstitutionRule(location, old, new) - rule.old_prefix, rule.old_suffix = old_prefix, old_suffix - rule.lookups = lookups + raise FeatureLibError( + 'Expected a single glyphclass after "from"', + location) + return self.ast.AlternateSubstStatement( + location, old_prefix, old[0], old_suffix, new[0]) + + num_lookups = len([l for l in lookups if l is not None]) + + # GSUB lookup type 1: Single substitution. + # Format A: "substitute a by a.sc;" + # Format B: "substitute [one.fitted one.oldstyle] by one;" + # Format C: "substitute [a-d] by [A.sc-D.sc];" + if (not reverse and len(old) == 1 and len(new) == 1 and + num_lookups == 0): + glyphs = list(old[0].glyphSet()) + replacements = list(new[0].glyphSet()) + if len(replacements) == 1: + replacements = replacements * len(glyphs) + if len(glyphs) != len(replacements): + raise FeatureLibError( + 'Expected a glyph class with %d elements after "by", ' + 'but found a glyph class with %d elements' % + (len(glyphs), len(replacements)), location) + return self.ast.SingleSubstStatement( + location, old, new, + old_prefix, old_suffix, + forceChain=hasMarks + ) + + # GSUB lookup type 2: Multiple substitution. 
+ # Format: "substitute f_f_i by f f i;" + if (not reverse and + len(old) == 1 and len(old[0].glyphSet()) == 1 and + len(new) > 1 and max([len(n.glyphSet()) for n in new]) == 1 and + num_lookups == 0): + return self.ast.MultipleSubstStatement( + location, old_prefix, tuple(old[0].glyphSet())[0], old_suffix, + tuple([list(n.glyphSet())[0] for n in new])) + + # GSUB lookup type 4: Ligature substitution. + # Format: "substitute f f i by f_f_i;" + if (not reverse and + len(old) > 1 and len(new) == 1 and + len(new[0].glyphSet()) == 1 and + num_lookups == 0): + return self.ast.LigatureSubstStatement( + location, old_prefix, old, old_suffix, + list(new[0].glyphSet())[0], forceChain=hasMarks) + + # GSUB lookup type 8: Reverse chaining substitution. + if reverse: + if len(old) != 1: + raise FeatureLibError( + "In reverse chaining single substitutions, " + "only a single glyph or glyph class can be replaced", + location) + if len(new) != 1: + raise FeatureLibError( + 'In reverse chaining single substitutions, ' + 'the replacement (after "by") must be a single glyph ' + 'or glyph class', location) + if num_lookups != 0: + raise FeatureLibError( + "Reverse chaining substitutions cannot call named lookups", + location) + glyphs = sorted(list(old[0].glyphSet())) + replacements = sorted(list(new[0].glyphSet())) + if len(replacements) == 1: + replacements = replacements * len(glyphs) + if len(glyphs) != len(replacements): + raise FeatureLibError( + 'Expected a glyph class with %d elements after "by", ' + 'but found a glyph class with %d elements' % + (len(glyphs), len(replacements)), location) + return self.ast.ReverseChainSingleSubstStatement( + location, old_prefix, old_suffix, old, new) + + # GSUB lookup type 6: Chaining contextual substitution. 
+ assert len(new) == 0, new + rule = self.ast.ChainContextSubstStatement( + location, old_prefix, old, old_suffix, lookups) return rule def parse_subtable_(self): assert self.is_cur_keyword_("subtable") location = self.cur_token_location_ self.expect_symbol_(";") - return ast.SubtableStatement(location) + return self.ast.SubtableStatement(location) + + def parse_size_parameters_(self): + assert self.is_cur_keyword_("parameters") + location = self.cur_token_location_ + DesignSize = self.expect_decipoint_() + SubfamilyID = self.expect_number_() + RangeStart = 0 + RangeEnd = 0 + if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or \ + SubfamilyID != 0: + RangeStart = self.expect_decipoint_() + RangeEnd = self.expect_decipoint_() + + self.expect_symbol_(";") + return self.ast.SizeParameters(location, DesignSize, SubfamilyID, + RangeStart, RangeEnd) + + def parse_size_menuname_(self): + assert self.is_cur_keyword_("sizemenuname") + location = self.cur_token_location_ + platformID, platEncID, langID, string = self.parse_name_() + return self.ast.FeatureNameStatement(location, "size", platformID, + platEncID, langID, string) + + def parse_table_(self): + assert self.is_cur_keyword_("table") + location, name = self.cur_token_location_, self.expect_tag_() + table = self.ast.TableBlock(location, name) + self.expect_symbol_("{") + handler = { + "GDEF": self.parse_table_GDEF_, + "head": self.parse_table_head_, + "hhea": self.parse_table_hhea_, + "vhea": self.parse_table_vhea_, + "name": self.parse_table_name_, + "BASE": self.parse_table_BASE_, + "OS/2": self.parse_table_OS_2_, + }.get(name) + if handler: + handler(table) + else: + raise FeatureLibError('"table %s" is not supported' % name.strip(), + location) + self.expect_symbol_("}") + end_tag = self.expect_tag_() + if end_tag != name: + raise FeatureLibError('Expected "%s"' % name.strip(), + self.cur_token_location_) + self.expect_symbol_(";") + return table + + def parse_table_GDEF_(self, table): + statements = 
table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.is_cur_keyword_("Attach"): + statements.append(self.parse_attach_()) + elif self.is_cur_keyword_("GlyphClassDef"): + statements.append(self.parse_GlyphClassDef_()) + elif self.is_cur_keyword_("LigatureCaretByIndex"): + statements.append(self.parse_ligatureCaretByIndex_()) + elif self.is_cur_keyword_("LigatureCaretByPos"): + statements.append(self.parse_ligatureCaretByPos_()) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError( + "Expected Attach, LigatureCaretByIndex, " + "or LigatureCaretByPos", + self.cur_token_location_) + + def parse_table_head_(self, table): + statements = table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.is_cur_keyword_("FontRevision"): + statements.append(self.parse_FontRevision_()) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError("Expected FontRevision", + self.cur_token_location_) + + def parse_table_hhea_(self, table): + statements = table.statements + fields = ("CaretOffset", "Ascender", "Descender", "LineGap") + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields: + key = self.cur_token_.lower() + value = self.expect_number_() + statements.append( + self.ast.HheaField(self.cur_token_location_, key, value)) + if self.next_token_ != ";": + raise FeatureLibError("Incomplete statement", self.next_token_location_) + elif 
self.cur_token_ == ";": + continue + else: + raise FeatureLibError("Expected CaretOffset, Ascender, " + "Descender or LineGap", + self.cur_token_location_) + + def parse_table_vhea_(self, table): + statements = table.statements + fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap") + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields: + key = self.cur_token_.lower() + value = self.expect_number_() + statements.append( + self.ast.VheaField(self.cur_token_location_, key, value)) + if self.next_token_ != ";": + raise FeatureLibError("Incomplete statement", self.next_token_location_) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError("Expected VertTypoAscender, " + "VertTypoDescender or VertTypoLineGap", + self.cur_token_location_) + + def parse_table_name_(self, table): + statements = table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.is_cur_keyword_("nameid"): + statement = self.parse_nameid_() + if statement: + statements.append(statement) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError("Expected nameid", + self.cur_token_location_) + + def parse_name_(self): + platEncID = None + langID = None + if self.next_token_type_ == Lexer.NUMBER: + platformID = self.expect_number_() + location = self.cur_token_location_ + if platformID not in (1, 3): + raise FeatureLibError("Expected platform id 1 or 3", location) + if self.next_token_type_ == Lexer.NUMBER: + platEncID = self.expect_number_() + langID = self.expect_number_() + else: + platformID = 3 + location = self.cur_token_location_ + 
+ if platformID == 1: # Macintosh + platEncID = platEncID or 0 # Roman + langID = langID or 0 # English + else: # 3, Windows + platEncID = platEncID or 1 # Unicode + langID = langID or 0x0409 # English + + string = self.expect_string_() + self.expect_symbol_(";") + + encoding = getEncoding(platformID, platEncID, langID) + if encoding is None: + raise FeatureLibError("Unsupported encoding", location) + unescaped = self.unescape_string_(string, encoding) + return platformID, platEncID, langID, unescaped + + def parse_nameid_(self): + assert self.cur_token_ == "nameid", self.cur_token_ + location, nameID = self.cur_token_location_, self.expect_number_() + if nameID > 32767: + raise FeatureLibError("Name id value cannot be greater than 32767", + self.cur_token_location_) + if 1 <= nameID <= 6: + log.warning("Name id %d cannot be set from the feature file. " + "Ignoring record" % nameID) + self.parse_name_() # skip to the next record + return None + + platformID, platEncID, langID, string = self.parse_name_() + return self.ast.NameRecord(location, nameID, platformID, platEncID, + langID, string) + + def unescape_string_(self, string, encoding): + if encoding == "utf_16_be": + s = re.sub(r"\\[0-9a-fA-F]{4}", self.unescape_unichr_, string) + else: + unescape = lambda m: self.unescape_byte_(m, encoding) + s = re.sub(r"\\[0-9a-fA-F]{2}", unescape, string) + # We now have a Unicode string, but it might contain surrogate pairs. + # We convert surrogates to actual Unicode by round-tripping through + # Python's UTF-16 codec in a special mode. 
+ utf16 = tobytes(s, "utf_16_be", "surrogatepass") + return tounicode(utf16, "utf_16_be") + + @staticmethod + def unescape_unichr_(match): + n = match.group(0)[1:] + return unichr(int(n, 16)) + + @staticmethod + def unescape_byte_(match, encoding): + n = match.group(0)[1:] + return bytechr(int(n, 16)).decode(encoding) + + def parse_table_BASE_(self, table): + statements = table.statements + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.is_cur_keyword_("HorizAxis.BaseTagList"): + horiz_bases = self.parse_base_tag_list_() + elif self.is_cur_keyword_("HorizAxis.BaseScriptList"): + horiz_scripts = self.parse_base_script_list_(len(horiz_bases)) + statements.append( + self.ast.BaseAxis(self.cur_token_location_, horiz_bases, + horiz_scripts, False)) + elif self.is_cur_keyword_("VertAxis.BaseTagList"): + vert_bases = self.parse_base_tag_list_() + elif self.is_cur_keyword_("VertAxis.BaseScriptList"): + vert_scripts = self.parse_base_script_list_(len(vert_bases)) + statements.append( + self.ast.BaseAxis(self.cur_token_location_, vert_bases, + vert_scripts, True)) + elif self.cur_token_ == ";": + continue + + def parse_table_OS_2_(self, table): + statements = table.statements + numbers = ("FSType", "TypoAscender", "TypoDescender", "TypoLineGap", + "winAscent", "winDescent", "XHeight", "CapHeight", + "WeightClass", "WidthClass", "LowerOpSize", "UpperOpSize") + ranges = ("UnicodeRange", "CodePageRange") + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.cur_token_type_ is Lexer.NAME: + key = self.cur_token_.lower() + value = None + if self.cur_token_ in numbers: + value = self.expect_number_() + elif 
self.is_cur_keyword_("Panose"): + value = [] + for i in range(10): + value.append(self.expect_number_()) + elif self.cur_token_ in ranges: + value = [] + while self.next_token_ != ";": + value.append(self.expect_number_()) + elif self.is_cur_keyword_("Vendor"): + value = self.expect_string_() + statements.append( + self.ast.OS2Field(self.cur_token_location_, key, value)) + elif self.cur_token_ == ";": + continue + + def parse_base_tag_list_(self): + assert self.cur_token_ in ("HorizAxis.BaseTagList", + "VertAxis.BaseTagList"), self.cur_token_ + bases = [] + while self.next_token_ != ";": + bases.append(self.expect_script_tag_()) + self.expect_symbol_(";") + return bases + + def parse_base_script_list_(self, count): + assert self.cur_token_ in ("HorizAxis.BaseScriptList", + "VertAxis.BaseScriptList"), self.cur_token_ + scripts = [(self.parse_base_script_record_(count))] + while self.next_token_ == ",": + self.expect_symbol_(",") + scripts.append(self.parse_base_script_record_(count)) + self.expect_symbol_(";") + return scripts + + def parse_base_script_record_(self, count): + script_tag = self.expect_script_tag_() + base_tag = self.expect_script_tag_() + coords = [self.expect_number_() for i in range(count)] + return script_tag, base_tag, coords + + def parse_device_(self): + result = None + self.expect_symbol_("<") + self.expect_keyword_("device") + if self.next_token_ == "NULL": + self.expect_keyword_("NULL") + else: + result = [(self.expect_number_(), self.expect_number_())] + while self.next_token_ == ",": + self.expect_symbol_(",") + result.append((self.expect_number_(), self.expect_number_())) + result = tuple(result) # make it hashable + self.expect_symbol_(">") + return result + + def is_next_value_(self): + return self.next_token_type_ is Lexer.NUMBER or self.next_token_ == "<" def parse_valuerecord_(self, vertical): if self.next_token_type_ is Lexer.NUMBER: number, location = self.expect_number_(), self.cur_token_location_ if vertical: - val = 
ast.ValueRecord(location, 0, 0, 0, number) + val = self.ast.ValueRecord(location, vertical, + None, None, None, number, + None, None, None, None) else: - val = ast.ValueRecord(location, 0, 0, number, 0) + val = self.ast.ValueRecord(location, vertical, + None, None, number, None, + None, None, None, None) return val self.expect_symbol_("<") location = self.cur_token_location_ if self.next_token_type_ is Lexer.NAME: name = self.expect_name_() + if name == "NULL": + self.expect_symbol_(">") + return None vrd = self.valuerecords_.resolve(name) if vrd is None: - raise ParserError("Unknown valueRecordDef \"%s\"" % name, - self.cur_token_location_) + raise FeatureLibError("Unknown valueRecordDef \"%s\"" % name, + self.cur_token_location_) value = vrd.value xPlacement, yPlacement = (value.xPlacement, value.yPlacement) xAdvance, yAdvance = (value.xAdvance, value.yAdvance) @@ -278,9 +1100,30 @@ xPlacement, yPlacement, xAdvance, yAdvance = ( self.expect_number_(), self.expect_number_(), self.expect_number_(), self.expect_number_()) + + if self.next_token_ == "<": + xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = ( + self.parse_device_(), self.parse_device_(), + self.parse_device_(), self.parse_device_()) + allDeltas = sorted([ + delta + for size, delta + in (xPlaDevice if xPlaDevice else ()) + + (yPlaDevice if yPlaDevice else ()) + + (xAdvDevice if xAdvDevice else ()) + + (yAdvDevice if yAdvDevice else ())]) + if allDeltas[0] < -128 or allDeltas[-1] > 127: + raise FeatureLibError( + "Device value out of valid range (-128..127)", + self.cur_token_location_) + else: + xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = ( + None, None, None, None) + self.expect_symbol_(">") - return ast.ValueRecord( - location, xPlacement, yPlacement, xAdvance, yAdvance) + return self.ast.ValueRecord( + location, vertical, xPlacement, yPlacement, xAdvance, yAdvance, + xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice) def parse_valuerecord_definition_(self, vertical): assert 
self.is_cur_keyword_("valueRecordDef") @@ -288,62 +1131,139 @@ value = self.parse_valuerecord_(vertical) name = self.expect_name_() self.expect_symbol_(";") - vrd = ast.ValueRecordDefinition(location, name, value) + vrd = self.ast.ValueRecordDefinition(location, name, value) self.valuerecords_.define(name, vrd) return vrd def parse_languagesystem_(self): assert self.cur_token_ == "languagesystem" location = self.cur_token_location_ - script, language = self.expect_tag_(), self.expect_tag_() + script = self.expect_script_tag_() + language = self.expect_language_tag_() self.expect_symbol_(";") - return ast.LanguageSystemStatement(location, script, language) + if script == "DFLT" and language != "dflt": + raise FeatureLibError( + 'For script "DFLT", the language must be "dflt"', + self.cur_token_location_) + return self.ast.LanguageSystemStatement(location, script, language) def parse_feature_block_(self): assert self.cur_token_ == "feature" location = self.cur_token_location_ tag = self.expect_tag_() - vertical = (tag == "vkrn") + vertical = (tag in {"vkrn", "vpal", "vhal", "valt"}) + stylisticset = None + if tag in ["ss%02d" % i for i in range(1, 20+1)]: + stylisticset = tag + + size_feature = (tag == "size") use_extension = False if self.next_token_ == "useExtension": self.expect_keyword_("useExtension") use_extension = True - block = ast.FeatureBlock(location, tag, use_extension) - self.parse_block_(block, vertical) + block = self.ast.FeatureBlock(location, tag, use_extension) + self.parse_block_(block, vertical, stylisticset, size_feature) + return block + + def parse_feature_reference_(self): + assert self.cur_token_ == "feature", self.cur_token_ + location = self.cur_token_location_ + featureName = self.expect_tag_() + self.expect_symbol_(";") + return self.ast.FeatureReferenceStatement(location, featureName) + + def parse_featureNames_(self, tag): + assert self.cur_token_ == "featureNames", self.cur_token_ + block = 
self.ast.FeatureNamesBlock(self.cur_token_location_) + self.expect_symbol_("{") + for symtab in self.symbol_tables_: + symtab.enter_scope() + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + block.statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.is_cur_keyword_("name"): + location = self.cur_token_location_ + platformID, platEncID, langID, string = self.parse_name_() + block.statements.append( + self.ast.FeatureNameStatement(location, tag, platformID, + platEncID, langID, string)) + elif self.cur_token_ == ";": + continue + else: + raise FeatureLibError('Expected "name"', + self.cur_token_location_) + self.expect_symbol_("}") + for symtab in self.symbol_tables_: + symtab.exit_scope() + self.expect_symbol_(";") return block - def parse_block_(self, block, vertical): + def parse_FontRevision_(self): + assert self.cur_token_ == "FontRevision", self.cur_token_ + location, version = self.cur_token_location_, self.expect_float_() + self.expect_symbol_(";") + if version <= 0: + raise FeatureLibError("Font revision numbers must be positive", + location) + return self.ast.FontRevisionStatement(location, version) + + def parse_block_(self, block, vertical, stylisticset=None, + size_feature=False): self.expect_symbol_("{") for symtab in self.symbol_tables_: symtab.enter_scope() statements = block.statements - while self.next_token_ != "}": - self.advance_lexer_() - if self.cur_token_type_ is Lexer.GLYPHCLASS: + while self.next_token_ != "}" or self.cur_comments_: + self.advance_lexer_(comments=True) + if self.cur_token_type_ is Lexer.COMMENT: + statements.append(self.ast.Comment(self.cur_token_location_, self.cur_token_)) + elif self.cur_token_type_ is Lexer.GLYPHCLASS: statements.append(self.parse_glyphclass_definition_()) elif self.is_cur_keyword_("anchorDef"): statements.append(self.parse_anchordef_()) + elif self.is_cur_keyword_({"enum", 
"enumerate"}): + statements.append(self.parse_enumerate_(vertical=vertical)) + elif self.is_cur_keyword_("feature"): + statements.append(self.parse_feature_reference_()) elif self.is_cur_keyword_("ignore"): statements.append(self.parse_ignore_()) elif self.is_cur_keyword_("language"): statements.append(self.parse_language_()) elif self.is_cur_keyword_("lookup"): statements.append(self.parse_lookup_(vertical)) + elif self.is_cur_keyword_("lookupflag"): + statements.append(self.parse_lookupflag_()) + elif self.is_cur_keyword_("markClass"): + statements.append(self.parse_markClass_()) + elif self.is_cur_keyword_({"pos", "position"}): + statements.append( + self.parse_position_(enumerated=False, vertical=vertical)) elif self.is_cur_keyword_("script"): statements.append(self.parse_script_()) - elif (self.is_cur_keyword_("substitute") or - self.is_cur_keyword_("sub")): + elif (self.is_cur_keyword_({"sub", "substitute", + "rsub", "reversesub"})): statements.append(self.parse_substitute_()) elif self.is_cur_keyword_("subtable"): statements.append(self.parse_subtable_()) elif self.is_cur_keyword_("valueRecordDef"): statements.append(self.parse_valuerecord_definition_(vertical)) + elif stylisticset and self.is_cur_keyword_("featureNames"): + statements.append(self.parse_featureNames_(stylisticset)) + elif size_feature and self.is_cur_keyword_("parameters"): + statements.append(self.parse_size_parameters_()) + elif size_feature and self.is_cur_keyword_("sizemenuname"): + statements.append(self.parse_size_menuname_()) + elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in self.extensions: + statements.append(self.extensions[self.cur_token_](self)) + elif self.cur_token_ == ";": + continue else: - raise ParserError( - "Expected glyph class definition or statement", + raise FeatureLibError( + "Expected glyph class definition or statement: got {} {}".format(self.cur_token_type_, self.cur_token_), self.cur_token_location_) self.expect_symbol_("}") @@ -352,64 +1272,193 @@ 
name = self.expect_name_() if name != block.name.strip(): - raise ParserError("Expected \"%s\"" % block.name.strip(), - self.cur_token_location_) + raise FeatureLibError("Expected \"%s\"" % block.name.strip(), + self.cur_token_location_) self.expect_symbol_(";") + # A multiple substitution may have a single destination, in which case + # it will look just like a single substitution. So if there are both + # multiple and single substitutions, upgrade all the single ones to + # multiple substitutions. + + # Check if we have a mix of non-contextual singles and multiples. + has_single = False + has_multiple = False + for s in statements: + if isinstance(s, self.ast.SingleSubstStatement): + has_single = not any([s.prefix, s.suffix, s.forceChain]) + elif isinstance(s, self.ast.MultipleSubstStatement): + has_multiple = not any([s.prefix, s.suffix]) + + # Upgrade all single substitutions to multiple substitutions. + if has_single and has_multiple: + for i, s in enumerate(statements): + if isinstance(s, self.ast.SingleSubstStatement): + statements[i] = self.ast.MultipleSubstStatement(s.location, + s.prefix, s.glyphs[0].glyphSet()[0], s.suffix, + [r.glyphSet()[0] for r in s.replacements]) + def is_cur_keyword_(self, k): - return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k) + if self.cur_token_type_ is Lexer.NAME: + if isinstance(k, type("")): # basestring is gone in Python3 + return self.cur_token_ == k + else: + return self.cur_token_ in k + return False + + def expect_class_name_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.GLYPHCLASS: + raise FeatureLibError("Expected @NAME", self.cur_token_location_) + return self.cur_token_ + + def expect_cid_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.CID: + return self.cur_token_ + raise FeatureLibError("Expected a CID", self.cur_token_location_) + + def expect_glyph_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + self.cur_token_ = 
self.cur_token_.lstrip("\\") + if len(self.cur_token_) > 63: + raise FeatureLibError( + "Glyph names must not be longer than 63 characters", + self.cur_token_location_) + return self.cur_token_ + elif self.cur_token_type_ is Lexer.CID: + return "cid%05d" % self.cur_token_ + raise FeatureLibError("Expected a glyph name or CID", + self.cur_token_location_) + + def expect_markClass_reference_(self): + name = self.expect_class_name_() + mc = self.glyphclasses_.resolve(name) + if mc is None: + raise FeatureLibError("Unknown markClass @%s" % name, + self.cur_token_location_) + if not isinstance(mc, self.ast.MarkClass): + raise FeatureLibError("@%s is not a markClass" % name, + self.cur_token_location_) + return mc def expect_tag_(self): self.advance_lexer_() if self.cur_token_type_ is not Lexer.NAME: - raise ParserError("Expected a tag", self.cur_token_location_) + raise FeatureLibError("Expected a tag", self.cur_token_location_) if len(self.cur_token_) > 4: - raise ParserError("Tags can not be longer than 4 characters", - self.cur_token_location_) + raise FeatureLibError("Tags can not be longer than 4 characters", + self.cur_token_location_) return (self.cur_token_ + " ")[:4] + def expect_script_tag_(self): + tag = self.expect_tag_() + if tag == "dflt": + raise FeatureLibError( + '"dflt" is not a valid script tag; use "DFLT" instead', + self.cur_token_location_) + return tag + + def expect_language_tag_(self): + tag = self.expect_tag_() + if tag == "DFLT": + raise FeatureLibError( + '"DFLT" is not a valid language tag; use "dflt" instead', + self.cur_token_location_) + return tag + def expect_symbol_(self, symbol): self.advance_lexer_() if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol: return symbol - raise ParserError("Expected '%s'" % symbol, self.cur_token_location_) + raise FeatureLibError("Expected '%s'" % symbol, + self.cur_token_location_) def expect_keyword_(self, keyword): self.advance_lexer_() if self.cur_token_type_ is Lexer.NAME and 
self.cur_token_ == keyword: return self.cur_token_ - raise ParserError("Expected \"%s\"" % keyword, - self.cur_token_location_) + raise FeatureLibError("Expected \"%s\"" % keyword, + self.cur_token_location_) def expect_name_(self): self.advance_lexer_() if self.cur_token_type_ is Lexer.NAME: return self.cur_token_ - raise ParserError("Expected a name", self.cur_token_location_) + raise FeatureLibError("Expected a name", self.cur_token_location_) def expect_number_(self): self.advance_lexer_() if self.cur_token_type_ is Lexer.NUMBER: return self.cur_token_ - raise ParserError("Expected a number", self.cur_token_location_) + raise FeatureLibError("Expected a number", self.cur_token_location_) - def advance_lexer_(self): - self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( - self.next_token_type_, self.next_token_, self.next_token_location_) - try: - (self.next_token_type_, self.next_token_, - self.next_token_location_) = self.lexer_.next() - except StopIteration: - self.next_token_type_, self.next_token_ = (None, None) + def expect_float_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.FLOAT: + return self.cur_token_ + raise FeatureLibError("Expected a floating-point number", + self.cur_token_location_) + + def expect_decipoint_(self): + if self.next_token_type_ == Lexer.FLOAT: + return self.expect_float_() + elif self.next_token_type_ is Lexer.NUMBER: + return self.expect_number_() / 10 + else: + raise FeatureLibError("Expected an integer or floating-point number", + self.cur_token_location_) + + def expect_string_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.STRING: + return self.cur_token_ + raise FeatureLibError("Expected a string", self.cur_token_location_) + + def advance_lexer_(self, comments=False): + if comments and self.cur_comments_: + self.cur_token_type_ = Lexer.COMMENT + self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0) + return + else: + self.cur_token_type_, self.cur_token_, 
self.cur_token_location_ = ( + self.next_token_type_, self.next_token_, self.next_token_location_) + self.cur_comments_ = [] + while True: + try: + (self.next_token_type_, self.next_token_, + self.next_token_location_) = next(self.lexer_) + except StopIteration: + self.next_token_type_, self.next_token_ = (None, None) + if self.next_token_type_ != Lexer.COMMENT: + break + self.cur_comments_.append((self.next_token_, self.next_token_location_)) + + @staticmethod + def reverse_string_(s): + """'abc' --> 'cba'""" + return ''.join(reversed(list(s))) + + def make_cid_range_(self, location, start, limit): + """(location, 999, 1001) --> ["cid00999", "cid01000", "cid01001"]""" + result = list() + if start > limit: + raise FeatureLibError( + "Bad range: start should be less than limit", location) + for cid in range(start, limit + 1): + result.append("cid%05d" % cid) + return result def make_glyph_range_(self, location, start, limit): - """("a.sc", "d.sc") --> {"a.sc", "b.sc", "c.sc", "d.sc"}""" - result = set() + """(location, "a.sc", "d.sc") --> ["a.sc", "b.sc", "c.sc", "d.sc"]""" + result = list() if len(start) != len(limit): - raise ParserError( + raise FeatureLibError( "Bad range: \"%s\" and \"%s\" should have the same length" % (start, limit), location) - rev = lambda s: ''.join(reversed(list(s))) # string reversal + + rev = self.reverse_string_ prefix = os.path.commonprefix([start, limit]) suffix = rev(os.path.commonprefix([rev(start), rev(limit)])) if len(suffix) > 0: @@ -420,29 +1469,31 @@ limit_range = limit[len(prefix):] if start_range >= limit_range: - raise ParserError("Start of range must be smaller than its end", - location) + raise FeatureLibError( + "Start of range must be smaller than its end", + location) uppercase = re.compile(r'^[A-Z]$') if uppercase.match(start_range) and uppercase.match(limit_range): for c in range(ord(start_range), ord(limit_range) + 1): - result.add("%s%c%s" % (prefix, c, suffix)) + result.append("%s%c%s" % (prefix, c, suffix)) 
return result lowercase = re.compile(r'^[a-z]$') if lowercase.match(start_range) and lowercase.match(limit_range): for c in range(ord(start_range), ord(limit_range) + 1): - result.add("%s%c%s" % (prefix, c, suffix)) + result.append("%s%c%s" % (prefix, c, suffix)) return result digits = re.compile(r'^[0-9]{1,3}$') if digits.match(start_range) and digits.match(limit_range): for i in range(int(start_range, 10), int(limit_range, 10) + 1): number = ("000" + str(i))[-len(start_range):] - result.add("%s%s%s" % (prefix, number, suffix)) + result.append("%s%s%s" % (prefix, number, suffix)) return result - raise ParserError("Bad range: \"%s-%s\"" % (start, limit), location) + raise FeatureLibError("Bad range: \"%s-%s\"" % (start, limit), + location) class SymbolTable(object): diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/parser_test.py fonttools-3.21.2/Snippets/fontTools/feaLib/parser_test.py --- fonttools-3.0/Snippets/fontTools/feaLib/parser_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/parser_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,448 +0,0 @@ -from __future__ import print_function, division, absolute_import -from __future__ import unicode_literals -from fontTools.feaLib.lexer import LexerError -from fontTools.feaLib.parser import Parser, ParserError, SymbolTable -from fontTools.misc.py23 import * -import fontTools.feaLib.ast as ast -import codecs -import os -import shutil -import sys -import tempfile -import unittest - - -class ParserTest(unittest.TestCase): - def __init__(self, methodName): - unittest.TestCase.__init__(self, methodName) - # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, - # and fires deprecation warnings if a program uses the old name. 
- if not hasattr(self, "assertRaisesRegex"): - self.assertRaisesRegex = self.assertRaisesRegexp - - def test_anchordef(self): - [foo] = self.parse("anchorDef 123 456 foo;").statements - self.assertEqual(type(foo), ast.AnchorDefinition) - self.assertEqual(foo.name, "foo") - self.assertEqual(foo.x, 123) - self.assertEqual(foo.y, 456) - self.assertEqual(foo.contourpoint, None) - - def test_anchordef_contourpoint(self): - [foo] = self.parse("anchorDef 123 456 contourpoint 5 foo;").statements - self.assertEqual(type(foo), ast.AnchorDefinition) - self.assertEqual(foo.name, "foo") - self.assertEqual(foo.x, 123) - self.assertEqual(foo.y, 456) - self.assertEqual(foo.contourpoint, 5) - - def test_feature_block(self): - [liga] = self.parse("feature liga {} liga;").statements - self.assertEqual(liga.name, "liga") - self.assertFalse(liga.use_extension) - - def test_feature_block_useExtension(self): - [liga] = self.parse("feature liga useExtension {} liga;").statements - self.assertEqual(liga.name, "liga") - self.assertTrue(liga.use_extension) - - def test_glyphclass(self): - [gc] = self.parse("@dash = [endash emdash figuredash];").statements - self.assertEqual(gc.name, "dash") - self.assertEqual(gc.glyphs, {"endash", "emdash", "figuredash"}) - - def test_glyphclass_bad(self): - self.assertRaisesRegex( - ParserError, - "Expected glyph name, glyph range, or glyph class reference", - self.parse, "@bad = [a 123];") - - def test_glyphclass_duplicate(self): - self.assertRaisesRegex( - ParserError, "Glyph class @dup already defined", - self.parse, "@dup = [a b]; @dup = [x];") - - def test_glyphclass_empty(self): - [gc] = self.parse("@empty_set = [];").statements - self.assertEqual(gc.name, "empty_set") - self.assertEqual(gc.glyphs, set()) - - def test_glyphclass_equality(self): - [foo, bar] = self.parse("@foo = [a b]; @bar = @foo;").statements - self.assertEqual(foo.glyphs, {"a", "b"}) - self.assertEqual(bar.glyphs, {"a", "b"}) - - def test_glyphclass_range_uppercase(self): - [gc] = 
self.parse("@swashes = [X.swash-Z.swash];").statements - self.assertEqual(gc.name, "swashes") - self.assertEqual(gc.glyphs, {"X.swash", "Y.swash", "Z.swash"}) - - def test_glyphclass_range_lowercase(self): - [gc] = self.parse("@defg.sc = [d.sc-g.sc];").statements - self.assertEqual(gc.name, "defg.sc") - self.assertEqual(gc.glyphs, {"d.sc", "e.sc", "f.sc", "g.sc"}) - - def test_glyphclass_range_digit1(self): - [gc] = self.parse("@range = [foo.2-foo.5];").statements - self.assertEqual(gc.glyphs, {"foo.2", "foo.3", "foo.4", "foo.5"}) - - def test_glyphclass_range_digit2(self): - [gc] = self.parse("@range = [foo.09-foo.11];").statements - self.assertEqual(gc.glyphs, {"foo.09", "foo.10", "foo.11"}) - - def test_glyphclass_range_digit3(self): - [gc] = self.parse("@range = [foo.123-foo.125];").statements - self.assertEqual(gc.glyphs, {"foo.123", "foo.124", "foo.125"}) - - def test_glyphclass_range_bad(self): - self.assertRaisesRegex( - ParserError, - "Bad range: \"a\" and \"foobar\" should have the same length", - self.parse, "@bad = [a-foobar];") - self.assertRaisesRegex( - ParserError, "Bad range: \"A.swash-z.swash\"", - self.parse, "@bad = [A.swash-z.swash];") - self.assertRaisesRegex( - ParserError, "Start of range must be smaller than its end", - self.parse, "@bad = [B.swash-A.swash];") - self.assertRaisesRegex( - ParserError, "Bad range: \"foo.1234-foo.9876\"", - self.parse, "@bad = [foo.1234-foo.9876];") - - def test_glyphclass_range_mixed(self): - [gc] = self.parse("@range = [a foo.09-foo.11 X.sc-Z.sc];").statements - self.assertEqual(gc.glyphs, { - "a", "foo.09", "foo.10", "foo.11", "X.sc", "Y.sc", "Z.sc" - }) - - def test_glyphclass_reference(self): - [vowels_lc, vowels_uc, vowels] = self.parse( - "@Vowels.lc = [a e i o u]; @Vowels.uc = [A E I O U];" - "@Vowels = [@Vowels.lc @Vowels.uc y Y];").statements - self.assertEqual(vowels_lc.glyphs, set(list("aeiou"))) - self.assertEqual(vowels_uc.glyphs, set(list("AEIOU"))) - self.assertEqual(vowels.glyphs, 
set(list("aeiouyAEIOUY"))) - self.assertRaisesRegex( - ParserError, "Unknown glyph class @unknown", - self.parse, "@bad = [@unknown];") - - def test_glyphclass_scoping(self): - [foo, liga, smcp] = self.parse( - "@foo = [a b];" - "feature liga { @bar = [@foo l]; } liga;" - "feature smcp { @bar = [@foo s]; } smcp;" - ).statements - self.assertEqual(foo.glyphs, {"a", "b"}) - self.assertEqual(liga.statements[0].glyphs, {"a", "b", "l"}) - self.assertEqual(smcp.statements[0].glyphs, {"a", "b", "s"}) - - def test_ignore_sub(self): - doc = self.parse("feature test {ignore sub e t' c;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.IgnoreSubstitutionRule) - self.assertEqual(s.prefix, [{"e"}]) - self.assertEqual(s.glyphs, [{"t"}]) - self.assertEqual(s.suffix, [{"c"}]) - - def test_ignore_substitute(self): - doc = self.parse( - "feature test {" - " ignore substitute f [a e] d' [a u]' [e y];" - "} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.IgnoreSubstitutionRule) - self.assertEqual(s.prefix, [{"f"}, {"a", "e"}]) - self.assertEqual(s.glyphs, [{"d"}, {"a", "u"}]) - self.assertEqual(s.suffix, [{"e", "y"}]) - - def test_language(self): - doc = self.parse("feature test {language DEU;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertTrue(s.include_default) - self.assertFalse(s.required) - - def test_language_exclude_dflt(self): - doc = self.parse("feature test {language DEU exclude_dflt;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertFalse(s.include_default) - self.assertFalse(s.required) - - def test_language_exclude_dflt_required(self): - doc = self.parse("feature test {" - " language DEU exclude_dflt required;" - "} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - 
self.assertEqual(s.language, "DEU") - self.assertFalse(s.include_default) - self.assertTrue(s.required) - - def test_language_include_dflt(self): - doc = self.parse("feature test {language DEU include_dflt;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertTrue(s.include_default) - self.assertFalse(s.required) - - def test_language_include_dflt_required(self): - doc = self.parse("feature test {" - " language DEU include_dflt required;" - "} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertTrue(s.include_default) - self.assertTrue(s.required) - - def test_lookup_block(self): - [lookup] = self.parse("lookup Ligatures {} Ligatures;").statements - self.assertEqual(lookup.name, "Ligatures") - self.assertFalse(lookup.use_extension) - - def test_lookup_block_useExtension(self): - [lookup] = self.parse("lookup Foo useExtension {} Foo;").statements - self.assertEqual(lookup.name, "Foo") - self.assertTrue(lookup.use_extension) - - def test_lookup_block_name_mismatch(self): - self.assertRaisesRegex( - ParserError, 'Expected "Foo"', - self.parse, "lookup Foo {} Bar;") - - def test_lookup_block_with_horizontal_valueRecordDef(self): - doc = self.parse("feature liga {" - " lookup look {" - " valueRecordDef 123 foo;" - " } look;" - "} liga;") - [liga] = doc.statements - [look] = liga.statements - [foo] = look.statements - self.assertEqual(foo.value.xAdvance, 123) - self.assertEqual(foo.value.yAdvance, 0) - - def test_lookup_block_with_vertical_valueRecordDef(self): - doc = self.parse("feature vkrn {" - " lookup look {" - " valueRecordDef 123 foo;" - " } look;" - "} vkrn;") - [vkrn] = doc.statements - [look] = vkrn.statements - [foo] = look.statements - self.assertEqual(foo.value.xAdvance, 0) - self.assertEqual(foo.value.yAdvance, 123) - - def test_lookup_reference(self): - [foo, bar] = 
self.parse("lookup Foo {} Foo;" - "feature Bar {lookup Foo;} Bar;").statements - [ref] = bar.statements - self.assertEqual(type(ref), ast.LookupReferenceStatement) - self.assertEqual(ref.lookup, foo) - - def test_lookup_reference_unknown(self): - self.assertRaisesRegex( - ParserError, 'Unknown lookup "Huh"', - self.parse, "feature liga {lookup Huh;} liga;") - - def test_script(self): - doc = self.parse("feature test {script cyrl;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.ScriptStatement) - self.assertEqual(s.script, "cyrl") - - def test_substitute_single_format_a(self): # GSUB LookupType 1 - doc = self.parse("feature smcp {substitute a by a.sc;} smcp;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"a"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"a.sc"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_single_format_b(self): # GSUB LookupType 1 - doc = self.parse( - "feature smcp {" - " substitute [one.fitted one.oldstyle] by one;" - "} smcp;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"one.fitted", "one.oldstyle"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"one"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_single_format_c(self): # GSUB LookupType 1 - doc = self.parse( - "feature smcp {" - " substitute [a-d] by [A.sc-D.sc];" - "} smcp;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"a", "b", "c", "d"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"A.sc", "B.sc", "C.sc", "D.sc"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_multiple(self): # GSUB LookupType 2 - doc = self.parse("lookup Look {substitute f_f_i by f f i;} Look;") - sub = doc.statements[0].statements[0] - self.assertEqual(type(sub), 
ast.SubstitutionRule) - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"f_f_i"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"f"}, {"f"}, {"i"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_from(self): # GSUB LookupType 3 - doc = self.parse("feature test {" - " substitute a from [a.1 a.2 a.3];" - "} test;") - sub = doc.statements[0].statements[0] - self.assertEqual(type(sub), ast.AlternateSubstitution) - self.assertEqual(sub.glyph, "a") - self.assertEqual(sub.from_class, {"a.1", "a.2", "a.3"}) - - def test_substitute_from_glyphclass(self): # GSUB LookupType 3 - doc = self.parse("feature test {" - " @Ampersands = [ampersand.1 ampersand.2];" - " substitute ampersand from @Ampersands;" - "} test;") - [glyphclass, sub] = doc.statements[0].statements - self.assertEqual(type(sub), ast.AlternateSubstitution) - self.assertEqual(sub.glyph, "ampersand") - self.assertEqual(sub.from_class, {"ampersand.1", "ampersand.2"}) - - def test_substitute_ligature(self): # GSUB LookupType 4 - doc = self.parse("feature liga {substitute f f i by f_f_i;} liga;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"f"}, {"f"}, {"i"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"f_f_i"}]) - self.assertEqual(sub.lookups, [None, None, None]) - - def test_substitute_lookups(self): - doc = Parser(self.getpath("spec5fi.fea")).parse() - [ligs, sub, feature] = doc.statements - self.assertEqual(feature.statements[0].lookups, [ligs, None, sub]) - self.assertEqual(feature.statements[1].lookups, [ligs, None, sub]) - - def test_substitute_missing_by(self): - self.assertRaisesRegex( - ParserError, 'Expected "by", "from" or explicit lookup references', - self.parse, "feature liga {substitute f f i;} liga;") - - def test_subtable(self): - doc = self.parse("feature test {subtable;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), 
ast.SubtableStatement) - - def test_valuerecord_format_a_horizontal(self): - doc = self.parse("feature liga {valueRecordDef 123 foo;} liga;") - value = doc.statements[0].statements[0].value - self.assertEqual(value.xPlacement, 0) - self.assertEqual(value.yPlacement, 0) - self.assertEqual(value.xAdvance, 123) - self.assertEqual(value.yAdvance, 0) - - def test_valuerecord_format_a_vertical(self): - doc = self.parse("feature vkrn {valueRecordDef 123 foo;} vkrn;") - value = doc.statements[0].statements[0].value - self.assertEqual(value.xPlacement, 0) - self.assertEqual(value.yPlacement, 0) - self.assertEqual(value.xAdvance, 0) - self.assertEqual(value.yAdvance, 123) - - def test_valuerecord_format_b(self): - doc = self.parse("feature liga {valueRecordDef <1 2 3 4> foo;} liga;") - value = doc.statements[0].statements[0].value - self.assertEqual(value.xPlacement, 1) - self.assertEqual(value.yPlacement, 2) - self.assertEqual(value.xAdvance, 3) - self.assertEqual(value.yAdvance, 4) - - def test_valuerecord_named(self): - doc = self.parse("valueRecordDef <1 2 3 4> foo;" - "feature liga {valueRecordDef bar;} liga;") - value = doc.statements[1].statements[0].value - self.assertEqual(value.xPlacement, 1) - self.assertEqual(value.yPlacement, 2) - self.assertEqual(value.xAdvance, 3) - self.assertEqual(value.yAdvance, 4) - - def test_valuerecord_named_unknown(self): - self.assertRaisesRegex( - ParserError, "Unknown valueRecordDef \"unknown\"", - self.parse, "valueRecordDef foo;") - - def test_valuerecord_scoping(self): - [foo, liga, smcp] = self.parse( - "valueRecordDef 789 foo;" - "feature liga {valueRecordDef bar;} liga;" - "feature smcp {valueRecordDef bar;} smcp;" - ).statements - self.assertEqual(foo.value.xAdvance, 789) - self.assertEqual(liga.statements[0].value.xAdvance, 789) - self.assertEqual(smcp.statements[0].value.xAdvance, 789) - - def test_languagesystem(self): - [langsys] = self.parse("languagesystem latn DEU;").statements - self.assertEqual(langsys.script, 
"latn") - self.assertEqual(langsys.language, "DEU ") - self.assertRaisesRegex( - ParserError, "Expected ';'", - self.parse, "languagesystem latn DEU") - self.assertRaisesRegex( - ParserError, "longer than 4 characters", - self.parse, "languagesystem foobar DEU") - self.assertRaisesRegex( - ParserError, "longer than 4 characters", - self.parse, "languagesystem latn FOOBAR") - - def setUp(self): - self.tempdir = None - self.num_tempfiles = 0 - - def tearDown(self): - if self.tempdir: - shutil.rmtree(self.tempdir) - - def parse(self, text): - if not self.tempdir: - self.tempdir = tempfile.mkdtemp() - self.num_tempfiles += 1 - path = os.path.join(self.tempdir, "tmp%d.fea" % self.num_tempfiles) - with codecs.open(path, "wb", "utf-8") as outfile: - outfile.write(text) - return Parser(path).parse() - - @staticmethod - def getpath(testfile): - path, _ = os.path.split(__file__) - return os.path.join(path, "testdata", testfile) - - -class SymbolTableTest(unittest.TestCase): - def test_scopes(self): - symtab = SymbolTable() - symtab.define("foo", 23) - self.assertEqual(symtab.resolve("foo"), 23) - symtab.enter_scope() - self.assertEqual(symtab.resolve("foo"), 23) - symtab.define("foo", 42) - self.assertEqual(symtab.resolve("foo"), 42) - symtab.exit_scope() - self.assertEqual(symtab.resolve("foo"), 23) - - def test_resolve_undefined(self): - self.assertEqual(SymbolTable().resolve("abc"), None) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/include0.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include0.fea --- fonttools-3.0/Snippets/fontTools/feaLib/testdata/include0.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include0.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -I0 diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/include1.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include1.fea --- 
fonttools-3.0/Snippets/fontTools/feaLib/testdata/include1.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include1.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I1a -include(include0.fea); -I1b diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/include2.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include2.fea --- fonttools-3.0/Snippets/fontTools/feaLib/testdata/include2.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include2.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I2a -include(include1.fea); -I2b diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/include3.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include3.fea --- fonttools-3.0/Snippets/fontTools/feaLib/testdata/include3.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include3.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -I3a -include(include2.fea); -I3b - diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/include4.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include4.fea --- fonttools-3.0/Snippets/fontTools/feaLib/testdata/include4.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include4.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -I4a -include(include3.fea); -I4b - diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/include5.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include5.fea --- fonttools-3.0/Snippets/fontTools/feaLib/testdata/include5.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include5.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I5a -include(include4.fea); -I5b diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/include6.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include6.fea --- 
fonttools-3.0/Snippets/fontTools/feaLib/testdata/include6.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/include6.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I6a -include(include5.fea); -I6b diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/includemissingfile.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/includemissingfile.fea --- fonttools-3.0/Snippets/fontTools/feaLib/testdata/includemissingfile.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/includemissingfile.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -include(missingfile.fea); diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/includeself.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/includeself.fea --- fonttools-3.0/Snippets/fontTools/feaLib/testdata/includeself.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/includeself.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -include(includeself.fea); diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/mini.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/mini.fea --- fonttools-3.0/Snippets/fontTools/feaLib/testdata/mini.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/mini.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Example file from OpenType Feature File specification, section 1. 
-# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html - -# Script and language coverage -languagesystem DFLT dflt; -languagesystem latn dflt; - -# Ligature formation -feature liga { - substitute f i by f_i; - substitute f l by f_l; -} liga; - -# Kerning -feature kern { - position A Y -100; - position a y -80; - position s f' <0 0 10 0> t; -} kern; diff -Nru fonttools-3.0/Snippets/fontTools/feaLib/testdata/spec5fi.fea fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/spec5fi.fea --- fonttools-3.0/Snippets/fontTools/feaLib/testdata/spec5fi.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/feaLib/testdata/spec5fi.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -# OpenType Feature File specification, section 5.f.i, example 1 -# "Specifying a Chain Sub rule and marking sub-runs" -# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html - -lookup CNTXT_LIGS { - substitute f i by f_i; - substitute c t by c_t; - } CNTXT_LIGS; - -lookup CNTXT_SUB { - substitute n by n.end; - substitute s by s.end; - } CNTXT_SUB; - -feature test { - substitute [a e i o u] f' lookup CNTXT_LIGS i' n' lookup CNTXT_SUB; - substitute [a e i o u] c' lookup CNTXT_LIGS t' s' lookup CNTXT_SUB; -} test; diff -Nru fonttools-3.0/Snippets/fontTools/__init__.py fonttools-3.21.2/Snippets/fontTools/__init__.py --- fonttools-3.0/Snippets/fontTools/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,4 +1,10 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +import logging +from fontTools.misc.loggingTools import configLogger -version = "3.0" +log = logging.getLogger(__name__) + +version = __version__ = "3.21.2" + +__all__ = ["version", "log", "configLogger"] diff -Nru fonttools-3.0/Snippets/fontTools/inspect.py fonttools-3.21.2/Snippets/fontTools/inspect.py --- 
fonttools-3.0/Snippets/fontTools/inspect.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/inspect.py 2018-01-08 12:40:40.000000000 +0000 @@ -8,8 +8,14 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools import misc, ttLib, cffLib -import pygtk -pygtk.require('2.0') +try: + from gi import pygtkcompat +except ImportError: + pygtkcompat = None + +if pygtkcompat is not None: + pygtkcompat.enable() + pygtkcompat.enable_gtk(version='3.0') import gtk import sys @@ -73,7 +79,7 @@ def _add_object(self, key, value): # Make sure item is decompiled try: - value["asdf"] + value.asdf # Any better way?! except (AttributeError, KeyError, TypeError, ttLib.TTLibError): pass if isinstance(value, ttLib.getTableModule('glyf').Glyph): @@ -256,10 +262,10 @@ args = sys.argv[1:] if len(args) < 1: print("usage: pyftinspect font...", file=sys.stderr) - sys.exit(1) + return 1 for arg in args: Inspect(arg) gtk.main() if __name__ == "__main__": - main() + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fontTools/__main__.py fonttools-3.21.2/Snippets/fontTools/__main__.py --- fonttools-3.0/Snippets/fontTools/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/__main__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ +from __future__ import print_function, division, absolute_import +import sys + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + # TODO Add help output, --help, etc. + + # TODO Handle library-wide options. Eg.: + # --unicodedata + # --verbose / other logging stuff + + # TODO Allow a way to run arbitrary modules? Useful for setting + # library-wide options and calling another library. Eg.: + # + # $ fonttools --unicodedata=... fontmake ... + # + # This allows for a git-like command where thirdparty commands + # can be added. Should we just try importing the fonttools + # module first and try without if it fails? 
+ + mod = 'fontTools.'+sys.argv[1] + sys.argv[1] = sys.argv[0] + ' ' + sys.argv[1] + del sys.argv[0] + + import runpy + runpy.run_module(mod, run_name='__main__') + + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fontTools/merge.py fonttools-3.21.2/Snippets/fontTools/merge.py --- fonttools-3.0/Snippets/fontTools/merge.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/merge.py 2018-01-08 12:40:40.000000000 +0000 @@ -11,10 +11,16 @@ from fontTools import ttLib, cffLib from fontTools.ttLib.tables import otTables, _h_e_a_d from fontTools.ttLib.tables.DefaultTable import DefaultTable +from fontTools.misc.loggingTools import Timer from functools import reduce import sys import time import operator +import logging + + +log = logging.getLogger("fontTools.merge") +timer = Timer(logger=logging.getLogger(__name__+".timer"), level=logging.INFO) def _add_method(*clazzes, **kwargs): @@ -22,7 +28,10 @@ more classes.""" allowDefault = kwargs.get('allowDefaultTable', False) def wrapper(method): + done = [] for clazz in clazzes: + if clazz in done: continue # Support multiple names of a clazz + done.append(clazz) assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.' assert method.__name__ not in clazz.__dict__, \ "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__) @@ -141,7 +150,7 @@ @_add_method(DefaultTable, allowDefaultTable=True) def merge(self, m, tables): if not hasattr(self, 'mergeMap'): - m.log("Don't know how to merge '%s'." % self.tableTag) + log.info("Don't know how to merge '%s'.", self.tableTag) return NotImplemented logic = self.mergeMap @@ -307,12 +316,6 @@ 'metrics': sumDicts, } -ttLib.getTableClass('gasp').mergeMap = { - 'tableTag': equal, - 'version': max, - 'gaspRange': first, # FIXME? Appears irreconcilable -} - ttLib.getTableClass('name').mergeMap = { 'tableTag': equal, 'names': first, # FIXME? Does mixing name records make sense? 
@@ -346,24 +349,32 @@ ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst) ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst) ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst) +ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable @_add_method(ttLib.getTableClass('cmap')) def merge(self, m, tables): # TODO Handle format=14. - cmapTables = [(t,fontIdx) for fontIdx,table in enumerate(tables) for t in table.tables if t.isUnicode()] - # TODO Better handle format-4 and format-12 coexisting in same font. - # TODO Insert both a format-4 and format-12 if needed. - module = ttLib.getTableModule('cmap') - assert all(t.format in [4, 12] for t,_ in cmapTables) - format = max(t.format for t,_ in cmapTables) - cmapTable = module.cmap_classes[format](format) - cmapTable.cmap = {} - cmapTable.platformID = 3 - cmapTable.platEncID = max(t.platEncID for t,_ in cmapTables) - cmapTable.language = 0 - cmap = cmapTable.cmap + # Only merges 4/3/1 and 12/3/10 subtables, ignores all other subtables + # If there is a format 12 table for the same font, ignore the format 4 table + cmapTables = [] + for fontIdx,table in enumerate(tables): + format4 = None + format12 = None + for subtable in table.tables: + properties = (subtable.format, subtable.platformID, subtable.platEncID) + if properties == (4,3,1): + format4 = subtable + elif properties == (12,3,10): + format12 = subtable + if format12 is not None: + cmapTables.append((format12, fontIdx)) + elif format4 is not None: + cmapTables.append((format4, fontIdx)) + + # Build a unicode mapping, then decide which format is needed to store it. + cmap = {} for table,fontIdx in cmapTables: - # TODO handle duplicates. + # handle duplicates for uni,gid in table.cmap.items(): oldgid = cmap.get(uni, None) if oldgid is None: @@ -371,26 +382,121 @@ elif oldgid != gid: # Char previously mapped to oldgid, now to gid. # Record, to fix up in GSUB 'locl' later. 
- assert m.duplicateGlyphsPerFont[fontIdx].get(oldgid, gid) == gid - m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid + if m.duplicateGlyphsPerFont[fontIdx].get(oldgid, gid) == gid: + m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid + else: + # Char previously mapped to oldgid but already remapped to a different gid. + # TODO: Try harder to do something about these. + log.warning("Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid) + + cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF} + self.tables = [] + module = ttLib.getTableModule('cmap') + if len(cmapBmpOnly) != len(cmap): + # format-12 required. + cmapTable = module.cmap_classes[12](12) + cmapTable.platformID = 3 + cmapTable.platEncID = 10 + cmapTable.language = 0 + cmapTable.cmap = cmap + self.tables.append(cmapTable) + # always create format-4 + cmapTable = module.cmap_classes[4](4) + cmapTable.platformID = 3 + cmapTable.platEncID = 1 + cmapTable.language = 0 + cmapTable.cmap = cmapBmpOnly + # ordered by platform then encoding + self.tables.insert(0, cmapTable) self.tableVersion = 0 - self.tables = [cmapTable] self.numSubTables = len(self.tables) return self +def mergeLookupLists(lst): + # TODO Do smarter merge. 
+ return sumLists(lst) + +def mergeFeatures(lst): + assert lst + self = otTables.Feature() + self.FeatureParams = None + self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex]) + self.LookupCount = len(self.LookupListIndex) + return self + +def mergeFeatureLists(lst): + d = {} + for l in lst: + for f in l: + tag = f.FeatureTag + if tag not in d: + d[tag] = [] + d[tag].append(f.Feature) + ret = [] + for tag in sorted(d.keys()): + rec = otTables.FeatureRecord() + rec.FeatureTag = tag + rec.Feature = mergeFeatures(d[tag]) + ret.append(rec) + return ret + +def mergeLangSyses(lst): + assert lst + + # TODO Support merging ReqFeatureIndex + assert all(l.ReqFeatureIndex == 0xFFFF for l in lst) + + self = otTables.LangSys() + self.LookupOrder = None + self.ReqFeatureIndex = 0xFFFF + self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex]) + self.FeatureCount = len(self.FeatureIndex) + return self + +def mergeScripts(lst): + assert lst + + if len(lst) == 1: + return lst[0] + # TODO Support merging LangSysRecords + assert all(not s.LangSysRecord for s in lst) + + self = otTables.Script() + self.LangSysRecord = [] + self.LangSysCount = 0 + self.DefaultLangSys = mergeLangSyses([s.DefaultLangSys for s in lst if s.DefaultLangSys]) + return self + +def mergeScriptRecords(lst): + d = {} + for l in lst: + for s in l: + tag = s.ScriptTag + if tag not in d: + d[tag] = [] + d[tag].append(s.Script) + ret = [] + for tag in sorted(d.keys()): + rec = otTables.ScriptRecord() + rec.ScriptTag = tag + rec.Script = mergeScripts(d[tag]) + ret.append(rec) + return ret + otTables.ScriptList.mergeMap = { - 'ScriptCount': sum, - 'ScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.ScriptTag), + 'ScriptCount': lambda lst: None, # TODO + 'ScriptRecord': mergeScriptRecords, } otTables.BaseScriptList.mergeMap = { - 'BaseScriptCount': sum, + 'BaseScriptCount': lambda lst: None, # TODO + # TODO: Merge duplicate entries 
'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag), } otTables.FeatureList.mergeMap = { 'FeatureCount': sum, - 'FeatureRecord': sumLists, + 'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag), } otTables.LookupList.mergeMap = { @@ -399,10 +505,12 @@ } otTables.Coverage.mergeMap = { + 'Format': min, 'glyphs': sumLists, } otTables.ClassDef.mergeMap = { + 'Format': min, 'classDefs': sumDicts, } @@ -463,15 +571,14 @@ assert len(tables) == len(m.duplicateGlyphsPerFont) for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)): if not dups: continue - assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB" % (i + 1) - lookupMap = {id(v):v for v in table.table.LookupList.Lookup} - featureMap = {id(v):v for v in table.table.FeatureList.FeatureRecord} + assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB: %s" % (i + 1, dups) synthFeature = None synthLookup = None for script in table.table.ScriptList.ScriptRecord: if script.ScriptTag == 'DFLT': continue # XXX for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]: - feature = [featureMap[v] for v in langsys.FeatureIndex if featureMap[v].FeatureTag == 'locl'] + if langsys is None: continue # XXX Create! 
+ feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl'] assert len(feature) <= 1 if feature: feature = feature[0] @@ -483,9 +590,8 @@ f.FeatureParams = None f.LookupCount = 0 f.LookupListIndex = [] - langsys.FeatureIndex.append(id(synthFeature)) - featureMap[id(synthFeature)] = synthFeature - langsys.FeatureIndex.sort(key=lambda v: featureMap[v].FeatureTag) + langsys.FeatureIndex.append(synthFeature) + langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag) table.table.FeatureList.FeatureRecord.append(synthFeature) table.table.FeatureList.FeatureCount += 1 feature = synthFeature @@ -501,7 +607,7 @@ table.table.LookupList.Lookup.append(synthLookup) table.table.LookupList.LookupCount += 1 - feature.Feature.LookupListIndex[:0] = [id(synthLookup)] + feature.Feature.LookupListIndex[:0] = [synthLookup] feature.Feature.LookupCount += 1 DefaultTable.merge(self, m, tables) @@ -647,6 +753,9 @@ def __init__(self, **kwargs): + self.verbose = False + self.timing = False + self.set(**kwargs) def set(self, **kwargs): @@ -655,7 +764,7 @@ raise self.UnknownOptionError("Unknown option '%s'" % k) setattr(self, k, v) - def parse_opts(self, argv, ignore_unknown=False): + def parse_opts(self, argv, ignore_unknown=[]): ret = [] opts = {} for a in argv: @@ -715,18 +824,50 @@ return ret +class _AttendanceRecordingIdentityDict(dict): + """A dictionary-like object that records indices of items actually accessed + from a list.""" + + def __init__(self, lst): + self.l = lst + self.d = {id(v):i for i,v in enumerate(lst)} + self.s = set() + + def __getitem__(self, v): + self.s.add(self.d[id(v)]) + return v + +class _GregariousDict(dict): + """A dictionary-like object that welcomes guests without reservations and + adds them to the end of the guest list.""" + + def __init__(self, lst): + self.l = lst + self.s = set(id(v) for v in lst) + + def __getitem__(self, v): + if id(v) not in self.s: + self.s.add(id(v)) + self.l.append(v) + return v + +class _NonhashableDict(dict): + """A 
dictionary-like object mapping objects to their index within a list.""" + + def __init__(self, lst): + self.d = {id(v):i for i,v in enumerate(lst)} + + def __getitem__(self, v): + return self.d[id(v)] class Merger(object): - def __init__(self, options=None, log=None): + def __init__(self, options=None): - if not log: - log = Logger() if not options: options = Options() self.options = options - self.log = log def merge(self, fontfiles): @@ -763,19 +904,19 @@ allTags = ['cmap'] + list(allTags) for tag in allTags: + with timer("merge '%s'" % tag): + tables = [font.get(tag, NotImplemented) for font in fonts] - tables = [font.get(tag, NotImplemented) for font in fonts] - - clazz = ttLib.getTableClass(tag) - table = clazz(tag).merge(self, tables) - # XXX Clean this up and use: table = mergeObjects(tables) - - if table is not NotImplemented and table is not False: - mega[tag] = table - self.log("Merged '%s'." % tag) - else: - self.log("Dropped '%s'." % tag) - self.log.lapse("merge '%s'" % tag) + log.info("Merging '%s'.", tag) + clazz = ttLib.getTableClass(tag) + table = clazz(tag).merge(self, tables) + # XXX Clean this up and use: table = mergeObjects(tables) + + if table is not NotImplemented and table is not False: + mega[tag] = table + log.info("Merged '%s'.", tag) + else: + log.info("Dropped '%s'.", tag) del self.duplicateGlyphsPerFont @@ -831,14 +972,12 @@ if not t: continue if t.table.LookupList: - lookupMap = {i:id(v) for i,v in enumerate(t.table.LookupList.Lookup)} + lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)} t.table.LookupList.mapLookups(lookupMap) - if t.table.FeatureList: - # XXX Handle present FeatureList but absent LookupList - t.table.FeatureList.mapLookups(lookupMap) + t.table.FeatureList.mapLookups(lookupMap) if t.table.FeatureList and t.table.ScriptList: - featureMap = {i:id(v) for i,v in enumerate(t.table.FeatureList.FeatureRecord)} + featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)} 
t.table.ScriptList.mapFeatures(featureMap) # TODO GDEF/Lookup MarkFilteringSets @@ -855,95 +994,85 @@ for t in [GSUB, GPOS]: if not t: continue + if t.table.FeatureList and t.table.ScriptList: + + # Collect unregistered (new) features. + featureMap = _GregariousDict(t.table.FeatureList.FeatureRecord) + t.table.ScriptList.mapFeatures(featureMap) + + # Record used features. + featureMap = _AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord) + t.table.ScriptList.mapFeatures(featureMap) + usedIndices = featureMap.s + + # Remove unused features + t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices] + + # Map back to indices. + featureMap = _NonhashableDict(t.table.FeatureList.FeatureRecord) + t.table.ScriptList.mapFeatures(featureMap) + + t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord) + if t.table.LookupList: - lookupMap = {id(v):i for i,v in enumerate(t.table.LookupList.Lookup)} + + # Collect unregistered (new) lookups. + lookupMap = _GregariousDict(t.table.LookupList.Lookup) + t.table.FeatureList.mapLookups(lookupMap) t.table.LookupList.mapLookups(lookupMap) - if t.table.FeatureList: - # XXX Handle present FeatureList but absent LookupList - t.table.FeatureList.mapLookups(lookupMap) - if t.table.FeatureList and t.table.ScriptList: - # XXX Handle present ScriptList but absent FeatureList - featureMap = {id(v):i for i,v in enumerate(t.table.FeatureList.FeatureRecord)} - t.table.ScriptList.mapFeatures(featureMap) + # Record used lookups. + lookupMap = _AttendanceRecordingIdentityDict(t.table.LookupList.Lookup) + t.table.FeatureList.mapLookups(lookupMap) + t.table.LookupList.mapLookups(lookupMap) + usedIndices = lookupMap.s - # TODO GDEF/Lookup MarkFilteringSets - # TODO FeatureParams nameIDs + # Remove unused lookups + t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices] + # Map back to indices. 
+ lookupMap = _NonhashableDict(t.table.LookupList.Lookup) + t.table.FeatureList.mapLookups(lookupMap) + t.table.LookupList.mapLookups(lookupMap) -class Logger(object): + t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup) - def __init__(self, verbose=False, xml=False, timing=False): - self.verbose = verbose - self.xml = xml - self.timing = timing - self.last_time = self.start_time = time.time() - - def parse_opts(self, argv): - argv = argv[:] - for v in ['verbose', 'xml', 'timing']: - if "--"+v in argv: - setattr(self, v, True) - argv.remove("--"+v) - return argv - - def __call__(self, *things): - if not self.verbose: - return - print(' '.join(str(x) for x in things)) - - def lapse(self, *things): - if not self.timing: - return - new_time = time.time() - print("Took %0.3fs to %s" %(new_time - self.last_time, - ' '.join(str(x) for x in things))) - self.last_time = new_time - - def font(self, font, file=sys.stdout): - if not self.xml: - return - from fontTools.misc import xmlWriter - writer = xmlWriter.XMLWriter(file) - font.disassembleInstructions = False # Work around ttLib bug - for tag in font.keys(): - writer.begintag(tag) - writer.newline() - font[tag].toXML(writer, font) - writer.endtag(tag) - writer.newline() + # TODO GDEF/Lookup MarkFilteringSets + # TODO FeatureParams nameIDs __all__ = [ 'Options', 'Merger', - 'Logger', 'main' ] +@timer("make one with everything (TOTAL TIME)") def main(args=None): + from fontTools import configLogger if args is None: args = sys.argv[1:] - log = Logger() - args = log.parse_opts(args) - options = Options() args = options.parse_opts(args) if len(args) < 1: print("usage: pyftmerge font...", file=sys.stderr) - sys.exit(1) + return 1 + + configLogger(level=logging.INFO if options.verbose else logging.WARNING) + if options.timing: + timer.logger.setLevel(logging.DEBUG) + else: + timer.logger.disabled = True - merger = Merger(options=options, log=log) + merger = Merger(options=options) font = merger.merge(args) outfile 
= 'merged.ttf' - font.save(outfile) - log.lapse("compile and save font") + with timer("compile and save font"): + font.save(outfile) - log.last_time = log.start_time - log.lapse("make one with everything(TOTAL TIME)") if __name__ == "__main__": - main() + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fontTools/misc/arrayTools.py fonttools-3.21.2/Snippets/fontTools/misc/arrayTools.py --- fonttools-3.0/Snippets/fontTools/misc/arrayTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/arrayTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -6,7 +6,9 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +from numbers import Number import math +import operator def calcBounds(array): """Return the bounding rectangle of a 2D points array as a tuple: @@ -21,13 +23,9 @@ def calcIntBounds(array): """Return the integer bounding rectangle of a 2D points array as a tuple: (xMin, yMin, xMax, yMax) + Values are rounded to closest integer. 
""" - xMin, yMin, xMax, yMax = calcBounds(array) - xMin = int(math.floor(xMin)) - xMax = int(math.ceil(xMax)) - yMin = int(math.floor(yMin)) - yMax = int(math.ceil(yMax)) - return xMin, yMin, xMax, yMax + return tuple(round(v) for v in calcBounds(array)) def updateBounds(bounds, p, min=min, max=max): @@ -124,6 +122,136 @@ return (xMin, yMin, xMax, yMax) +class Vector(object): + """A math-like vector.""" + + def __init__(self, values, keep=False): + self.values = values if keep else list(values) + + def __getitem__(self, index): + return self.values[index] + + def __len__(self): + return len(self.values) + + def __repr__(self): + return "Vector(%s)" % self.values + + def _vectorOp(self, other, op): + if isinstance(other, Vector): + assert len(self.values) == len(other.values) + a = self.values + b = other.values + return [op(a[i], b[i]) for i in range(len(self.values))] + if isinstance(other, Number): + return [op(v, other) for v in self.values] + raise NotImplementedError + + def _scalarOp(self, other, op): + if isinstance(other, Number): + return [op(v, other) for v in self.values] + raise NotImplementedError + + def _unaryOp(self, op): + return [op(v) for v in self.values] + + def __add__(self, other): + return Vector(self._vectorOp(other, operator.add), keep=True) + def __iadd__(self, other): + self.values = self._vectorOp(other, operator.add) + return self + __radd__ = __add__ + + def __sub__(self, other): + return Vector(self._vectorOp(other, operator.sub), keep=True) + def __isub__(self, other): + self.values = self._vectorOp(other, operator.sub) + return self + def __rsub__(self, other): + return other + (-self) + + def __mul__(self, other): + return Vector(self._scalarOp(other, operator.mul), keep=True) + def __imul__(self, other): + self.values = self._scalarOp(other, operator.mul) + return self + __rmul__ = __mul__ + + def __truediv__(self, other): + return Vector(self._scalarOp(other, operator.div), keep=True) + def __itruediv__(self, other): + 
self.values = self._scalarOp(other, operator.div) + return self + + def __pos__(self): + return Vector(self._unaryOp(operator.pos), keep=True) + def __neg__(self): + return Vector(self._unaryOp(operator.neg), keep=True) + def __round__(self): + return Vector(self._unaryOp(round), keep=True) + def toInt(self): + return self.__round__() + + def __eq__(self, other): + if type(other) == Vector: + return self.values == other.values + else: + return self.values == other + def __ne__(self, other): + return not self.__eq__(other) + + def __bool__(self): + return any(self.values) + __nonzero__ = __bool__ + + def __abs__(self): + return math.sqrt(sum([x*x for x in self.values])) + def dot(self, other): + a = self.values + b = other.values if type(other) == Vector else b + assert len(a) == len(b) + return sum([a[i] * b[i] for i in range(len(a))]) + + +def pairwise(iterable, reverse=False): + """Iterate over current and next items in iterable, optionally in + reverse order. + + >>> tuple(pairwise([])) + () + >>> tuple(pairwise([], reverse=True)) + () + >>> tuple(pairwise([0])) + ((0, 0),) + >>> tuple(pairwise([0], reverse=True)) + ((0, 0),) + >>> tuple(pairwise([0, 1])) + ((0, 1), (1, 0)) + >>> tuple(pairwise([0, 1], reverse=True)) + ((1, 0), (0, 1)) + >>> tuple(pairwise([0, 1, 2])) + ((0, 1), (1, 2), (2, 0)) + >>> tuple(pairwise([0, 1, 2], reverse=True)) + ((2, 1), (1, 0), (0, 2)) + >>> tuple(pairwise(['a', 'b', 'c', 'd'])) + (('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a')) + >>> tuple(pairwise(['a', 'b', 'c', 'd'], reverse=True)) + (('d', 'c'), ('c', 'b'), ('b', 'a'), ('a', 'd')) + """ + if not iterable: + return + if reverse: + it = reversed(iterable) + else: + it = iter(iterable) + first = next(it, None) + a = first + for b in it: + yield (a, b) + a = b + yield (a, first) + + def _test(): """ >>> import math diff -Nru fonttools-3.0/Snippets/fontTools/misc/bezierTools.py fonttools-3.21.2/Snippets/fontTools/misc/bezierTools.py --- 
fonttools-3.0/Snippets/fontTools/misc/bezierTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/bezierTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,10 +1,20 @@ +# -*- coding: utf-8 -*- """fontTools.misc.bezierTools.py -- tools for working with bezier path segments. """ from __future__ import print_function, division, absolute_import +from fontTools.misc.arrayTools import calcBounds from fontTools.misc.py23 import * +import math + __all__ = [ + "approximateCubicArcLength", + "approximateCubicArcLengthC", + "approximateQuadraticArcLength", + "approximateQuadraticArcLengthC", + "calcQuadraticArcLength", + "calcQuadraticArcLengthC", "calcQuadraticBounds", "calcCubicBounds", "splitLine", @@ -16,9 +26,96 @@ "solveCubic", ] -from fontTools.misc.arrayTools import calcBounds -epsilon = 1e-12 +epsilonDigits = 6 +epsilon = 1e-10 + + +def _dot(v1, v2): + return (v1 * v2.conjugate()).real + + +def _intSecAtan(x): + # In : sympy.integrate(sp.sec(sp.atan(x))) + # Out: x*sqrt(x**2 + 1)/2 + asinh(x)/2 + return x * math.sqrt(x**2 + 1)/2 + math.asinh(x)/2 + + +def calcQuadraticArcLength(pt1, pt2, pt3, approximate_fallback=False): + """Return the arc length for a qudratic bezier segment. + pt1 and pt3 are the "anchor" points, pt2 is the "handle". 
+ + >>> calcQuadraticArcLength((0, 0), (0, 0), (0, 0)) # empty segment + 0.0 + >>> calcQuadraticArcLength((0, 0), (50, 0), (80, 0)) # collinear points + 80.0 + >>> calcQuadraticArcLength((0, 0), (0, 50), (0, 80)) # collinear points vertical + 80.0 + >>> calcQuadraticArcLength((0, 0), (50, 20), (100, 40)) # collinear points + 107.70329614269008 + >>> calcQuadraticArcLength((0, 0), (0, 100), (100, 0)) + 154.02976155645263 + >>> calcQuadraticArcLength((0, 0), (0, 50), (100, 0)) + 120.21581243984076 + >>> calcQuadraticArcLength((0, 0), (50, -10), (80, 50)) + 102.53273816445825 + >>> calcQuadraticArcLength((0, 0), (40, 0), (-40, 0), True) # collinear points, control point outside, exact result should be 66.6666666666667 + 69.41755572720999 + >>> calcQuadraticArcLength((0, 0), (40, 0), (0, 0), True) # collinear points, looping back, exact result should be 40 + 34.4265186329548 + """ + return calcQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3), approximate_fallback) + + +def calcQuadraticArcLengthC(pt1, pt2, pt3, approximate_fallback=False): + """Return the arc length for a qudratic bezier segment using complex points. + pt1 and pt3 are the "anchor" points, pt2 is the "handle".""" + + # Analytical solution to the length of a quadratic bezier. + # I'll explain how I arrived at this later. + d0 = pt2 - pt1 + d1 = pt3 - pt2 + d = d1 - d0 + n = d * 1j + scale = abs(n) + if scale == 0.: + return abs(pt3-pt1) + origDist = _dot(n,d0) + if origDist == 0.: + if _dot(d0,d1) >= 0: + return abs(pt3-pt1) + if approximate_fallback: + return approximateQuadraticArcLengthC(pt1, pt2, pt3) + assert 0 # TODO handle cusps + x0 = _dot(d,d0) / origDist + x1 = _dot(d,d1) / origDist + Len = abs(2 * (_intSecAtan(x1) - _intSecAtan(x0)) * origDist / (scale * (x1 - x0))) + return Len + + +def approximateQuadraticArcLength(pt1, pt2, pt3): + # Approximate length of quadratic Bezier curve using Gauss-Legendre quadrature + # with n=3 points. 
+ return approximateQuadraticArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3)) + + +def approximateQuadraticArcLengthC(pt1, pt2, pt3): + # Approximate length of quadratic Bezier curve using Gauss-Legendre quadrature + # with n=3 points for complex points. + # + # This, essentially, approximates the length-of-derivative function + # to be integrated with the best-matching fifth-degree polynomial + # approximation of it. + # + #https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Legendre_quadrature + + # abs(BezierCurveC[2].diff(t).subs({t:T})) for T in sorted(.5, .5±sqrt(3/5)/2), + # weighted 5/18, 8/18, 5/18 respectively. + v0 = abs(-0.492943519233745*pt1 + 0.430331482911935*pt2 + 0.0626120363218102*pt3) + v1 = abs(pt3-pt1)*0.4444444444444444 + v2 = abs(-0.0626120363218102*pt1 - 0.430331482911935*pt2 + 0.492943519233745*pt3) + + return v0 + v1 + v2 def calcQuadraticBounds(pt1, pt2, pt3): @@ -42,6 +139,50 @@ return calcBounds(points) +def approximateCubicArcLength(pt1, pt2, pt3, pt4): + """Return the approximate arc length for a cubic bezier segment. + pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles". + + >>> approximateCubicArcLength((0, 0), (25, 100), (75, 100), (100, 0)) + 190.04332968932817 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, 50), (100, 100)) + 154.8852074945903 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, 0), (150, 0)) # line; exact result should be 150. + 149.99999999999991 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, 0), (-50, 0)) # cusp; exact result should be 150. + 136.9267662156362 + >>> approximateCubicArcLength((0, 0), (50, 0), (100, -50), (-50, 0)) # cusp + 154.80848416537057 + """ + # Approximate length of cubic Bezier curve using Gauss-Lobatto quadrature + # with n=5 points. 
+ return approximateCubicArcLengthC(complex(*pt1), complex(*pt2), complex(*pt3), complex(*pt4)) + + +def approximateCubicArcLengthC(pt1, pt2, pt3, pt4): + """Return the approximate arc length for a cubic bezier segment of complex points. + pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles".""" + + # Approximate length of cubic Bezier curve using Gauss-Lobatto quadrature + # with n=5 points for complex points. + # + # This, essentially, approximates the length-of-derivative function + # to be integrated with the best-matching seventh-degree polynomial + # approximation of it. + # + # https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Lobatto_rules + + # abs(BezierCurveC[3].diff(t).subs({t:T})) for T in sorted(0, .5±(3/7)**.5/2, .5, 1), + # weighted 1/20, 49/180, 32/90, 49/180, 1/20 respectively. + v0 = abs(pt2-pt1)*.15 + v1 = abs(-0.558983582205757*pt1 + 0.325650248872424*pt2 + 0.208983582205757*pt3 + 0.024349751127576*pt4) + v2 = abs(pt4-pt1+pt3-pt2)*0.26666666666666666 + v3 = abs(-0.024349751127576*pt1 - 0.208983582205757*pt2 - 0.325650248872424*pt3 + 0.558983582205757*pt4) + v4 = abs(pt4-pt3)*.15 + + return v0 + v1 + v2 + v3 + v4 + + def calcCubicBounds(pt1, pt2, pt3, pt4): """Return the bounding rectangle for a cubic bezier segment. pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles". 
@@ -214,12 +355,14 @@ t2 = ts[i+1] delta = (t2 - t1) # calc new a, b and c - a1x = ax * delta**2 - a1y = ay * delta**2 + delta_2 = delta*delta + a1x = ax * delta_2 + a1y = ay * delta_2 b1x = (2*ax*t1 + bx) * delta b1y = (2*ay*t1 + by) * delta - c1x = ax*t1**2 + bx*t1 + cx - c1y = ay*t1**2 + by*t1 + cy + t1_2 = t1*t1 + c1x = ax*t1_2 + bx*t1 + cx + c1y = ay*t1_2 + by*t1 + cy pt1, pt2, pt3 = calcQuadraticPoints((a1x, a1y), (b1x, b1y), (c1x, c1y)) segments.append((pt1, pt2, pt3)) @@ -239,15 +382,21 @@ t1 = ts[i] t2 = ts[i+1] delta = (t2 - t1) + + delta_2 = delta*delta + delta_3 = delta*delta_2 + t1_2 = t1*t1 + t1_3 = t1*t1_2 + # calc new a, b, c and d - a1x = ax * delta**3 - a1y = ay * delta**3 - b1x = (3*ax*t1 + bx) * delta**2 - b1y = (3*ay*t1 + by) * delta**2 - c1x = (2*bx*t1 + cx + 3*ax*t1**2) * delta - c1y = (2*by*t1 + cy + 3*ay*t1**2) * delta - d1x = ax*t1**3 + bx*t1**2 + cx*t1 + dx - d1y = ay*t1**3 + by*t1**2 + cy*t1 + dy + a1x = ax * delta_3 + a1y = ay * delta_3 + b1x = (3*ax*t1 + bx) * delta_2 + b1y = (3*ay*t1 + by) * delta_2 + c1x = (2*bx*t1 + cx + 3*ax*t1_2) * delta + c1y = (2*by*t1 + cy + 3*ay*t1_2) * delta + d1x = ax*t1_3 + bx*t1_2 + cx*t1 + dx + d1y = ay*t1_3 + by*t1_2 + cy*t1 + dy pt1, pt2, pt3, pt4 = calcCubicPoints((a1x, a1y), (b1x, b1y), (c1x, c1y), (d1x, d1y)) segments.append((pt1, pt2, pt3, pt4)) return segments @@ -291,6 +440,21 @@ a*x*x*x + b*x*x + c*x + d = 0 This function returns a list of roots. Note that the returned list is neither guaranteed to be sorted nor to contain unique values! + + >>> solveCubic(1, 1, -6, 0) + [-3.0, -0.0, 2.0] + >>> solveCubic(-10.0, -9.0, 48.0, -29.0) + [-2.9, 1.0, 1.0] + >>> solveCubic(-9.875, -9.0, 47.625, -28.75) + [-2.911392, 1.0, 1.0] + >>> solveCubic(1.0, -4.5, 6.75, -3.375) + [1.5, 1.5, 1.5] + >>> solveCubic(-12.0, 18.0, -9.0, 1.50023651123) + [0.5, 0.5, 0.5] + >>> solveCubic( + ... 9.0, 0.0, 0.0, -7.62939453125e-05 + ... 
) == [-0.0, -0.0, -0.0] + True """ # # adapted from: @@ -309,24 +473,46 @@ Q = (a1*a1 - 3.0*a2)/9.0 R = (2.0*a1*a1*a1 - 9.0*a1*a2 + 27.0*a3)/54.0 - R2_Q3 = R*R - Q*Q*Q - if R2_Q3 < 0: - theta = acos(R/sqrt(Q*Q*Q)) + R2 = R*R + Q3 = Q*Q*Q + R2 = 0 if R2 < epsilon else R2 + Q3 = 0 if abs(Q3) < epsilon else Q3 + + R2_Q3 = R2 - Q3 + + if R2 == 0. and Q3 == 0.: + x = round(-a1/3.0, epsilonDigits) + return [x, x, x] + elif R2_Q3 <= epsilon * .5: + # The epsilon * .5 above ensures that Q3 is not zero. + theta = acos(max(min(R/sqrt(Q3), 1.0), -1.0)) rQ2 = -2.0*sqrt(Q) - x0 = rQ2*cos(theta/3.0) - a1/3.0 - x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1/3.0 - x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1/3.0 + a1_3 = a1/3.0 + x0 = rQ2*cos(theta/3.0) - a1_3 + x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1_3 + x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1_3 + x0, x1, x2 = sorted([x0, x1, x2]) + # Merge roots that are close-enough + if x1 - x0 < epsilon and x2 - x1 < epsilon: + x0 = x1 = x2 = round((x0 + x1 + x2) / 3., epsilonDigits) + elif x1 - x0 < epsilon: + x0 = x1 = round((x0 + x1) / 2., epsilonDigits) + x2 = round(x2, epsilonDigits) + elif x2 - x1 < epsilon: + x0 = round(x0, epsilonDigits) + x1 = x2 = round((x1 + x2) / 2., epsilonDigits) + else: + x0 = round(x0, epsilonDigits) + x1 = round(x1, epsilonDigits) + x2 = round(x2, epsilonDigits) return [x0, x1, x2] else: - if Q == 0 and R == 0: - x = 0 - else: - x = pow(sqrt(R2_Q3)+abs(R), 1/3.0) - x = x + Q/x + x = pow(sqrt(R2_Q3)+abs(R), 1/3.0) + x = x + Q/x if R >= 0.0: x = -x - x = x - a1/3.0 + x = round(x - a1/3.0, epsilonDigits) return [x] diff -Nru fonttools-3.0/Snippets/fontTools/misc/classifyTools.py fonttools-3.21.2/Snippets/fontTools/misc/classifyTools.py --- fonttools-3.0/Snippets/fontTools/misc/classifyTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/classifyTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,173 @@ +""" fontTools.misc.classifyTools.py -- tools for classifying things. 
+""" + +from __future__ import print_function, absolute_import +from fontTools.misc.py23 import * + +class Classifier(object): + + """ + Main Classifier object, used to classify things into similar sets. + """ + + def __init__(self, sort=True): + + self._things = set() # set of all things known so far + self._sets = [] # list of class sets produced so far + self._mapping = {} # map from things to their class set + self._dirty = False + self._sort = sort + + def add(self, set_of_things): + """ + Add a set to the classifier. Any iterable is accepted. + """ + if not set_of_things: + return + + self._dirty = True + + things, sets, mapping = self._things, self._sets, self._mapping + + s = set(set_of_things) + intersection = s.intersection(things) # existing things + s.difference_update(intersection) # new things + difference = s + del s + + # Add new class for new things + if difference: + things.update(difference) + sets.append(difference) + for thing in difference: + mapping[thing] = difference + del difference + + while intersection: + # Take one item and process the old class it belongs to + old_class = mapping[next(iter(intersection))] + old_class_intersection = old_class.intersection(intersection) + + # Update old class to remove items from new set + old_class.difference_update(old_class_intersection) + + # Remove processed items from todo list + intersection.difference_update(old_class_intersection) + + # Add new class for the intersection with old class + sets.append(old_class_intersection) + for thing in old_class_intersection: + mapping[thing] = old_class_intersection + del old_class_intersection + + def update(self, list_of_sets): + """ + Add a a list of sets to the classifier. Any iterable of iterables is accepted. 
+ """ + for s in list_of_sets: + self.add(s) + + def _process(self): + if not self._dirty: + return + + # Do any deferred processing + sets = self._sets + self._sets = [s for s in sets if s] + + if self._sort: + self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s))) + + self._dirty = False + + # Output methods + + def getThings(self): + """Returns the set of all things known so far. + + The return value belongs to the Classifier object and should NOT + be modified while the classifier is still in use. + """ + self._process() + return self._things + + def getMapping(self): + """Returns the mapping from things to their class set. + + The return value belongs to the Classifier object and should NOT + be modified while the classifier is still in use. + """ + self._process() + return self._mapping + + def getClasses(self): + """Returns the list of class sets. + + The return value belongs to the Classifier object and should NOT + be modified while the classifier is still in use. + """ + self._process() + return self._sets + + +def classify(list_of_sets, sort=True): + """ + Takes a iterable of iterables (list of sets from here on; but any + iterable works.), and returns the smallest list of sets such that + each set, is either a subset, or is disjoint from, each of the input + sets. + + In other words, this function classifies all the things present in + any of the input sets, into similar classes, based on which sets + things are a member of. + + If sort=True, return class sets are sorted by decreasing size and + their natural sort order within each class size. Otherwise, class + sets are returned in the order that they were identified, which is + generally not significant. 
+ + >>> classify([]) == ([], {}) + True + >>> classify([[]]) == ([], {}) + True + >>> classify([[], []]) == ([], {}) + True + >>> classify([[1]]) == ([{1}], {1: {1}}) + True + >>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}}) + True + >>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) + True + >>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) + True + >>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}}) + True + >>> classify([[1,2],[2,4,5]]) == ( + ... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) + True + >>> classify([[1,2],[2,4,5]], sort=False) == ( + ... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) + True + >>> classify([[1,2,9],[2,4,5]], sort=False) == ( + ... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5}, + ... 9: {1, 9}}) + True + >>> classify([[1,2,9,15],[2,4,5]], sort=False) == ( + ... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5}, + ... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}}) + True + >>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False) + >>> set([frozenset(c) for c in classes]) == set( + ... 
[frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})]) + True + >>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}} + True + """ + classifier = Classifier(sort=sort) + classifier.update(list_of_sets) + return classifier.getClasses(), classifier.getMapping() + + +if __name__ == "__main__": + import sys, doctest + sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed) diff -Nru fonttools-3.0/Snippets/fontTools/misc/cliTools.py fonttools-3.21.2/Snippets/fontTools/misc/cliTools.py --- fonttools-3.0/Snippets/fontTools/misc/cliTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/cliTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,26 @@ +"""Collection of utilities for command-line interfaces and console scripts.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import os +import re + + +numberAddedRE = re.compile("#\d+$") + + +def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False): + dirName, fileName = os.path.split(input) + fileName, ext = os.path.splitext(fileName) + if outputDir: + dirName = outputDir + fileName = numberAddedRE.split(fileName)[0] + if extension is None: + extension = os.path.splitext(input)[1] + output = os.path.join(dirName, fileName + extension) + n = 1 + if not overWrite: + while os.path.exists(output): + output = os.path.join( + dirName, fileName + "#" + repr(n) + extension) + n += 1 + return output diff -Nru fonttools-3.0/Snippets/fontTools/misc/eexec.py fonttools-3.21.2/Snippets/fontTools/misc/eexec.py --- fonttools-3.0/Snippets/fontTools/misc/eexec.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/eexec.py 2018-01-08 12:40:40.000000000 +0000 @@ -19,19 +19,35 @@ def decrypt(cipherstring, R): + r""" + >>> testStr = b"\0\0asdadads asds\265" + >>> decryptedStr, R = decrypt(testStr, 12321) + >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' + True + 
>>> R == 36142 + True + """ plainList = [] for cipher in cipherstring: plain, R = _decryptChar(cipher, R) plainList.append(plain) - plainstring = strjoin(plainList) + plainstring = bytesjoin(plainList) return plainstring, int(R) def encrypt(plainstring, R): + r""" + >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' + >>> encryptedStr, R = encrypt(testStr, 12321) + >>> encryptedStr == b"\0\0asdadads asds\265" + True + >>> R == 36142 + True + """ cipherList = [] for plain in plainstring: cipher, R = _encryptChar(plain, R) cipherList.append(cipher) - cipherstring = strjoin(cipherList) + cipherstring = bytesjoin(cipherList) return cipherstring, int(R) @@ -41,15 +57,11 @@ def deHexString(h): import binascii - h = strjoin(h.split()) + h = bytesjoin(h.split()) return binascii.unhexlify(h) -def _test(): - testStr = "\0\0asdadads asds\265" - print(decrypt, decrypt(testStr, 12321)) - print(encrypt, encrypt(testStr, 12321)) - - if __name__ == "__main__": - _test() + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/misc/encodingTools_test.py fonttools-3.21.2/Snippets/fontTools/misc/encodingTools_test.py --- fonttools-3.0/Snippets/fontTools/misc/encodingTools_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/encodingTools_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -import unittest -from .encodingTools import getEncoding - -class EncodingTest(unittest.TestCase): - - def test_encoding_unicode(self): - - self.assertEqual(getEncoding(3, 0, None), "utf_16_be") # MS Symbol is Unicode as well - self.assertEqual(getEncoding(3, 1, None), "utf_16_be") - self.assertEqual(getEncoding(3, 10, None), "utf_16_be") - self.assertEqual(getEncoding(0, 3, None), "utf_16_be") - - def test_encoding_macroman_misc(self): - self.assertEqual(getEncoding(1, 0, 
17), "mac_turkish") - self.assertEqual(getEncoding(1, 0, 37), "mac_romanian") - self.assertEqual(getEncoding(1, 0, 45), "mac_roman") - - def test_extended_mac_encodings(self): - encoding = getEncoding(1, 1, 0) # Mac Japanese - decoded = b'\xfe'.decode(encoding) - self.assertEqual(decoded, unichr(0x2122)) - - def test_extended_unknown(self): - self.assertEqual(getEncoding(10, 11, 12), None) - self.assertEqual(getEncoding(10, 11, 12, "ascii"), "ascii") - self.assertEqual(getEncoding(10, 11, 12, default="ascii"), "ascii") - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/misc/fixedTools.py fonttools-3.21.2/Snippets/fontTools/misc/fixedTools.py --- fonttools-3.0/Snippets/fontTools/misc/fixedTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/fixedTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,10 +3,16 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +import logging + +log = logging.getLogger(__name__) __all__ = [ "fixedToFloat", "floatToFixed", + "floatToFixedToFloat", + "ensureVersionIsLong", + "versionToFixed", ] def fixedToFloat(value, precisionBits): @@ -42,6 +48,34 @@ def floatToFixed(value, precisionBits): """Converts a float to a fixed-point number given the number of - precisionBits. Ie. int(round(value * (1<>> import sys + >>> handler = logging.StreamHandler(sys.stdout) + >>> formatter = LevelFormatter( + ... fmt={ + ... '*': '[%(levelname)s] %(message)s', + ... 'DEBUG': '%(name)s [%(levelname)s] %(message)s', + ... 'INFO': '%(message)s', + ... 
}) + >>> handler.setFormatter(formatter) + >>> log = logging.getLogger('test') + >>> log.setLevel(logging.DEBUG) + >>> log.addHandler(handler) + >>> log.debug('this uses a custom format string') + test [DEBUG] this uses a custom format string + >>> log.info('this also uses a custom format string') + this also uses a custom format string + >>> log.warning("this one uses the default format string") + [WARNING] this one uses the default format string + """ + + def __init__(self, fmt=None, datefmt=None, style="%"): + if style != '%': + raise ValueError( + "only '%' percent style is supported in both python 2 and 3") + if fmt is None: + fmt = DEFAULT_FORMATS + if isinstance(fmt, basestring): + default_format = fmt + custom_formats = {} + elif isinstance(fmt, collections.Mapping): + custom_formats = dict(fmt) + default_format = custom_formats.pop("*", None) + else: + raise TypeError('fmt must be a str or a dict of str: %r' % fmt) + super(LevelFormatter, self).__init__(default_format, datefmt) + self.default_format = self._fmt + self.custom_formats = {} + for level, fmt in custom_formats.items(): + level = logging._checkLevel(level) + self.custom_formats[level] = fmt + + def format(self, record): + if self.custom_formats: + fmt = self.custom_formats.get(record.levelno, self.default_format) + if self._fmt != fmt: + self._fmt = fmt + # for python >= 3.2, _style needs to be set if _fmt changes + if PercentStyle: + self._style = PercentStyle(fmt) + return super(LevelFormatter, self).format(record) + + +def configLogger(**kwargs): + """ Do basic configuration for the logging system. This is more or less + the same as logging.basicConfig with some additional options and defaults. + + The default behaviour is to create a StreamHandler which writes to + sys.stderr, set a formatter using the DEFAULT_FORMATS strings, and add + the handler to the top-level library logger ("fontTools"). 
+ + A number of optional keyword arguments may be specified, which can alter + the default behaviour. + + logger Specifies the logger name or a Logger instance to be configured. + (it defaults to "fontTools" logger). Unlike basicConfig, this + function can be called multiple times to reconfigure a logger. + If the logger or any of its children already exists before the + call is made, they will be reset before the new configuration + is applied. + filename Specifies that a FileHandler be created, using the specified + filename, rather than a StreamHandler. + filemode Specifies the mode to open the file, if filename is specified + (if filemode is unspecified, it defaults to 'a'). + format Use the specified format string for the handler. This argument + also accepts a dictionary of format strings keyed by level name, + to allow customising the records appearance for specific levels. + The special '*' key is for 'any other' level. + datefmt Use the specified date/time format. + level Set the logger level to the specified level. + stream Use the specified stream to initialize the StreamHandler. Note + that this argument is incompatible with 'filename' - if both + are present, 'stream' is ignored. + handlers If specified, this should be an iterable of already created + handlers, which will be added to the logger. Any handler + in the list which does not have a formatter assigned will be + assigned the formatter created in this function. + filters If specified, this should be an iterable of already created + filters, which will be added to the handler(s), if the latter + do(es) not already have filters assigned. + propagate All loggers have a "propagate" attribute initially set to True, + which determines whether to continue searching for handlers up + the logging hierarchy. By default, this arguments sets the + "propagate" attribute to False. + """ + # using kwargs to enforce keyword-only arguments in py2. 
+ handlers = kwargs.pop("handlers", None) + if handlers is None: + if "stream" in kwargs and "filename" in kwargs: + raise ValueError("'stream' and 'filename' should not be " + "specified together") + else: + if "stream" in kwargs or "filename" in kwargs: + raise ValueError("'stream' or 'filename' should not be " + "specified together with 'handlers'") + if handlers is None: + filename = kwargs.pop("filename", None) + mode = kwargs.pop("filemode", 'a') + if filename: + h = logging.FileHandler(filename, mode) + else: + stream = kwargs.pop("stream", None) + h = logging.StreamHandler(stream) + handlers = [h] + # By default, the top-level library logger is configured. + logger = kwargs.pop("logger", "fontTools") + if not logger or isinstance(logger, basestring): + # empty "" or None means the 'root' logger + logger = logging.getLogger(logger) + # before (re)configuring, reset named logger and its children (if exist) + _resetExistingLoggers(parent=logger.name) + # use DEFAULT_FORMATS if 'format' is None + fs = kwargs.pop("format", None) + dfs = kwargs.pop("datefmt", None) + # XXX: '%' is the only format style supported on both py2 and 3 + style = kwargs.pop("style", '%') + fmt = LevelFormatter(fs, dfs, style) + filters = kwargs.pop("filters", []) + for h in handlers: + if h.formatter is None: + h.setFormatter(fmt) + if not h.filters: + for f in filters: + h.addFilter(f) + logger.addHandler(h) + if logger.name != "root": + # stop searching up the hierarchy for handlers + logger.propagate = kwargs.pop("propagate", False) + # set a custom severity level + level = kwargs.pop("level", None) + if level is not None: + logger.setLevel(level) + if kwargs: + keys = ', '.join(kwargs.keys()) + raise ValueError('Unrecognised argument(s): %s' % keys) + + +def _resetExistingLoggers(parent="root"): + """ Reset the logger named 'parent' and all its children to their initial + state, if they already exist in the current configuration. 
+ """ + root = logging.root + # get sorted list of all existing loggers + existing = sorted(root.manager.loggerDict.keys()) + if parent == "root": + # all the existing loggers are children of 'root' + loggers_to_reset = [parent] + existing + elif parent not in existing: + # nothing to do + return + elif parent in existing: + loggers_to_reset = [parent] + # collect children, starting with the entry after parent name + i = existing.index(parent) + 1 + prefixed = parent + "." + pflen = len(prefixed) + num_existing = len(existing) + while i < num_existing: + if existing[i][:pflen] == prefixed: + loggers_to_reset.append(existing[i]) + i += 1 + for name in loggers_to_reset: + if name == "root": + root.setLevel(logging.WARNING) + for h in root.handlers[:]: + root.removeHandler(h) + for f in root.filters[:]: + root.removeFilters(f) + root.disabled = False + else: + logger = root.manager.loggerDict[name] + logger.level = logging.NOTSET + logger.handlers = [] + logger.filters = [] + logger.propagate = True + logger.disabled = False + + +class Timer(object): + """ Keeps track of overall time and split/lap times. + + >>> import time + >>> timer = Timer() + >>> time.sleep(0.01) + >>> print("First lap:", timer.split()) + First lap: ... + >>> time.sleep(0.02) + >>> print("Second lap:", timer.split()) + Second lap: ... + >>> print("Overall time:", timer.time()) + Overall time: ... + + Can be used as a context manager inside with-statements. + + >>> with Timer() as t: + ... time.sleep(0.01) + >>> print("%0.3f seconds" % t.elapsed) + 0... seconds + + If initialised with a logger, it can log the elapsed time automatically + upon exiting the with-statement. + + >>> import logging + >>> log = logging.getLogger("fontTools") + >>> configLogger(level="DEBUG", format="%(message)s", stream=sys.stdout) + >>> with Timer(log, 'do something'): + ... time.sleep(0.01) + Took ... 
to do something + + The same Timer instance, holding a reference to a logger, can be reused + in multiple with-statements, optionally with different messages or levels. + + >>> timer = Timer(log) + >>> with timer(): + ... time.sleep(0.01) + elapsed time: ...s + >>> with timer('redo it', level=logging.INFO): + ... time.sleep(0.02) + Took ... to redo it + + It can also be used as a function decorator to log the time elapsed to run + the decorated function. + + >>> @timer() + ... def test1(): + ... time.sleep(0.01) + >>> @timer('run test 2', level=logging.INFO) + ... def test2(): + ... time.sleep(0.02) + >>> test1() + Took ... to run 'test1' + >>> test2() + Took ... to run test 2 + """ + + # timeit.default_timer choses the most accurate clock for each platform + _time = timeit.default_timer + default_msg = "elapsed time: %(time).3fs" + default_format = "Took %(time).3fs to %(msg)s" + + def __init__(self, logger=None, msg=None, level=None, start=None): + self.reset(start) + if logger is None: + for arg in ('msg', 'level'): + if locals().get(arg) is not None: + raise ValueError( + "'%s' can't be specified without a 'logger'" % arg) + self.logger = logger + self.level = level if level is not None else TIME_LEVEL + self.msg = msg + + def reset(self, start=None): + """ Reset timer to 'start_time' or the current time. """ + if start is None: + self.start = self._time() + else: + self.start = start + self.last = self.start + self.elapsed = 0.0 + + def time(self): + """ Return the overall time (in seconds) since the timer started. """ + return self._time() - self.start + + def split(self): + """ Split and return the lap time (in seconds) in between splits. """ + current = self._time() + self.elapsed = current - self.last + self.last = current + return self.elapsed + + def formatTime(self, msg, time): + """ Format 'time' value in 'msg' and return formatted string. + If 'msg' contains a '%(time)' format string, try to use that. + Otherwise, use the predefined 'default_format'. 
+ If 'msg' is empty or None, fall back to 'default_msg'. + """ + if not msg: + msg = self.default_msg + if msg.find("%(time)") < 0: + msg = self.default_format % {"msg": msg, "time": time} + else: + try: + msg = msg % {"time": time} + except (KeyError, ValueError): + pass # skip if the format string is malformed + return msg + + def __enter__(self): + """ Start a new lap """ + self.last = self._time() + self.elapsed = 0.0 + return self + + def __exit__(self, exc_type, exc_value, traceback): + """ End the current lap. If timer has a logger, log the time elapsed, + using the format string in self.msg (or the default one). + """ + time = self.split() + if self.logger is None or exc_type: + # if there's no logger attached, or if any exception occurred in + # the with-statement, exit without logging the time + return + message = self.formatTime(self.msg, time) + # Allow log handlers to see the individual parts to facilitate things + # like a server accumulating aggregate stats. + msg_parts = { 'msg': self.msg, 'time': time } + self.logger.log(self.level, message, msg_parts) + + def __call__(self, func_or_msg=None, **kwargs): + """ If the first argument is a function, return a decorator which runs + the wrapped function inside Timer's context manager. + Otherwise, treat the first argument as a 'msg' string and return an updated + Timer instance, referencing the same logger. + A 'level' keyword can also be passed to override self.level. 
+ """ + if isinstance(func_or_msg, collections.Callable): + func = func_or_msg + # use the function name when no explicit 'msg' is provided + if not self.msg: + self.msg = "run '%s'" % func.__name__ + + @wraps(func) + def wrapper(*args, **kwds): + with self: + return func(*args, **kwds) + return wrapper + else: + msg = func_or_msg or kwargs.get("msg") + level = kwargs.get("level", self.level) + return self.__class__(self.logger, msg, level) + + def __float__(self): + return self.elapsed + + def __int__(self): + return int(self.elapsed) + + def __str__(self): + return "%.3f" % self.elapsed + + +class ChannelsFilter(logging.Filter): + """ Filter out records emitted from a list of enabled channel names, + including their children. It works the same as the logging.Filter class, + but allows to specify multiple channel names. + + >>> import sys + >>> handler = logging.StreamHandler(sys.stdout) + >>> handler.setFormatter(logging.Formatter("%(message)s")) + >>> filter = ChannelsFilter("A.B", "C.D") + >>> handler.addFilter(filter) + >>> root = logging.getLogger() + >>> root.addHandler(handler) + >>> root.setLevel(level=logging.DEBUG) + >>> logging.getLogger('A.B').debug('this record passes through') + this record passes through + >>> logging.getLogger('A.B.C').debug('records from children also pass') + records from children also pass + >>> logging.getLogger('C.D').debug('this one as well') + this one as well + >>> logging.getLogger('A.B.').debug('also this one') + also this one + >>> logging.getLogger('A.F').debug('but this one does not!') + >>> logging.getLogger('C.DE').debug('neither this one!') + """ + + def __init__(self, *names): + self.names = names + self.num = len(names) + self.lenghts = {n: len(n) for n in names} + + def filter(self, record): + if self.num == 0: + return True + for name in self.names: + nlen = self.lenghts[name] + if name == record.name: + return True + elif (record.name.find(name, 0, nlen) == 0 + and record.name[nlen] == "."): + return True + 
return False + + +class CapturingLogHandler(logging.Handler): + def __init__(self, logger, level): + self.records = [] + self.level = logging._checkLevel(level) + if isinstance(logger, basestring): + self.logger = logging.getLogger(logger) + else: + self.logger = logger + + def __enter__(self): + self.original_disabled = self.logger.disabled + self.original_level = self.logger.level + + self.logger.addHandler(self) + self.logger.level = self.level + self.logger.disabled = False + + return self + + def __exit__(self, type, value, traceback): + self.logger.removeHandler(self) + self.logger.level = self.original_level + self.logger.disabled = self.logger.disabled + return self + + def handle(self, record): + self.records.append(record) + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + def assertRegex(self, regexp): + import re + pattern = re.compile(regexp) + for r in self.records: + if pattern.search(r.msg): + return True + assert 0, "Pattern '%s' not found in logger records" % regexp + + +class LogMixin(object): + """ Mixin class that adds logging functionality to another class. + You can define a new class that subclasses from LogMixin as well as + other base classes through multiple inheritance. + All instances of that class will have a 'log' property that returns + a logging.Logger named after their respective .. + For example: + + >>> class BaseClass(object): + ... pass + >>> class MyClass(LogMixin, BaseClass): + ... pass + >>> a = MyClass() + >>> isinstance(a.log, logging.Logger) + True + >>> print(a.log.name) + fontTools.misc.loggingTools.MyClass + >>> class AnotherClass(MyClass): + ... 
pass + >>> b = AnotherClass() + >>> isinstance(b.log, logging.Logger) + True + >>> print(b.log.name) + fontTools.misc.loggingTools.AnotherClass + """ + + @property + def log(self): + name = ".".join([self.__class__.__module__, self.__class__.__name__]) + return logging.getLogger(name) + + +def deprecateArgument(name, msg, category=UserWarning): + """ Raise a warning about deprecated function argument 'name'. """ + warnings.warn( + "%r is deprecated; %s" % (name, msg), category=category, stacklevel=3) + + +def deprecateFunction(msg, category=UserWarning): + """ Decorator to raise a warning when a deprecated function is called. """ + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + warnings.warn( + "%r is deprecated; %s" % (func.__name__, msg), + category=category, stacklevel=2) + return func(*args, **kwargs) + return wrapper + return decorator + + +if __name__ == "__main__": + import doctest + sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed) diff -Nru fonttools-3.0/Snippets/fontTools/misc/macCreatorType.py fonttools-3.21.2/Snippets/fontTools/misc/macCreatorType.py --- fonttools-3.0/Snippets/fontTools/misc/macCreatorType.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/macCreatorType.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,10 +2,14 @@ from fontTools.misc.py23 import * import sys try: + import xattr +except ImportError: + xattr = None +try: import MacOS except ImportError: MacOS = None -from .py23 import * + def _reverseString(s): s = list(s) @@ -14,6 +18,15 @@ def getMacCreatorAndType(path): + if xattr is not None: + try: + finderInfo = xattr.getxattr(path, 'com.apple.FinderInfo') + except (KeyError, IOError): + pass + else: + fileType = Tag(finderInfo[:4]) + fileCreator = Tag(finderInfo[4:8]) + return fileCreator, fileType if MacOS is not None: fileCreator, fileType = MacOS.GetCreatorAndType(path) if sys.version_info[:2] < (2, 7) and sys.byteorder == "little": @@ -28,5 +41,11 @@ def 
setMacCreatorAndType(path, fileCreator, fileType): + if xattr is not None: + from fontTools.misc.textTools import pad + if not all(len(s) == 4 for s in (fileCreator, fileType)): + raise TypeError('arg must be string of 4 chars') + finderInfo = pad(bytesjoin([fileType, fileCreator]), 32) + xattr.setxattr(path, 'com.apple.FinderInfo', finderInfo) if MacOS is not None: MacOS.SetCreatorAndType(path, fileCreator, fileType) diff -Nru fonttools-3.0/Snippets/fontTools/misc/macRes.py fonttools-3.21.2/Snippets/fontTools/misc/macRes.py --- fonttools-3.0/Snippets/fontTools/misc/macRes.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/macRes.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,233 @@ +""" Tools for reading Mac resource forks. """ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import struct +from fontTools.misc import sstruct +from collections import OrderedDict +try: + from collections.abc import MutableMapping +except ImportError: + from UserDict import DictMixin as MutableMapping + + +class ResourceError(Exception): + pass + + +class ResourceReader(MutableMapping): + + def __init__(self, fileOrPath): + self._resources = OrderedDict() + if hasattr(fileOrPath, 'read'): + self.file = fileOrPath + else: + try: + # try reading from the resource fork (only works on OS X) + self.file = self.openResourceFork(fileOrPath) + self._readFile() + return + except (ResourceError, IOError): + # if it fails, use the data fork + self.file = self.openDataFork(fileOrPath) + self._readFile() + + @staticmethod + def openResourceFork(path): + with open(path + '/..namedfork/rsrc', 'rb') as resfork: + data = resfork.read() + infile = BytesIO(data) + infile.name = path + return infile + + @staticmethod + def openDataFork(path): + with open(path, 'rb') as datafork: + data = datafork.read() + infile = BytesIO(data) + infile.name = path + return infile + + def _readFile(self): + self._readHeaderAndMap() 
+ self._readTypeList() + + def _read(self, numBytes, offset=None): + if offset is not None: + try: + self.file.seek(offset) + except OverflowError: + raise ResourceError("Failed to seek offset ('offset' is too large)") + if self.file.tell() != offset: + raise ResourceError('Failed to seek offset (reached EOF)') + try: + data = self.file.read(numBytes) + except OverflowError: + raise ResourceError("Cannot read resource ('numBytes' is too large)") + if len(data) != numBytes: + raise ResourceError('Cannot read resource (not enough data)') + return data + + def _readHeaderAndMap(self): + self.file.seek(0) + headerData = self._read(ResourceForkHeaderSize) + sstruct.unpack(ResourceForkHeader, headerData, self) + # seek to resource map, skip reserved + mapOffset = self.mapOffset + 22 + resourceMapData = self._read(ResourceMapHeaderSize, mapOffset) + sstruct.unpack(ResourceMapHeader, resourceMapData, self) + self.absTypeListOffset = self.mapOffset + self.typeListOffset + self.absNameListOffset = self.mapOffset + self.nameListOffset + + def _readTypeList(self): + absTypeListOffset = self.absTypeListOffset + numTypesData = self._read(2, absTypeListOffset) + self.numTypes, = struct.unpack('>H', numTypesData) + absTypeListOffset2 = absTypeListOffset + 2 + for i in range(self.numTypes + 1): + resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i + resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset) + item = sstruct.unpack(ResourceTypeItem, resTypeItemData) + resType = tostr(item['type'], encoding='mac-roman') + refListOffset = absTypeListOffset + item['refListOffset'] + numRes = item['numRes'] + 1 + resources = self._readReferenceList(resType, refListOffset, numRes) + self._resources[resType] = resources + + def _readReferenceList(self, resType, refListOffset, numRes): + resources = [] + for i in range(numRes): + refOffset = refListOffset + ResourceRefItemSize * i + refData = self._read(ResourceRefItemSize, refOffset) + res = Resource(resType) + 
res.decompile(refData, self) + resources.append(res) + return resources + + def __getitem__(self, resType): + return self._resources[resType] + + def __delitem__(self, resType): + del self._resources[resType] + + def __setitem__(self, resType, resources): + self._resources[resType] = resources + + def __len__(self): + return len(self._resources) + + def __iter__(self): + return iter(self._resources) + + def keys(self): + return self._resources.keys() + + @property + def types(self): + return list(self._resources.keys()) + + def countResources(self, resType): + """Return the number of resources of a given type.""" + try: + return len(self[resType]) + except KeyError: + return 0 + + def getIndices(self, resType): + numRes = self.countResources(resType) + if numRes: + return list(range(1, numRes+1)) + else: + return [] + + def getNames(self, resType): + """Return list of names of all resources of a given type.""" + return [res.name for res in self.get(resType, []) if res.name is not None] + + def getIndResource(self, resType, index): + """Return resource of given type located at an index ranging from 1 + to the number of resources for that type, or None if not found. 
+ """ + if index < 1: + return None + try: + res = self[resType][index-1] + except (KeyError, IndexError): + return None + return res + + def getNamedResource(self, resType, name): + """Return the named resource of given type, else return None.""" + name = tostr(name, encoding='mac-roman') + for res in self.get(resType, []): + if res.name == name: + return res + return None + + def close(self): + if not self.file.closed: + self.file.close() + + +class Resource(object): + + def __init__(self, resType=None, resData=None, resID=None, resName=None, + resAttr=None): + self.type = resType + self.data = resData + self.id = resID + self.name = resName + self.attr = resAttr + + def decompile(self, refData, reader): + sstruct.unpack(ResourceRefItem, refData, self) + # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct + self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset])) + absDataOffset = reader.dataOffset + self.dataOffset + dataLength, = struct.unpack(">L", reader._read(4, absDataOffset)) + self.data = reader._read(dataLength) + if self.nameOffset == -1: + return + absNameOffset = reader.absNameListOffset + self.nameOffset + nameLength, = struct.unpack('B', reader._read(1, absNameOffset)) + name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength)) + self.name = tostr(name, encoding='mac-roman') + + +ResourceForkHeader = """ + > # big endian + dataOffset: L + mapOffset: L + dataLen: L + mapLen: L +""" + +ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader) + +ResourceMapHeader = """ + > # big endian + attr: H + typeListOffset: H + nameListOffset: H +""" + +ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader) + +ResourceTypeItem = """ + > # big endian + type: 4s + numRes: H + refListOffset: H +""" + +ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem) + +ResourceRefItem = """ + > # big endian + id: h + nameOffset: h + attr: B + dataOffset: 3s + reserved: L +""" + +ResourceRefItemSize = 
sstruct.calcsize(ResourceRefItem) diff -Nru fonttools-3.0/Snippets/fontTools/misc/psCharStrings.py fonttools-3.21.2/Snippets/fontTools/misc/psCharStrings.py --- fonttools-3.0/Snippets/fontTools/misc/psCharStrings.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/psCharStrings.py 2018-01-08 12:40:40.000000000 +0000 @@ -4,10 +4,13 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import fixedToFloat +from fontTools.pens.boundsPen import BoundsPen import struct +import logging -DEBUG = 0 +log = logging.getLogger(__name__) def read_operator(self, b0, data, index): @@ -16,7 +19,10 @@ index = index+1 else: op = b0 - operator = self.operators[op] + try: + operator = self.operators[op] + except KeyError: + return None, index value = self.handle_operator(operator) return value, index @@ -41,7 +47,7 @@ def read_fixed1616(self, b0, data, index): value, = struct.unpack(">l", data[index:index+4]) - return value / 65536, index+4 + return fixedToFloat(value, precisionBits=16), index+4 def read_reserved(self, b0, data, index): assert NotImplementedError @@ -85,9 +91,7 @@ '.', 'E', 'E-', None, '-'] realNibblesDict = {v:i for i,v in enumerate(realNibbles)} - -class ByteCodeBase(object): - pass +maxOpStack = 193 def buildOperatorDict(operatorList): @@ -117,6 +121,7 @@ (10, 'callsubr'), (11, 'return'), (14, 'endchar'), + (15, 'vsindex'), (16, 'blend'), (18, 'hstemhm'), (19, 'hintmask'), @@ -162,7 +167,6 @@ ((12, 37), 'flex1'), ] - def getIntEncoder(format): if format == "cff": fourByteOp = bytechr(29) @@ -213,7 +217,7 @@ def encodeFixed(f, pack=struct.pack): # For T2 only - return b"\xff" + pack(">l", int(round(f * 65536))) + return b"\xff" + pack(">l", round(f * 65536)) def encodeFloat(f): # For CFF only, used in cffLib @@ -242,261 +246,14 @@ class CharStringCompileError(Exception): pass -class T2CharString(ByteCodeBase): - - operandEncoding = t2OperandEncoding - 
operators, opcodes = buildOperatorDict(t2Operators) - - def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None): - if program is None: - program = [] - self.bytecode = bytecode - self.program = program - self.private = private - self.globalSubrs = globalSubrs if globalSubrs is not None else [] - - def __repr__(self): - if self.bytecode is None: - return "<%s (source) at %x>" % (self.__class__.__name__, id(self)) - else: - return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self)) - - def getIntEncoder(self): - return encodeIntT2 - - def getFixedEncoder(self): - return encodeFixed - - def decompile(self): - if not self.needsDecompilation(): - return - subrs = getattr(self.private, "Subrs", []) - decompiler = SimpleT2Decompiler(subrs, self.globalSubrs) - decompiler.execute(self) - - def draw(self, pen): - subrs = getattr(self.private, "Subrs", []) - extractor = T2OutlineExtractor(pen, subrs, self.globalSubrs, - self.private.nominalWidthX, self.private.defaultWidthX) - extractor.execute(self) - self.width = extractor.width - - def compile(self): - if self.bytecode is not None: - return - assert self.program, "illegal CharString: decompiled to empty program" - assert self.program[-1] in ("endchar", "return", "callsubr", "callgsubr", - "seac"), "illegal CharString" - bytecode = [] - opcodes = self.opcodes - program = self.program - encodeInt = self.getIntEncoder() - encodeFixed = self.getFixedEncoder() - i = 0 - end = len(program) - while i < end: - token = program[i] - i = i + 1 - tp = type(token) - if issubclass(tp, basestring): - try: - bytecode.extend(bytechr(b) for b in opcodes[token]) - except KeyError: - raise CharStringCompileError("illegal operator: %s" % token) - if token in ('hintmask', 'cntrmask'): - bytecode.append(program[i]) # hint mask - i = i + 1 - elif tp == int: - bytecode.append(encodeInt(token)) - elif tp == float: - bytecode.append(encodeFixed(token)) - else: - assert 0, "unsupported type: %s" % tp - try: - 
bytecode = bytesjoin(bytecode) - except TypeError: - print(bytecode) - raise - self.setBytecode(bytecode) - - def needsDecompilation(self): - return self.bytecode is not None - - def setProgram(self, program): - self.program = program - self.bytecode = None - - def setBytecode(self, bytecode): - self.bytecode = bytecode - self.program = None - - def getToken(self, index, - len=len, byteord=byteord, basestring=basestring, - isinstance=isinstance): - if self.bytecode is not None: - if index >= len(self.bytecode): - return None, 0, 0 - b0 = byteord(self.bytecode[index]) - index = index + 1 - handler = self.operandEncoding[b0] - token, index = handler(self, b0, self.bytecode, index) - else: - if index >= len(self.program): - return None, 0, 0 - token = self.program[index] - index = index + 1 - isOperator = isinstance(token, basestring) - return token, isOperator, index - - def getBytes(self, index, nBytes): - if self.bytecode is not None: - newIndex = index + nBytes - bytes = self.bytecode[index:newIndex] - index = newIndex - else: - bytes = self.program[index] - index = index + 1 - assert len(bytes) == nBytes - return bytes, index - - def handle_operator(self, operator): - return operator - - def toXML(self, xmlWriter): - from fontTools.misc.textTools import num2binary - if self.bytecode is not None: - xmlWriter.dumphex(self.bytecode) - else: - index = 0 - args = [] - while True: - token, isOperator, index = self.getToken(index) - if token is None: - break - if isOperator: - args = [str(arg) for arg in args] - if token in ('hintmask', 'cntrmask'): - hintMask, isOperator, index = self.getToken(index) - bits = [] - for byte in hintMask: - bits.append(num2binary(byteord(byte), 8)) - hintMask = strjoin(bits) - line = ' '.join(args + [token, hintMask]) - else: - line = ' '.join(args + [token]) - xmlWriter.write(line) - xmlWriter.newline() - args = [] - else: - args.append(token) - - def fromXML(self, name, attrs, content): - from fontTools.misc.textTools import binary2num, 
readHex - if attrs.get("raw"): - self.setBytecode(readHex(content)) - return - content = strjoin(content) - content = content.split() - program = [] - end = len(content) - i = 0 - while i < end: - token = content[i] - i = i + 1 - try: - token = int(token) - except ValueError: - try: - token = float(token) - except ValueError: - program.append(token) - if token in ('hintmask', 'cntrmask'): - mask = content[i] - maskBytes = b"" - for j in range(0, len(mask), 8): - maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8])) - program.append(maskBytes) - i = i + 1 - else: - program.append(token) - else: - program.append(token) - self.setProgram(program) - - -t1Operators = [ -# opcode name - (1, 'hstem'), - (3, 'vstem'), - (4, 'vmoveto'), - (5, 'rlineto'), - (6, 'hlineto'), - (7, 'vlineto'), - (8, 'rrcurveto'), - (9, 'closepath'), - (10, 'callsubr'), - (11, 'return'), - (13, 'hsbw'), - (14, 'endchar'), - (21, 'rmoveto'), - (22, 'hmoveto'), - (30, 'vhcurveto'), - (31, 'hvcurveto'), - ((12, 0), 'dotsection'), - ((12, 1), 'vstem3'), - ((12, 2), 'hstem3'), - ((12, 6), 'seac'), - ((12, 7), 'sbw'), - ((12, 12), 'div'), - ((12, 16), 'callothersubr'), - ((12, 17), 'pop'), - ((12, 33), 'setcurrentpoint'), -] - -class T1CharString(T2CharString): - - operandEncoding = t1OperandEncoding - operators, opcodes = buildOperatorDict(t1Operators) - - def __init__(self, bytecode=None, program=None, subrs=None): - if program is None: - program = [] - self.bytecode = bytecode - self.program = program - self.subrs = subrs - - def getIntEncoder(self): - return encodeIntT1 - - def getFixedEncoder(self): - def encodeFixed(value): - raise TypeError("Type 1 charstrings don't support floating point operands") - - def decompile(self): - if self.bytecode is None: - return - program = [] - index = 0 - while True: - token, isOperator, index = self.getToken(index) - if token is None: - break - program.append(token) - self.setProgram(program) - - def draw(self, pen): - extractor = T1OutlineExtractor(pen, 
self.subrs) - extractor.execute(self) - self.width = extractor.width - - class SimpleT2Decompiler(object): - def __init__(self, localSubrs, globalSubrs): + def __init__(self, localSubrs, globalSubrs, private=None): self.localSubrs = localSubrs self.localBias = calcSubrBias(localSubrs) self.globalSubrs = globalSubrs self.globalBias = calcSubrBias(globalSubrs) + self.private = private self.reset() def reset(self): @@ -504,6 +261,23 @@ self.operandStack = [] self.hintCount = 0 self.hintMaskBytes = 0 + self.numRegions = 0 + + def check_program(self, program): + if not hasattr(self, 'private') or self.private is None: + # Type 1 charstrings don't have self.private. + # Type2 CFF charstrings may have self.private == None. + # In both cases, they are not CFF2 charstrings + isCFF2 = False + else: + isCFF2 = self.private._isCFF2 + if isCFF2: + if program: + assert program[-1] not in ("seac",), "illegal CharString Terminator" + else: + assert program, "illegal CharString: decompiled to empty program" + assert program[-1] in ("endchar", "return", "callsubr", "callgsubr", + "seac"), "illegal CharString" def execute(self, charString): self.callingStack.append(charString) @@ -533,9 +307,7 @@ else: pushToStack(token) if needsDecompilation: - assert program, "illegal CharString: decompiled to empty program" - assert program[-1] in ("endchar", "return", "callsubr", "callgsubr", - "seac"), "illegal CharString" + self.check_program(program) charString.setProgram(program) del self.callingStack[-1] @@ -640,19 +412,100 @@ def op_roll(self, index): raise NotImplementedError -class T2OutlineExtractor(SimpleT2Decompiler): + # TODO(behdad): move to T2OutlineExtractor and add a 'setVariation' + # method that takes VarStoreData and a location + def op_blend(self, index): + if self.numRegions == 0: + self.numRegions = self.private.getNumRegions() + numBlends = self.pop() + numOps = numBlends * (self.numRegions + 1) + blendArgs = self.operandStack[-numOps:] + del 
self.operandStack[:-(numOps-numBlends)] # Leave the default operands on the stack. + + def op_vsindex(self, index): + vi = self.pop() + self.numRegions = self.private.getNumRegions(vi) + + +t1Operators = [ +# opcode name + (1, 'hstem'), + (3, 'vstem'), + (4, 'vmoveto'), + (5, 'rlineto'), + (6, 'hlineto'), + (7, 'vlineto'), + (8, 'rrcurveto'), + (9, 'closepath'), + (10, 'callsubr'), + (11, 'return'), + (13, 'hsbw'), + (14, 'endchar'), + (21, 'rmoveto'), + (22, 'hmoveto'), + (30, 'vhcurveto'), + (31, 'hvcurveto'), + ((12, 0), 'dotsection'), + ((12, 1), 'vstem3'), + ((12, 2), 'hstem3'), + ((12, 6), 'seac'), + ((12, 7), 'sbw'), + ((12, 12), 'div'), + ((12, 16), 'callothersubr'), + ((12, 17), 'pop'), + ((12, 33), 'setcurrentpoint'), +] + + +class T2WidthExtractor(SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX): + SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) + self.nominalWidthX = nominalWidthX + self.defaultWidthX = defaultWidthX + + def reset(self): + SimpleT2Decompiler.reset(self) + self.gotWidth = 0 + self.width = 0 + + def popallWidth(self, evenOdd=0): + args = self.popall() + if not self.gotWidth: + if evenOdd ^ (len(args) % 2): + self.width = self.nominalWidthX + args[0] + args = args[1:] + else: + self.width = self.defaultWidthX + self.gotWidth = 1 + return args + + def countHints(self): + args = self.popallWidth() + self.hintCount = self.hintCount + len(args) // 2 + + def op_rmoveto(self, index): + self.popallWidth() + + def op_hmoveto(self, index): + self.popallWidth(1) + + def op_vmoveto(self, index): + self.popallWidth(1) + + def op_endchar(self, index): + self.popallWidth() + + +class T2OutlineExtractor(T2WidthExtractor): def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX): - SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) + T2WidthExtractor.__init__( + self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX) self.pen = pen - self.nominalWidthX = 
nominalWidthX - self.defaultWidthX = defaultWidthX def reset(self): - SimpleT2Decompiler.reset(self) - self.hints = [] - self.gotWidth = 0 - self.width = 0 + T2WidthExtractor.reset(self) self.currentPoint = (0, 0) self.sawMoveTo = 0 @@ -687,21 +540,6 @@ # finishing a sub path. self.closePath() - def popallWidth(self, evenOdd=0): - args = self.popall() - if not self.gotWidth: - if evenOdd ^ (len(args) % 2): - self.width = self.nominalWidthX + args[0] - args = args[1:] - else: - self.width = self.defaultWidthX - self.gotWidth = 1 - return args - - def countHints(self): - args = self.popallWidth() - self.hintCount = self.hintCount + len(args) // 2 - # # hint operators # @@ -957,7 +795,6 @@ self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc)) return args - class T1OutlineExtractor(T2OutlineExtractor): def __init__(self, pen, subrs): @@ -1100,15 +937,262 @@ def op_vstem3(self, index): self.popall() # XXX +class T2CharString(object): + + operandEncoding = t2OperandEncoding + operators, opcodes = buildOperatorDict(t2Operators) + decompilerClass = SimpleT2Decompiler + outlineExtractor = T2OutlineExtractor + isCFF2 = False + + def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None): + if program is None: + program = [] + self.bytecode = bytecode + self.program = program + self.private = private + self.globalSubrs = globalSubrs if globalSubrs is not None else [] + + def __repr__(self): + if self.bytecode is None: + return "<%s (source) at %x>" % (self.__class__.__name__, id(self)) + else: + return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self)) + + def getIntEncoder(self): + return encodeIntT2 + + def getFixedEncoder(self): + return encodeFixed + + def decompile(self): + if not self.needsDecompilation(): + return + subrs = getattr(self.private, "Subrs", []) + decompiler = self.decompilerClass(subrs, self.globalSubrs, self.private) + decompiler.execute(self) + + def draw(self, pen): + subrs = getattr(self.private, "Subrs", []) + extractor = 
self.outlineExtractor(pen, subrs, self.globalSubrs, + self.private.nominalWidthX, self.private.defaultWidthX) + extractor.execute(self) + self.width = extractor.width + + def calcBounds(self): + boundsPen = BoundsPen(None) + self.draw(boundsPen) + return boundsPen.bounds + + def check_program(self, program, isCFF2=False): + if isCFF2: + if self.program: + assert self.program[-1] not in ("seac",), "illegal CFF2 CharString Termination" + else: + assert self.program, "illegal CharString: decompiled to empty program" + assert self.program[-1] in ("endchar", "return", "callsubr", "callgsubr", "seac"), "illegal CharString" + + def compile(self, isCFF2=False): + if self.bytecode is not None: + return + opcodes = self.opcodes + program = self.program + self.check_program(program, isCFF2=isCFF2) + bytecode = [] + encodeInt = self.getIntEncoder() + encodeFixed = self.getFixedEncoder() + i = 0 + end = len(program) + while i < end: + token = program[i] + i = i + 1 + tp = type(token) + if issubclass(tp, basestring): + try: + bytecode.extend(bytechr(b) for b in opcodes[token]) + except KeyError: + raise CharStringCompileError("illegal operator: %s" % token) + if token in ('hintmask', 'cntrmask'): + bytecode.append(program[i]) # hint mask + i = i + 1 + elif tp == int: + bytecode.append(encodeInt(token)) + elif tp == float: + bytecode.append(encodeFixed(token)) + else: + assert 0, "unsupported type: %s" % tp + try: + bytecode = bytesjoin(bytecode) + except TypeError: + log.error(bytecode) + raise + self.setBytecode(bytecode) + + if isCFF2: + # If present, remove return and endchar operators. 
+ if self.bytecode and (byteord(self.bytecode[-1]) in (11, 14)): + self.bytecode = self.bytecode[:-1] + + def needsDecompilation(self): + return self.bytecode is not None + + def setProgram(self, program): + self.program = program + self.bytecode = None + + def setBytecode(self, bytecode): + self.bytecode = bytecode + self.program = None + + def getToken(self, index, + len=len, byteord=byteord, basestring=basestring, + isinstance=isinstance): + if self.bytecode is not None: + if index >= len(self.bytecode): + return None, 0, 0 + b0 = byteord(self.bytecode[index]) + index = index + 1 + handler = self.operandEncoding[b0] + token, index = handler(self, b0, self.bytecode, index) + else: + if index >= len(self.program): + return None, 0, 0 + token = self.program[index] + index = index + 1 + isOperator = isinstance(token, basestring) + return token, isOperator, index + + def getBytes(self, index, nBytes): + if self.bytecode is not None: + newIndex = index + nBytes + bytes = self.bytecode[index:newIndex] + index = newIndex + else: + bytes = self.program[index] + index = index + 1 + assert len(bytes) == nBytes + return bytes, index + + def handle_operator(self, operator): + return operator + + def toXML(self, xmlWriter): + from fontTools.misc.textTools import num2binary + if self.bytecode is not None: + xmlWriter.dumphex(self.bytecode) + else: + index = 0 + args = [] + while True: + token, isOperator, index = self.getToken(index) + if token is None: + break + if isOperator: + args = [str(arg) for arg in args] + if token in ('hintmask', 'cntrmask'): + hintMask, isOperator, index = self.getToken(index) + bits = [] + for byte in hintMask: + bits.append(num2binary(byteord(byte), 8)) + hintMask = strjoin(bits) + line = ' '.join(args + [token, hintMask]) + else: + line = ' '.join(args + [token]) + xmlWriter.write(line) + xmlWriter.newline() + args = [] + else: + args.append(token) + if args: + if self.isCFF2: + # CFF2Subr's can have numeric arguments on the stack after the last 
operator. + args = [str(arg) for arg in args] + line = ' '.join(args) + xmlWriter.write(line) + else: + assert 0, "T2Charstring or Subr has items on the stack after last operator." + + def fromXML(self, name, attrs, content): + from fontTools.misc.textTools import binary2num, readHex + if attrs.get("raw"): + self.setBytecode(readHex(content)) + return + content = strjoin(content) + content = content.split() + program = [] + end = len(content) + i = 0 + while i < end: + token = content[i] + i = i + 1 + try: + token = int(token) + except ValueError: + try: + token = float(token) + except ValueError: + program.append(token) + if token in ('hintmask', 'cntrmask'): + mask = content[i] + maskBytes = b"" + for j in range(0, len(mask), 8): + maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8])) + program.append(maskBytes) + i = i + 1 + else: + program.append(token) + else: + program.append(token) + self.setProgram(program) + +class CFF2Subr(T2CharString): + isCFF2 = True + +class T1CharString(T2CharString): + + operandEncoding = t1OperandEncoding + operators, opcodes = buildOperatorDict(t1Operators) + + def __init__(self, bytecode=None, program=None, subrs=None): + if program is None: + program = [] + self.bytecode = bytecode + self.program = program + self.subrs = subrs + + def getIntEncoder(self): + return encodeIntT1 + + def getFixedEncoder(self): + def encodeFixed(value): + raise TypeError("Type 1 charstrings don't support floating point operands") + + def decompile(self): + if self.bytecode is None: + return + program = [] + index = 0 + while True: + token, isOperator, index = self.getToken(index) + if token is None: + break + program.append(token) + self.setProgram(program) + + def draw(self, pen): + extractor = T1OutlineExtractor(pen, self.subrs) + extractor.execute(self) + self.width = extractor.width -class DictDecompiler(ByteCodeBase): +class DictDecompiler(object): operandEncoding = cffDictOperandEncoding - def __init__(self, strings): + def __init__(self, 
strings, parent=None): self.stack = [] self.strings = strings self.dict = {} + self.parent = parent def getDict(self): assert len(self.stack) == 0, "non-empty stack" @@ -1125,7 +1209,6 @@ value, index = handler(self, b0, data, index) if value is not None: push(value) - def pop(self): value = self.stack[-1] del self.stack[-1] @@ -1138,7 +1221,7 @@ def handle_operator(self, operator): operator, argType = operator - if isinstance(argType, type(())): + if isinstance(argType, tuple): value = () for i in range(len(argType)-1, -1, -1): arg = argType[i] @@ -1147,20 +1230,74 @@ else: arghandler = getattr(self, "arg_" + argType) value = arghandler(operator) - self.dict[operator] = value + if operator == "blend": + self.stack.extend(value) + else: + self.dict[operator] = value def arg_number(self, name): - return self.pop() + if isinstance(self.stack[0], list): + out = self.arg_blend_number(self.stack) + else: + out = self.pop() + return out + + def arg_blend_number(self, name): + out = [] + blendArgs = self.pop() + numMasters = len(blendArgs) + out.append(blendArgs) + out.append("blend") + dummy = self.popall() + return blendArgs + def arg_SID(self, name): return self.strings[self.pop()] def arg_array(self, name): return self.popall() + def arg_blendList(self, name): + """ + There may be non-blend args at the top of the stack. We first calculate + where the blend args start in the stack. These are the last + numMasters*numBlends) +1 args. + The blend args starts with numMasters relative coordinate values, the BlueValues in the list from the default master font. This is followed by + numBlends list of values. Each of value in one of these lists is the + Variable Font delta for the matching region. + + We re-arrange this to be a list of numMaster entries. Each entry starts with the corresponding default font relative value, and is followed by + the delta values. We then convert the default values, the first item in each entry, to an absolute value. 
+ """ + vsindex = self.dict.get('vsindex', 0) + numMasters = self.parent.getNumRegions(vsindex) + 1 # only a PrivateDict has blended ops. + numBlends = self.pop() + args = self.popall() + numArgs = len(args) + # The spec says that there should be no non-blended Blue Values,. + assert(numArgs == numMasters * numBlends) + value = [None]*numBlends + numDeltas = numMasters-1 + i = 0 + prevVal = 0 + while i < numBlends: + newVal = args[i] + prevVal + prevVal = newVal + masterOffset = numBlends + (i* numDeltas) + blendList = [newVal] + args[masterOffset:masterOffset+numDeltas] + value[i] = blendList + i += 1 + return value + def arg_delta(self, name): + valueList = self.popall() out = [] - current = 0 - for v in self.popall(): - current = current + v - out.append(current) + if valueList and isinstance(valueList[0], list): + # arg_blendList() has already converted these to absolute values. + out = valueList + else: + current = 0 + for v in valueList: + current = current + v + out.append(current) return out diff -Nru fonttools-3.0/Snippets/fontTools/misc/psLib.py fonttools-3.21.2/Snippets/fontTools/misc/psLib.py --- fonttools-3.0/Snippets/fontTools/misc/psLib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/psLib.py 2018-01-08 12:40:40.000000000 +0000 @@ -5,17 +5,20 @@ import re import collections from string import whitespace +import logging -ps_special = '()<>[]{}%' # / is one too, but we take care of that one differently +log = logging.getLogger(__name__) -skipwhiteRE = re.compile("[%s]*" % whitespace) -endofthingPat = "[^][(){}<>/%%%s]*" % whitespace +ps_special = b'()<>[]{}%' # / is one too, but we take care of that one differently + +skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"])) +endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"]) endofthingRE = re.compile(endofthingPat) -commentRE = re.compile("%[^\n\r]*") +commentRE = re.compile(b"%[^\n\r]*") # XXX This not entirely correct as it doesn't allow *nested* 
embedded parens: -stringPat = r""" +stringPat = br""" \( ( ( @@ -29,16 +32,44 @@ [^()]* \) """ -stringPat = "".join(stringPat.split()) +stringPat = b"".join(stringPat.split()) stringRE = re.compile(stringPat) -hexstringRE = re.compile("<[%s0-9A-Fa-f]*>" % whitespace) +hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"])) class PSTokenError(Exception): pass class PSError(Exception): pass -class PSTokenizer(BytesIO): +class PSTokenizer(object): + + def __init__(self, buf=b''): + # Force self.buf to be a byte string + buf = tobytes(buf) + self.buf = buf + self.len = len(buf) + self.pos = 0 + self.closed = False + + def read(self, n=-1): + """Read at most 'n' bytes from the buffer, or less if the read + hits EOF before obtaining 'n' bytes. + If 'n' is negative or omitted, read all data until EOF is reached. + """ + if self.closed: + raise ValueError("I/O operation on closed file") + if n is None or n < 0: + newpos = self.len + else: + newpos = min(self.pos+n, self.len) + r = self.buf[self.pos:newpos] + self.pos = newpos + return r + + def close(self): + if not self.closed: + self.closed = True + del self.buf, self.pos def getnexttoken(self, # localize some stuff, for performance @@ -47,32 +78,30 @@ stringmatch=stringRE.match, hexstringmatch=hexstringRE.match, commentmatch=commentRE.match, - endmatch=endofthingRE.match, - whitematch=skipwhiteRE.match): + endmatch=endofthingRE.match): - _, nextpos = whitematch(self.buf, self.pos).span() - self.pos = nextpos + self.skipwhite() if self.pos >= self.len: return None, None pos = self.pos buf = self.buf - char = buf[pos] + char = bytechr(byteord(buf[pos])) if char in ps_special: - if char in '{}[]': + if char in b'{}[]': tokentype = 'do_special' token = char - elif char == '%': + elif char == b'%': tokentype = 'do_comment' _, nextpos = commentmatch(buf, pos).span() token = buf[pos:nextpos] - elif char == '(': + elif char == b'(': tokentype = 'do_string' m = stringmatch(buf, pos) if m is None: raise 
PSTokenError('bad string at character %d' % pos) _, nextpos = m.span() token = buf[pos:nextpos] - elif char == '<': + elif char == b'<': tokentype = 'do_hexstring' m = hexstringmatch(buf, pos) if m is None: @@ -82,7 +111,7 @@ else: raise PSTokenError('bad token at character %d' % pos) else: - if char == '/': + if char == b'/': tokentype = 'do_literal' m = endmatch(buf, pos+1) else: @@ -93,6 +122,7 @@ _, nextpos = m.span() token = buf[pos:nextpos] self.pos = pos + len(token) + token = tostr(token, encoding='ascii') return tokentype, token def skipwhite(self, whitematch=skipwhiteRE.match): @@ -101,7 +131,6 @@ def starteexec(self): self.pos = self.pos + 1 - #self.skipwhite() self.dirtybuf = self.buf[self.pos:] self.buf, R = eexec.decrypt(self.dirtybuf, 55665) self.len = len(self.buf) @@ -113,11 +142,6 @@ self.buf = self.dirtybuf del self.dirtybuf - def flush(self): - if self.buflist: - self.buf = self.buf + "".join(self.buflist) - self.buflist = [] - class PSInterpreter(PSOperators): @@ -157,7 +181,6 @@ try: while 1: tokentype, token = getnexttoken() - #print token if not token: break if tokentype: @@ -169,14 +192,18 @@ handle_object(object) tokenizer.close() self.tokenizer = None - finally: + except: if self.tokenizer is not None: - if 0: - print('ps error:\n- - - - - - -') - print(self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos]) - print('>>>') - print(self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50]) - print('- - - - - - -') + log.debug( + 'ps error:\n' + '- - - - - - -\n' + '%s\n' + '>>>\n' + '%s\n' + '- - - - - - -', + self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos], + self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50]) + raise def handle_object(self, object): if not (self.proclevel or object.literal or object.type == 'proceduretype'): @@ -339,12 +366,3 @@ rawfont = fontdir[fontNames[0]] interpreter.close() return unpack_item(rawfont) - - -if __name__ == "__main__": - import EasyDialogs - path = 
EasyDialogs.AskFileForOpen() - if path: - from fontTools import t1Lib - data, kind = t1Lib.read(path) - font = suckfont(data) diff -Nru fonttools-3.0/Snippets/fontTools/misc/py23.py fonttools-3.21.2/Snippets/fontTools/misc/py23.py --- fonttools-3.0/Snippets/fontTools/misc/py23.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/py23.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,18 +3,32 @@ from __future__ import print_function, division, absolute_import import sys + +__all__ = ['basestring', 'unicode', 'unichr', 'byteord', 'bytechr', 'BytesIO', + 'StringIO', 'UnicodeIO', 'strjoin', 'bytesjoin', 'tobytes', 'tostr', + 'tounicode', 'Tag', 'open', 'range', 'xrange', 'round', 'Py23Error'] + + +class Py23Error(NotImplementedError): + pass + + +PY3 = sys.version_info[0] == 3 +PY2 = sys.version_info[0] == 2 + + try: - basestring + basestring = basestring except NameError: basestring = str try: - unicode + unicode = unicode except NameError: unicode = str try: - unichr + unichr = unichr if sys.maxunicode < 0x10FFFF: # workarounds for Python 2 "narrow" builds with UCS2-only support. @@ -156,6 +170,379 @@ return tobytes(joiner).join(tobytes(item) for item in iterable) +import os +import io as _io + +try: + from msvcrt import setmode as _setmode +except ImportError: + _setmode = None # only available on the Windows platform + + +def open(file, mode='r', buffering=-1, encoding=None, errors=None, + newline=None, closefd=True, opener=None): + """ Wrapper around `io.open` that bridges the differences between Python 2 + and Python 3's built-in `open` functions. In Python 2, `io.open` is a + backport of Python 3's `open`, whereas in Python 3, it is an alias of the + built-in `open` function. + + One difference is that the 'opener' keyword argument is only supported in + Python 3. Here we pass the value of 'opener' only when it is not None. 
+ This causes Python 2 to raise TypeError, complaining about the number of + expected arguments, so it must be avoided in py2 or py2-3 contexts. + + Another difference between 2 and 3, this time on Windows, has to do with + opening files by name or by file descriptor. + + On the Windows C runtime, the 'O_BINARY' flag is defined which disables + the newlines translation ('\r\n' <=> '\n') when reading/writing files. + On both Python 2 and 3 this flag is always set when opening files by name. + This way, the newlines translation at the MSVCRT level doesn't interfere + with the Python io module's own newlines translation. + + However, when opening files via fd, on Python 2 the fd is simply copied, + regardless of whether it has the 'O_BINARY' flag set or not. + This becomes a problem in the case of stdout, stdin, and stderr, because on + Windows these are opened in text mode by default (ie. don't have the + O_BINARY flag set). + + On Python 3, this issue has been fixed, and all fds are now opened in + binary mode on Windows, including standard streams. Similarly here, I use + the `_setmode` function to ensure that integer file descriptors are + O_BINARY'ed before I pass them on to io.open. + + For more info, see: https://bugs.python.org/issue10841 + """ + if isinstance(file, int): + # the 'file' argument is an integer file descriptor + fd = file + if fd < 0: + raise ValueError('negative file descriptor') + if _setmode: + # `_setmode` function sets the line-end translation and returns the + # value of the previous mode. AFAIK there's no `_getmode`, so to + # check if the previous mode already had the bit set, I fist need + # to duplicate the file descriptor, set the binary flag on the copy + # and check the returned value. 
+ fdcopy = os.dup(fd) + current_mode = _setmode(fdcopy, os.O_BINARY) + if not (current_mode & os.O_BINARY): + # the binary mode was not set: use the file descriptor's copy + file = fdcopy + if closefd: + # close the original file descriptor + os.close(fd) + else: + # ensure the copy is closed when the file object is closed + closefd = True + else: + # original file descriptor already had binary flag, close copy + os.close(fdcopy) + + if opener is not None: + # "opener" is not supported on Python 2, use it at your own risk! + return _io.open( + file, mode, buffering, encoding, errors, newline, closefd, + opener=opener) + else: + return _io.open( + file, mode, buffering, encoding, errors, newline, closefd) + + +# always use iterator for 'range' on both py 2 and 3 +try: + range = xrange +except NameError: + range = range + +def xrange(*args, **kwargs): + raise Py23Error("'xrange' is not defined. Use 'range' instead.") + + +import math as _math + +try: + isclose = _math.isclose +except AttributeError: + # math.isclose() was only added in Python 3.5 + + _isinf = _math.isinf + _fabs = _math.fabs + + def isclose(a, b, rel_tol=1e-09, abs_tol=0): + """ + Python 2 implementation of Python 3.5 math.isclose() + https://hg.python.org/cpython/file/v3.5.2/Modules/mathmodule.c#l1993 + """ + # sanity check on the inputs + if rel_tol < 0 or abs_tol < 0: + raise ValueError("tolerances must be non-negative") + # short circuit exact equality -- needed to catch two infinities of + # the same sign. And perhaps speeds things up a bit sometimes. + if a == b: + return True + # This catches the case of two infinities of opposite sign, or + # one infinity and one finite number. Two infinities of opposite + # sign would otherwise have an infinite relative tolerance. + # Two infinities of the same sign are caught by the equality check + # above. 
+ if _isinf(a) or _isinf(b): + return False + # Cast to float to allow decimal.Decimal arguments + if not isinstance(a, float): + a = float(a) + if not isinstance(b, float): + b = float(b) + # now do the regular computation + # this is essentially the "weak" test from the Boost library + diff = _fabs(b - a) + result = ((diff <= _fabs(rel_tol * a)) or + (diff <= _fabs(rel_tol * b)) or + (diff <= abs_tol)) + return result + + +import decimal as _decimal + +if PY3: + def round2(number, ndigits=None): + """ + Implementation of Python 2 built-in round() function. + + Rounds a number to a given precision in decimal digits (default + 0 digits). The result is a floating point number. Values are rounded + to the closest multiple of 10 to the power minus ndigits; if two + multiples are equally close, rounding is done away from 0. + + ndigits may be negative. + + See Python 2 documentation: + https://docs.python.org/2/library/functions.html?highlight=round#round + """ + if ndigits is None: + ndigits = 0 + + if ndigits < 0: + exponent = 10 ** (-ndigits) + quotient, remainder = divmod(number, exponent) + if remainder >= exponent//2 and number >= 0: + quotient += 1 + return float(quotient * exponent) + else: + exponent = _decimal.Decimal('10') ** (-ndigits) + + d = _decimal.Decimal.from_float(number).quantize( + exponent, rounding=_decimal.ROUND_HALF_UP) + + return float(d) + + if sys.version_info[:2] >= (3, 6): + # in Python 3.6, 'round3' is an alias to the built-in 'round' + round = round3 = round + else: + # in Python3 < 3.6 we need work around the inconsistent behavior of + # built-in round(), whereby floats accept a second None argument, + # while integers raise TypeError. 
See https://bugs.python.org/issue27936 + _round = round + + def round3(number, ndigits=None): + return _round(number) if ndigits is None else _round(number, ndigits) + + round = round3 + +else: + # in Python 2, 'round2' is an alias to the built-in 'round' and + # 'round' is shadowed by 'round3' + round2 = round + + def round3(number, ndigits=None): + """ + Implementation of Python 3 built-in round() function. + + Rounds a number to a given precision in decimal digits (default + 0 digits). This returns an int when ndigits is omitted or is None, + otherwise the same type as the number. + + Values are rounded to the closest multiple of 10 to the power minus + ndigits; if two multiples are equally close, rounding is done toward + the even choice (aka "Banker's Rounding"). For example, both round(0.5) + and round(-0.5) are 0, and round(1.5) is 2. + + ndigits may be negative. + + See Python 3 documentation: + https://docs.python.org/3/library/functions.html?highlight=round#round + + Derived from python-future: + https://github.com/PythonCharmers/python-future/blob/master/src/future/builtins/newround.py + """ + if ndigits is None: + ndigits = 0 + # return an int when called with one argument + totype = int + # shortcut if already an integer, or a float with no decimal digits + inumber = totype(number) + if inumber == number: + return inumber + else: + # return the same type as the number, when called with two arguments + totype = type(number) + + m = number * (10 ** ndigits) + # if number is half-way between two multiples, and the mutliple that is + # closer to zero is even, we use the (slow) pure-Python implementation + if isclose(m % 1, .5) and int(m) % 2 == 0: + if ndigits < 0: + exponent = 10 ** (-ndigits) + quotient, remainder = divmod(number, exponent) + half = exponent//2 + if remainder > half or (remainder == half and quotient % 2 != 0): + quotient += 1 + d = quotient * exponent + else: + exponent = _decimal.Decimal('10') ** (-ndigits) if ndigits != 0 else 1 + + d 
= _decimal.Decimal.from_float(number).quantize( + exponent, rounding=_decimal.ROUND_HALF_EVEN) + else: + # else we use the built-in round() as it produces the same results + d = round2(number, ndigits) + + return totype(d) + + round = round3 + + +import logging + + +class _Logger(logging.Logger): + """ Add support for 'lastResort' handler introduced in Python 3.2. """ + + def callHandlers(self, record): + # this is the same as Python 3.5's logging.Logger.callHandlers + c = self + found = 0 + while c: + for hdlr in c.handlers: + found = found + 1 + if record.levelno >= hdlr.level: + hdlr.handle(record) + if not c.propagate: + c = None # break out + else: + c = c.parent + if (found == 0): + if logging.lastResort: + if record.levelno >= logging.lastResort.level: + logging.lastResort.handle(record) + elif logging.raiseExceptions and not self.manager.emittedNoHandlerWarning: + sys.stderr.write("No handlers could be found for logger" + " \"%s\"\n" % self.name) + self.manager.emittedNoHandlerWarning = True + + +class _StderrHandler(logging.StreamHandler): + """ This class is like a StreamHandler using sys.stderr, but always uses + whatever sys.stderr is currently set to rather than the value of + sys.stderr at handler construction time. + """ + def __init__(self, level=logging.NOTSET): + """ + Initialize the handler. + """ + logging.Handler.__init__(self, level) + + @property + def stream(self): + # the try/execept avoids failures during interpreter shutdown, when + # globals are set to None + try: + return sys.stderr + except AttributeError: + return __import__('sys').stderr + + +if not hasattr(logging, 'lastResort'): + # for Python pre-3.2, we need to define the "last resort" handler used when + # clients don't explicitly configure logging (in Python 3.2 and above this is + # already defined). The handler prints the bare message to sys.stderr, only + # for events of severity WARNING or greater. + # To obtain the pre-3.2 behaviour, you can set logging.lastResort to None. 
+ # https://docs.python.org/3.5/howto/logging.html#what-happens-if-no-configuration-is-provided + logging.lastResort = _StderrHandler(logging.WARNING) + # Also, we need to set the Logger class to one which supports the last resort + # handler. All new loggers instantiated after this call will use the custom + # logger class (the already existing ones, like the 'root' logger, will not) + logging.setLoggerClass(_Logger) + + +try: + from types import SimpleNamespace +except ImportError: + class SimpleNamespace(object): + """ + A backport of Python 3.3's ``types.SimpleNamespace``. + """ + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + def __repr__(self): + keys = sorted(self.__dict__) + items = ("{0}={1!r}".format(k, self.__dict__[k]) for k in keys) + return "{0}({1})".format(type(self).__name__, ", ".join(items)) + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + +if sys.version_info[:2] > (3, 4): + from contextlib import redirect_stdout, redirect_stderr +else: + # `redirect_stdout` was added with python3.4, while `redirect_stderr` + # with python3.5. For simplicity, I redefine both for any versions + # less than or equal to 3.4. + # The code below is copied from: + # https://github.com/python/cpython/blob/57161aa/Lib/contextlib.py + + class _RedirectStream(object): + + _stream = None + + def __init__(self, new_target): + self._new_target = new_target + # We use a list of old targets to make this CM re-entrant + self._old_targets = [] + + def __enter__(self): + self._old_targets.append(getattr(sys, self._stream)) + setattr(sys, self._stream, self._new_target) + return self._new_target + + def __exit__(self, exctype, excinst, exctb): + setattr(sys, self._stream, self._old_targets.pop()) + + + class redirect_stdout(_RedirectStream): + """Context manager for temporarily redirecting stdout to another file. 
+ # How to send help() to stderr + with redirect_stdout(sys.stderr): + help(dir) + # How to write help() to a file + with open('help.txt', 'w') as f: + with redirect_stdout(f): + help(pow) + """ + + _stream = "stdout" + + + class redirect_stderr(_RedirectStream): + """Context manager for temporarily redirecting stderr to another file.""" + + _stream = "stderr" + + if __name__ == "__main__": import doctest, sys sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/misc/sstruct.py fonttools-3.21.2/Snippets/fontTools/misc/sstruct.py --- fonttools-3.0/Snippets/fontTools/misc/sstruct.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/sstruct.py 2018-01-08 12:40:40.000000000 +0000 @@ -133,6 +133,7 @@ _formatcache = {} def getformat(fmt): + fmt = tostr(fmt, encoding="ascii") try: formatstring, names, fixes = _formatcache[fmt] except KeyError: diff -Nru fonttools-3.0/Snippets/fontTools/misc/symfont.py fonttools-3.21.2/Snippets/fontTools/misc/symfont.py --- fonttools-3.0/Snippets/fontTools/misc/symfont.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/symfont.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,196 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen +from functools import partial +from itertools import count +import sympy as sp +import sys + +n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic + +t, x, y = sp.symbols('t x y', real=True) +c = sp.symbols('c', real=False) # Complex representation instead of x/y + +X = tuple(sp.symbols('x:%d'%(n+1), real=True)) +Y = tuple(sp.symbols('y:%d'%(n+1), real=True)) +P = tuple(zip(*(sp.symbols('p:%d[%s]'%(n+1,w), real=True) for w in '01'))) +C = tuple(sp.symbols('c:%d'%(n+1), real=False)) + +# Cubic Bernstein basis functions +BinomialCoefficient = [(1, 0)] +for i in range(1, n+1): + last = BinomialCoefficient[-1] + this = 
tuple(last[j-1]+last[j] for j in range(len(last)))+(0,) + BinomialCoefficient.append(this) +BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient) +del last, this + +BernsteinPolynomial = tuple( + tuple(c * t**i * (1-t)**(n-i) for i,c in enumerate(coeffs)) + for n,coeffs in enumerate(BinomialCoefficient)) + +BezierCurve = tuple( + tuple(sum(P[i][j]*bernstein for i,bernstein in enumerate(bernsteins)) + for j in range(2)) + for n,bernsteins in enumerate(BernsteinPolynomial)) +BezierCurveC = tuple( + sum(C[i]*bernstein for i,bernstein in enumerate(bernsteins)) + for n,bernsteins in enumerate(BernsteinPolynomial)) + + +def green(f, curveXY): + f = -sp.integrate(sp.sympify(f), y) + f = f.subs({x:curveXY[0], y:curveXY[1]}) + f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1)) + return f + + +class _BezierFuncsLazy(dict): + + def __init__(self, symfunc): + self._symfunc = symfunc + self._bezfuncs = {} + + def __missing__(self, i): + args = ['p%d'%d for d in range(i+1)] + f = green(self._symfunc, BezierCurve[i]) + f = sp.gcd_terms(f.collect(sum(P,()))) # Optimize + return sp.lambdify(args, f) + +class GreenPen(BasePen): + + _BezierFuncs = {} + + @classmethod + def _getGreenBezierFuncs(celf, func): + funcstr = str(func) + if not funcstr in celf._BezierFuncs: + celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func) + return celf._BezierFuncs[funcstr] + + def __init__(self, func, glyphset=None): + BasePen.__init__(self, glyphset) + self._funcs = self._getGreenBezierFuncs(func) + self.value = 0 + + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _endPath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + # Green theorem is not defined on open contours. 
+ raise NotImplementedError + + def _lineTo(self, p1): + p0 = self._getCurrentPoint() + self.value += self._funcs[1](p0, p1) + + def _qCurveToOne(self, p1, p2): + p0 = self._getCurrentPoint() + self.value += self._funcs[2](p0, p1, p2) + + def _curveToOne(self, p1, p2, p3): + p0 = self._getCurrentPoint() + self.value += self._funcs[3](p0, p1, p2, p3) + +# Sample pens. +# Do not use this in real code. +# Use fontTools.pens.momentsPen.MomentsPen instead. +AreaPen = partial(GreenPen, func=1) +MomentXPen = partial(GreenPen, func=x) +MomentYPen = partial(GreenPen, func=y) +MomentXXPen = partial(GreenPen, func=x*x) +MomentYYPen = partial(GreenPen, func=y*y) +MomentXYPen = partial(GreenPen, func=x*y) + + +def printGreenPen(penName, funcs, file=sys.stdout): + + print( +'''from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + +class %s(BasePen): + + def __init__(self, glyphset=None): + BasePen.__init__(self, glyphset) +'''%penName, file=file) + for name,f in funcs: + print(' self.%s = 0' % name, file=file) + print(''' + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _endPath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + # Green theorem is not defined on open contours. 
+ raise NotImplementedError +''', end='', file=file) + + for n in (1, 2, 3): + + if n == 1: + print(''' + def _lineTo(self, p1): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 +''', file=file) + elif n == 2: + print(''' + def _qCurveToOne(self, p1, p2): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 +''', file=file) + elif n == 3: + print(''' + def _curveToOne(self, p1, p2, p3): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 + x3,y3 = p3 +''', file=file) + subs = {P[i][j]: [X, Y][j][i] for i in range(n+1) for j in range(2)} + greens = [green(f, BezierCurve[n]) for name,f in funcs] + greens = [sp.gcd_terms(f.collect(sum(P,()))) for f in greens] # Optimize + greens = [f.subs(subs) for f in greens] # Convert to p to x/y + defs, exprs = sp.cse(greens, + optimizations='basic', + symbols=(sp.Symbol('r%d'%i) for i in count())) + for name,value in defs: + print(' %s = %s' % (name, value), file=file) + print(file=file) + for name,value in zip([f[0] for f in funcs], exprs): + print(' self.%s += %s' % (name, value), file=file) + + print(''' +if __name__ == '__main__': + from fontTools.misc.symfont import x, y, printGreenPen + printGreenPen('%s', ['''%penName, file=file) + for name,f in funcs: + print(" ('%s', %s)," % (name, str(f)), file=file) + print(' ])', file=file) + + +if __name__ == '__main__': + pen = AreaPen() + pen.moveTo((100,100)) + pen.lineTo((100,200)) + pen.lineTo((200,200)) + pen.curveTo((200,250),(300,300),(250,350)) + pen.lineTo((200,100)) + pen.closePath() + print(pen.value) diff -Nru fonttools-3.0/Snippets/fontTools/misc/testTools.py fonttools-3.21.2/Snippets/fontTools/misc/testTools.py --- fonttools-3.0/Snippets/fontTools/misc/testTools.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/testTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,142 @@ +"""Helpers for writing unit tests.""" + +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals 
+import collections +from fontTools.misc.py23 import * +from fontTools.misc.xmlWriter import XMLWriter + + +def parseXML(xmlSnippet): + """Parses a snippet of XML. + + Input can be either a single string (unicode or UTF-8 bytes), or a + a sequence of strings. + + The result is in the same format that would be returned by + XMLReader, but the parser imposes no constraints on the root + element so it can be called on small snippets of TTX files. + """ + # To support snippets with multiple elements, we add a fake root. + reader = TestXMLReader_() + xml = b"" + if isinstance(xmlSnippet, bytes): + xml += xmlSnippet + elif isinstance(xmlSnippet, unicode): + xml += tobytes(xmlSnippet, 'utf-8') + elif isinstance(xmlSnippet, collections.Iterable): + xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet) + else: + raise TypeError("expected string or sequence of strings; found %r" + % type(xmlSnippet).__name__) + xml += b"" + reader.parser.Parse(xml, 0) + return reader.root[2] + + +class FakeFont: + def __init__(self, glyphs): + self.glyphOrder_ = glyphs + self.reverseGlyphOrderDict_ = {g:i for i,g in enumerate(glyphs)} + self.lazy = False + self.tables = {} + + def __getitem__(self, tag): + return self.tables[tag] + + def __setitem__(self, tag, table): + self.tables[tag] = table + + def get(self, tag, default=None): + return self.tables.get(tag, default) + + def getGlyphID(self, name): + return self.reverseGlyphOrderDict_[name] + + def getGlyphName(self, glyphID): + if glyphID < len(self.glyphOrder_): + return self.glyphOrder_[glyphID] + else: + return "glyph%.5d" % glyphID + + def getGlyphOrder(self): + return self.glyphOrder_ + + def getReverseGlyphMap(self): + return self.reverseGlyphOrderDict_ + + +class TestXMLReader_(object): + def __init__(self): + from xml.parsers.expat import ParserCreate + self.parser = ParserCreate() + self.parser.StartElementHandler = self.startElement_ + self.parser.EndElementHandler = self.endElement_ + self.parser.CharacterDataHandler = 
self.addCharacterData_ + self.root = None + self.stack = [] + + def startElement_(self, name, attrs): + element = (name, attrs, []) + if self.stack: + self.stack[-1][2].append(element) + else: + self.root = element + self.stack.append(element) + + def endElement_(self, name): + self.stack.pop() + + def addCharacterData_(self, data): + self.stack[-1][2].append(data) + + +def makeXMLWriter(newlinestr='\n'): + # don't write OS-specific new lines + writer = XMLWriter(BytesIO(), newlinestr=newlinestr) + # erase XML declaration + writer.file.seek(0) + writer.file.truncate() + return writer + + +def getXML(func, ttFont=None): + """Call the passed toXML function and return the written content as a + list of lines (unicode strings). + Result is stripped of XML declaration and OS-specific newline characters. + """ + writer = makeXMLWriter() + func(writer, ttFont) + xml = writer.file.getvalue().decode("utf-8") + # toXML methods must always end with a writer.newline() + assert xml.endswith("\n") + return xml.splitlines() + + +class MockFont(object): + """A font-like object that automatically adds any looked up glyphname + to its glyphOrder.""" + + def __init__(self): + self._glyphOrder = ['.notdef'] + class AllocatingDict(dict): + def __missing__(reverseDict, key): + self._glyphOrder.append(key) + gid = len(reverseDict) + reverseDict[key] = gid + return gid + self._reverseGlyphOrder = AllocatingDict({'.notdef': 0}) + self.lazy = False + + def getGlyphID(self, glyph, requireReal=None): + gid = self._reverseGlyphOrder[glyph] + return gid + + def getReverseGlyphMap(self): + return self._reverseGlyphOrder + + def getGlyphName(self, gid): + return self._glyphOrder[gid] + + def getGlyphOrder(self): + return self._glyphOrder diff -Nru fonttools-3.0/Snippets/fontTools/misc/textTools.py fonttools-3.21.2/Snippets/fontTools/misc/textTools.py --- fonttools-3.0/Snippets/fontTools/misc/textTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/textTools.py 
2018-01-08 12:40:40.000000000 +0000 @@ -91,11 +91,12 @@ """ data = tobytes(data) if size > 1: - while len(data) % size != 0: - data += b"\0" + remainder = len(data) % size + if remainder: + data += b"\0" * (size - remainder) return data if __name__ == "__main__": - import doctest + import doctest, sys sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/misc/timeTools.py fonttools-3.21.2/Snippets/fontTools/misc/timeTools.py --- fonttools-3.0/Snippets/fontTools/misc/timeTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/timeTools.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,20 +3,62 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +import os import time import calendar epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0)) +DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] +MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] + + +def asctime(t=None): + """ + Convert a tuple or struct_time representing a time as returned by gmtime() + or localtime() to a 24-character string of the following form: + + >>> asctime(time.gmtime(0)) + 'Thu Jan 1 00:00:00 1970' + + If t is not provided, the current time as returned by localtime() is used. + Locale information is not used by asctime(). + + This is meant to normalise the output of the built-in time.asctime() across + different platforms and Python versions. + In Python 3.x, the day of the month is right-justified, whereas on Windows + Python 2.7 it is padded with zeros. 
+ + See https://github.com/behdad/fonttools/issues/455 + """ + if t is None: + t = time.localtime() + s = "%s %s %2s %s" % ( + DAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday, + time.strftime("%H:%M:%S %Y", t)) + return s + + def timestampToString(value): - return time.asctime(time.gmtime(max(0, value + epoch_diff))) + return asctime(time.gmtime(max(0, value + epoch_diff))) def timestampFromString(value): return calendar.timegm(time.strptime(value)) - epoch_diff def timestampNow(): + # https://reproducible-builds.org/specs/source-date-epoch/ + source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH") + if source_date_epoch is not None: + return int(source_date_epoch) - epoch_diff return int(time.time() - epoch_diff) def timestampSinceEpoch(value): return int(value - epoch_diff) + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/misc/transform.py fonttools-3.21.2/Snippets/fontTools/misc/transform.py --- fonttools-3.0/Snippets/fontTools/misc/transform.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/transform.py 2018-01-08 12:40:40.000000000 +0000 @@ -320,6 +320,27 @@ """ return hash(self.__affine) + def __bool__(self): + """Returns True if transform is not identity, False otherwise. 
+ >>> bool(Identity) + False + >>> bool(Transform()) + False + >>> bool(Scale(1.)) + False + >>> bool(Scale(2)) + True + >>> bool(Offset()) + False + >>> bool(Offset(0)) + False + >>> bool(Offset(2)) + True + """ + return self.__affine != Identity.__affine + + __nonzero__ = __bool__ + def __repr__(self): return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) \ + self.__affine) diff -Nru fonttools-3.0/Snippets/fontTools/misc/xmlReader.py fonttools-3.21.2/Snippets/fontTools/misc/xmlReader.py --- fonttools-3.0/Snippets/fontTools/misc/xmlReader.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/xmlReader.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,9 +3,13 @@ from fontTools import ttLib from fontTools.misc.textTools import safeEval from fontTools.ttLib.tables.DefaultTable import DefaultTable +import sys import os +import logging +log = logging.getLogger(__name__) + class TTXParseError(Exception): pass BUFSIZE = 0x4000 @@ -13,22 +17,42 @@ class XMLReader(object): - def __init__(self, fileName, ttFont, progress=None, quiet=False): + def __init__(self, fileOrPath, ttFont, progress=None, quiet=None): + if fileOrPath == '-': + fileOrPath = sys.stdin + if not hasattr(fileOrPath, "read"): + self.file = open(fileOrPath, "rb") + self._closeStream = True + else: + # assume readable file object + self.file = fileOrPath + self._closeStream = False self.ttFont = ttFont - self.fileName = fileName self.progress = progress - self.quiet = quiet + if quiet is not None: + from fontTools.misc.loggingTools import deprecateArgument + deprecateArgument("quiet", "configure logging instead") + self.quiet = quiet self.root = None self.contentStack = [] self.stackSize = 0 - def read(self): + def read(self, rootless=False): + if rootless: + self.stackSize += 1 if self.progress: - import stat - self.progress.set(0, os.stat(self.fileName)[stat.ST_SIZE] // 100 or 1) - file = open(self.fileName, 'rb') - self._parseFile(file) - file.close() + self.file.seek(0, 
2) + fileSize = self.file.tell() + self.progress.set(0, fileSize // 100 or 1) + self.file.seek(0) + self._parseFile(self.file) + if self._closeStream: + self.close() + if rootless: + self.stackSize -= 1 + + def close(self): + self.file.close() def _parseFile(self, file): from xml.parsers.expat import ParserCreate @@ -63,20 +87,22 @@ elif stackSize == 1: subFile = attrs.get("src") if subFile is not None: - subFile = os.path.join(os.path.dirname(self.fileName), subFile) - subReader = XMLReader(subFile, self.ttFont, self.progress, self.quiet) + if hasattr(self.file, 'name'): + # if file has a name, get its parent directory + dirname = os.path.dirname(self.file.name) + else: + # else fall back to using the current working directory + dirname = os.getcwd() + subFile = os.path.join(dirname, subFile) + subReader = XMLReader(subFile, self.ttFont, self.progress) subReader.read() self.contentStack.append([]) return tag = ttLib.xmlToTag(name) msg = "Parsing '%s' table..." % tag if self.progress: - self.progress.setlabel(msg) - elif self.ttFont.verbose: - ttLib.debugmsg(msg) - else: - if not self.quiet: - print(msg) + self.progress.setLabel(msg) + log.info(msg) if tag == "GlyphOrder": tableClass = ttLib.GlyphOrder elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])): diff -Nru fonttools-3.0/Snippets/fontTools/misc/xmlReader_test.py fonttools-3.21.2/Snippets/fontTools/misc/xmlReader_test.py --- fonttools-3.0/Snippets/fontTools/misc/xmlReader_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/xmlReader_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -import os -import unittest -from fontTools.ttLib import TTFont -from .xmlReader import XMLReader -import tempfile - - -class TestXMLReader(unittest.TestCase): - - def test_decode_utf8(self): - - class 
DebugXMLReader(XMLReader): - - def __init__(self, fileName, ttFont, progress=None, quiet=False): - super(DebugXMLReader, self).__init__( - fileName, ttFont, progress, quiet) - self.contents = [] - - def _endElementHandler(self, name): - if self.stackSize == 3: - name, attrs, content = self.root - self.contents.append(content) - super(DebugXMLReader, self)._endElementHandler(name) - - expected = 'fôôbär' - data = '''\ - - - - - %s - - - -''' % expected - - with tempfile.NamedTemporaryFile(delete=False) as tmp: - tmp.write(data.encode('utf-8')) - reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) - reader.read() - os.remove(tmp.name) - content = strjoin(reader.contents[0]).strip() - self.assertEqual(expected, content) - - def test_normalise_newlines(self): - - class DebugXMLReader(XMLReader): - - def __init__(self, fileName, ttFont, progress=None, quiet=False): - super(DebugXMLReader, self).__init__( - fileName, ttFont, progress, quiet) - self.newlines = [] - - def _characterDataHandler(self, data): - self.newlines.extend([c for c in data if c in ('\r', '\n')]) - - # notice how when CR is escaped, it is not normalised by the XML parser - data = ( - '\r' # \r -> \n - ' \r\n' # \r\n -> \n - ' a line of text\n' # \n - ' escaped CR and unix newline \n' # \n -> \r\n - ' escaped CR and macintosh newline \r' # \r -> \r\n - ' escaped CR and windows newline \r\n' # \r\n -> \r\n - ' \n' # \n - '') - with tempfile.NamedTemporaryFile(delete=False) as tmp: - tmp.write(data.encode('utf-8')) - reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) - reader.read() - os.remove(tmp.name) - expected = ['\n'] * 3 + ['\r', '\n'] * 3 + ['\n'] - self.assertEqual(expected, reader.newlines) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/misc/xmlWriter.py fonttools-3.21.2/Snippets/fontTools/misc/xmlWriter.py --- fonttools-3.0/Snippets/fontTools/misc/xmlWriter.py 2015-08-31 17:57:15.000000000 +0000 +++ 
fonttools-3.21.2/Snippets/fontTools/misc/xmlWriter.py 2018-01-08 12:40:40.000000000 +0000 @@ -11,7 +11,8 @@ class XMLWriter(object): - def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8"): + def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8", + newlinestr=None): if encoding.lower().replace('-','').replace('_','') != 'utf8': raise Exception('Only UTF-8 encoding is supported.') if fileOrPath == '-': @@ -33,7 +34,10 @@ self.file.write(tounicode('')) self.totype = tounicode self.indentwhite = self.totype(indentwhite) - self.newlinestr = self.totype(os.linesep) + if newlinestr is None: + self.newlinestr = self.totype(os.linesep) + else: + self.newlinestr = self.totype(newlinestr) self.indentlevel = 0 self.stack = [] self.needindent = 1 diff -Nru fonttools-3.0/Snippets/fontTools/misc/xmlWriter_test.py fonttools-3.21.2/Snippets/fontTools/misc/xmlWriter_test.py --- fonttools-3.0/Snippets/fontTools/misc/xmlWriter_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/misc/xmlWriter_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import os -import unittest -from .xmlWriter import XMLWriter - -linesep = tobytes(os.linesep) -HEADER = b'' + linesep - -class TestXMLWriter(unittest.TestCase): - - def test_comment_escaped(self): - writer = XMLWriter(BytesIO()) - writer.comment("This&that are ") - self.assertEqual(HEADER + b"", writer.file.getvalue()) - - def test_comment_multiline(self): - writer = XMLWriter(BytesIO()) - writer.comment("Hello world\nHow are you?") - self.assertEqual(HEADER + b"", - writer.file.getvalue()) - - def test_encoding_default(self): - writer = XMLWriter(BytesIO()) - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_utf8(self): - # https://github.com/behdad/fonttools/issues/246 - writer = XMLWriter(BytesIO(), 
encoding="utf8") - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_UTF_8(self): - # https://github.com/behdad/fonttools/issues/246 - writer = XMLWriter(BytesIO(), encoding="UTF-8") - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_UTF8(self): - # https://github.com/behdad/fonttools/issues/246 - writer = XMLWriter(BytesIO(), encoding="UTF8") - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_other(self): - self.assertRaises(Exception, XMLWriter, BytesIO(), - encoding="iso-8859-1") - - def test_write(self): - writer = XMLWriter(BytesIO()) - writer.write("foo&bar") - self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue()) - - def test_indent_dedent(self): - writer = XMLWriter(BytesIO()) - writer.write("foo") - writer.newline() - writer.indent() - writer.write("bar") - writer.newline() - writer.dedent() - writer.write("baz") - self.assertEqual(HEADER + bytesjoin(["foo", " bar", "baz"], linesep), - writer.file.getvalue()) - - def test_writecdata(self): - writer = XMLWriter(BytesIO()) - writer.writecdata("foo&bar") - self.assertEqual(HEADER + b"", writer.file.getvalue()) - - def test_simpletag(self): - writer = XMLWriter(BytesIO()) - writer.simpletag("tag", a="1", b="2") - self.assertEqual(HEADER + b'', writer.file.getvalue()) - - def test_begintag_endtag(self): - writer = XMLWriter(BytesIO()) - writer.begintag("tag", attr="value") - writer.write("content") - writer.endtag("tag") - self.assertEqual(HEADER + b'content', writer.file.getvalue()) - - def test_dumphex(self): - writer = XMLWriter(BytesIO()) - writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.") - self.assertEqual(HEADER + bytesjoin([ - "54797065 20697320 61206265 61757469", - "66756c20 67726f75 70206f66 206c6574", - "74657273 2c206e6f 74206120 67726f75", - "70206f66 20626561 75746966 756c206c", - "65747465 72732e ", ""], joiner=linesep), writer.file.getvalue()) - - def 
test_stringifyattrs(self): - writer = XMLWriter(BytesIO()) - expected = ' attr="0"' - self.assertEqual(expected, writer.stringifyattrs(attr=0)) - self.assertEqual(expected, writer.stringifyattrs(attr=b'0')) - self.assertEqual(expected, writer.stringifyattrs(attr='0')) - self.assertEqual(expected, writer.stringifyattrs(attr=u'0')) - - def test_carriage_return_escaped(self): - writer = XMLWriter(BytesIO()) - writer.write("two lines\r\nseparated by Windows line endings") - self.assertEqual( - HEADER + b'two lines \nseparated by Windows line endings', - writer.file.getvalue()) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/mtiLib/__init__.py fonttools-3.21.2/Snippets/fontTools/mtiLib/__init__.py --- fonttools-3.0/Snippets/fontTools/mtiLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/mtiLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1196 @@ +#!/usr/bin/python + +# FontDame-to-FontTools for OpenType Layout tables +# +# Source language spec is available at: +# http://monotype.github.io/OpenType_Table_Source/otl_source.html +# https://github.com/Monotype/OpenType_Table_Source/ + +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.ttLib.tables._c_m_a_p import cmap_classes +from fontTools.ttLib.tables import otTables as ot +from fontTools.ttLib.tables.otBase import ValueRecord, valueRecordFormatDict +from fontTools.otlLib import builder as otl +from contextlib import contextmanager +from operator import setitem +import logging + +class MtiLibError(Exception): pass +class ReferenceNotFoundError(MtiLibError): pass +class FeatureNotFoundError(ReferenceNotFoundError): pass +class LookupNotFoundError(ReferenceNotFoundError): pass + + +log = logging.getLogger("fontTools.mtiLib") + + +def makeGlyph(s): + if s[:2] in ['U ', 'u ']: + return 
ttLib.TTFont._makeGlyphName(int(s[2:], 16)) + elif s[:2] == '# ': + return "glyph%.5d" % int(s[2:]) + assert s.find(' ') < 0, "Space found in glyph name: %s" % s + assert s, "Glyph name is empty" + return s + +def makeGlyphs(l): + return [makeGlyph(g) for g in l] + +def mapLookup(sym, mapping): + # Lookups are addressed by name. So resolved them using a map if available. + # Fallback to parsing as lookup index if a map isn't provided. + if mapping is not None: + try: + idx = mapping[sym] + except KeyError: + raise LookupNotFoundError(sym) + else: + idx = int(sym) + return idx + +def mapFeature(sym, mapping): + # Features are referenced by index according the spec. So, if symbol is an + # integer, use it directly. Otherwise look up in the map if provided. + try: + idx = int(sym) + except ValueError: + try: + idx = mapping[sym] + except KeyError: + raise FeatureNotFoundError(sym) + return idx + +def setReference(mapper, mapping, sym, setter, collection, key): + try: + mapped = mapper(sym, mapping) + except ReferenceNotFoundError as e: + try: + if mapping is not None: + mapping.addDeferredMapping(lambda ref: setter(collection, key, ref), sym, e) + return + except AttributeError: + pass + raise + setter(collection, key, mapped) + +class DeferredMapping(dict): + + def __init__(self): + self._deferredMappings = [] + + def addDeferredMapping(self, setter, sym, e): + log.debug("Adding deferred mapping for symbol '%s' %s", sym, type(e).__name__) + self._deferredMappings.append((setter,sym, e)) + + def applyDeferredMappings(self): + for setter,sym,e in self._deferredMappings: + log.debug("Applying deferred mapping for symbol '%s' %s", sym, type(e).__name__) + try: + mapped = self[sym] + except KeyError: + raise e + setter(mapped) + log.debug("Set to %s", mapped) + self._deferredMappings = [] + + +def parseScriptList(lines, featureMap=None): + self = ot.ScriptList() + records = [] + with lines.between('script table'): + for line in lines: + while len(line) < 4: + 
line.append('') + scriptTag, langSysTag, defaultFeature, features = line + log.debug("Adding script %s language-system %s", scriptTag, langSysTag) + + langSys = ot.LangSys() + langSys.LookupOrder = None + if defaultFeature: + setReference(mapFeature, featureMap, defaultFeature, setattr, langSys, 'ReqFeatureIndex') + else: + langSys.ReqFeatureIndex = 0xFFFF + syms = stripSplitComma(features) + langSys.FeatureIndex = theList = [3] * len(syms) + for i,sym in enumerate(syms): + setReference(mapFeature, featureMap, sym, setitem, theList, i) + langSys.FeatureCount = len(langSys.FeatureIndex) + + script = [s for s in records if s.ScriptTag == scriptTag] + if script: + script = script[0].Script + else: + scriptRec = ot.ScriptRecord() + scriptRec.ScriptTag = scriptTag + scriptRec.Script = ot.Script() + records.append(scriptRec) + script = scriptRec.Script + script.DefaultLangSys = None + script.LangSysRecord = [] + script.LangSysCount = 0 + + if langSysTag == 'default': + script.DefaultLangSys = langSys + else: + langSysRec = ot.LangSysRecord() + langSysRec.LangSysTag = langSysTag + ' '*(4 - len(langSysTag)) + langSysRec.LangSys = langSys + script.LangSysRecord.append(langSysRec) + script.LangSysCount = len(script.LangSysRecord) + + for script in records: + script.Script.LangSysRecord = sorted(script.Script.LangSysRecord, key=lambda rec: rec.LangSysTag) + self.ScriptRecord = sorted(records, key=lambda rec: rec.ScriptTag) + self.ScriptCount = len(self.ScriptRecord) + return self + +def parseFeatureList(lines, lookupMap=None, featureMap=None): + self = ot.FeatureList() + self.FeatureRecord = [] + with lines.between('feature table'): + for line in lines: + name, featureTag, lookups = line + if featureMap is not None: + assert name not in featureMap, "Duplicate feature name: %s" % name + featureMap[name] = len(self.FeatureRecord) + # If feature name is integer, make sure it matches its index. 
+ try: + assert int(name) == len(self.FeatureRecord), "%d %d" % (name, len(self.FeatureRecord)) + except ValueError: + pass + featureRec = ot.FeatureRecord() + featureRec.FeatureTag = featureTag + featureRec.Feature = ot.Feature() + self.FeatureRecord.append(featureRec) + feature = featureRec.Feature + feature.FeatureParams = None + syms = stripSplitComma(lookups) + feature.LookupListIndex = theList = [None] * len(syms) + for i,sym in enumerate(syms): + setReference(mapLookup, lookupMap, sym, setitem, theList, i) + feature.LookupCount = len(feature.LookupListIndex) + + self.FeatureCount = len(self.FeatureRecord) + return self + +def parseLookupFlags(lines): + flags = 0 + filterset = None + allFlags = [ + 'righttoleft', + 'ignorebaseglyphs', + 'ignoreligatures', + 'ignoremarks', + 'markattachmenttype', + 'markfiltertype', + ] + while lines.peeks()[0].lower() in allFlags: + line = next(lines) + flag = { + 'righttoleft': 0x0001, + 'ignorebaseglyphs': 0x0002, + 'ignoreligatures': 0x0004, + 'ignoremarks': 0x0008, + }.get(line[0].lower()) + if flag: + assert line[1].lower() in ['yes', 'no'], line[1] + if line[1].lower() == 'yes': + flags |= flag + continue + if line[0].lower() == 'markattachmenttype': + flags |= int(line[1]) << 8 + continue + if line[0].lower() == 'markfiltertype': + flags |= 0x10 + filterset = int(line[1]) + return flags, filterset + +def parseSingleSubst(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + assert len(line) == 2, line + line = makeGlyphs(line) + mapping[line[0]] = line[1] + return otl.buildSingleSubstSubtable(mapping) + +def parseMultiple(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + line = makeGlyphs(line) + mapping[line[0]] = line[1:] + return otl.buildMultipleSubstSubtable(mapping) + +def parseAlternate(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + line = makeGlyphs(line) + mapping[line[0]] = line[1:] + return otl.buildAlternateSubstSubtable(mapping) + +def 
parseLigature(lines, font, _lookupMap=None): + mapping = {} + for line in lines: + assert len(line) >= 2, line + line = makeGlyphs(line) + mapping[tuple(line[1:])] = line[0] + return otl.buildLigatureSubstSubtable(mapping) + +def parseSinglePos(lines, font, _lookupMap=None): + values = {} + for line in lines: + assert len(line) == 3, line + w = line[0].title().replace(' ', '') + assert w in valueRecordFormatDict + g = makeGlyph(line[1]) + v = int(line[2]) + if g not in values: + values[g] = ValueRecord() + assert not hasattr(values[g], w), (g, w) + setattr(values[g], w, v) + return otl.buildSinglePosSubtable(values, font.getReverseGlyphMap()) + +def parsePair(lines, font, _lookupMap=None): + self = ot.PairPos() + self.ValueFormat1 = self.ValueFormat2 = 0 + typ = lines.peeks()[0].split()[0].lower() + if typ in ('left', 'right'): + self.Format = 1 + values = {} + for line in lines: + assert len(line) == 4, line + side = line[0].split()[0].lower() + assert side in ('left', 'right'), side + what = line[0][len(side):].title().replace(' ', '') + mask = valueRecordFormatDict[what][0] + glyph1, glyph2 = makeGlyphs(line[1:3]) + value = int(line[3]) + if not glyph1 in values: values[glyph1] = {} + if not glyph2 in values[glyph1]: values[glyph1][glyph2] = (ValueRecord(),ValueRecord()) + rec2 = values[glyph1][glyph2] + if side == 'left': + self.ValueFormat1 |= mask + vr = rec2[0] + else: + self.ValueFormat2 |= mask + vr = rec2[1] + assert not hasattr(vr, what), (vr, what) + setattr(vr, what, value) + self.Coverage = makeCoverage(set(values.keys()), font) + self.PairSet = [] + for glyph1 in self.Coverage.glyphs: + values1 = values[glyph1] + pairset = ot.PairSet() + records = pairset.PairValueRecord = [] + for glyph2 in sorted(values1.keys(), key=font.getGlyphID): + values2 = values1[glyph2] + pair = ot.PairValueRecord() + pair.SecondGlyph = glyph2 + pair.Value1 = values2[0] + pair.Value2 = values2[1] if self.ValueFormat2 else None + records.append(pair) + pairset.PairValueCount 
= len(pairset.PairValueRecord) + self.PairSet.append(pairset) + self.PairSetCount = len(self.PairSet) + elif typ.endswith('class'): + self.Format = 2 + classDefs = [None, None] + while lines.peeks()[0].endswith("class definition begin"): + typ = lines.peek()[0][:-len("class definition begin")].lower() + idx,klass = { + 'first': (0,ot.ClassDef1), + 'second': (1,ot.ClassDef2), + }[typ] + assert classDefs[idx] is None + classDefs[idx] = parseClassDef(lines, font, klass=klass) + self.ClassDef1, self.ClassDef2 = classDefs + self.Class1Count, self.Class2Count = (1+max(c.classDefs.values()) for c in classDefs) + self.Class1Record = [ot.Class1Record() for i in range(self.Class1Count)] + for rec1 in self.Class1Record: + rec1.Class2Record = [ot.Class2Record() for j in range(self.Class2Count)] + for rec2 in rec1.Class2Record: + rec2.Value1 = ValueRecord() + rec2.Value2 = ValueRecord() + for line in lines: + assert len(line) == 4, line + side = line[0].split()[0].lower() + assert side in ('left', 'right'), side + what = line[0][len(side):].title().replace(' ', '') + mask = valueRecordFormatDict[what][0] + class1, class2, value = (int(x) for x in line[1:4]) + rec2 = self.Class1Record[class1].Class2Record[class2] + if side == 'left': + self.ValueFormat1 |= mask + vr = rec2.Value1 + else: + self.ValueFormat2 |= mask + vr = rec2.Value2 + assert not hasattr(vr, what), (vr, what) + setattr(vr, what, value) + for rec1 in self.Class1Record: + for rec2 in rec1.Class2Record: + rec2.Value1 = ValueRecord(self.ValueFormat1, rec2.Value1) + rec2.Value2 = ValueRecord(self.ValueFormat2, rec2.Value2) \ + if self.ValueFormat2 else None + + self.Coverage = makeCoverage(set(self.ClassDef1.classDefs.keys()), font) + else: + assert 0, typ + return self + +def parseKernset(lines, font, _lookupMap=None): + typ = lines.peeks()[0].split()[0].lower() + if typ in ('left', 'right'): + with lines.until(("firstclass definition begin", "secondclass definition begin")): + return parsePair(lines, font) + return 
parsePair(lines, font) + +def makeAnchor(data, klass=ot.Anchor): + assert len(data) <= 2 + anchor = klass() + anchor.Format = 1 + anchor.XCoordinate,anchor.YCoordinate = intSplitComma(data[0]) + if len(data) > 1 and data[1] != '': + anchor.Format = 2 + anchor.AnchorPoint = int(data[1]) + return anchor + +def parseCursive(lines, font, _lookupMap=None): + records = {} + for line in lines: + assert len(line) in [3,4], line + idx,klass = { + 'entry': (0,ot.EntryAnchor), + 'exit': (1,ot.ExitAnchor), + }[line[0]] + glyph = makeGlyph(line[1]) + if glyph not in records: + records[glyph] = [None,None] + assert records[glyph][idx] is None, (glyph, idx) + records[glyph][idx] = makeAnchor(line[2:], klass) + return otl.buildCursivePosSubtable(records, font.getReverseGlyphMap()) + +def makeMarkRecords(data, coverage, c): + records = [] + for glyph in coverage.glyphs: + klass, anchor = data[glyph] + record = c.MarkRecordClass() + record.Class = klass + setattr(record, c.MarkAnchor, anchor) + records.append(record) + return records + +def makeBaseRecords(data, coverage, c, classCount): + records = [] + idx = {} + for glyph in coverage.glyphs: + idx[glyph] = len(records) + record = c.BaseRecordClass() + anchors = [None] * classCount + setattr(record, c.BaseAnchor, anchors) + records.append(record) + for (glyph,klass),anchor in data.items(): + record = records[idx[glyph]] + anchors = getattr(record, c.BaseAnchor) + assert anchors[klass] is None, (glyph, klass) + anchors[klass] = anchor + return records + +def makeLigatureRecords(data, coverage, c, classCount): + records = [None] * len(coverage.glyphs) + idx = {g:i for i,g in enumerate(coverage.glyphs)} + + for (glyph,klass,compIdx,compCount),anchor in data.items(): + record = records[idx[glyph]] + if record is None: + record = records[idx[glyph]] = ot.LigatureAttach() + record.ComponentCount = compCount + record.ComponentRecord = [ot.ComponentRecord() for i in range(compCount)] + for compRec in record.ComponentRecord: + 
compRec.LigatureAnchor = [None] * classCount + assert record.ComponentCount == compCount, (glyph, record.ComponentCount, compCount) + + anchors = record.ComponentRecord[compIdx - 1].LigatureAnchor + assert anchors[klass] is None, (glyph, compIdx, klass) + anchors[klass] = anchor + return records + +def parseMarkToSomething(lines, font, c): + self = c.Type() + self.Format = 1 + markData = {} + baseData = {} + Data = { + 'mark': (markData, c.MarkAnchorClass), + 'base': (baseData, c.BaseAnchorClass), + 'ligature': (baseData, c.BaseAnchorClass), + } + maxKlass = 0 + for line in lines: + typ = line[0] + assert typ in ('mark', 'base', 'ligature') + glyph = makeGlyph(line[1]) + data, anchorClass = Data[typ] + extraItems = 2 if typ == 'ligature' else 0 + extras = tuple(int(i) for i in line[2:2+extraItems]) + klass = int(line[2+extraItems]) + anchor = makeAnchor(line[3+extraItems:], anchorClass) + if typ == 'mark': + key,value = glyph,(klass,anchor) + else: + key,value = ((glyph,klass)+extras),anchor + assert key not in data, key + data[key] = value + maxKlass = max(maxKlass, klass) + + # Mark + markCoverage = makeCoverage(set(markData.keys()), font, c.MarkCoverageClass) + markArray = c.MarkArrayClass() + markRecords = makeMarkRecords(markData, markCoverage, c) + setattr(markArray, c.MarkRecord, markRecords) + setattr(markArray, c.MarkCount, len(markRecords)) + setattr(self, c.MarkCoverage, markCoverage) + setattr(self, c.MarkArray, markArray) + self.ClassCount = maxKlass + 1 + + # Base + self.classCount = 0 if not baseData else 1+max(k[1] for k,v in baseData.items()) + baseCoverage = makeCoverage(set([k[0] for k in baseData.keys()]), font, c.BaseCoverageClass) + baseArray = c.BaseArrayClass() + if c.Base == 'Ligature': + baseRecords = makeLigatureRecords(baseData, baseCoverage, c, self.classCount) + else: + baseRecords = makeBaseRecords(baseData, baseCoverage, c, self.classCount) + setattr(baseArray, c.BaseRecord, baseRecords) + setattr(baseArray, c.BaseCount, 
len(baseRecords)) + setattr(self, c.BaseCoverage, baseCoverage) + setattr(self, c.BaseArray, baseArray) + + return self + +class MarkHelper(object): + def __init__(self): + for Which in ('Mark', 'Base'): + for What in ('Coverage', 'Array', 'Count', 'Record', 'Anchor'): + key = Which + What + if Which == 'Mark' and What in ('Count', 'Record', 'Anchor'): + value = key + else: + value = getattr(self, Which) + What + if value == 'LigatureRecord': + value = 'LigatureAttach' + setattr(self, key, value) + if What != 'Count': + klass = getattr(ot, value) + setattr(self, key+'Class', klass) + +class MarkToBaseHelper(MarkHelper): + Mark = 'Mark' + Base = 'Base' + Type = ot.MarkBasePos +class MarkToMarkHelper(MarkHelper): + Mark = 'Mark1' + Base = 'Mark2' + Type = ot.MarkMarkPos +class MarkToLigatureHelper(MarkHelper): + Mark = 'Mark' + Base = 'Ligature' + Type = ot.MarkLigPos + +def parseMarkToBase(lines, font, _lookupMap=None): + return parseMarkToSomething(lines, font, MarkToBaseHelper()) +def parseMarkToMark(lines, font, _lookupMap=None): + return parseMarkToSomething(lines, font, MarkToMarkHelper()) +def parseMarkToLigature(lines, font, _lookupMap=None): + return parseMarkToSomething(lines, font, MarkToLigatureHelper()) + +def stripSplitComma(line): + return [s.strip() for s in line.split(',')] if line else [] + +def intSplitComma(line): + return [int(i) for i in line.split(',')] if line else [] + +# Copied from fontTools.subset +class ContextHelper(object): + def __init__(self, klassName, Format): + if klassName.endswith('Subst'): + Typ = 'Sub' + Type = 'Subst' + else: + Typ = 'Pos' + Type = 'Pos' + if klassName.startswith('Chain'): + Chain = 'Chain' + InputIdx = 1 + DataLen = 3 + else: + Chain = '' + InputIdx = 0 + DataLen = 1 + ChainTyp = Chain+Typ + + self.Typ = Typ + self.Type = Type + self.Chain = Chain + self.ChainTyp = ChainTyp + self.InputIdx = InputIdx + self.DataLen = DataLen + + self.LookupRecord = Type+'LookupRecord' + + if Format == 1: + Coverage = lambda 
r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(None,) + ChainContextData = lambda r:(None, None, None) + SetContextData = None + SetChainContextData = None + RuleData = lambda r:(r.Input,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d): + (r.Input,) = d + (r.GlyphCount,) = (len(x)+1 for x in d) + def ChainSetRuleData(r, d): + (r.Backtrack, r.Input, r.LookAhead) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2])) + elif Format == 2: + Coverage = lambda r: r.Coverage + ChainCoverage = lambda r: r.Coverage + ContextData = lambda r:(r.ClassDef,) + ChainContextData = lambda r:(r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) + def SetContextData(r, d): + (r.ClassDef,) = d + def SetChainContextData(r, d): + (r.BacktrackClassDef, + r.InputClassDef, + r.LookAheadClassDef) = d + RuleData = lambda r:(r.Class,) + ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) + def SetRuleData(r, d): + (r.Class,) = d + (r.GlyphCount,) = (len(x)+1 for x in d) + def ChainSetRuleData(r, d): + (r.Backtrack, r.Input, r.LookAhead) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2])) + elif Format == 3: + Coverage = lambda r: r.Coverage[0] + ChainCoverage = lambda r: r.InputCoverage[0] + ContextData = None + ChainContextData = None + SetContextData = None + SetChainContextData = None + RuleData = lambda r: r.Coverage + ChainRuleData = lambda r:(r.BacktrackCoverage + + r.InputCoverage + + r.LookAheadCoverage) + def SetRuleData(r, d): + (r.Coverage,) = d + (r.GlyphCount,) = (len(x) for x in d) + def ChainSetRuleData(r, d): + (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d + (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(x) for x in d) + else: + assert 0, "unknown format: %s" % Format + + if Chain: + self.Coverage = ChainCoverage + self.ContextData = 
ChainContextData + self.SetContextData = SetChainContextData + self.RuleData = ChainRuleData + self.SetRuleData = ChainSetRuleData + else: + self.Coverage = Coverage + self.ContextData = ContextData + self.SetContextData = SetContextData + self.RuleData = RuleData + self.SetRuleData = SetRuleData + + if Format == 1: + self.Rule = ChainTyp+'Rule' + self.RuleCount = ChainTyp+'RuleCount' + self.RuleSet = ChainTyp+'RuleSet' + self.RuleSetCount = ChainTyp+'RuleSetCount' + self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] + elif Format == 2: + self.Rule = ChainTyp+'ClassRule' + self.RuleCount = ChainTyp+'ClassRuleCount' + self.RuleSet = ChainTyp+'ClassSet' + self.RuleSetCount = ChainTyp+'ClassSetCount' + self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c + else (set(glyphs) if r == 0 else set())) + + self.ClassDef = 'InputClassDef' if Chain else 'ClassDef' + self.ClassDefIndex = 1 if Chain else 0 + self.Input = 'Input' if Chain else 'Class' + +def parseLookupRecords(items, klassName, lookupMap=None): + klass = getattr(ot, klassName) + lst = [] + for item in items: + rec = klass() + item = stripSplitComma(item) + assert len(item) == 2, item + idx = int(item[0]) + assert idx > 0, idx + rec.SequenceIndex = idx - 1 + setReference(mapLookup, lookupMap, item[1], setattr, rec, 'LookupListIndex') + lst.append(rec) + return lst + +def makeClassDef(classDefs, font, klass=ot.Coverage): + if not classDefs: return None + self = klass() + self.classDefs = dict(classDefs) + return self + +def parseClassDef(lines, font, klass=ot.ClassDef): + classDefs = {} + with lines.between('class definition'): + for line in lines: + glyph = makeGlyph(line[0]) + assert glyph not in classDefs, glyph + classDefs[glyph] = int(line[1]) + return makeClassDef(classDefs, font, klass) + +def makeCoverage(glyphs, font, klass=ot.Coverage): + if not glyphs: return None + if isinstance(glyphs, set): + glyphs = sorted(glyphs) + coverage = klass() + coverage.glyphs = 
sorted(set(glyphs), key=font.getGlyphID) + return coverage + +def parseCoverage(lines, font, klass=ot.Coverage): + glyphs = [] + with lines.between('coverage definition'): + for line in lines: + glyphs.append(makeGlyph(line[0])) + return makeCoverage(glyphs, font, klass) + +def bucketizeRules(self, c, rules, bucketKeys): + buckets = {} + for seq,recs in rules: + buckets.setdefault(seq[c.InputIdx][0], []).append((tuple(s[1 if i==c.InputIdx else 0:] for i,s in enumerate(seq)), recs)) + + rulesets = [] + for firstGlyph in bucketKeys: + if firstGlyph not in buckets: + rulesets.append(None) + continue + thisRules = [] + for seq,recs in buckets[firstGlyph]: + rule = getattr(ot, c.Rule)() + c.SetRuleData(rule, seq) + setattr(rule, c.Type+'Count', len(recs)) + setattr(rule, c.LookupRecord, recs) + thisRules.append(rule) + + ruleset = getattr(ot, c.RuleSet)() + setattr(ruleset, c.Rule, thisRules) + setattr(ruleset, c.RuleCount, len(thisRules)) + rulesets.append(ruleset) + + setattr(self, c.RuleSet, rulesets) + setattr(self, c.RuleSetCount, len(rulesets)) + +def parseContext(lines, font, Type, lookupMap=None): + self = getattr(ot, Type)() + typ = lines.peeks()[0].split()[0].lower() + if typ == 'glyph': + self.Format = 1 + log.debug("Parsing %s format %s", Type, self.Format) + c = ContextHelper(Type, self.Format) + rules = [] + for line in lines: + assert line[0].lower() == 'glyph', line[0] + while len(line) < 1+c.DataLen: line.append('') + seq = tuple(makeGlyphs(stripSplitComma(i)) for i in line[1:1+c.DataLen]) + recs = parseLookupRecords(line[1+c.DataLen:], c.LookupRecord, lookupMap) + rules.append((seq, recs)) + + firstGlyphs = set(seq[c.InputIdx][0] for seq,recs in rules) + self.Coverage = makeCoverage(firstGlyphs, font) + bucketizeRules(self, c, rules, self.Coverage.glyphs) + elif typ.endswith('class'): + self.Format = 2 + log.debug("Parsing %s format %s", Type, self.Format) + c = ContextHelper(Type, self.Format) + classDefs = [None] * c.DataLen + while 
lines.peeks()[0].endswith("class definition begin"): + typ = lines.peek()[0][:-len("class definition begin")].lower() + idx,klass = { + 1: { + '': (0,ot.ClassDef), + }, + 3: { + 'backtrack': (0,ot.BacktrackClassDef), + '': (1,ot.InputClassDef), + 'lookahead': (2,ot.LookAheadClassDef), + }, + }[c.DataLen][typ] + assert classDefs[idx] is None, idx + classDefs[idx] = parseClassDef(lines, font, klass=klass) + c.SetContextData(self, classDefs) + rules = [] + for line in lines: + assert line[0].lower().startswith('class'), line[0] + while len(line) < 1+c.DataLen: line.append('') + seq = tuple(intSplitComma(i) for i in line[1:1+c.DataLen]) + recs = parseLookupRecords(line[1+c.DataLen:], c.LookupRecord, lookupMap) + rules.append((seq, recs)) + firstClasses = set(seq[c.InputIdx][0] for seq,recs in rules) + firstGlyphs = set(g for g,c in classDefs[c.InputIdx].classDefs.items() if c in firstClasses) + self.Coverage = makeCoverage(firstGlyphs, font) + bucketizeRules(self, c, rules, range(max(firstClasses) + 1)) + elif typ.endswith('coverage'): + self.Format = 3 + log.debug("Parsing %s format %s", Type, self.Format) + c = ContextHelper(Type, self.Format) + coverages = tuple([] for i in range(c.DataLen)) + while lines.peeks()[0].endswith("coverage definition begin"): + typ = lines.peek()[0][:-len("coverage definition begin")].lower() + idx,klass = { + 1: { + '': (0,ot.Coverage), + }, + 3: { + 'backtrack': (0,ot.BacktrackCoverage), + 'input': (1,ot.InputCoverage), + 'lookahead': (2,ot.LookAheadCoverage), + }, + }[c.DataLen][typ] + coverages[idx].append(parseCoverage(lines, font, klass=klass)) + c.SetRuleData(self, coverages) + lines = list(lines) + assert len(lines) == 1 + line = lines[0] + assert line[0].lower() == 'coverage', line[0] + recs = parseLookupRecords(line[1:], c.LookupRecord, lookupMap) + setattr(self, c.Type+'Count', len(recs)) + setattr(self, c.LookupRecord, recs) + else: + assert 0, typ + return self + +def parseContextSubst(lines, font, lookupMap=None): + return 
parseContext(lines, font, "ContextSubst", lookupMap=lookupMap) +def parseContextPos(lines, font, lookupMap=None): + return parseContext(lines, font, "ContextPos", lookupMap=lookupMap) +def parseChainedSubst(lines, font, lookupMap=None): + return parseContext(lines, font, "ChainContextSubst", lookupMap=lookupMap) +def parseChainedPos(lines, font, lookupMap=None): + return parseContext(lines, font, "ChainContextPos", lookupMap=lookupMap) + +def parseReverseChainedSubst(lines, font, _lookupMap=None): + self = ot.ReverseChainSingleSubst() + self.Format = 1 + coverages = ([], []) + while lines.peeks()[0].endswith("coverage definition begin"): + typ = lines.peek()[0][:-len("coverage definition begin")].lower() + idx,klass = { + 'backtrack': (0,ot.BacktrackCoverage), + 'lookahead': (1,ot.LookAheadCoverage), + }[typ] + coverages[idx].append(parseCoverage(lines, font, klass=klass)) + self.BacktrackCoverage = coverages[0] + self.BacktrackGlyphCount = len(self.BacktrackCoverage) + self.LookAheadCoverage = coverages[1] + self.LookAheadGlyphCount = len(self.LookAheadCoverage) + mapping = {} + for line in lines: + assert len(line) == 2, line + line = makeGlyphs(line) + mapping[line[0]] = line[1] + self.Coverage = makeCoverage(set(mapping.keys()), font) + self.Substitute = [mapping[k] for k in self.Coverage.glyphs] + self.GlyphCount = len(self.Substitute) + return self + +def parseLookup(lines, tableTag, font, lookupMap=None): + line = lines.expect('lookup') + _, name, typ = line + log.debug("Parsing lookup type %s %s", typ, name) + lookup = ot.Lookup() + lookup.LookupFlag,filterset = parseLookupFlags(lines) + if filterset is not None: + lookup.MarkFilteringSet = filterset + lookup.LookupType, parseLookupSubTable = { + 'GSUB': { + 'single': (1, parseSingleSubst), + 'multiple': (2, parseMultiple), + 'alternate': (3, parseAlternate), + 'ligature': (4, parseLigature), + 'context': (5, parseContextSubst), + 'chained': (6, parseChainedSubst), + 'reversechained':(8, 
parseReverseChainedSubst), + }, + 'GPOS': { + 'single': (1, parseSinglePos), + 'pair': (2, parsePair), + 'kernset': (2, parseKernset), + 'cursive': (3, parseCursive), + 'mark to base': (4, parseMarkToBase), + 'mark to ligature':(5, parseMarkToLigature), + 'mark to mark': (6, parseMarkToMark), + 'context': (7, parseContextPos), + 'chained': (8, parseChainedPos), + }, + }[tableTag][typ] + + with lines.until('lookup end'): + subtables = [] + + while lines.peek(): + with lines.until(('% subtable', 'subtable end')): + while lines.peek(): + subtable = parseLookupSubTable(lines, font, lookupMap) + assert lookup.LookupType == subtable.LookupType + subtables.append(subtable) + if lines.peeks()[0] in ('% subtable', 'subtable end'): + next(lines) + lines.expect('lookup end') + + lookup.SubTable = subtables + lookup.SubTableCount = len(lookup.SubTable) + if lookup.SubTableCount is 0: + # Remove this return when following is fixed: + # https://github.com/fonttools/fonttools/issues/789 + return None + return lookup + +def parseGSUBGPOS(lines, font, tableTag): + container = ttLib.getTableClass(tableTag)() + lookupMap = DeferredMapping() + featureMap = DeferredMapping() + assert tableTag in ('GSUB', 'GPOS') + log.debug("Parsing %s", tableTag) + self = getattr(ot, tableTag)() + self.Version = 0x00010000 + fields = { + 'script table begin': + ('ScriptList', + lambda lines: parseScriptList (lines, featureMap)), + 'feature table begin': + ('FeatureList', + lambda lines: parseFeatureList (lines, lookupMap, featureMap)), + 'lookup': + ('LookupList', + None), + } + for attr,parser in fields.values(): + setattr(self, attr, None) + while lines.peek() is not None: + typ = lines.peek()[0].lower() + if typ not in fields: + log.debug('Skipping %s', lines.peek()) + next(lines) + continue + attr,parser = fields[typ] + if typ == 'lookup': + if self.LookupList is None: + self.LookupList = ot.LookupList() + self.LookupList.Lookup = [] + _, name, _ = lines.peek() + lookup = parseLookup(lines, 
tableTag, font, lookupMap) + if lookupMap is not None: + assert name not in lookupMap, "Duplicate lookup name: %s" % name + lookupMap[name] = len(self.LookupList.Lookup) + else: + assert int(name) == len(self.LookupList.Lookup), "%d %d" % (name, len(self.Lookup)) + self.LookupList.Lookup.append(lookup) + else: + assert getattr(self, attr) is None, attr + setattr(self, attr, parser(lines)) + if self.LookupList: + self.LookupList.LookupCount = len(self.LookupList.Lookup) + if lookupMap is not None: + lookupMap.applyDeferredMappings() + if featureMap is not None: + featureMap.applyDeferredMappings() + container.table = self + return container + +def parseGSUB(lines, font): + return parseGSUBGPOS(lines, font, 'GSUB') +def parseGPOS(lines, font): + return parseGSUBGPOS(lines, font, 'GPOS') + +def parseAttachList(lines, font): + points = {} + with lines.between('attachment list'): + for line in lines: + glyph = makeGlyph(line[0]) + assert glyph not in points, glyph + points[glyph] = [int(i) for i in line[1:]] + return otl.buildAttachList(points, font.getReverseGlyphMap()) + +def parseCaretList(lines, font): + carets = {} + with lines.between('carets'): + for line in lines: + glyph = makeGlyph(line[0]) + assert glyph not in carets, glyph + num = int(line[1]) + thisCarets = [int(i) for i in line[2:]] + assert num == len(thisCarets), line + carets[glyph] = thisCarets + return otl.buildLigCaretList(carets, {}, font.getReverseGlyphMap()) + +def makeMarkFilteringSets(sets, font): + self = ot.MarkGlyphSetsDef() + self.MarkSetTableFormat = 1 + self.MarkSetCount = 1 + max(sets.keys()) + self.Coverage = [None] * self.MarkSetCount + for k,v in sorted(sets.items()): + self.Coverage[k] = makeCoverage(set(v), font) + return self + +def parseMarkFilteringSets(lines, font): + sets = {} + with lines.between('set definition'): + for line in lines: + assert len(line) == 2, line + glyph = makeGlyph(line[0]) + # TODO accept set names + st = int(line[1]) + if st not in sets: + sets[st] = [] + 
sets[st].append(glyph) + return makeMarkFilteringSets(sets, font) + +def parseGDEF(lines, font): + container = ttLib.getTableClass('GDEF')() + log.debug("Parsing GDEF") + self = ot.GDEF() + fields = { + 'class definition begin': + ('GlyphClassDef', + lambda lines, font: parseClassDef(lines, font, klass=ot.GlyphClassDef)), + 'attachment list begin': + ('AttachList', parseAttachList), + 'carets begin': + ('LigCaretList', parseCaretList), + 'mark attachment class definition begin': + ('MarkAttachClassDef', + lambda lines, font: parseClassDef(lines, font, klass=ot.MarkAttachClassDef)), + 'markfilter set definition begin': + ('MarkGlyphSetsDef', parseMarkFilteringSets), + } + for attr,parser in fields.values(): + setattr(self, attr, None) + while lines.peek() is not None: + typ = lines.peek()[0].lower() + if typ not in fields: + log.debug('Skipping %s', typ) + next(lines) + continue + attr,parser = fields[typ] + assert getattr(self, attr) is None, attr + setattr(self, attr, parser(lines, font)) + self.Version = 0x00010000 if self.MarkGlyphSetsDef is None else 0x00010002 + container.table = self + return container + +def parseCmap(lines, font): + container = ttLib.getTableClass('cmap')() + log.debug("Parsing cmap") + tables = [] + while lines.peek() is not None: + lines.expect('cmap subtable %d' % len(tables)) + platId, encId, fmt, lang = [ + parseCmapId(lines, field) + for field in ('platformID', 'encodingID', 'format', 'language')] + table = cmap_classes[fmt](fmt) + table.platformID = platId + table.platEncID = encId + table.language = lang + table.cmap = {} + line = next(lines) + while line[0] != 'end subtable': + table.cmap[int(line[0], 16)] = line[1] + line = next(lines) + tables.append(table) + container.tableVersion = 0 + container.tables = tables + return container + +def parseCmapId(lines, field): + line = next(lines) + assert field == line[0] + return int(line[1]) + +def parseTable(lines, font, tableTag=None): + log.debug("Parsing table") + line = lines.peeks() 
+ tag = None + if line[0].split()[0] == 'FontDame': + tag = line[0].split()[1] + elif ' '.join(line[0].split()[:3]) == 'Font Chef Table': + tag = line[0].split()[3] + if tag is not None: + next(lines) + tag = tag.ljust(4) + if tableTag is None: + tableTag = tag + else: + assert tableTag == tag, (tableTag, tag) + + assert tableTag is not None, "Don't know what table to parse and data doesn't specify" + + return { + 'GSUB': parseGSUB, + 'GPOS': parseGPOS, + 'GDEF': parseGDEF, + 'cmap': parseCmap, + }[tableTag](lines, font) + +class Tokenizer(object): + + def __init__(self, f): + # TODO BytesIO / StringIO as needed? also, figure out whether we work on bytes or unicode + lines = iter(f) + try: + self.filename = f.name + except: + self.filename = None + self.lines = iter(lines) + self.line = '' + self.lineno = 0 + self.stoppers = [] + self.buffer = None + + def __iter__(self): + return self + + def _next_line(self): + self.lineno += 1 + line = self.line = next(self.lines) + line = [s.strip() for s in line.split('\t')] + if len(line) == 1 and not line[0]: + del line[0] + if line and not line[-1]: + log.warning('trailing tab found on line %d: %s' % (self.lineno, self.line)) + while line and not line[-1]: + del line[-1] + return line + + def _next_nonempty(self): + while True: + line = self._next_line() + # Skip comments and empty lines + if line and line[0] and (line[0][0] != '%' or line[0] == '% subtable'): + return line + + def _next_buffered(self): + if self.buffer: + ret = self.buffer + self.buffer = None + return ret + else: + return self._next_nonempty() + + def __next__(self): + line = self._next_buffered() + if line[0].lower() in self.stoppers: + self.buffer = line + raise StopIteration + return line + + def next(self): + return self.__next__() + + def peek(self): + if not self.buffer: + try: + self.buffer = self._next_nonempty() + except StopIteration: + return None + if self.buffer[0].lower() in self.stoppers: + return None + return self.buffer + + def 
peeks(self): + ret = self.peek() + return ret if ret is not None else ('',) + + @contextmanager + def between(self, tag): + start = tag + ' begin' + end = tag + ' end' + self.expectendswith(start) + self.stoppers.append(end) + yield + del self.stoppers[-1] + self.expect(tag + ' end') + + @contextmanager + def until(self, tags): + if type(tags) is not tuple: + tags = (tags,) + self.stoppers.extend(tags) + yield + del self.stoppers[-len(tags):] + + def expect(self, s): + line = next(self) + tag = line[0].lower() + assert tag == s, "Expected '%s', got '%s'" % (s, tag) + return line + + def expectendswith(self, s): + line = next(self) + tag = line[0].lower() + assert tag.endswith(s), "Expected '*%s', got '%s'" % (s, tag) + return line + +def build(f, font, tableTag=None): + lines = Tokenizer(f) + return parseTable(lines, font, tableTag=tableTag) + + +def main(args=None, font=None): + import sys + from fontTools import configLogger + from fontTools.misc.testTools import MockFont + + if args is None: + args = sys.argv[1:] + + # configure the library logger (for >= WARNING) + configLogger() + # comment this out to enable debug messages from mtiLib's logger + # log.setLevel(logging.DEBUG) + + if font is None: + font = MockFont() + + tableTag = None + if args[0].startswith('-t'): + tableTag = args[0][2:] + del args[0] + for f in args: + log.debug("Processing %s", f) + table = build(open(f, 'rt', encoding="utf-8"), font, tableTag=tableTag) + blob = table.compile(font) # Make sure it compiles + decompiled = table.__class__() + decompiled.decompile(blob, font) # Make sure it decompiles! 
+ + #continue + from fontTools.misc import xmlWriter + tag = table.tableTag + writer = xmlWriter.XMLWriter(sys.stdout) + writer.begintag(tag) + writer.newline() + #table.toXML(writer, font) + decompiled.toXML(writer, font) + writer.endtag(tag) + writer.newline() + + +if __name__ == '__main__': + import sys + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fontTools/mtiLib/__main__.py fonttools-3.21.2/Snippets/fontTools/mtiLib/__main__.py --- fonttools-3.0/Snippets/fontTools/mtiLib/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/mtiLib/__main__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +from fontTools.mtiLib import main + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fontTools/otlLib/builder.py fonttools-3.21.2/Snippets/fontTools/otlLib/builder.py --- fonttools-3.0/Snippets/fontTools/otlLib/builder.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/otlLib/builder.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,640 @@ +from __future__ import print_function, division, absolute_import +from fontTools import ttLib +from fontTools.ttLib.tables import otTables as ot +from fontTools.ttLib.tables.otBase import ValueRecord, valueRecordFormatDict + + +def buildCoverage(glyphs, glyphMap): + if not glyphs: + return None + self = ot.Coverage() + self.glyphs = sorted(glyphs, key=glyphMap.__getitem__) + return self + + +LOOKUP_FLAG_RIGHT_TO_LEFT = 0x0001 +LOOKUP_FLAG_IGNORE_BASE_GLYPHS = 0x0002 +LOOKUP_FLAG_IGNORE_LIGATURES = 0x0004 +LOOKUP_FLAG_IGNORE_MARKS = 0x0008 +LOOKUP_FLAG_USE_MARK_FILTERING_SET = 0x0010 + + +def buildLookup(subtables, flags=0, markFilterSet=None): + if subtables is None: + return None + subtables = [st for st in subtables if st is not None] + if not subtables: + return None + assert all(t.LookupType == subtables[0].LookupType 
for t in subtables), \ + ("all subtables must have the same LookupType; got %s" % + repr([t.LookupType for t in subtables])) + self = ot.Lookup() + self.LookupType = subtables[0].LookupType + self.LookupFlag = flags + self.SubTable = subtables + self.SubTableCount = len(self.SubTable) + if markFilterSet is not None: + assert self.LookupFlag & LOOKUP_FLAG_USE_MARK_FILTERING_SET, \ + ("if markFilterSet is not None, flags must set " + "LOOKUP_FLAG_USE_MARK_FILTERING_SET; flags=0x%04x" % flags) + assert isinstance(markFilterSet, int), markFilterSet + self.MarkFilteringSet = markFilterSet + else: + assert (self.LookupFlag & LOOKUP_FLAG_USE_MARK_FILTERING_SET) == 0, \ + ("if markFilterSet is None, flags must not set " + "LOOKUP_FLAG_USE_MARK_FILTERING_SET; flags=0x%04x" % flags) + return self + + +# GSUB + + +def buildSingleSubstSubtable(mapping): + if not mapping: + return None + self = ot.SingleSubst() + self.mapping = dict(mapping) + return self + + +def buildMultipleSubstSubtable(mapping): + if not mapping: + return None + self = ot.MultipleSubst() + self.mapping = dict(mapping) + return self + + +def buildAlternateSubstSubtable(mapping): + if not mapping: + return None + self = ot.AlternateSubst() + self.alternates = dict(mapping) + return self + + +def _getLigatureKey(components): + """Computes a key for ordering ligatures in a GSUB Type-4 lookup. + + When building the OpenType lookup, we need to make sure that + the longest sequence of components is listed first, so we + use the negative length as the primary key for sorting. + To make buildLigatureSubstSubtable() deterministic, we use the + component sequence as the secondary key. + + For example, this will sort (f,f,f) < (f,f,i) < (f,f) < (f,i) < (f,l). 
+ """ + return (-len(components), components) + + +def buildLigatureSubstSubtable(mapping): + if not mapping: + return None + self = ot.LigatureSubst() + # The following single line can replace the rest of this function + # with fontTools >= 3.1: + # self.ligatures = dict(mapping) + self.ligatures = {} + for components in sorted(mapping.keys(), key=_getLigatureKey): + ligature = ot.Ligature() + ligature.Component = components[1:] + ligature.CompCount = len(ligature.Component) + 1 + ligature.LigGlyph = mapping[components] + firstGlyph = components[0] + self.ligatures.setdefault(firstGlyph, []).append(ligature) + return self + + +# GPOS + + +def buildAnchor(x, y, point=None, deviceX=None, deviceY=None): + self = ot.Anchor() + self.XCoordinate, self.YCoordinate = x, y + self.Format = 1 + if point is not None: + self.AnchorPoint = point + self.Format = 2 + if deviceX is not None or deviceY is not None: + assert self.Format == 1, \ + "Either point, or both of deviceX/deviceY, must be None." + self.XDeviceTable = deviceX + self.YDeviceTable = deviceY + self.Format = 3 + return self + + +def buildBaseArray(bases, numMarkClasses, glyphMap): + self = ot.BaseArray() + self.BaseRecord = [] + for base in sorted(bases, key=glyphMap.__getitem__): + b = bases[base] + anchors = [b.get(markClass) for markClass in range(numMarkClasses)] + self.BaseRecord.append(buildBaseRecord(anchors)) + self.BaseCount = len(self.BaseRecord) + return self + + +def buildBaseRecord(anchors): + """[otTables.Anchor, otTables.Anchor, ...] --> otTables.BaseRecord""" + self = ot.BaseRecord() + self.BaseAnchor = anchors + return self + + +def buildComponentRecord(anchors): + """[otTables.Anchor, otTables.Anchor, ...] 
--> otTables.ComponentRecord""" + if not anchors: + return None + self = ot.ComponentRecord() + self.LigatureAnchor = anchors + return self + + +def buildCursivePosSubtable(attach, glyphMap): + """{"alef": (entry, exit)} --> otTables.CursivePos""" + if not attach: + return None + self = ot.CursivePos() + self.Format = 1 + self.Coverage = buildCoverage(attach.keys(), glyphMap) + self.EntryExitRecord = [] + for glyph in self.Coverage.glyphs: + entryAnchor, exitAnchor = attach[glyph] + rec = ot.EntryExitRecord() + rec.EntryAnchor = entryAnchor + rec.ExitAnchor = exitAnchor + self.EntryExitRecord.append(rec) + self.EntryExitCount = len(self.EntryExitRecord) + return self + + +def buildDevice(deltas): + """{8:+1, 10:-3, ...} --> otTables.Device""" + if not deltas: + return None + self = ot.Device() + keys = deltas.keys() + self.StartSize = startSize = min(keys) + self.EndSize = endSize = max(keys) + assert 0 <= startSize <= endSize + self.DeltaValue = deltaValues = [ + deltas.get(size, 0) + for size in range(startSize, endSize + 1)] + maxDelta = max(deltaValues) + minDelta = min(deltaValues) + assert minDelta > -129 and maxDelta < 128 + if minDelta > -3 and maxDelta < 2: + self.DeltaFormat = 1 + elif minDelta > -9 and maxDelta < 8: + self.DeltaFormat = 2 + else: + self.DeltaFormat = 3 + return self + + +def buildLigatureArray(ligs, numMarkClasses, glyphMap): + self = ot.LigatureArray() + self.LigatureAttach = [] + for lig in sorted(ligs, key=glyphMap.__getitem__): + anchors = [] + for component in ligs[lig]: + anchors.append([component.get(mc) for mc in range(numMarkClasses)]) + self.LigatureAttach.append(buildLigatureAttach(anchors)) + self.LigatureCount = len(self.LigatureAttach) + return self + + +def buildLigatureAttach(components): + """[[Anchor, Anchor], [Anchor, Anchor, Anchor]] --> LigatureAttach""" + self = ot.LigatureAttach() + self.ComponentRecord = [buildComponentRecord(c) for c in components] + self.ComponentCount = len(self.ComponentRecord) + return self + 
+ +def buildMarkArray(marks, glyphMap): + """{"acute": (markClass, otTables.Anchor)} --> otTables.MarkArray""" + self = ot.MarkArray() + self.MarkRecord = [] + for mark in sorted(marks.keys(), key=glyphMap.__getitem__): + markClass, anchor = marks[mark] + markrec = buildMarkRecord(markClass, anchor) + self.MarkRecord.append(markrec) + self.MarkCount = len(self.MarkRecord) + return self + + +def buildMarkBasePos(marks, bases, glyphMap): + """Build a list of MarkBasePos subtables. + + a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... + marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)} + bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}} + """ + # TODO: Consider emitting multiple subtables to save space. + # Partition the marks and bases into disjoint subsets, so that + # MarkBasePos rules would only access glyphs from a single + # subset. This would likely lead to smaller mark/base + # matrices, so we might be able to omit many of the empty + # anchor tables that we currently produce. Of course, this + # would only work if the MarkBasePos rules of real-world fonts + # allow partitioning into multiple subsets. We should find out + # whether this is the case; if so, implement the optimization. + # On the other hand, a very large number of subtables could + # slow down layout engines; so this would need profiling. + return [buildMarkBasePosSubtable(marks, bases, glyphMap)] + + +def buildMarkBasePosSubtable(marks, bases, glyphMap): + """Build a single MarkBasePos subtable. + + a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... 
+ marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)} + bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}} + """ + self = ot.MarkBasePos() + self.Format = 1 + self.MarkCoverage = buildCoverage(marks, glyphMap) + self.MarkArray = buildMarkArray(marks, glyphMap) + self.ClassCount = max([mc for mc, _ in marks.values()]) + 1 + self.BaseCoverage = buildCoverage(bases, glyphMap) + self.BaseArray = buildBaseArray(bases, self.ClassCount, glyphMap) + return self + + +def buildMarkLigPos(marks, ligs, glyphMap): + """Build a list of MarkLigPos subtables. + + a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... + marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)} + ligs = {"f_i": [{0: a3, 1: a5}, {0: a4, 1: a5}], "c_t": [{...}, {...}]} + """ + # TODO: Consider splitting into multiple subtables to save space, + # as with MarkBasePos, this would be a trade-off that would need + # profiling. And, depending on how typical fonts are structured, + # it might not be worth doing at all. + return [buildMarkLigPosSubtable(marks, ligs, glyphMap)] + + +def buildMarkLigPosSubtable(marks, ligs, glyphMap): + """Build a single MarkLigPos subtable. + + a1, a2, a3, a4, a5 = buildAnchor(500, 100), ... + marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)} + ligs = {"f_i": [{0: a3, 1: a5}, {0: a4, 1: a5}], "c_t": [{...}, {...}]} + """ + self = ot.MarkLigPos() + self.Format = 1 + self.MarkCoverage = buildCoverage(marks, glyphMap) + self.MarkArray = buildMarkArray(marks, glyphMap) + self.ClassCount = max([mc for mc, _ in marks.values()]) + 1 + self.LigatureCoverage = buildCoverage(ligs, glyphMap) + self.LigatureArray = buildLigatureArray(ligs, self.ClassCount, glyphMap) + return self + + +def buildMarkRecord(classID, anchor): + assert isinstance(classID, int) + assert isinstance(anchor, ot.Anchor) + self = ot.MarkRecord() + self.Class = classID + self.MarkAnchor = anchor + return self + + +def buildMark2Record(anchors): + """[otTables.Anchor, otTables.Anchor, ...] 
--> otTables.Mark2Record""" + self = ot.Mark2Record() + self.Mark2Anchor = anchors + return self + + +def _getValueFormat(f, values, i): + """Helper for buildPairPos{Glyphs|Classes}Subtable.""" + if f is not None: + return f + mask = 0 + for value in values: + if value is not None and value[i] is not None: + mask |= value[i].getFormat() + return mask + + +def buildPairPosClassesSubtable(pairs, glyphMap, + valueFormat1=None, valueFormat2=None): + coverage = set() + classDef1 = ClassDefBuilder(useClass0=True) + classDef2 = ClassDefBuilder(useClass0=False) + for gc1, gc2 in sorted(pairs): + coverage.update(gc1) + classDef1.add(gc1) + classDef2.add(gc2) + self = ot.PairPos() + self.Format = 2 + self.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0) + self.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1) + self.Coverage = buildCoverage(coverage, glyphMap) + self.ClassDef1 = classDef1.build() + self.ClassDef2 = classDef2.build() + classes1 = classDef1.classes() + classes2 = classDef2.classes() + self.Class1Record = [] + for c1 in classes1: + rec1 = ot.Class1Record() + rec1.Class2Record = [] + self.Class1Record.append(rec1) + for c2 in classes2: + rec2 = ot.Class2Record() + rec2.Value1, rec2.Value2 = pairs.get((c1, c2), (None, None)) + rec1.Class2Record.append(rec2) + self.Class1Count = len(self.Class1Record) + self.Class2Count = len(classes2) + return self + + +def buildPairPosGlyphs(pairs, glyphMap): + p = {} # (formatA, formatB) --> {(glyphA, glyphB): (valA, valB)} + for (glyphA, glyphB), (valA, valB) in pairs.items(): + formatA = valA.getFormat() if valA is not None else 0 + formatB = valB.getFormat() if valB is not None else 0 + pos = p.setdefault((formatA, formatB), {}) + pos[(glyphA, glyphB)] = (valA, valB) + return [ + buildPairPosGlyphsSubtable(pos, glyphMap, formatA, formatB) + for ((formatA, formatB), pos) in sorted(p.items())] + + +def buildPairPosGlyphsSubtable(pairs, glyphMap, + valueFormat1=None, valueFormat2=None): + self = 
ot.PairPos() + self.Format = 1 + self.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0) + self.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1) + p = {} + for (glyphA, glyphB), (valA, valB) in pairs.items(): + p.setdefault(glyphA, []).append((glyphB, valA, valB)) + self.Coverage = buildCoverage({g for g, _ in pairs.keys()}, glyphMap) + self.PairSet = [] + for glyph in self.Coverage.glyphs: + ps = ot.PairSet() + ps.PairValueRecord = [] + self.PairSet.append(ps) + for glyph2, val1, val2 in \ + sorted(p[glyph], key=lambda x: glyphMap[x[0]]): + pvr = ot.PairValueRecord() + pvr.SecondGlyph = glyph2 + pvr.Value1 = val1 if val1 and val1.getFormat() != 0 else None + pvr.Value2 = val2 if val2 and val2.getFormat() != 0 else None + ps.PairValueRecord.append(pvr) + ps.PairValueCount = len(ps.PairValueRecord) + self.PairSetCount = len(self.PairSet) + return self + + +def buildSinglePos(mapping, glyphMap): + """{"glyph": ValueRecord} --> [otTables.SinglePos*]""" + result, handled = [], set() + # In SinglePos format 1, the covered glyphs all share the same ValueRecord. + # In format 2, each glyph has its own ValueRecord, but these records + # all have the same properties (eg., all have an X but no Y placement). + coverages, masks, values = {}, {}, {} + for glyph, value in mapping.items(): + key = _getSinglePosValueKey(value) + coverages.setdefault(key, []).append(glyph) + masks.setdefault(key[0], []).append(key) + values[key] = value + + # If a ValueRecord is shared between multiple glyphs, we generate + # a SinglePos format 1 subtable; that is the most compact form. + for key, glyphs in coverages.items(): + if len(glyphs) > 1: + format1Mapping = {g: values[key] for g in glyphs} + result.append(buildSinglePosSubtable(format1Mapping, glyphMap)) + handled.add(key) + + # In the remaining ValueRecords, look for those whose valueFormat + # (the set of used properties) is shared between multiple records. + # These will get encoded in format 2. 
+ for valueFormat, keys in masks.items(): + f2 = [k for k in keys if k not in handled] + if len(f2) > 1: + format2Mapping = {coverages[k][0]: values[k] for k in f2} + result.append(buildSinglePosSubtable(format2Mapping, glyphMap)) + handled.update(f2) + + # The remaining ValueRecords are singletons in the sense that + # they are only used by a single glyph, and their valueFormat + # is unique as well. We encode these in format 1 again. + for key, glyphs in coverages.items(): + if key not in handled: + assert len(glyphs) == 1, glyphs + st = buildSinglePosSubtable({glyphs[0]: values[key]}, glyphMap) + result.append(st) + + # When the OpenType layout engine traverses the subtables, it will + # stop after the first matching subtable. Therefore, we sort the + # resulting subtables by decreasing coverage size; this increases + # the chance that the layout engine can do an early exit. (Of course, + # this would only be true if all glyphs were equally frequent, which + # is not really the case; but we do not know their distribution). + # If two subtables cover the same number of glyphs, we sort them + # by glyph ID so that our output is deterministic. 
+ result.sort(key=lambda t: _getSinglePosTableKey(t, glyphMap)) + return result + + +def buildSinglePosSubtable(values, glyphMap): + """{glyphName: otBase.ValueRecord} --> otTables.SinglePos""" + self = ot.SinglePos() + self.Coverage = buildCoverage(values.keys(), glyphMap) + valueRecords = [values[g] for g in self.Coverage.glyphs] + self.ValueFormat = 0 + for v in valueRecords: + self.ValueFormat |= v.getFormat() + if all(v == valueRecords[0] for v in valueRecords): + self.Format = 1 + if self.ValueFormat != 0: + self.Value = valueRecords[0] + else: + self.Value = None + else: + self.Format = 2 + self.Value = valueRecords + self.ValueCount = len(self.Value) + return self + + +def _getSinglePosTableKey(subtable, glyphMap): + assert isinstance(subtable, ot.SinglePos), subtable + glyphs = subtable.Coverage.glyphs + return (-len(glyphs), glyphMap[glyphs[0]]) + + +def _getSinglePosValueKey(valueRecord): + """otBase.ValueRecord --> (2, ("YPlacement": 12))""" + assert isinstance(valueRecord, ValueRecord), valueRecord + valueFormat, result = 0, [] + for name, value in valueRecord.__dict__.items(): + if isinstance(value, ot.Device): + result.append((name, _makeDeviceTuple(value))) + else: + result.append((name, value)) + valueFormat |= valueRecordFormatDict[name][0] + result.sort() + result.insert(0, valueFormat) + return tuple(result) + + +def _makeDeviceTuple(device): + """otTables.Device --> tuple, for making device tables unique""" + return (device.DeltaFormat, device.StartSize, device.EndSize, + tuple(device.DeltaValue)) + + +def buildValue(value): + self = ValueRecord() + for k, v in value.items(): + setattr(self, k, v) + return self + + +# GDEF + +def buildAttachList(attachPoints, glyphMap): + """{"glyphName": [4, 23]} --> otTables.AttachList, or None""" + if not attachPoints: + return None + self = ot.AttachList() + self.Coverage = buildCoverage(attachPoints.keys(), glyphMap) + self.AttachPoint = [buildAttachPoint(attachPoints[g]) + for g in self.Coverage.glyphs] + 
self.GlyphCount = len(self.AttachPoint) + return self + + +def buildAttachPoint(points): + """[4, 23, 41] --> otTables.AttachPoint""" + if not points: + return None + self = ot.AttachPoint() + self.PointIndex = sorted(set(points)) + self.PointCount = len(self.PointIndex) + return self + + +def buildCaretValueForCoord(coord): + """500 --> otTables.CaretValue, format 1""" + self = ot.CaretValue() + self.Format = 1 + self.Coordinate = coord + return self + + +def buildCaretValueForPoint(point): + """4 --> otTables.CaretValue, format 2""" + self = ot.CaretValue() + self.Format = 2 + self.CaretValuePoint = point + return self + + +def buildLigCaretList(coords, points, glyphMap): + """{"f_f_i":[300,600]}, {"c_t":[28]} --> otTables.LigCaretList, or None""" + glyphs = set(coords.keys()) if coords else set() + if points: + glyphs.update(points.keys()) + carets = {g: buildLigGlyph(coords.get(g), points.get(g)) for g in glyphs} + carets = {g: c for g, c in carets.items() if c is not None} + if not carets: + return None + self = ot.LigCaretList() + self.Coverage = buildCoverage(carets.keys(), glyphMap) + self.LigGlyph = [carets[g] for g in self.Coverage.glyphs] + self.LigGlyphCount = len(self.LigGlyph) + return self + + +def buildLigGlyph(coords, points): + """([500], [4]) --> otTables.LigGlyph; None for empty coords/points""" + carets = [] + if coords: + carets.extend([buildCaretValueForCoord(c) for c in sorted(coords)]) + if points: + carets.extend([buildCaretValueForPoint(p) for p in sorted(points)]) + if not carets: + return None + self = ot.LigGlyph() + self.CaretValue = carets + self.CaretCount = len(self.CaretValue) + return self + + +def buildMarkGlyphSetsDef(markSets, glyphMap): + """[{"acute","grave"}, {"caron","grave"}] --> otTables.MarkGlyphSetsDef""" + if not markSets: + return None + self = ot.MarkGlyphSetsDef() + self.MarkSetTableFormat = 1 + self.Coverage = [buildCoverage(m, glyphMap) for m in markSets] + self.MarkSetCount = len(self.Coverage) + return self + + 
+class ClassDefBuilder(object): + """Helper for building ClassDef tables.""" + def __init__(self, useClass0): + self.classes_ = set() + self.glyphs_ = {} + self.useClass0_ = useClass0 + + def canAdd(self, glyphs): + if isinstance(glyphs, (set, frozenset)): + glyphs = sorted(glyphs) + glyphs = tuple(glyphs) + if glyphs in self.classes_: + return True + for glyph in glyphs: + if glyph in self.glyphs_: + return False + return True + + def add(self, glyphs): + if isinstance(glyphs, (set, frozenset)): + glyphs = sorted(glyphs) + glyphs = tuple(glyphs) + if glyphs in self.classes_: + return + self.classes_.add(glyphs) + for glyph in glyphs: + assert glyph not in self.glyphs_ + self.glyphs_[glyph] = glyphs + + def classes(self): + # In ClassDef1 tables, class id #0 does not need to be encoded + # because zero is the default. Therefore, we use id #0 for the + # glyph class that has the largest number of members. However, + # in other tables than ClassDef1, 0 means "every other glyph" + # so we should not use that ID for any real glyph classes; + # we implement this by inserting an empty set at position 0. + # + # TODO: Instead of counting the number of glyphs in each class, + # we should determine the encoded size. If the glyphs in a large + # class form a contiguous range, the encoding is actually quite + # compact, whereas a non-contiguous set might need a lot of bytes + # in the output file. We don't get this right with the key below. 
+ result = sorted(self.classes_, key=lambda s: (len(s), s), reverse=True) + if not self.useClass0_: + result.insert(0, frozenset()) + return result + + def build(self): + glyphClasses = {} + for classID, glyphs in enumerate(self.classes()): + if classID == 0: + continue + for glyph in glyphs: + glyphClasses[glyph] = classID + classDef = ot.ClassDef() + classDef.classDefs = glyphClasses + return classDef diff -Nru fonttools-3.0/Snippets/fontTools/otlLib/builder.py.sketch fonttools-3.21.2/Snippets/fontTools/otlLib/builder.py.sketch --- fonttools-3.0/Snippets/fontTools/otlLib/builder.py.sketch 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/otlLib/builder.py.sketch 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,105 @@ + +from fontTools.otlLib import builder as builder + +GDEF::mark filtering sets +name:: + +lookup_flags = builder.LOOKUP_FLAG_IGNORE_MARKS | builder.LOOKUP_FLAG_RTL +smcp_subtable = builder.buildSingleSubstitute({'a':'a.scmp'}) +smcp_lookup = builder.buildLookup([smcp_subtable], lookup_flags=lookup_flags, mark_filter_set=int) + +lookups = [smcp_lookup, ...] + +scmp_feature = builder.buildFeature('smcp', [scmp_lookup], lookup_list=lookups) +scmp_feature = builder.buildFeature('smcp', [0]) + +features = [smcp_feature] + +default_langsys = builder.buildLangSys(set([scmp_feature]), requiredFeature=None, featureOrder=features) +default_langsys = builder.buildLangSys(set([0]), requiredFeature=None) + +script = + + +#GSUB: + +builder.buildSingleSubst({'a':'a.scmp'}) +builder.buildLigatureSubst({('f','i'):'fi'}) +builder.buildMultipleSubst({'a':('a0','a1')}) +builder.buildAlternateSubst({'a':('a.0','a.1')}) + + +class ChainSequence : namedtuple(['backtrack', 'input', 'lookahead')]) + pass + +ChainSequence(backtrack=..., input=..., lookahead=...) 
+ +klass0 = frozenset() + +builder.buildChainContextGlyphs( + [ + ( (None, ('f','f','i'), (,)), ( (1,lookup_fi), (1,lookup_2) ) ), + ], + glyphMap +) +builder.buildChainContextClass( + [ + ( (None, (2,0,1), (,)), ( (1,lookup_fi), (1,lookup_2) ) ), + ], + klasses = ( backtrackClass, ... ), + glyphMap +) +builder.buildChainContextCoverage( + ( (None, (frozenset('f'),frozenset('f'),frozenset('i')), (,)), ( (1,lookup_fi), (1,lookup_2) ) ), + glyphMap +) +builder.buildExtension(...) + +#GPOS: +device = builder.buildDevice() +builder.buildAnchor(100, -200) or (100,-200) +builder.buildAnchor(100, -200, device=device) +builder.buildAnchor(100, -200, point=2) + +valueRecord = builder.buildValue({'XAdvance':-200, ...}) + +builder.buildSinglePos({'a':valueRecord}) +builder.buildPairPosGlyphs( + { + ('a','b'): (valueRecord1,valueRecord2), + }, + glyphMap, + , valueFormat1=None, valueFormat2=None +) +builder.buildPairPosClasses( + { + (frozenset(['a']),frozenset(['b'])): (valueRecord1,valueRecord2), + }, + glyphMap, + , valueFormat1=None, valueFormat2=None +) + +builder.buildCursivePos( + { + 'alef': (entry,exit), + } + glyphMap +) +builder.buildMarkBasePos( + marks = { + 'mark1': (klass, anchor), + }, + bases = { + 'base0': [anchor0, anchor1, anchor2], + }, + glyphMap +) +builder.buildMarkBasePos( + marks = { + 'mark1': (name, anchor), + }, + bases = { + 'base0': {'top':anchor0, 'left':anchor1}, + }, + glyphMap +) diff -Nru fonttools-3.0/Snippets/fontTools/otlLib/__init__.py fonttools-3.21.2/Snippets/fontTools/otlLib/__init__.py --- fonttools-3.0/Snippets/fontTools/otlLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/otlLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +"""OpenType Layout-related functionality.""" diff -Nru fonttools-3.0/Snippets/fontTools/pens/areaPen.py fonttools-3.21.2/Snippets/fontTools/pens/areaPen.py --- fonttools-3.0/Snippets/fontTools/pens/areaPen.py 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Snippets/fontTools/pens/areaPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,59 @@ +"""Calculate the area of a glyph.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["AreaPen"] + + +class AreaPen(BasePen): + + def __init__(self, glyphset=None): + BasePen.__init__(self, glyphset) + self.value = 0 + + def _moveTo(self, p0): + self._p0 = self._startPoint = p0 + + def _lineTo(self, p1): + x0, y0 = self._p0 + x1, y1 = p1 + self.value -= (x1 - x0) * (y1 + y0) * .5 + self._p0 = p1 + + def _qCurveToOne(self, p1, p2): + # https://github.com/Pomax/bezierinfo/issues/44 + p0 = self._p0 + x0, y0 = p0[0], p0[1] + x1, y1 = p1[0] - x0, p1[1] - y0 + x2, y2 = p2[0] - x0, p2[1] - y0 + self.value -= (x2 * y1 - x1 * y2) / 3 + self._lineTo(p2) + self._p0 = p2 + + def _curveToOne(self, p1, p2, p3): + # https://github.com/Pomax/bezierinfo/issues/44 + p0 = self._p0 + x0, y0 = p0[0], p0[1] + x1, y1 = p1[0] - x0, p1[1] - y0 + x2, y2 = p2[0] - x0, p2[1] - y0 + x3, y3 = p3[0] - x0, p3[1] - y0 + self.value -= ( + x1 * ( - y2 - y3) + + x2 * (y1 - 2*y3) + + x3 * (y1 + 2*y2 ) + ) * 0.15 + self._lineTo(p3) + self._p0 = p3 + + def _closePath(self): + self._lineTo(self._startPoint) + del self._p0, self._startPoint + + def _endPath(self): + if self._p0 != self._startPoint: + # Area is not defined for open contours. 
+ raise NotImplementedError + del self._p0, self._startPoint diff -Nru fonttools-3.0/Snippets/fontTools/pens/basePen.py fonttools-3.21.2/Snippets/fontTools/pens/basePen.py --- fonttools-3.0/Snippets/fontTools/pens/basePen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/basePen.py 2018-01-08 12:40:40.000000000 +0000 @@ -38,6 +38,7 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +from fontTools.misc.loggingTools import LogMixin __all__ = ["AbstractPen", "NullPen", "BasePen", "decomposeSuperBezierSegment", "decomposeQuadraticSegment"] @@ -141,7 +142,50 @@ pass -class BasePen(AbstractPen): +class LoggingPen(LogMixin, AbstractPen): + """A pen with a `log` property (see fontTools.misc.loggingTools.LogMixin) + """ + pass + + +class DecomposingPen(LoggingPen): + + """ Implements a 'addComponent' method that decomposes components + (i.e. draws them onto self as simple contours). + It can also be used as a mixin class (e.g. see ContourRecordingPen). + + You must override moveTo, lineTo, curveTo and qCurveTo. You may + additionally override closePath, endPath and addComponent. + """ + + # By default a warning message is logged when a base glyph is missing; + # set this to False if you want to raise a 'KeyError' exception + skipMissingComponents = True + + def __init__(self, glyphSet): + """ Takes a single 'glyphSet' argument (dict), in which the glyphs + that are referenced as components are looked up by their name. + """ + super(DecomposingPen, self).__init__() + self.glyphSet = glyphSet + + def addComponent(self, glyphName, transformation): + """ Transform the points of the base glyph and draw it onto self. 
+ """ + from fontTools.pens.transformPen import TransformPen + try: + glyph = self.glyphSet[glyphName] + except KeyError: + if not self.skipMissingComponents: + raise + self.log.warning( + "glyph '%s' is missing from glyphSet; skipped" % glyphName) + else: + tPen = TransformPen(self, transformation) + glyph.draw(tPen) + + +class BasePen(DecomposingPen): """Base class for drawing pens. You must override _moveTo, _lineTo and _curveToOne. You may additionally override _closePath, _endPath, @@ -149,8 +193,8 @@ methods. """ - def __init__(self, glyphSet): - self.glyphSet = glyphSet + def __init__(self, glyphSet=None): + super(BasePen, self).__init__(glyphSet) self.__currentPoint = None # must override @@ -186,19 +230,6 @@ mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y) self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2) - def addComponent(self, glyphName, transformation): - """This default implementation simply transforms the points - of the base glyph and draws it onto self. - """ - from fontTools.pens.transformPen import TransformPen - try: - glyph = self.glyphSet[glyphName] - except KeyError: - pass - else: - tPen = TransformPen(self, transformation) - glyph.draw(tPen) - # don't override def _getCurrentPoint(self): diff -Nru fonttools-3.0/Snippets/fontTools/pens/basePen_test.py fonttools-3.21.2/Snippets/fontTools/pens/basePen_test.py --- fonttools-3.0/Snippets/fontTools/pens/basePen_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/basePen_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,171 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.basePen import \ - BasePen, decomposeSuperBezierSegment, decomposeQuadraticSegment -import unittest - - -class _TestPen(BasePen): - def __init__(self): - BasePen.__init__(self, glyphSet={}) - self._commands = [] - - def __repr__(self): - return " ".join(self._commands) - - def getCurrentPoint(self): - return 
self._getCurrentPoint() - - def _moveTo(self, pt): - self._commands.append("%s %s moveto" % (pt[0], pt[1])) - - def _lineTo(self, pt): - self._commands.append("%s %s lineto" % (pt[0], pt[1])) - - def _curveToOne(self, bcp1, bcp2, pt): - self._commands.append("%s %s %s %s %s %s curveto" % - (bcp1[0], bcp1[1], - bcp2[0], bcp2[1], - pt[0], pt[1])) - - def _closePath(self): - self._commands.append("closepath") - - def _endPath(self): - self._commands.append("endpath") - - -class _TestGlyph: - def draw(self, pen): - pen.moveTo((0.0, 0.0)) - pen.lineTo((0.0, 100.0)) - pen.curveTo((50.0, 75.0), (60.0, 50.0), (50.0, 25.0), (0.0, 0.0)) - pen.closePath() - - -class BasePenTest(unittest.TestCase): - def test_moveTo(self): - pen = _TestPen() - pen.moveTo((0.5, -4.3)) - self.assertEqual("0.5 -4.3 moveto", repr(pen)) - self.assertEqual((0.5, -4.3), pen.getCurrentPoint()) - - def test_lineTo(self): - pen = _TestPen() - pen.moveTo((4, 5)) - pen.lineTo((7, 8)) - self.assertEqual("4 5 moveto 7 8 lineto", repr(pen)) - self.assertEqual((7, 8), pen.getCurrentPoint()) - - def test_curveTo_zeroPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - self.assertRaises(AssertionError, pen.curveTo) - - def test_curveTo_onePoint(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.curveTo((1.0, 1.1)) - self.assertEqual("0.0 0.0 moveto 1.0 1.1 lineto", repr(pen)) - self.assertEqual((1.0, 1.1), pen.getCurrentPoint()) - - def test_curveTo_twoPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.curveTo((6.0, 3.0), (3.0, 6.0)) - self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", - repr(pen)) - self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) - - def test_curveTo_manyPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.curveTo((1.0, 1.1), (2.0, 2.1), (3.0, 3.1), (4.0, 4.1)) - self.assertEqual("0.0 0.0 moveto " - "1.0 1.1 1.5 1.6 2.0 2.1 curveto " - "2.5 2.6 3.0 3.1 4.0 4.1 curveto", repr(pen)) - self.assertEqual((4.0, 4.1), pen.getCurrentPoint()) - - 
def test_qCurveTo_zeroPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - self.assertRaises(AssertionError, pen.qCurveTo) - - def test_qCurveTo_onePoint(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.qCurveTo((77.7, 99.9)) - self.assertEqual("0.0 0.0 moveto 77.7 99.9 lineto", repr(pen)) - self.assertEqual((77.7, 99.9), pen.getCurrentPoint()) - - def test_qCurveTo_manyPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.qCurveTo((6.0, 3.0), (3.0, 6.0)) - self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", - repr(pen)) - self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) - - def test_qCurveTo_onlyOffCurvePoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.qCurveTo((6.0, -6.0), (12.0, 12.0), (18.0, -18.0), None) - self.assertEqual("0.0 0.0 moveto " - "12.0 -12.0 moveto " - "8.0 -8.0 7.0 -3.0 9.0 3.0 curveto " - "11.0 9.0 13.0 7.0 15.0 -3.0 curveto " - "17.0 -13.0 16.0 -16.0 12.0 -12.0 curveto", repr(pen)) - self.assertEqual((12.0, -12.0), pen.getCurrentPoint()) - - def test_closePath(self): - pen = _TestPen() - pen.lineTo((3, 4)) - pen.closePath() - self.assertEqual("3 4 lineto closepath", repr(pen)) - self.assertEqual(None, pen.getCurrentPoint()) - - def test_endPath(self): - pen = _TestPen() - pen.lineTo((3, 4)) - pen.endPath() - self.assertEqual("3 4 lineto endpath", repr(pen)) - self.assertEqual(None, pen.getCurrentPoint()) - - def test_addComponent(self): - pen = _TestPen() - pen.glyphSet["oslash"] = _TestGlyph() - pen.addComponent("oslash", (2, 3, 0.5, 2, -10, 0)) - self.assertEqual("-10.0 0.0 moveto " - "40.0 200.0 lineto " - "127.5 300.0 131.25 290.0 125.0 265.0 curveto " - "118.75 240.0 102.5 200.0 -10.0 0.0 curveto " - "closepath", repr(pen)) - self.assertEqual(None, pen.getCurrentPoint()) - - -class DecomposeSegmentTest(unittest.TestCase): - def test_decomposeSuperBezierSegment(self): - decompose = decomposeSuperBezierSegment - self.assertRaises(AssertionError, decompose, []) - 
self.assertRaises(AssertionError, decompose, [(0, 0)]) - self.assertRaises(AssertionError, decompose, [(0, 0), (1, 1)]) - self.assertEqual([((0, 0), (1, 1), (2, 2))], - decompose([(0, 0), (1, 1), (2, 2)])) - self.assertEqual( - [((0, 0), (2, -2), (4, 0)), ((6, 2), (8, 8), (12, -12))], - decompose([(0, 0), (4, -4), (8, 8), (12, -12)])) - - def test_decomposeQuadraticSegment(self): - decompose = decomposeQuadraticSegment - self.assertRaises(AssertionError, decompose, []) - self.assertRaises(AssertionError, decompose, [(0, 0)]) - self.assertEqual([((0,0), (4, 8))], decompose([(0, 0), (4, 8)])) - self.assertEqual([((0,0), (2, 4)), ((4, 8), (9, -9))], - decompose([(0, 0), (4, 8), (9, -9)])) - self.assertEqual( - [((0, 0), (2.0, 4.0)), ((4, 8), (6.5, -0.5)), ((9, -9), (10, 10))], - decompose([(0, 0), (4, 8), (9, -9), (10, 10)])) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/pens/boundsPen.py fonttools-3.21.2/Snippets/fontTools/pens/boundsPen.py --- fonttools-3.0/Snippets/fontTools/pens/boundsPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/boundsPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -17,25 +17,42 @@ When the shape has been drawn, the bounds are available as the 'bounds' attribute of the pen object. It's a 4-tuple: - (xMin, yMin, xMax, yMax) + (xMin, yMin, xMax, yMax). + + If 'ignoreSinglePoints' is True, single points are ignored. 
""" - def __init__(self, glyphSet): + def __init__(self, glyphSet, ignoreSinglePoints=False): BasePen.__init__(self, glyphSet) - self.bounds = None + self.ignoreSinglePoints = ignoreSinglePoints + self.init() + + def init(self): + self.bounds = None + self._start = None def _moveTo(self, pt): + self._start = pt + if not self.ignoreSinglePoints: + self._addMoveTo() + + def _addMoveTo(self): + if self._start is None: + return bounds = self.bounds if bounds: - self.bounds = updateBounds(bounds, pt) + self.bounds = updateBounds(bounds, self._start) else: - x, y = pt + x, y = self._start self.bounds = (x, y, x, y) + self._start = None def _lineTo(self, pt): + self._addMoveTo() self.bounds = updateBounds(self.bounds, pt) def _curveToOne(self, bcp1, bcp2, pt): + self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, bcp1) bounds = updateBounds(bounds, bcp2) @@ -43,6 +60,7 @@ self.bounds = bounds def _qCurveToOne(self, bcp, pt): + self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, bcp) bounds = updateBounds(bounds, pt) @@ -62,6 +80,7 @@ """ def _curveToOne(self, bcp1, bcp2, pt): + self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, pt) if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds): @@ -70,6 +89,7 @@ self.bounds = bounds def _qCurveToOne(self, bcp, pt): + self._addMoveTo() bounds = self.bounds bounds = updateBounds(bounds, pt) if not pointInRect(bcp, bounds): diff -Nru fonttools-3.0/Snippets/fontTools/pens/boundsPen_test.py fonttools-3.21.2/Snippets/fontTools/pens/boundsPen_test.py --- fonttools-3.0/Snippets/fontTools/pens/boundsPen_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/boundsPen_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,66 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen -import unittest - - -def draw_(pen): - 
pen.moveTo((0, 0)) - pen.lineTo((0, 100)) - pen.qCurveTo((50, 75), (60, 50), (50, 25), (0, 0)) - pen.curveTo((-50, 25), (-60, 50), (-50, 75), (0, 100)) - pen.closePath() - - -def bounds_(pen): - return " ".join(["%.0f" % c for c in pen.bounds]) - - -class BoundsPenTest(unittest.TestCase): - def test_draw(self): - pen = BoundsPen(None) - draw_(pen) - self.assertEqual("-55 0 58 100", bounds_(pen)) - - def test_empty(self): - pen = BoundsPen(None) - self.assertEqual(None, pen.bounds) - - def test_curve(self): - pen = BoundsPen(None) - pen.moveTo((0, 0)) - pen.curveTo((20, 10), (90, 40), (0, 0)) - self.assertEqual("0 0 45 20", bounds_(pen)) - - def test_quadraticCurve(self): - pen = BoundsPen(None) - pen.moveTo((0, 0)) - pen.qCurveTo((6, 6), (10, 0)) - self.assertEqual("0 0 10 3", bounds_(pen)) - - -class ControlBoundsPenTest(unittest.TestCase): - def test_draw(self): - pen = ControlBoundsPen(None) - draw_(pen) - self.assertEqual("-55 0 60 100", bounds_(pen)) - - def test_empty(self): - pen = ControlBoundsPen(None) - self.assertEqual(None, pen.bounds) - - def test_curve(self): - pen = ControlBoundsPen(None) - pen.moveTo((0, 0)) - pen.curveTo((20, 10), (90, 40), (0, 0)) - self.assertEqual("0 0 90 40", bounds_(pen)) - - def test_quadraticCurve(self): - pen = ControlBoundsPen(None) - pen.moveTo((0, 0)) - pen.qCurveTo((6, 6), (10, 0)) - self.assertEqual("0 0 10 6", bounds_(pen)) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/pens/filterPen.py fonttools-3.21.2/Snippets/fontTools/pens/filterPen.py --- fonttools-3.0/Snippets/fontTools/pens/filterPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/filterPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,121 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import AbstractPen +from fontTools.pens.recordingPen import RecordingPen + + +class 
_PassThruComponentsMixin(object): + + def addComponent(self, glyphName, transformation): + self._outPen.addComponent(glyphName, transformation) + + +class FilterPen(_PassThruComponentsMixin, AbstractPen): + + """ Base class for pens that apply some transformation to the coordinates + they receive and pass them to another pen. + + You can override any of its methods. The default implementation does + nothing, but passes the commands unmodified to the other pen. + + >>> from fontTools.pens.recordingPen import RecordingPen + >>> rec = RecordingPen() + >>> pen = FilterPen(rec) + >>> v = iter(rec.value) + + >>> pen.moveTo((0, 0)) + >>> next(v) + ('moveTo', ((0, 0),)) + + >>> pen.lineTo((1, 1)) + >>> next(v) + ('lineTo', ((1, 1),)) + + >>> pen.curveTo((2, 2), (3, 3), (4, 4)) + >>> next(v) + ('curveTo', ((2, 2), (3, 3), (4, 4))) + + >>> pen.qCurveTo((5, 5), (6, 6), (7, 7), (8, 8)) + >>> next(v) + ('qCurveTo', ((5, 5), (6, 6), (7, 7), (8, 8))) + + >>> pen.closePath() + >>> next(v) + ('closePath', ()) + + >>> pen.moveTo((9, 9)) + >>> next(v) + ('moveTo', ((9, 9),)) + + >>> pen.endPath() + >>> next(v) + ('endPath', ()) + + >>> pen.addComponent('foo', (1, 0, 0, 1, 0, 0)) + >>> next(v) + ('addComponent', ('foo', (1, 0, 0, 1, 0, 0))) + """ + + def __init__(self, outPen): + self._outPen = outPen + + def moveTo(self, pt): + self._outPen.moveTo(pt) + + def lineTo(self, pt): + self._outPen.lineTo(pt) + + def curveTo(self, *points): + self._outPen.curveTo(*points) + + def qCurveTo(self, *points): + self._outPen.qCurveTo(*points) + + def closePath(self): + self._outPen.closePath() + + def endPath(self): + self._outPen.endPath() + + +class ContourFilterPen(_PassThruComponentsMixin, RecordingPen): + """A "buffered" filter pen that accumulates contour data, passes + it through a ``filterContour`` method when the contour is closed or ended, + and finally draws the result with the output pen. + + Components are passed through unchanged. 
+ """ + + def __init__(self, outPen): + super(ContourFilterPen, self).__init__() + self._outPen = outPen + + def closePath(self): + super(ContourFilterPen, self).closePath() + self._flushContour() + + def endPath(self): + super(ContourFilterPen, self).endPath() + self._flushContour() + + def _flushContour(self): + result = self.filterContour(self.value) + if result is not None: + self.value = result + self.replay(self._outPen) + self.value = [] + + def filterContour(self, contour): + """Subclasses must override this to perform the filtering. + + The contour is a list of pen (operator, operands) tuples. + Operators are strings corresponding to the AbstractPen methods: + "moveTo", "lineTo", "curveTo", "qCurveTo", "closePath" and + "endPath". The operands are the positional arguments that are + passed to each method. + + If the method doesn't return a value (i.e. returns None), it's + assumed that the argument was modified in-place. + Otherwise, the return value is drawn with the output pen. + """ + return # or return contour diff -Nru fonttools-3.0/Snippets/fontTools/pens/momentsPen.py fonttools-3.21.2/Snippets/fontTools/pens/momentsPen.py --- fonttools-3.0/Snippets/fontTools/pens/momentsPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/momentsPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,294 @@ +"""Pen calculating 0th, 1st, and 2nd moments of area of glyph shapes. +This is low-level, autogenerated pen. 
Use statisticsPen instead.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["MomentsPen"] + + +class MomentsPen(BasePen): + + def __init__(self, glyphset=None): + BasePen.__init__(self, glyphset) + + self.area = 0 + self.momentX = 0 + self.momentY = 0 + self.momentXX = 0 + self.momentXY = 0 + self.momentYY = 0 + + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _endPath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + # Green theorem is not defined on open contours. + raise NotImplementedError + + def _lineTo(self, p1): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + + r0 = x1*y0 + r1 = x1*y1 + r2 = x1**2 + r3 = x0**2 + r4 = 2*y0 + r5 = y0 - y1 + r6 = r5*x0 + r7 = y0**2 + r8 = y1**2 + r9 = x1**3 + r10 = r4*y1 + r11 = y0**3 + r12 = y1**3 + + self.area += -r0/2 - r1/2 + x0*(y0 + y1)/2 + self.momentX += -r2*y0/6 - r2*y1/3 + r3*(r4 + y1)/6 - r6*x1/6 + self.momentY += -r0*y1/6 - r7*x1/6 - r8*x1/6 + x0*(r7 + r8 + y0*y1)/6 + self.momentXX += -r2*r6/12 - r3*r5*x1/12 - r9*y0/12 - r9*y1/4 + x0**3*(3*y0 + y1)/12 + self.momentXY += -r10*r2/24 - r2*r7/24 - r2*r8/8 + r3*(r10 + 3*r7 + r8)/24 - x0*x1*(r7 - r8)/12 + self.momentYY += -r0*r8/12 - r1*r7/12 - r11*x1/12 - r12*x1/12 + x0*(r11 + r12 + r7*y1 + r8*y0)/12 + + def _qCurveToOne(self, p1, p2): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 + + r0 = 2*x1 + r1 = r0*y2 + r2 = 2*y1 + r3 = r2*x2 + r4 = 3*y2 + r5 = r4*x2 + r6 = 3*y0 + r7 = x1**2 + r8 = 2*y2 + r9 = x2**2 + r10 = 4*y1 + r11 = 10*y2 + r12 = r0*x2 + r13 = x0**2 + r14 = 10*y0 + r15 = x2*y2 + r16 = r0*y1 + r15 + r17 = 4*x1 + r18 = x2*y0 + r19 = r10*r15 + r20 = y1**2 + r21 = 2*r20 + r22 = y2**2 + r23 = r22*x2 + r24 = 5*r23 + r25 = y0**2 + r26 = y0*y2 + r27 = 5*r25 + r28 = 8*x1**3 + r29 = x2**3 + r30 
= 30*y1 + r31 = 6*y1 + r32 = 10*r9*x1 + r33 = 4*r7 + r34 = 5*y2 + r35 = 12*r7 + r36 = r5 + 20*x1*y1 + r37 = 30*x1 + r38 = 12*x1 + r39 = 20*r7 + r40 = 8*r7*y1 + r41 = r34*r9 + r42 = 60*y1 + r43 = 20*r20 + r44 = 4*r20 + r45 = 15*r22 + r46 = r38*x2 + r47 = y1*y2 + r48 = 8*r20*x1 + r24 + r49 = 6*x1 + r50 = 8*y1**3 + r51 = y2**3 + r52 = y0**3 + r53 = 10*y1 + r54 = 12*y1 + r55 = 12*r20 + + self.area += r1/6 - r3/6 - r5/6 + x0*(r2 + r6 + y2)/6 - y0*(r0 + x2)/6 + self.momentX += -r10*r9/30 - r11*r9/30 - r12*(-r8 + y1)/30 + r13*(r10 + r14 + y2)/30 + r7*r8/30 + x0*(r1 + r16 - r17*y0 - r18)/30 - y0*(r12 + 2*r7 + r9)/30 + self.momentY += r1*(r8 + y1)/30 - r19/30 - r21*x2/30 - r24/30 - r25*(r17 + x2)/30 + x0*(r10*y0 + r2*y2 + r21 + r22 + r26 + r27)/30 - y0*(r16 + r3)/30 + self.momentXX += r13*(r11*x1 - 5*r18 + r3 + r36 - r37*y0)/420 + r28*y2/420 - r29*r30/420 - r29*y2/4 - r32*(r2 - r4)/420 - r33*x2*(r2 - r34)/420 + x0**3*(r31 + 21*y0 + y2)/84 - x0*(-r15*r38 + r18*r38 + r2*r9 - r35*y2 + r39*y0 - r40 - r41 + r6*r9)/420 - y0*(r28 + 5*r29 + r32 + r35*x2)/420 + self.momentXY += r13*(r14*y2 + 3*r22 + 105*r25 + r42*y0 + r43 + 12*r47)/840 - r17*x2*(r44 - r45)/840 - r22*r9/8 - r25*(r39 + r46 + 3*r9)/840 + r33*y2*(r10 + r34)/840 - r42*r9*y2/840 - r43*r9/840 + x0*(-r10*r18 + r17*r26 + r19 + r22*r49 - r25*r37 - r27*x2 + r38*r47 + r48)/420 - y0*(r15*r17 + r31*r9 + r40 + r41 + r46*y1)/420 + self.momentYY += r1*(r11*y1 + r44 + r45)/420 - r15*r43/420 - r23*r30/420 - r25*(r1 + r36 + r53*x2)/420 - r50*x2/420 - r51*x2/12 - r52*(r49 + x2)/84 + x0*(r22*r53 + r22*r6 + r25*r30 + r25*r34 + r26*r54 + r43*y0 + r50 + 5*r51 + 35*r52 + r55*y2)/420 - y0*(-r0*r22 + r15*r54 + r48 + r55*x2)/420 + + def _curveToOne(self, p1, p2, p3): + x0,y0 = self._getCurrentPoint() + x1,y1 = p1 + x2,y2 = p2 + x3,y3 = p3 + + r0 = 6*x2 + r1 = r0*y3 + r2 = 6*y2 + r3 = 10*y3 + r4 = r3*x3 + r5 = 3*x1 + r6 = 3*y1 + r7 = 6*x1 + r8 = 3*x2 + r9 = 6*y1 + r10 = 3*y2 + r11 = x2**2 + r12 = r11*y3 + r13 = 45*r12 + r14 = x3**2 + r15 = 
r14*y2 + r16 = r14*y3 + r17 = x2*x3 + r18 = 15*r17 + r19 = 7*y3 + r20 = x1**2 + r21 = 9*r20 + r22 = x0**2 + r23 = 21*y1 + r24 = 9*r11 + r25 = 9*x2 + r26 = x2*y3 + r27 = 15*r26 + r28 = -r25*y1 + r27 + r29 = r25*y2 + r30 = r9*x3 + r31 = 45*x1 + r32 = x1*x3 + r33 = 45*r20 + r34 = 5*r14 + r35 = x2*y2 + r36 = 18*r35 + r37 = 5*x3 + r38 = r37*y3 + r39 = r31*y1 + r36 + r38 + r40 = x1*y0 + r41 = x1*y3 + r42 = x2*y0 + r43 = x3*y1 + r44 = r10*x3 + r45 = x3*y2*y3 + r46 = y2**2 + r47 = 45*r46 + r48 = r47*x3 + r49 = y3**2 + r50 = r49*x3 + r51 = y1**2 + r52 = 9*r51 + r53 = y0**2 + r54 = 21*x1 + r55 = x3*y2 + r56 = 15*r55 + r57 = 9*y2 + r58 = y2*y3 + r59 = 15*r58 + r60 = 9*r46 + r61 = 3*y3 + r62 = 45*y1 + r63 = r8*y3 + r64 = y0*y1 + r65 = y0*y2 + r66 = 30*r65 + r67 = 5*y3 + r68 = y1*y3 + r69 = 45*r51 + r70 = 5*r49 + r71 = x2**3 + r72 = x3**3 + r73 = 126*x3 + r74 = x1**3 + r75 = r14*x2 + r76 = 63*r11 + r77 = r76*x3 + r78 = 15*r35 + r79 = r19*x3 + r80 = x1*y1 + r81 = 63*r35 + r82 = r38 + 378*r80 + r81 + r83 = x1*y2 + r84 = x2*y1 + r85 = x3*y0 + r86 = x2*x3*y1 + r87 = x2*x3*y3 + r88 = r11*y2 + r89 = 27*r88 + r90 = 42*y3 + r91 = r14*r90 + r92 = 90*x1*x2 + r93 = 189*x2 + r94 = 30*x1*x3 + r95 = 14*r16 + 126*r20*y1 + 45*r88 + r94*y2 + r96 = x1*x2 + r97 = 252*r96 + r98 = x1*x2*y2 + r99 = 42*r32 + r100 = x1*x3*y1 + r101 = 30*r17 + r102 = 18*r17 + r103 = 378*r20 + r104 = 189*y2 + r105 = r20*y3 + r106 = r11*y1 + r107 = r14*y1 + r108 = 378*r46 + r109 = 252*y2 + r110 = y1*y2 + r111 = x2*x3*y2 + r112 = y0*y3 + r113 = 378*r51 + r114 = 63*r46 + r115 = 27*x2 + r116 = r115*r46 + 42*r50 + r117 = x2*y1*y3 + r118 = x3*y1*y2 + r119 = r49*x2 + r120 = r51*x3 + r121 = x3*y3 + r122 = 14*x3 + r123 = 30*r117 + r122*r49 + r47*x2 + 126*r51*x1 + r124 = x1*y1*y3 + r125 = x1*y2*y3 + r126 = x2*y1*y2 + r127 = 54*y3 + r128 = 21*r55 + r129 = 630*r53 + r130 = r46*x1 + r131 = r49*x1 + r132 = 126*r53 + r133 = y2**3 + r134 = y3**3 + r135 = 630*r49 + r136 = y1**3 + r137 = y0**3 + r138 = r114*y3 + r23*r49 + r139 = r49*y2 + 
+ self.area += r1/20 - r2*x3/20 - r4/20 + r5*(y2 + y3)/20 - r6*(x2 + x3)/20 + x0*(r10 + r9 + 10*y0 + y3)/20 - y0*(r7 + r8 + x3)/20 + self.momentX += r13/840 - r15/8 - r16/3 - r18*(r10 - r19)/840 + r21*(r10 + 2*y3)/840 + r22*(r2 + r23 + 56*y0 + y3)/168 + r5*(r28 + r29 - r30 + r4)/840 - r6*(10*r14 + r18 + r24)/840 + x0*(12*r26 + r31*y2 - r37*y0 + r39 - 105*r40 + 15*r41 - 30*r42 - 3*r43 + r44)/840 - y0*(18*r11 + r18 + r31*x2 + 12*r32 + r33 + r34)/840 + self.momentY += r27*(r10 + r19)/840 - r45/8 - r48/840 + r5*(10*r49 + r57*y1 + r59 + r60 + r9*y3)/840 - r50/6 - r52*(r8 + 2*x3)/840 - r53*(r0 + r54 + x3)/168 - r6*(r29 + r4 + r56)/840 + x0*(18*r46 + 140*r53 + r59 + r62*y2 + 105*r64 + r66 + r67*y0 + 12*r68 + r69 + r70)/840 - y0*(r39 + 15*r43 + 12*r55 - r61*x1 + r62*x2 + r63)/840 + self.momentXX += -r11*r73*(-r61 + y2)/9240 + r21*(r28 - r37*y1 + r44 + r78 + r79)/9240 + r22*(21*r26 - 630*r40 + 42*r41 - 126*r42 + r57*x3 + r82 + 210*r83 + 42*r84 - 14*r85)/9240 - r5*(r11*r62 + r14*r23 + 14*r15 - r76*y3 + 54*r86 - 84*r87 - r89 - r91)/9240 - r6*(27*r71 + 42*r72 + 70*r75 + r77)/9240 + 3*r71*y3/220 - 3*r72*y2/44 - r72*y3/4 + 3*r74*(r57 + r67)/3080 - r75*(378*y2 - 630*y3)/9240 + x0**3*(r57 + r62 + 165*y0 + y3)/660 + x0*(-18*r100 - r101*y0 - r101*y1 + r102*y2 - r103*y0 + r104*r20 + 63*r105 - 27*r106 - 9*r107 + r13 - r34*y0 - r76*y0 + 42*r87 + r92*y3 + r94*y3 + r95 - r97*y0 + 162*r98 - r99*y0)/9240 - y0*(135*r11*x1 + r14*r54 + r20*r93 + r33*x3 + 45*r71 + 14*r72 + 126*r74 + 42*r75 + r77 + r92*x3)/9240 + self.momentXY += -r108*r14/18480 + r12*(r109 + 378*y3)/18480 - r14*r49/8 - 3*r14*r58/44 - r17*(252*r46 - 1260*r49)/18480 + r21*(18*r110 + r3*y1 + 15*r46 + 7*r49 + 18*r58)/18480 + r22*(252*r110 + 28*r112 + r113 + r114 + 2310*r53 + 30*r58 + 1260*r64 + 252*r65 + 42*r68 + r70)/18480 - r52*(r102 + 15*r11 + 7*r14)/18480 - r53*(r101 + r103 + r34 + r76 + r97 + r99)/18480 + r7*(-r115*r51 + r116 + 18*r117 - 18*r118 + 42*r119 - 15*r120 + 28*r45 + r81*y3)/18480 - r9*(63*r111 + 42*r15 + 28*r87 + 
r89 + r91)/18480 + x0*(r1*y0 + r104*r80 + r112*r54 + 21*r119 - 9*r120 - r122*r53 + r123 + 54*r124 + 60*r125 + 54*r126 + r127*r35 + r128*y3 - r129*x1 + 81*r130 + 15*r131 - r132*x2 - r2*r85 - r23*r85 + r30*y3 + 84*r40*y2 - 84*r42*y1 + r60*x3)/9240 - y0*(54*r100 - 9*r105 + 81*r106 + 15*r107 + 54*r111 + r121*r7 + 21*r15 + r24*y3 + 60*r86 + 21*r87 + r95 + 189*r96*y1 + 54*r98)/9240 + self.momentYY += -r108*r121/9240 - r133*r73/9240 - r134*x3/12 - r135*r55/9240 - 3*r136*(r25 + r37)/3080 - r137*(r25 + r31 + x3)/660 + r26*(r135 + 126*r46 + 378*y2*y3)/9240 + r5*(r110*r127 + 27*r133 + 42*r134 + r138 + 70*r139 + r46*r62 + 27*r51*y2 + 15*r51*y3)/9240 - r52*(r56 + r63 + r78 + r79)/9240 - r53*(r128 + r25*y3 + 42*r43 + r82 + 42*r83 + 210*r84)/9240 - r6*(r114*x3 + r116 - 14*r119 + 84*r45)/9240 + x0*(r104*r51 + r109*r64 + 90*r110*y3 + r113*y0 + r114*y0 + r129*y1 + r132*y2 + 45*r133 + 14*r134 + 126*r136 + 770*r137 + r138 + 42*r139 + 135*r46*y1 + 14*r53*y3 + r64*r90 + r66*y3 + r69*y3 + r70*y0)/9240 - y0*(90*r118 + 63*r120 + r123 - 18*r124 - 30*r125 + 162*r126 - 27*r130 - 9*r131 + r36*y3 + 30*r43*y3 + 42*r45 + r48 + r51*r93)/9240 + +if __name__ == '__main__': + from fontTools.misc.symfont import x, y, printGreenPen + printGreenPen('MomentsPen', [ + ('area', 1), + ('momentX', x), + ('momentY', y), + ('momentXX', x**2), + ('momentXY', x*y), + ('momentYY', y**2), + ]) diff -Nru fonttools-3.0/Snippets/fontTools/pens/perimeterPen.py fonttools-3.21.2/Snippets/fontTools/pens/perimeterPen.py --- fonttools-3.0/Snippets/fontTools/pens/perimeterPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/perimeterPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +"""Calculate the perimeter of a glyph.""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen +from fontTools.misc.bezierTools import splitQuadraticAtT, splitCubicAtT, 
approximateQuadraticArcLengthC, calcQuadraticArcLengthC, approximateCubicArcLengthC +import math + + +__all__ = ["PerimeterPen"] + + +def _distance(p0, p1): + return math.hypot(p0[0] - p1[0], p0[1] - p1[1]) + +def _split_cubic_into_two(p0, p1, p2, p3): + mid = (p0 + 3 * (p1 + p2) + p3) * .125 + deriv3 = (p3 + p2 - p1 - p0) * .125 + return ((p0, (p0 + p1) * .5, mid - deriv3, mid), + (mid, mid + deriv3, (p2 + p3) * .5, p3)) + +class PerimeterPen(BasePen): + + def __init__(self, glyphset=None, tolerance=0.005): + BasePen.__init__(self, glyphset) + self.value = 0 + self._mult = 1.+1.5*tolerance # The 1.5 is a empirical hack; no math + + # Choose which algorithm to use for quadratic and for cubic. + # Quadrature is faster but has fixed error characteristic with no strong + # error bound. The cutoff points are derived empirically. + self._addCubic = self._addCubicQuadrature if tolerance >= 0.0015 else self._addCubicRecursive + self._addQuadratic = self._addQuadraticQuadrature if tolerance >= 0.00075 else self._addQuadraticExact + + def _moveTo(self, p0): + self.__startPoint = p0 + + def _closePath(self): + p0 = self._getCurrentPoint() + if p0 != self.__startPoint: + self._lineTo(self.__startPoint) + + def _lineTo(self, p1): + p0 = self._getCurrentPoint() + self.value += _distance(p0, p1) + + def _addQuadraticExact(self, c0, c1, c2): + self.value += calcQuadraticArcLengthC(c0, c1, c2) + + def _addQuadraticQuadrature(self, c0, c1, c2): + self.value += approximateQuadraticArcLengthC(c0, c1, c2) + + def _qCurveToOne(self, p1, p2): + p0 = self._getCurrentPoint() + self._addQuadratic(complex(*p0), complex(*p1), complex(*p2)) + + def _addCubicRecursive(self, p0, p1, p2, p3): + arch = abs(p0-p3) + box = abs(p0-p1) + abs(p1-p2) + abs(p2-p3) + if arch * self._mult >= box: + self.value += (arch + box) * .5 + else: + one,two = _split_cubic_into_two(p0,p1,p2,p3) + self._addCubicRecursive(*one) + self._addCubicRecursive(*two) + + def _addCubicQuadrature(self, c0, c1, c2, c3): + 
self.value += approximateCubicArcLengthC(c0, c1, c2, c3) + + def _curveToOne(self, p1, p2, p3): + p0 = self._getCurrentPoint() + self._addCubic(complex(*p0), complex(*p1), complex(*p2), complex(*p3)) diff -Nru fonttools-3.0/Snippets/fontTools/pens/pointInsidePen.py fonttools-3.21.2/Snippets/fontTools/pens/pointInsidePen.py --- fonttools-3.0/Snippets/fontTools/pens/pointInsidePen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/pointInsidePen.py 2018-01-08 12:40:40.000000000 +0000 @@ -11,12 +11,6 @@ __all__ = ["PointInsidePen"] -# working around floating point errors -EPSILON = 1e-10 -ONE_PLUS_EPSILON = 1 + EPSILON -ZERO_MINUS_EPSILON = 0 - EPSILON - - class PointInsidePen(BasePen): """This pen implements "point inside" testing: to test whether @@ -46,29 +40,33 @@ # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html # I extended the principles outlined on that page to curves. - def __init__(self, glyphSet, testPoint, evenOdd=0): + def __init__(self, glyphSet, testPoint, evenOdd=False): BasePen.__init__(self, glyphSet) self.setTestPoint(testPoint, evenOdd) - def setTestPoint(self, testPoint, evenOdd=0): + def setTestPoint(self, testPoint, evenOdd=False): """Set the point to test. Call this _before_ the outline gets drawn.""" self.testPoint = testPoint self.evenOdd = evenOdd self.firstPoint = None self.intersectionCount = 0 - def getResult(self): - """After the shape has been drawn, getResult() returns True if the test - point lies within the (black) shape, and False if it doesn't. - """ + def getWinding(self): if self.firstPoint is not None: # always make sure the sub paths are closed; the algorithm only works # for closed paths. self.closePath() + return self.intersectionCount + + def getResult(self): + """After the shape has been drawn, getResult() returns True if the test + point lies within the (black) shape, and False if it doesn't. 
+ """ + winding = self.getWinding() if self.evenOdd: - result = self.intersectionCount % 2 - else: - result = self.intersectionCount + result = winding % 2 + else: # non-zero + result = self.intersectionCount != 0 return not not result def _addIntersection(self, goingUp): @@ -123,7 +121,7 @@ by = (y3 - y2) * 3.0 - cy ay = y4 - dy - cy - by solutions = sorted(solveCubic(ay, by, cy, dy - y)) - solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] + solutions = [t for t in solutions if -0. <= t <= 1.] if not solutions: return @@ -142,29 +140,30 @@ t3 = t2 * t direction = 3*ay*t2 + 2*by*t + cy + incomingGoingUp = outgoingGoingUp = direction > 0.0 if direction == 0.0: direction = 6*ay*t + 2*by + outgoingGoingUp = direction > 0.0 + incomingGoingUp = not outgoingGoingUp if direction == 0.0: direction = ay - goingUp = direction > 0.0 + incomingGoingUp = outgoingGoingUp = direction > 0.0 xt = ax*t3 + bx*t2 + cx*t + dx if xt < x: - above = goingUp continue - if t == 0.0: - if not goingUp: - self._addIntersection(goingUp) + if t in (0.0, -0.0): + if not outgoingGoingUp: + self._addIntersection(outgoingGoingUp) elif t == 1.0: - if not above: - self._addIntersection(goingUp) + if incomingGoingUp: + self._addIntersection(incomingGoingUp) else: - if above != goingUp: - self._addIntersection(goingUp) + if incomingGoingUp == outgoingGoingUp: + self._addIntersection(outgoingGoingUp) #else: - # we're not really intersecting, merely touching the 'top' - above = goingUp + # we're not really intersecting, merely touching def _qCurveToOne_unfinished(self, bcp, point): # XXX need to finish this, for now doing it through a cubic @@ -188,4 +187,6 @@ self.lineTo(self.firstPoint) self.firstPoint = None - _endPath = _closePath + def _endPath(self): + """Insideness is not defined for open contours.""" + raise NotImplementedError diff -Nru fonttools-3.0/Snippets/fontTools/pens/pointInsidePen_test.py fonttools-3.21.2/Snippets/fontTools/pens/pointInsidePen_test.py --- 
fonttools-3.0/Snippets/fontTools/pens/pointInsidePen_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/pointInsidePen_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.pointInsidePen import PointInsidePen -import unittest - - -class PointInsidePenTest(unittest.TestCase): - def test_line(self): - def draw_triangles(pen): - pen.moveTo((0,0)); pen.lineTo((10,5)); pen.lineTo((10,0)) - pen.moveTo((9,1)); pen.lineTo((4,1)); pen.lineTo((9,4)) - pen.closePath() - - self.assertEqual( - " *********" - " ** *" - " ** *" - " * *" - " *", - self.render(draw_triangles, even_odd=True)) - - self.assertEqual( - " *********" - " *******" - " *****" - " ***" - " *", - self.render(draw_triangles, even_odd=False)) - - def test_curve(self): - def draw_curves(pen): - pen.moveTo((0,0)); pen.curveTo((9,1), (9,4), (0,5)) - pen.moveTo((10,5)); pen.curveTo((1,4), (1,1), (10,0)) - pen.closePath() - - self.assertEqual( - "*** ***" - "**** ****" - "*** ***" - "**** ****" - "*** ***", - self.render(draw_curves, even_odd=True)) - - self.assertEqual( - "*** ***" - "**********" - "**********" - "**********" - "*** ***", - self.render(draw_curves, even_odd=False)) - - def test_qCurve(self): - def draw_qCurves(pen): - pen.moveTo((0,0)); pen.qCurveTo((15,2), (0,5)) - pen.moveTo((10,5)); pen.qCurveTo((-5,3), (10,0)) - pen.closePath() - - self.assertEqual( - "*** **" - "**** ***" - "*** ***" - "*** ****" - "** ***", - self.render(draw_qCurves, even_odd=True)) - - self.assertEqual( - "*** **" - "**********" - "**********" - "**********" - "** ***", - self.render(draw_qCurves, even_odd=False)) - - @staticmethod - def render(draw_function, even_odd): - result = BytesIO() - for y in range(5): - for x in range(10): - pen = PointInsidePen(None, (x + 0.5, y + 0.5), even_odd) - draw_function(pen) - if pen.getResult(): - result.write(b"*") - 
else: - result.write(b" ") - return tounicode(result.getvalue()) - - -if __name__ == "__main__": - unittest.main() - diff -Nru fonttools-3.0/Snippets/fontTools/pens/qtPen.py fonttools-3.21.2/Snippets/fontTools/pens/qtPen.py --- fonttools-3.0/Snippets/fontTools/pens/qtPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/qtPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -24,5 +24,8 @@ def _curveToOne(self, p1, p2, p3): self.path.cubicTo(*p1+p2+p3) + def _qCurveToOne(self, p1, p2): + self.path.quadTo(*p1+p2) + def _closePath(self): self.path.closeSubpath() diff -Nru fonttools-3.0/Snippets/fontTools/pens/recordingPen.py fonttools-3.21.2/Snippets/fontTools/pens/recordingPen.py --- fonttools-3.0/Snippets/fontTools/pens/recordingPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/recordingPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,101 @@ +"""Pen recording operations that can be accessed or replayed.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import AbstractPen, DecomposingPen + + +__all__ = ["replayRecording", "RecordingPen", "DecomposingRecordingPen"] + + +def replayRecording(recording, pen): + """Replay a recording, as produced by RecordingPen or DecomposingRecordingPen, + to a pen. + + Note that recording does not have to be produced by those pens. + It can be any iterable of tuples of method name and tuple-of-arguments. + Likewise, pen can be any objects receiving those method calls. + """ + for operator,operands in recording: + getattr(pen, operator)(*operands) + + +class RecordingPen(AbstractPen): + """Pen recording operations that can be accessed or replayed. + + The recording can be accessed as pen.value; or replayed using + pen.replay(otherPen). 
+ + Usage example: + ============== + from fontTools.ttLib import TTFont + from fontTools.pens.recordingPen import RecordingPen + + glyph_name = 'dollar' + font_path = 'MyFont.otf' + + font = TTFont(font_path) + glyphset = font.getGlyphSet() + glyph = glyphset[glyph_name] + + pen = RecordingPen() + glyph.draw(pen) + print(pen.value) + """ + + def __init__(self): + self.value = [] + def moveTo(self, p0): + self.value.append(('moveTo', (p0,))) + def lineTo(self, p1): + self.value.append(('lineTo', (p1,))) + def qCurveTo(self, *points): + self.value.append(('qCurveTo', points)) + def curveTo(self, *points): + self.value.append(('curveTo', points)) + def closePath(self): + self.value.append(('closePath', ())) + def endPath(self): + self.value.append(('endPath', ())) + def addComponent(self, glyphName, transformation): + self.value.append(('addComponent', (glyphName, transformation))) + def replay(self, pen): + replayRecording(self.value, pen) + + +class DecomposingRecordingPen(DecomposingPen, RecordingPen): + """ Same as RecordingPen, except that it doesn't keep components + as references, but draws them decomposed as regular contours. + + The constructor takes a single 'glyphSet' positional argument, + a dictionary of glyph objects (i.e. with a 'draw' method) keyed + by thir name. + + >>> class SimpleGlyph(object): + ... def draw(self, pen): + ... pen.moveTo((0, 0)) + ... pen.curveTo((1, 1), (2, 2), (3, 3)) + ... pen.closePath() + >>> class CompositeGlyph(object): + ... def draw(self, pen): + ... pen.addComponent('a', (1, 0, 0, 1, -1, 1)) + >>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()} + >>> for name, glyph in sorted(glyphSet.items()): + ... pen = DecomposingRecordingPen(glyphSet) + ... glyph.draw(pen) + ... 
print("{}: {}".format(name, pen.value)) + a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())] + b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())] + """ + # raises KeyError if base glyph is not found in glyphSet + skipMissingComponents = False + + +if __name__ == "__main__": + from fontTools.pens.basePen import _TestPen + pen = RecordingPen() + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25)) + pen.closePath() + from pprint import pprint + pprint(pen.value) diff -Nru fonttools-3.0/Snippets/fontTools/pens/reportLabPen.py fonttools-3.21.2/Snippets/fontTools/pens/reportLabPen.py --- fonttools-3.0/Snippets/fontTools/pens/reportLabPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/reportLabPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -4,6 +4,9 @@ from reportlab.graphics.shapes import Path +__all__ = ["ReportLabPen"] + + class ReportLabPen(BasePen): """A pen for drawing onto a reportlab.graphics.shapes.Path object.""" diff -Nru fonttools-3.0/Snippets/fontTools/pens/reverseContourPen.py fonttools-3.21.2/Snippets/fontTools/pens/reverseContourPen.py --- fonttools-3.0/Snippets/fontTools/pens/reverseContourPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/reverseContourPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,97 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.arrayTools import pairwise +from fontTools.pens.filterPen import ContourFilterPen + + +__all__ = ["reversedContour", "ReverseContourPen"] + + +class ReverseContourPen(ContourFilterPen): + """Filter pen that passes outline data to another pen, but reversing + the winding direction of all contours. Components are simply passed + through unchanged. + + Closed contours are reversed in such a way that the first point remains + the first point. 
+ """ + + def filterContour(self, contour): + return reversedContour(contour) + + +def reversedContour(contour): + """ Generator that takes a list of pen's (operator, operands) tuples, + and yields them with the winding direction reversed. + """ + if not contour: + return # nothing to do, stop iteration + + # valid contours must have at least a starting and ending command, + # can't have one without the other + assert len(contour) > 1, "invalid contour" + + # the type of the last command determines if the contour is closed + contourType = contour.pop()[0] + assert contourType in ("endPath", "closePath") + closed = contourType == "closePath" + + firstType, firstPts = contour.pop(0) + assert firstType in ("moveTo", "qCurveTo"), ( + "invalid initial segment type: %r" % firstType) + firstOnCurve = firstPts[-1] + if firstType == "qCurveTo": + # special case for TrueType paths contaning only off-curve points + assert firstOnCurve is None, ( + "off-curve only paths must end with 'None'") + assert not contour, ( + "only one qCurveTo allowed per off-curve path") + firstPts = ((firstPts[0],) + tuple(reversed(firstPts[1:-1])) + + (None,)) + + if not contour: + # contour contains only one segment, nothing to reverse + if firstType == "moveTo": + closed = False # single-point paths can't be closed + else: + closed = True # off-curve paths are closed by definition + yield firstType, firstPts + else: + lastType, lastPts = contour[-1] + lastOnCurve = lastPts[-1] + if closed: + # for closed paths, we keep the starting point + yield firstType, firstPts + if firstOnCurve != lastOnCurve: + # emit an implied line between the last and first points + yield "lineTo", (lastOnCurve,) + contour[-1] = (lastType, + tuple(lastPts[:-1]) + (firstOnCurve,)) + + if len(contour) > 1: + secondType, secondPts = contour[0] + else: + # contour has only two points, the second and last are the same + secondType, secondPts = lastType, lastPts + # if a lineTo follows the initial moveTo, after reversing it + 
# will be implied by the closePath, so we don't emit one; + # unless the lineTo and moveTo overlap, in which case we keep the + # duplicate points + if secondType == "lineTo" and firstPts != secondPts: + del contour[0] + if contour: + contour[-1] = (lastType, + tuple(lastPts[:-1]) + secondPts) + else: + # for open paths, the last point will become the first + yield firstType, (lastOnCurve,) + contour[-1] = (lastType, tuple(lastPts[:-1]) + (firstOnCurve,)) + + # we iterate over all segment pairs in reverse order, and yield + # each one with the off-curve points reversed (if any), and + # with the on-curve point of the following segment + for (curType, curPts), (_, nextPts) in pairwise( + contour, reverse=True): + yield curType, tuple(reversed(curPts[:-1])) + (nextPts[-1],) + + yield "closePath" if closed else "endPath", () diff -Nru fonttools-3.0/Snippets/fontTools/pens/statisticsPen.py fonttools-3.21.2/Snippets/fontTools/pens/statisticsPen.py --- fonttools-3.0/Snippets/fontTools/pens/statisticsPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/statisticsPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,102 @@ +"""Pen calculating area, center of mass, variance and standard-deviation, +covariance and correlation, and slant, of glyph shapes.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import math +from fontTools.pens.momentsPen import MomentsPen + +__all__ = ["StatisticsPen"] + + +class StatisticsPen(MomentsPen): + + """Pen calculating area, center of mass, variance and + standard-deviation, covariance and correlation, and slant, + of glyph shapes. + + Note that all the calculated values are 'signed'. Ie. if the + glyph shape is self-intersecting, the values are not correct + (but well-defined). As such, area will be negative if contour + directions are clockwise. 
Moreover, variance might be negative + if the shapes are self-intersecting in certain ways.""" + + def __init__(self, glyphset=None): + MomentsPen.__init__(self, glyphset=glyphset) + self.__zero() + + def _closePath(self): + MomentsPen._closePath(self) + self.__update() + + def __zero(self): + self.meanX = 0 + self.meanY = 0 + self.varianceX = 0 + self.varianceY = 0 + self.stddevX = 0 + self.stddevY = 0 + self.covariance = 0 + self.correlation = 0 + self.slant = 0 + + def __update(self): + + area = self.area + if not area: + self.__zero() + return + + # Center of mass + # https://en.wikipedia.org/wiki/Center_of_mass#A_continuous_volume + self.meanX = meanX = self.momentX / area + self.meanY = meanY = self.momentY / area + + # Var(X) = E[X^2] - E[X]^2 + self.varianceX = varianceX = self.momentXX / area - meanX**2 + self.varianceY = varianceY = self.momentYY / area - meanY**2 + + self.stddevX = stddevX = math.copysign(abs(varianceX)**.5, varianceX) + self.stddevY = stddevY = math.copysign(abs(varianceY)**.5, varianceY) + + # Covariance(X,Y) = ( E[X.Y] - E[X]E[Y] ) + self.covariance = covariance = self.momentXY / area - meanX*meanY + + # Correlation(X,Y) = Covariance(X,Y) / ( stddev(X) * stddev(Y) ) + # https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient + correlation = covariance / (stddevX * stddevY) + self.correlation = correlation if abs(correlation) > 1e-3 else 0 + + slant = covariance / varianceY + self.slant = slant if abs(slant) > 1e-3 else 0 + + +def _test(glyphset, upem, glyphs): + from fontTools.pens.transformPen import TransformPen + from fontTools.misc.transform import Scale + + print('upem', upem) + + for glyph_name in glyphs: + print() + print("glyph:", glyph_name) + glyph = glyphset[glyph_name] + pen = StatisticsPen(glyphset=glyphset) + transformer = TransformPen(pen, Scale(1./upem)) + glyph.draw(transformer) + for item in ['area', 'momentX', 'momentY', 'momentXX', 'momentYY', 'momentXY', 'meanX', 'meanY', 'varianceX', 
'varianceY', 'stddevX', 'stddevY', 'covariance', 'correlation', 'slant']: + if item[0] == '_': continue + print ("%s: %g" % (item, getattr(pen, item))) + +def main(args): + if not args: + return + filename, glyphs = args[0], args[1:] + if not glyphs: + glyphs = ['e', 'o', 'I', 'slash', 'E', 'zero', 'eight', 'minus', 'equal'] + from fontTools.ttLib import TTFont + font = TTFont(filename) + _test(font.getGlyphSet(), font['head'].unitsPerEm, glyphs) + +if __name__ == '__main__': + import sys + main(sys.argv[1:]) diff -Nru fonttools-3.0/Snippets/fontTools/pens/svgPathPen.py fonttools-3.21.2/Snippets/fontTools/pens/svgPathPen.py --- fonttools-3.0/Snippets/fontTools/pens/svgPathPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/svgPathPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,178 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +def pointToString(pt): + return " ".join([str(i) for i in pt]) + + +class SVGPathPen(BasePen): + + def __init__(self, glyphSet): + BasePen.__init__(self, glyphSet) + self._commands = [] + self._lastCommand = None + self._lastX = None + self._lastY = None + + def _handleAnchor(self): + """ + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 0)) + >>> pen.moveTo((10, 10)) + >>> pen._commands + ['M10 10'] + """ + if self._lastCommand == "M": + self._commands.pop(-1) + + def _moveTo(self, pt): + """ + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 0)) + >>> pen._commands + ['M0 0'] + + >>> pen = SVGPathPen(None) + >>> pen.moveTo((10, 0)) + >>> pen._commands + ['M10 0'] + + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 10)) + >>> pen._commands + ['M0 10'] + """ + self._handleAnchor() + t = "M%s" % (pointToString(pt)) + self._commands.append(t) + self._lastCommand = "M" + self._lastX, self._lastY = pt + + def _lineTo(self, pt): + """ + # duplicate point + >>> pen = SVGPathPen(None) + >>> 
pen.moveTo((10, 10)) + >>> pen.lineTo((10, 10)) + >>> pen._commands + ['M10 10'] + + # vertical line + >>> pen = SVGPathPen(None) + >>> pen.moveTo((10, 10)) + >>> pen.lineTo((10, 0)) + >>> pen._commands + ['M10 10', 'V0'] + + # horizontal line + >>> pen = SVGPathPen(None) + >>> pen.moveTo((10, 10)) + >>> pen.lineTo((0, 10)) + >>> pen._commands + ['M10 10', 'H0'] + + # basic + >>> pen = SVGPathPen(None) + >>> pen.lineTo((70, 80)) + >>> pen._commands + ['L70 80'] + + # basic following a moveto + >>> pen = SVGPathPen(None) + >>> pen.moveTo((0, 0)) + >>> pen.lineTo((10, 10)) + >>> pen._commands + ['M0 0', ' 10 10'] + """ + x, y = pt + # duplicate point + if x == self._lastX and y == self._lastY: + return + # vertical line + elif x == self._lastX: + cmd = "V" + pts = str(y) + # horizontal line + elif y == self._lastY: + cmd = "H" + pts = str(x) + # previous was a moveto + elif self._lastCommand == "M": + cmd = None + pts = " " + pointToString(pt) + # basic + else: + cmd = "L" + pts = pointToString(pt) + # write the string + t = "" + if cmd: + t += cmd + self._lastCommand = cmd + t += pts + self._commands.append(t) + # store for future reference + self._lastX, self._lastY = pt + + def _curveToOne(self, pt1, pt2, pt3): + """ + >>> pen = SVGPathPen(None) + >>> pen.curveTo((10, 20), (30, 40), (50, 60)) + >>> pen._commands + ['C10 20 30 40 50 60'] + """ + t = "C" + t += pointToString(pt1) + " " + t += pointToString(pt2) + " " + t += pointToString(pt3) + self._commands.append(t) + self._lastCommand = "C" + self._lastX, self._lastY = pt3 + + def _qCurveToOne(self, pt1, pt2): + """ + >>> pen = SVGPathPen(None) + >>> pen.qCurveTo((10, 20), (30, 40)) + >>> pen._commands + ['Q10 20 30 40'] + """ + assert pt2 is not None + t = "Q" + t += pointToString(pt1) + " " + t += pointToString(pt2) + self._commands.append(t) + self._lastCommand = "Q" + self._lastX, self._lastY = pt2 + + def _closePath(self): + """ + >>> pen = SVGPathPen(None) + >>> pen.closePath() + >>> pen._commands + ['Z'] 
+ """ + self._commands.append("Z") + self._lastCommand = "Z" + self._lastX = self._lastY = None + + def _endPath(self): + """ + >>> pen = SVGPathPen(None) + >>> pen.endPath() + >>> pen._commands + ['Z'] + """ + self._closePath() + self._lastCommand = None + self._lastX = self._lastY = None + + def getCommands(self): + return "".join(self._commands) + + +if __name__ == "__main__": + import sys + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/pens/t2CharStringPen.py fonttools-3.21.2/Snippets/fontTools/pens/t2CharStringPen.py --- fonttools-3.0/Snippets/fontTools/pens/t2CharStringPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/t2CharStringPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,90 @@ +# Copyright (c) 2009 Type Supply LLC +# Author: Tal Leming + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.psCharStrings import T2CharString +from fontTools.pens.basePen import BasePen +from fontTools.cffLib.specializer import specializeCommands, commandsToProgram + + +def makeRoundFunc(tolerance): + if tolerance < 0: + raise ValueError("Rounding tolerance must be positive") + + def _round(number): + if tolerance == 0: + return number # no-op + rounded = round(number) + # return rounded integer if the tolerance >= 0.5, or if the absolute + # difference between the original float and the rounded integer is + # within the tolerance + if tolerance >= .5 or abs(rounded - number) <= tolerance: + return rounded + else: + # else return the value un-rounded + return number + + def roundPoint(point): + x, y = point + return _round(x), _round(y) + + return roundPoint + + +class T2CharStringPen(BasePen): + """Pen to draw Type 2 CharStrings. + + The 'roundTolerance' argument controls the rounding of point coordinates. 
+ It is defined as the maximum absolute difference between the original + float and the rounded integer value. + The default tolerance of 0.5 means that all floats are rounded to integer; + a value of 0 disables rounding; values in between will only round floats + which are close to their integral part within the tolerated range. + """ + + def __init__(self, width, glyphSet, roundTolerance=0.5, CFF2=False): + super(T2CharStringPen, self).__init__(glyphSet) + self.roundPoint = makeRoundFunc(roundTolerance) + self._CFF2 = CFF2 + self._width = width + self._commands = [] + self._p0 = (0,0) + + def _p(self, pt): + p0 = self._p0 + pt = self._p0 = self.roundPoint(pt) + return [pt[0]-p0[0], pt[1]-p0[1]] + + def _moveTo(self, pt): + self._commands.append(('rmoveto', self._p(pt))) + + def _lineTo(self, pt): + self._commands.append(('rlineto', self._p(pt))) + + def _curveToOne(self, pt1, pt2, pt3): + _p = self._p + self._commands.append(('rrcurveto', _p(pt1)+_p(pt2)+_p(pt3))) + + def _closePath(self): + pass + + def _endPath(self): + pass + + def getCharString(self, private=None, globalSubrs=None, optimize=True): + commands = self._commands + if optimize: + maxstack = 48 if not self._CFF2 else 513 + commands = specializeCommands(commands, + generalizeFirst=False, + maxstack=maxstack) + program = commandsToProgram(commands) + if self._width is not None: + assert not self._CFF2, "CFF2 does not allow encoding glyph width in CharString." 
+ program.insert(0, round(self._width)) + if not self._CFF2: + program.append('endchar') + charString = T2CharString( + program=program, private=private, globalSubrs=globalSubrs) + return charString diff -Nru fonttools-3.0/Snippets/fontTools/pens/teePen.py fonttools-3.21.2/Snippets/fontTools/pens/teePen.py --- fonttools-3.0/Snippets/fontTools/pens/teePen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/teePen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ +"""Pen multiplexing drawing to one or more pens.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import AbstractPen + + +__all__ = ["TeePen"] + + +class TeePen(AbstractPen): + """Pen multiplexing drawing to one or more pens. + + Use either as TeePen(pen1, pen2, ...) or TeePen(iterableOfPens).""" + + def __init__(self, *pens): + if len(pens) == 1: + pens = pens[0] + self.pens = pens + def moveTo(self, p0): + for pen in self.pens: + pen.moveTo(p0) + def lineTo(self, p1): + for pen in self.pens: + pen.lineTo(p1) + def qCurveTo(self, *points): + for pen in self.pens: + pen.qCurveTo(*points) + def curveTo(self, *points): + for pen in self.pens: + pen.curveTo(*points) + def closePath(self): + for pen in self.pens: + pen.closePath() + def endPath(self): + for pen in self.pens: + pen.endPath() + def addComponent(self, glyphName, transformation): + for pen in self.pens: + pen.addComponent(glyphName, transformation) + + +if __name__ == "__main__": + from fontTools.pens.basePen import _TestPen + pen = TeePen(_TestPen(), _TestPen()) + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.curveTo((50, 75), (60, 50), (50, 25)) + pen.closePath() diff -Nru fonttools-3.0/Snippets/fontTools/pens/transformPen.py fonttools-3.21.2/Snippets/fontTools/pens/transformPen.py --- fonttools-3.0/Snippets/fontTools/pens/transformPen.py 2015-08-31 17:57:15.000000000 +0000 +++ 
fonttools-3.21.2/Snippets/fontTools/pens/transformPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,12 +1,12 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from fontTools.pens.basePen import AbstractPen +from fontTools.pens.filterPen import FilterPen __all__ = ["TransformPen"] -class TransformPen(AbstractPen): +class TransformPen(FilterPen): """Pen that transforms all coordinates using a Affine transformation, and passes them to another pen. @@ -17,12 +17,12 @@ transformed coordinates. The 'transformation' argument can either be a six-tuple, or a fontTools.misc.transform.Transform object. """ + super(TransformPen, self).__init__(outPen) if not hasattr(transformation, "transformPoint"): from fontTools.misc.transform import Transform transformation = Transform(*transformation) self._transformation = transformation self._transformPoint = transformation.transformPoint - self._outPen = outPen self._stack = [] def moveTo(self, pt): @@ -42,15 +42,15 @@ self._outPen.qCurveTo(*points) def _transformPoints(self, points): - new = [] transformPoint = self._transformPoint - for pt in points: - new.append(transformPoint(pt)) - return new + return [transformPoint(pt) for pt in points] def closePath(self): self._outPen.closePath() + def endPath(self): + self._outPen.endPath() + def addComponent(self, glyphName, transformation): transformation = self._transformation.transform(transformation) self._outPen.addComponent(glyphName, transformation) diff -Nru fonttools-3.0/Snippets/fontTools/pens/ttGlyphPen.py fonttools-3.21.2/Snippets/fontTools/pens/ttGlyphPen.py --- fonttools-3.0/Snippets/fontTools/pens/ttGlyphPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/ttGlyphPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,115 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from array import array +from fontTools.pens.basePen import 
AbstractPen +from fontTools.pens.transformPen import TransformPen +from fontTools.ttLib.tables import ttProgram +from fontTools.ttLib.tables._g_l_y_f import Glyph +from fontTools.ttLib.tables._g_l_y_f import GlyphComponent +from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates + + +__all__ = ["TTGlyphPen"] + + +class TTGlyphPen(AbstractPen): + """Pen used for drawing to a TrueType glyph.""" + + def __init__(self, glyphSet): + self.glyphSet = glyphSet + self.init() + + def init(self): + self.points = [] + self.endPts = [] + self.types = [] + self.components = [] + + def _addPoint(self, pt, onCurve): + self.points.append(pt) + self.types.append(onCurve) + + def _popPoint(self): + self.points.pop() + self.types.pop() + + def _isClosed(self): + return ( + (not self.points) or + (self.endPts and self.endPts[-1] == len(self.points) - 1)) + + def lineTo(self, pt): + self._addPoint(pt, 1) + + def moveTo(self, pt): + assert self._isClosed(), '"move"-type point must begin a new contour.' + self._addPoint(pt, 1) + + def qCurveTo(self, *points): + assert len(points) >= 1 + for pt in points[:-1]: + self._addPoint(pt, 0) + + # last point is None if there are no on-curve points + if points[-1] is not None: + self._addPoint(points[-1], 1) + + def closePath(self): + endPt = len(self.points) - 1 + + # ignore anchors (one-point paths) + if endPt == 0 or (self.endPts and endPt == self.endPts[-1] + 1): + self._popPoint() + return + + # if first and last point on this path are the same, remove last + startPt = 0 + if self.endPts: + startPt = self.endPts[-1] + 1 + if self.points[startPt] == self.points[endPt]: + self._popPoint() + endPt -= 1 + + self.endPts.append(endPt) + + def endPath(self): + # TrueType contours are always "closed" + self.closePath() + + def addComponent(self, glyphName, transformation): + self.components.append((glyphName, transformation)) + + def glyph(self, componentFlags=0x4): + assert self._isClosed(), "Didn't close last contour." 
+ + components = [] + for glyphName, transformation in self.components: + if self.points: + # can't have both, so decompose the glyph + tpen = TransformPen(self, transformation) + self.glyphSet[glyphName].draw(tpen) + continue + + component = GlyphComponent() + component.glyphName = glyphName + if transformation[:4] != (1, 0, 0, 1): + component.transform = (transformation[:2], transformation[2:4]) + component.x, component.y = transformation[4:] + component.flags = componentFlags + components.append(component) + + glyph = Glyph() + glyph.coordinates = GlyphCoordinates(self.points) + glyph.endPtsOfContours = self.endPts + glyph.flags = array("B", self.types) + self.init() + + if components: + glyph.components = components + glyph.numberOfContours = -1 + else: + glyph.numberOfContours = len(glyph.endPtsOfContours) + glyph.program = ttProgram.Program() + glyph.program.fromBytecode(b"") + + return glyph diff -Nru fonttools-3.0/Snippets/fontTools/pens/wxPen.py fonttools-3.21.2/Snippets/fontTools/pens/wxPen.py --- fonttools-3.0/Snippets/fontTools/pens/wxPen.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/pens/wxPen.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,31 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import BasePen + + +__all__ = ["WxPen"] + + +class WxPen(BasePen): + + def __init__(self, glyphSet, path=None): + BasePen.__init__(self, glyphSet) + if path is None: + import wx + path = wx.GraphicsRenderer.GetDefaultRenderer().CreatePath() + self.path = path + + def _moveTo(self, p): + self.path.MoveToPoint(*p) + + def _lineTo(self, p): + self.path.AddLineToPoint(*p) + + def _curveToOne(self, p1, p2, p3): + self.path.AddCurveToPoint(*p1+p2+p3) + + def _qCurveToOne(self, p1, p2): + self.path.AddQuadCurveToPoint(*p1+p2) + + def _closePath(self): + self.path.CloseSubpath() diff -Nru fonttools-3.0/Snippets/fontTools/subset/__init__.py 
fonttools-3.21.2/Snippets/fontTools/subset/__init__.py --- fonttools-3.0/Snippets/fontTools/subset/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/subset/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,3146 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.ttLib.tables import otTables +from fontTools.misc import psCharStrings +from fontTools.pens.basePen import NullPen +from fontTools.misc.loggingTools import Timer +import sys +import struct +import array +import logging +from collections import Counter +from types import MethodType + +__usage__ = "pyftsubset font-file [glyph...] [--option=value]..." + +__doc__="""\ +pyftsubset -- OpenType font subsetter and optimizer + + pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. + It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) + font file. The subsetted glyph set is based on the specified glyphs + or characters, and specified OpenType layout features. + + The tool also performs some size-reducing optimizations, aimed for using + subset fonts as webfonts. Individual optimizations can be enabled or + disabled, and are enabled by default when they are safe. + +Usage: + """+__usage__+""" + + At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, + --text, --text-file, --unicodes, or --unicodes-file, must be specified. + +Arguments: + font-file + The input font file. + glyph + Specify one or more glyph identifiers to include in the subset. Must be + PS glyph names, or the special string '*' to keep the entire glyph set. + +Initial glyph set specification: + These options populate the initial glyph set. Same option can appear + multiple times, and the results are accummulated. + --gids=[,...] 
+ Specify comma/whitespace-separated list of glyph IDs or ranges as + decimal numbers. For example, --gids=10-12,14 adds glyphs with + numbers 10, 11, 12, and 14. + --gids-file= + Like --gids but reads from a file. Anything after a '#' on any line + is ignored as comments. + --glyphs=[,...] + Specify comma/whitespace-separated PS glyph names to add to the subset. + Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc + that are accepted on the command line. The special string '*' will keep + the entire glyph set. + --glyphs-file= + Like --glyphs but reads from a file. Anything after a '#' on any line + is ignored as comments. + --text= + Specify characters to include in the subset, as UTF-8 string. + --text-file= + Like --text but reads from a file. Newline character are not added to + the subset. + --unicodes=[,...] + Specify comma/whitespace-separated list of Unicode codepoints or + ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. + For example, --unicodes=41-5a,61-7a adds ASCII letters, so does + the more verbose --unicodes=U+0041-005A,U+0061-007A. + The special strings '*' will choose all Unicode characters mapped + by the font. + --unicodes-file= + Like --unicodes, but reads from a file. Anything after a '#' on any + line in the file is ignored as comments. + --ignore-missing-glyphs + Do not fail if some requested glyphs or gids are not available in + the font. + --no-ignore-missing-glyphs + Stop and fail if some requested glyphs or gids are not available + in the font. [default] + --ignore-missing-unicodes [default] + Do not fail if some requested Unicode characters (including those + indirectly specified using --text or --text-file) are not available + in the font. + --no-ignore-missing-unicodes + Stop and fail if some requested Unicode characters are not available + in the font. + Note the default discrepancy between ignoring missing glyphs versus + unicodes. 
This is for historical reasons and in the future + --no-ignore-missing-unicodes might become default. + +Other options: + For the other options listed below, to see the current value of the option, + pass a value of '?' to it, with or without a '='. + Examples: + $ pyftsubset --glyph-names? + Current setting for 'glyph-names' is: False + $ ./pyftsubset --name-IDs=? + Current setting for 'name-IDs' is: [1, 2] + $ ./pyftsubset --hinting? --no-hinting --hinting? + Current setting for 'hinting' is: True + Current setting for 'hinting' is: False + +Output options: + --output-file= + The output font file. If not specified, the subsetted font + will be saved in as font-file.subset. + --flavor= + Specify flavor of output font file. May be 'woff' or 'woff2'. + Note that WOFF2 requires the Brotli Python extension, available + at https://github.com/google/brotli + --with-zopfli + Use the Google Zopfli algorithm to compress WOFF. The output is 3-8 % + smaller than pure zlib, but the compression speed is much slower. + The Zopfli Python bindings are available at: + https://pypi.python.org/pypi/zopfli + +Glyph set expansion: + These options control how additional glyphs are added to the subset. + --notdef-glyph + Add the '.notdef' glyph to the subset (ie, keep it). [default] + --no-notdef-glyph + Drop the '.notdef' glyph unless specified in the glyph set. This + saves a few bytes, but is not possible for Postscript-flavored + fonts, as those require '.notdef'. For TrueType-flavored fonts, + this works fine as long as no unsupported glyphs are requested + from the font. + --notdef-outline + Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is + used when glyphs not supported by the font are to be shown. It is not + needed otherwise. + --no-notdef-outline + When including a '.notdef' glyph, remove its outline. This saves + a few bytes. 
[default] + --recommended-glyphs + Add glyphs 0, 1, 2, and 3 to the subset, as recommended for + TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. + Some legacy software might require this, but no modern system does. + --no-recommended-glyphs + Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in + glyph set. [default] + --layout-features[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of OpenType layout feature tags that will be preserved. + Glyph variants used by the preserved features are added to the + specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', + 'dnom', 'frac', 'kern', 'liga', 'locl', 'mark', 'mkmk', 'numr', 'rclt', + 'rlig', 'rvrn', and all features required for script shaping are + preserved. To see the full list, try '--layout-features=?'. + Use '*' to keep all features. + Multiple --layout-features options can be provided if necessary. + Examples: + --layout-features+=onum,pnum,ss01 + * Keep the default set of features and 'onum', 'pnum', 'ss01'. + --layout-features-='mark','mkmk' + * Keep the default set of features but drop 'mark' and 'mkmk'. + --layout-features='kern' + * Only keep the 'kern' feature, drop all others. + --layout-features='' + * Drop all features. + --layout-features='*' + * Keep all features. + --layout-features+=aalt --layout-features-=vrt2 + * Keep default set of features plus 'aalt', but drop 'vrt2'. + +Hinting options: + --hinting + Keep hinting [default] + --no-hinting + Drop glyph-specific hinting and font-wide hinting tables, as well + as remove hinting-related bits and pieces from other tables (eg. GPOS). + See --hinting-tables for list of tables that are dropped by default. + Instructions and hints are stripped from 'glyf' and 'CFF ' tables + respectively. This produces (sometimes up to 30%) smaller fonts that + are suitable for extremely high-resolution systems, like high-end + mobile devices and retina displays. 
+ +Optimization options: + --desubroutinize + Remove CFF use of subroutinizes. Subroutinization is a way to make CFF + fonts smaller. For small subsets however, desubroutinizing might make + the font smaller. It has even been reported that desubroutinized CFF + fonts compress better (produce smaller output) WOFF and WOFF2 fonts. + Also see note under --no-hinting. + --no-desubroutinize [default] + Leave CFF subroutinizes as is, only throw away unused subroutinizes. + +Font table options: + --drop-tables[+|-]=
[,
...] + Specify (=), add to (+=) or exclude from (-=) the comma-separated + set of tables that will be be dropped. + By default, the following tables are dropped: + 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH' + and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill' + and color tables: 'CBLC', 'CBDT', 'sbix'. + The tool will attempt to subset the remaining tables. + Examples: + --drop-tables-='SVG ' + * Drop the default set of tables but keep 'SVG '. + --drop-tables+=GSUB + * Drop the default set of tables and 'GSUB'. + --drop-tables=DSIG + * Only drop the 'DSIG' table, keep all others. + --drop-tables= + * Keep all tables. + --no-subset-tables+=
<table>[,<table>
...] + Add to the set of tables that will not be subsetted. + By default, the following tables are included in this list, as + they do not need subsetting (ignore the fact that 'loca' is listed + here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', + 'name', 'cvt ', 'fpgm', 'prep', 'VMDX', 'DSIG', 'CPAL', 'MVAR', 'STAT'. + By default, tables that the tool does not know how to subset and are not + specified here will be dropped from the font, unless --passthrough-tables + option is passed. + Example: + --no-subset-tables+=FFTM + * Keep 'FFTM' table in the font by preventing subsetting. + --passthrough-tables + Do not drop tables that the tool does not know how to subset. + --no-passthrough-tables + Tables that the tool does not know how to subset and are not specified + in --no-subset-tables will be dropped from the font. [default] + --hinting-tables[-]=
<table>[,<table>
...] + Specify (=), add to (+=) or exclude from (-=) the list of font-wide + hinting tables that will be dropped if --no-hinting is specified, + Examples: + --hinting-tables-='VDMX' + * Drop font-wide hinting tables except 'VDMX'. + --hinting-tables='' + * Keep all font-wide hinting tables (but strip hints from glyphs). + --legacy-kern + Keep TrueType 'kern' table even when OpenType 'GPOS' is available. + --no-legacy-kern + Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default] + +Font naming options: + These options control what is retained in the 'name' table. For numerical + codes, see: http://www.microsoft.com/typography/otspec/name.htm + --name-IDs[+|-]=[,...] + Specify (=), add to (+=) or exclude from (-=) the set of 'name' table + entry nameIDs that will be preserved. By default only nameID 1 (Family) + and nameID 2 (Style) are preserved. Use '*' to keep all entries. + Examples: + --name-IDs+=0,4,6 + * Also keep Copyright, Full name and PostScript name entry. + --name-IDs='' + * Drop all 'name' table entries. + --name-IDs='*' + * keep all 'name' table entries + --name-legacy + Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.). + XXX Note: This might be needed for some fonts that have no Unicode name + entires for English. See: https://github.com/behdad/fonttools/issues/146 + --no-name-legacy + Drop legacy (non-Unicode) 'name' table entries [default] + --name-languages[+|-]=[,] + Specify (=), add to (+=) or exclude from (-=) the set of 'name' table + langIDs that will be preserved. By default only records with langID + 0x0409 (English) are preserved. Use '*' to keep all langIDs. + --obfuscate-names + Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4, + and 6 with dummy strings (it is still fully functional as webfont). + +Glyph naming and encoding options: + --glyph-names + Keep PS glyph names in TT-flavored fonts. In general glyph names are + not needed for correct use of the font. 
However, some PDF generators + and PDF viewers might rely on glyph names to extract Unicode text + from PDF documents. + --no-glyph-names + Drop PS glyph names in TT-flavored fonts, by using 'post' table + version 3.0. [default] + --legacy-cmap + Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.). + --no-legacy-cmap + Drop the legacy 'cmap' subtables. [default] + --symbol-cmap + Keep the 3.0 symbol 'cmap'. + --no-symbol-cmap + Drop the 3.0 symbol 'cmap'. [default] + +Other font-specific options: + --recalc-bounds + Recalculate font bounding boxes. + --no-recalc-bounds + Keep original font bounding boxes. This is faster and still safe + for all practical purposes. [default] + --recalc-timestamp + Set font 'modified' timestamp to current time. + --no-recalc-timestamp + Do not modify font 'modified' timestamp. [default] + --canonical-order + Order tables as recommended in the OpenType standard. This is not + required by the standard, nor by any known implementation. + --no-canonical-order + Keep original order of font tables. This is faster. [default] + --prune-unicode-ranges + Update the 'OS/2 ulUnicodeRange*' bits after subsetting. The Unicode + ranges defined in the OpenType specification v1.7 are intersected with + the Unicode codepoints specified in the font's Unicode 'cmap' subtables: + when no overlap is found, the bit will be switched off. However, it will + *not* be switched on if an intersection is found. [default] + --no-prune-unicode-ranges + Don't change the 'OS/2 ulUnicodeRange*' bits. + --recalc-average-width + Update the 'OS/2 xAvgCharWidth' field after subsetting. + --no-recalc-average-width + Don't change the 'OS/2 xAvgCharWidth' field. [default] + +Application options: + --verbose + Display verbose information of the subsetting process. + --timing + Display detailed timing information of the subsetting process. + --xml + Display the TTX XML representation of subsetted font. 
+ +Example: + Produce a subset containing the characters ' !"#$%' without performing + size-reducing optimizations: + + $ pyftsubset font.ttf --unicodes="U+0020-0025" \\ + --layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\ + --notdef-glyph --notdef-outline --recommended-glyphs \\ + --name-IDs='*' --name-legacy --name-languages='*' +""" + + +log = logging.getLogger("fontTools.subset") + +def _log_glyphs(self, glyphs, font=None): + self.info("Glyph names: %s", sorted(glyphs)) + if font: + reverseGlyphMap = font.getReverseGlyphMap() + self.info("Glyph IDs: %s", sorted(reverseGlyphMap[g] for g in glyphs)) + +# bind "glyphs" function to 'log' object +log.glyphs = MethodType(_log_glyphs, log) + +# I use a different timing channel so I can configure it separately from the +# main module's logger +timer = Timer(logger=logging.getLogger("fontTools.subset.timer")) + + +def _add_method(*clazzes): + """Returns a decorator function that adds a new method to one or + more classes.""" + def wrapper(method): + done = [] + for clazz in clazzes: + if clazz in done: continue # Support multiple names of a clazz + done.append(clazz) + assert clazz.__name__ != 'DefaultTable', \ + 'Oops, table class not found.' + assert not hasattr(clazz, method.__name__), \ + "Oops, class '%s' has method '%s'." % (clazz.__name__, + method.__name__) + setattr(clazz, method.__name__, method) + return None + return wrapper + +def _uniq_sort(l): + return sorted(set(l)) + +def _set_update(s, *others): + # Jython's set.update only takes one other argument. + # Emulate real set.update... 
+ for other in others: + s.update(other) + +def _dict_subset(d, glyphs): + return {g:d[g] for g in glyphs} + + +@_add_method(otTables.Coverage) +def intersect(self, glyphs): + """Returns ascending list of matching coverage values.""" + return [i for i,g in enumerate(self.glyphs) if g in glyphs] + +@_add_method(otTables.Coverage) +def intersect_glyphs(self, glyphs): + """Returns set of intersecting glyphs.""" + return set(g for g in self.glyphs if g in glyphs) + +@_add_method(otTables.Coverage) +def subset(self, glyphs): + """Returns ascending list of remaining coverage values.""" + indices = self.intersect(glyphs) + self.glyphs = [g for g in self.glyphs if g in glyphs] + return indices + +@_add_method(otTables.Coverage) +def remap(self, coverage_map): + """Remaps coverage.""" + self.glyphs = [self.glyphs[i] for i in coverage_map] + +@_add_method(otTables.ClassDef) +def intersect(self, glyphs): + """Returns ascending list of matching class values.""" + return _uniq_sort( + ([0] if any(g not in self.classDefs for g in glyphs) else []) + + [v for g,v in self.classDefs.items() if g in glyphs]) + +@_add_method(otTables.ClassDef) +def intersect_class(self, glyphs, klass): + """Returns set of glyphs matching class.""" + if klass == 0: + return set(g for g in glyphs if g not in self.classDefs) + return set(g for g,v in self.classDefs.items() + if v == klass and g in glyphs) + +@_add_method(otTables.ClassDef) +def subset(self, glyphs, remap=False): + """Returns ascending list of remaining classes.""" + self.classDefs = {g:v for g,v in self.classDefs.items() if g in glyphs} + # Note: while class 0 has the special meaning of "not matched", + # if no glyph will ever /not match/, we can optimize class 0 out too. 
+ indices = _uniq_sort( + ([0] if any(g not in self.classDefs for g in glyphs) else []) + + list(self.classDefs.values())) + if remap: + self.remap(indices) + return indices + +@_add_method(otTables.ClassDef) +def remap(self, class_map): + """Remaps classes.""" + self.classDefs = {g:class_map.index(v) for g,v in self.classDefs.items()} + +@_add_method(otTables.SingleSubst) +def closure_glyphs(self, s, cur_glyphs): + s.glyphs.update(v for g,v in self.mapping.items() if g in cur_glyphs) + +@_add_method(otTables.SingleSubst) +def subset_glyphs(self, s): + self.mapping = {g:v for g,v in self.mapping.items() + if g in s.glyphs and v in s.glyphs} + return bool(self.mapping) + +@_add_method(otTables.MultipleSubst) +def closure_glyphs(self, s, cur_glyphs): + for glyph, subst in self.mapping.items(): + if glyph in cur_glyphs: + _set_update(s.glyphs, subst) + +@_add_method(otTables.MultipleSubst) +def subset_glyphs(self, s): + self.mapping = {g:v for g,v in self.mapping.items() + if g in s.glyphs and all(sub in s.glyphs for sub in v)} + return bool(self.mapping) + +@_add_method(otTables.AlternateSubst) +def closure_glyphs(self, s, cur_glyphs): + _set_update(s.glyphs, *(vlist for g,vlist in self.alternates.items() + if g in cur_glyphs)) + +@_add_method(otTables.AlternateSubst) +def subset_glyphs(self, s): + self.alternates = {g:vlist + for g,vlist in self.alternates.items() + if g in s.glyphs and + all(v in s.glyphs for v in vlist)} + return bool(self.alternates) + +@_add_method(otTables.LigatureSubst) +def closure_glyphs(self, s, cur_glyphs): + _set_update(s.glyphs, *([seq.LigGlyph for seq in seqs + if all(c in s.glyphs for c in seq.Component)] + for g,seqs in self.ligatures.items() + if g in cur_glyphs)) + +@_add_method(otTables.LigatureSubst) +def subset_glyphs(self, s): + self.ligatures = {g:v for g,v in self.ligatures.items() + if g in s.glyphs} + self.ligatures = {g:[seq for seq in seqs + if seq.LigGlyph in s.glyphs and + all(c in s.glyphs for c in seq.Component)] + for 
g,seqs in self.ligatures.items()} + self.ligatures = {g:v for g,v in self.ligatures.items() if v} + return bool(self.ligatures) + +@_add_method(otTables.ReverseChainSingleSubst) +def closure_glyphs(self, s, cur_glyphs): + if self.Format == 1: + indices = self.Coverage.intersect(cur_glyphs) + if(not indices or + not all(c.intersect(s.glyphs) + for c in self.LookAheadCoverage + self.BacktrackCoverage)): + return + s.glyphs.update(self.Substitute[i] for i in indices) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.ReverseChainSingleSubst) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + self.Substitute = [self.Substitute[i] for i in indices] + # Now drop rules generating glyphs we don't want + indices = [i for i,sub in enumerate(self.Substitute) + if sub in s.glyphs] + self.Substitute = [self.Substitute[i] for i in indices] + self.Coverage.remap(indices) + self.GlyphCount = len(self.Substitute) + return bool(self.GlyphCount and + all(c.subset(s.glyphs) + for c in self.LookAheadCoverage+self.BacktrackCoverage)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.SinglePos) +def subset_glyphs(self, s): + if self.Format == 1: + return len(self.Coverage.subset(s.glyphs)) + elif self.Format == 2: + indices = self.Coverage.subset(s.glyphs) + values = self.Value + count = len(values) + self.Value = [values[i] for i in indices if i < count] + self.ValueCount = len(self.Value) + return bool(self.ValueCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.SinglePos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables + self.ValueFormat &= ~0x00F0 + return True + +@_add_method(otTables.PairPos) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + pairs = self.PairSet + count = len(pairs) + self.PairSet = [pairs[i] for i in indices if i < count] + for p in 
self.PairSet: + p.PairValueRecord = [r for r in p.PairValueRecord if r.SecondGlyph in s.glyphs] + p.PairValueCount = len(p.PairValueRecord) + # Remove empty pairsets + indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount] + self.Coverage.remap(indices) + self.PairSet = [self.PairSet[i] for i in indices] + self.PairSetCount = len(self.PairSet) + return bool(self.PairSetCount) + elif self.Format == 2: + class1_map = [c for c in self.ClassDef1.subset(s.glyphs, remap=True) if c < self.Class1Count] + class2_map = [c for c in self.ClassDef2.subset(s.glyphs, remap=True) if c < self.Class2Count] + self.Class1Record = [self.Class1Record[i] for i in class1_map] + for c in self.Class1Record: + c.Class2Record = [c.Class2Record[i] for i in class2_map] + self.Class1Count = len(class1_map) + self.Class2Count = len(class2_map) + return bool(self.Class1Count and + self.Class2Count and + self.Coverage.subset(s.glyphs)) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.PairPos) +def prune_post_subset(self, options): + if not options.hinting: + # Drop device tables + self.ValueFormat1 &= ~0x00F0 + self.ValueFormat2 &= ~0x00F0 + return True + +@_add_method(otTables.CursivePos) +def subset_glyphs(self, s): + if self.Format == 1: + indices = self.Coverage.subset(s.glyphs) + records = self.EntryExitRecord + count = len(records) + self.EntryExitRecord = [records[i] for i in indices if i < count] + self.EntryExitCount = len(self.EntryExitRecord) + return bool(self.EntryExitCount) + else: + assert 0, "unknown format: %s" % self.Format + +@_add_method(otTables.Anchor) +def prune_hints(self): + # Drop device tables / contour anchor point + self.ensureDecompiled() + self.Format = 1 + +@_add_method(otTables.CursivePos) +def prune_post_subset(self, options): + if not options.hinting: + for rec in self.EntryExitRecord: + if rec.EntryAnchor: rec.EntryAnchor.prune_hints() + if rec.ExitAnchor: rec.ExitAnchor.prune_hints() + return True + 
# ---------------------------------------------------------------------------
# GPOS mark-attachment subtables (MarkBasePos / MarkLigPos / MarkMarkPos).
# Each subset_glyphs() trims the coverage and the parallel record array to
# the retained glyph set, then renumbers the surviving mark classes densely.
# Each prune_post_subset() strips hinting data from the anchors (via
# Anchor.prune_hints) when --no-hinting is in effect.
# 's' is the subsetter state object: the code below reads s.glyphs,
# s.table.LookupList, s._doneLookups and s._activeLookups from it.
# ---------------------------------------------------------------------------

@_add_method(otTables.MarkBasePos)
def subset_glyphs(self, s):
    """Subset format-1 MarkBasePos to s.glyphs; returns True if non-empty."""
    if self.Format == 1:
        mark_indices = self.MarkCoverage.subset(s.glyphs)
        self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] for i in mark_indices]
        self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
        base_indices = self.BaseCoverage.subset(s.glyphs)
        self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i] for i in base_indices]
        self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord)
        # Prune empty classes
        class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
        self.ClassCount = len(class_indices)
        for m in self.MarkArray.MarkRecord:
            m.Class = class_indices.index(m.Class)
        # Base records keep one anchor per surviving class, in class order.
        for b in self.BaseArray.BaseRecord:
            b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices]
        return bool(self.ClassCount and
                    self.MarkArray.MarkCount and
                    self.BaseArray.BaseCount)
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.MarkBasePos)
def prune_post_subset(self, options):
    """Drop hinting data from all mark/base anchors unless hinting is kept."""
    if not options.hinting:
        for m in self.MarkArray.MarkRecord:
            if m.MarkAnchor:
                m.MarkAnchor.prune_hints()
        for b in self.BaseArray.BaseRecord:
            for a in b.BaseAnchor:
                if a:
                    a.prune_hints()
    return True

@_add_method(otTables.MarkLigPos)
def subset_glyphs(self, s):
    """Subset format-1 MarkLigPos to s.glyphs; returns True if non-empty."""
    if self.Format == 1:
        mark_indices = self.MarkCoverage.subset(s.glyphs)
        self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] for i in mark_indices]
        self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
        ligature_indices = self.LigatureCoverage.subset(s.glyphs)
        self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i] for i in ligature_indices]
        self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach)
        # Prune empty classes
        class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
        self.ClassCount = len(class_indices)
        for m in self.MarkArray.MarkRecord:
            m.Class = class_indices.index(m.Class)
        # Every component of every ligature keeps one anchor per class.
        for l in self.LigatureArray.LigatureAttach:
            for c in l.ComponentRecord:
                c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices]
        return bool(self.ClassCount and
                    self.MarkArray.MarkCount and
                    self.LigatureArray.LigatureCount)
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.MarkLigPos)
def prune_post_subset(self, options):
    """Drop hinting data from all mark/ligature anchors unless hinting is kept."""
    if not options.hinting:
        for m in self.MarkArray.MarkRecord:
            if m.MarkAnchor:
                m.MarkAnchor.prune_hints()
        for l in self.LigatureArray.LigatureAttach:
            for c in l.ComponentRecord:
                for a in c.LigatureAnchor:
                    if a:
                        a.prune_hints()
    return True

@_add_method(otTables.MarkMarkPos)
def subset_glyphs(self, s):
    """Subset format-1 MarkMarkPos to s.glyphs; returns True if non-empty."""
    if self.Format == 1:
        mark1_indices = self.Mark1Coverage.subset(s.glyphs)
        self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i] for i in mark1_indices]
        self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord)
        mark2_indices = self.Mark2Coverage.subset(s.glyphs)
        self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i] for i in mark2_indices]
        self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record)
        # Prune empty classes
        class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord)
        self.ClassCount = len(class_indices)
        for m in self.Mark1Array.MarkRecord:
            m.Class = class_indices.index(m.Class)
        for b in self.Mark2Array.Mark2Record:
            b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices]
        return bool(self.ClassCount and
                    self.Mark1Array.MarkCount and
                    self.Mark2Array.MarkCount)
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.MarkMarkPos)
def prune_post_subset(self, options):
    """Drop hinting data from all mark1/mark2 anchors unless hinting is kept."""
    if not options.hinting:
        # Drop device tables or contour anchor point
        for m in self.Mark1Array.MarkRecord:
            if m.MarkAnchor:
                m.MarkAnchor.prune_hints()
        for b in self.Mark2Array.Mark2Record:
            for m in b.Mark2Anchor:
                if m:
                    m.prune_hints()
    return True

# ---------------------------------------------------------------------------
# Default no-op implementations for the simple (non-contextual) subtables:
# they reference no other lookups, so there is nothing to remap or collect.
# ---------------------------------------------------------------------------

@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos)
def subset_lookups(self, lookup_indices):
    # Simple subtables hold no lookup references; nothing to renumber.
    pass

@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos)
def collect_lookups(self):
    # Simple subtables reference no other lookups.
    return []

@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def prune_post_subset(self, options):
    # Nothing to prune after subsetting for these subtable types.
    return True

@_add_method(otTables.SingleSubst,
             otTables.AlternateSubst,
             otTables.ReverseChainSingleSubst)
def may_have_non_1to1(self):
    # These substitutions always map one input glyph to one output glyph.
    return False

@_add_method(otTables.MultipleSubst,
             otTables.LigatureSubst,
             otTables.ContextSubst,
             otTables.ChainContextSubst)
def may_have_non_1to1(self):
    # Sequence lengths may change (one-to-many, many-to-one, or contextual).
    return True

@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def __subset_classify_context(self):
    """Return a (cached, per-class-per-format) ContextHelper that abstracts
    over the naming/layout differences between the (Chain)Context{Subst,Pos}
    subtable formats 1 (glyph), 2 (class) and 3 (coverage), or None for an
    unknown format."""

    class ContextHelper(object):
        def __init__(self, klass, Format):
            # Derive attribute-name fragments from the owning class's name.
            if klass.__name__.endswith('Subst'):
                Typ = 'Sub'
                Type = 'Subst'
            else:
                Typ = 'Pos'
                Type = 'Pos'
            if klass.__name__.startswith('Chain'):
                Chain = 'Chain'
                InputIdx = 1
                DataLen = 3
            else:
                Chain = ''
                InputIdx = 0
                DataLen = 1
            ChainTyp = Chain+Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp
            self.InputIdx = InputIdx
            self.DataLen = DataLen

            self.LookupRecord = Type+'LookupRecord'

            if Format == 1:
                # Format 1: rules match literal glyph sequences.
                Coverage = lambda r: r.Coverage
                ChainCoverage = lambda r: r.Coverage
                ContextData = lambda r:(None,)
                ChainContextData = lambda r:(None, None, None)
                SetContextData = None
                SetChainContextData = None
                RuleData = lambda r:(r.Input,)
                ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
                def SetRuleData(r, d):
                    (r.Input,) = d
                    # Input omits the first (coverage) glyph, hence +1.
                    (r.GlyphCount,) = (len(x)+1 for x in d)
                def ChainSetRuleData(r, d):
                    (r.Backtrack, r.Input, r.LookAhead) = d
                    (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2]))
            elif Format == 2:
                # Format 2: rules match glyph-class sequences via ClassDefs.
                Coverage = lambda r: r.Coverage
                ChainCoverage = lambda r: r.Coverage
                ContextData = lambda r:(r.ClassDef,)
                ChainContextData = lambda r:(r.BacktrackClassDef,
                                             r.InputClassDef,
                                             r.LookAheadClassDef)
                def SetContextData(r, d):
                    (r.ClassDef,) = d
                def SetChainContextData(r, d):
                    (r.BacktrackClassDef,
                     r.InputClassDef,
                     r.LookAheadClassDef) = d
                RuleData = lambda r:(r.Class,)
                ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
                def SetRuleData(r, d):
                    (r.Class,) = d
                    (r.GlyphCount,) = (len(x)+1 for x in d)
                def ChainSetRuleData(r, d):
                    (r.Backtrack, r.Input, r.LookAhead) = d
                    (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(d[0]),len(d[1])+1,len(d[2]))
            elif Format == 3:
                # Format 3: one rule, matched by per-position coverages.
                Coverage = lambda r: r.Coverage[0]
                ChainCoverage = lambda r: r.InputCoverage[0]
                ContextData = None
                ChainContextData = None
                SetContextData = None
                SetChainContextData = None
                RuleData = lambda r: r.Coverage
                ChainRuleData = lambda r:(r.BacktrackCoverage +
                                          r.InputCoverage +
                                          r.LookAheadCoverage)
                def SetRuleData(r, d):
                    (r.Coverage,) = d
                    (r.GlyphCount,) = (len(x) for x in d)
                def ChainSetRuleData(r, d):
                    (r.BacktrackCoverage, r.InputCoverage, r.LookAheadCoverage) = d
                    (r.BacktrackGlyphCount,r.InputGlyphCount,r.LookAheadGlyphCount,) = (len(x) for x in d)
            else:
                assert 0, "unknown format: %s" % Format

            # Publish the chain or plain accessors under uniform names.
            if Chain:
                self.Coverage = ChainCoverage
                self.ContextData = ChainContextData
                self.SetContextData = SetChainContextData
                self.RuleData = ChainRuleData
                self.SetRuleData = ChainSetRuleData
            else:
                self.Coverage = Coverage
                self.ContextData = ContextData
                self.SetContextData = SetContextData
                self.RuleData = RuleData
                self.SetRuleData = SetRuleData

            if Format == 1:
                self.Rule = ChainTyp+'Rule'
                self.RuleCount = ChainTyp+'RuleCount'
                self.RuleSet = ChainTyp+'RuleSet'
                self.RuleSetCount = ChainTyp+'RuleSetCount'
                self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
            elif Format == 2:
                self.Rule = ChainTyp+'ClassRule'
                self.RuleCount = ChainTyp+'ClassRuleCount'
                self.RuleSet = ChainTyp+'ClassSet'
                self.RuleSetCount = ChainTyp+'ClassSetCount'
                # Class 0 means "everything not assigned a class" when there
                # is no ClassDef to consult.
                self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c
                                                       else (set(glyphs) if r == 0 else set()))

                self.ClassDef = 'InputClassDef' if Chain else 'ClassDef'
                self.ClassDefIndex = 1 if Chain else 0
                self.Input = 'Input' if Chain else 'Class'

    if self.Format not in [1, 2, 3]:
        return None  # Don't shoot the messenger; let it go
    # Cache one helper per (class, format) pair on the class itself.
    if not hasattr(self.__class__, "__ContextHelpers"):
        self.__class__.__ContextHelpers = {}
    if self.Format not in self.__class__.__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__.__ContextHelpers[self.Format] = helper
    return self.__class__.__ContextHelpers[self.Format]

@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst)
def closure_glyphs(self, s, cur_glyphs):
    """Add to s.glyphs every glyph reachable from cur_glyphs through the
    lookups this contextual subtable references.  'chaos' tracks sequence
    positions whose glyph content is no longer known exactly (after a
    non-1:1 lookup fired), for which the per-position glyph set degrades
    to None (= anything)."""
    c = self.__subset_classify_context()

    indices = c.Coverage(self).intersect(cur_glyphs)
    if not indices:
        return []
    cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs)

    if self.Format == 1:
        ContextData = c.ContextData(self)
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        for i in indices:
            if i >= rssCount or not rss[i]: continue
            for r in getattr(rss[i], c.Rule):
                if not r: continue
                # Skip rules whose context cannot be satisfied by s.glyphs.
                if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
                           for cd,klist in zip(ContextData, c.RuleData(r))):
                    continue
                chaos = set()
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    seqi = ll.SequenceIndex
                    if seqi in chaos:
                        # TODO Can we improve this?
                        pos_glyphs = None
                    else:
                        if seqi == 0:
                            pos_glyphs = frozenset([c.Coverage(self).glyphs[i]])
                        else:
                            pos_glyphs = frozenset([r.Input[seqi - 1]])
                    lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
                    chaos.add(seqi)
                    if lookup.may_have_non_1to1():
                        chaos.update(range(seqi, len(r.Input)+2))
                    lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    elif self.Format == 2:
        ClassDef = getattr(self, c.ClassDef)
        indices = ClassDef.intersect(cur_glyphs)
        ContextData = c.ContextData(self)
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        for i in indices:
            if i >= rssCount or not rss[i]: continue
            for r in getattr(rss[i], c.Rule):
                if not r: continue
                if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
                           for cd,klist in zip(ContextData, c.RuleData(r))):
                    continue
                chaos = set()
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    seqi = ll.SequenceIndex
                    if seqi in chaos:
                        # TODO Can we improve this?
                        pos_glyphs = None
                    else:
                        if seqi == 0:
                            pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i))
                        else:
                            pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1]))
                    lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
                    chaos.add(seqi)
                    if lookup.may_have_non_1to1():
                        chaos.update(range(seqi, len(getattr(r, c.Input))+2))
                    lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    elif self.Format == 3:
        if not all(x.intersect(s.glyphs) for x in c.RuleData(self)):
            return []
        r = self
        chaos = set()
        for ll in getattr(r, c.LookupRecord):
            if not ll: continue
            seqi = ll.SequenceIndex
            if seqi in chaos:
                # TODO Can we improve this?
                pos_glyphs = None
            else:
                if seqi == 0:
                    pos_glyphs = frozenset(cur_glyphs)
                else:
                    pos_glyphs = frozenset(r.InputCoverage[seqi].intersect_glyphs(s.glyphs))
            lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
            chaos.add(seqi)
            if lookup.may_have_non_1to1():
                chaos.update(range(seqi, len(r.InputCoverage)+1))
            lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.ContextSubst,
             otTables.ContextPos,
             otTables.ChainContextSubst,
             otTables.ChainContextPos)
def subset_glyphs(self, s):
    """Subset this contextual subtable to s.glyphs; drop rules that can no
    longer match and remap coverage/class indices.  Returns True if any
    rule survives."""
    c = self.__subset_classify_context()

    if self.Format == 1:
        indices = self.Coverage.subset(s.glyphs)
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        rss = [rss[i] for i in indices if i < rssCount]
        for rs in rss:
            if not rs: continue
            ss = getattr(rs, c.Rule)
            # Keep only rules whose every context glyph is retained.
            ss = [r for r in ss
                  if r and all(all(g in s.glyphs for g in glist)
                               for glist in c.RuleData(r))]
            setattr(rs, c.Rule, ss)
            setattr(rs, c.RuleCount, len(ss))
        # Prune empty rulesets
        indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)]
        self.Coverage.remap(indices)
        rss = [rss[i] for i in indices]
        setattr(self, c.RuleSet, rss)
        setattr(self, c.RuleSetCount, len(rss))
        return bool(rss)
    elif self.Format == 2:
        if not self.Coverage.subset(s.glyphs):
            return False
        ContextData = c.ContextData(self)
        klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData]

        # Keep rulesets for class numbers that survived.
        indices = klass_maps[c.ClassDefIndex]
        rss = getattr(self, c.RuleSet)
        rssCount = getattr(self, c.RuleSetCount)
        rss = [rss[i] for i in indices if i < rssCount]
        del rssCount
        # Delete, but not renumber, unreachable rulesets.
        indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs)
        rss = [rss if i in indices else None for i,rss in enumerate(rss)]

        for rs in rss:
            if not rs: continue
            ss = getattr(rs, c.Rule)
            # Keep only rules whose every context class survived subsetting.
            ss = [r for r in ss
                  if r and all(all(k in klass_map for k in klist)
                               for klass_map,klist in zip(klass_maps, c.RuleData(r)))]
            setattr(rs, c.Rule, ss)
            setattr(rs, c.RuleCount, len(ss))

            # Remap rule classes
            for r in ss:
                c.SetRuleData(r, [[klass_map.index(k) for k in klist]
                                  for klass_map,klist in zip(klass_maps, c.RuleData(r))])

        # Prune empty rulesets
        rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss]
        while rss and rss[-1] is None:
            del rss[-1]
        setattr(self, c.RuleSet, rss)
        setattr(self, c.RuleSetCount, len(rss))

        # TODO: We can do a second round of remapping class values based
        # on classes that are actually used in at least one rule. Right
        # now we subset classes to c.glyphs only. Or better, rewrite
        # the above to do that.

        return bool(rss)
    elif self.Format == 3:
        return all(x.subset(s.glyphs) for x in c.RuleData(self))
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def subset_lookups(self, lookup_indices):
    """Drop lookup records pointing outside lookup_indices and renumber the
    survivors to the new (compacted) lookup list positions."""
    c = self.__subset_classify_context()

    if self.Format in [1, 2]:
        for rs in getattr(self, c.RuleSet):
            if not rs: continue
            for r in getattr(rs, c.Rule):
                if not r: continue
                setattr(r, c.LookupRecord,
                        [ll for ll in getattr(r, c.LookupRecord)
                         if ll and ll.LookupListIndex in lookup_indices])
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
    elif self.Format == 3:
        setattr(self, c.LookupRecord,
                [ll for ll in getattr(self, c.LookupRecord)
                 if ll and ll.LookupListIndex in lookup_indices])
        for ll in getattr(self, c.LookupRecord):
            if not ll: continue
            ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def collect_lookups(self):
    """Return the (possibly repeated) lookup-list indices this subtable references."""
    c = self.__subset_classify_context()

    if self.Format in [1, 2]:
        return [ll.LookupListIndex
                for rs in getattr(self, c.RuleSet) if rs
                for r in getattr(rs, c.Rule) if r
                for ll in getattr(r, c.LookupRecord) if ll]
    elif self.Format == 3:
        return [ll.LookupListIndex
                for ll in getattr(self, c.LookupRecord) if ll]
    else:
        assert 0, "unknown format: %s" % self.Format

# ---------------------------------------------------------------------------
# Extension subtables simply forward every operation to the wrapped
# ExtSubTable.
# ---------------------------------------------------------------------------

@_add_method(otTables.ExtensionSubst)
def closure_glyphs(self, s, cur_glyphs):
    if self.Format == 1:
        self.ExtSubTable.closure_glyphs(s, cur_glyphs)
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.ExtensionSubst)
def may_have_non_1to1(self):
    if self.Format == 1:
        return self.ExtSubTable.may_have_non_1to1()
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def subset_glyphs(self, s):
    if self.Format == 1:
        return self.ExtSubTable.subset_glyphs(s)
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def prune_post_subset(self, options):
    if self.Format == 1:
        return self.ExtSubTable.prune_post_subset(options)
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def subset_lookups(self, lookup_indices):
    if self.Format == 1:
        return self.ExtSubTable.subset_lookups(lookup_indices)
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.ExtensionSubst,
             otTables.ExtensionPos)
def collect_lookups(self):
    if self.Format == 1:
        return self.ExtSubTable.collect_lookups()
    else:
        assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.Lookup)
def closure_glyphs(self, s, cur_glyphs=None):
    """Run glyph closure over all subtables, memoizing on (lookup identity,
    glyph set) via s._doneLookups and detecting recursion via s._activeLookups."""
    if cur_glyphs is None:
        cur_glyphs = frozenset(s.glyphs)

    # Memoize
    if (id(self), cur_glyphs) in s._doneLookups:
        return
    s._doneLookups.add((id(self), cur_glyphs))

    if self in s._activeLookups:
        raise Exception("Circular loop in lookup recursion")
    s._activeLookups.append(self)
    for st in self.SubTable:
        if not st: continue
        st.closure_glyphs(s, cur_glyphs)
    assert(s._activeLookups[-1] == self)
    del s._activeLookups[-1]

@_add_method(otTables.Lookup)
def subset_glyphs(self, s):
    """Keep only subtables that remain non-empty; returns True if any do."""
    self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)]
    self.SubTableCount = len(self.SubTable)
    return bool(self.SubTableCount)

@_add_method(otTables.Lookup)
def prune_post_subset(self, options):
    """Returns True if any subtable reported a post-subset change."""
    ret = False
    for st in self.SubTable:
        if not st: continue
        if st.prune_post_subset(options): ret = True
    return ret

@_add_method(otTables.Lookup)
def subset_lookups(self, lookup_indices):
    # Forward the lookup-index remap to every subtable.
    # NOTE(review): unlike subset_glyphs/collect_lookups above, this does not
    # skip None subtables — would raise AttributeError on a None entry;
    # confirm subtables can never be None at this point.
    for s in self.SubTable:
        s.subset_lookups(lookup_indices)

@_add_method(otTables.Lookup)
def collect_lookups(self):
    """Concatenate the lookup indices referenced by all subtables."""
    return sum((st.collect_lookups() for st in self.SubTable if st), [])

@_add_method(otTables.Lookup)
def may_have_non_1to1(self):
    return any(st.may_have_non_1to1() for st in self.SubTable if st)

@_add_method(otTables.LookupList)
def subset_glyphs(self, s):
    """Returns the indices of nonempty lookups."""
    return [i for i,l in enumerate(self.Lookup) if l and l.subset_glyphs(s)]

@_add_method(otTables.LookupList)
def prune_post_subset(self, options):
    """Returns True if any lookup reported a post-subset change."""
    ret = False
    for l in self.Lookup:
        if not l: continue
        if l.prune_post_subset(options): ret = True
    return ret

@_add_method(otTables.LookupList)
def subset_lookups(self, lookup_indices):
    """Keep only the lookups in lookup_indices (in that order) and renumber
    cross-references inside the survivors accordingly."""
    self.ensureDecompiled()
    self.Lookup = [self.Lookup[i] for i in lookup_indices
                   if i < self.LookupCount]
    self.LookupCount = len(self.Lookup)
    for l in self.Lookup:
        l.subset_lookups(lookup_indices)
+@_add_method(otTables.LookupList) +def neuter_lookups(self, lookup_indices): + """Sets lookups not in lookup_indices to None.""" + self.ensureDecompiled() + self.Lookup = [l if i in lookup_indices else None for i,l in enumerate(self.Lookup)] + +@_add_method(otTables.LookupList) +def closure_lookups(self, lookup_indices): + """Returns sorted index of all lookups reachable from lookup_indices.""" + lookup_indices = _uniq_sort(lookup_indices) + recurse = lookup_indices + while True: + recurse_lookups = sum((self.Lookup[i].collect_lookups() + for i in recurse if i < self.LookupCount), []) + recurse_lookups = [l for l in recurse_lookups + if l not in lookup_indices and l < self.LookupCount] + if not recurse_lookups: + return _uniq_sort(lookup_indices) + recurse_lookups = _uniq_sort(recurse_lookups) + lookup_indices.extend(recurse_lookups) + recurse = recurse_lookups + +@_add_method(otTables.Feature) +def subset_lookups(self, lookup_indices): + """"Returns True if feature is non-empty afterwards.""" + self.LookupListIndex = [l for l in self.LookupListIndex + if l in lookup_indices] + # Now map them. + self.LookupListIndex = [lookup_indices.index(l) + for l in self.LookupListIndex] + self.LookupCount = len(self.LookupListIndex) + return self.LookupCount or self.FeatureParams + +@_add_method(otTables.FeatureList) +def subset_lookups(self, lookup_indices): + """Returns the indices of nonempty features.""" + # Note: Never ever drop feature 'pref', even if it's empty. + # HarfBuzz chooses shaper for Khmer based on presence of this + # feature. 
See thread at: + # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html + return [i for i,f in enumerate(self.FeatureRecord) + if (f.Feature.subset_lookups(lookup_indices) or + f.FeatureTag == 'pref')] + +@_add_method(otTables.FeatureList) +def collect_lookups(self, feature_indices): + return sum((self.FeatureRecord[i].Feature.LookupListIndex + for i in feature_indices + if i < self.FeatureCount), []) + +@_add_method(otTables.FeatureList) +def subset_features(self, feature_indices): + self.ensureDecompiled() + self.FeatureRecord = [self.FeatureRecord[i] for i in feature_indices] + self.FeatureCount = len(self.FeatureRecord) + return bool(self.FeatureCount) + +@_add_method(otTables.FeatureTableSubstitution) +def subset_lookups(self, lookup_indices): + """Returns the indices of nonempty features.""" + return [r.FeatureIndex for r in self.SubstitutionRecord + if r.Feature.subset_lookups(lookup_indices)] + +@_add_method(otTables.FeatureVariations) +def subset_lookups(self, lookup_indices): + """Returns the indices of nonempty features.""" + return sum((f.FeatureTableSubstitution.subset_lookups(lookup_indices) + for f in self.FeatureVariationRecord), []) + +@_add_method(otTables.FeatureVariations) +def collect_lookups(self, feature_indices): + return sum((r.Feature.LookupListIndex + for vr in self.FeatureVariationRecord + for r in vr.FeatureTableSubstitution.SubstitutionRecord + if r.FeatureIndex in feature_indices), []) + +@_add_method(otTables.FeatureTableSubstitution) +def subset_features(self, feature_indices): + self.ensureDecompiled() + self.SubstitutionRecord = [r for r in self.SubstitutionRecord + if r.FeatureIndex in feature_indices] + self.SubstitutionCount = len(self.SubstitutionRecord) + return bool(self.SubstitutionCount) + +@_add_method(otTables.FeatureVariations) +def subset_features(self, feature_indices): + self.ensureDecompiled() + self.FeaturVariationRecord = [r for r in self.FeatureVariationRecord + if 
r.FeatureTableSubstitution.subset_features(feature_indices)] + self.FeatureVariationCount = len(self.FeatureVariationRecord) + return bool(self.FeatureVariationCount) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def subset_features(self, feature_indices): + if self.ReqFeatureIndex in feature_indices: + self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex) + else: + self.ReqFeatureIndex = 65535 + self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices] + # Now map them. + self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex + if f in feature_indices] + self.FeatureCount = len(self.FeatureIndex) + return bool(self.FeatureCount or self.ReqFeatureIndex != 65535) + +@_add_method(otTables.DefaultLangSys, + otTables.LangSys) +def collect_features(self): + feature_indices = self.FeatureIndex[:] + if self.ReqFeatureIndex != 65535: + feature_indices.append(self.ReqFeatureIndex) + return _uniq_sort(feature_indices) + +@_add_method(otTables.Script) +def subset_features(self, feature_indices, keepEmptyDefaultLangSys=False): + if(self.DefaultLangSys and + not self.DefaultLangSys.subset_features(feature_indices) and + not keepEmptyDefaultLangSys): + self.DefaultLangSys = None + self.LangSysRecord = [l for l in self.LangSysRecord + if l.LangSys.subset_features(feature_indices)] + self.LangSysCount = len(self.LangSysRecord) + return bool(self.LangSysCount or self.DefaultLangSys) + +@_add_method(otTables.Script) +def collect_features(self): + feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord] + if self.DefaultLangSys: + feature_indices.append(self.DefaultLangSys.collect_features()) + return _uniq_sort(sum(feature_indices, [])) + +@_add_method(otTables.ScriptList) +def subset_features(self, feature_indices, retain_empty): + # https://bugzilla.mozilla.org/show_bug.cgi?id=1331737#c32 + self.ScriptRecord = [s for s in self.ScriptRecord + if s.Script.subset_features(feature_indices, 
s.ScriptTag=='DFLT') or + retain_empty] + self.ScriptCount = len(self.ScriptRecord) + return bool(self.ScriptCount) + +@_add_method(otTables.ScriptList) +def collect_features(self): + return _uniq_sort(sum((s.Script.collect_features() + for s in self.ScriptRecord), [])) + +# CBLC will inherit it +@_add_method(ttLib.getTableClass('EBLC')) +def subset_glyphs(self, s): + for strike in self.strikes: + for indexSubTable in strike.indexSubTables: + indexSubTable.names = [n for n in indexSubTable.names if n in s.glyphs] + strike.indexSubTables = [i for i in strike.indexSubTables if i.names] + self.strikes = [s for s in self.strikes if s.indexSubTables] + + return True + +# CBDC will inherit it +@_add_method(ttLib.getTableClass('EBDT')) +def subset_glyphs(self, s): + self.strikeData = [{g: strike[g] for g in s.glyphs if g in strike} + for strike in self.strikeData] + return True + +@_add_method(ttLib.getTableClass('GSUB')) +def closure_glyphs(self, s): + s.table = self.table + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) + else: + lookup_indices = [] + if getattr(self.table, 'FeatureVariations', None): + lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices) + lookup_indices = _uniq_sort(lookup_indices) + if self.table.LookupList: + while True: + orig_glyphs = frozenset(s.glyphs) + s._activeLookups = [] + s._doneLookups = set() + for i in lookup_indices: + if i >= self.table.LookupList.LookupCount: continue + if not self.table.LookupList.Lookup[i]: continue + self.table.LookupList.Lookup[i].closure_glyphs(s) + del s._activeLookups, s._doneLookups + if orig_glyphs == s.glyphs: + break + del s.table + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_glyphs(self, s): + s.glyphs = s.glyphs_gsubed + if self.table.LookupList: + lookup_indices = 
self.table.LookupList.subset_glyphs(s) + else: + lookup_indices = [] + self.subset_lookups(lookup_indices) + return True + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def retain_empty_scripts(self): + # https://github.com/behdad/fonttools/issues/518 + # https://bugzilla.mozilla.org/show_bug.cgi?id=1080739#c15 + return self.__class__ == ttLib.getTableClass('GSUB') + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_lookups(self, lookup_indices): + """Retains specified lookups, then removes empty features, language + systems, and scripts.""" + if self.table.LookupList: + self.table.LookupList.subset_lookups(lookup_indices) + if self.table.FeatureList: + feature_indices = self.table.FeatureList.subset_lookups(lookup_indices) + else: + feature_indices = [] + if getattr(self.table, 'FeatureVariations', None): + feature_indices += self.table.FeatureVariations.subset_lookups(lookup_indices) + feature_indices = _uniq_sort(feature_indices) + if self.table.FeatureList: + self.table.FeatureList.subset_features(feature_indices) + if getattr(self.table, 'FeatureVariations', None): + self.table.FeatureVariations.subset_features(feature_indices) + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts()) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def neuter_lookups(self, lookup_indices): + """Sets lookups not in lookup_indices to None.""" + if self.table.LookupList: + self.table.LookupList.neuter_lookups(lookup_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_lookups(self, remap=True): + """Remove (default) or neuter unreferenced lookups""" + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) + else: + 
lookup_indices = [] + if getattr(self.table, 'FeatureVariations', None): + lookup_indices += self.table.FeatureVariations.collect_lookups(feature_indices) + lookup_indices = _uniq_sort(lookup_indices) + if self.table.LookupList: + lookup_indices = self.table.LookupList.closure_lookups(lookup_indices) + else: + lookup_indices = [] + if remap: + self.subset_lookups(lookup_indices) + else: + self.neuter_lookups(lookup_indices) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def subset_feature_tags(self, feature_tags): + if self.table.FeatureList: + feature_indices = \ + [i for i,f in enumerate(self.table.FeatureList.FeatureRecord) + if f.FeatureTag in feature_tags] + self.table.FeatureList.subset_features(feature_indices) + if getattr(self.table, 'FeatureVariations', None): + self.table.FeatureVariations.subset_features(feature_indices) + else: + feature_indices = [] + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts()) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_features(self): + """Remove unreferenced features""" + if self.table.ScriptList: + feature_indices = self.table.ScriptList.collect_features() + else: + feature_indices = [] + if self.table.FeatureList: + self.table.FeatureList.subset_features(feature_indices) + if getattr(self.table, 'FeatureVariations', None): + self.table.FeatureVariations.subset_features(feature_indices) + if self.table.ScriptList: + self.table.ScriptList.subset_features(feature_indices, self.retain_empty_scripts()) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_pre_subset(self, font, options): + # Drop undesired features + if '*' not in options.layout_features: + self.subset_feature_tags(options.layout_features) + # Neuter unreferenced lookups + self.prune_lookups(remap=False) + return True + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def 
remove_redundant_langsys(self): + table = self.table + if not table.ScriptList or not table.FeatureList: + return + + features = table.FeatureList.FeatureRecord + + for s in table.ScriptList.ScriptRecord: + d = s.Script.DefaultLangSys + if not d: + continue + for lr in s.Script.LangSysRecord[:]: + l = lr.LangSys + # Compare d and l + if len(d.FeatureIndex) != len(l.FeatureIndex): + continue + if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535): + continue + + if d.ReqFeatureIndex != 65535: + if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]: + continue + + for i in range(len(d.FeatureIndex)): + if features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]: + break + else: + # LangSys and default are equal; delete LangSys + s.Script.LangSysRecord.remove(lr) + +@_add_method(ttLib.getTableClass('GSUB'), + ttLib.getTableClass('GPOS')) +def prune_post_subset(self, options): + table = self.table + + self.prune_lookups() # XXX Is this actually needed?! + + if table.LookupList: + table.LookupList.prune_post_subset(options) + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. + #if not table.LookupList.Lookup: + # table.LookupList = None + + if not table.LookupList: + table.FeatureList = None + + + if table.FeatureList: + self.remove_redundant_langsys() + # Remove unreferenced features + self.prune_features() + + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. + #if table.FeatureList and not table.FeatureList.FeatureRecord: + # table.FeatureList = None + + # Never drop scripts themselves as them just being available + # holds semantic significance. + # XXX Next two lines disabled because OTS is stupid and + # doesn't like NULL offsets here. 
+ #if table.ScriptList and not table.ScriptList.ScriptRecord: + # table.ScriptList = None + + if not table.FeatureList and hasattr(table, 'FeatureVariations'): + table.FeatureVariations = None + + if hasattr(table, 'FeatureVariations') and not table.FeatureVariations: + if table.Version == 0x00010001: + table.Version = 0x00010000 + + return True + +@_add_method(ttLib.getTableClass('GDEF')) +def subset_glyphs(self, s): + glyphs = s.glyphs_gsubed + table = self.table + if table.LigCaretList: + indices = table.LigCaretList.Coverage.subset(glyphs) + table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i] for i in indices] + table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph) + if table.MarkAttachClassDef: + table.MarkAttachClassDef.classDefs = \ + {g:v for g,v in table.MarkAttachClassDef.classDefs.items() + if g in glyphs} + if table.GlyphClassDef: + table.GlyphClassDef.classDefs = \ + {g:v for g,v in table.GlyphClassDef.classDefs.items() + if g in glyphs} + if table.AttachList: + indices = table.AttachList.Coverage.subset(glyphs) + GlyphCount = table.AttachList.GlyphCount + table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i] + for i in indices if i < GlyphCount] + table.AttachList.GlyphCount = len(table.AttachList.AttachPoint) + if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef: + for coverage in table.MarkGlyphSetsDef.Coverage: + coverage.subset(glyphs) + # TODO: The following is disabled. If enabling, we need to go fixup all + # lookups that use MarkFilteringSet and map their set. 
+ # indices = table.MarkGlyphSetsDef.Coverage = \ + # [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs] + return True + +@_add_method(ttLib.getTableClass('GDEF')) +def prune_post_subset(self, options): + table = self.table + # XXX check these against OTS + if table.LigCaretList and not table.LigCaretList.LigGlyphCount: + table.LigCaretList = None + if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs: + table.MarkAttachClassDef = None + if table.GlyphClassDef and not table.GlyphClassDef.classDefs: + table.GlyphClassDef = None + if table.AttachList and not table.AttachList.GlyphCount: + table.AttachList = None + if (hasattr(table, "MarkGlyphSetsDef") and + table.MarkGlyphSetsDef and + not table.MarkGlyphSetsDef.Coverage): + table.MarkGlyphSetsDef = None + if table.Version == 0x00010002: + table.Version = 0x00010000 + return bool(table.LigCaretList or + table.MarkAttachClassDef or + table.GlyphClassDef or + table.AttachList or + (table.Version >= 0x00010002 and table.MarkGlyphSetsDef)) + +@_add_method(ttLib.getTableClass('kern')) +def prune_pre_subset(self, font, options): + # Prune unknown kern table types + self.kernTables = [t for t in self.kernTables if hasattr(t, 'kernTable')] + return bool(self.kernTables) + +@_add_method(ttLib.getTableClass('kern')) +def subset_glyphs(self, s): + glyphs = s.glyphs_gsubed + for t in self.kernTables: + t.kernTable = {(a,b):v for (a,b),v in t.kernTable.items() + if a in glyphs and b in glyphs} + self.kernTables = [t for t in self.kernTables if t.kernTable] + return bool(self.kernTables) + +@_add_method(ttLib.getTableClass('vmtx')) +def subset_glyphs(self, s): + self.metrics = _dict_subset(self.metrics, s.glyphs) + return bool(self.metrics) + +@_add_method(ttLib.getTableClass('hmtx')) +def subset_glyphs(self, s): + self.metrics = _dict_subset(self.metrics, s.glyphs) + return True # Required table + +@_add_method(ttLib.getTableClass('hdmx')) +def subset_glyphs(self, s): + self.hdmx = {sz:_dict_subset(l, 
s.glyphs) for sz,l in self.hdmx.items()} + return bool(self.hdmx) + +@_add_method(ttLib.getTableClass('ankr')) +def subset_glyphs(self, s): + table = self.table.AnchorPoints + assert table.Format == 0, "unknown 'ankr' format %s" % table.Format + table.Anchors = {glyph: table.Anchors[glyph] for glyph in s.glyphs + if glyph in table.Anchors} + return len(table.Anchors) > 0 + +@_add_method(ttLib.getTableClass('bsln')) +def closure_glyphs(self, s): + table = self.table.Baseline + if table.Format in (2, 3): + s.glyphs.add(table.StandardGlyph) + +@_add_method(ttLib.getTableClass('bsln')) +def subset_glyphs(self, s): + table = self.table.Baseline + if table.Format in (1, 3): + baselines = {glyph: table.BaselineValues.get(glyph, table.DefaultBaseline) + for glyph in s.glyphs} + if len(baselines) > 0: + mostCommon, _cnt = Counter(baselines.values()).most_common(1)[0] + table.DefaultBaseline = mostCommon + baselines = {glyph: b for glyph, b in baselines.items() + if b != mostCommon} + if len(baselines) > 0: + table.BaselineValues = baselines + else: + table.Format = {1: 0, 3: 2}[table.Format] + del table.BaselineValues + return True + +@_add_method(ttLib.getTableClass('lcar')) +def subset_glyphs(self, s): + table = self.table.LigatureCarets + if table.Format in (0, 1): + table.Carets = {glyph: table.Carets[glyph] for glyph in s.glyphs + if glyph in table.Carets} + return len(table.Carets) > 0 + else: + assert False, "unknown 'lcar' format %s" % table.Format + +@_add_method(ttLib.getTableClass('gvar')) +def prune_pre_subset(self, font, options): + if options.notdef_glyph and not options.notdef_outline: + self.variations[font.glyphOrder[0]] = [] + return True + +@_add_method(ttLib.getTableClass('gvar')) +def subset_glyphs(self, s): + self.variations = _dict_subset(self.variations, s.glyphs) + self.glyphCount = len(self.variations) + return bool(self.variations) + +@_add_method(ttLib.getTableClass('VORG')) +def subset_glyphs(self, s): + self.VOriginRecords = {g:v for g,v in 
self.VOriginRecords.items() + if g in s.glyphs} + self.numVertOriginYMetrics = len(self.VOriginRecords) + return True # Never drop; has default metrics + +@_add_method(ttLib.getTableClass('opbd')) +def subset_glyphs(self, s): + table = self.table.OpticalBounds + if table.Format == 0: + table.OpticalBoundsDeltas = {glyph: table.OpticalBoundsDeltas[glyph] + for glyph in s.glyphs + if glyph in table.OpticalBoundsDeltas} + return len(table.OpticalBoundsDeltas) > 0 + elif table.Format == 1: + table.OpticalBoundsPoints = {glyph: table.OpticalBoundsPoints[glyph] + for glyph in s.glyphs + if glyph in table.OpticalBoundsPoints} + return len(table.OpticalBoundsPoints) > 0 + else: + assert False, "unknown 'opbd' format %s" % table.Format + +@_add_method(ttLib.getTableClass('post')) +def prune_pre_subset(self, font, options): + if not options.glyph_names: + self.formatType = 3.0 + return True # Required table + +@_add_method(ttLib.getTableClass('post')) +def subset_glyphs(self, s): + self.extraNames = [] # This seems to do it + return True # Required table + +@_add_method(ttLib.getTableClass('prop')) +def subset_glyphs(self, s): + prop = self.table.GlyphProperties + if prop.Format == 0: + return prop.DefaultProperties != 0 + elif prop.Format == 1: + prop.Properties = {g: prop.Properties.get(g, prop.DefaultProperties) + for g in s.glyphs} + mostCommon, _cnt = Counter(prop.Properties.values()).most_common(1)[0] + prop.DefaultProperties = mostCommon + prop.Properties = {g: prop for g, prop in prop.Properties.items() + if prop != mostCommon} + if len(prop.Properties) == 0: + del prop.Properties + prop.Format = 0 + return prop.DefaultProperties != 0 + return True + else: + assert False, "unknown 'prop' format %s" % prop.Format + +@_add_method(ttLib.getTableClass('COLR')) +def closure_glyphs(self, s): + decompose = s.glyphs + while decompose: + layers = set() + for g in decompose: + for l in self.ColorLayers.get(g, []): + layers.add(l.name) + layers -= s.glyphs + 
s.glyphs.update(layers) + decompose = layers + +@_add_method(ttLib.getTableClass('COLR')) +def subset_glyphs(self, s): + self.ColorLayers = {g: self.ColorLayers[g] for g in s.glyphs if g in self.ColorLayers} + return bool(self.ColorLayers) + +# TODO: prune unused palettes +@_add_method(ttLib.getTableClass('CPAL')) +def prune_post_subset(self, options): + return True + +@_add_method(otTables.MathGlyphConstruction) +def closure_glyphs(self, glyphs): + variants = set() + for v in self.MathGlyphVariantRecord: + variants.add(v.VariantGlyph) + if self.GlyphAssembly: + for p in self.GlyphAssembly.PartRecords: + variants.add(p.glyph) + return variants + +@_add_method(otTables.MathVariants) +def closure_glyphs(self, s): + glyphs = frozenset(s.glyphs) + variants = set() + + if self.VertGlyphCoverage: + indices = self.VertGlyphCoverage.intersect(glyphs) + for i in indices: + variants.update(self.VertGlyphConstruction[i].closure_glyphs(glyphs)) + + if self.HorizGlyphCoverage: + indices = self.HorizGlyphCoverage.intersect(glyphs) + for i in indices: + variants.update(self.HorizGlyphConstruction[i].closure_glyphs(glyphs)) + + s.glyphs.update(variants) + +@_add_method(ttLib.getTableClass('MATH')) +def closure_glyphs(self, s): + self.table.MathVariants.closure_glyphs(s) + +@_add_method(otTables.MathItalicsCorrectionInfo) +def subset_glyphs(self, s): + indices = self.Coverage.subset(s.glyphs) + self.ItalicsCorrection = [self.ItalicsCorrection[i] for i in indices] + self.ItalicsCorrectionCount = len(self.ItalicsCorrection) + return bool(self.ItalicsCorrectionCount) + +@_add_method(otTables.MathTopAccentAttachment) +def subset_glyphs(self, s): + indices = self.TopAccentCoverage.subset(s.glyphs) + self.TopAccentAttachment = [self.TopAccentAttachment[i] for i in indices] + self.TopAccentAttachmentCount = len(self.TopAccentAttachment) + return bool(self.TopAccentAttachmentCount) + +@_add_method(otTables.MathKernInfo) +def subset_glyphs(self, s): + indices = 
self.MathKernCoverage.subset(s.glyphs) + self.MathKernInfoRecords = [self.MathKernInfoRecords[i] for i in indices] + self.MathKernCount = len(self.MathKernInfoRecords) + return bool(self.MathKernCount) + +@_add_method(otTables.MathGlyphInfo) +def subset_glyphs(self, s): + if self.MathItalicsCorrectionInfo: + self.MathItalicsCorrectionInfo.subset_glyphs(s) + if self.MathTopAccentAttachment: + self.MathTopAccentAttachment.subset_glyphs(s) + if self.MathKernInfo: + self.MathKernInfo.subset_glyphs(s) + if self.ExtendedShapeCoverage: + self.ExtendedShapeCoverage.subset(s.glyphs) + return True + +@_add_method(otTables.MathVariants) +def subset_glyphs(self, s): + if self.VertGlyphCoverage: + indices = self.VertGlyphCoverage.subset(s.glyphs) + self.VertGlyphConstruction = [self.VertGlyphConstruction[i] for i in indices] + self.VertGlyphCount = len(self.VertGlyphConstruction) + + if self.HorizGlyphCoverage: + indices = self.HorizGlyphCoverage.subset(s.glyphs) + self.HorizGlyphConstruction = [self.HorizGlyphConstruction[i] for i in indices] + self.HorizGlyphCount = len(self.HorizGlyphConstruction) + + return True + +@_add_method(ttLib.getTableClass('MATH')) +def subset_glyphs(self, s): + s.glyphs = s.glyphs_mathed + self.table.MathGlyphInfo.subset_glyphs(s) + self.table.MathVariants.subset_glyphs(s) + return True + +@_add_method(ttLib.getTableModule('glyf').Glyph) +def remapComponentsFast(self, indices): + if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: + return # Not composite + data = array.array("B", self.data) + i = 10 + more = 1 + while more: + flags =(data[i] << 8) | data[i+1] + glyphID =(data[i+2] << 8) | data[i+3] + # Remap + glyphID = indices.index(glyphID) + data[i+2] = glyphID >> 8 + data[i+3] = glyphID & 0xFF + i += 4 + flags = int(flags) + + if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS + else: i += 2 + if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE + elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE + elif flags & 0x0080: i += 8 # 
WE_HAVE_A_TWO_BY_TWO + more = flags & 0x0020 # MORE_COMPONENTS + + self.data = data.tostring() + +@_add_method(ttLib.getTableClass('glyf')) +def closure_glyphs(self, s): + decompose = s.glyphs + while decompose: + components = set() + for g in decompose: + if g not in self.glyphs: + continue + gl = self.glyphs[g] + for c in gl.getComponentNames(self): + components.add(c) + components -= s.glyphs + s.glyphs.update(components) + decompose = components + +@_add_method(ttLib.getTableClass('glyf')) +def prune_pre_subset(self, font, options): + if options.notdef_glyph and not options.notdef_outline: + g = self[self.glyphOrder[0]] + # Yay, easy! + g.__dict__.clear() + g.data = "" + return True + +@_add_method(ttLib.getTableClass('glyf')) +def subset_glyphs(self, s): + self.glyphs = _dict_subset(self.glyphs, s.glyphs) + indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs] + for v in self.glyphs.values(): + if hasattr(v, "data"): + v.remapComponentsFast(indices) + else: + pass # No need + self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs] + # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset. 
+ return True + +@_add_method(ttLib.getTableClass('glyf')) +def prune_post_subset(self, options): + remove_hinting = not options.hinting + for v in self.glyphs.values(): + v.trim(remove_hinting=remove_hinting) + return True + +@_add_method(ttLib.getTableClass('CFF ')) +def prune_pre_subset(self, font, options): + cff = self.cff + # CFF table must have one font only + cff.fontNames = cff.fontNames[:1] + + if options.notdef_glyph and not options.notdef_outline: + for fontname in cff.keys(): + font = cff[fontname] + c, fdSelectIndex = font.CharStrings.getItemAndSelector('.notdef') + if hasattr(font, 'FDArray') and font.FDArray is not None: + private = font.FDArray[fdSelectIndex].Private + else: + private = font.Private + dfltWdX = private.defaultWidthX + nmnlWdX = private.nominalWidthX + pen = NullPen() + c.draw(pen) # this will set the charstring's width + if c.width != dfltWdX: + c.program = [c.width - nmnlWdX, 'endchar'] + else: + c.program = ['endchar'] + + # Clear useless Encoding + for fontname in cff.keys(): + font = cff[fontname] + # https://github.com/behdad/fonttools/issues/620 + font.Encoding = "StandardEncoding" + + return True # bool(cff.fontNames) + +@_add_method(ttLib.getTableClass('CFF ')) +def subset_glyphs(self, s): + cff = self.cff + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + + # Load all glyphs + for g in font.charset: + if g not in s.glyphs: continue + c, _ = cs.getItemAndSelector(g) + + if cs.charStringsAreIndexed: + indices = [i for i,g in enumerate(font.charset) if g in s.glyphs] + csi = cs.charStringsIndex + csi.items = [csi.items[i] for i in indices] + del csi.file, csi.offsets + if hasattr(font, "FDSelect"): + sel = font.FDSelect + # XXX We want to set sel.format to None, such that the + # most compact format is selected. However, OTS was + # broken and couldn't parse a FDSelect format 0 that + # happened before CharStrings. 
As such, always force + # format 3 until we fix cffLib to always generate + # FDSelect after CharStrings. + # https://github.com/khaledhosny/ots/pull/31 + #sel.format = None + sel.format = 3 + sel.gidArray = [sel.gidArray[i] for i in indices] + cs.charStrings = {g:indices.index(v) + for g,v in cs.charStrings.items() + if g in s.glyphs} + else: + cs.charStrings = {g:v + for g,v in cs.charStrings.items() + if g in s.glyphs} + font.charset = [g for g in font.charset if g in s.glyphs] + font.numGlyphs = len(font.charset) + + return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) + +@_add_method(psCharStrings.T2CharString) +def subset_subroutines(self, subrs, gsubrs): + p = self.program + assert len(p) + for i in range(1, len(p)): + if p[i] == 'callsubr': + assert isinstance(p[i-1], int) + p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias + elif p[i] == 'callgsubr': + assert isinstance(p[i-1], int) + p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias + +@_add_method(psCharStrings.T2CharString) +def drop_hints(self): + hints = self._hints + + if hints.deletions: + p = self.program + for idx in reversed(hints.deletions): + del p[idx-2:idx] + + if hints.has_hint: + assert not hints.deletions or hints.last_hint <= hints.deletions[0] + self.program = self.program[hints.last_hint:] + if hasattr(self, 'width'): + # Insert width back if needed + if self.width != self.private.defaultWidthX: + self.program.insert(0, self.width - self.private.nominalWidthX) + + if hints.has_hintmask: + i = 0 + p = self.program + while i < len(p): + if p[i] in ['hintmask', 'cntrmask']: + assert i + 1 <= len(p) + del p[i:i+2] + continue + i += 1 + + assert len(self.program) + + del self._hints + +class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs): + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + for subrs in [localSubrs, globalSubrs]: + if subrs and 
not hasattr(subrs, "_used"): + subrs._used = set() + + def op_callsubr(self, index): + self.localSubrs._used.add(self.operandStack[-1]+self.localBias) + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + + def op_callgsubr(self, index): + self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias) + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + +class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor): + + class Hints(object): + def __init__(self): + # Whether calling this charstring produces any hint stems + # Note that if a charstring starts with hintmask, it will + # have has_hint set to True, because it *might* produce an + # implicit vstem if called under certain conditions. + self.has_hint = False + # Index to start at to drop all hints + self.last_hint = 0 + # Index up to which we know more hints are possible. + # Only relevant if status is 0 or 1. + self.last_checked = 0 + # The status means: + # 0: after dropping hints, this charstring is empty + # 1: after dropping hints, there may be more hints + # continuing after this + # 2: no more hints possible after this charstring + self.status = 0 + # Has hintmask instructions; not recursive + self.has_hintmask = False + # List of indices of calls to empty subroutines to remove. + self.deletions = [] + pass + + def __init__(self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX): + self._css = css + psCharStrings.T2WidthExtractor.__init__( + self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX) + + def execute(self, charString): + old_hints = charString._hints if hasattr(charString, '_hints') else None + charString._hints = self.Hints() + + psCharStrings.T2WidthExtractor.execute(self, charString) + + hints = charString._hints + + if hints.has_hint or hints.has_hintmask: + self._css.add(charString) + + if hints.status != 2: + # Check from last_check, make sure we didn't have any operators. 
+ for i in range(hints.last_checked, len(charString.program) - 1): + if isinstance(charString.program[i], str): + hints.status = 2 + break + else: + hints.status = 1 # There's *something* here + hints.last_checked = len(charString.program) + + if old_hints: + assert hints.__dict__ == old_hints.__dict__ + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.T2WidthExtractor.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.T2WidthExtractor.op_callgsubr(self, index) + self.processSubr(index, subr) + + def op_hstem(self, index): + psCharStrings.T2WidthExtractor.op_hstem(self, index) + self.processHint(index) + def op_vstem(self, index): + psCharStrings.T2WidthExtractor.op_vstem(self, index) + self.processHint(index) + def op_hstemhm(self, index): + psCharStrings.T2WidthExtractor.op_hstemhm(self, index) + self.processHint(index) + def op_vstemhm(self, index): + psCharStrings.T2WidthExtractor.op_vstemhm(self, index) + self.processHint(index) + def op_hintmask(self, index): + rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index) + self.processHintmask(index) + return rv + def op_cntrmask(self, index): + rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index) + self.processHintmask(index) + return rv + + def processHintmask(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hintmask = True + if hints.status != 2: + # Check from last_check, see if we may be an implicit vstem + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + else: + # We are an implicit vstem + hints.has_hint = True + hints.last_hint = index + 1 + hints.status = 0 + hints.last_checked = index + 1 + + def processHint(self, index): + cs = self.callingStack[-1] + hints = cs._hints + hints.has_hint = True + hints.last_hint = index + 
hints.last_checked = index + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + hints = cs._hints + subr_hints = subr._hints + + # Check from last_check, make sure we didn't have + # any operators. + if hints.status != 2: + for i in range(hints.last_checked, index - 1): + if isinstance(cs.program[i], str): + hints.status = 2 + break + hints.last_checked = index + + if hints.status != 2: + if subr_hints.has_hint: + hints.has_hint = True + + # Decide where to chop off from + if subr_hints.status == 0: + hints.last_hint = index + else: + hints.last_hint = index - 2 # Leave the subr call in + elif subr_hints.status == 0: + hints.deletions.append(index) + + hints.status = max(hints.status, subr_hints.status) + +class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler): + + def __init__(self, localSubrs, globalSubrs): + psCharStrings.SimpleT2Decompiler.__init__(self, + localSubrs, + globalSubrs) + + def execute(self, charString): + # Note: Currently we recompute _desubroutinized each time. + # This is more robust in some cases, but in other places we assume + # that each subroutine always expands to the same code, so + # maybe it doesn't matter. To speed up we can just not + # recompute _desubroutinized if it's there. For now I just + # double-check that it desubroutinized to the same thing. 
+ old_desubroutinized = charString._desubroutinized if hasattr(charString, '_desubroutinized') else None + + charString._patches = [] + psCharStrings.SimpleT2Decompiler.execute(self, charString) + desubroutinized = charString.program[:] + for idx,expansion in reversed (charString._patches): + assert idx >= 2 + assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1] + assert type(desubroutinized[idx - 2]) == int + if expansion[-1] == 'return': + expansion = expansion[:-1] + desubroutinized[idx-2:idx] = expansion + if 'endchar' in desubroutinized: + # Cut off after first endchar + desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1] + else: + if not len(desubroutinized) or desubroutinized[-1] != 'return': + desubroutinized.append('return') + + charString._desubroutinized = desubroutinized + del charString._patches + + if old_desubroutinized: + assert desubroutinized == old_desubroutinized + + def op_callsubr(self, index): + subr = self.localSubrs[self.operandStack[-1]+self.localBias] + psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) + self.processSubr(index, subr) + + def op_callgsubr(self, index): + subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] + psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) + self.processSubr(index, subr) + + def processSubr(self, index, subr): + cs = self.callingStack[-1] + cs._patches.append((index, subr._desubroutinized)) + + +@_add_method(ttLib.getTableClass('CFF ')) +def prune_post_subset(self, options): + cff = self.cff + for fontname in cff.keys(): + font = cff[fontname] + cs = font.CharStrings + + # Drop unused FontDictionaries + if hasattr(font, "FDSelect"): + sel = font.FDSelect + indices = _uniq_sort(sel.gidArray) + sel.gidArray = [indices.index (ss) for ss in sel.gidArray] + arr = font.FDArray + arr.items = [arr[i] for i in indices] + del arr.file, arr.offsets + + # Desubroutinize if asked for + if options.desubroutinize: + for g in font.charset: 
+ c, _ = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs) + decompiler.execute(c) + c.program = c._desubroutinized + + # Drop hints if not needed + if not options.hinting: + + # This can be tricky, but doesn't have to. What we do is: + # + # - Run all used glyph charstrings and recurse into subroutines, + # - For each charstring (including subroutines), if it has any + # of the hint stem operators, we mark it as such. + # Upon returning, for each charstring we note all the + # subroutine calls it makes that (recursively) contain a stem, + # - Dropping hinting then consists of the following two ops: + # * Drop the piece of the program in each charstring before the + # last call to a stem op or a stem-calling subroutine, + # * Drop all hintmask operations. + # - It's trickier... A hintmask right after hints and a few numbers + # will act as an implicit vstemhm. As such, we track whether + # we have seen any non-hint operators so far and do the right + # thing, recursively... 
Good luck understanding that :( + css = set() + for g in font.charset: + c, _ = cs.getItemAndSelector(g) + c.decompile() + subrs = getattr(c.private, "Subrs", []) + decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs, + c.private.nominalWidthX, + c.private.defaultWidthX) + decompiler.execute(c) + c.width = decompiler.width + for charstring in css: + charstring.drop_hints() + del css + + # Drop font-wide hinting values + all_privs = [] + if hasattr(font, 'FDSelect'): + all_privs.extend(fd.Private for fd in font.FDArray) + else: + all_privs.append(font.Private) + for priv in all_privs: + for k in ['BlueValues', 'OtherBlues', + 'FamilyBlues', 'FamilyOtherBlues', + 'BlueScale', 'BlueShift', 'BlueFuzz', + 'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW']: + if hasattr(priv, k): + setattr(priv, k, None) + + # Renumber subroutines to remove unused ones + + # Mark all used subroutines + for g in font.charset: + c, _ = cs.getItemAndSelector(g) + subrs = getattr(c.private, "Subrs", []) + decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs) + decompiler.execute(c) + + all_subrs = [font.GlobalSubrs] + if hasattr(font, 'FDSelect'): + all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs) + elif hasattr(font.Private, 'Subrs') and font.Private.Subrs: + all_subrs.append(font.Private.Subrs) + + subrs = set(subrs) # Remove duplicates + + # Prepare + for subrs in all_subrs: + if not hasattr(subrs, '_used'): + subrs._used = set() + subrs._used = _uniq_sort(subrs._used) + subrs._old_bias = psCharStrings.calcSubrBias(subrs) + subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) + + # Renumber glyph charstrings + for g in font.charset: + c, _ = cs.getItemAndSelector(g) + subrs = getattr(c.private, "Subrs", []) + c.subset_subroutines (subrs, font.GlobalSubrs) + + # Renumber subroutines themselves + for subrs in all_subrs: + if subrs == font.GlobalSubrs: + if not hasattr(font, 'FDSelect') and hasattr(font.Private, 
'Subrs'): + local_subrs = font.Private.Subrs + else: + local_subrs = [] + else: + local_subrs = subrs + + subrs.items = [subrs.items[i] for i in subrs._used] + if hasattr(subrs, 'file'): + del subrs.file + if hasattr(subrs, 'offsets'): + del subrs.offsets + + for subr in subrs.items: + subr.subset_subroutines (local_subrs, font.GlobalSubrs) + + # Delete local SubrsIndex if empty + if hasattr(font, 'FDSelect'): + for fd in font.FDArray: + _delete_empty_subrs(fd.Private) + else: + _delete_empty_subrs(font.Private) + + # Cleanup + for subrs in all_subrs: + del subrs._used, subrs._old_bias, subrs._new_bias + + return True + + +def _delete_empty_subrs(private_dict): + if hasattr(private_dict, 'Subrs') and not private_dict.Subrs: + if 'Subrs' in private_dict.rawDict: + del private_dict.rawDict['Subrs'] + del private_dict.Subrs + + +@_add_method(ttLib.getTableClass('cmap')) +def closure_glyphs(self, s): + tables = [t for t in self.tables if t.isUnicode()] + + # Close glyphs + for table in tables: + if table.format == 14: + for cmap in table.uvsDict.values(): + glyphs = {g for u,g in cmap if u in s.unicodes_requested} + if None in glyphs: + glyphs.remove(None) + s.glyphs.update(glyphs) + else: + cmap = table.cmap + intersection = s.unicodes_requested.intersection(cmap.keys()) + s.glyphs.update(cmap[u] for u in intersection) + + # Calculate unicodes_missing + s.unicodes_missing = s.unicodes_requested.copy() + for table in tables: + s.unicodes_missing.difference_update(table.cmap) + +@_add_method(ttLib.getTableClass('cmap')) +def prune_pre_subset(self, font, options): + if not options.legacy_cmap: + # Drop non-Unicode / non-Symbol cmaps + self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()] + if not options.symbol_cmap: + self.tables = [t for t in self.tables if not t.isSymbol()] + # TODO(behdad) Only keep one subtable? + # For now, drop format=0 which can't be subset_glyphs easily? 
+ self.tables = [t for t in self.tables if t.format != 0] + self.numSubTables = len(self.tables) + return True # Required table + +@_add_method(ttLib.getTableClass('cmap')) +def subset_glyphs(self, s): + s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only + for t in self.tables: + if t.format == 14: + # TODO(behdad) We drop all the default-UVS mappings + # for glyphs_requested. So it's the caller's responsibility to make + # sure those are included. + t.uvsDict = {v:[(u,g) for u,g in l + if g in s.glyphs_requested or u in s.unicodes_requested] + for v,l in t.uvsDict.items()} + t.uvsDict = {v:l for v,l in t.uvsDict.items() if l} + elif t.isUnicode(): + t.cmap = {u:g for u,g in t.cmap.items() + if g in s.glyphs_requested or u in s.unicodes_requested} + else: + t.cmap = {u:g for u,g in t.cmap.items() + if g in s.glyphs_requested} + self.tables = [t for t in self.tables + if (t.cmap if t.format != 14 else t.uvsDict)] + self.numSubTables = len(self.tables) + # TODO(behdad) Convert formats when needed. + # In particular, if we have a format=12 without non-BMP + # characters, either drop format=12 one or convert it + # to format=4 if there's not one. 
+ return True # Required table + +@_add_method(ttLib.getTableClass('DSIG')) +def prune_pre_subset(self, font, options): + # Drop all signatures since they will be invalid + self.usNumSigs = 0 + self.signatureRecords = [] + return True + +@_add_method(ttLib.getTableClass('maxp')) +def prune_pre_subset(self, font, options): + if not options.hinting: + if self.tableVersion == 0x00010000: + self.maxZones = 1 + self.maxTwilightPoints = 0 + self.maxStorage = 0 + self.maxFunctionDefs = 0 + self.maxInstructionDefs = 0 + self.maxStackElements = 0 + self.maxSizeOfInstructions = 0 + return True + +@_add_method(ttLib.getTableClass('name')) +def prune_pre_subset(self, font, options): + nameIDs = set(options.name_IDs) + fvar = font.get('fvar') + if fvar: + nameIDs.update([axis.axisNameID for axis in fvar.axes]) + nameIDs.update([inst.subfamilyNameID for inst in fvar.instances]) + nameIDs.update([inst.postscriptNameID for inst in fvar.instances + if inst.postscriptNameID != 0xFFFF]) + if '*' not in options.name_IDs: + self.names = [n for n in self.names if n.nameID in nameIDs] + if not options.name_legacy: + # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman + # entry for Latin and no Unicode names. + self.names = [n for n in self.names if n.isUnicode()] + # TODO(behdad) Option to keep only one platform's + if '*' not in options.name_languages: + # TODO(behdad) This is Windows-platform specific! + self.names = [n for n in self.names + if n.langID in options.name_languages] + if options.obfuscate_names: + namerecs = [] + for n in self.names: + if n.nameID in [1, 4]: + n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f" + elif n.nameID in [2, 6]: + n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f" + elif n.nameID == 3: + n.string = "" + elif n.nameID in [16, 17, 18]: + continue + namerecs.append(n) + self.names = namerecs + return True # Required table + + +# TODO(behdad) OS/2 ulCodePageRange? +# TODO(behdad) Drop AAT tables. 
+# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries. +# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left +# TODO(behdad) Drop GDEF subitems if unused by lookups +# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF) +# TODO(behdad) Text direction considerations. +# TODO(behdad) Text script / language considerations. +# TODO(behdad) Optionally drop 'kern' table if GPOS available +# TODO(behdad) Implement --unicode='*' to choose all cmap'ed +# TODO(behdad) Drop old-spec Indic scripts + + +class Options(object): + + class OptionError(Exception): pass + class UnknownOptionError(OptionError): pass + + # spaces in tag names (e.g. "SVG ", "cvt ") are stripped by the argument parser + _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', + 'EBSC', 'SVG', 'PCLT', 'LTSH'] + _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite + _drop_tables_default += ['sbix'] # Color + _no_subset_tables_default = ['avar', 'fvar', + 'gasp', 'head', 'hhea', 'maxp', + 'vhea', 'OS/2', 'loca', 'name', 'cvt', + 'fpgm', 'prep', 'VDMX', 'DSIG', 'CPAL', + 'MVAR', 'STAT'] + _hinting_tables_default = ['cvar', 'cvt', 'fpgm', 'prep', 'hdmx', 'VDMX'] + + # Based on HarfBuzz shapers + _layout_features_groups = { + # Default shaper + 'common': ['rvrn', 'ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'], + 'fractions': ['frac', 'numr', 'dnom'], + 'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'], + 'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'], + 'ltr': ['ltra', 'ltrm'], + 'rtl': ['rtla', 'rtlm'], + # Complex shapers + 'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3', + 'cswh', 'mset', 'stch'], + 'hangul': ['ljmo', 'vjmo', 'tjmo'], + 'tibetan': ['abvs', 'blws', 'abvm', 'blwm'], + 'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half', + 'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres', + 'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'], + } + _layout_features_default = _uniq_sort(sum( + 
iter(_layout_features_groups.values()), [])) + + def __init__(self, **kwargs): + + self.drop_tables = self._drop_tables_default[:] + self.no_subset_tables = self._no_subset_tables_default[:] + self.passthrough_tables = False # keep/drop tables we can't subset + self.hinting_tables = self._hinting_tables_default[:] + self.legacy_kern = False # drop 'kern' table if GPOS available + self.layout_features = self._layout_features_default[:] + self.ignore_missing_glyphs = False + self.ignore_missing_unicodes = True + self.hinting = True + self.glyph_names = False + self.legacy_cmap = False + self.symbol_cmap = False + self.name_IDs = [1, 2] # Family and Style + self.name_legacy = False + self.name_languages = [0x0409] # English + self.obfuscate_names = False # to make webfont unusable as a system font + self.notdef_glyph = True # gid0 for TrueType / .notdef for CFF + self.notdef_outline = False # No need for notdef to have an outline really + self.recommended_glyphs = False # gid1, gid2, gid3 for TrueType + self.recalc_bounds = False # Recalculate font bounding boxes + self.recalc_timestamp = False # Recalculate font modified timestamp + self.prune_unicode_ranges = True # Clear unused 'ulUnicodeRange' bits + self.recalc_average_width = False # update 'xAvgCharWidth' + self.canonical_order = None # Order tables as recommended + self.flavor = None # May be 'woff' or 'woff2' + self.with_zopfli = False # use zopfli instead of zlib for WOFF 1.0 + self.desubroutinize = False # Desubroutinize CFF CharStrings + self.verbose = False + self.timing = False + self.xml = False + + self.set(**kwargs) + + def set(self, **kwargs): + for k,v in kwargs.items(): + if not hasattr(self, k): + raise self.UnknownOptionError("Unknown option '%s'" % k) + setattr(self, k, v) + + def parse_opts(self, argv, ignore_unknown=[]): + posargs = [] + passthru_options = [] + for a in argv: + orig_a = a + if not a.startswith('--'): + posargs.append(a) + continue + a = a[2:] + i = a.find('=') + op = '=' + if 
i == -1: + if a.startswith("no-"): + k = a[3:] + if k == "canonical-order": + # reorderTables=None is faster than False (the latter + # still reorders to "keep" the original table order) + v = None + else: + v = False + else: + k = a + v = True + if k.endswith("?"): + k = k[:-1] + v = '?' + else: + k = a[:i] + if k[-1] in "-+": + op = k[-1]+'=' # Op is '-=' or '+=' now. + k = k[:-1] + v = a[i+1:] + ok = k + k = k.replace('-', '_') + if not hasattr(self, k): + if ignore_unknown is True or ok in ignore_unknown: + passthru_options.append(orig_a) + continue + else: + raise self.UnknownOptionError("Unknown option '%s'" % a) + + ov = getattr(self, k) + if v == '?': + print("Current setting for '%s' is: %s" % (ok, ov)) + continue + if isinstance(ov, bool): + v = bool(v) + elif isinstance(ov, int): + v = int(v) + elif isinstance(ov, str): + v = str(v) # redundant + elif isinstance(ov, list): + if isinstance(v, bool): + raise self.OptionError("Option '%s' requires values to be specified using '='" % a) + vv = v.replace(',', ' ').split() + if vv == ['']: + vv = [] + vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] + if op == '=': + v = vv + elif op == '+=': + v = ov + v.extend(vv) + elif op == '-=': + v = ov + for x in vv: + if x in v: + v.remove(x) + else: + assert False + + setattr(self, k, v) + + return posargs + passthru_options + + +class Subsetter(object): + + class SubsettingError(Exception): pass + class MissingGlyphsSubsettingError(SubsettingError): pass + class MissingUnicodesSubsettingError(SubsettingError): pass + + def __init__(self, options=None): + + if not options: + options = Options() + + self.options = options + self.unicodes_requested = set() + self.glyph_names_requested = set() + self.glyph_ids_requested = set() + + def populate(self, glyphs=[], gids=[], unicodes=[], text=""): + self.unicodes_requested.update(unicodes) + if isinstance(text, bytes): + text = text.decode("utf_8") + text_utf32 = text.encode("utf-32-be") + nchars = 
len(text_utf32)//4 + for u in struct.unpack('>%dL' % nchars, text_utf32): + self.unicodes_requested.add(u) + self.glyph_names_requested.update(glyphs) + self.glyph_ids_requested.update(gids) + + def _prune_pre_subset(self, font): + for tag in self._sort_tables(font): + if(tag.strip() in self.options.drop_tables or + (tag.strip() in self.options.hinting_tables and not self.options.hinting) or + (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))): + log.info("%s dropped", tag) + del font[tag] + continue + + clazz = ttLib.getTableClass(tag) + + if hasattr(clazz, 'prune_pre_subset'): + with timer("load '%s'" % tag): + table = font[tag] + with timer("prune '%s'" % tag): + retain = table.prune_pre_subset(font, self.options) + if not retain: + log.info("%s pruned to empty; dropped", tag) + del font[tag] + continue + else: + log.info("%s pruned", tag) + + def _closure_glyphs(self, font): + + realGlyphs = set(font.getGlyphOrder()) + glyph_order = font.getGlyphOrder() + + self.glyphs_requested = set() + self.glyphs_requested.update(self.glyph_names_requested) + self.glyphs_requested.update(glyph_order[i] + for i in self.glyph_ids_requested + if i < len(glyph_order)) + + self.glyphs_missing = set() + self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs)) + self.glyphs_missing.update(i for i in self.glyph_ids_requested + if i >= len(glyph_order)) + if self.glyphs_missing: + log.info("Missing requested glyphs: %s", self.glyphs_missing) + if not self.options.ignore_missing_glyphs: + raise self.MissingGlyphsSubsettingError(self.glyphs_missing) + + self.glyphs = self.glyphs_requested.copy() + + self.unicodes_missing = set() + if 'cmap' in font: + with timer("close glyph list over 'cmap'"): + font['cmap'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + self.glyphs_cmaped = frozenset(self.glyphs) + if self.unicodes_missing: + missing = ["U+%04X" % u for u in self.unicodes_missing] + log.info("Missing glyphs for requested 
Unicodes: %s", missing) + if not self.options.ignore_missing_unicodes: + raise self.MissingUnicodesSubsettingError(missing) + del missing + + if self.options.notdef_glyph: + if 'glyf' in font: + self.glyphs.add(font.getGlyphName(0)) + log.info("Added gid0 to subset") + else: + self.glyphs.add('.notdef') + log.info("Added .notdef to subset") + if self.options.recommended_glyphs: + if 'glyf' in font: + for i in range(min(4, len(font.getGlyphOrder()))): + self.glyphs.add(font.getGlyphName(i)) + log.info("Added first four glyphs to subset") + + if 'GSUB' in font: + with timer("close glyph list over 'GSUB'"): + log.info("Closing glyph list over 'GSUB': %d glyphs before", + len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + font['GSUB'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + log.info("Closed glyph list over 'GSUB': %d glyphs after", + len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + self.glyphs_gsubed = frozenset(self.glyphs) + + if 'MATH' in font: + with timer("close glyph list over 'MATH'"): + log.info("Closing glyph list over 'MATH': %d glyphs before", + len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + font['MATH'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + log.info("Closed glyph list over 'MATH': %d glyphs after", + len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + self.glyphs_mathed = frozenset(self.glyphs) + + for table in ('COLR', 'bsln'): + if table in font: + with timer("close glyph list over '%s'" % table): + log.info("Closing glyph list over '%s': %d glyphs before", + table, len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + font[table].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + log.info("Closed glyph list over '%s': %d glyphs after", + table, len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + + if 'glyf' in font: + with timer("close glyph list over 'glyf'"): + log.info("Closing glyph list over 'glyf': %d glyphs before", + 
len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + font['glyf'].closure_glyphs(self) + self.glyphs.intersection_update(realGlyphs) + log.info("Closed glyph list over 'glyf': %d glyphs after", + len(self.glyphs)) + log.glyphs(self.glyphs, font=font) + self.glyphs_glyfed = frozenset(self.glyphs) + + self.glyphs_all = frozenset(self.glyphs) + + log.info("Retaining %d glyphs", len(self.glyphs_all)) + + del self.glyphs + + def _subset_glyphs(self, font): + for tag in self._sort_tables(font): + clazz = ttLib.getTableClass(tag) + + if tag.strip() in self.options.no_subset_tables: + log.info("%s subsetting not needed", tag) + elif hasattr(clazz, 'subset_glyphs'): + with timer("subset '%s'" % tag): + table = font[tag] + self.glyphs = self.glyphs_all + retain = table.subset_glyphs(self) + del self.glyphs + if not retain: + log.info("%s subsetted to empty; dropped", tag) + del font[tag] + else: + log.info("%s subsetted", tag) + elif self.options.passthrough_tables: + log.info("%s NOT subset; don't know how to subset", tag) + else: + log.info("%s NOT subset; don't know how to subset; dropped", tag) + del font[tag] + + with timer("subset GlyphOrder"): + glyphOrder = font.getGlyphOrder() + glyphOrder = [g for g in glyphOrder if g in self.glyphs_all] + font.setGlyphOrder(glyphOrder) + font._buildReverseGlyphOrderDict() + + def _prune_post_subset(self, font): + for tag in font.keys(): + if tag == 'GlyphOrder': continue + if tag == 'OS/2' and self.options.prune_unicode_ranges: + old_uniranges = font[tag].getUnicodeRanges() + new_uniranges = font[tag].recalcUnicodeRanges(font, pruneOnly=True) + if old_uniranges != new_uniranges: + log.info("%s Unicode ranges pruned: %s", tag, sorted(new_uniranges)) + if self.options.recalc_average_width: + widths = [m[0] for m in font["hmtx"].metrics.values() if m[0] > 0] + avg_width = round(sum(widths) / len(widths)) + if avg_width != font[tag].xAvgCharWidth: + font[tag].xAvgCharWidth = avg_width + log.info("%s xAvgCharWidth updated: %d", 
tag, avg_width) + clazz = ttLib.getTableClass(tag) + if hasattr(clazz, 'prune_post_subset'): + with timer("prune '%s'" % tag): + table = font[tag] + retain = table.prune_post_subset(self.options) + if not retain: + log.info("%s pruned to empty; dropped", tag) + del font[tag] + else: + log.info("%s pruned", tag) + + def _sort_tables(self, font): + tagOrder = ['fvar', 'avar', 'gvar', 'name', 'glyf'] + tagOrder = {t: i + 1 for i, t in enumerate(tagOrder)} + tags = sorted(font.keys(), key=lambda tag: tagOrder.get(tag, 0)) + return [t for t in tags if t != 'GlyphOrder'] + + def subset(self, font): + self._prune_pre_subset(font) + self._closure_glyphs(font) + self._subset_glyphs(font) + self._prune_post_subset(font) + + +@timer("load font") +def load_font(fontFile, + options, + allowVID=False, + checkChecksums=False, + dontLoadGlyphNames=False, + lazy=True): + + font = ttLib.TTFont(fontFile, + allowVID=allowVID, + checkChecksums=checkChecksums, + recalcBBoxes=options.recalc_bounds, + recalcTimestamp=options.recalc_timestamp, + lazy=lazy) + + # Hack: + # + # If we don't need glyph names, change 'post' class to not try to + # load them. It avoid lots of headache with broken fonts as well + # as loading time. + # + # Ideally ttLib should provide a way to ask it to skip loading + # glyph names. But it currently doesn't provide such a thing. 
+ # + if dontLoadGlyphNames: + post = ttLib.getTableClass('post') + saved = post.decode_format_2_0 + post.decode_format_2_0 = post.decode_format_3_0 + f = font['post'] + if f.formatType == 2.0: + f.formatType = 3.0 + post.decode_format_2_0 = saved + + return font + +@timer("compile and save font") +def save_font(font, outfile, options): + if options.flavor and not hasattr(font, 'flavor'): + raise Exception("fonttools version does not support flavors.") + if options.with_zopfli and options.flavor == "woff": + from fontTools.ttLib import sfnt + sfnt.USE_ZOPFLI = True + font.flavor = options.flavor + font.save(outfile, reorderTables=options.canonical_order) + +def parse_unicodes(s): + import re + s = re.sub (r"0[xX]", " ", s) + s = re.sub (r"[<+>,;&#\\xXuU\n ]", " ", s) + l = [] + for item in s.split(): + fields = item.split('-') + if len(fields) == 1: + l.append(int(item, 16)) + else: + start,end = fields + l.extend(range(int(start, 16), int(end, 16)+1)) + return l + +def parse_gids(s): + l = [] + for item in s.replace(',', ' ').split(): + fields = item.split('-') + if len(fields) == 1: + l.append(int(fields[0])) + else: + l.extend(range(int(fields[0]), int(fields[1])+1)) + return l + +def parse_glyphs(s): + return s.replace(',', ' ').split() + +def usage(): + print("usage:", __usage__, file=sys.stderr) + print("Try pyftsubset --help for more information.\n", file=sys.stderr) + +@timer("make one with everything (TOTAL TIME)") +def main(args=None): + from os.path import splitext + from fontTools import configLogger + + if args is None: + args = sys.argv[1:] + + if '--help' in args: + print(__doc__) + return 0 + + options = Options() + try: + args = options.parse_opts(args, + ignore_unknown=['gids', 'gids-file', + 'glyphs', 'glyphs-file', + 'text', 'text-file', + 'unicodes', 'unicodes-file', + 'output-file']) + except options.OptionError as e: + usage() + print("ERROR:", e, file=sys.stderr) + return 2 + + if len(args) < 2: + usage() + return 1 + + 
configLogger(level=logging.INFO if options.verbose else logging.WARNING) + if options.timing: + timer.logger.setLevel(logging.DEBUG) + else: + timer.logger.disabled = True + + fontfile = args[0] + args = args[1:] + + subsetter = Subsetter(options=options) + basename, extension = splitext(fontfile) + outfile = basename + '.subset' + extension + glyphs = [] + gids = [] + unicodes = [] + wildcard_glyphs = False + wildcard_unicodes = False + text = "" + for g in args: + if g == '*': + wildcard_glyphs = True + continue + if g.startswith('--output-file='): + outfile = g[14:] + continue + if g.startswith('--text='): + text += g[7:] + continue + if g.startswith('--text-file='): + text += open(g[12:], encoding='utf-8').read().replace('\n', '') + continue + if g.startswith('--unicodes='): + if g[11:] == '*': + wildcard_unicodes = True + else: + unicodes.extend(parse_unicodes(g[11:])) + continue + if g.startswith('--unicodes-file='): + for line in open(g[16:]).readlines(): + unicodes.extend(parse_unicodes(line.split('#')[0])) + continue + if g.startswith('--gids='): + gids.extend(parse_gids(g[7:])) + continue + if g.startswith('--gids-file='): + for line in open(g[12:]).readlines(): + gids.extend(parse_gids(line.split('#')[0])) + continue + if g.startswith('--glyphs='): + if g[9:] == '*': + wildcard_glyphs = True + else: + glyphs.extend(parse_glyphs(g[9:])) + continue + if g.startswith('--glyphs-file='): + for line in open(g[14:]).readlines(): + glyphs.extend(parse_glyphs(line.split('#')[0])) + continue + glyphs.append(g) + + dontLoadGlyphNames = not options.glyph_names and not glyphs + font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames) + + with timer("compile glyph list"): + if wildcard_glyphs: + glyphs.extend(font.getGlyphOrder()) + if wildcard_unicodes: + for t in font['cmap'].tables: + if t.isUnicode(): + unicodes.extend(t.cmap.keys()) + assert '' not in glyphs + + log.info("Text: '%s'" % text) + log.info("Unicodes: %s", unicodes) + 
log.info("Glyphs: %s", glyphs) + log.info("Gids: %s", gids) + + subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text) + subsetter.subset(font) + + save_font(font, outfile, options) + + if options.verbose: + import os + log.info("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile)) + log.info("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile)) + + if options.xml: + font.saveXML(sys.stdout) + + font.close() + + +__all__ = [ + 'Options', + 'Subsetter', + 'load_font', + 'save_font', + 'parse_gids', + 'parse_glyphs', + 'parse_unicodes', + 'main' +] + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fontTools/subset/__main__.py fonttools-3.21.2/Snippets/fontTools/subset/__main__.py --- fonttools-3.0/Snippets/fontTools/subset/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/subset/__main__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +from fontTools.subset import main + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fontTools/subset.py fonttools-3.21.2/Snippets/fontTools/subset.py --- fonttools-3.0/Snippets/fontTools/subset.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/subset.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,2742 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Behdad Esfahbod - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib -from fontTools.ttLib.tables import otTables -from fontTools.misc import psCharStrings -import sys -import struct -import time -import array - -__usage__ = "pyftsubset font-file [glyph...] [--option=value]..." 
- -__doc__="""\ -pyftsubset -- OpenType font subsetter and optimizer - - pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. - It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) - font file. The subsetted glyph set is based on the specified glyphs - or characters, and specified OpenType layout features. - - The tool also performs some size-reducing optimizations, aimed for using - subset fonts as webfonts. Individual optimizations can be enabled or - disabled, and are enabled by default when they are safe. - -Usage: - """+__usage__+""" - - At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, - --text, --text-file, --unicodes, or --unicodes-file, must be specified. - -Arguments: - font-file - The input font file. - glyph - Specify one or more glyph identifiers to include in the subset. Must be - PS glyph names, or the special string '*' to keep the entire glyph set. - -Initial glyph set specification: - These options populate the initial glyph set. Same option can appear - multiple times, and the results are accummulated. - --gids=[,...] - Specify comma/whitespace-separated list of glyph IDs or ranges as - decimal numbers. For example, --gids=10-12,14 adds glyphs with - numbers 10, 11, 12, and 14. - --gids-file= - Like --gids but reads from a file. Anything after a '#' on any line - is ignored as comments. - --glyphs=[,...] - Specify comma/whitespace-separated PS glyph names to add to the subset. - Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc - that are accepted on the command line. The special string '*' wil keep - the entire glyph set. - --glyphs-file= - Like --glyphs but reads from a file. Anything after a '#' on any line - is ignored as comments. - --text= - Specify characters to include in the subset, as UTF-8 string. - --text-file= - Like --text but reads from a file. Newline character are not added to - the subset. - --unicodes=[,...] 
- Specify comma/whitespace-separated list of Unicode codepoints or - ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. - For example, --unicodes=41-5a,61-7a adds ASCII letters, so does - the more verbose --unicodes=U+0041-005A,U+0061-007A. - The special strings '*' will choose all Unicode characters mapped - by the font. - --unicodes-file= - Like --unicodes, but reads from a file. Anything after a '#' on any - line in the file is ignored as comments. - --ignore-missing-glyphs - Do not fail if some requested glyphs or gids are not available in - the font. - --no-ignore-missing-glyphs - Stop and fail if some requested glyphs or gids are not available - in the font. [default] - --ignore-missing-unicodes [default] - Do not fail if some requested Unicode characters (including those - indirectly specified using --text or --text-file) are not available - in the font. - --no-ignore-missing-unicodes - Stop and fail if some requested Unicode characters are not available - in the font. - Note the default discrepancy between ignoring missing glyphs versus - unicodes. This is for historical reasons and in the future - --no-ignore-missing-unicodes might become default. - -Other options: - For the other options listed below, to see the current value of the option, - pass a value of '?' to it, with or without a '='. - Examples: - $ pyftsubset --glyph-names? - Current setting for 'glyph-names' is: False - $ ./pyftsubset --name-IDs=? - Current setting for 'name-IDs' is: [1, 2] - $ ./pyftsubset --hinting? --no-hinting --hinting? - Current setting for 'hinting' is: True - Current setting for 'hinting' is: False - -Output options: - --output-file= - The output font file. If not specified, the subsetted font - will be saved in as font-file.subset. - --flavor= - Specify flavor of output font file. May be 'woff' or 'woff2'. 
- Note that WOFF2 requires the Brotli Python extension, available - at https://github.com/google/brotli - -Glyph set expansion: - These options control how additional glyphs are added to the subset. - --notdef-glyph - Add the '.notdef' glyph to the subset (ie, keep it). [default] - --no-notdef-glyph - Drop the '.notdef' glyph unless specified in the glyph set. This - saves a few bytes, but is not possible for Postscript-flavored - fonts, as those require '.notdef'. For TrueType-flavored fonts, - this works fine as long as no unsupported glyphs are requested - from the font. - --notdef-outline - Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is - used when glyphs not supported by the font are to be shown. It is not - needed otherwise. - --no-notdef-outline - When including a '.notdef' glyph, remove its outline. This saves - a few bytes. [default] - --recommended-glyphs - Add glyphs 0, 1, 2, and 3 to the subset, as recommended for - TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. - Some legacy software might require this, but no modern system does. - --no-recommended-glyphs - Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in - glyph set. [default] - --layout-features[+|-]=[,...] - Specify (=), add to (+=) or exclude from (-=) the comma-separated - set of OpenType layout feature tags that will be preserved. - Glyph variants used by the preserved features are added to the - specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', - 'kern', 'liga', 'locl', 'mark', 'mkmk', 'rclt', 'rlig' and all features - required for script shaping are preserved. To see the full list, try - '--layout-features=?'. Use '*' to keep all features. - Multiple --layout-features options can be provided if necessary. - Examples: - --layout-features+=onum,pnum,ss01 - * Keep the default set of features and 'onum', 'pnum', 'ss01'. 
- --layout-features-='mark','mkmk' - * Keep the default set of features but drop 'mark' and 'mkmk'. - --layout-features='kern' - * Only keep the 'kern' feature, drop all others. - --layout-features='' - * Drop all features. - --layout-features='*' - * Keep all features. - --layout-features+=aalt --layout-features-=vrt2 - * Keep default set of features plus 'aalt', but drop 'vrt2'. - -Hinting options: - --hinting - Keep hinting [default] - --no-hinting - Drop glyph-specific hinting and font-wide hinting tables, as well - as remove hinting-related bits and pieces from other tables (eg. GPOS). - See --hinting-tables for list of tables that are dropped by default. - Instructions and hints are stripped from 'glyf' and 'CFF ' tables - respectively. This produces (sometimes up to 30%) smaller fonts that - are suitable for extremely high-resolution systems, like high-end - mobile devices and retina displays. - XXX Note: Currently there is a known bug in 'CFF ' hint stripping that - might make the font unusable as a webfont as they will be rejected by - OpenType Sanitizer used in common browsers. For more information see: - https://github.com/behdad/fonttools/issues/144 - The --desubroutinize options works around that bug. - -Optimization options: - --desubroutinize - Remove CFF use of subroutinizes. Subroutinization is a way to make CFF - fonts smaller. For small subsets however, desubroutinizing might make - the font smaller. It has even been reported that desubroutinized CFF - fonts compress better (produce smaller output) WOFF and WOFF2 fonts. - Also see note under --no-hinting. - --no-desubroutinize [default] - Leave CFF subroutinizes as is, only throw away unused subroutinizes. - -Font table options: - --drop-tables[+|-]=
[,
...] - Specify (=), add to (+=) or exclude from (-=) the comma-separated - set of tables that will be be dropped. - By default, the following tables are dropped: - 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH' - and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill' - and color tables: 'CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'. - The tool will attempt to subset the remaining tables. - Examples: - --drop-tables-='SVG ' - * Drop the default set of tables but keep 'SVG '. - --drop-tables+=GSUB - * Drop the default set of tables and 'GSUB'. - --drop-tables=DSIG - * Only drop the 'DSIG' table, keep all others. - --drop-tables= - * Keep all tables. - --no-subset-tables+=
[,
...] - Add to the set of tables that will not be subsetted. - By default, the following tables are included in this list, as - they do not need subsetting (ignore the fact that 'loca' is listed - here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', - 'name', 'cvt ', 'fpgm', 'prep', 'VMDX', and 'DSIG'. Tables that the tool - does not know how to subset and are not specified here will be dropped - from the font. - Example: - --no-subset-tables+=FFTM - * Keep 'FFTM' table in the font by preventing subsetting. - --hinting-tables[-]=
[,
...] - Specify (=), add to (+=) or exclude from (-=) the list of font-wide - hinting tables that will be dropped if --no-hinting is specified, - Examples: - --hinting-tables-='VDMX' - * Drop font-wide hinting tables except 'VDMX'. - --hinting-tables='' - * Keep all font-wide hinting tables (but strip hints from glyphs). - --legacy-kern - Keep TrueType 'kern' table even when OpenType 'GPOS' is available. - --no-legacy-kern - Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default] - -Font naming options: - These options control what is retained in the 'name' table. For numerical - codes, see: http://www.microsoft.com/typography/otspec/name.htm - --name-IDs[+|-]=[,...] - Specify (=), add to (+=) or exclude from (-=) the set of 'name' table - entry nameIDs that will be preserved. By default only nameID 1 (Family) - and nameID 2 (Style) are preserved. Use '*' to keep all entries. - Examples: - --name-IDs+=0,4,6 - * Also keep Copyright, Full name and PostScript name entry. - --name-IDs='' - * Drop all 'name' table entries. - --name-IDs='*' - * keep all 'name' table entries - --name-legacy - Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.). - XXX Note: This might be needed for some fonts that have no Unicode name - entires for English. See: https://github.com/behdad/fonttools/issues/146 - --no-name-legacy - Drop legacy (non-Unicode) 'name' table entries [default] - --name-languages[+|-]=[,] - Specify (=), add to (+=) or exclude from (-=) the set of 'name' table - langIDs that will be preserved. By default only records with langID - 0x0409 (English) are preserved. Use '*' to keep all langIDs. - --obfuscate-names - Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4, - and 6 with dummy strings (it is still fully functional as webfont). - -Glyph naming and encoding options: - --glyph-names - Keep PS glyph names in TT-flavored fonts. In general glyph names are - not needed for correct use of the font. 
However, some PDF generators - and PDF viewers might rely on glyph names to extract Unicode text - from PDF documents. - --no-glyph-names - Drop PS glyph names in TT-flavored fonts, by using 'post' table - version 3.0. [default] - --legacy-cmap - Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.). - --no-legacy-cmap - Drop the legacy 'cmap' subtables. [default] - --symbol-cmap - Keep the 3.0 symbol 'cmap'. - --no-symbol-cmap - Drop the 3.0 symbol 'cmap'. [default] - -Other font-specific options: - --recalc-bounds - Recalculate font bounding boxes. - --no-recalc-bounds - Keep original font bounding boxes. This is faster and still safe - for all practical purposes. [default] - --recalc-timestamp - Set font 'modified' timestamp to current time. - --no-recalc-timestamp - Do not modify font 'modified' timestamp. [default] - --canonical-order - Order tables as recommended in the OpenType standard. This is not - required by the standard, nor by any known implementation. - --no-canonical-order - Keep original order of font tables. This is faster. [default] - -Application options: - --verbose - Display verbose information of the subsetting process. - --timing - Display detailed timing information of the subsetting process. - --xml - Display the TTX XML representation of subsetted font. - -Example: - Produce a subset containing the characters ' !"#$%' without performing - size-reducing optimizations: - - $ pyftsubset font.ttf --unicodes="U+0020-0025" \\ - --layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\ - --notdef-glyph --notdef-outline --recommended-glyphs \\ - --name-IDs='*' --name-legacy --name-languages='*' -""" - - -def _add_method(*clazzes): - """Returns a decorator function that adds a new method to one or - more classes.""" - def wrapper(method): - for clazz in clazzes: - assert clazz.__name__ != 'DefaultTable', \ - 'Oops, table class not found.' - assert not hasattr(clazz, method.__name__), \ - "Oops, class '%s' has method '%s'." 
% (clazz.__name__, - method.__name__) - setattr(clazz, method.__name__, method) - return None - return wrapper - -def _uniq_sort(l): - return sorted(set(l)) - -def _set_update(s, *others): - # Jython's set.update only takes one other argument. - # Emulate real set.update... - for other in others: - s.update(other) - -def _dict_subset(d, glyphs): - return {g:d[g] for g in glyphs} - - -@_add_method(otTables.Coverage) -def intersect(self, glyphs): - """Returns ascending list of matching coverage values.""" - return [i for i,g in enumerate(self.glyphs) if g in glyphs] - -@_add_method(otTables.Coverage) -def intersect_glyphs(self, glyphs): - """Returns set of intersecting glyphs.""" - return set(g for g in self.glyphs if g in glyphs) - -@_add_method(otTables.Coverage) -def subset(self, glyphs): - """Returns ascending list of remaining coverage values.""" - indices = self.intersect(glyphs) - self.glyphs = [g for g in self.glyphs if g in glyphs] - return indices - -@_add_method(otTables.Coverage) -def remap(self, coverage_map): - """Remaps coverage.""" - self.glyphs = [self.glyphs[i] for i in coverage_map] - -@_add_method(otTables.ClassDef) -def intersect(self, glyphs): - """Returns ascending list of matching class values.""" - return _uniq_sort( - ([0] if any(g not in self.classDefs for g in glyphs) else []) + - [v for g,v in self.classDefs.items() if g in glyphs]) - -@_add_method(otTables.ClassDef) -def intersect_class(self, glyphs, klass): - """Returns set of glyphs matching class.""" - if klass == 0: - return set(g for g in glyphs if g not in self.classDefs) - return set(g for g,v in self.classDefs.items() - if v == klass and g in glyphs) - -@_add_method(otTables.ClassDef) -def subset(self, glyphs, remap=False): - """Returns ascending list of remaining classes.""" - self.classDefs = {g:v for g,v in self.classDefs.items() if g in glyphs} - # Note: while class 0 has the special meaning of "not matched", - # if no glyph will ever /not match/, we can optimize class 0 out 
too. - indices = _uniq_sort( - ([0] if any(g not in self.classDefs for g in glyphs) else []) + - list(self.classDefs.values())) - if remap: - self.remap(indices) - return indices - -@_add_method(otTables.ClassDef) -def remap(self, class_map): - """Remaps classes.""" - self.classDefs = {g:class_map.index(v) for g,v in self.classDefs.items()} - -@_add_method(otTables.SingleSubst) -def closure_glyphs(self, s, cur_glyphs): - s.glyphs.update(v for g,v in self.mapping.items() if g in cur_glyphs) - -@_add_method(otTables.SingleSubst) -def subset_glyphs(self, s): - self.mapping = {g:v for g,v in self.mapping.items() - if g in s.glyphs and v in s.glyphs} - return bool(self.mapping) - -@_add_method(otTables.MultipleSubst) -def closure_glyphs(self, s, cur_glyphs): - indices = self.Coverage.intersect(cur_glyphs) - _set_update(s.glyphs, *(self.Sequence[i].Substitute for i in indices)) - -@_add_method(otTables.MultipleSubst) -def subset_glyphs(self, s): - indices = self.Coverage.subset(s.glyphs) - self.Sequence = [self.Sequence[i] for i in indices] - # Now drop rules generating glyphs we don't want - indices = [i for i,seq in enumerate(self.Sequence) - if all(sub in s.glyphs for sub in seq.Substitute)] - self.Sequence = [self.Sequence[i] for i in indices] - self.Coverage.remap(indices) - self.SequenceCount = len(self.Sequence) - return bool(self.SequenceCount) - -@_add_method(otTables.AlternateSubst) -def closure_glyphs(self, s, cur_glyphs): - _set_update(s.glyphs, *(vlist for g,vlist in self.alternates.items() - if g in cur_glyphs)) - -@_add_method(otTables.AlternateSubst) -def subset_glyphs(self, s): - self.alternates = {g:vlist - for g,vlist in self.alternates.items() - if g in s.glyphs and - all(v in s.glyphs for v in vlist)} - return bool(self.alternates) - -@_add_method(otTables.LigatureSubst) -def closure_glyphs(self, s, cur_glyphs): - _set_update(s.glyphs, *([seq.LigGlyph for seq in seqs - if all(c in s.glyphs for c in seq.Component)] - for g,seqs in 
self.ligatures.items() - if g in cur_glyphs)) - -@_add_method(otTables.LigatureSubst) -def subset_glyphs(self, s): - self.ligatures = {g:v for g,v in self.ligatures.items() - if g in s.glyphs} - self.ligatures = {g:[seq for seq in seqs - if seq.LigGlyph in s.glyphs and - all(c in s.glyphs for c in seq.Component)] - for g,seqs in self.ligatures.items()} - self.ligatures = {g:v for g,v in self.ligatures.items() if v} - return bool(self.ligatures) - -@_add_method(otTables.ReverseChainSingleSubst) -def closure_glyphs(self, s, cur_glyphs): - if self.Format == 1: - indices = self.Coverage.intersect(cur_glyphs) - if(not indices or - not all(c.intersect(s.glyphs) - for c in self.LookAheadCoverage + self.BacktrackCoverage)): - return - s.glyphs.update(self.Substitute[i] for i in indices) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ReverseChainSingleSubst) -def subset_glyphs(self, s): - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - self.Substitute = [self.Substitute[i] for i in indices] - # Now drop rules generating glyphs we don't want - indices = [i for i,sub in enumerate(self.Substitute) - if sub in s.glyphs] - self.Substitute = [self.Substitute[i] for i in indices] - self.Coverage.remap(indices) - self.GlyphCount = len(self.Substitute) - return bool(self.GlyphCount and - all(c.subset(s.glyphs) - for c in self.LookAheadCoverage+self.BacktrackCoverage)) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.SinglePos) -def subset_glyphs(self, s): - if self.Format == 1: - return len(self.Coverage.subset(s.glyphs)) - elif self.Format == 2: - indices = self.Coverage.subset(s.glyphs) - self.Value = [self.Value[i] for i in indices] - self.ValueCount = len(self.Value) - return bool(self.ValueCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.SinglePos) -def prune_post_subset(self, options): - if not options.hinting: - # Drop device tables - self.ValueFormat &= 
~0x00F0 - return True - -@_add_method(otTables.PairPos) -def subset_glyphs(self, s): - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - self.PairSet = [self.PairSet[i] for i in indices] - for p in self.PairSet: - p.PairValueRecord = [r for r in p.PairValueRecord - if r.SecondGlyph in s.glyphs] - p.PairValueCount = len(p.PairValueRecord) - # Remove empty pairsets - indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount] - self.Coverage.remap(indices) - self.PairSet = [self.PairSet[i] for i in indices] - self.PairSetCount = len(self.PairSet) - return bool(self.PairSetCount) - elif self.Format == 2: - class1_map = self.ClassDef1.subset(s.glyphs, remap=True) - class2_map = self.ClassDef2.subset(s.glyphs, remap=True) - self.Class1Record = [self.Class1Record[i] for i in class1_map] - for c in self.Class1Record: - c.Class2Record = [c.Class2Record[i] for i in class2_map] - self.Class1Count = len(class1_map) - self.Class2Count = len(class2_map) - return bool(self.Class1Count and - self.Class2Count and - self.Coverage.subset(s.glyphs)) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.PairPos) -def prune_post_subset(self, options): - if not options.hinting: - # Drop device tables - self.ValueFormat1 &= ~0x00F0 - self.ValueFormat2 &= ~0x00F0 - return True - -@_add_method(otTables.CursivePos) -def subset_glyphs(self, s): - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - self.EntryExitRecord = [self.EntryExitRecord[i] for i in indices] - self.EntryExitCount = len(self.EntryExitRecord) - return bool(self.EntryExitCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.Anchor) -def prune_hints(self): - # Drop device tables / contour anchor point - self.ensureDecompiled() - self.Format = 1 - -@_add_method(otTables.CursivePos) -def prune_post_subset(self, options): - if not options.hinting: - for rec in self.EntryExitRecord: - if rec.EntryAnchor: 
rec.EntryAnchor.prune_hints() - if rec.ExitAnchor: rec.ExitAnchor.prune_hints() - return True - -@_add_method(otTables.MarkBasePos) -def subset_glyphs(self, s): - if self.Format == 1: - mark_indices = self.MarkCoverage.subset(s.glyphs) - self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] - for i in mark_indices] - self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) - base_indices = self.BaseCoverage.subset(s.glyphs) - self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i] - for i in base_indices] - self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord) - # Prune empty classes - class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) - self.ClassCount = len(class_indices) - for m in self.MarkArray.MarkRecord: - m.Class = class_indices.index(m.Class) - for b in self.BaseArray.BaseRecord: - b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices] - return bool(self.ClassCount and - self.MarkArray.MarkCount and - self.BaseArray.BaseCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.MarkBasePos) -def prune_post_subset(self, options): - if not options.hinting: - for m in self.MarkArray.MarkRecord: - if m.MarkAnchor: - m.MarkAnchor.prune_hints() - for b in self.BaseArray.BaseRecord: - for a in b.BaseAnchor: - if a: - a.prune_hints() - return True - -@_add_method(otTables.MarkLigPos) -def subset_glyphs(self, s): - if self.Format == 1: - mark_indices = self.MarkCoverage.subset(s.glyphs) - self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] - for i in mark_indices] - self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) - ligature_indices = self.LigatureCoverage.subset(s.glyphs) - self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i] - for i in ligature_indices] - self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach) - # Prune empty classes - class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) - self.ClassCount = len(class_indices) - for m 
in self.MarkArray.MarkRecord: - m.Class = class_indices.index(m.Class) - for l in self.LigatureArray.LigatureAttach: - for c in l.ComponentRecord: - c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices] - return bool(self.ClassCount and - self.MarkArray.MarkCount and - self.LigatureArray.LigatureCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.MarkLigPos) -def prune_post_subset(self, options): - if not options.hinting: - for m in self.MarkArray.MarkRecord: - if m.MarkAnchor: - m.MarkAnchor.prune_hints() - for l in self.LigatureArray.LigatureAttach: - for c in l.ComponentRecord: - for a in c.LigatureAnchor: - if a: - a.prune_hints() - return True - -@_add_method(otTables.MarkMarkPos) -def subset_glyphs(self, s): - if self.Format == 1: - mark1_indices = self.Mark1Coverage.subset(s.glyphs) - self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i] - for i in mark1_indices] - self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord) - mark2_indices = self.Mark2Coverage.subset(s.glyphs) - self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i] - for i in mark2_indices] - self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record) - # Prune empty classes - class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord) - self.ClassCount = len(class_indices) - for m in self.Mark1Array.MarkRecord: - m.Class = class_indices.index(m.Class) - for b in self.Mark2Array.Mark2Record: - b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices] - return bool(self.ClassCount and - self.Mark1Array.MarkCount and - self.Mark2Array.MarkCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.MarkMarkPos) -def prune_post_subset(self, options): - if not options.hinting: - # Drop device tables or contour anchor point - for m in self.Mark1Array.MarkRecord: - if m.MarkAnchor: - m.MarkAnchor.prune_hints() - for b in self.Mark2Array.Mark2Record: - for m in b.Mark2Anchor: - if m: - m.prune_hints() 
- return True - -@_add_method(otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - otTables.ReverseChainSingleSubst, - otTables.SinglePos, - otTables.PairPos, - otTables.CursivePos, - otTables.MarkBasePos, - otTables.MarkLigPos, - otTables.MarkMarkPos) -def subset_lookups(self, lookup_indices): - pass - -@_add_method(otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - otTables.ReverseChainSingleSubst, - otTables.SinglePos, - otTables.PairPos, - otTables.CursivePos, - otTables.MarkBasePos, - otTables.MarkLigPos, - otTables.MarkMarkPos) -def collect_lookups(self): - return [] - -@_add_method(otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - otTables.ReverseChainSingleSubst, - otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def prune_post_subset(self, options): - return True - -@_add_method(otTables.SingleSubst, - otTables.AlternateSubst, - otTables.ReverseChainSingleSubst) -def may_have_non_1to1(self): - return False - -@_add_method(otTables.MultipleSubst, - otTables.LigatureSubst, - otTables.ContextSubst, - otTables.ChainContextSubst) -def may_have_non_1to1(self): - return True - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def __subset_classify_context(self): - - class ContextHelper(object): - def __init__(self, klass, Format): - if klass.__name__.endswith('Subst'): - Typ = 'Sub' - Type = 'Subst' - else: - Typ = 'Pos' - Type = 'Pos' - if klass.__name__.startswith('Chain'): - Chain = 'Chain' - else: - Chain = '' - ChainTyp = Chain+Typ - - self.Typ = Typ - self.Type = Type - self.Chain = Chain - self.ChainTyp = ChainTyp - - self.LookupRecord = Type+'LookupRecord' - - if Format == 1: - Coverage = lambda r: r.Coverage - ChainCoverage = lambda r: r.Coverage - ContextData = lambda r:(None,) 
- ChainContextData = lambda r:(None, None, None) - RuleData = lambda r:(r.Input,) - ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) - SetRuleData = None - ChainSetRuleData = None - elif Format == 2: - Coverage = lambda r: r.Coverage - ChainCoverage = lambda r: r.Coverage - ContextData = lambda r:(r.ClassDef,) - ChainContextData = lambda r:(r.BacktrackClassDef, - r.InputClassDef, - r.LookAheadClassDef) - RuleData = lambda r:(r.Class,) - ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) - def SetRuleData(r, d):(r.Class,) = d - def ChainSetRuleData(r, d):(r.Backtrack, r.Input, r.LookAhead) = d - elif Format == 3: - Coverage = lambda r: r.Coverage[0] - ChainCoverage = lambda r: r.InputCoverage[0] - ContextData = None - ChainContextData = None - RuleData = lambda r: r.Coverage - ChainRuleData = lambda r:(r.BacktrackCoverage + - r.InputCoverage + - r.LookAheadCoverage) - SetRuleData = None - ChainSetRuleData = None - else: - assert 0, "unknown format: %s" % Format - - if Chain: - self.Coverage = ChainCoverage - self.ContextData = ChainContextData - self.RuleData = ChainRuleData - self.SetRuleData = ChainSetRuleData - else: - self.Coverage = Coverage - self.ContextData = ContextData - self.RuleData = RuleData - self.SetRuleData = SetRuleData - - if Format == 1: - self.Rule = ChainTyp+'Rule' - self.RuleCount = ChainTyp+'RuleCount' - self.RuleSet = ChainTyp+'RuleSet' - self.RuleSetCount = ChainTyp+'RuleSetCount' - self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] - elif Format == 2: - self.Rule = ChainTyp+'ClassRule' - self.RuleCount = ChainTyp+'ClassRuleCount' - self.RuleSet = ChainTyp+'ClassSet' - self.RuleSetCount = ChainTyp+'ClassSetCount' - self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c - else (set(glyphs) if r == 0 else set())) - - self.ClassDef = 'InputClassDef' if Chain else 'ClassDef' - self.ClassDefIndex = 1 if Chain else 0 - self.Input = 'Input' if Chain else 'Class' - - if self.Format not in [1, 2, 
3]: - return None # Don't shoot the messenger; let it go - if not hasattr(self.__class__, "__ContextHelpers"): - self.__class__.__ContextHelpers = {} - if self.Format not in self.__class__.__ContextHelpers: - helper = ContextHelper(self.__class__, self.Format) - self.__class__.__ContextHelpers[self.Format] = helper - return self.__class__.__ContextHelpers[self.Format] - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst) -def closure_glyphs(self, s, cur_glyphs): - c = self.__subset_classify_context() - - indices = c.Coverage(self).intersect(cur_glyphs) - if not indices: - return [] - cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs) - - if self.Format == 1: - ContextData = c.ContextData(self) - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - for i in indices: - if i >= rssCount or not rss[i]: continue - for r in getattr(rss[i], c.Rule): - if not r: continue - if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) - for cd,klist in zip(ContextData, c.RuleData(r))): - continue - chaos = set() - for ll in getattr(r, c.LookupRecord): - if not ll: continue - seqi = ll.SequenceIndex - if seqi in chaos: - # TODO Can we improve this? 
- pos_glyphs = None - else: - if seqi == 0: - pos_glyphs = frozenset([c.Coverage(self).glyphs[i]]) - else: - pos_glyphs = frozenset([r.Input[seqi - 1]]) - lookup = s.table.LookupList.Lookup[ll.LookupListIndex] - chaos.add(seqi) - if lookup.may_have_non_1to1(): - chaos.update(range(seqi, len(r.Input)+2)) - lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) - elif self.Format == 2: - ClassDef = getattr(self, c.ClassDef) - indices = ClassDef.intersect(cur_glyphs) - ContextData = c.ContextData(self) - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - for i in indices: - if i >= rssCount or not rss[i]: continue - for r in getattr(rss[i], c.Rule): - if not r: continue - if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) - for cd,klist in zip(ContextData, c.RuleData(r))): - continue - chaos = set() - for ll in getattr(r, c.LookupRecord): - if not ll: continue - seqi = ll.SequenceIndex - if seqi in chaos: - # TODO Can we improve this? - pos_glyphs = None - else: - if seqi == 0: - pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i)) - else: - pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1])) - lookup = s.table.LookupList.Lookup[ll.LookupListIndex] - chaos.add(seqi) - if lookup.may_have_non_1to1(): - chaos.update(range(seqi, len(getattr(r, c.Input))+2)) - lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) - elif self.Format == 3: - if not all(x.intersect(s.glyphs) for x in c.RuleData(self)): - return [] - r = self - chaos = set() - for ll in getattr(r, c.LookupRecord): - if not ll: continue - seqi = ll.SequenceIndex - if seqi in chaos: - # TODO Can we improve this? 
- pos_glyphs = None - else: - if seqi == 0: - pos_glyphs = frozenset(cur_glyphs) - else: - pos_glyphs = frozenset(r.InputCoverage[seqi].intersect_glyphs(s.glyphs)) - lookup = s.table.LookupList.Lookup[ll.LookupListIndex] - chaos.add(seqi) - if lookup.may_have_non_1to1(): - chaos.update(range(seqi, len(r.InputCoverage)+1)) - lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ContextSubst, - otTables.ContextPos, - otTables.ChainContextSubst, - otTables.ChainContextPos) -def subset_glyphs(self, s): - c = self.__subset_classify_context() - - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - rss = [rss[i] for i in indices if i < rssCount] - for rs in rss: - if not rs: continue - ss = getattr(rs, c.Rule) - ss = [r for r in ss - if r and all(all(g in s.glyphs for g in glist) - for glist in c.RuleData(r))] - setattr(rs, c.Rule, ss) - setattr(rs, c.RuleCount, len(ss)) - # Prune empty rulesets - indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)] - self.Coverage.remap(indices) - rss = [rss[i] for i in indices] - setattr(self, c.RuleSet, rss) - setattr(self, c.RuleSetCount, len(rss)) - return bool(rss) - elif self.Format == 2: - if not self.Coverage.subset(s.glyphs): - return False - ContextData = c.ContextData(self) - klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData] - - # Keep rulesets for class numbers that survived. - indices = klass_maps[c.ClassDefIndex] - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - rss = [rss[i] for i in indices if i < rssCount] - del rssCount - # Delete, but not renumber, unreachable rulesets. 
- indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs) - rss = [rss if i in indices else None for i,rss in enumerate(rss)] - - for rs in rss: - if not rs: continue - ss = getattr(rs, c.Rule) - ss = [r for r in ss - if r and all(all(k in klass_map for k in klist) - for klass_map,klist in zip(klass_maps, c.RuleData(r)))] - setattr(rs, c.Rule, ss) - setattr(rs, c.RuleCount, len(ss)) - - # Remap rule classes - for r in ss: - c.SetRuleData(r, [[klass_map.index(k) for k in klist] - for klass_map,klist in zip(klass_maps, c.RuleData(r))]) - - # Prune empty rulesets - rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss] - while rss and rss[-1] is None: - del rss[-1] - setattr(self, c.RuleSet, rss) - setattr(self, c.RuleSetCount, len(rss)) - - # TODO: We can do a second round of remapping class values based - # on classes that are actually used in at least one rule. Right - # now we subset classes to c.glyphs only. Or better, rewrite - # the above to do that. - - return bool(rss) - elif self.Format == 3: - return all(x.subset(s.glyphs) for x in c.RuleData(self)) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def subset_lookups(self, lookup_indices): - c = self.__subset_classify_context() - - if self.Format in [1, 2]: - for rs in getattr(self, c.RuleSet): - if not rs: continue - for r in getattr(rs, c.Rule): - if not r: continue - setattr(r, c.LookupRecord, - [ll for ll in getattr(r, c.LookupRecord) - if ll and ll.LookupListIndex in lookup_indices]) - for ll in getattr(r, c.LookupRecord): - if not ll: continue - ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex) - elif self.Format == 3: - setattr(self, c.LookupRecord, - [ll for ll in getattr(self, c.LookupRecord) - if ll and ll.LookupListIndex in lookup_indices]) - for ll in getattr(self, c.LookupRecord): - if not ll: continue - ll.LookupListIndex = 
lookup_indices.index(ll.LookupListIndex) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def collect_lookups(self): - c = self.__subset_classify_context() - - if self.Format in [1, 2]: - return [ll.LookupListIndex - for rs in getattr(self, c.RuleSet) if rs - for r in getattr(rs, c.Rule) if r - for ll in getattr(r, c.LookupRecord) if ll] - elif self.Format == 3: - return [ll.LookupListIndex - for ll in getattr(self, c.LookupRecord) if ll] - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst) -def closure_glyphs(self, s, cur_glyphs): - if self.Format == 1: - self.ExtSubTable.closure_glyphs(s, cur_glyphs) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst) -def may_have_non_1to1(self): - if self.Format == 1: - return self.ExtSubTable.may_have_non_1to1() - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def subset_glyphs(self, s): - if self.Format == 1: - return self.ExtSubTable.subset_glyphs(s) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def prune_post_subset(self, options): - if self.Format == 1: - return self.ExtSubTable.prune_post_subset(options) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def subset_lookups(self, lookup_indices): - if self.Format == 1: - return self.ExtSubTable.subset_lookups(lookup_indices) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def collect_lookups(self): - if self.Format == 1: - return self.ExtSubTable.collect_lookups() - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.Lookup) -def 
closure_glyphs(self, s, cur_glyphs=None): - if cur_glyphs is None: - cur_glyphs = frozenset(s.glyphs) - - # Memoize - if (id(self), cur_glyphs) in s._doneLookups: - return - s._doneLookups.add((id(self), cur_glyphs)) - - if self in s._activeLookups: - raise Exception("Circular loop in lookup recursion") - s._activeLookups.append(self) - for st in self.SubTable: - if not st: continue - st.closure_glyphs(s, cur_glyphs) - assert(s._activeLookups[-1] == self) - del s._activeLookups[-1] - -@_add_method(otTables.Lookup) -def subset_glyphs(self, s): - self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)] - self.SubTableCount = len(self.SubTable) - return bool(self.SubTableCount) - -@_add_method(otTables.Lookup) -def prune_post_subset(self, options): - ret = False - for st in self.SubTable: - if not st: continue - if st.prune_post_subset(options): ret = True - return ret - -@_add_method(otTables.Lookup) -def subset_lookups(self, lookup_indices): - for s in self.SubTable: - s.subset_lookups(lookup_indices) - -@_add_method(otTables.Lookup) -def collect_lookups(self): - return _uniq_sort(sum((st.collect_lookups() for st in self.SubTable - if st), [])) - -@_add_method(otTables.Lookup) -def may_have_non_1to1(self): - return any(st.may_have_non_1to1() for st in self.SubTable if st) - -@_add_method(otTables.LookupList) -def subset_glyphs(self, s): - """Returns the indices of nonempty lookups.""" - return [i for i,l in enumerate(self.Lookup) if l and l.subset_glyphs(s)] - -@_add_method(otTables.LookupList) -def prune_post_subset(self, options): - ret = False - for l in self.Lookup: - if not l: continue - if l.prune_post_subset(options): ret = True - return ret - -@_add_method(otTables.LookupList) -def subset_lookups(self, lookup_indices): - self.ensureDecompiled() - self.Lookup = [self.Lookup[i] for i in lookup_indices - if i < self.LookupCount] - self.LookupCount = len(self.Lookup) - for l in self.Lookup: - l.subset_lookups(lookup_indices) - 
-@_add_method(otTables.LookupList) -def neuter_lookups(self, lookup_indices): - """Sets lookups not in lookup_indices to None.""" - self.ensureDecompiled() - self.Lookup = [l if i in lookup_indices else None for i,l in enumerate(self.Lookup)] - -@_add_method(otTables.LookupList) -def closure_lookups(self, lookup_indices): - lookup_indices = _uniq_sort(lookup_indices) - recurse = lookup_indices - while True: - recurse_lookups = sum((self.Lookup[i].collect_lookups() - for i in recurse if i < self.LookupCount), []) - recurse_lookups = [l for l in recurse_lookups - if l not in lookup_indices and l < self.LookupCount] - if not recurse_lookups: - return _uniq_sort(lookup_indices) - recurse_lookups = _uniq_sort(recurse_lookups) - lookup_indices.extend(recurse_lookups) - recurse = recurse_lookups - -@_add_method(otTables.Feature) -def subset_lookups(self, lookup_indices): - self.LookupListIndex = [l for l in self.LookupListIndex - if l in lookup_indices] - # Now map them. - self.LookupListIndex = [lookup_indices.index(l) - for l in self.LookupListIndex] - self.LookupCount = len(self.LookupListIndex) - return self.LookupCount or self.FeatureParams - -@_add_method(otTables.Feature) -def collect_lookups(self): - return self.LookupListIndex[:] - -@_add_method(otTables.FeatureList) -def subset_lookups(self, lookup_indices): - """Returns the indices of nonempty features.""" - # Note: Never ever drop feature 'pref', even if it's empty. - # HarfBuzz chooses shaper for Khmer based on presence of this - # feature. 
See thread at: - # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html - feature_indices = [i for i,f in enumerate(self.FeatureRecord) - if (f.Feature.subset_lookups(lookup_indices) or - f.FeatureTag == 'pref')] - self.subset_features(feature_indices) - return feature_indices - -@_add_method(otTables.FeatureList) -def collect_lookups(self, feature_indices): - return _uniq_sort(sum((self.FeatureRecord[i].Feature.collect_lookups() - for i in feature_indices - if i < self.FeatureCount), [])) - -@_add_method(otTables.FeatureList) -def subset_features(self, feature_indices): - self.ensureDecompiled() - self.FeatureRecord = [self.FeatureRecord[i] for i in feature_indices] - self.FeatureCount = len(self.FeatureRecord) - return bool(self.FeatureCount) - -@_add_method(otTables.DefaultLangSys, - otTables.LangSys) -def subset_features(self, feature_indices): - if self.ReqFeatureIndex in feature_indices: - self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex) - else: - self.ReqFeatureIndex = 65535 - self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices] - # Now map them. 
- self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex - if f in feature_indices] - self.FeatureCount = len(self.FeatureIndex) - return bool(self.FeatureCount or self.ReqFeatureIndex != 65535) - -@_add_method(otTables.DefaultLangSys, - otTables.LangSys) -def collect_features(self): - feature_indices = self.FeatureIndex[:] - if self.ReqFeatureIndex != 65535: - feature_indices.append(self.ReqFeatureIndex) - return _uniq_sort(feature_indices) - -@_add_method(otTables.Script) -def subset_features(self, feature_indices): - if(self.DefaultLangSys and - not self.DefaultLangSys.subset_features(feature_indices)): - self.DefaultLangSys = None - self.LangSysRecord = [l for l in self.LangSysRecord - if l.LangSys.subset_features(feature_indices)] - self.LangSysCount = len(self.LangSysRecord) - return bool(self.LangSysCount or self.DefaultLangSys) - -@_add_method(otTables.Script) -def collect_features(self): - feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord] - if self.DefaultLangSys: - feature_indices.append(self.DefaultLangSys.collect_features()) - return _uniq_sort(sum(feature_indices, [])) - -@_add_method(otTables.ScriptList) -def subset_features(self, feature_indices): - self.ScriptRecord = [s for s in self.ScriptRecord - if s.Script.subset_features(feature_indices)] - self.ScriptCount = len(self.ScriptRecord) - return bool(self.ScriptCount) - -@_add_method(otTables.ScriptList) -def collect_features(self): - return _uniq_sort(sum((s.Script.collect_features() - for s in self.ScriptRecord), [])) - -@_add_method(ttLib.getTableClass('GSUB')) -def closure_glyphs(self, s): - s.table = self.table - if self.table.ScriptList: - feature_indices = self.table.ScriptList.collect_features() - else: - feature_indices = [] - if self.table.FeatureList: - lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) - else: - lookup_indices = [] - if self.table.LookupList: - while True: - orig_glyphs = frozenset(s.glyphs) - 
s._activeLookups = [] - s._doneLookups = set() - for i in lookup_indices: - if i >= self.table.LookupList.LookupCount: continue - if not self.table.LookupList.Lookup[i]: continue - self.table.LookupList.Lookup[i].closure_glyphs(s) - del s._activeLookups, s._doneLookups - if orig_glyphs == s.glyphs: - break - del s.table - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def subset_glyphs(self, s): - s.glyphs = s.glyphs_gsubed - if self.table.LookupList: - lookup_indices = self.table.LookupList.subset_glyphs(s) - else: - lookup_indices = [] - self.subset_lookups(lookup_indices) - return True - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def subset_lookups(self, lookup_indices): - """Retains specified lookups, then removes empty features, language - systems, and scripts.""" - if self.table.LookupList: - self.table.LookupList.subset_lookups(lookup_indices) - if self.table.FeatureList: - feature_indices = self.table.FeatureList.subset_lookups(lookup_indices) - else: - feature_indices = [] - if self.table.ScriptList: - self.table.ScriptList.subset_features(feature_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def neuter_lookups(self, lookup_indices): - """Sets lookups not in lookup_indices to None.""" - if self.table.LookupList: - self.table.LookupList.neuter_lookups(lookup_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_lookups(self, remap=True): - """Remove (default) or neuter unreferenced lookups""" - if self.table.ScriptList: - feature_indices = self.table.ScriptList.collect_features() - else: - feature_indices = [] - if self.table.FeatureList: - lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) - else: - lookup_indices = [] - if self.table.LookupList: - lookup_indices = self.table.LookupList.closure_lookups(lookup_indices) - else: - lookup_indices = [] - if remap: - 
self.subset_lookups(lookup_indices) - else: - self.neuter_lookups(lookup_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def subset_feature_tags(self, feature_tags): - if self.table.FeatureList: - feature_indices = \ - [i for i,f in enumerate(self.table.FeatureList.FeatureRecord) - if f.FeatureTag in feature_tags] - self.table.FeatureList.subset_features(feature_indices) - else: - feature_indices = [] - if self.table.ScriptList: - self.table.ScriptList.subset_features(feature_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_features(self): - """Remove unreferenced features""" - if self.table.ScriptList: - feature_indices = self.table.ScriptList.collect_features() - else: - feature_indices = [] - if self.table.FeatureList: - self.table.FeatureList.subset_features(feature_indices) - if self.table.ScriptList: - self.table.ScriptList.subset_features(feature_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_pre_subset(self, options): - # Drop undesired features - if '*' not in options.layout_features: - self.subset_feature_tags(options.layout_features) - # Neuter unreferenced lookups - self.prune_lookups(remap=False) - return True - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def remove_redundant_langsys(self): - table = self.table - if not table.ScriptList or not table.FeatureList: - return - - features = table.FeatureList.FeatureRecord - - for s in table.ScriptList.ScriptRecord: - d = s.Script.DefaultLangSys - if not d: - continue - for lr in s.Script.LangSysRecord[:]: - l = lr.LangSys - # Compare d and l - if len(d.FeatureIndex) != len(l.FeatureIndex): - continue - if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535): - continue - - if d.ReqFeatureIndex != 65535: - if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]: - continue - - for i in range(len(d.FeatureIndex)): - if 
features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]: - break - else: - # LangSys and default are equal; delete LangSys - s.Script.LangSysRecord.remove(lr) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_post_subset(self, options): - table = self.table - - self.prune_lookups() # XXX Is this actually needed?! - - if table.LookupList: - table.LookupList.prune_post_subset(options) - # XXX Next two lines disabled because OTS is stupid and - # doesn't like NULL offsets here. - #if not table.LookupList.Lookup: - # table.LookupList = None - - if not table.LookupList: - table.FeatureList = None - - if table.FeatureList: - self.remove_redundant_langsys() - # Remove unreferenced features - self.prune_features() - - # XXX Next two lines disabled because OTS is stupid and - # doesn't like NULL offsets here. - #if table.FeatureList and not table.FeatureList.FeatureRecord: - # table.FeatureList = None - - # Never drop scripts themselves as them just being available - # holds semantic significance. - # XXX Next two lines disabled because OTS is stupid and - # doesn't like NULL offsets here. 
- #if table.ScriptList and not table.ScriptList.ScriptRecord: - # table.ScriptList = None - - return True - -@_add_method(ttLib.getTableClass('GDEF')) -def subset_glyphs(self, s): - glyphs = s.glyphs_gsubed - table = self.table - if table.LigCaretList: - indices = table.LigCaretList.Coverage.subset(glyphs) - table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i] - for i in indices] - table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph) - if table.MarkAttachClassDef: - table.MarkAttachClassDef.classDefs = \ - {g:v for g,v in table.MarkAttachClassDef.classDefs.items() - if g in glyphs} - if table.GlyphClassDef: - table.GlyphClassDef.classDefs = \ - {g:v for g,v in table.GlyphClassDef.classDefs.items() - if g in glyphs} - if table.AttachList: - indices = table.AttachList.Coverage.subset(glyphs) - GlyphCount = table.AttachList.GlyphCount - table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i] - for i in indices - if i < GlyphCount] - table.AttachList.GlyphCount = len(table.AttachList.AttachPoint) - if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef: - for coverage in table.MarkGlyphSetsDef.Coverage: - coverage.subset(glyphs) - # TODO: The following is disabled. If enabling, we need to go fixup all - # lookups that use MarkFilteringSet and map their set. 
- # indices = table.MarkGlyphSetsDef.Coverage = \ - # [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs] - return True - -@_add_method(ttLib.getTableClass('GDEF')) -def prune_post_subset(self, options): - table = self.table - # XXX check these against OTS - if table.LigCaretList and not table.LigCaretList.LigGlyphCount: - table.LigCaretList = None - if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs: - table.MarkAttachClassDef = None - if table.GlyphClassDef and not table.GlyphClassDef.classDefs: - table.GlyphClassDef = None - if table.AttachList and not table.AttachList.GlyphCount: - table.AttachList = None - if (hasattr(table, "MarkGlyphSetsDef") and - table.MarkGlyphSetsDef and - not table.MarkGlyphSetsDef.Coverage): - table.MarkGlyphSetsDef = None - if table.Version == 0x00010002/0x10000: - table.Version = 1.0 - return bool(table.LigCaretList or - table.MarkAttachClassDef or - table.GlyphClassDef or - table.AttachList or - (table.Version >= 0x00010002/0x10000 and table.MarkGlyphSetsDef)) - -@_add_method(ttLib.getTableClass('kern')) -def prune_pre_subset(self, options): - # Prune unknown kern table types - self.kernTables = [t for t in self.kernTables if hasattr(t, 'kernTable')] - return bool(self.kernTables) - -@_add_method(ttLib.getTableClass('kern')) -def subset_glyphs(self, s): - glyphs = s.glyphs_gsubed - for t in self.kernTables: - t.kernTable = {(a,b):v for (a,b),v in t.kernTable.items() - if a in glyphs and b in glyphs} - self.kernTables = [t for t in self.kernTables if t.kernTable] - return bool(self.kernTables) - -@_add_method(ttLib.getTableClass('vmtx')) -def subset_glyphs(self, s): - self.metrics = _dict_subset(self.metrics, s.glyphs) - return bool(self.metrics) - -@_add_method(ttLib.getTableClass('hmtx')) -def subset_glyphs(self, s): - self.metrics = _dict_subset(self.metrics, s.glyphs) - return True # Required table - -@_add_method(ttLib.getTableClass('hdmx')) -def subset_glyphs(self, s): - self.hdmx = {sz:_dict_subset(l, 
s.glyphs) for sz,l in self.hdmx.items()} - return bool(self.hdmx) - -@_add_method(ttLib.getTableClass('VORG')) -def subset_glyphs(self, s): - self.VOriginRecords = {g:v for g,v in self.VOriginRecords.items() - if g in s.glyphs} - self.numVertOriginYMetrics = len(self.VOriginRecords) - return True # Never drop; has default metrics - -@_add_method(ttLib.getTableClass('post')) -def prune_pre_subset(self, options): - if not options.glyph_names: - self.formatType = 3.0 - return True # Required table - -@_add_method(ttLib.getTableClass('post')) -def subset_glyphs(self, s): - self.extraNames = [] # This seems to do it - return True # Required table - -@_add_method(ttLib.getTableModule('glyf').Glyph) -def remapComponentsFast(self, indices): - if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: - return # Not composite - data = array.array("B", self.data) - i = 10 - more = 1 - while more: - flags =(data[i] << 8) | data[i+1] - glyphID =(data[i+2] << 8) | data[i+3] - # Remap - glyphID = indices.index(glyphID) - data[i+2] = glyphID >> 8 - data[i+3] = glyphID & 0xFF - i += 4 - flags = int(flags) - - if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS - else: i += 2 - if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE - elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE - elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO - more = flags & 0x0020 # MORE_COMPONENTS - - self.data = data.tostring() - -@_add_method(ttLib.getTableClass('glyf')) -def closure_glyphs(self, s): - decompose = s.glyphs - while True: - components = set() - for g in decompose: - if g not in self.glyphs: - continue - gl = self.glyphs[g] - for c in gl.getComponentNames(self): - if c not in s.glyphs: - components.add(c) - components = set(c for c in components if c not in s.glyphs) - if not components: - break - decompose = components - s.glyphs.update(components) - -@_add_method(ttLib.getTableClass('glyf')) -def prune_pre_subset(self, options): - if options.notdef_glyph and not options.notdef_outline: - 
g = self[self.glyphOrder[0]] - # Yay, easy! - g.__dict__.clear() - g.data = "" - return True - -@_add_method(ttLib.getTableClass('glyf')) -def subset_glyphs(self, s): - self.glyphs = _dict_subset(self.glyphs, s.glyphs) - indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs] - for v in self.glyphs.values(): - if hasattr(v, "data"): - v.remapComponentsFast(indices) - else: - pass # No need - self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs] - # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset. - return True - -@_add_method(ttLib.getTableClass('glyf')) -def prune_post_subset(self, options): - remove_hinting = not options.hinting - for v in self.glyphs.values(): - v.trim(remove_hinting=remove_hinting) - return True - -@_add_method(ttLib.getTableClass('CFF ')) -def prune_pre_subset(self, options): - cff = self.cff - # CFF table must have one font only - cff.fontNames = cff.fontNames[:1] - - if options.notdef_glyph and not options.notdef_outline: - for fontname in cff.keys(): - font = cff[fontname] - c,_ = font.CharStrings.getItemAndSelector('.notdef') - # XXX we should preserve the glyph width - c.bytecode = '\x0e' # endchar - c.program = None - - return True # bool(cff.fontNames) - -@_add_method(ttLib.getTableClass('CFF ')) -def subset_glyphs(self, s): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - - # Load all glyphs - for g in font.charset: - if g not in s.glyphs: continue - c,sel = cs.getItemAndSelector(g) - - if cs.charStringsAreIndexed: - indices = [i for i,g in enumerate(font.charset) if g in s.glyphs] - csi = cs.charStringsIndex - csi.items = [csi.items[i] for i in indices] - del csi.file, csi.offsets - if hasattr(font, "FDSelect"): - sel = font.FDSelect - # XXX We want to set sel.format to None, such that the - # most compact format is selected. However, OTS was - # broken and couldn't parse a FDSelect format 0 that - # happened before CharStrings. 
As such, always force - # format 3 until we fix cffLib to always generate - # FDSelect after CharStrings. - # https://github.com/khaledhosny/ots/pull/31 - #sel.format = None - sel.format = 3 - sel.gidArray = [sel.gidArray[i] for i in indices] - cs.charStrings = {g:indices.index(v) - for g,v in cs.charStrings.items() - if g in s.glyphs} - else: - cs.charStrings = {g:v - for g,v in cs.charStrings.items() - if g in s.glyphs} - font.charset = [g for g in font.charset if g in s.glyphs] - font.numGlyphs = len(font.charset) - - return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) - -@_add_method(psCharStrings.T2CharString) -def subset_subroutines(self, subrs, gsubrs): - p = self.program - assert len(p) - for i in range(1, len(p)): - if p[i] == 'callsubr': - assert isinstance(p[i-1], int) - p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias - elif p[i] == 'callgsubr': - assert isinstance(p[i-1], int) - p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias - -@_add_method(psCharStrings.T2CharString) -def drop_hints(self): - hints = self._hints - - if hints.has_hint: - self.program = self.program[hints.last_hint:] - if hasattr(self, 'width'): - # Insert width back if needed - if self.width != self.private.defaultWidthX: - self.program.insert(0, self.width - self.private.nominalWidthX) - - if hints.has_hintmask: - i = 0 - p = self.program - while i < len(p): - if p[i] in ['hintmask', 'cntrmask']: - assert i + 1 <= len(p) - del p[i:i+2] - continue - i += 1 - - # TODO: we currently don't drop calls to "empty" subroutines. 
- - assert len(self.program) - - del self._hints - -class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): - - def __init__(self, localSubrs, globalSubrs): - psCharStrings.SimpleT2Decompiler.__init__(self, - localSubrs, - globalSubrs) - for subrs in [localSubrs, globalSubrs]: - if subrs and not hasattr(subrs, "_used"): - subrs._used = set() - - def op_callsubr(self, index): - self.localSubrs._used.add(self.operandStack[-1]+self.localBias) - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - - def op_callgsubr(self, index): - self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias) - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - -class _DehintingT2Decompiler(psCharStrings.SimpleT2Decompiler): - - class Hints(object): - def __init__(self): - # Whether calling this charstring produces any hint stems - self.has_hint = False - # Index to start at to drop all hints - self.last_hint = 0 - # Index up to which we know more hints are possible. - # Only relevant if status is 0 or 1. - self.last_checked = 0 - # The status means: - # 0: after dropping hints, this charstring is empty - # 1: after dropping hints, there may be more hints - # continuing after this - # 2: no more hints possible after this charstring - self.status = 0 - # Has hintmask instructions; not recursive - self.has_hintmask = False - pass - - def __init__(self, css, localSubrs, globalSubrs): - self._css = css - psCharStrings.SimpleT2Decompiler.__init__(self, - localSubrs, - globalSubrs) - - def execute(self, charString): - old_hints = charString._hints if hasattr(charString, '_hints') else None - charString._hints = self.Hints() - - psCharStrings.SimpleT2Decompiler.execute(self, charString) - - hints = charString._hints - - if hints.has_hint or hints.has_hintmask: - self._css.add(charString) - - if hints.status != 2: - # Check from last_check, make sure we didn't have any operators. 
- for i in range(hints.last_checked, len(charString.program) - 1): - if isinstance(charString.program[i], str): - hints.status = 2 - break - else: - hints.status = 1 # There's *something* here - hints.last_checked = len(charString.program) - - if old_hints: - assert hints.__dict__ == old_hints.__dict__ - - def op_callsubr(self, index): - subr = self.localSubrs[self.operandStack[-1]+self.localBias] - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - self.processSubr(index, subr) - - def op_callgsubr(self, index): - subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - self.processSubr(index, subr) - - def op_hstem(self, index): - psCharStrings.SimpleT2Decompiler.op_hstem(self, index) - self.processHint(index) - def op_vstem(self, index): - psCharStrings.SimpleT2Decompiler.op_vstem(self, index) - self.processHint(index) - def op_hstemhm(self, index): - psCharStrings.SimpleT2Decompiler.op_hstemhm(self, index) - self.processHint(index) - def op_vstemhm(self, index): - psCharStrings.SimpleT2Decompiler.op_vstemhm(self, index) - self.processHint(index) - def op_hintmask(self, index): - psCharStrings.SimpleT2Decompiler.op_hintmask(self, index) - self.processHintmask(index) - def op_cntrmask(self, index): - psCharStrings.SimpleT2Decompiler.op_cntrmask(self, index) - self.processHintmask(index) - - def processHintmask(self, index): - cs = self.callingStack[-1] - hints = cs._hints - hints.has_hintmask = True - if hints.status != 2 and hints.has_hint: - # Check from last_check, see if we may be an implicit vstem - for i in range(hints.last_checked, index - 1): - if isinstance(cs.program[i], str): - hints.status = 2 - break - if hints.status != 2: - # We are an implicit vstem - hints.last_hint = index + 1 - hints.status = 0 - hints.last_checked = index + 1 - - def processHint(self, index): - cs = self.callingStack[-1] - hints = cs._hints - hints.has_hint = True - hints.last_hint = index - 
hints.last_checked = index - - def processSubr(self, index, subr): - cs = self.callingStack[-1] - hints = cs._hints - subr_hints = subr._hints - - if subr_hints.has_hint: - if hints.status != 2: - hints.has_hint = True - hints.last_checked = index - hints.status = subr_hints.status - # Decide where to chop off from - if subr_hints.status == 0: - hints.last_hint = index - else: - hints.last_hint = index - 2 # Leave the subr call in - else: - # In my understanding, this is a font bug. - # I.e., it has hint stems *after* path construction. - # I've seen this in widespread fonts. - # Best to ignore the hints I suppose... - pass - #assert 0 - else: - hints.status = max(hints.status, subr_hints.status) - if hints.status != 2: - # Check from last_check, make sure we didn't have - # any operators. - for i in range(hints.last_checked, index - 1): - if isinstance(cs.program[i], str): - hints.status = 2 - break - hints.last_checked = index - if hints.status != 2: - # Decide where to chop off from - if subr_hints.status == 0: - hints.last_hint = index - else: - hints.last_hint = index - 2 # Leave the subr call in - -class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler): - - def __init__(self, localSubrs, globalSubrs): - psCharStrings.SimpleT2Decompiler.__init__(self, - localSubrs, - globalSubrs) - - def execute(self, charString): - # Note: Currently we recompute _desubroutinized each time. - # This is more robust in some cases, but in other places we assume - # that each subroutine always expands to the same code, so - # maybe it doesn't matter. To speed up we can just not - # recompute _desubroutinized if it's there. For now I just - # double-check that it desubroutinized to the same thing. 
- old_desubroutinized = charString._desubroutinized if hasattr(charString, '_desubroutinized') else None - - charString._patches = [] - psCharStrings.SimpleT2Decompiler.execute(self, charString) - desubroutinized = charString.program[:] - for idx,expansion in reversed (charString._patches): - assert idx >= 2 - assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1] - assert type(desubroutinized[idx - 2]) == int - if expansion[-1] == 'return': - expansion = expansion[:-1] - desubroutinized[idx-2:idx] = expansion - if 'endchar' in desubroutinized: - # Cut off after first endchar - desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1] - else: - if not len(desubroutinized) or desubroutinized[-1] != 'return': - desubroutinized.append('return') - - charString._desubroutinized = desubroutinized - del charString._patches - - if old_desubroutinized: - assert desubroutinized == old_desubroutinized - - def op_callsubr(self, index): - subr = self.localSubrs[self.operandStack[-1]+self.localBias] - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - self.processSubr(index, subr) - - def op_callgsubr(self, index): - subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - self.processSubr(index, subr) - - def processSubr(self, index, subr): - cs = self.callingStack[-1] - cs._patches.append((index, subr._desubroutinized)) - - -@_add_method(ttLib.getTableClass('CFF ')) -def prune_post_subset(self, options): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - - # Drop unused FontDictionaries - if hasattr(font, "FDSelect"): - sel = font.FDSelect - indices = _uniq_sort(sel.gidArray) - sel.gidArray = [indices.index (ss) for ss in sel.gidArray] - arr = font.FDArray - arr.items = [arr[i] for i in indices] - del arr.file, arr.offsets - - # Desubroutinize if asked for - if options.desubroutinize: - for g in font.charset: 
- c,sel = cs.getItemAndSelector(g) - c.decompile() - subrs = getattr(c.private, "Subrs", []) - decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs) - decompiler.execute(c) - c.program = c._desubroutinized - - # Drop hints if not needed - if not options.hinting: - - # This can be tricky, but doesn't have to. What we do is: - # - # - Run all used glyph charstrings and recurse into subroutines, - # - For each charstring (including subroutines), if it has any - # of the hint stem operators, we mark it as such. - # Upon returning, for each charstring we note all the - # subroutine calls it makes that (recursively) contain a stem, - # - Dropping hinting then consists of the following two ops: - # * Drop the piece of the program in each charstring before the - # last call to a stem op or a stem-calling subroutine, - # * Drop all hintmask operations. - # - It's trickier... A hintmask right after hints and a few numbers - # will act as an implicit vstemhm. As such, we track whether - # we have seen any non-hint operators so far and do the right - # thing, recursively... 
Good luck understanding that :( - css = set() - for g in font.charset: - c,sel = cs.getItemAndSelector(g) - c.decompile() - subrs = getattr(c.private, "Subrs", []) - decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs) - decompiler.execute(c) - for charstring in css: - charstring.drop_hints() - del css - - # Drop font-wide hinting values - all_privs = [] - if hasattr(font, 'FDSelect'): - all_privs.extend(fd.Private for fd in font.FDArray) - else: - all_privs.append(font.Private) - for priv in all_privs: - for k in ['BlueValues', 'OtherBlues', - 'FamilyBlues', 'FamilyOtherBlues', - 'BlueScale', 'BlueShift', 'BlueFuzz', - 'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW']: - if hasattr(priv, k): - setattr(priv, k, None) - - # Renumber subroutines to remove unused ones - - # Mark all used subroutines - for g in font.charset: - c,sel = cs.getItemAndSelector(g) - subrs = getattr(c.private, "Subrs", []) - decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs) - decompiler.execute(c) - - all_subrs = [font.GlobalSubrs] - if hasattr(font, 'FDSelect'): - all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs) - elif hasattr(font.Private, 'Subrs') and font.Private.Subrs: - all_subrs.append(font.Private.Subrs) - - subrs = set(subrs) # Remove duplicates - - # Prepare - for subrs in all_subrs: - if not hasattr(subrs, '_used'): - subrs._used = set() - subrs._used = _uniq_sort(subrs._used) - subrs._old_bias = psCharStrings.calcSubrBias(subrs) - subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) - - # Renumber glyph charstrings - for g in font.charset: - c,sel = cs.getItemAndSelector(g) - subrs = getattr(c.private, "Subrs", []) - c.subset_subroutines (subrs, font.GlobalSubrs) - - # Renumber subroutines themselves - for subrs in all_subrs: - if subrs == font.GlobalSubrs: - if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'): - local_subrs = font.Private.Subrs - else: - local_subrs = [] - else: - 
local_subrs = subrs - - subrs.items = [subrs.items[i] for i in subrs._used] - del subrs.file - if hasattr(subrs, 'offsets'): - del subrs.offsets - - for subr in subrs.items: - subr.subset_subroutines (local_subrs, font.GlobalSubrs) - - # Cleanup - for subrs in all_subrs: - del subrs._used, subrs._old_bias, subrs._new_bias - - return True - -@_add_method(ttLib.getTableClass('cmap')) -def closure_glyphs(self, s): - tables = [t for t in self.tables if t.isUnicode()] - - # Close glyphs - for table in tables: - if table.format == 14: - for cmap in table.uvsDict.values(): - glyphs = {g for u,g in cmap if u in s.unicodes_requested} - if None in glyphs: - glyphs.remove(None) - s.glyphs.update(glyphs) - else: - cmap = table.cmap - intersection = s.unicodes_requested.intersection(cmap.keys()) - s.glyphs.update(cmap[u] for u in intersection) - - # Calculate unicodes_missing - s.unicodes_missing = s.unicodes_requested.copy() - for table in tables: - s.unicodes_missing.difference_update(table.cmap) - -@_add_method(ttLib.getTableClass('cmap')) -def prune_pre_subset(self, options): - if not options.legacy_cmap: - # Drop non-Unicode / non-Symbol cmaps - self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()] - if not options.symbol_cmap: - self.tables = [t for t in self.tables if not t.isSymbol()] - # TODO(behdad) Only keep one subtable? - # For now, drop format=0 which can't be subset_glyphs easily? - self.tables = [t for t in self.tables if t.format != 0] - self.numSubTables = len(self.tables) - return True # Required table - -@_add_method(ttLib.getTableClass('cmap')) -def subset_glyphs(self, s): - s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only - for t in self.tables: - if t.format == 14: - # TODO(behdad) We drop all the default-UVS mappings - # for glyphs_requested. So it's the caller's responsibility to make - # sure those are included. 
- t.uvsDict = {v:[(u,g) for u,g in l - if g in s.glyphs_requested or u in s.unicodes_requested] - for v,l in t.uvsDict.items()} - t.uvsDict = {v:l for v,l in t.uvsDict.items() if l} - elif t.isUnicode(): - t.cmap = {u:g for u,g in t.cmap.items() - if g in s.glyphs_requested or u in s.unicodes_requested} - else: - t.cmap = {u:g for u,g in t.cmap.items() - if g in s.glyphs_requested} - self.tables = [t for t in self.tables - if (t.cmap if t.format != 14 else t.uvsDict)] - self.numSubTables = len(self.tables) - # TODO(behdad) Convert formats when needed. - # In particular, if we have a format=12 without non-BMP - # characters, either drop format=12 one or convert it - # to format=4 if there's not one. - return True # Required table - -@_add_method(ttLib.getTableClass('DSIG')) -def prune_pre_subset(self, options): - # Drop all signatures since they will be invalid - self.usNumSigs = 0 - self.signatureRecords = [] - return True - -@_add_method(ttLib.getTableClass('maxp')) -def prune_pre_subset(self, options): - if not options.hinting: - if self.tableVersion == 0x00010000: - self.maxZones = 1 - self.maxTwilightPoints = 0 - self.maxFunctionDefs = 0 - self.maxInstructionDefs = 0 - self.maxStackElements = 0 - self.maxSizeOfInstructions = 0 - return True - -@_add_method(ttLib.getTableClass('name')) -def prune_pre_subset(self, options): - if '*' not in options.name_IDs: - self.names = [n for n in self.names if n.nameID in options.name_IDs] - if not options.name_legacy: - # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman - # entry for Latin and no Unicode names. - self.names = [n for n in self.names if n.isUnicode()] - # TODO(behdad) Option to keep only one platform's - if '*' not in options.name_languages: - # TODO(behdad) This is Windows-platform specific! 
- self.names = [n for n in self.names - if n.langID in options.name_languages] - if options.obfuscate_names: - namerecs = [] - for n in self.names: - if n.nameID in [1, 4]: - n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f" - elif n.nameID in [2, 6]: - n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f" - elif n.nameID == 3: - n.string = "" - elif n.nameID in [16, 17, 18]: - continue - namerecs.append(n) - self.names = namerecs - return True # Required table - - -# TODO(behdad) OS/2 ulUnicodeRange / ulCodePageRange? -# TODO(behdad) Drop AAT tables. -# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries. -# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left -# TODO(behdad) Drop GDEF subitems if unused by lookups -# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF) -# TODO(behdad) Text direction considerations. -# TODO(behdad) Text script / language considerations. -# TODO(behdad) Optionally drop 'kern' table if GPOS available -# TODO(behdad) Implement --unicode='*' to choose all cmap'ed -# TODO(behdad) Drop old-spec Indic scripts - - -class Options(object): - - class OptionError(Exception): pass - class UnknownOptionError(OptionError): pass - - _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', - 'EBSC', 'SVG ', 'PCLT', 'LTSH'] - _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite - _drop_tables_default += ['CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'] # Color - _no_subset_tables_default = ['gasp', 'head', 'hhea', 'maxp', - 'vhea', 'OS/2', 'loca', 'name', 'cvt ', - 'fpgm', 'prep', 'VDMX', 'DSIG'] - _hinting_tables_default = ['cvt ', 'fpgm', 'prep', 'hdmx', 'VDMX'] - - # Based on HarfBuzz shapers - _layout_features_groups = { - # Default shaper - 'common': ['ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'], - 'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'], - 'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'], - 'ltr': ['ltra', 'ltrm'], - 'rtl': ['rtla', 
'rtlm'], - # Complex shapers - 'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3', - 'cswh', 'mset'], - 'hangul': ['ljmo', 'vjmo', 'tjmo'], - 'tibetan': ['abvs', 'blws', 'abvm', 'blwm'], - 'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half', - 'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres', - 'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'], - } - _layout_features_default = _uniq_sort(sum( - iter(_layout_features_groups.values()), [])) - - drop_tables = _drop_tables_default - no_subset_tables = _no_subset_tables_default - hinting_tables = _hinting_tables_default - legacy_kern = False # drop 'kern' table if GPOS available - layout_features = _layout_features_default - ignore_missing_glyphs = False - ignore_missing_unicodes = True - hinting = True - glyph_names = False - legacy_cmap = False - symbol_cmap = False - name_IDs = [1, 2] # Family and Style - name_legacy = False - name_languages = [0x0409] # English - obfuscate_names = False # to make webfont unusable as a system font - notdef_glyph = True # gid0 for TrueType / .notdef for CFF - notdef_outline = False # No need for notdef to have an outline really - recommended_glyphs = False # gid1, gid2, gid3 for TrueType - recalc_bounds = False # Recalculate font bounding boxes - recalc_timestamp = False # Recalculate font modified timestamp - canonical_order = False # Order tables as recommended - flavor = None # May be 'woff' or 'woff2' - desubroutinize = False # Desubroutinize CFF CharStrings - - def __init__(self, **kwargs): - self.set(**kwargs) - - def set(self, **kwargs): - for k,v in kwargs.items(): - if not hasattr(self, k): - raise self.UnknownOptionError("Unknown option '%s'" % k) - setattr(self, k, v) - - def parse_opts(self, argv, ignore_unknown=False): - ret = [] - for a in argv: - orig_a = a - if not a.startswith('--'): - ret.append(a) - continue - a = a[2:] - i = a.find('=') - op = '=' - if i == -1: - if a.startswith("no-"): - k = a[3:] - v = False - else: - k = a 
- v = True - if k.endswith("?"): - k = k[:-1] - v = '?' - else: - k = a[:i] - if k[-1] in "-+": - op = k[-1]+'=' # Op is '-=' or '+=' now. - k = k[:-1] - v = a[i+1:] - ok = k - k = k.replace('-', '_') - if not hasattr(self, k): - if ignore_unknown is True or ok in ignore_unknown: - ret.append(orig_a) - continue - else: - raise self.UnknownOptionError("Unknown option '%s'" % a) - - ov = getattr(self, k) - if v == '?': - print("Current setting for '%s' is: %s" % (ok, ov)) - continue - if isinstance(ov, bool): - v = bool(v) - elif isinstance(ov, int): - v = int(v) - elif isinstance(ov, str): - v = str(v) # redundant - elif isinstance(ov, list): - if isinstance(v, bool): - raise self.OptionError("Option '%s' requires values to be specified using '='" % a) - vv = v.replace(',', ' ').split() - if vv == ['']: - vv = [] - vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] - if op == '=': - v = vv - elif op == '+=': - v = ov - v.extend(vv) - elif op == '-=': - v = ov - for x in vv: - if x in v: - v.remove(x) - else: - assert False - - setattr(self, k, v) - - return ret - - -class Subsetter(object): - - class SubsettingError(Exception): pass - class MissingGlyphsSubsettingError(SubsettingError): pass - class MissingUnicodesSubsettingError(SubsettingError): pass - - def __init__(self, options=None, log=None): - - if not log: - log = Logger() - if not options: - options = Options() - - self.options = options - self.log = log - self.unicodes_requested = set() - self.glyph_names_requested = set() - self.glyph_ids_requested = set() - - def populate(self, glyphs=[], gids=[], unicodes=[], text=""): - self.unicodes_requested.update(unicodes) - if isinstance(text, bytes): - text = text.decode("utf_8") - for u in text: - self.unicodes_requested.add(ord(u)) - self.glyph_names_requested.update(glyphs) - self.glyph_ids_requested.update(gids) - - def _prune_pre_subset(self, font): - - for tag in font.keys(): - if tag == 'GlyphOrder': continue - - if(tag in 
self.options.drop_tables or - (tag in self.options.hinting_tables and not self.options.hinting) or - (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))): - self.log(tag, "dropped") - del font[tag] - continue - - clazz = ttLib.getTableClass(tag) - - if hasattr(clazz, 'prune_pre_subset'): - table = font[tag] - self.log.lapse("load '%s'" % tag) - retain = table.prune_pre_subset(self.options) - self.log.lapse("prune '%s'" % tag) - if not retain: - self.log(tag, "pruned to empty; dropped") - del font[tag] - continue - else: - self.log(tag, "pruned") - - def _closure_glyphs(self, font): - - realGlyphs = set(font.getGlyphOrder()) - glyph_order = font.getGlyphOrder() - - self.glyphs_requested = set() - self.glyphs_requested.update(self.glyph_names_requested) - self.glyphs_requested.update(glyph_order[i] - for i in self.glyph_ids_requested - if i < len(glyph_order)) - - self.glyphs_missing = set() - self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs)) - self.glyphs_missing.update(i for i in self.glyph_ids_requested - if i >= len(glyph_order)) - if self.glyphs_missing: - self.log("Missing requested glyphs: %s" % self.glyphs_missing) - if not self.options.ignore_missing_glyphs: - raise self.MissingGlyphsSubsettingError(self.glyphs_missing) - - self.glyphs = self.glyphs_requested.copy() - - self.unicodes_missing = set() - if 'cmap' in font: - font['cmap'].closure_glyphs(self) - self.glyphs.intersection_update(realGlyphs) - self.log.lapse("close glyph list over 'cmap'") - self.glyphs_cmaped = frozenset(self.glyphs) - if self.unicodes_missing: - missing = ["U+%04X" % u for u in self.unicodes_missing] - self.log("Missing glyphs for requested Unicodes: %s" % missing) - if not self.options.ignore_missing_unicodes: - raise self.MissingUnicodesSubsettingError(missing) - del missing - - if self.options.notdef_glyph: - if 'glyf' in font: - self.glyphs.add(font.getGlyphName(0)) - self.log("Added gid0 to subset") - else: - self.glyphs.add('.notdef') 
- self.log("Added .notdef to subset") - if self.options.recommended_glyphs: - if 'glyf' in font: - for i in range(min(4, len(font.getGlyphOrder()))): - self.glyphs.add(font.getGlyphName(i)) - self.log("Added first four glyphs to subset") - - if 'GSUB' in font: - self.log("Closing glyph list over 'GSUB': %d glyphs before" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - font['GSUB'].closure_glyphs(self) - self.glyphs.intersection_update(realGlyphs) - self.log("Closed glyph list over 'GSUB': %d glyphs after" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - self.log.lapse("close glyph list over 'GSUB'") - self.glyphs_gsubed = frozenset(self.glyphs) - - if 'glyf' in font: - self.log("Closing glyph list over 'glyf': %d glyphs before" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - font['glyf'].closure_glyphs(self) - self.glyphs.intersection_update(realGlyphs) - self.log("Closed glyph list over 'glyf': %d glyphs after" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - self.log.lapse("close glyph list over 'glyf'") - self.glyphs_glyfed = frozenset(self.glyphs) - - self.glyphs_all = frozenset(self.glyphs) - - self.log("Retaining %d glyphs: " % len(self.glyphs_all)) - - del self.glyphs - - def _subset_glyphs(self, font): - for tag in font.keys(): - if tag == 'GlyphOrder': continue - clazz = ttLib.getTableClass(tag) - - if tag in self.options.no_subset_tables: - self.log(tag, "subsetting not needed") - elif hasattr(clazz, 'subset_glyphs'): - table = font[tag] - self.glyphs = self.glyphs_all - retain = table.subset_glyphs(self) - del self.glyphs - self.log.lapse("subset '%s'" % tag) - if not retain: - self.log(tag, "subsetted to empty; dropped") - del font[tag] - else: - self.log(tag, "subsetted") - else: - self.log(tag, "NOT subset; don't know how to subset; dropped") - del font[tag] - - glyphOrder = font.getGlyphOrder() - glyphOrder = [g for g in glyphOrder if g in self.glyphs_all] - 
font.setGlyphOrder(glyphOrder) - font._buildReverseGlyphOrderDict() - self.log.lapse("subset GlyphOrder") - - def _prune_post_subset(self, font): - for tag in font.keys(): - if tag == 'GlyphOrder': continue - clazz = ttLib.getTableClass(tag) - if hasattr(clazz, 'prune_post_subset'): - table = font[tag] - retain = table.prune_post_subset(self.options) - self.log.lapse("prune '%s'" % tag) - if not retain: - self.log(tag, "pruned to empty; dropped") - del font[tag] - else: - self.log(tag, "pruned") - - def subset(self, font): - - self._prune_pre_subset(font) - self._closure_glyphs(font) - self._subset_glyphs(font) - self._prune_post_subset(font) - - -class Logger(object): - - def __init__(self, verbose=False, xml=False, timing=False): - self.verbose = verbose - self.xml = xml - self.timing = timing - self.last_time = self.start_time = time.time() - - def parse_opts(self, argv): - argv = argv[:] - for v in ['verbose', 'xml', 'timing']: - if "--"+v in argv: - setattr(self, v, True) - argv.remove("--"+v) - return argv - - def __call__(self, *things): - if not self.verbose: - return - print(' '.join(str(x) for x in things)) - - def lapse(self, *things): - if not self.timing: - return - new_time = time.time() - print("Took %0.3fs to %s" %(new_time - self.last_time, - ' '.join(str(x) for x in things))) - self.last_time = new_time - - def glyphs(self, glyphs, font=None): - if not self.verbose: - return - self("Glyph names:", sorted(glyphs)) - if font: - reverseGlyphMap = font.getReverseGlyphMap() - self("Glyph IDs: ", sorted(reverseGlyphMap[g] for g in glyphs)) - - def font(self, font, file=sys.stdout): - if not self.xml: - return - from fontTools.misc import xmlWriter - writer = xmlWriter.XMLWriter(file) - for tag in font.keys(): - writer.begintag(tag) - writer.newline() - font[tag].toXML(writer, font) - writer.endtag(tag) - writer.newline() - - -def load_font(fontFile, - options, - allowVID=False, - checkChecksums=False, - dontLoadGlyphNames=False, - lazy=True): - - font = 
ttLib.TTFont(fontFile, - allowVID=allowVID, - checkChecksums=checkChecksums, - recalcBBoxes=options.recalc_bounds, - recalcTimestamp=options.recalc_timestamp, - lazy=lazy) - - # Hack: - # - # If we don't need glyph names, change 'post' class to not try to - # load them. It avoid lots of headache with broken fonts as well - # as loading time. - # - # Ideally ttLib should provide a way to ask it to skip loading - # glyph names. But it currently doesn't provide such a thing. - # - if dontLoadGlyphNames: - post = ttLib.getTableClass('post') - saved = post.decode_format_2_0 - post.decode_format_2_0 = post.decode_format_3_0 - f = font['post'] - if f.formatType == 2.0: - f.formatType = 3.0 - post.decode_format_2_0 = saved - - return font - -def save_font(font, outfile, options): - if options.flavor and not hasattr(font, 'flavor'): - raise Exception("fonttools version does not support flavors.") - font.flavor = options.flavor - font.save(outfile, reorderTables=options.canonical_order) - -def parse_unicodes(s): - import re - s = re.sub (r"0[xX]", " ", s) - s = re.sub (r"[<+>,;&#\\xXuU\n ]", " ", s) - l = [] - for item in s.split(): - fields = item.split('-') - if len(fields) == 1: - l.append(int(item, 16)) - else: - start,end = fields - l.extend(range(int(start, 16), int(end, 16)+1)) - return l - -def parse_gids(s): - l = [] - for item in s.replace(',', ' ').split(): - fields = item.split('-') - if len(fields) == 1: - l.append(int(fields[0])) - else: - l.extend(range(int(fields[0]), int(fields[1])+1)) - return l - -def parse_glyphs(s): - return s.replace(',', ' ').split() - -def main(args=None): - - if args is None: - args = sys.argv[1:] - - if '--help' in args: - print(__doc__) - sys.exit(0) - - log = Logger() - args = log.parse_opts(args) - - options = Options() - args = options.parse_opts(args, - ignore_unknown=['gids', 'gids-file', - 'glyphs', 'glyphs-file', - 'text', 'text-file', - 'unicodes', 'unicodes-file', - 'output-file']) - - if len(args) < 2: - print("usage:", 
__usage__, file=sys.stderr) - print("Try pyftsubset --help for more information.", file=sys.stderr) - sys.exit(1) - - fontfile = args[0] - args = args[1:] - - subsetter = Subsetter(options=options, log=log) - outfile = fontfile + '.subset' - glyphs = [] - gids = [] - unicodes = [] - wildcard_glyphs = False - wildcard_unicodes = False - text = "" - for g in args: - if g == '*': - wildcard_glyphs = True - continue - if g.startswith('--output-file='): - outfile = g[14:] - continue - if g.startswith('--text='): - text += g[7:] - continue - if g.startswith('--text-file='): - text += open(g[12:]).read().replace('\n', '') - continue - if g.startswith('--unicodes='): - if g[11:] == '*': - wildcard_unicodes = True - else: - unicodes.extend(parse_unicodes(g[11:])) - continue - if g.startswith('--unicodes-file='): - for line in open(g[16:]).readlines(): - unicodes.extend(parse_unicodes(line.split('#')[0])) - continue - if g.startswith('--gids='): - gids.extend(parse_gids(g[7:])) - continue - if g.startswith('--gids-file='): - for line in open(g[12:]).readlines(): - gids.extend(parse_gids(line.split('#')[0])) - continue - if g.startswith('--glyphs='): - if g[9:] == '*': - wildcard_glyphs = True - else: - glyphs.extend(parse_glyphs(g[9:])) - continue - if g.startswith('--glyphs-file='): - for line in open(g[14:]).readlines(): - glyphs.extend(parse_glyphs(line.split('#')[0])) - continue - glyphs.append(g) - - dontLoadGlyphNames = not options.glyph_names and not glyphs - font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames) - log.lapse("load font") - if wildcard_glyphs: - glyphs.extend(font.getGlyphOrder()) - if wildcard_unicodes: - for t in font['cmap'].tables: - if t.isUnicode(): - unicodes.extend(t.cmap.keys()) - assert '' not in glyphs - - log.lapse("compile glyph list") - log("Text: '%s'" % text) - log("Unicodes:", unicodes) - log("Glyphs:", glyphs) - log("Gids:", gids) - - subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text) - 
subsetter.subset(font) - - save_font (font, outfile, options) - log.lapse("compile and save font") - - log.last_time = log.start_time - log.lapse("make one with everything(TOTAL TIME)") - - if log.verbose: - import os - log("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile)) - log("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile)) - - log.font(font) - - font.close() - - -__all__ = [ - 'Options', - 'Subsetter', - 'Logger', - 'load_font', - 'save_font', - 'parse_gids', - 'parse_glyphs', - 'parse_unicodes', - 'main' -] - -if __name__ == '__main__': - main() diff -Nru fonttools-3.0/Snippets/fontTools/svgLib/__init__.py fonttools-3.21.2/Snippets/fontTools/svgLib/__init__.py --- fonttools-3.0/Snippets/fontTools/svgLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/svgLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +from .path import SVGPath, parse_path + +__all__ = ["SVGPath", "parse_path"] diff -Nru fonttools-3.0/Snippets/fontTools/svgLib/path/__init__.py fonttools-3.21.2/Snippets/fontTools/svgLib/path/__init__.py --- fonttools-3.0/Snippets/fontTools/svgLib/path/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/svgLib/path/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,58 @@ +from __future__ import ( + print_function, division, absolute_import, unicode_literals) +from fontTools.misc.py23 import * + +from fontTools.pens.transformPen import TransformPen +from .parser import parse_path + +try: + from xml.etree import cElementTree as ElementTree # python 2 +except ImportError: # pragma nocover + from xml.etree import ElementTree # python 3 + + +__all__ = [tostr(s) for s in ("SVGPath", "parse_path")] + + +class SVGPath(object): + """ Parse SVG ``path`` elements from a file or string, and draw them + onto a glyph object that 
supports the FontTools Pen protocol. + + For example, reading from an SVG file and drawing to a Defcon Glyph: + + import defcon + glyph = defcon.Glyph() + pen = glyph.getPen() + svg = SVGPath("path/to/a.svg") + svg.draw(pen) + + Or reading from a string containing SVG data, using the alternative + 'fromstring' (a class method): + + data = ' elements) + and call a 'pen' object's moveTo, lineTo, curveTo, qCurveTo and closePath + methods. + + If 'current_pos' (2-float tuple) is provided, the initial moveTo will + be relative to that instead being absolute. + + Arc segments (commands "A" or "a") are not currently supported, and raise + NotImplementedError. + """ + # In the SVG specs, initial movetos are absolute, even if + # specified as 'm'. This is the default behavior here as well. + # But if you pass in a current_pos variable, the initial moveto + # will be relative to that current_pos. This is useful. + current_pos = complex(*current_pos) + + elements = list(_tokenize_path(pathdef)) + # Reverse for easy use of .pop() + elements.reverse() + + start_pos = None + command = None + last_control = None + + while elements: + + if elements[-1] in COMMANDS: + # New command. + last_command = command # Used by S and T + command = elements.pop() + absolute = command in UPPERCASE + command = command.upper() + else: + # If this element starts with numbers, it is an implicit command + # and we don't change the command. Check that it's allowed: + if command is None: + raise ValueError("Unallowed implicit command in %s, position %s" % ( + pathdef, len(pathdef.split()) - len(elements))) + last_command = command # Used by S and T + + if command == 'M': + # Moveto command. 
+ x = elements.pop() + y = elements.pop() + pos = float(x) + float(y) * 1j + if absolute: + current_pos = pos + else: + current_pos += pos + + # M is not preceded by Z; it's an open subpath + if start_pos is not None: + pen.endPath() + + pen.moveTo((current_pos.real, current_pos.imag)) + + # when M is called, reset start_pos + # This behavior of Z is defined in svg spec: + # http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand + start_pos = current_pos + + # Implicit moveto commands are treated as lineto commands. + # So we set command to lineto here, in case there are + # further implicit commands after this moveto. + command = 'L' + + elif command == 'Z': + # Close path + if current_pos != start_pos: + pen.lineTo((start_pos.real, start_pos.imag)) + pen.closePath() + current_pos = start_pos + start_pos = None + command = None # You can't have implicit commands after closing. + + elif command == 'L': + x = elements.pop() + y = elements.pop() + pos = float(x) + float(y) * 1j + if not absolute: + pos += current_pos + pen.lineTo((pos.real, pos.imag)) + current_pos = pos + + elif command == 'H': + x = elements.pop() + pos = float(x) + current_pos.imag * 1j + if not absolute: + pos += current_pos.real + pen.lineTo((pos.real, pos.imag)) + current_pos = pos + + elif command == 'V': + y = elements.pop() + pos = current_pos.real + float(y) * 1j + if not absolute: + pos += current_pos.imag * 1j + pen.lineTo((pos.real, pos.imag)) + current_pos = pos + + elif command == 'C': + control1 = float(elements.pop()) + float(elements.pop()) * 1j + control2 = float(elements.pop()) + float(elements.pop()) * 1j + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + control1 += current_pos + control2 += current_pos + end += current_pos + + pen.curveTo((control1.real, control1.imag), + (control2.real, control2.imag), + (end.real, end.imag)) + current_pos = end + last_control = control2 + + elif command == 'S': + # Smooth curve. 
First control point is the "reflection" of + # the second control point in the previous path. + + if last_command not in 'CS': + # If there is no previous command or if the previous command + # was not an C, c, S or s, assume the first control point is + # coincident with the current point. + control1 = current_pos + else: + # The first control point is assumed to be the reflection of + # the second control point on the previous command relative + # to the current point. + control1 = current_pos + current_pos - last_control + + control2 = float(elements.pop()) + float(elements.pop()) * 1j + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + control2 += current_pos + end += current_pos + + pen.curveTo((control1.real, control1.imag), + (control2.real, control2.imag), + (end.real, end.imag)) + current_pos = end + last_control = control2 + + elif command == 'Q': + control = float(elements.pop()) + float(elements.pop()) * 1j + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + control += current_pos + end += current_pos + + pen.qCurveTo((control.real, control.imag), (end.real, end.imag)) + current_pos = end + last_control = control + + elif command == 'T': + # Smooth curve. Control point is the "reflection" of + # the second control point in the previous path. + + if last_command not in 'QT': + # If there is no previous command or if the previous command + # was not an Q, q, T or t, assume the first control point is + # coincident with the current point. + control = current_pos + else: + # The control point is assumed to be the reflection of + # the control point on the previous command relative + # to the current point. 
+ control = current_pos + current_pos - last_control + + end = float(elements.pop()) + float(elements.pop()) * 1j + + if not absolute: + end += current_pos + + pen.qCurveTo((control.real, control.imag), (end.real, end.imag)) + current_pos = end + last_control = control + + elif command == 'A': + raise NotImplementedError('arcs are not supported') + + # no final Z command, it's an open path + if start_pos is not None: + pen.endPath() diff -Nru fonttools-3.0/Snippets/fontTools/t1Lib/__init__.py fonttools-3.21.2/Snippets/fontTools/t1Lib/__init__.py --- fonttools-3.0/Snippets/fontTools/t1Lib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/t1Lib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,369 @@ +"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts (Python2 only) + +Functions for reading and writing raw Type 1 data: + +read(path) + reads any Type 1 font file, returns the raw data and a type indicator: + 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed + to by 'path'. + Raises an error when the file does not contain valid Type 1 data. + +write(path, data, kind='OTHER', dohex=False) + writes raw Type 1 data to the file pointed to by 'path'. + 'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'. + 'dohex' is a flag which determines whether the eexec encrypted + part should be written as hexadecimal or binary, but only if kind + is 'OTHER'. +""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import eexec +from fontTools.misc.macCreatorType import getMacCreatorAndType +import os +import re + +__author__ = "jvr" +__version__ = "1.0b2" +DEBUG = 0 + + +try: + try: + from Carbon import Res + except ImportError: + import Res # MacPython < 2.2 +except ImportError: + haveMacSupport = 0 +else: + haveMacSupport = 1 + + +class T1Error(Exception): pass + + +class T1Font(object): + + """Type 1 font class. 
+ + Uses a minimal interpeter that supports just about enough PS to parse + Type 1 fonts. + """ + + def __init__(self, path=None): + if path is not None: + self.data, type = read(path) + else: + pass # XXX + + def saveAs(self, path, type, dohex=False): + write(path, self.getData(), type, dohex) + + def getData(self): + # XXX Todo: if the data has been converted to Python object, + # recreate the PS stream + return self.data + + def getGlyphSet(self): + """Return a generic GlyphSet, which is a dict-like object + mapping glyph names to glyph objects. The returned glyph objects + have a .draw() method that supports the Pen protocol, and will + have an attribute named 'width', but only *after* the .draw() method + has been called. + + In the case of Type 1, the GlyphSet is simply the CharStrings dict. + """ + return self["CharStrings"] + + def __getitem__(self, key): + if not hasattr(self, "font"): + self.parse() + return self.font[key] + + def parse(self): + from fontTools.misc import psLib + from fontTools.misc import psCharStrings + self.font = psLib.suckfont(self.data) + charStrings = self.font["CharStrings"] + lenIV = self.font["Private"].get("lenIV", 4) + assert lenIV >= 0 + subrs = self.font["Private"]["Subrs"] + for glyphName, charString in charStrings.items(): + charString, R = eexec.decrypt(charString, 4330) + charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:], + subrs=subrs) + for i in range(len(subrs)): + charString, R = eexec.decrypt(subrs[i], 4330) + subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs) + del self.data + + +# low level T1 data read and write functions + +def read(path, onlyHeader=False): + """reads any Type 1 font file, returns raw data""" + normpath = path.lower() + creator, typ = getMacCreatorAndType(path) + if typ == 'LWFN': + return readLWFN(path, onlyHeader), 'LWFN' + if normpath[-4:] == '.pfb': + return readPFB(path, onlyHeader), 'PFB' + else: + return readOther(path), 'OTHER' + +def write(path, 
data, kind='OTHER', dohex=False): + assertType1(data) + kind = kind.upper() + try: + os.remove(path) + except os.error: + pass + err = 1 + try: + if kind == 'LWFN': + writeLWFN(path, data) + elif kind == 'PFB': + writePFB(path, data) + else: + writeOther(path, data, dohex) + err = 0 + finally: + if err and not DEBUG: + try: + os.remove(path) + except os.error: + pass + + +# -- internal -- + +LWFNCHUNKSIZE = 2000 +HEXLINELENGTH = 80 + + +def readLWFN(path, onlyHeader=False): + """reads an LWFN font file, returns raw data""" + from fontTools.misc.macRes import ResourceReader + reader = ResourceReader(path) + try: + data = [] + for res in reader.get('POST', []): + code = byteord(res.data[0]) + if byteord(res.data[1]) != 0: + raise T1Error('corrupt LWFN file') + if code in [1, 2]: + if onlyHeader and code == 2: + break + data.append(res.data[2:]) + elif code in [3, 5]: + break + elif code == 4: + f = open(path, "rb") + data.append(f.read()) + f.close() + elif code == 0: + pass # comment, ignore + else: + raise T1Error('bad chunk code: ' + repr(code)) + finally: + reader.close() + data = bytesjoin(data) + assertType1(data) + return data + +def readPFB(path, onlyHeader=False): + """reads a PFB font file, returns raw data""" + f = open(path, "rb") + data = [] + while True: + if f.read(1) != bytechr(128): + raise T1Error('corrupt PFB file') + code = byteord(f.read(1)) + if code in [1, 2]: + chunklen = stringToLong(f.read(4)) + chunk = f.read(chunklen) + assert len(chunk) == chunklen + data.append(chunk) + elif code == 3: + break + else: + raise T1Error('bad chunk code: ' + repr(code)) + if onlyHeader: + break + f.close() + data = bytesjoin(data) + assertType1(data) + return data + +def readOther(path): + """reads any (font) file, returns raw data""" + f = open(path, "rb") + data = f.read() + f.close() + assertType1(data) + + chunks = findEncryptedChunks(data) + data = [] + for isEncrypted, chunk in chunks: + if isEncrypted and isHex(chunk[:4]): + 
data.append(deHexString(chunk)) + else: + data.append(chunk) + return bytesjoin(data) + +# file writing tools + +def writeLWFN(path, data): + # Res.FSpCreateResFile was deprecated in OS X 10.5 + Res.FSpCreateResFile(path, "just", "LWFN", 0) + resRef = Res.FSOpenResFile(path, 2) # write-only + try: + Res.UseResFile(resRef) + resID = 501 + chunks = findEncryptedChunks(data) + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + while chunk: + res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2]) + res.AddResource('POST', resID, '') + chunk = chunk[LWFNCHUNKSIZE - 2:] + resID = resID + 1 + res = Res.Resource(bytechr(5) + '\0') + res.AddResource('POST', resID, '') + finally: + Res.CloseResFile(resRef) + +def writePFB(path, data): + chunks = findEncryptedChunks(data) + f = open(path, "wb") + try: + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + f.write(bytechr(128) + bytechr(code)) + f.write(longToString(len(chunk))) + f.write(chunk) + f.write(bytechr(128) + bytechr(3)) + finally: + f.close() + +def writeOther(path, data, dohex=False): + chunks = findEncryptedChunks(data) + f = open(path, "wb") + try: + hexlinelen = HEXLINELENGTH // 2 + for isEncrypted, chunk in chunks: + if isEncrypted: + code = 2 + else: + code = 1 + if code == 2 and dohex: + while chunk: + f.write(eexec.hexString(chunk[:hexlinelen])) + f.write(b'\r') + chunk = chunk[hexlinelen:] + else: + f.write(chunk) + finally: + f.close() + + +# decryption tools + +EEXECBEGIN = b"currentfile eexec" +EEXECEND = b'0' * 64 +EEXECINTERNALEND = b"currentfile closefile" +EEXECBEGINMARKER = b"%-- eexec start\r" +EEXECENDMARKER = b"%-- eexec end\r" + +_ishexRE = re.compile(b'[0-9A-Fa-f]*$') + +def isHex(text): + return _ishexRE.match(text) is not None + + +def decryptType1(data): + chunks = findEncryptedChunks(data) + data = [] + for isEncrypted, chunk in chunks: + if isEncrypted: + if isHex(chunk[:4]): + chunk = deHexString(chunk) + 
decrypted, R = eexec.decrypt(chunk, 55665) + decrypted = decrypted[4:] + if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \ + and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND: + raise T1Error("invalid end of eexec part") + decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + b'\r' + data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER) + else: + if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN: + data.append(chunk[:-len(EEXECBEGIN)-1]) + else: + data.append(chunk) + return bytesjoin(data) + +def findEncryptedChunks(data): + chunks = [] + while True: + eBegin = data.find(EEXECBEGIN) + if eBegin < 0: + break + eBegin = eBegin + len(EEXECBEGIN) + 1 + eEnd = data.find(EEXECEND, eBegin) + if eEnd < 0: + raise T1Error("can't find end of eexec part") + cypherText = data[eBegin:eEnd + 2] + if isHex(cypherText[:4]): + cypherText = deHexString(cypherText) + plainText, R = eexec.decrypt(cypherText, 55665) + eEndLocal = plainText.find(EEXECINTERNALEND) + if eEndLocal < 0: + raise T1Error("can't find end of eexec part") + chunks.append((0, data[:eBegin])) + chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1])) + data = data[eEnd:] + chunks.append((0, data)) + return chunks + +def deHexString(hexstring): + return eexec.deHexString(bytesjoin(hexstring.split())) + + +# Type 1 assertion + +_fontType1RE = re.compile(br"/FontType\s+1\s+def") + +def assertType1(data): + for head in [b'%!PS-AdobeFont', b'%!FontType1']: + if data[:len(head)] == head: + break + else: + raise T1Error("not a PostScript font") + if not _fontType1RE.search(data): + raise T1Error("not a Type 1 font") + if data.find(b"currentfile eexec") < 0: + raise T1Error("not an encrypted Type 1 font") + # XXX what else? 
+ return data + + +# pfb helpers + +def longToString(long): + s = b"" + for i in range(4): + s += bytechr((long & (0xff << (i * 8))) >> i * 8) + return s + +def stringToLong(s): + if len(s) != 4: + raise ValueError('string must be 4 bytes long') + l = 0 + for i in range(4): + l += byteord(s[i]) << (i * 8) + return l diff -Nru fonttools-3.0/Snippets/fontTools/t1Lib.py fonttools-3.21.2/Snippets/fontTools/t1Lib.py --- fonttools-3.0/Snippets/fontTools/t1Lib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/t1Lib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,371 +0,0 @@ -"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts - -Functions for reading and writing raw Type 1 data: - -read(path) - reads any Type 1 font file, returns the raw data and a type indicator: - 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed - to by 'path'. - Raises an error when the file does not contain valid Type 1 data. - -write(path, data, kind='OTHER', dohex=False) - writes raw Type 1 data to the file pointed to by 'path'. - 'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'. - 'dohex' is a flag which determines whether the eexec encrypted - part should be written as hexadecimal or binary, but only if kind - is 'LWFN' or 'PFB'. -""" -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import eexec -from fontTools.misc.macCreatorType import getMacCreatorAndType -import os -import re - -__author__ = "jvr" -__version__ = "1.0b2" -DEBUG = 0 - - -try: - try: - from Carbon import Res - except ImportError: - import Res # MacPython < 2.2 -except ImportError: - haveMacSupport = 0 -else: - haveMacSupport = 1 - import MacOS - - -class T1Error(Exception): pass - - -class T1Font(object): - - """Type 1 font class. - - Uses a minimal interpeter that supports just about enough PS to parse - Type 1 fonts. 
- """ - - def __init__(self, path=None): - if path is not None: - self.data, type = read(path) - else: - pass # XXX - - def saveAs(self, path, type): - write(path, self.getData(), type) - - def getData(self): - # XXX Todo: if the data has been converted to Python object, - # recreate the PS stream - return self.data - - def getGlyphSet(self): - """Return a generic GlyphSet, which is a dict-like object - mapping glyph names to glyph objects. The returned glyph objects - have a .draw() method that supports the Pen protocol, and will - have an attribute named 'width', but only *after* the .draw() method - has been called. - - In the case of Type 1, the GlyphSet is simply the CharStrings dict. - """ - return self["CharStrings"] - - def __getitem__(self, key): - if not hasattr(self, "font"): - self.parse() - return self.font[key] - - def parse(self): - from fontTools.misc import psLib - from fontTools.misc import psCharStrings - self.font = psLib.suckfont(self.data) - charStrings = self.font["CharStrings"] - lenIV = self.font["Private"].get("lenIV", 4) - assert lenIV >= 0 - subrs = self.font["Private"]["Subrs"] - for glyphName, charString in charStrings.items(): - charString, R = eexec.decrypt(charString, 4330) - charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:], - subrs=subrs) - for i in range(len(subrs)): - charString, R = eexec.decrypt(subrs[i], 4330) - subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs) - del self.data - - -# low level T1 data read and write functions - -def read(path, onlyHeader=False): - """reads any Type 1 font file, returns raw data""" - normpath = path.lower() - creator, typ = getMacCreatorAndType(path) - if typ == 'LWFN': - return readLWFN(path, onlyHeader), 'LWFN' - if normpath[-4:] == '.pfb': - return readPFB(path, onlyHeader), 'PFB' - else: - return readOther(path), 'OTHER' - -def write(path, data, kind='OTHER', dohex=False): - assertType1(data) - kind = kind.upper() - try: - os.remove(path) - except 
os.error: - pass - err = 1 - try: - if kind == 'LWFN': - writeLWFN(path, data) - elif kind == 'PFB': - writePFB(path, data) - else: - writeOther(path, data, dohex) - err = 0 - finally: - if err and not DEBUG: - try: - os.remove(path) - except os.error: - pass - - -# -- internal -- - -LWFNCHUNKSIZE = 2000 -HEXLINELENGTH = 80 - - -def readLWFN(path, onlyHeader=False): - """reads an LWFN font file, returns raw data""" - resRef = Res.FSOpenResFile(path, 1) # read-only - try: - Res.UseResFile(resRef) - n = Res.Count1Resources('POST') - data = [] - for i in range(501, 501 + n): - res = Res.Get1Resource('POST', i) - code = byteord(res.data[0]) - if byteord(res.data[1]) != 0: - raise T1Error('corrupt LWFN file') - if code in [1, 2]: - if onlyHeader and code == 2: - break - data.append(res.data[2:]) - elif code in [3, 5]: - break - elif code == 4: - f = open(path, "rb") - data.append(f.read()) - f.close() - elif code == 0: - pass # comment, ignore - else: - raise T1Error('bad chunk code: ' + repr(code)) - finally: - Res.CloseResFile(resRef) - data = bytesjoin(data) - assertType1(data) - return data - -def readPFB(path, onlyHeader=False): - """reads a PFB font file, returns raw data""" - f = open(path, "rb") - data = [] - while True: - if f.read(1) != bytechr(128): - raise T1Error('corrupt PFB file') - code = byteord(f.read(1)) - if code in [1, 2]: - chunklen = stringToLong(f.read(4)) - chunk = f.read(chunklen) - assert len(chunk) == chunklen - data.append(chunk) - elif code == 3: - break - else: - raise T1Error('bad chunk code: ' + repr(code)) - if onlyHeader: - break - f.close() - data = bytesjoin(data) - assertType1(data) - return data - -def readOther(path): - """reads any (font) file, returns raw data""" - f = open(path, "rb") - data = f.read() - f.close() - assertType1(data) - - chunks = findEncryptedChunks(data) - data = [] - for isEncrypted, chunk in chunks: - if isEncrypted and isHex(chunk[:4]): - data.append(deHexString(chunk)) - else: - data.append(chunk) - return 
bytesjoin(data) - -# file writing tools - -def writeLWFN(path, data): - Res.FSpCreateResFile(path, "just", "LWFN", 0) - resRef = Res.FSOpenResFile(path, 2) # write-only - try: - Res.UseResFile(resRef) - resID = 501 - chunks = findEncryptedChunks(data) - for isEncrypted, chunk in chunks: - if isEncrypted: - code = 2 - else: - code = 1 - while chunk: - res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2]) - res.AddResource('POST', resID, '') - chunk = chunk[LWFNCHUNKSIZE - 2:] - resID = resID + 1 - res = Res.Resource(bytechr(5) + '\0') - res.AddResource('POST', resID, '') - finally: - Res.CloseResFile(resRef) - -def writePFB(path, data): - chunks = findEncryptedChunks(data) - f = open(path, "wb") - try: - for isEncrypted, chunk in chunks: - if isEncrypted: - code = 2 - else: - code = 1 - f.write(bytechr(128) + bytechr(code)) - f.write(longToString(len(chunk))) - f.write(chunk) - f.write(bytechr(128) + bytechr(3)) - finally: - f.close() - -def writeOther(path, data, dohex=False): - chunks = findEncryptedChunks(data) - f = open(path, "wb") - try: - hexlinelen = HEXLINELENGTH // 2 - for isEncrypted, chunk in chunks: - if isEncrypted: - code = 2 - else: - code = 1 - if code == 2 and dohex: - while chunk: - f.write(eexec.hexString(chunk[:hexlinelen])) - f.write('\r') - chunk = chunk[hexlinelen:] - else: - f.write(chunk) - finally: - f.close() - - -# decryption tools - -EEXECBEGIN = "currentfile eexec" -EEXECEND = '0' * 64 -EEXECINTERNALEND = "currentfile closefile" -EEXECBEGINMARKER = "%-- eexec start\r" -EEXECENDMARKER = "%-- eexec end\r" - -_ishexRE = re.compile('[0-9A-Fa-f]*$') - -def isHex(text): - return _ishexRE.match(text) is not None - - -def decryptType1(data): - chunks = findEncryptedChunks(data) - data = [] - for isEncrypted, chunk in chunks: - if isEncrypted: - if isHex(chunk[:4]): - chunk = deHexString(chunk) - decrypted, R = eexec.decrypt(chunk, 55665) - decrypted = decrypted[4:] - if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND 
\ - and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND: - raise T1Error("invalid end of eexec part") - decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + '\r' - data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER) - else: - if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN: - data.append(chunk[:-len(EEXECBEGIN)-1]) - else: - data.append(chunk) - return bytesjoin(data) - -def findEncryptedChunks(data): - chunks = [] - while True: - eBegin = data.find(EEXECBEGIN) - if eBegin < 0: - break - eBegin = eBegin + len(EEXECBEGIN) + 1 - eEnd = data.find(EEXECEND, eBegin) - if eEnd < 0: - raise T1Error("can't find end of eexec part") - cypherText = data[eBegin:eEnd + 2] - if isHex(cypherText[:4]): - cypherText = deHexString(cypherText) - plainText, R = eexec.decrypt(cypherText, 55665) - eEndLocal = plainText.find(EEXECINTERNALEND) - if eEndLocal < 0: - raise T1Error("can't find end of eexec part") - chunks.append((0, data[:eBegin])) - chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1])) - data = data[eEnd:] - chunks.append((0, data)) - return chunks - -def deHexString(hexstring): - return eexec.deHexString(strjoin(hexstring.split())) - - -# Type 1 assertion - -_fontType1RE = re.compile(br"/FontType\s+1\s+def") - -def assertType1(data): - for head in [b'%!PS-AdobeFont', b'%!FontType1']: - if data[:len(head)] == head: - break - else: - raise T1Error("not a PostScript font") - if not _fontType1RE.search(data): - raise T1Error("not a Type 1 font") - if data.find(b"currentfile eexec") < 0: - raise T1Error("not an encrypted Type 1 font") - # XXX what else? 
- return data - - -# pfb helpers - -def longToString(long): - s = "" - for i in range(4): - s += bytechr((long & (0xff << (i * 8))) >> i * 8) - return s - -def stringToLong(s): - if len(s) != 4: - raise ValueError('string must be 4 bytes long') - l = 0 - for i in range(4): - l += byteord(s[i]) << (i * 8) - return l diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/__init__.py fonttools-3.21.2/Snippets/fontTools/ttLib/__init__.py --- fonttools-3.0/Snippets/fontTools/ttLib/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -8,15 +8,15 @@ Python 1.5.2c1 (#43, Mar 9 1999, 13:06:43) [CW PPC w/GUSI w/MSL] Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam ->>> from fontTools import ttLib ->>> tt = ttLib.TTFont("afont.ttf") ->>> tt['maxp'].numGlyphs +>> from fontTools import ttLib +>> tt = ttLib.TTFont("afont.ttf") +>> tt['maxp'].numGlyphs 242 ->>> tt['OS/2'].achVendID +>> tt['OS/2'].achVendID 'B&H\000' ->>> tt['head'].unitsPerEm +>> tt['head'].unitsPerEm 2048 ->>> tt.saveXML("afont.ttx") +>> tt.saveXML("afont.ttx") Dumping 'LTSH' table... Dumping 'OS/2' table... Dumping 'VDMX' table... @@ -33,28 +33,23 @@ Dumping 'name' table... Dumping 'post' table... Dumping 'prep' table... ->>> tt2 = ttLib.TTFont() ->>> tt2.importXML("afont.ttx") ->>> tt2['maxp'].numGlyphs +>> tt2 = ttLib.TTFont() +>> tt2.importXML("afont.ttx") +>> tt2['maxp'].numGlyphs 242 ->>> +>> """ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +from fontTools.misc.loggingTools import deprecateArgument, deprecateFunction import os import sys +import logging -haveMacSupport = 0 -if sys.platform == "mac": - haveMacSupport = 1 -elif sys.platform == "darwin": - if sys.version_info[:3] != (2, 2, 0) and sys.version_info[:1] < (3,): - # Python 2.2's Mac support is broken, so don't enable it there. 
- # Python 3 does not have Res used by macUtils - haveMacSupport = 1 +log = logging.getLogger(__name__) class TTLibError(Exception): pass @@ -69,8 +64,8 @@ def __init__(self, file=None, res_name_or_index=None, sfntVersion="\000\001\000\000", flavor=None, checkChecksums=False, - verbose=False, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False, - recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=False): + verbose=None, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False, + recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=None): """The constructor can be called with a few different arguments. When reading a font from disk, 'file' should be either a pathname @@ -91,13 +86,15 @@ The TTFont constructor can also be called without a 'file' argument: this is the way to create a new empty font. In this case you can optionally supply the 'sfntVersion' argument, - and a 'flavor' which can be None, or 'woff'. + and a 'flavor' which can be None, 'woff', or 'woff2'. If the recalcBBoxes argument is false, a number of things will *not* be recalculated upon save/compile: - 1) glyph bounding boxes - 2) maxp font bounding box - 3) hhea min/max values + 1) 'glyf' glyph bounding boxes + 2) 'CFF ' font bounding box + 3) 'head' font bounding box + 4) 'hhea' min/max values + 5) 'vhea' min/max values (1) is needed for certain kinds of CJK fonts (ask Werner Lemberg ;-). Additionally, upon importing an TTX file, this option cause glyphs to be compiled right away. 
This should reduce memory consumption @@ -128,8 +125,13 @@ """ from fontTools.ttLib import sfnt - self.verbose = verbose - self.quiet = quiet + + for name in ("verbose", "quiet"): + val = locals().get(name) + if val is not None: + deprecateArgument(name, "configure logging instead") + setattr(self, name, val) + self.lazy = lazy self.recalcBBoxes = recalcBBoxes self.recalcTimestamp = recalcTimestamp @@ -151,8 +153,8 @@ if not hasattr(file, "read"): closeStream = True # assume file is a string - if haveMacSupport and res_name_or_index is not None: - # on the mac, we deal with sfnt resources as well as flat files + if res_name_or_index is not None: + # see if it contains 'sfnt' resources in the resource or data fork from . import macUtils if res_name_or_index == 0: if macUtils.getSFNTResIndices(file): @@ -168,14 +170,16 @@ else: # assume "file" is a readable file object closeStream = False - # read input file in memory and wrap a stream around it to allow overwriting - tmp = BytesIO(file.read()) - if hasattr(file, 'name'): - # save reference to input file name - tmp.name = file.name - if closeStream: - file.close() - self.reader = sfnt.SFNTReader(tmp, checkChecksums, fontNumber=fontNumber) + if not self.lazy: + # read input file in memory and wrap a stream around it to allow overwriting + tmp = BytesIO(file.read()) + if hasattr(file, 'name'): + # save reference to input file name + tmp.name = file.name + if closeStream: + file.close() + file = tmp + self.reader = sfnt.SFNTReader(file, checkChecksums, fontNumber=fontNumber) self.sfntVersion = self.reader.sfntVersion self.flavor = self.reader.flavor self.flavorData = self.reader.flavorData @@ -185,28 +189,24 @@ if self.reader is not None: self.reader.close() - def save(self, file, makeSuitcase=False, reorderTables=True): + def save(self, file, reorderTables=True): """Save the font to disk. Similarly to the constructor, the 'file' argument can be either a pathname or a writable file object. 
- - On the Mac, if makeSuitcase is true, a suitcase (resource fork) - file will we made instead of a flat .ttf file. """ from fontTools.ttLib import sfnt if not hasattr(file, "write"): - closeStream = 1 - if os.name == "mac" and makeSuitcase: - from . import macUtils - file = macUtils.SFNTResourceWriter(file, self) - else: - file = open(file, "wb") - if os.name == "mac": - from fontTools.misc.macCreator import setMacCreatorAndType - setMacCreatorAndType(file.name, 'mdos', 'BINA') + if self.lazy and self.reader.file.name == file: + raise TTLibError( + "Can't overwrite TTFont when 'lazy' attribute is True") + closeStream = True + file = open(file, "wb") else: # assume "file" is a writable file object - closeStream = 0 + closeStream = False + + if self.recalcTimestamp and 'head' in self: + self['head'] # make sure 'head' is loaded so the recalculation is actually done tags = list(self.keys()) if "GlyphOrder" in tags: @@ -245,9 +245,9 @@ if closeStream: file.close() - def saveXML(self, fileOrPath, progress=None, quiet=False, + def saveXML(self, fileOrPath, progress=None, quiet=None, tables=None, skipTables=None, splitTables=False, disassembleInstructions=True, - bitmapGlyphDataFormat='raw'): + bitmapGlyphDataFormat='raw', newlinestr=None): """Export the font as TTX (an XML-based text file), or as a series of text files when splitTables is true. In the latter case, the 'fileOrPath' argument should be a path to a directory. 
@@ -258,6 +258,13 @@ from fontTools import version from fontTools.misc import xmlWriter + # only write the MAJOR.MINOR version in the 'ttLibVersion' attribute of + # TTX files' root element (without PATCH or .dev suffixes) + version = ".".join(version.split('.')[:2]) + + if quiet is not None: + deprecateArgument("quiet", "configure logging instead") + self.disassembleInstructions = disassembleInstructions self.bitmapGlyphDataFormat = bitmapGlyphDataFormat if not tables: @@ -275,8 +282,9 @@ else: idlefunc = None - writer = xmlWriter.XMLWriter(fileOrPath, idlefunc=idlefunc) - writer.begintag("ttFont", sfntVersion=repr(self.sfntVersion)[1:-1], + writer = xmlWriter.XMLWriter(fileOrPath, idlefunc=idlefunc, + newlinestr=newlinestr) + writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1], ttLibVersion=version) writer.newline() @@ -293,7 +301,8 @@ tag = tables[i] if splitTables: tablePath = fileNameTemplate % tagToIdentifier(tag) - tableWriter = xmlWriter.XMLWriter(tablePath, idlefunc=idlefunc) + tableWriter = xmlWriter.XMLWriter(tablePath, idlefunc=idlefunc, + newlinestr=newlinestr) tableWriter.begintag("ttFont", ttLibVersion=version) tableWriter.newline() tableWriter.newline() @@ -301,7 +310,7 @@ writer.newline() else: tableWriter = writer - self._tableToXML(tableWriter, tag, progress, quiet) + self._tableToXML(tableWriter, tag, progress) if splitTables: tableWriter.endtag("ttFont") tableWriter.newline() @@ -310,11 +319,14 @@ progress.set((i + 1)) writer.endtag("ttFont") writer.newline() - writer.close() - if self.verbose: - debugmsg("Done dumping TTX") - - def _tableToXML(self, writer, tag, progress, quiet): + # close if 'fileOrPath' is a path; leave it open if it's a file. 
+ # The special string "-" means standard output so leave that open too + if not hasattr(fileOrPath, "write") and fileOrPath != "-": + writer.close() + + def _tableToXML(self, writer, tag, progress, quiet=None): + if quiet is not None: + deprecateArgument("quiet", "configure logging instead") if tag in self: table = self[tag] report = "Dumping '%s' table..." % tag @@ -322,11 +334,7 @@ report = "No '%s' table found." % tag if progress: progress.setLabel(report) - elif self.verbose: - debugmsg(report) - else: - if not quiet: - print(report) + log.info(report) if tag not in self: return xmlTag = tagToXML(tag) @@ -346,10 +354,13 @@ writer.newline() writer.newline() - def importXML(self, file, progress=None, quiet=False): + def importXML(self, fileOrPath, progress=None, quiet=None): """Import a TTX file (an XML-based text format), so as to recreate a font object. """ + if quiet is not None: + deprecateArgument("quiet", "configure logging instead") + if "maxp" in self and "post" in self: # Make sure the glyph order is loaded, as it otherwise gets # lost if the XML doesn't contain the glyph order, yet does @@ -359,7 +370,7 @@ from fontTools.misc import xmlReader - reader = xmlReader.XMLReader(file, self, progress, quiet) + reader = xmlReader.XMLReader(fileOrPath, self, progress) reader.read() def isLoaded(self, tag): @@ -405,21 +416,20 @@ return table if self.reader is not None: import traceback - if self.verbose: - debugmsg("Reading '%s' table from disk" % tag) + log.debug("Reading '%s' table from disk", tag) data = self.reader[tag] tableClass = getTableClass(tag) table = tableClass(tag) self.tables[tag] = table - if self.verbose: - debugmsg("Decompiling '%s' table" % tag) + log.debug("Decompiling '%s' table", tag) try: table.decompile(data, self) except: if not self.ignoreDecompileErrors: raise # fall back to DefaultTable, retaining the binary table data - print("An exception occurred during the decompilation of the '%s' table" % tag) + log.exception( + "An exception 
occurred during the decompilation of the '%s' table", tag) from .tables.DefaultTable import DefaultTable file = StringIO() traceback.print_exc(file=file) @@ -511,50 +521,47 @@ # Set the glyph order, so the cmap parser has something # to work with (so we don't get called recursively). self.glyphOrder = glyphOrder - # Get a (new) temporary cmap (based on the just invented names) - try: - tempcmap = self['cmap'].getcmap(3, 1) - except KeyError: - tempcmap = None - if tempcmap is not None: - # we have a unicode cmap - from fontTools import agl - cmap = tempcmap.cmap - # create a reverse cmap dict - reversecmap = {} - for unicode, name in list(cmap.items()): - reversecmap[name] = unicode - allNames = {} - for i in range(numGlyphs): - tempName = glyphOrder[i] - if tempName in reversecmap: - unicode = reversecmap[tempName] - if unicode in agl.UV2AGL: - # get name from the Adobe Glyph List - glyphName = agl.UV2AGL[unicode] - else: - # create uni name - glyphName = "uni%04X" % unicode - tempName = glyphName - n = allNames.get(tempName, 0) - if n: - tempName = glyphName + "#" + str(n) - glyphOrder[i] = tempName - allNames[tempName] = n + 1 - # Delete the temporary cmap table from the cache, so it can - # be parsed again with the right names. - del self.tables['cmap'] - else: - pass # no unicode cmap available, stick with the invented names + + # Make up glyph names based on the reversed cmap table. Because some + # glyphs (eg. ligatures or alternates) may not be reachable via cmap, + # this naming table will usually not cover all glyphs in the font. + # If the font has no Unicode cmap table, reversecmap will be empty. + reversecmap = self['cmap'].buildReversed() + useCount = {} + for i in range(numGlyphs): + tempName = glyphOrder[i] + if tempName in reversecmap: + # If a font maps both U+0041 LATIN CAPITAL LETTER A and + # U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph, + # we prefer naming the glyph as "A". 
+ glyphName = self._makeGlyphName(min(reversecmap[tempName])) + numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1 + if numUses > 1: + glyphName = "%s.alt%d" % (glyphName, numUses - 1) + glyphOrder[i] = glyphName + + # Delete the temporary cmap table from the cache, so it can + # be parsed again with the right names. + del self.tables['cmap'] self.glyphOrder = glyphOrder if cmapLoading: # restore partially loaded cmap, so it can continue loading # using the proper names. self.tables['cmap'] = cmapLoading + @staticmethod + def _makeGlyphName(codepoint): + from fontTools import agl # Adobe Glyph List + if codepoint in agl.UV2AGL: + return agl.UV2AGL[codepoint] + elif codepoint <= 0xFFFF: + return "uni%04X" % codepoint + else: + return "u%X" % codepoint + def getGlyphNames(self): """Get a list of glyph names, sorted alphabetically.""" - glyphNames = sorted(self.getGlyphOrder()[:]) + glyphNames = sorted(self.getGlyphOrder()) return glyphNames def getGlyphNames2(self): @@ -651,8 +658,7 @@ else: done.append(masterTable) tabledata = self.getTableData(tag) - if self.verbose: - debugmsg("writing '%s' table to disk" % tag) + log.debug("writing '%s' table to disk", tag) writer[tag] = tabledata done.append(tag) @@ -661,12 +667,10 @@ """ tag = Tag(tag) if self.isLoaded(tag): - if self.verbose: - debugmsg("compiling '%s' table" % tag) + log.debug("compiling '%s' table", tag) return self.tables[tag].compile(self) elif self.reader and tag in self.reader: - if self.verbose: - debugmsg("Reading '%s' table from disk" % tag) + log.debug("Reading '%s' table from disk", tag) return self.reader[tag] else: raise KeyError(tag) @@ -677,14 +681,18 @@ have a .draw() method that supports the Pen protocol, and will have an attribute named 'width'. - If the font is CFF-based, the outlines will be taken from the 'CFF ' - table. Otherwise the outlines will be taken from the 'glyf' table. 
- If the font contains both a 'CFF ' and a 'glyf' table, you can use - the 'preferCFF' argument to specify which one should be taken. + If the font is CFF-based, the outlines will be taken from the 'CFF ' or + 'CFF2' tables. Otherwise the outlines will be taken from the 'glyf' table. + If the font contains both a 'CFF '/'CFF2' and a 'glyf' table, you can use + the 'preferCFF' argument to specify which one should be taken. If the + font contains both a 'CFF ' and a 'CFF2' table, the latter is taken. """ glyphs = None - if (preferCFF and "CFF " in self) or "glyf" not in self: - glyphs = _TTGlyphSet(self, list(self["CFF "].cff.values())[0].CharStrings, _TTGlyphCFF) + if (preferCFF and any(tb in self for tb in ["CFF ", "CFF2"]) or + ("glyf" not in self and any(tb in self for tb in ["CFF ", "CFF2"]))): + table_tag = "CFF2" if "CFF2" in self else "CFF " + glyphs = _TTGlyphSet(self, + list(self[table_tag].cff.values())[0].CharStrings, _TTGlyphCFF) if glyphs is None and "glyf" in self: glyphs = _TTGlyphSet(self, self["glyf"], _TTGlyphGlyf) @@ -694,6 +702,17 @@ return glyphs + def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))): + """Return the 'best' unicode cmap dictionary available in the font, + or None, if no unicode cmap subtable is available. + + By default it will search for the following (platformID, platEncID) + pairs: + (3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0) + This can be customized via the cmapPreferences argument. 
+ """ + return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences) + class _TTGlyphSet(object): @@ -704,6 +723,7 @@ def __init__(self, ttFont, glyphs, glyphType): self._glyphs = glyphs self._hmtx = ttFont['hmtx'] + self._vmtx = ttFont['vmtx'] if 'vmtx' in ttFont else None self._glyphType = glyphType def keys(self): @@ -715,7 +735,10 @@ __contains__ = has_key def __getitem__(self, glyphName): - return self._glyphType(self, self._glyphs[glyphName], self._hmtx[glyphName]) + horizontalMetrics = self._hmtx[glyphName] + verticalMetrics = self._vmtx[glyphName] if self._vmtx else None + return self._glyphType( + self, self._glyphs[glyphName], horizontalMetrics, verticalMetrics) def get(self, glyphName, default=None): try: @@ -727,13 +750,21 @@ """Wrapper for a TrueType glyph that supports the Pen protocol, meaning that it has a .draw() method that takes a pen object as its only - argument. Additionally there is a 'width' attribute. + argument. Additionally there are 'width' and 'lsb' attributes, read from + the 'hmtx' table. + + If the font contains a 'vmtx' table, there will also be 'height' and 'tsb' + attributes. """ - def __init__(self, glyphset, glyph, metrics): + def __init__(self, glyphset, glyph, horizontalMetrics, verticalMetrics=None): self._glyphset = glyphset self._glyph = glyph - self.width, self.lsb = metrics + self.width, self.lsb = horizontalMetrics + if verticalMetrics: + self.height, self.tsb = verticalMetrics + else: + self.height, self.tsb = None, None def draw(self, pen): """Draw the glyph onto Pen. 
See fontTools.pens.basePen for details @@ -918,6 +949,7 @@ return Tag(tag + " " * (4 - len(tag))) +@deprecateFunction("use logging instead", category=DeprecationWarning) def debugmsg(msg): import time print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time()))) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/macUtils.py fonttools-3.21.2/Snippets/fontTools/ttLib/macUtils.py --- fonttools-3.0/Snippets/fontTools/ttLib/macUtils.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/macUtils.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,37 +1,18 @@ """ttLib.macUtils.py -- Various Mac-specific stuff.""" - from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -import sys -import os -if sys.platform not in ("mac", "darwin"): - raise ImportError("This module is Mac-only!") -try: - from Carbon import Res -except ImportError: - import Res - - -def MyOpenResFile(path): - mode = 1 # read only - try: - resref = Res.FSOpenResFile(path, mode) - except Res.Error: - # try data fork - resref = Res.FSOpenResourceFile(path, unicode(), mode) - return resref +from fontTools.misc.macRes import ResourceReader, ResourceError def getSFNTResIndices(path): - """Determine whether a file has a resource fork or not.""" + """Determine whether a file has a 'sfnt' resource fork or not.""" try: - resref = MyOpenResFile(path) - except Res.Error: + reader = ResourceReader(path) + indices = reader.getIndices('sfnt') + reader.close() + return indices + except ResourceError: return [] - Res.UseResFile(resref) - numSFNTs = Res.Count1Resources('sfnt') - Res.CloseResFile(resref) - return list(range(1, numSFNTs + 1)) def openTTFonts(path): @@ -53,21 +34,20 @@ return fonts -class SFNTResourceReader(object): +class SFNTResourceReader(BytesIO): - """Simple (Mac-only) read-only file wrapper for 'sfnt' resources.""" + """Simple read-only file wrapper for 'sfnt' resources.""" def __init__(self, path, res_name_or_index): - resref = 
MyOpenResFile(path) - Res.UseResFile(resref) + from fontTools import ttLib + reader = ResourceReader(path) if isinstance(res_name_or_index, basestring): - res = Res.Get1NamedResource('sfnt', res_name_or_index) + rsrc = reader.getNamedResource('sfnt', res_name_or_index) else: - res = Res.Get1IndResource('sfnt', res_name_or_index) - self.file = BytesIO(res.data) - Res.CloseResFile(resref) + rsrc = reader.getIndResource('sfnt', res_name_or_index) + if rsrc is None: + raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index) + reader.close() + self.rsrc = rsrc + super(SFNTResourceReader, self).__init__(rsrc.data) self.name = path - - def __getattr__(self, attr): - # cheap inheritance - return getattr(self.file, attr) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/sfnt.py fonttools-3.21.2/Snippets/fontTools/ttLib/sfnt.py --- fonttools-3.0/Snippets/fontTools/ttLib/sfnt.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/sfnt.py 2018-01-08 12:40:40.000000000 +0000 @@ -18,6 +18,10 @@ from fontTools.ttLib import getSearchRange import struct from collections import OrderedDict +import logging + + +log = logging.getLogger(__name__) class SFNTReader(object): @@ -84,12 +88,13 @@ if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"): from fontTools import ttLib raise ttLib.TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") - self.tables = OrderedDict() + tables = {} for i in range(self.numTables): entry = self.DirectoryEntry() entry.fromFile(self.file) tag = Tag(entry.tag) - self.tables[tag] = entry + tables[tag] = entry + self.tables = OrderedDict(sorted(tables.items(), key=lambda i: i[1].offset)) # Load flavor data if any if self.flavor == "woff": @@ -117,8 +122,8 @@ # Be obnoxious, and barf when it's wrong assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag elif checksum != entry.checkSum: - # Be friendly, and just print a warning. 
- print("bad checksum for '%s' table" % tag) + # Be friendly, and just log a warning. + log.warning("bad checksum for '%s' table", tag) return data def __delitem__(self, tag): @@ -128,6 +133,46 @@ self.file.close() +# default compression level for WOFF 1.0 tables and metadata +ZLIB_COMPRESSION_LEVEL = 6 + +# if set to True, use zopfli instead of zlib for compressing WOFF 1.0. +# The Python bindings are available at https://pypi.python.org/pypi/zopfli +USE_ZOPFLI = False + +# mapping between zlib's compression levels and zopfli's 'numiterations'. +# Use lower values for files over several MB in size or it will be too slow +ZOPFLI_LEVELS = { + # 0: 0, # can't do 0 iterations... + 1: 1, + 2: 3, + 3: 5, + 4: 8, + 5: 10, + 6: 15, + 7: 25, + 8: 50, + 9: 100, +} + + +def compress(data, level=ZLIB_COMPRESSION_LEVEL): + """ Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True, + zopfli is used instead of the zlib module. + The compression 'level' must be between 0 and 9. 1 gives best speed, + 9 gives best compression (0 gives no compression at all). + The default value is a compromise between speed and compression (6). 
+ """ + if not (0 <= level <= 9): + raise ValueError('Bad compression level: %s' % level) + if not USE_ZOPFLI or level == 0: + from zlib import compress + return compress(data, level) + else: + from zopfli.zlib import compress + return compress(data, numiterations=ZOPFLI_LEVELS[level]) + + class SFNTWriter(object): def __new__(cls, *args, **kwargs): @@ -241,8 +286,7 @@ self.metaOrigLength = len(data.metaData) self.file.seek(0,2) self.metaOffset = self.file.tell() - import zlib - compressedMetaData = zlib.compress(data.metaData) + compressedMetaData = compress(data.metaData) self.metaLength = len(compressedMetaData) self.file.write(compressedMetaData) else: @@ -434,7 +478,17 @@ format = woffDirectoryEntryFormat formatSize = woffDirectoryEntrySize - zlibCompressionLevel = 6 + + def __init__(self): + super(WOFFDirectoryEntry, self).__init__() + # With fonttools<=3.1.2, the only way to set a different zlib + # compression level for WOFF directory entries was to set the class + # attribute 'zlibCompressionLevel'. This is now replaced by a globally + # defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when + # compressing the metadata. For backward compatibility, we still + # use the class attribute if it was already set. 
+ if not hasattr(WOFFDirectoryEntry, 'zlibCompressionLevel'): + self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL def decodeData(self, rawData): import zlib @@ -443,14 +497,13 @@ else: assert self.length < self.origLength data = zlib.decompress(rawData) - assert len (data) == self.origLength + assert len(data) == self.origLength return data def encodeData(self, data): - import zlib self.origLength = len(data) if not self.uncompressed: - compressedData = zlib.compress(data, self.zlibCompressionLevel) + compressedData = compress(data, self.zlibCompressionLevel) if self.uncompressed or len(compressedData) >= self.origLength: # Encode uncompressed rawData = data diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_a_n_k_r.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_a_n_k_r.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_a_n_k_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_a_n_k_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,13 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# The anchor point table provides a way to define anchor points. +# These are points within the coordinate space of a given glyph, +# independent of the control points used to render the glyph. +# Anchor points are used in conjunction with the 'kerx' table. 
+# +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html +class table__a_n_k_r(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/asciiTable.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/asciiTable.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/asciiTable.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/asciiTable.py 2018-01-08 12:40:40.000000000 +0000 @@ -12,11 +12,11 @@ data = strjoin(data) writer.begintag("source") writer.newline() - writer.write_noindent(data.replace("\r", "\n")) + writer.write_noindent(data) writer.newline() writer.endtag("source") writer.newline() def fromXML(self, name, attrs, content, ttFont): - lines = strjoin(content).replace("\r", "\n").split("\n") - self.data = tobytes("\r".join(lines[1:-1])) + lines = strjoin(content).split("\n") + self.data = tobytes("\n".join(lines[1:-1])) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_a_v_a_r.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_a_v_a_r.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_a_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_a_v_a_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -8,20 +8,26 @@ from . 
import DefaultTable import array import struct -import warnings +import logging +log = logging.getLogger(__name__) + # Apple's documentation of 'avar': # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6avar.html AVAR_HEADER_FORMAT = """ > # big endian - version: L - axisCount: L + majorVersion: H + minorVersion: H + reserved: H + axisCount: H """ +assert sstruct.calcsize(AVAR_HEADER_FORMAT) == 8, sstruct.calcsize(AVAR_HEADER_FORMAT) class table__a_v_a_r(DefaultTable.DefaultTable): + dependencies = ["fvar"] def __init__(self, tag=None): @@ -30,7 +36,12 @@ def compile(self, ttFont): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - header = {"version": 0x00010000, "axisCount": len(axisTags)} + header = { + "majorVersion": 1, + "minorVersion": 0, + "reserved": 0, + "axisCount": len(axisTags) + } result = [sstruct.pack(AVAR_HEADER_FORMAT, header)] for axis in axisTags: mappings = sorted(self.segments[axis].items()) @@ -46,8 +57,9 @@ header = {} headerSize = sstruct.calcsize(AVAR_HEADER_FORMAT) header = sstruct.unpack(AVAR_HEADER_FORMAT, data[0:headerSize]) - if header["version"] != 0x00010000: - raise TTLibError("unsupported 'avar' version %04x" % header["version"]) + majorVersion = header["majorVersion"] + if majorVersion != 1: + raise TTLibError("unsupported 'avar' version %d" % majorVersion) pos = headerSize for axis in axisTags: segments = self.segments[axis] = {} @@ -57,7 +69,6 @@ fromValue, toValue = struct.unpack(">hh", data[pos:pos+4]) segments[fixedToFloat(fromValue, 14)] = fixedToFloat(toValue, 14) pos = pos + 4 - self.fixupSegments_(warn=warnings.warn) def toXML(self, writer, ttFont, progress=None): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] @@ -65,6 +76,10 @@ writer.begintag("segment", axis=axis) writer.newline() for key, value in sorted(self.segments[axis].items()): + # roundtrip float -> fixed -> float to normalize TTX output + # as dumped after decompiling or straight from varLib + key = 
fixedToFloat(floatToFixed(key, 14), 14) + value = fixedToFloat(floatToFixed(value, 14), 14) writer.simpletag("mapping", **{"from": key, "to": value}) writer.newline() writer.endtag("segment") @@ -81,14 +96,6 @@ fromValue = safeEval(elementAttrs["from"]) toValue = safeEval(elementAttrs["to"]) if fromValue in segment: - warnings.warn("duplicate entry for %s in axis '%s'" % - (fromValue, axis)) + log.warning("duplicate entry for %s in axis '%s'", + fromValue, axis) segment[fromValue] = toValue - self.fixupSegments_(warn=warnings.warn) - - def fixupSegments_(self, warn): - for axis, mappings in self.segments.items(): - for k in [-1.0, 0.0, 1.0]: - if mappings.get(k) != k: - warn("avar axis '%s' should map %s to %s" % (axis, k, k)) - mappings[k] = k diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_a_v_a_r_test.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_a_v_a_r_test.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_a_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_a_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._a_v_a_r import table__a_v_a_r -from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis -import collections -import unittest - - -TEST_DATA = deHexStr( - "00 01 00 00 00 00 00 02 " - "00 04 C0 00 C0 00 00 00 00 00 13 33 33 33 40 00 40 00 " - "00 03 C0 00 C0 00 00 00 00 00 40 00 40 00") - - -class AxisVariationTableTest(unittest.TestCase): - def test_compile(self): - avar = table__a_v_a_r() - avar.segments["wdth"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} - avar.segments["wght"] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} - self.assertEqual(TEST_DATA, avar.compile(self.makeFont(["wdth", 
"wght"]))) - - def test_decompile(self): - avar = table__a_v_a_r() - avar.decompile(TEST_DATA, self.makeFont(["wdth", "wght"])) - self.assertEqual({ - "wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}, - "wght": {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} - }, avar.segments) - - def test_decompile_unsupportedVersion(self): - avar = table__a_v_a_r() - font = self.makeFont(["wdth", "wght"]) - self.assertRaises(TTLibError, avar.decompile, deHexStr("02 01 03 06 00 00 00 00"), font) - - def test_toXML(self): - avar = table__a_v_a_r() - avar.segments["opsz"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} - writer = XMLWriter(BytesIO()) - avar.toXML(writer, self.makeFont(["opsz"])) - self.assertEqual([ - '', - '', - '', - '', - '', - '' - ], self.xml_lines(writer)) - - def test_fromXML(self): - avar = table__a_v_a_r() - avar.fromXML("segment", {"axis":"wdth"}, [ - ("mapping", {"from": "-1.0", "to": "-1.0"}, []), - ("mapping", {"from": "0.0", "to": "0.0"}, []), - ("mapping", {"from": "0.7", "to": "0.2"}, []), - ("mapping", {"from": "1.0", "to": "1.0"}, []) - ], ttFont=None) - self.assertEqual({"wdth": {-1: -1, 0: 0, 0.7: 0.2, 1.0: 1.0}}, avar.segments) - - def test_fixupSegments(self): - avar = table__a_v_a_r() - avar.segments = {"wdth": {0.3: 0.8, 1.0: 0.7}} - warnings = [] - avar.fixupSegments_(lambda w: warnings.append(w)) - self.assertEqual({"wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}}, avar.segments) - self.assertEqual([ - "avar axis 'wdth' should map -1.0 to -1.0", - "avar axis 'wdth' should map 0.0 to 0.0", - "avar axis 'wdth' should map 1.0 to 1.0" - ], warnings) - - @staticmethod - def makeFont(axisTags): - """['opsz', 'wdth'] --> ttFont""" - fvar = table__f_v_a_r() - for tag in axisTags: - axis = Axis() - axis.axisTag = tag - fvar.axes.append(axis) - return {"fvar": fvar} - - @staticmethod - def xml_lines(writer): - content = writer.file.getvalue().decode("utf-8") - return [line.strip() for line in content.splitlines()][1:] - - -if __name__ == "__main__": - 
unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/BitmapGlyphMetrics.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/BitmapGlyphMetrics.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/BitmapGlyphMetrics.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/BitmapGlyphMetrics.py 2018-01-08 12:40:40.000000000 +0000 @@ -4,8 +4,11 @@ from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval +import logging +log = logging.getLogger(__name__) + bigGlyphMetricsFormat = """ > # big endian height: B @@ -48,7 +51,7 @@ if name in metricNames: vars(self)[name] = safeEval(attrs['value']) else: - print("Warning: unknown name '%s' being ignored in %s." % name, self.__class__.__name__) + log.warning("unknown name '%s' being ignored in %s.", name, self.__class__.__name__) class BigGlyphMetrics(BitmapGlyphMetrics): diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_b_s_l_n.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_b_s_l_n.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_b_s_l_n.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_b_s_l_n.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html +class table__b_s_l_n(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/C_F_F__2.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/C_F_F__2.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/C_F_F__2.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/C_F_F__2.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * 
+from fontTools import cffLib +from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_ + + +class table_C_F_F__2(table_C_F_F_): + + def decompile(self, data, otFont): + self.cff.decompile(BytesIO(data), otFont, isCFF2=True) + assert len(self.cff) == 1, "can't deal with multi-font CFF tables." + + def compile(self, otFont): + f = BytesIO() + self.cff.compile(f, otFont, isCFF2=True) + return f.getvalue() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/C_F_F_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/C_F_F_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/C_F_F_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/C_F_F_.py 2018-01-08 12:40:40.000000000 +0000 @@ -6,18 +6,18 @@ class table_C_F_F_(DefaultTable.DefaultTable): - def __init__(self, tag): + def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.cff = cffLib.CFFFontSet() self._gaveGlyphOrder = False def decompile(self, data, otFont): - self.cff.decompile(BytesIO(data), otFont) + self.cff.decompile(BytesIO(data), otFont, isCFF2=False) assert len(self.cff) == 1, "can't deal with multi-font CFF tables." 
def compile(self, otFont): f = BytesIO() - self.cff.compile(f, otFont) + self.cff.compile(f, otFont, isCFF2=False) return f.getvalue() def haveGlyphNames(self): @@ -44,4 +44,4 @@ def fromXML(self, name, attrs, content, otFont): if not hasattr(self, "cff"): self.cff = cffLib.CFFFontSet() - self.cff.fromXML(name, attrs, content) + self.cff.fromXML(name, attrs, content, otFont) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_i_d_g.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_c_i_d_g.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_i_d_g.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_c_i_d_g.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ +# coding: utf-8 +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# The AAT ‘cidg’ table has almost the same structure as ‘gidc’, +# just mapping CIDs to GlyphIDs instead of the reverse direction. +# +# It is useful for fonts that may be used by a PDF renderer in lieu of +# a font reference with a known glyph collection but no subsetted +# glyphs. For instance, a PDF can say “please use a font conforming +# to Adobe-Japan-1”; the ‘cidg’ mapping is necessary if the font is, +# say, a TrueType font. ‘gidc’ is lossy for this purpose and is +# obsoleted by ‘cidg’. +# +# For example, the first font in /System/Library/Fonts/PingFang.ttc +# (which Apple ships pre-installed on MacOS 10.12.6) has a ‘cidg’ table. 
+class table__c_i_d_g(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_m_a_p.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_c_m_a_p.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_m_a_p.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_c_m_a_p.py 2018-01-08 12:40:40.000000000 +0000 @@ -9,8 +9,26 @@ import struct import array import operator +import logging +log = logging.getLogger(__name__) + + +def _make_map(font, chars, gids): + assert len(chars) == len(gids) + cmap = {} + glyphOrder = font.getGlyphOrder() + for char,gid in zip(chars,gids): + if gid is 0: + continue + try: + name = glyphOrder[gid] + except IndexError: + name = font.getGlyphName(gid) + cmap[char] = name + return cmap + class table__c_m_a_p(DefaultTable.DefaultTable): def getcmap(self, platformID, platEncID): @@ -20,6 +38,36 @@ return subtable return None # not found + def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))): + """Return the 'best' unicode cmap dictionary available in the font, + or None, if no unicode cmap subtable is available. + + By default it will search for the following (platformID, platEncID) + pairs: + (3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0) + This can be customized via the cmapPreferences argument. + """ + for platformID, platEncID in cmapPreferences: + cmapSubtable = self.getcmap(platformID, platEncID) + if cmapSubtable is not None: + return cmapSubtable.cmap + return None # None of the requested cmap subtables were found + + def buildReversed(self): + """Returns a reverse cmap such as {'one':{0x31}, 'A':{0x41,0x391}}. + + The values are sets of Unicode codepoints because + some fonts map different codepoints to the same glyph. + For example, U+0041 LATIN CAPITAL LETTER A and U+0391 + GREEK CAPITAL LETTER ALPHA are sometimes the same glyph. 
+ """ + result = {} + for subtable in self.tables: + if subtable.isUnicode(): + for codepoint, name in subtable.cmap.items(): + result.setdefault(name, set()).add(codepoint) + return result + def decompile(self, data, ttFont): tableVersion, numSubTables = struct.unpack(">HH", data[:4]) self.tableVersion = int(tableVersion) @@ -36,7 +84,10 @@ format, length = struct.unpack(">HL", data[offset:offset+6]) if not length: - print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset)) + log.error( + "cmap subtable is reported as having zero length: platformID %s, " + "platEncID %s, format %s offset %s. Skipping table.", + platformID, platEncID, format, offset) continue table = CmapSubtable.newSubtable(format) table.platformID = platformID @@ -202,26 +253,22 @@ assert (data is None and ttFont is None), "Need both data and ttFont arguments" data = self.data # decompileHeader assigns the data after the header to self.data assert 262 == self.length, "Format 0 cmap subtable not 262 bytes" - glyphIdArray = array.array("B") - glyphIdArray.fromstring(self.data) - self.cmap = cmap = {} - lenArray = len(glyphIdArray) - charCodes = list(range(lenArray)) - names = map(self.ttFont.getGlyphName, glyphIdArray) - list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) + gids = array.array("B") + gids.fromstring(self.data) + charCodes = list(range(len(gids))) + self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: return struct.pack(">HHH", 0, 262, self.language) + self.data - charCodeList = sorted(self.cmap.items()) - charCodes = [entry[0] for entry in charCodeList] - valueList = [entry[1] for entry in charCodeList] - assert charCodes == list(range(256)) - valueList = map(ttFont.getGlyphID, valueList) + cmap = self.cmap + assert set(cmap.keys()).issubset(range(256)) + getGlyphID = ttFont.getGlyphID + valueList = [getGlyphID(cmap[i]) 
if i in cmap else 0 for i in range(256)] - glyphIdArray = array.array("B", valueList) - data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring() + gids = array.array("B", valueList) + data = struct.pack(">HHH", 0, 262, self.language) + gids.tostring() assert len(data) == 262 return data @@ -351,7 +398,7 @@ # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew! self.data = b"" - self.cmap = cmap = {} + cmap = {} notdefGI = 0 for firstByte in range(256): subHeadindex = subHeaderKeys[firstByte] @@ -382,17 +429,10 @@ # If not subHeader.entryCount, then all char codes with this first byte are # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the # same as mapping it to .notdef. - # cmap values are GID's. - glyphOrder = self.ttFont.getGlyphOrder() + gids = list(cmap.values()) charCodes = list(cmap.keys()) - lenCmap = len(gids) - try: - names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, gids )) - list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: @@ -700,15 +740,7 @@ glyphID = 0 # missing glyph gids.append(glyphID & 0xFFFF) - self.cmap = cmap = {} - lenCmap = len(gids) - glyphOrder = self.ttFont.getGlyphOrder() - try: - names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, gids )) - list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: @@ -838,23 +870,14 @@ firstCode = int(firstCode) data = data[4:] #assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!! 
- glyphIndexArray = array.array("H") - glyphIndexArray.fromstring(data[:2 * int(entryCount)]) + gids = array.array("H") + gids.fromstring(data[:2 * int(entryCount)]) if sys.byteorder != "big": - glyphIndexArray.byteswap() + gids.byteswap() self.data = data = None - self.cmap = cmap = {} - - lenArray = len(glyphIndexArray) - charCodes = list(range(firstCode, firstCode + lenArray)) - glyphOrder = self.ttFont.getGlyphOrder() - try: - names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, glyphIndexArray )) - list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) + charCodes = list(range(firstCode, firstCode + len(gids))) + self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, ttFont): if self.data: @@ -864,12 +887,14 @@ if codes: # yes, there are empty cmap tables. codes = list(range(codes[0], codes[-1] + 1)) firstCode = codes[0] - valueList = [cmap.get(code, ".notdef") for code in codes] - valueList = map(ttFont.getGlyphID, valueList) - glyphIndexArray = array.array("H", valueList) + valueList = [ + ttFont.getGlyphID(cmap[code]) if code in cmap else 0 + for code in codes + ] + gids = array.array("H", valueList) if sys.byteorder != "big": - glyphIndexArray.byteswap() - data = glyphIndexArray.tostring() + gids.byteswap() + data = gids.tostring() else: data = b"" firstCode = 0 @@ -930,15 +955,7 @@ charCodes.extend(list(range(startCharCode, endCharCode +1))) gids.extend(self._computeGIDs(glyphID, lenGroup)) self.data = data = None - self.cmap = cmap = {} - lenCmap = len(gids) - glyphOrder = self.ttFont.getGlyphOrder() - try: - names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, gids )) - list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) + self.cmap = _make_map(self.ttFont, charCodes, gids) def compile(self, 
ttFont): if self.data: @@ -1243,9 +1260,8 @@ data = bytesjoin(varSelectorRecords) + bytesjoin(data) self.length = 10 + len(data) headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) - self.data = headerdata + data - return self.data + return headerdata + data class cmap_format_unknown(CmapSubtable): diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_m_a_p_test.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_c_m_a_p_test.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_m_a_p_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_c_m_a_p_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools import ttLib -import unittest -from ._c_m_a_p import CmapSubtable - -class CmapSubtableTest(unittest.TestCase): - - def makeSubtable(self, platformID, platEncID, langID): - subtable = CmapSubtable(None) - subtable.platformID, subtable.platEncID, subtable.language = (platformID, platEncID, langID) - return subtable - - def test_toUnicode_utf16be(self): - subtable = self.makeSubtable(0, 2, 7) - self.assertEqual("utf_16_be", subtable.getEncoding()) - self.assertEqual(True, subtable.isUnicode()) - - def test_toUnicode_macroman(self): - subtable = self.makeSubtable(1, 0, 7) # MacRoman - self.assertEqual("mac_roman", subtable.getEncoding()) - self.assertEqual(False, subtable.isUnicode()) - - def test_toUnicode_macromanian(self): - subtable = self.makeSubtable(1, 0, 37) # Mac Romanian - self.assertNotEqual(None, subtable.getEncoding()) - self.assertEqual(False, subtable.isUnicode()) - - def test_extended_mac_encodings(self): - subtable = self.makeSubtable(1, 1, 0) # Mac Japanese - self.assertNotEqual(None, subtable.getEncoding()) - self.assertEqual(False, subtable.isUnicode()) - - def test_extended_unknown(self): - subtable = self.makeSubtable(10, 11, 
12) - self.assertEqual(subtable.getEncoding(), None) - self.assertEqual(subtable.getEncoding("ascii"), "ascii") - self.assertEqual(subtable.getEncoding(default="xyz"), "xyz") - - def test_decompile_4(self): - subtable = CmapSubtable.newSubtable(4) - font = ttLib.TTFont() - font.setGlyphOrder([]) - subtable.decompile(b'\0' * 3 + b'\x10' + b'\0' * 12, font) - - def test_decompile_12(self): - subtable = CmapSubtable.newSubtable(12) - font = ttLib.TTFont() - font.setGlyphOrder([]) - subtable.decompile(b'\0' * 7 + b'\x10' + b'\0' * 8, font) - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/C_P_A_L_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/C_P_A_L_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/C_P_A_L_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/C_P_A_L_.py 2018-01-08 12:40:40.000000000 +0000 @@ -6,14 +6,23 @@ from fontTools.misc.py23 import * from fontTools.misc.textTools import safeEval from . 
import DefaultTable +import array import struct +import sys class table_C_P_A_L_(DefaultTable.DefaultTable): + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.palettes = [] + self.paletteTypes = [] + self.paletteLabels = [] + self.paletteEntryLabels = [] + def decompile(self, data, ttFont): self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12]) - assert (self.version == 0), "Version of COLR table is higher than I know how to handle" + assert (self.version <= 1), "Version of CPAL table is higher than I know how to handle" self.palettes = [] pos = 12 for i in range(numPalettes): @@ -26,51 +35,202 @@ palette.append( Color(*struct.unpack(">BBBB", data[ppos:ppos+4])) ) ppos += 4 self.palettes.append(palette) + if self.version == 0: + offsetToPaletteTypeArray = 0 + offsetToPaletteLabelArray = 0 + offsetToPaletteEntryLabelArray = 0 + else: + pos = 12 + numPalettes * 2 + (offsetToPaletteTypeArray, offsetToPaletteLabelArray, + offsetToPaletteEntryLabelArray) = ( + struct.unpack(">LLL", data[pos:pos+12])) + self.paletteTypes = self._decompileUInt32Array( + data, offsetToPaletteTypeArray, numPalettes) + self.paletteLabels = self._decompileUInt16Array( + data, offsetToPaletteLabelArray, numPalettes) + self.paletteEntryLabels = self._decompileUInt16Array( + data, offsetToPaletteEntryLabelArray, + self.numPaletteEntries) + + def _decompileUInt16Array(self, data, offset, numElements): + if offset == 0: + return [0] * numElements + result = array.array("H", data[offset : offset + 2 * numElements]) + if sys.byteorder != "big": + result.byteswap() + assert len(result) == numElements, result + return result.tolist() + + def _decompileUInt32Array(self, data, offset, numElements): + if offset == 0: + return [0] * numElements + result = array.array("I", data[offset : offset + 4 * numElements]) + if sys.byteorder != "big": + result.byteswap() + assert len(result) == numElements, 
result + return result.tolist() def compile(self, ttFont): - dataList = [struct.pack(">HHHHL", self.version, self.numPaletteEntries, len(self.palettes), self.numPaletteEntries * len(self.palettes), 12+2*len(self.palettes))] - for i in range(len(self.palettes)): - dataList.append(struct.pack(">H", i*self.numPaletteEntries)) + colorRecordIndices, colorRecords = self._compileColorRecords() + paletteTypes = self._compilePaletteTypes() + paletteLabels = self._compilePaletteLabels() + paletteEntryLabels = self._compilePaletteEntryLabels() + numColorRecords = len(colorRecords) // 4 + offsetToFirstColorRecord = 12 + len(colorRecordIndices) + if self.version >= 1: + offsetToFirstColorRecord += 12 + header = struct.pack(">HHHHL", self.version, + self.numPaletteEntries, len(self.palettes), + numColorRecords, offsetToFirstColorRecord) + if self.version == 0: + dataList = [header, colorRecordIndices, colorRecords] + else: + pos = offsetToFirstColorRecord + len(colorRecords) + if len(paletteTypes) == 0: + offsetToPaletteTypeArray = 0 + else: + offsetToPaletteTypeArray = pos + pos += len(paletteTypes) + if len(paletteLabels) == 0: + offsetToPaletteLabelArray = 0 + else: + offsetToPaletteLabelArray = pos + pos += len(paletteLabels) + if len(paletteEntryLabels) == 0: + offsetToPaletteEntryLabelArray = 0 + else: + offsetToPaletteEntryLabelArray = pos + pos += len(paletteLabels) + header1 = struct.pack(">LLL", + offsetToPaletteTypeArray, + offsetToPaletteLabelArray, + offsetToPaletteEntryLabelArray) + dataList = [header, colorRecordIndices, header1, + colorRecords, paletteTypes, paletteLabels, + paletteEntryLabels] + return bytesjoin(dataList) + + def _compilePalette(self, palette): + assert(len(palette) == self.numPaletteEntries) + pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha) + return bytesjoin([pack(color) for color in palette]) + + def _compileColorRecords(self): + colorRecords, colorRecordIndices, pool = [], [], {} for palette in self.palettes: - 
assert(len(palette) == self.numPaletteEntries) - for color in palette: - dataList.append(struct.pack(">BBBB", color.blue,color.green,color.red,color.alpha)) - data = bytesjoin(dataList) - return data + packedPalette = self._compilePalette(palette) + if packedPalette in pool: + index = pool[packedPalette] + else: + index = len(colorRecords) + colorRecords.append(packedPalette) + pool[packedPalette] = index + colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries)) + return bytesjoin(colorRecordIndices), bytesjoin(colorRecords) + + def _compilePaletteTypes(self): + if self.version == 0 or not any(self.paletteTypes): + return b'' + assert len(self.paletteTypes) == len(self.palettes) + result = bytesjoin([struct.pack(">I", ptype) + for ptype in self.paletteTypes]) + assert len(result) == 4 * len(self.palettes) + return result + + def _compilePaletteLabels(self): + if self.version == 0 or not any(self.paletteLabels): + return b'' + assert len(self.paletteLabels) == len(self.palettes) + result = bytesjoin([struct.pack(">H", label) + for label in self.paletteLabels]) + assert len(result) == 2 * len(self.palettes) + return result + + def _compilePaletteEntryLabels(self): + if self.version == 0 or not any(self.paletteEntryLabels): + return b'' + assert len(self.paletteEntryLabels) == self.numPaletteEntries + result = bytesjoin([struct.pack(">H", label) + for label in self.paletteEntryLabels]) + assert len(result) == 2 * self.numPaletteEntries + return result def toXML(self, writer, ttFont): + numPalettes = len(self.palettes) + paletteLabels = {i: nameID + for (i, nameID) in enumerate(self.paletteLabels)} + paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)} writer.simpletag("version", value=self.version) writer.newline() - writer.simpletag("numPaletteEntries", value=self.numPaletteEntries) + writer.simpletag("numPaletteEntries", + value=self.numPaletteEntries) writer.newline() for index, palette in enumerate(self.palettes): - 
writer.begintag("palette", index=index) + attrs = {"index": index} + paletteType = paletteTypes.get(index) + paletteLabel = paletteLabels.get(index) + if self.version > 0 and paletteLabel is not None: + attrs["label"] = paletteLabel + if self.version > 0 and paletteType is not None: + attrs["type"] = paletteType + writer.begintag("palette", **attrs) writer.newline() + if (self.version > 0 and paletteLabel and + ttFont and "name" in ttFont): + name = ttFont["name"].getDebugName(paletteLabel) + if name is not None: + writer.comment(name) + writer.newline() assert(len(palette) == self.numPaletteEntries) for cindex, color in enumerate(palette): color.toXML(writer, ttFont, cindex) writer.endtag("palette") writer.newline() + if self.version > 0 and any(self.paletteEntryLabels): + writer.begintag("paletteEntryLabels") + writer.newline() + for index, label in enumerate(self.paletteEntryLabels): + if label: + writer.simpletag("label", index=index, value=label) + if (self.version > 0 and label and ttFont and "name" in ttFont): + name = ttFont["name"].getDebugName(label) + if name is not None: + writer.comment(name) + writer.newline() + writer.endtag("paletteEntryLabels") + writer.newline() def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "palettes"): - self.palettes = [] if name == "palette": - palette = [] - for element in content: - if isinstance(element, basestring): - continue + self.paletteLabels.append(int(attrs.get("label", "0"))) + self.paletteTypes.append(int(attrs.get("type", "0"))) palette = [] for element in content: if isinstance(element, basestring): continue color = Color() color.fromXML(element[0], element[1], element[2], ttFont) - palette.append (color) + palette.append(color) self.palettes.append(palette) + elif name == "paletteEntryLabels": + colorLabels = {} + for element in content: + if isinstance(element, basestring): + continue + elementName, elementAttr, _ = element + if elementName == "label": + labelIndex = 
safeEval(elementAttr["index"]) + nameID = safeEval(elementAttr["value"]) + colorLabels[labelIndex] = nameID + self.paletteEntryLabels = [ + colorLabels.get(i, 0) + for i in range(self.numPaletteEntries)] elif "value" in attrs: value = safeEval(attrs["value"]) setattr(self, name, value) + if name == "numPaletteEntries": + self.paletteEntryLabels = [0] * self.numPaletteEntries + class Color(object): diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_v_a_r.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_c_v_a_r.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_c_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_c_v_a_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,84 @@ +from __future__ import \ + print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from . import DefaultTable +from fontTools.misc import sstruct +from fontTools.ttLib.tables.TupleVariation import \ + compileTupleVariationStore, decompileTupleVariationStore, TupleVariation + + +# https://www.microsoft.com/typography/otspec/cvar.htm +# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html + +CVAR_HEADER_FORMAT = """ + > # big endian + majorVersion: H + minorVersion: H + tupleVariationCount: H + offsetToData: H +""" + +CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT) + + +class table__c_v_a_r(DefaultTable.DefaultTable): + dependencies = ["cvt ", "fvar"] + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.majorVersion, self.minorVersion = 1, 0 + self.variations = [] + + def compile(self, ttFont, useSharedPoints=False): + tupleVariationCount, tuples, data = compileTupleVariationStore( + variations=[v for v in self.variations if v.hasImpact()], + pointCount=len(ttFont["cvt "].values), + axisTags=[axis.axisTag for axis in ttFont["fvar"].axes], + 
sharedTupleIndices={}, + useSharedPoints=useSharedPoints) + header = { + "majorVersion": self.majorVersion, + "minorVersion": self.minorVersion, + "tupleVariationCount": tupleVariationCount, + "offsetToData": CVAR_HEADER_SIZE + len(tuples), + } + return bytesjoin([ + sstruct.pack(CVAR_HEADER_FORMAT, header), + tuples, + data + ]) + + def decompile(self, data, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + header = {} + sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], header) + self.majorVersion = header["majorVersion"] + self.minorVersion = header["minorVersion"] + assert self.majorVersion == 1, self.majorVersion + self.variations = decompileTupleVariationStore( + tableTag=self.tableTag, axisTags=axisTags, + tupleVariationCount=header["tupleVariationCount"], + pointCount=len(ttFont["cvt "].values), sharedTuples=None, + data=data, pos=CVAR_HEADER_SIZE, dataPos=header["offsetToData"]) + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.majorVersion = int(attrs.get("major", "1")) + self.minorVersion = int(attrs.get("minor", "0")) + elif name == "tuple": + valueCount = len(ttFont["cvt "].values) + var = TupleVariation({}, [None] * valueCount) + self.variations.append(var) + for tupleElement in content: + if isinstance(tupleElement, tuple): + tupleName, tupleAttrs, tupleContent = tupleElement + var.fromXML(tupleName, tupleAttrs, tupleContent) + + def toXML(self, writer, ttFont, progress=None): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + writer.simpletag("version", + major=self.majorVersion, minor=self.minorVersion) + writer.newline() + for var in self.variations: + var.toXML(writer, axisTags) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/DefaultTable.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/DefaultTable.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/DefaultTable.py 2015-08-31 17:57:15.000000000 +0000 +++ 
fonttools-3.21.2/Snippets/fontTools/ttLib/tables/DefaultTable.py 2018-01-08 12:40:40.000000000 +0000 @@ -39,9 +39,11 @@ def __repr__(self): return "<'%s' table at %x>" % (self.tableTag, id(self)) - def __ne__(self, other): - return not self.__eq__(other) def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ + + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/E_B_D_T_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/E_B_D_T_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/E_B_D_T_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/E_B_D_T_.py 2018-01-08 12:40:40.000000000 +0000 @@ -7,6 +7,10 @@ import itertools import os import struct +import logging + + +log = logging.getLogger(__name__) ebdtTableVersionFormat = """ > # big endian @@ -166,7 +170,7 @@ assert glyphName not in bitmapGlyphDict, "Duplicate glyphs with the same name '%s' in the same strike." % glyphName bitmapGlyphDict[glyphName] = curGlyph else: - print("Warning: %s being ignored by %s", name, self.__class__.__name__) + log.warning("%s being ignored by %s", name, self.__class__.__name__) # Grow the strike data array to the appropriate size. The XML # format allows the strike index value to be out of order. @@ -196,7 +200,7 @@ if name in componentNames: vars(self)[name] = safeEval(attrs['value']) else: - print("Warning: unknown name '%s' being ignored by EbdtComponent." % name) + log.warning("unknown name '%s' being ignored by EbdtComponent.", name) # Helper functions for dealing with binary. @@ -478,7 +482,7 @@ self.metrics = metricsClass() self.metrics.fromXML(name, attrs, content, ttFont) elif name == oppositeMetricsName: - print("Warning: %s being ignored in format %d." 
% oppositeMetricsName, self.getFormat()) + log.warning("Warning: %s being ignored in format %d.", oppositeMetricsName, self.getFormat()) return BitmapPlusMetricsMixin @@ -692,7 +696,7 @@ curComponent.fromXML(name, attrs, content, ttFont) self.componentArray.append(curComponent) else: - print("Warning: '%s' being ignored in component array." % name) + log.warning("'%s' being ignored in component array.", name) class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph): diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/E_B_L_C_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/E_B_L_C_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/E_B_L_C_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/E_B_L_C_.py 2018-01-08 12:40:40.000000000 +0000 @@ -7,6 +7,10 @@ import struct import itertools from collections import deque +import logging + + +log = logging.getLogger(__name__) eblcHeaderFormat = """ > # big endian @@ -71,44 +75,47 @@ # Save the original data because offsets are from the start of the table. 
origData = data + i = 0; - dummy, data = sstruct.unpack2(eblcHeaderFormat, data, self) + dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self) + i += 8; self.strikes = [] for curStrikeIndex in range(self.numSizes): curStrike = Strike() self.strikes.append(curStrike) curTable = curStrike.bitmapSizeTable - dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart1, data, curTable) + dummy = sstruct.unpack2(bitmapSizeTableFormatPart1, data[i:i+16], curTable) + i += 16 for metric in ('hori', 'vert'): metricObj = SbitLineMetrics() vars(curTable)[metric] = metricObj - dummy, data = sstruct.unpack2(sbitLineMetricsFormat, data, metricObj) - dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart2, data, curTable) + dummy = sstruct.unpack2(sbitLineMetricsFormat, data[i:i+12], metricObj) + i += 12 + dummy = sstruct.unpack(bitmapSizeTableFormatPart2, data[i:i+8], curTable) + i += 8 for curStrike in self.strikes: curTable = curStrike.bitmapSizeTable for subtableIndex in range(curTable.numberOfIndexSubTables): - lowerBound = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize - upperBound = lowerBound + indexSubTableArraySize - data = origData[lowerBound:upperBound] + i = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize - tup = struct.unpack(indexSubTableArrayFormat, data) + tup = struct.unpack(indexSubTableArrayFormat, data[i:i+indexSubTableArraySize]) (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup - offsetToIndexSubTable = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable - data = origData[offsetToIndexSubTable:] + i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable - tup = struct.unpack(indexSubHeaderFormat, data[:indexSubHeaderSize]) + tup = struct.unpack(indexSubHeaderFormat, data[i:i+indexSubHeaderSize]) (indexFormat, imageFormat, imageDataOffset) = tup indexFormatClass = self.getIndexFormatClass(indexFormat) - indexSubTable = 
indexFormatClass(data[indexSubHeaderSize:], ttFont) + indexSubTable = indexFormatClass(data[i+indexSubHeaderSize:], ttFont) indexSubTable.firstGlyphIndex = firstGlyphIndex indexSubTable.lastGlyphIndex = lastGlyphIndex indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable indexSubTable.indexFormat = indexFormat indexSubTable.imageFormat = imageFormat indexSubTable.imageDataOffset = imageDataOffset + indexSubTable.decompile() # https://github.com/behdad/fonttools/issues/317 curStrike.indexSubTables.append(indexSubTable) def compile(self, ttFont): @@ -293,7 +300,7 @@ elif name in dataNames: vars(self)[name] = safeEval(attrs['value']) else: - print("Warning: unknown name '%s' being ignored in BitmapSizeTable." % name) + log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name) class SbitLineMetrics(object): @@ -336,7 +343,6 @@ if not hasattr(self, "data"): raise AttributeError(attr) self.decompile() - del self.data, self.ttFont return getattr(self, attr) # This method just takes care of the indexSubHeader. Implementing subclasses @@ -439,6 +445,7 @@ self.names = list(map(self.ttFont.getGlyphName, glyphIds)) self.removeSkipGlyphs() + del self.data, self.ttFont def compile(self, ttFont): # First make sure that all the data lines up properly. Formats 1 and 3 @@ -503,7 +510,7 @@ self.metrics = BigGlyphMetrics() self.metrics.fromXML(name, attrs, content, ttFont) elif name == SmallGlyphMetrics.__name__: - print("Warning: SmallGlyphMetrics being ignored in format %d." % self.indexFormat) + log.warning("SmallGlyphMetrics being ignored in format %d.", self.indexFormat) def padBitmapData(self, data): # Make sure that the data isn't bigger than the fixed size. 
@@ -525,12 +532,13 @@ offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] self.locations = list(zip(offsets, offsets[1:])) self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + del self.data, self.ttFont def compile(self, ttFont): glyphIds = list(map(ttFont.getGlyphID, self.names)) # Make sure all the ids are consecutive. This is required by Format 2. assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive." - self.imageDataOffset = min(zip(*self.locations)[0]) + self.imageDataOffset = min(next(iter(zip(*self.locations)))) dataList = [EblcIndexSubTable.compile(self, ttFont)] dataList.append(struct.pack(">L", self.imageSize)) @@ -556,6 +564,7 @@ offsets = [offset + self.imageDataOffset for offset in offsets] self.locations = list(zip(offsets, offsets[1:])) self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + del self.data, self.ttFont def compile(self, ttFont): # First make sure that all the data lines up properly. 
Format 4 @@ -594,9 +603,10 @@ offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] self.locations = list(zip(offsets, offsets[1:])) self.names = list(map(self.ttFont.getGlyphName, glyphIds)) + del self.data, self.ttFont def compile(self, ttFont): - self.imageDataOffset = min(zip(*self.locations)[0]) + self.imageDataOffset = min(next(iter(zip(*self.locations)))) dataList = [EblcIndexSubTable.compile(self, ttFont)] dataList.append(struct.pack(">L", self.imageSize)) dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/F__e_a_t.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/F__e_a_t.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/F__e_a_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/F__e_a_t.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,114 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from .otBase import BaseTTXConverter +from . import DefaultTable +from . 
import grUtils +import struct + +Feat_hdr_format=''' + > + version: 16.16F +''' + +class table_F__e_a_t(DefaultTable.DefaultTable): + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.features = {} + + def decompile(self, data, ttFont): + (_, data) = sstruct.unpack2(Feat_hdr_format, data, self) + numFeats, = struct.unpack('>H', data[:2]) + data = data[8:] + allfeats = [] + maxsetting = 0 + for i in range(numFeats): + if self.version >= 2.0: + (fid, nums, _, offset, flags, lid) = struct.unpack(">LHHLHH", + data[16*i:16*(i+1)]) + offset = int((offset - 12 - 16 * numFeats) / 4) + else: + (fid, nums, offset, flags, lid) = struct.unpack(">HHLHH", + data[12*i:12*(i+1)]) + offset = int((offset - 12 - 12 * numFeats) / 4) + allfeats.append((fid, nums, offset, flags, lid)) + maxsetting = max(maxsetting, offset + nums) + data = data[16*numFeats:] + allsettings = [] + for i in range(maxsetting): + if len(data) >= 4 * (i + 1): + (val, lid) = struct.unpack(">HH", data[4*i:4*(i+1)]) + allsettings.append((val, lid)) + for i,f in enumerate(allfeats): + (fid, nums, offset, flags, lid) = f + fobj = Feature() + fobj.flags = flags + fobj.label = lid + self.features[grUtils.num2tag(fid)] = fobj + fobj.settings = {} + fobj.default = None + fobj.index = i + for i in range(offset, offset + nums): + if i >= len(allsettings): continue + (vid, vlid) = allsettings[i] + fobj.settings[vid] = vlid + if fobj.default is None: + fobj.default = vid + + def compile(self, ttFont): + fdat = "" + vdat = "" + offset = 0 + for f, v in sorted(self.features.items(), key=lambda x:x[1].index): + fnum = grUtils.tag2num(f) + if self.version >= 2.0: + fdat += struct.pack(">LHHLHH", grUtils.tag2num(f), len(v.settings), + 0, offset * 4 + 12 + 16 * len(self.features), v.flags, v.label) + elif fnum > 65535: # self healing for alphabetic ids + self.version = 2.0 + return self.compile(ttFont) + else: + fdat += struct.pack(">HHLHH", grUtils.tag2num(f), len(v.settings), + offset * 4 + 
12 + 12 * len(self.features), v.flags, v.label) + for s, l in sorted(v.settings.items(), key=lambda x:(-1, x[1]) if x[0] == v.default else x): + vdat += struct.pack(">HH", s, l) + offset += len(v.settings) + hdr = sstruct.pack(Feat_hdr_format, self) + return hdr + struct.pack('>HHL', len(self.features), 0, 0) + fdat + vdat + + def toXML(self, writer, ttFont): + writer.simpletag('version', version=self.version) + writer.newline() + for f, v in sorted(self.features.items(), key=lambda x:x[1].index): + writer.begintag('feature', fid=f, label=v.label, flags=v.flags, + default=(v.default if v.default else 0)) + writer.newline() + for s, l in sorted(v.settings.items()): + writer.simpletag('setting', value=s, label=l) + writer.newline() + writer.endtag('feature') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'version': + self.version = float(safeEval(attrs['version'])) + elif name == 'feature': + fid = attrs['fid'] + fobj = Feature() + fobj.flags = int(safeEval(attrs['flags'])) + fobj.label = int(safeEval(attrs['label'])) + fobj.default = int(safeEval(attrs.get('default','0'))) + fobj.index = len(self.features) + self.features[fid] = fobj + fobj.settings = {} + for element in content: + if not isinstance(element, tuple): continue + tag, a, c = element + if tag == 'setting': + fobj.settings[int(safeEval(a['value']))] = int(safeEval(a['label'])) + +class Feature(object): + pass + diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_p_g_m.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_f_p_g_m.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_p_g_m.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_f_p_g_m.py 2018-01-08 12:40:40.000000000 +0000 @@ -15,7 +15,6 @@ def toXML(self, writer, ttFont): self.program.toXML(writer, ttFont) - writer.newline() def fromXML(self, name, attrs, content, ttFont): program = ttProgram.Program() diff -Nru 
fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_v_a_r.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_f_v_a_r.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_f_v_a_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -29,24 +29,29 @@ defaultValue: 16.16F maxValue: 16.16F flags: H - nameID: H + axisNameID: H """ FVAR_INSTANCE_FORMAT = """ > # big endian - nameID: H + subfamilyNameID: H flags: H """ class table__f_v_a_r(DefaultTable.DefaultTable): dependencies = ["name"] - def __init__(self, tag="fvar"): + def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.axes = [] self.instances = [] def compile(self, ttFont): + instanceSize = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + (len(self.axes) * 4) + includePostScriptNames = any(instance.postscriptNameID != 0xFFFF + for instance in self.instances) + if includePostScriptNames: + instanceSize += 2 header = { "version": 0x00010000, "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT), @@ -54,12 +59,13 @@ "axisCount": len(self.axes), "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT), "instanceCount": len(self.instances), - "instanceSize": sstruct.calcsize(FVAR_INSTANCE_FORMAT) + len(self.axes) * 4 + "instanceSize": instanceSize, } result = [sstruct.pack(FVAR_HEADER_FORMAT, header)] result.extend([axis.compile() for axis in self.axes]) axisTags = [axis.axisTag for axis in self.axes] - result.extend([instance.compile(axisTags) for instance in self.instances]) + for instance in self.instances: + result.append(instance.compile(axisTags, includePostScriptNames)) return bytesjoin(result) def decompile(self, data, ttFont): @@ -102,8 +108,8 @@ class Axis(object): def __init__(self): self.axisTag = None - self.nameID = 0 - self.flags = 0 # not exposed in XML because spec defines no values + self.axisNameID = 0 + self.flags = 0 self.minValue = -1.0 self.defaultValue = 0.0 self.maxValue = 1.0 @@ -115,7 +121,7 @@ 
sstruct.unpack2(FVAR_AXIS_FORMAT, data, self) def toXML(self, writer, ttFont): - name = ttFont["name"].getDebugName(self.nameID) + name = ttFont["name"].getDebugName(self.axisNameID) if name is not None: writer.newline() writer.comment(name) @@ -123,10 +129,11 @@ writer.begintag("Axis") writer.newline() for tag, value in [("AxisTag", self.axisTag), + ("Flags", "0x%X" % self.flags), ("MinValue", str(self.minValue)), ("DefaultValue", str(self.defaultValue)), ("MaxValue", str(self.maxValue)), - ("NameID", str(self.nameID))]: + ("AxisNameID", str(self.axisNameID))]: writer.begintag(tag) writer.write(value) writer.endtag(tag) @@ -139,21 +146,26 @@ for tag, _, value in filter(lambda t: type(t) is tuple, content): value = ''.join(value) if tag == "AxisTag": - self.axisTag = value - elif tag in ["MinValue", "DefaultValue", "MaxValue", "NameID"]: + self.axisTag = Tag(value) + elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue", + "AxisNameID"}: setattr(self, tag[0].lower() + tag[1:], safeEval(value)) + class NamedInstance(object): def __init__(self): - self.nameID = 0 - self.flags = 0 # not exposed in XML because spec defines no values + self.subfamilyNameID = 0 + self.postscriptNameID = 0xFFFF + self.flags = 0 self.coordinates = {} - def compile(self, axisTags): + def compile(self, axisTags, includePostScriptName): result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)] for axis in axisTags: fixedCoord = floatToFixed(self.coordinates[axis], 16) result.append(struct.pack(">l", fixedCoord)) + if includePostScriptName: + result.append(struct.pack(">H", self.postscriptNameID)) return bytesjoin(result) def decompile(self, data, axisTags): @@ -163,14 +175,28 @@ value = struct.unpack(">l", data[pos : pos + 4])[0] self.coordinates[axis] = fixedToFloat(value, 16) pos += 4 + if pos + 2 <= len(data): + self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0] + else: + self.postscriptNameID = 0xFFFF def toXML(self, writer, ttFont): - name = 
ttFont["name"].getDebugName(self.nameID) + name = ttFont["name"].getDebugName(self.subfamilyNameID) if name is not None: writer.newline() writer.comment(name) writer.newline() - writer.begintag("NamedInstance", nameID=self.nameID) + psname = ttFont["name"].getDebugName(self.postscriptNameID) + if psname is not None: + writer.comment(u"PostScript: " + psname) + writer.newline() + if self.postscriptNameID == 0xFFFF: + writer.begintag("NamedInstance", flags=("0x%X" % self.flags), + subfamilyNameID=self.subfamilyNameID) + else: + writer.begintag("NamedInstance", flags=("0x%X" % self.flags), + subfamilyNameID=self.subfamilyNameID, + postscriptNameID=self.postscriptNameID, ) writer.newline() for axis in ttFont["fvar"].axes: writer.simpletag("coord", axis=axis.axisTag, @@ -181,7 +207,13 @@ def fromXML(self, name, attrs, content, ttFont): assert(name == "NamedInstance") - self.nameID = safeEval(attrs["nameID"]) + self.subfamilyNameID = safeEval(attrs["subfamilyNameID"]) + self.flags = safeEval(attrs.get("flags", "0")) + if "postscriptNameID" in attrs: + self.postscriptNameID = safeEval(attrs["postscriptNameID"]) + else: + self.postscriptNameID = 0xFFFF + for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content): if tag == "coord": self.coordinates[elementAttrs["axis"]] = safeEval(elementAttrs["value"]) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_v_a_r_test.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_f_v_a_r_test.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_f_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_f_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,190 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._f_v_a_r import 
table__f_v_a_r, Axis, NamedInstance -from fontTools.ttLib.tables._n_a_m_e import table__n_a_m_e, NameRecord -import unittest - - - -FVAR_DATA = deHexStr( - "00 01 00 00 00 10 00 02 00 02 00 14 00 02 00 0C " - "77 67 68 74 00 64 00 00 01 90 00 00 03 84 00 00 00 00 01 01 " - "77 64 74 68 00 32 00 00 00 64 00 00 00 c8 00 00 00 00 01 02 " - "01 03 00 00 01 2c 00 00 00 64 00 00 " - "01 04 00 00 01 2c 00 00 00 4b 00 00") - -FVAR_AXIS_DATA = deHexStr( - "6F 70 73 7a ff ff 80 00 00 01 4c cd 00 01 80 00 00 00 01 59") - -FVAR_INSTANCE_DATA = deHexStr("01 59 00 00 00 00 b3 33 00 00 80 00") - - -def xml_lines(writer): - content = writer.file.getvalue().decode("utf-8") - return [line.strip() for line in content.splitlines()][1:] - - -def AddName(font, name): - nameTable = font.get("name") - if nameTable is None: - nameTable = font["name"] = table__n_a_m_e() - nameTable.names = [] - namerec = NameRecord() - namerec.nameID = 1 + max([n.nameID for n in nameTable.names] + [256]) - namerec.string = name.encode('mac_roman') - namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0) - nameTable.names.append(namerec) - return namerec - - -def MakeFont(): - axes = [("wght", "Weight", 100, 400, 900), ("wdth", "Width", 50, 100, 200)] - instances = [("Light", 300, 100), ("Light Condensed", 300, 75)] - fvarTable = table__f_v_a_r() - font = {"fvar": fvarTable} - for tag, name, minValue, defaultValue, maxValue in axes: - axis = Axis() - axis.axisTag = tag - axis.defaultValue = defaultValue - axis.minValue, axis.maxValue = minValue, maxValue - axis.nameID = AddName(font, name).nameID - fvarTable.axes.append(axis) - for name, weight, width in instances: - inst = NamedInstance() - inst.nameID = AddName(font, name).nameID - inst.coordinates = {"wght": weight, "wdth": width} - fvarTable.instances.append(inst) - return font - - -class FontVariationTableTest(unittest.TestCase): - def test_compile(self): - font = MakeFont() - h = font["fvar"].compile(font) - self.assertEqual(FVAR_DATA, 
font["fvar"].compile(font)) - - def test_decompile(self): - fvar = table__f_v_a_r() - fvar.decompile(FVAR_DATA, ttFont={"fvar": fvar}) - self.assertEqual(["wght", "wdth"], [a.axisTag for a in fvar.axes]) - self.assertEqual([259, 260], [i.nameID for i in fvar.instances]) - - def test_toXML(self): - font = MakeFont() - writer = XMLWriter(BytesIO()) - font["fvar"].toXML(writer, font) - xml = writer.file.getvalue().decode("utf-8") - self.assertEqual(2, xml.count("")) - self.assertTrue("wght" in xml) - self.assertTrue("wdth" in xml) - self.assertEqual(2, xml.count("" in xml) - self.assertTrue("" in xml) - - def test_fromXML(self): - fvar = table__f_v_a_r() - fvar.fromXML("Axis", {}, [("AxisTag", {}, ["opsz"])], ttFont=None) - fvar.fromXML("Axis", {}, [("AxisTag", {}, ["slnt"])], ttFont=None) - fvar.fromXML("NamedInstance", {"nameID": "765"}, [], ttFont=None) - fvar.fromXML("NamedInstance", {"nameID": "234"}, [], ttFont=None) - self.assertEqual(["opsz", "slnt"], [a.axisTag for a in fvar.axes]) - self.assertEqual([765, 234], [i.nameID for i in fvar.instances]) - - -class AxisTest(unittest.TestCase): - def test_compile(self): - axis = Axis() - axis.axisTag, axis.nameID = ('opsz', 345) - axis.minValue, axis.defaultValue, axis.maxValue = (-0.5, 1.3, 1.5) - self.assertEqual(FVAR_AXIS_DATA, axis.compile()) - - def test_decompile(self): - axis = Axis() - axis.decompile(FVAR_AXIS_DATA) - self.assertEqual("opsz", axis.axisTag) - self.assertEqual(345, axis.nameID) - self.assertEqual(-0.5, axis.minValue) - self.assertEqual(1.3, axis.defaultValue) - self.assertEqual(1.5, axis.maxValue) - - def test_toXML(self): - font = MakeFont() - axis = Axis() - axis.decompile(FVAR_AXIS_DATA) - AddName(font, "Optical Size").nameID = 256 - axis.nameID = 256 - writer = XMLWriter(BytesIO()) - axis.toXML(writer, font) - self.assertEqual([ - '', - '', - '', - 'opsz', - '-0.5', - '1.3', - '1.5', - '256', - '' - ], xml_lines(writer)) - - def test_fromXML(self): - axis = Axis() - axis.fromXML("Axis", {}, 
[ - ("AxisTag", {}, ["wght"]), - ("MinValue", {}, ["100"]), - ("DefaultValue", {}, ["400"]), - ("MaxValue", {}, ["900"]), - ("NameID", {}, ["256"]) - ], ttFont=None) - self.assertEqual("wght", axis.axisTag) - self.assertEqual(100, axis.minValue) - self.assertEqual(400, axis.defaultValue) - self.assertEqual(900, axis.maxValue) - self.assertEqual(256, axis.nameID) - - -class NamedInstanceTest(unittest.TestCase): - def test_compile(self): - inst = NamedInstance() - inst.nameID = 345 - inst.coordinates = {"wght": 0.7, "wdth": 0.5} - self.assertEqual(FVAR_INSTANCE_DATA, inst.compile(["wght", "wdth"])) - - def test_decompile(self): - inst = NamedInstance() - inst.decompile(FVAR_INSTANCE_DATA, ["wght", "wdth"]) - self.assertEqual(345, inst.nameID) - self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) - - def test_toXML(self): - font = MakeFont() - inst = NamedInstance() - inst.nameID = AddName(font, "Light Condensed").nameID - inst.coordinates = {"wght": 0.7, "wdth": 0.5} - writer = XMLWriter(BytesIO()) - inst.toXML(writer, font) - self.assertEqual([ - '', - '', - '' % inst.nameID, - '', - '', - '' - ], xml_lines(writer)) - - def test_fromXML(self): - inst = NamedInstance() - attrs = {"nameID": "345"} - inst.fromXML("NamedInstance", attrs, [ - ("coord", {"axis": "wght", "value": "0.7"}, []), - ("coord", {"axis": "wdth", "value": "0.5"}, []), - ], ttFont=MakeFont()) - self.assertEqual(345, inst.nameID) - self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_c_i_d.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_g_c_i_d.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_c_i_d.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_g_c_i_d.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from 
.otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gcid.html +class table__g_c_i_d(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/G__l_a_t.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/G__l_a_t.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/G__l_a_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/G__l_a_t.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,221 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from itertools import * +from functools import partial +from . import DefaultTable +from . import grUtils +import struct, operator, warnings +try: + import lz4 +except: + lz4 = None + + +Glat_format_0 = """ + > # big endian + version: 16.16F +""" + +Glat_format_3 = """ + > + version: 16.16F + compression:L # compression scheme or reserved +""" + +Glat_format_1_entry = """ + > + attNum: B # Attribute number of first attribute + num: B # Number of attributes in this run +""" +Glat_format_23_entry = """ + > + attNum: H # Attribute number of first attribute + num: H # Number of attributes in this run +""" + +Glat_format_3_octabox_metrics = """ + > + subboxBitmap: H # Which subboxes exist on 4x4 grid + diagNegMin: B # Defines minimum negatively-sloped diagonal (si) + diagNegMax: B # Defines maximum negatively-sloped diagonal (sa) + diagPosMin: B # Defines minimum positively-sloped diagonal (di) + diagPosMax: B # Defines maximum positively-sloped diagonal (da) +""" + +Glat_format_3_subbox_entry = """ + > + left: B # xi + right: B # xa + bottom: B # yi + top: B # ya + diagNegMin: B # Defines minimum negatively-sloped diagonal (si) + diagNegMax: B # Defines maximum negatively-sloped diagonal (sa) + diagPosMin: B # Defines minimum positively-sloped diagonal (di) + diagPosMax: B # Defines 
maximum positively-sloped diagonal (da) +""" + +class _Object() : + pass + +class _Dict(dict) : + pass + +class table_G__l_a_t(DefaultTable.DefaultTable): + ''' + Support Graphite Glat tables + ''' + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.scheme = 0 + + def decompile(self, data, ttFont): + sstruct.unpack2(Glat_format_0, data, self) + if self.version <= 1.9: + decoder = partial(self.decompileAttributes12,fmt=Glat_format_1_entry) + elif self.version <= 2.9: + decoder = partial(self.decompileAttributes12,fmt=Glat_format_23_entry) + elif self.version >= 3.0: + (data, self.scheme) = grUtils.decompress(data) + sstruct.unpack2(Glat_format_3, data, self) + self.hasOctaboxes = (self.compression & 1) == 1 + decoder = self.decompileAttributes3 + + gloc = ttFont['Gloc'] + self.attributes = {} + count = 0 + for s,e in zip(gloc,gloc[1:]): + self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e]) + count += 1 + + def decompileAttributes12(self, data, fmt): + attributes = _Dict() + while len(data) > 3: + e, data = sstruct.unpack2(fmt, data, _Object()) + keys = range(e.attNum, e.attNum+e.num) + if len(data) >= 2 * e.num : + vals = struct.unpack_from(('>%dh' % e.num), data) + attributes.update(zip(keys,vals)) + data = data[2*e.num:] + return attributes + + def decompileAttributes3(self, data): + if self.hasOctaboxes: + o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object()) + numsub = bin(o.subboxBitmap).count("1") + o.subboxes = [] + for b in range(numsub): + if len(data) >= 8 : + subbox, data = sstruct.unpack2(Glat_format_3_subbox_entry, + data, _Object()) + o.subboxes.append(subbox) + attrs = self.decompileAttributes12(data, Glat_format_23_entry) + if self.hasOctaboxes: + attrs.octabox = o + return attrs + + def compile(self, ttFont): + data = sstruct.pack(Glat_format_0, self) + if self.version <= 1.9: + encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry) + elif self.version <= 2.9: + 
encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry) + elif self.version >= 3.0: + self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0) + data = sstruct.pack(Glat_format_3, self) + encoder = self.compileAttributes3 + + glocs = [] + for n in range(len(self.attributes)): + glocs.append(len(data)) + data += encoder(self.attributes[ttFont.getGlyphName(n)]) + glocs.append(len(data)) + ttFont['Gloc'].set(glocs) + + if self.version >= 3.0: + data = grUtils.compress(self.scheme, data) + return data + + def compileAttributes12(self, attrs, fmt): + data = [] + for e in grUtils.entries(attrs): + data.extend(sstruct.pack(fmt, {'attNum' : e[0], 'num' : e[1]})) + data.extend(struct.pack(('>%dh' % len(e[2])), *e[2])) + return "".join(data) + + def compileAttributes3(self, attrs): + if self.hasOctaboxes: + o = attrs.octabox + data = sstruct.pack(Glat_format_3_octabox_metrics, o) + numsub = bin(o.subboxBitmap).count("1") + for b in range(numsub) : + data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b]) + else: + data = "" + return data + self.compileAttributes12(attrs, Glat_format_23_entry) + + def toXML(self, writer, ttFont): + writer.simpletag('version', version=self.version, compressionScheme=self.scheme) + writer.newline() + for n, a in sorted(self.attributes.items(), key=lambda x:ttFont.getGlyphID(x[0])): + writer.begintag('glyph', name=n) + writer.newline() + if hasattr(a, 'octabox'): + o = a.octabox + formatstring, names, fixes = sstruct.getformat(Glat_format_3_octabox_metrics) + vals = {} + for k in names: + if k == 'subboxBitmap': continue + vals[k] = "{:.3f}%".format(getattr(o, k) * 100. / 256) + vals['bitmap'] = "{:0X}".format(o.subboxBitmap) + writer.begintag('octaboxes', **vals) + writer.newline() + formatstring, names, fixes = sstruct.getformat(Glat_format_3_subbox_entry) + for s in o.subboxes: + vals = {} + for k in names: + vals[k] = "{:.3f}%".format(getattr(s, k) * 100. 
/ 256) + writer.simpletag('octabox', **vals) + writer.newline() + writer.endtag('octaboxes') + writer.newline() + for k, v in sorted(a.items()): + writer.simpletag('attribute', index=k, value=v) + writer.newline() + writer.endtag('glyph') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'version' : + self.version = float(safeEval(attrs['version'])) + if name != 'glyph' : return + if not hasattr(self, 'attributes'): + self.attributes = {} + gname = attrs['name'] + attributes = _Dict() + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + if tag == 'attribute' : + k = int(safeEval(attrs['index'])) + v = int(safeEval(attrs['value'])) + attributes[k]=v + elif tag == 'octaboxes': + self.hasOctaboxes = True + o = _Object() + o.subboxBitmap = int(attrs['bitmap'], 16) + o.subboxes = [] + del attrs['bitmap'] + for k, v in attrs.items(): + setattr(o, k, int(float(v[:-1]) * 256. / 100. + 0.5)) + for element in subcontent: + if not isinstance(element, tuple): continue + (tag, attrs, subcontent) = element + so = _Object() + for k, v in attrs.items(): + setattr(so, k, int(float(v[:-1]) * 256. / 100. + 0.5)) + o.subboxes.append(so) + attributes.octabox = o + self.attributes[gname] = attributes diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/G__l_o_c.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/G__l_o_c.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/G__l_o_c.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/G__l_o_c.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,71 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import array + +Gloc_header = ''' + > # big endian + version: 16.16F # Table version + flags: H # bit 0: 1=long format, 0=short format + # bit 1: 1=attribute names, 0=no names + numAttribs: H # NUmber of attributes +''' + +class table_G__l_o_c(DefaultTable.DefaultTable): + """ + Support Graphite Gloc tables + """ + dependencies = ['Glat'] + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.attribIds = None + self.numAttribs = 0 + + def decompile(self, data, ttFont): + _, data = sstruct.unpack2(Gloc_header, data, self) + flags = self.flags + del self.flags + self.locations = array.array('I' if flags & 1 else 'H') + self.locations.fromstring(data[:len(data) - self.numAttribs * (flags & 2)]) + self.locations.byteswap() + self.attribIds = array.array('H') + if flags & 2: + self.attribIds.fromstring(data[-self.numAttribs * 2:]) + self.attribIds.byteswap() + + def compile(self, ttFont): + data = sstruct.pack(Gloc_header, dict(version=1.0, + flags=(bool(self.attribIds) << 1) + (self.locations.typecode == 'I'), + numAttribs=self.numAttribs)) + self.locations.byteswap() + data += self.locations.tostring() + self.locations.byteswap() + if self.attribIds: + self.attribIds.byteswap() + data += self.attribIds.tostring() + self.attribIds.byteswap() + return data + + def set(self, locations): + long_format = max(locations) >= 65536 + self.locations = array.array('I' if long_format else 'H', locations) + + def toXML(self, writer, ttFont): + writer.simpletag("attributes", number=self.numAttribs) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'attributes': + self.numAttribs = int(safeEval(attrs['number'])) + + def __getitem__(self, index): + return self.locations[index] + + def __len__(self): + return len(self.locations) + + def __iter__(self): + return iter(self.locations) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_l_y_f.py 
fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_g_l_y_f.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_l_y_f.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_g_l_y_f.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,7 @@ """_g_l_y_f.py -- Converter classes for the 'glyf' table.""" from __future__ import print_function, division, absolute_import +from collections import namedtuple from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools import ttLib @@ -8,12 +9,16 @@ from fontTools.misc.arrayTools import calcBounds, calcIntBounds, pointInRect from fontTools.misc.bezierTools import calcQuadraticBounds from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from numbers import Number from . import DefaultTable from . import ttProgram import sys import struct import array -import warnings +import logging + + +log = logging.getLogger(__name__) # # The Apple and MS rasterizers behave differently for @@ -30,6 +35,12 @@ class table__g_l_y_f(DefaultTable.DefaultTable): + # this attribute controls the amount of padding applied to glyph data upon compile. + # Glyph lenghts are aligned to multiples of the specified value. + # Allowed values are (0, 1, 2, 4). '0' means no padding; '1' (default) also means + # no padding, except for when padding would allow to use short loca offsets. 
+ padding = 1 + def decompile(self, data, ttFont): loca = ttFont['loca'] last = int(loca[0]) @@ -50,10 +61,11 @@ self.glyphs[glyphName] = glyph last = next if len(data) - next >= 4: - warnings.warn("too much 'glyf' table data: expected %d, received %d bytes" % - (next, len(data))) + log.warning( + "too much 'glyf' table data: expected %d, received %d bytes", + next, len(data)) if noname: - warnings.warn('%s glyphs have no name' % noname) + log.warning('%s glyphs have no name', noname) if ttFont.lazy is False: # Be lazy for None and True for glyph in self.glyphs.values(): glyph.expand(self) @@ -61,7 +73,8 @@ def compile(self, ttFont): if not hasattr(self, "glyphOrder"): self.glyphOrder = ttFont.getGlyphOrder() - padding = self.padding if hasattr(self, 'padding') else None + padding = self.padding + assert padding in (0, 1, 2, 4) locations = [] currentLocation = 0 dataList = [] @@ -69,14 +82,14 @@ for glyphName in self.glyphOrder: glyph = self.glyphs[glyphName] glyphData = glyph.compile(self, recalcBBoxes) - if padding: + if padding > 1: glyphData = pad(glyphData, size=padding) locations.append(currentLocation) currentLocation = currentLocation + len(glyphData) dataList.append(glyphData) locations.append(currentLocation) - if padding is None and currentLocation < 0x20000: + if padding == 1 and currentLocation < 0x20000: # See if we can pad any odd-lengthed glyphs to allow loca # table to use the short offsets. indices = [i for i,glyphData in enumerate(dataList) if len(glyphData) % 2 == 1] @@ -138,8 +151,7 @@ if not hasattr(self, "glyphOrder"): self.glyphOrder = ttFont.getGlyphOrder() glyphName = attrs["name"] - if ttFont.verbose: - ttLib.debugmsg("unpacking glyph '%s'" % glyphName) + log.debug("unpacking glyph '%s'", glyphName) glyph = Glyph() for attr in ['xMin', 'yMin', 'xMax', 'yMax']: setattr(glyph, attr, safeEval(attrs.get(attr, '0'))) @@ -162,6 +174,10 @@ # XXX optimize with reverse dict!!! 
return self.glyphOrder.index(glyphName) + def removeHinting(self): + for glyph in self.glyphs.values(): + glyph.removeHinting() + def keys(self): return self.glyphs.keys() @@ -286,6 +302,9 @@ UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS) +CompositeMaxpValues = namedtuple('CompositeMaxpValues', ['nPoints', 'nContours', 'maxComponentDepth']) + + class Glyph(object): def __init__(self, data=""): @@ -306,10 +325,16 @@ return if not self.data: # empty char + del self.data self.numberOfContours = 0 return dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self) del self.data + # Some fonts (eg. Neirizi.ttf) have a 0 for numberOfContours in + # some glyphs; decompileCoordinates assumes that there's at least + # one, so short-circuit here. + if self.numberOfContours == 0: + return if self.isComposite(): self.decompileComponents(data, glyfTable) else: @@ -317,7 +342,11 @@ def compile(self, glyfTable, recalcBBoxes=True): if hasattr(self, "data"): - return self.data + if recalcBBoxes: + # must unpack glyph in order to recalculate bounding box + self.expand(glyfTable) + else: + return self.data if self.numberOfContours == 0: return "" if recalcBBoxes: @@ -333,11 +362,7 @@ if self.isComposite(): for compo in self.components: compo.toXML(writer, ttFont) - if hasattr(self, "program"): - writer.begintag("instructions") - self.program.toXML(writer, ttFont) - writer.endtag("instructions") - writer.newline() + haveInstructions = hasattr(self, "program") else: last = 0 for i in range(self.numberOfContours): @@ -352,11 +377,16 @@ last = self.endPtsOfContours[i] + 1 writer.endtag("contour") writer.newline() - if self.numberOfContours: + haveInstructions = self.numberOfContours > 0 + if haveInstructions: + if self.program: writer.begintag("instructions") + writer.newline() self.program.toXML(writer, ttFont) writer.endtag("instructions") - writer.newline() + else: + writer.simpletag("instructions") + 
writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "contour": @@ -414,7 +444,7 @@ glyfTable, maxComponentDepth + 1) nPoints = nPoints + nP nContours = nContours + nC - return nPoints, nContours, maxComponentDepth + return CompositeMaxpValues(nPoints, nContours, maxComponentDepth) def getMaxpValues(self): assert self.numberOfContours > 0 @@ -436,7 +466,9 @@ self.program.fromBytecode(data[:numInstructions]) data = data[numInstructions:] if len(data) >= 4: - warnings.warn("too much glyph data at the end of composite glyph: %d excess bytes" % len(data)) + log.warning( + "too much glyph data at the end of composite glyph: %d excess bytes", + len(data)) def decompileCoordinates(self, data): endPtsOfContours = array.array("h") @@ -528,7 +560,8 @@ xDataLen = struct.calcsize(xFormat) yDataLen = struct.calcsize(yFormat) if len(data) - (xDataLen + yDataLen) >= 4: - warnings.warn("too much glyph data: %d excess bytes" % (len(data) - (xDataLen + yDataLen))) + log.warning( + "too much glyph data: %d excess bytes", len(data) - (xDataLen + yDataLen)) xCoordinates = struct.unpack(xFormat, data[:xDataLen]) yCoordinates = struct.unpack(yFormat, data[xDataLen:xDataLen+yDataLen]) return flags, xCoordinates, yCoordinates @@ -563,8 +596,7 @@ deltas = self.coordinates.copy() if deltas.isFloat(): # Warn? - xPoints = [int(round(x)) for x in xPoints] - yPoints = [int(round(y)) for y in xPoints] + deltas.toInt() deltas.absoluteToRelative() # TODO(behdad): Add a configuration option for this? @@ -718,7 +750,7 @@ bbox = calcBounds([coords[last], coords[next]]) if not pointInRect(coords[j], bbox): # Ouch! - warnings.warn("Outline has curve with implicit extrema.") + log.warning("Outline has curve with implicit extrema.") # Ouch! Find analytical curve bounds. 
pthis = coords[j] plast = coords[last] @@ -957,13 +989,14 @@ cFlags = cFlags[nextOnCurve:] pen.closePath() - def __ne__(self, other): - return not self.__eq__(other) def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result class GlyphComponent(object): @@ -989,7 +1022,6 @@ self.flags = int(flags) glyphID = int(glyphID) self.glyphName = glyfTable.getGlyphName(int(glyphID)) - #print ">>", reprflag(self.flags) data = data[4:] if self.flags & ARG_1_AND_2_ARE_WORDS: @@ -1025,7 +1057,7 @@ haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | - NON_OVERLAPPING) + NON_OVERLAPPING | OVERLAP_COMPOUND) return more, haveInstructions, data def compile(self, more, haveInstructions, glyfTable): @@ -1034,7 +1066,7 @@ # reset all flags we will calculate ourselves flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | - NON_OVERLAPPING) + NON_OVERLAPPING | OVERLAP_COMPOUND) if more: flags = flags | MORE_COMPONENTS if haveInstructions: @@ -1047,11 +1079,13 @@ data = data + struct.pack(">HH", self.firstPt, self.secondPt) flags = flags | ARG_1_AND_2_ARE_WORDS else: + x = round(self.x) + y = round(self.y) flags = flags | ARGS_ARE_XY_VALUES - if (-128 <= self.x <= 127) and (-128 <= self.y <= 127): - data = data + struct.pack(">bb", self.x, self.y) + if (-128 <= x <= 127) and (-128 <= y <= 127): + data = data + struct.pack(">bb", x, y) else: - data = data + struct.pack(">hh", self.x, self.y) + data = data + struct.pack(">hh", x, y) flags = flags | ARG_1_AND_2_ARE_WORDS if hasattr(self, "transform"): @@ -1120,29 +1154,37 @@ self.transform = [[scale, 0], [0, scale]] self.flags = safeEval(attrs["flags"]) - def __ne__(self, other): - return not 
self.__eq__(other) def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.__dict__ == other.__dict__ + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result + class GlyphCoordinates(object): - def __init__(self, iterable=[]): - self._a = array.array("h") + def __init__(self, iterable=[], typecode="h"): + self._a = array.array(typecode) self.extend(iterable) + @property + def array(self): + return self._a + def isFloat(self): - return self._a.typecode == 'f' + return self._a.typecode == 'd' def _ensureFloat(self): if self.isFloat(): return # The conversion to list() is to work around Jython bug - self._a = array.array("f", list(self._a)) + self._a = array.array("d", list(self._a)) def _checkFloat(self, p): + if self.isFloat(): + return p if any(isinstance(v, float) for v in p): p = [int(v) if int(v) == v else v for v in p] if any(isinstance(v, float) for v in p): @@ -1154,7 +1196,7 @@ return GlyphCoordinates([(0,0)] * count) def copy(self): - c = GlyphCoordinates() + c = GlyphCoordinates(typecode=self._a.typecode) c._a.extend(self._a) return c @@ -1171,13 +1213,18 @@ if isinstance(k, slice): indices = range(*k.indices(len(self))) # XXX This only works if len(v) == len(indices) - # TODO Implement __delitem__ for j,i in enumerate(indices): self[i] = v[j] return v = self._checkFloat(v) self._a[2*k],self._a[2*k+1] = v + def __delitem__(self, i): + i = (2*i) % len(self._a) + del self._a[i] + del self._a[i] + + def __repr__(self): return 'GlyphCoordinates(['+','.join(str(c) for c in self)+'])' @@ -1190,6 +1237,14 @@ p = self._checkFloat(p) self._a.extend(p) + def toInt(self): + if not self.isFloat(): + return + a = array.array("h") + for n in self._a: + a.append(round(n)) + self._a = a + def relativeToAbsolute(self): a = self._a x,y = 0,0 @@ -1209,13 +1264,29 @@ a[2*i+1] = dy def translate(self, p): - (x,y) = p + """ + >>> GlyphCoordinates([(1,2)]).translate((.5,0)) + """ + 
(x,y) = self._checkFloat(p) a = self._a for i in range(len(a) // 2): a[2*i ] += x a[2*i+1] += y + def scale(self, p): + """ + >>> GlyphCoordinates([(1,2)]).scale((.5,0)) + """ + (x,y) = self._checkFloat(p) + a = self._a + for i in range(len(a) // 2): + a[2*i ] *= x + a[2*i+1] *= y + def transform(self, t): + """ + >>> GlyphCoordinates([(1,2)]).transform(((.5,0),(.2,.5))) + """ a = self._a for i in range(len(a) // 2): x = a[2*i ] @@ -1224,13 +1295,197 @@ py = x * t[0][1] + y * t[1][1] self[i] = (px, py) - def __ne__(self, other): - return not self.__eq__(other) def __eq__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g2 = GlyphCoordinates([(1.0,2)]) + >>> g3 = GlyphCoordinates([(1.5,2)]) + >>> g == g2 + True + >>> g == g3 + False + >>> g2 == g3 + False + """ if type(self) != type(other): return NotImplemented return self._a == other._a + def __ne__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g2 = GlyphCoordinates([(1.0,2)]) + >>> g3 = GlyphCoordinates([(1.5,2)]) + >>> g != g2 + False + >>> g != g3 + True + >>> g2 != g3 + True + """ + result = self.__eq__(other) + return result if result is NotImplemented else not result + + # Math operations + + def __pos__(self): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g + GlyphCoordinates([(1, 2)]) + >>> g2 = +g + >>> g2 + GlyphCoordinates([(1, 2)]) + >>> g2.translate((1,0)) + >>> g2 + GlyphCoordinates([(2, 2)]) + >>> g + GlyphCoordinates([(1, 2)]) + """ + return self.copy() + def __neg__(self): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g + GlyphCoordinates([(1, 2)]) + >>> g2 = -g + >>> g2 + GlyphCoordinates([(-1, -2)]) + >>> g + GlyphCoordinates([(1, 2)]) + """ + r = self.copy() + a = r._a + for i in range(len(a)): + a[i] = -a[i] + return r + def __round__(self): + """ + Note: This is Python 3 only. Python 2 does not call __round__. + As such, we cannot test this method either. 
:( + """ + r = self.copy() + r.toInt() + return r + + def __add__(self, other): return self.copy().__iadd__(other) + def __sub__(self, other): return self.copy().__isub__(other) + def __mul__(self, other): return self.copy().__imul__(other) + def __truediv__(self, other): return self.copy().__itruediv__(other) + + __radd__ = __add__ + __rmul__ = __mul__ + def __rsub__(self, other): return other + (-self) + + def __iadd__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g += (.5,0) + >>> g + GlyphCoordinates([(1.5, 2.0)]) + >>> g2 = GlyphCoordinates([(3,4)]) + >>> g += g2 + >>> g + GlyphCoordinates([(4.5, 6.0)]) + """ + if isinstance(other, tuple): + assert len(other) == 2 + self.translate(other) + return self + if isinstance(other, GlyphCoordinates): + if other.isFloat(): self._ensureFloat() + other = other._a + a = self._a + assert len(a) == len(other) + for i in range(len(a)): + a[i] += other[i] + return self + return NotImplemented + + def __isub__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g -= (.5,0) + >>> g + GlyphCoordinates([(0.5, 2.0)]) + >>> g2 = GlyphCoordinates([(3,4)]) + >>> g -= g2 + >>> g + GlyphCoordinates([(-2.5, -2.0)]) + """ + if isinstance(other, tuple): + assert len(other) == 2 + self.translate((-other[0],-other[1])) + return self + if isinstance(other, GlyphCoordinates): + if other.isFloat(): self._ensureFloat() + other = other._a + a = self._a + assert len(a) == len(other) + for i in range(len(a)): + a[i] -= other[i] + return self + return NotImplemented + + def __imul__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g *= (2,.5) + >>> g *= 2 + >>> g + GlyphCoordinates([(4.0, 2.0)]) + >>> g = GlyphCoordinates([(1,2)]) + >>> g *= 2 + >>> g + GlyphCoordinates([(2, 4)]) + """ + if isinstance(other, Number): + other = (other, other) + if isinstance(other, tuple): + if other == (1,1): + return self + assert len(other) == 2 + self.scale(other) + return self + return NotImplemented + + def 
__itruediv__(self, other): + """ + >>> g = GlyphCoordinates([(1,3)]) + >>> g /= (.5,1.5) + >>> g /= 2 + >>> g + GlyphCoordinates([(1.0, 1.0)]) + """ + if isinstance(other, Number): + other = (other, other) + if isinstance(other, tuple): + if other == (1,1): + return self + assert len(other) == 2 + self.scale((1./other[0],1./other[1])) + return self + return NotImplemented + + def __bool__(self): + """ + >>> g = GlyphCoordinates([]) + >>> bool(g) + False + >>> g = GlyphCoordinates([(0,0), (0.,0)]) + >>> bool(g) + True + >>> g = GlyphCoordinates([(0,0), (1,0)]) + >>> bool(g) + True + >>> g = GlyphCoordinates([(0,.5), (0,0)]) + >>> bool(g) + True + """ + return bool(self._a) + + __nonzero__ = __bool__ + def reprflag(flag): bin = "" @@ -1244,3 +1499,8 @@ flag = flag >> 1 bin = (14 - len(bin)) * "0" + bin return bin + + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/grUtils.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/grUtils.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/grUtils.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/grUtils.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,79 @@ +import struct, warnings +try: + import lz4 +except: + lz4 = None + +#old scheme for VERSION < 0.9 otherwise use lz4.block + +def decompress(data): + (compression,) = struct.unpack(">L", data[4:8]) + scheme = compression >> 27 + size = compression & 0x07ffffff + if scheme == 0: + pass + elif scheme == 1 and lz4: + res = lz4.decompress(struct.pack("L", (scheme << 27) + (len(data) & 0x07ffffff)) + if scheme == 0 : + return data + elif scheme == 1 and lz4: + res = lz4.compress(hdr + data) + return res + else: + warnings.warn("Table failed to compress by unsupported compression scheme") + return data + +def _entries(attrs, sameval): + ak = 0 + vals = [] + lastv = 0 + for k,v in attrs: + if len(vals) and (k != ak + 1 or (sameval and v != 
lastv)) : + yield (ak - len(vals) + 1, len(vals), vals) + vals = [] + ak = k + vals.append(v) + lastv = v + yield (ak - len(vals) + 1, len(vals), vals) + +def entries(attributes, sameval = False): + g = _entries(sorted(attributes.iteritems(), key=lambda x:int(x[0])), sameval) + return g + +def bininfo(num, size=1): + if num == 0: + return struct.pack(">4H", 0, 0, 0, 0) + srange = 1; + select = 0 + while srange <= num: + srange *= 2 + select += 1 + select -= 1 + srange /= 2 + srange *= size + shift = num * size - srange + return struct.pack(">4H", num, srange, select, shift) + +def num2tag(n): + if n < 0x200000: + return str(n) + else: + return struct.unpack('4s', struct.pack('>L', n))[0].replace(b'\000', b'').decode() + +def tag2num(n): + try: + return int(n) + except ValueError: + n = (n+" ")[:4] + return struct.unpack('>L', n.encode('ascii'))[0] + diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_v_a_r.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_g_v_a_r.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_g_v_a_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,15 +2,24 @@ from fontTools.misc.py23 import * from fontTools import ttLib from fontTools.misc import sstruct -from fontTools.misc.fixedTools import fixedToFloat, floatToFixed from fontTools.misc.textTools import safeEval from fontTools.ttLib import TTLibError from . 
import DefaultTable import array -import io -import sys +import itertools +import logging import struct +import sys +import fontTools.ttLib.tables.TupleVariation as tv + + +log = logging.getLogger(__name__) +TupleVariation = tv.TupleVariation + +# https://www.microsoft.com/typography/otspec/gvar.htm +# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm +# # Apple's documentation of 'gvar': # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html # @@ -19,46 +28,35 @@ GVAR_HEADER_FORMAT = """ > # big endian - version: H - reserved: H - axisCount: H - sharedCoordCount: H - offsetToCoord: I - glyphCount: H - flags: H - offsetToData: I + version: H + reserved: H + axisCount: H + sharedTupleCount: H + offsetToSharedTuples: I + glyphCount: H + flags: H + offsetToGlyphVariationData: I """ GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT) -TUPLES_SHARE_POINT_NUMBERS = 0x8000 -TUPLE_COUNT_MASK = 0x0fff - -EMBEDDED_TUPLE_COORD = 0x8000 -INTERMEDIATE_TUPLE = 0x4000 -PRIVATE_POINT_NUMBERS = 0x2000 -TUPLE_INDEX_MASK = 0x0fff - -DELTAS_ARE_ZERO = 0x80 -DELTAS_ARE_WORDS = 0x40 -DELTA_RUN_COUNT_MASK = 0x3f - -POINTS_ARE_WORDS = 0x80 -POINT_RUN_COUNT_MASK = 0x7f - class table__g_v_a_r(DefaultTable.DefaultTable): - dependencies = ["fvar", "glyf"] + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.version, self.reserved = 1, 0 + self.variations = {} + def compile(self, ttFont): axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - - sharedCoords = self.compileSharedCoords_(axisTags) - sharedCoordIndices = {coord:i for i, coord in enumerate(sharedCoords)} - sharedCoordSize = sum([len(c) for c in sharedCoords]) - - compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedCoordIndices) + sharedTuples = tv.compileSharedTuples( + axisTags, itertools.chain(*self.variations.values())) + sharedTupleIndices = {coord:i for i, coord in enumerate(sharedTuples)} + sharedTupleSize = sum([len(c) for c in 
sharedTuples]) + compiledGlyphs = self.compileGlyphs_( + ttFont, axisTags, sharedTupleIndices) offset = 0 offsets = [] for glyph in compiledGlyphs: @@ -71,100 +69,26 @@ header["version"] = self.version header["reserved"] = self.reserved header["axisCount"] = len(axisTags) - header["sharedCoordCount"] = len(sharedCoords) - header["offsetToCoord"] = GVAR_HEADER_SIZE + len(compiledOffsets) + header["sharedTupleCount"] = len(sharedTuples) + header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets) header["glyphCount"] = len(compiledGlyphs) header["flags"] = tableFormat - header["offsetToData"] = header["offsetToCoord"] + sharedCoordSize + header["offsetToGlyphVariationData"] = header["offsetToSharedTuples"] + sharedTupleSize compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header) result = [compiledHeader, compiledOffsets] - result.extend(sharedCoords) + result.extend(sharedTuples) result.extend(compiledGlyphs) return bytesjoin(result) - def compileSharedCoords_(self, axisTags): - coordCount = {} - for variations in self.variations.values(): - for gvar in variations: - coord = gvar.compileCoord(axisTags) - coordCount[coord] = coordCount.get(coord, 0) + 1 - sharedCoords = [(count, coord) for (coord, count) in coordCount.items() if count > 1] - sharedCoords.sort(reverse=True) - MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1 - sharedCoords = sharedCoords[:MAX_NUM_SHARED_COORDS] - return [c[1] for c in sharedCoords] # Strip off counts. 
- def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices): result = [] for glyphName in ttFont.getGlyphOrder(): glyph = ttFont["glyf"][glyphName] - numPointsInGlyph = self.getNumPoints_(glyph) - result.append(self.compileGlyph_(glyphName, numPointsInGlyph, axisTags, sharedCoordIndices)) - return result - - def compileGlyph_(self, glyphName, numPointsInGlyph, axisTags, sharedCoordIndices): - variations = self.variations.get(glyphName, []) - variations = [v for v in variations if v.hasImpact()] - if len(variations) == 0: - return b"" - - # Each glyph variation tuples modifies a set of control points. To indicate - # which exact points are getting modified, a single tuple can either refer - # to a shared set of points, or the tuple can supply its private point numbers. - # Because the impact of sharing can be positive (no need for a private point list) - # or negative (need to supply 0,0 deltas for unused points), it is not obvious - # how to determine which tuples should take their points from the shared - # pool versus have their own. Perhaps we should resort to brute force, - # and try all combinations? However, if a glyph has n variation tuples, - # we would need to try 2^n combinations (because each tuple may or may not - # be part of the shared set). How many variations tuples do glyphs have? - # - # Skia.ttf: {3: 1, 5: 11, 6: 41, 7: 62, 8: 387, 13: 1, 14: 3} - # JamRegular.ttf: {3: 13, 4: 122, 5: 1, 7: 4, 8: 1, 9: 1, 10: 1} - # BuffaloGalRegular.ttf: {1: 16, 2: 13, 4: 2, 5: 4, 6: 19, 7: 1, 8: 3, 9: 18} - # (Reading example: In Skia.ttf, 41 glyphs have 6 variation tuples). - # - # Is this even worth optimizing? If we never use a shared point list, - # the private lists will consume 112K for Skia, 5K for BuffaloGalRegular, - # and 15K for JamRegular. If we always use a shared point list, - # the shared lists will consume 16K for Skia, 3K for BuffaloGalRegular, - # and 10K for JamRegular. 
However, in the latter case the delta arrays - # will become larger, but I haven't yet measured by how much. From - # gut feeling (which may be wrong), the optimum is to share some but - # not all points; however, then we would need to try all combinations. - # - # For the time being, we try two variants and then pick the better one: - # (a) each tuple supplies its own private set of points; - # (b) all tuples refer to a shared set of points, which consists of - # "every control point in the glyph". - allPoints = set(range(numPointsInGlyph)) - tuples = [] - data = [] - someTuplesSharePoints = False - for gvar in variations: - privateTuple, privateData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) - sharedTuple, sharedData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=allPoints) - # TODO: If we use shared points, Apple MacOS X 10.9.5 cannot display our fonts. - # This is probably a problem with our code; find the problem and fix it. - #if (len(sharedTuple) + len(sharedData)) < (len(privateTuple) + len(privateData)): - if False: - tuples.append(sharedTuple) - data.append(sharedData) - someTuplesSharePoints = True - else: - tuples.append(privateTuple) - data.append(privateData) - if someTuplesSharePoints: - data = bytechr(0) + bytesjoin(data) # 0x00 = "all points in glyph" - tupleCount = TUPLES_SHARE_POINT_NUMBERS | len(tuples) - else: - data = bytesjoin(data) - tupleCount = len(tuples) - tuples = bytesjoin(tuples) - result = struct.pack(">HH", tupleCount, 4 + len(tuples)) + tuples + data - if len(result) % 2 != 0: - result = result + b"\0" # padding + pointCount = self.getNumPoints_(glyph) + variations = self.variations.get(glyphName, []) + result.append(compileGlyph_(variations, pointCount, + axisTags, sharedCoordIndices)) return result def decompile(self, data, ttFont): @@ -174,19 +98,17 @@ assert len(glyphs) == self.glyphCount assert len(axisTags) == self.axisCount offsets = self.decompileOffsets_(data[GVAR_HEADER_SIZE:], 
tableFormat=(self.flags & 1), glyphCount=self.glyphCount) - sharedCoords = self.decompileSharedCoords_(axisTags, data) + sharedCoords = tv.decompileSharedTuples( + axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples) self.variations = {} + offsetToData = self.offsetToGlyphVariationData for i in range(self.glyphCount): glyphName = glyphs[i] glyph = ttFont["glyf"][glyphName] numPointsInGlyph = self.getNumPoints_(glyph) - gvarData = data[self.offsetToData + offsets[i] : self.offsetToData + offsets[i + 1]] - self.variations[glyphName] = \ - self.decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData) - - def decompileSharedCoords_(self, axisTags, data): - result, _pos = GlyphVariation.decompileCoords_(axisTags, self.sharedCoordCount, data, self.offsetToCoord) - return result + gvarData = data[offsetToData + offsets[i] : offsetToData + offsets[i + 1]] + self.variations[glyphName] = decompileGlyph_( + numPointsInGlyph, sharedCoords, axisTags, gvarData) @staticmethod def decompileOffsets_(data, tableFormat, glyphCount): @@ -234,68 +156,6 @@ packed.byteswap() return (packed.tostring(), tableFormat) - def decompileGlyph_(self, numPointsInGlyph, sharedCoords, axisTags, data): - if len(data) < 4: - return [] - numAxes = len(axisTags) - tuples = [] - flags, offsetToData = struct.unpack(">HH", data[:4]) - pos = 4 - dataPos = offsetToData - if (flags & TUPLES_SHARE_POINT_NUMBERS) != 0: - sharedPoints, dataPos = GlyphVariation.decompilePoints_(numPointsInGlyph, data, dataPos) - else: - sharedPoints = [] - for _ in range(flags & TUPLE_COUNT_MASK): - dataSize, flags = struct.unpack(">HH", data[pos:pos+4]) - tupleSize = GlyphVariation.getTupleSize_(flags, numAxes) - tupleData = data[pos : pos + tupleSize] - pointDeltaData = data[dataPos : dataPos + dataSize] - tuples.append(self.decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, tupleData, pointDeltaData)) - pos += tupleSize - dataPos += dataSize - return tuples - - @staticmethod - def 
decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, data, tupleData): - flags = struct.unpack(">H", data[2:4])[0] - - pos = 4 - if (flags & EMBEDDED_TUPLE_COORD) == 0: - coord = sharedCoords[flags & TUPLE_INDEX_MASK] - else: - coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - if (flags & INTERMEDIATE_TUPLE) != 0: - minCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - maxCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - else: - minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) - axes = {} - for axis in axisTags: - coords = minCoord[axis], coord[axis], maxCoord[axis] - if coords != (0.0, 0.0, 0.0): - axes[axis] = coords - pos = 0 - if (flags & PRIVATE_POINT_NUMBERS) != 0: - points, pos = GlyphVariation.decompilePoints_(numPointsInGlyph, tupleData, pos) - else: - points = sharedPoints - deltas_x, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) - deltas_y, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) - deltas = [None] * numPointsInGlyph - for p, x, y in zip(points, deltas_x, deltas_y): - deltas[p] = (x, y) - return GlyphVariation(axes, deltas) - - @staticmethod - def computeMinMaxCoord_(coord): - minCoord = {} - maxCoord = {} - for (axis, value) in coord.items(): - minCoord[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - maxCoord[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - return (minCoord, maxCoord) - def toXML(self, writer, ttFont, progress=None): writer.simpletag("version", value=self.version) writer.newline() @@ -329,7 +189,7 @@ if isinstance(element, tuple): name, attrs, content = element if name == "tuple": - gvar = GlyphVariation({}, [None] * numPointsInGlyph) + gvar = TupleVariation({}, [None] * numPointsInGlyph) glyphVariations.append(gvar) for tupleElement in content: if isinstance(tupleElement, tuple): @@ -347,371 +207,23 @@ return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS -class 
GlyphVariation(object): - def __init__(self, axes, coordinates): - self.axes = axes - self.coordinates = coordinates - - def __repr__(self): - axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])) - return "" % (axes, self.coordinates) - - def __eq__(self, other): - return self.coordinates == other.coordinates and self.axes == other.axes - - def getUsedPoints(self): - result = set() - for i, point in enumerate(self.coordinates): - if point is not None: - result.add(i) - return result - - def hasImpact(self): - """Returns True if this GlyphVariation has any visible impact. - - If the result is False, the GlyphVariation can be omitted from the font - without making any visible difference. - """ - for c in self.coordinates: - if c is not None: - return True - return False - - def toXML(self, writer, axisTags): - writer.begintag("tuple") - writer.newline() - for axis in axisTags: - value = self.axes.get(axis) - if value is not None: - minValue, value, maxValue = value - defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - if minValue == defaultMinValue and maxValue == defaultMaxValue: - writer.simpletag("coord", axis=axis, value=value) - else: - writer.simpletag("coord", axis=axis, value=value, min=minValue, max=maxValue) - writer.newline() - wrote_any_points = False - for i, point in enumerate(self.coordinates): - if point is not None: - writer.simpletag("delta", pt=i, x=point[0], y=point[1]) - writer.newline() - wrote_any_points = True - if not wrote_any_points: - writer.comment("no deltas") - writer.newline() - writer.endtag("tuple") - writer.newline() - - def fromXML(self, name, attrs, _content): - if name == "coord": - axis = attrs["axis"] - value = float(attrs["value"]) - defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - minValue = float(attrs.get("min", defaultMinValue)) - 
maxValue = float(attrs.get("max", defaultMaxValue)) - self.axes[axis] = (minValue, value, maxValue) - elif name == "delta": - point = safeEval(attrs["pt"]) - x = safeEval(attrs["x"]) - y = safeEval(attrs["y"]) - self.coordinates[point] = (x, y) - - def compile(self, axisTags, sharedCoordIndices, sharedPoints): - tupleData = [] - - coord = self.compileCoord(axisTags) - if coord in sharedCoordIndices: - flags = sharedCoordIndices[coord] - else: - flags = EMBEDDED_TUPLE_COORD - tupleData.append(coord) - - intermediateCoord = self.compileIntermediateCoord(axisTags) - if intermediateCoord is not None: - flags |= INTERMEDIATE_TUPLE - tupleData.append(intermediateCoord) - - if sharedPoints is not None: - auxData = self.compileDeltas(sharedPoints) - else: - flags |= PRIVATE_POINT_NUMBERS - points = self.getUsedPoints() - numPointsInGlyph = len(self.coordinates) - auxData = self.compilePoints(points, numPointsInGlyph) + self.compileDeltas(points) - - tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(tupleData) - return (tupleData, auxData) - - def compileCoord(self, axisTags): - result = [] - for axis in axisTags: - _minValue, value, _maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) - result.append(struct.pack(">h", floatToFixed(value, 14))) - return bytesjoin(result) - - def compileIntermediateCoord(self, axisTags): - needed = False - for axis in axisTags: - minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) - defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - if (minValue != defaultMinValue) or (maxValue != defaultMaxValue): - needed = True - break - if not needed: - return None - minCoords = [] - maxCoords = [] - for axis in axisTags: - minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) - minCoords.append(struct.pack(">h", floatToFixed(minValue, 14))) - maxCoords.append(struct.pack(">h", floatToFixed(maxValue, 14))) - return bytesjoin(minCoords + 
maxCoords) - - @staticmethod - def decompileCoord_(axisTags, data, offset): - coord = {} - pos = offset - for axis in axisTags: - coord[axis] = fixedToFloat(struct.unpack(">h", data[pos:pos+2])[0], 14) - pos += 2 - return coord, pos - - @staticmethod - def decompileCoords_(axisTags, numCoords, data, offset): - result = [] - pos = offset - for _ in range(numCoords): - coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - result.append(coord) - return result, pos - - @staticmethod - def compilePoints(points, numPointsInGlyph): - # If the set consists of all points in the glyph, it gets encoded with - # a special encoding: a single zero byte. - if len(points) == numPointsInGlyph: - return b"\0" - - # In the 'gvar' table, the packing of point numbers is a little surprising. - # It consists of multiple runs, each being a delta-encoded list of integers. - # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as - # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1. - # There are two types of runs, with values being either 8 or 16 bit unsigned - # integers. - points = list(points) - points.sort() - numPoints = len(points) - - # The binary representation starts with the total number of points in the set, - # encoded into one or two bytes depending on the value. 
- if numPoints < 0x80: - result = [bytechr(numPoints)] - else: - result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)] - - MAX_RUN_LENGTH = 127 - pos = 0 - while pos < numPoints: - run = io.BytesIO() - runLength = 0 - lastValue = 0 - useByteEncoding = (points[pos] <= 0xff) - while pos < numPoints and runLength <= MAX_RUN_LENGTH: - curValue = points[pos] - delta = curValue - lastValue - if useByteEncoding and delta > 0xff: - # we need to start a new run (which will not use byte encoding) - break - if useByteEncoding: - run.write(bytechr(delta)) - else: - run.write(bytechr(delta >> 8)) - run.write(bytechr(delta & 0xff)) - lastValue = curValue - pos += 1 - runLength += 1 - if useByteEncoding: - runHeader = bytechr(runLength - 1) - else: - runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS) - result.append(runHeader) - result.append(run.getvalue()) - - return bytesjoin(result) - - @staticmethod - def decompilePoints_(numPointsInGlyph, data, offset): - """(numPointsInGlyph, data, offset) --> ([point1, point2, ...], newOffset)""" - pos = offset - numPointsInData = byteord(data[pos]) - pos += 1 - if (numPointsInData & POINTS_ARE_WORDS) != 0: - numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | byteord(data[pos]) - pos += 1 - if numPointsInData == 0: - return (range(numPointsInGlyph), pos) - result = [] - while len(result) < numPointsInData: - runHeader = byteord(data[pos]) - pos += 1 - numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1 - point = 0 - if (runHeader & POINTS_ARE_WORDS) == 0: - for _ in range(numPointsInRun): - point += byteord(data[pos]) - pos += 1 - result.append(point) - else: - for _ in range(numPointsInRun): - point += struct.unpack(">H", data[pos:pos+2])[0] - pos += 2 - result.append(point) - if max(result) >= numPointsInGlyph: - raise TTLibError("malformed 'gvar' table") - return (result, pos) - - def compileDeltas(self, points): - deltaX = [] - deltaY = [] - for p in sorted(list(points)): - c = 
self.coordinates[p] - if c is not None: - deltaX.append(c[0]) - deltaY.append(c[1]) - return self.compileDeltaValues_(deltaX) + self.compileDeltaValues_(deltaY) - - @staticmethod - def compileDeltaValues_(deltas): - """[value1, value2, value3, ...] --> bytestring - - Emits a sequence of runs. Each run starts with a - byte-sized header whose 6 least significant bits - (header & 0x3F) indicate how many values are encoded - in this run. The stored length is the actual length - minus one; run lengths are thus in the range [1..64]. - If the header byte has its most significant bit (0x80) - set, all values in this run are zero, and no data - follows. Otherwise, the header byte is followed by - ((header & 0x3F) + 1) signed values. If (header & - 0x40) is clear, the delta values are stored as signed - bytes; if (header & 0x40) is set, the delta values are - signed 16-bit integers. - """ # Explaining the format because the 'gvar' spec is hard to understand. - stream = io.BytesIO() - pos = 0 - while pos < len(deltas): - value = deltas[pos] - if value == 0: - pos = GlyphVariation.encodeDeltaRunAsZeroes_(deltas, pos, stream) - elif value >= -128 and value <= 127: - pos = GlyphVariation.encodeDeltaRunAsBytes_(deltas, pos, stream) - else: - pos = GlyphVariation.encodeDeltaRunAsWords_(deltas, pos, stream) - return stream.getvalue() - - @staticmethod - def encodeDeltaRunAsZeroes_(deltas, offset, stream): - runLength = 0 - pos = offset - numDeltas = len(deltas) - while pos < numDeltas and runLength < 64 and deltas[pos] == 0: - pos += 1 - runLength += 1 - assert runLength >= 1 and runLength <= 64 - stream.write(bytechr(DELTAS_ARE_ZERO | (runLength - 1))) - return pos - - @staticmethod - def encodeDeltaRunAsBytes_(deltas, offset, stream): - runLength = 0 - pos = offset - numDeltas = len(deltas) - while pos < numDeltas and runLength < 64: - value = deltas[pos] - if value < -128 or value > 127: - break - # Within a byte-encoded run of deltas, a single zero - # is best stored literally 
as 0x00 value. However, - # if are two or more zeroes in a sequence, it is - # better to start a new run. For example, the sequence - # of deltas [15, 15, 0, 15, 15] becomes 6 bytes - # (04 0F 0F 00 0F 0F) when storing the zero value - # literally, but 7 bytes (01 0F 0F 80 01 0F 0F) - # when starting a new run. - if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0: - break - pos += 1 - runLength += 1 - assert runLength >= 1 and runLength <= 64 - stream.write(bytechr(runLength - 1)) - for i in range(offset, pos): - stream.write(struct.pack('b', deltas[i])) - return pos - - @staticmethod - def encodeDeltaRunAsWords_(deltas, offset, stream): - runLength = 0 - pos = offset - numDeltas = len(deltas) - while pos < numDeltas and runLength < 64: - value = deltas[pos] - # Within a word-encoded run of deltas, it is easiest - # to start a new run (with a different encoding) - # whenever we encounter a zero value. For example, - # the sequence [0x6666, 0, 0x7777] needs 7 bytes when - # storing the zero literally (42 66 66 00 00 77 77), - # and equally 7 bytes when starting a new run - # (40 66 66 80 40 77 77). - if value == 0: - break - - # Within a word-encoded run of deltas, a single value - # in the range (-128..127) should be encoded literally - # because it is more compact. For example, the sequence - # [0x6666, 2, 0x7777] becomes 7 bytes when storing - # the value literally (42 66 66 00 02 77 77), but 8 bytes - # when starting a new run (40 66 66 00 02 40 77 77). 
- isByteEncodable = lambda value: value >= -128 and value <= 127 - if isByteEncodable(value) and pos+1 < numDeltas and isByteEncodable(deltas[pos+1]): - break - pos += 1 - runLength += 1 - assert runLength >= 1 and runLength <= 64 - stream.write(bytechr(DELTAS_ARE_WORDS | (runLength - 1))) - for i in range(offset, pos): - stream.write(struct.pack('>h', deltas[i])) - return pos - - @staticmethod - def decompileDeltas_(numDeltas, data, offset): - """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)""" - result = [] - pos = offset - while len(result) < numDeltas: - runHeader = byteord(data[pos]) - pos += 1 - numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1 - if (runHeader & DELTAS_ARE_ZERO) != 0: - result.extend([0] * numDeltasInRun) - elif (runHeader & DELTAS_ARE_WORDS) != 0: - for _ in range(numDeltasInRun): - result.append(struct.unpack(">h", data[pos:pos+2])[0]) - pos += 2 - else: - for _ in range(numDeltasInRun): - result.append(struct.unpack(">b", data[pos:pos+1])[0]) - pos += 1 - assert len(result) == numDeltas - return (result, pos) - - @staticmethod - def getTupleSize_(flags, axisCount): - size = 4 - if (flags & EMBEDDED_TUPLE_COORD) != 0: - size += axisCount * 2 - if (flags & INTERMEDIATE_TUPLE) != 0: - size += axisCount * 4 - return size +def compileGlyph_(variations, pointCount, axisTags, sharedCoordIndices): + tupleVariationCount, tuples, data = tv.compileTupleVariationStore( + variations, pointCount, axisTags, sharedCoordIndices) + if tupleVariationCount == 0: + return b"" + result = (struct.pack(">HH", tupleVariationCount, 4 + len(tuples)) + + tuples + data) + if len(result) % 2 != 0: + result = result + b"\0" # padding + return result + + +def decompileGlyph_(pointCount, sharedTuples, axisTags, data): + if len(data) < 4: + return [] + tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4]) + dataPos = offsetToData + return tv.decompileTupleVariationStore("gvar", axisTags, + tupleVariationCount, pointCount, + sharedTuples, 
data, 4, offsetToData) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_v_a_r_test.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_g_v_a_r_test.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_g_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_g_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,539 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr, hexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r, GlyphVariation -import random -import unittest - -def hexencode(s): - h = hexStr(s).upper() - return ' '.join([h[i:i+2] for i in range(0, len(h), 2)]) - -# Glyph variation table of uppercase I in the Skia font, as printed in Apple's -# TrueType spec. The actual Skia font uses a different table for uppercase I. 
-# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html -SKIA_GVAR_I = deHexStr( - "00 08 00 24 00 33 20 00 00 15 20 01 00 1B 20 02 " - "00 24 20 03 00 15 20 04 00 26 20 07 00 0D 20 06 " - "00 1A 20 05 00 40 01 01 01 81 80 43 FF 7E FF 7E " - "FF 7E FF 7E 00 81 45 01 01 01 03 01 04 01 04 01 " - "04 01 02 80 40 00 82 81 81 04 3A 5A 3E 43 20 81 " - "04 0E 40 15 45 7C 83 00 0D 9E F3 F2 F0 F0 F0 F0 " - "F3 9E A0 A1 A1 A1 9F 80 00 91 81 91 00 0D 0A 0A " - "09 0A 0A 0A 0A 0A 0A 0A 0A 0A 0A 0B 80 00 15 81 " - "81 00 C4 89 00 C4 83 00 0D 80 99 98 96 96 96 96 " - "99 80 82 83 83 83 81 80 40 FF 18 81 81 04 E6 F9 " - "10 21 02 81 04 E8 E5 EB 4D DA 83 00 0D CE D3 D4 " - "D3 D3 D3 D5 D2 CE CC CD CD CD CD 80 00 A1 81 91 " - "00 0D 07 03 04 02 02 02 03 03 07 07 08 08 08 07 " - "80 00 09 81 81 00 28 40 00 A4 02 24 24 66 81 04 " - "08 FA FA FA 28 83 00 82 02 FF FF FF 83 02 01 01 " - "01 84 91 00 80 06 07 08 08 08 08 0A 07 80 03 FE " - "FF FF FF 81 00 08 81 82 02 EE EE EE 8B 6D 00") - -# Shared coordinates in the Skia font, as printed in Apple's TrueType spec. 
-SKIA_SHARED_COORDS = deHexStr( - "40 00 00 00 C0 00 00 00 00 00 40 00 00 00 C0 00 " - "C0 00 C0 00 40 00 C0 00 40 00 40 00 C0 00 40 00") - - -class GlyphVariationTableTest(unittest.TestCase): - def test_compileOffsets_shortFormat(self): - self.assertEqual((deHexStr("00 00 00 02 FF C0"), 0), - table__g_v_a_r.compileOffsets_([0, 4, 0x1ff80])) - - def test_compileOffsets_longFormat(self): - self.assertEqual((deHexStr("00 00 00 00 00 00 00 04 CA FE BE EF"), 1), - table__g_v_a_r.compileOffsets_([0, 4, 0xCAFEBEEF])) - - def test_decompileOffsets_shortFormat(self): - decompileOffsets = table__g_v_a_r.decompileOffsets_ - data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") - self.assertEqual([2*0x0011, 2*0x2233, 2*0x4455, 2*0x6677, 2*0x8899, 2*0xaabb], - list(decompileOffsets(data, tableFormat=0, glyphCount=5))) - - def test_decompileOffsets_longFormat(self): - decompileOffsets = table__g_v_a_r.decompileOffsets_ - data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") - self.assertEqual([0x00112233, 0x44556677, 0x8899aabb], - list(decompileOffsets(data, tableFormat=1, glyphCount=2))) - - def test_compileGlyph_noVariations(self): - table = table__g_v_a_r() - table.variations = {} - self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) - - def test_compileGlyph_emptyVariations(self): - table = table__g_v_a_r() - table.variations = {"glyphname": []} - self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) - - def test_compileGlyph_onlyRedundantVariations(self): - table = table__g_v_a_r() - axes = {"wght": (0.3, 0.4, 0.5), "opsz": (0.7, 0.8, 0.9)} - table.variations = {"glyphname": [ - GlyphVariation(axes, [None] * 4), - GlyphVariation(axes, [None] * 4), - GlyphVariation(axes, [None] * 4) - ]} - self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) - - def test_compileGlyph_roundTrip(self): - table = table__g_v_a_r() - axisTags = ["wght", "wdth"] - numPointsInGlyph = 4 - glyphCoords = [(1,1), 
(2,2), (3,3), (4,4)] - gvar1 = GlyphVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) - gvar2 = GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) - table.variations = {"oslash": [gvar1, gvar2]} - data = table.compileGlyph_("oslash", numPointsInGlyph, axisTags, {}) - self.assertEqual([gvar1, gvar2], table.decompileGlyph_(numPointsInGlyph, {}, axisTags, data)) - - def test_compileSharedCoords(self): - table = table__g_v_a_r() - table.variations = {} - deltas = [None] * 4 - table.variations["A"] = [ - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.5, 0.7, 1.0)}, deltas) - ] - table.variations["B"] = [ - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.7, 1.0)}, deltas), - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.8, 1.0)}, deltas) - ] - table.variations["C"] = [ - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.7, 1.0)}, deltas), - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.8, 1.0)}, deltas), - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.9, 1.0)}, deltas) - ] - # {"wght":1.0, "wdth":0.7} is shared 3 times; {"wght":1.0, "wdth":0.8} is shared twice. - # Min and max values are not part of the shared coordinate pool and should get ignored. 
- result = table.compileSharedCoords_(["wght", "wdth"]) - self.assertEqual(["40 00 2C CD", "40 00 33 33"], [hexencode(c) for c in result]) - - def test_decompileSharedCoords_Skia(self): - table = table__g_v_a_r() - table.offsetToCoord = 0 - table.sharedCoordCount = 8 - sharedCoords = table.decompileSharedCoords_(["wght", "wdth"], SKIA_SHARED_COORDS) - self.assertEqual([ - {"wght": 1.0, "wdth": 0.0}, - {"wght": -1.0, "wdth": 0.0}, - {"wght": 0.0, "wdth": 1.0}, - {"wght": 0.0, "wdth": -1.0}, - {"wght": -1.0, "wdth": -1.0}, - {"wght": 1.0, "wdth": -1.0}, - {"wght": 1.0, "wdth": 1.0}, - {"wght": -1.0, "wdth": 1.0} - ], sharedCoords) - - def test_decompileSharedCoords_empty(self): - table = table__g_v_a_r() - table.offsetToCoord = 0 - table.sharedCoordCount = 0 - self.assertEqual([], table.decompileSharedCoords_(["wght"], b"")) - - def test_decompileGlyph_Skia_I(self): - axes = ["wght", "wdth"] - table = table__g_v_a_r() - table.offsetToCoord = 0 - table.sharedCoordCount = 8 - table.axisCount = len(axes) - sharedCoords = table.decompileSharedCoords_(axes, SKIA_SHARED_COORDS) - tuples = table.decompileGlyph_(18, sharedCoords, axes, SKIA_GVAR_I) - self.assertEqual(8, len(tuples)) - self.assertEqual({"wght": (0.0, 1.0, 1.0)}, tuples[0].axes) - self.assertEqual("257,0 -127,0 -128,58 -130,90 -130,62 -130,67 -130,32 -127,0 257,0 " - "259,14 260,64 260,21 260,69 258,124 0,0 130,0 0,0 0,0", - " ".join(["%d,%d" % c for c in tuples[0].coordinates])) - - def test_decompileGlyph_empty(self): - table = table__g_v_a_r() - self.assertEqual([], table.decompileGlyph_(numPointsInGlyph=5, sharedCoords=[], axisTags=[], data=b"")) - - def test_computeMinMaxCord(self): - coord = {"wght": -0.3, "wdth": 0.7} - minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) - self.assertEqual({"wght": -0.3, "wdth": 0.0}, minCoord) - self.assertEqual({"wght": 0.0, "wdth": 0.7}, maxCoord) - -class GlyphVariationTest(unittest.TestCase): - def test_equal(self): - gvar1 = GlyphVariation({"wght":(0.0, 
1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - self.assertEqual(gvar1, gvar2) - - def test_equal_differentAxes(self): - gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - gvar2 = GlyphVariation({"wght":(0.7, 0.8, 0.9)}, [(0,0), (9,8), (7,6)]) - self.assertNotEqual(gvar1, gvar2) - - def test_equal_differentCoordinates(self): - gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8)]) - self.assertNotEqual(gvar1, gvar2) - - def test_hasImpact_someDeltasNotZero(self): - axes = {"wght":(0.0, 1.0, 1.0)} - gvar = GlyphVariation(axes, [(0,0), (9,8), (7,6)]) - self.assertTrue(gvar.hasImpact()) - - def test_hasImpact_allDeltasZero(self): - axes = {"wght":(0.0, 1.0, 1.0)} - gvar = GlyphVariation(axes, [(0,0), (0,0), (0,0)]) - self.assertTrue(gvar.hasImpact()) - - def test_hasImpact_allDeltasNone(self): - axes = {"wght":(0.0, 1.0, 1.0)} - gvar = GlyphVariation(axes, [None, None, None]) - self.assertFalse(gvar.hasImpact()) - - def test_toXML(self): - writer = XMLWriter(BytesIO()) - axes = {"wdth":(0.3, 0.4, 0.5), "wght":(0.0, 1.0, 1.0), "opsz":(-0.7, -0.7, 0.0)} - g = GlyphVariation(axes, [(9,8), None, (7,6), (0,0), (-1,-2), None]) - g.toXML(writer, ["wdth", "wght", "opsz"]) - self.assertEqual([ - '', - '', - '', - '', - '', - '', - '', - '', - '' - ], GlyphVariationTest.xml_lines(writer)) - - def test_toXML_allDeltasNone(self): - writer = XMLWriter(BytesIO()) - axes = {"wght":(0.0, 1.0, 1.0)} - g = GlyphVariation(axes, [None] * 5) - g.toXML(writer, ["wght", "wdth"]) - self.assertEqual([ - '', - '', - '', - '' - ], GlyphVariationTest.xml_lines(writer)) - - def test_fromXML(self): - g = GlyphVariation({}, [None] * 4) - g.fromXML("coord", {"axis":"wdth", "min":"0.3", "value":"0.4", "max":"0.5"}, []) - g.fromXML("coord", {"axis":"wght", "value":"1.0"}, []) - g.fromXML("coord", {"axis":"opsz", "value":"-0.5"}, []) - 
g.fromXML("delta", {"pt":"1", "x":"33", "y":"44"}, []) - g.fromXML("delta", {"pt":"2", "x":"-2", "y":"170"}, []) - self.assertEqual({ - "wdth":( 0.3, 0.4, 0.5), - "wght":( 0.0, 1.0, 1.0), - "opsz":(-0.5, -0.5, 0.0) - }, g.axes) - self.assertEqual([None, (33, 44), (-2, 170), None], g.coordinates) - - def test_compile_sharedCoords_nonIntermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) - # len(data)=8; flags=None; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[] - self.assertEqual("00 08 00 77", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_sharedCoords_intermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.3, 0.5, 0.7), "wdth": (0.1, 0.8, 0.9)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) - # len(data)=8; flags=INTERMEDIATE_TUPLE; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[(0.3, 0.1), (0.7, 0.9)] - self.assertEqual("00 08 40 77 13 33 06 66 2C CD 39 9A", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_sharedCoords_nonIntermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[] - 
self.assertEqual("00 09 20 77", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_sharedCoords_intermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 1.0)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[(0.0, 0.0), (1.0, 1.0)] - self.assertEqual("00 09 60 77 00 00 00 00 40 00 40 00", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_nonIntermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) - # len(data)=8; flags=EMBEDDED_TUPLE_COORD - # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] - self.assertEqual("00 08 80 00 20 00 33 33", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_intermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) - # len(data)=8; flags=EMBEDDED_TUPLE_COORD - # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[(0.0, 0.0), (1.0, 0.8)] - self.assertEqual("00 08 C0 00 20 00 33 33 00 00 00 00 40 00 33 33", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # 
deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_nonIntermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_TUPLE_COORD - # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] - self.assertEqual("00 09 A0 00 20 00 33 33", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_intermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_TUPLE|EMBEDDED_TUPLE_COORD - # embeddedCoord=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)] - self.assertEqual("00 09 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compileCoord(self): - gvar = GlyphVariation({"wght": (-1.0, -1.0, -1.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) - self.assertEqual("C0 00 20 00", hexencode(gvar.compileCoord(["wght", "wdth"]))) - self.assertEqual("20 00 C0 00", hexencode(gvar.compileCoord(["wdth", "wght"]))) - self.assertEqual("C0 00", hexencode(gvar.compileCoord(["wght"]))) - - def test_compileIntermediateCoord(self): - gvar = GlyphVariation({"wght": (-1.0, -1.0, 0.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) - self.assertEqual("C0 00 19 9A 00 00 26 66", hexencode(gvar.compileIntermediateCoord(["wght", "wdth"]))) - self.assertEqual("19 9A C0 00 26 66 00 00", 
hexencode(gvar.compileIntermediateCoord(["wdth", "wght"]))) - self.assertEqual(None, gvar.compileIntermediateCoord(["wght"])) - self.assertEqual("19 9A 26 66", hexencode(gvar.compileIntermediateCoord(["wdth"]))) - - def test_decompileCoord(self): - decompileCoord = GlyphVariation.decompileCoord_ - data = deHexStr("DE AD C0 00 20 00 DE AD") - self.assertEqual(({"wght": -1.0, "wdth": 0.5}, 6), decompileCoord(["wght", "wdth"], data, 2)) - - def test_decompileCoord_roundTrip(self): - # Make sure we are not affected by https://github.com/behdad/fonttools/issues/286 - data = deHexStr("7F B9 80 35") - values, _ = GlyphVariation.decompileCoord_(["wght", "wdth"], data, 0) - axisValues = {axis:(val, val, val) for axis, val in values.items()} - gvar = GlyphVariation(axisValues, [None] * 4) - self.assertEqual("7F B9 80 35", hexencode(gvar.compileCoord(["wght", "wdth"]))) - - def test_decompileCoords(self): - decompileCoords = GlyphVariation.decompileCoords_ - axes = ["wght", "wdth", "opsz"] - coords = [ - {"wght": 1.0, "wdth": 0.0, "opsz": 0.5}, - {"wght": -1.0, "wdth": 0.0, "opsz": 0.25}, - {"wght": 0.0, "wdth": -1.0, "opsz": 1.0} - ] - data = deHexStr("DE AD 40 00 00 00 20 00 C0 00 00 00 10 00 00 00 C0 00 40 00") - self.assertEqual((coords, 20), decompileCoords(axes, numCoords=3, data=data, offset=2)) - - def test_compilePoints(self): - compilePoints = lambda p: GlyphVariation.compilePoints(set(p), numPointsInGlyph=999) - self.assertEqual("00", hexencode(compilePoints(range(999)))) # all points in glyph - self.assertEqual("01 00 07", hexencode(compilePoints([7]))) - self.assertEqual("01 80 FF FF", hexencode(compilePoints([65535]))) - self.assertEqual("02 01 09 06", hexencode(compilePoints([9, 15]))) - self.assertEqual("06 05 07 01 F7 02 01 F2", hexencode(compilePoints([7, 8, 255, 257, 258, 500]))) - self.assertEqual("03 01 07 01 80 01 F4", hexencode(compilePoints([7, 8, 500]))) - self.assertEqual("04 01 07 01 81 BE EF 0C 0F", hexencode(compilePoints([7, 8, 0xBEEF, 0xCAFE]))) 
- self.assertEqual("81 2C" + # 300 points (0x12c) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] - hexencode(compilePoints(range(300)))) - self.assertEqual("81 8F" + # 399 points (0x18f) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] - " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] - hexencode(compilePoints(range(399)))) - - def test_decompilePoints(self): - numPointsInGlyph = 65536 - allPoints = list(range(numPointsInGlyph)) - def decompilePoints(data, offset): - points, offset = GlyphVariation.decompilePoints_(numPointsInGlyph, deHexStr(data), offset) - # Conversion to list needed for Python 3. - return (list(points), offset) - # all points in glyph - self.assertEqual((allPoints, 1), decompilePoints("00", 0)) - # all points in glyph (in overly verbose encoding, not explicitly prohibited by spec) - self.assertEqual((allPoints, 2), decompilePoints("80 00", 0)) - # 2 points; first run: [9, 9+6] - self.assertEqual(([9, 15], 4), decompilePoints("02 01 09 06", 0)) - # 2 points; first run: [0xBEEF, 0xCAFE]. (0x0C0F = 0xCAFE - 0xBEEF) - self.assertEqual(([0xBEEF, 0xCAFE], 6), decompilePoints("02 81 BE EF 0C 0F", 0)) - # 1 point; first run: [7] - self.assertEqual(([7], 3), decompilePoints("01 00 07", 0)) - # 1 point; first run: [7] in overly verbose encoding - self.assertEqual(([7], 4), decompilePoints("01 80 00 07", 0)) - # 1 point; first run: [65535]; requires words to be treated as unsigned numbers - self.assertEqual(([65535], 4), decompilePoints("01 80 FF FF", 0)) - # 4 points; first run: [7, 8]; second run: [255, 257]. 
257 is stored in delta-encoded bytes (0xFF + 2). - self.assertEqual(([7, 8, 255, 257], 7), decompilePoints("04 01 07 01 01 FF 02", 0)) - # combination of all encodings, preceded and followed by 4 bytes of unused data - data = "DE AD DE AD 04 01 07 01 81 BE EF 0C 0F DE AD DE AD" - self.assertEqual(([7, 8, 0xBEEF, 0xCAFE], 13), decompilePoints(data, 4)) - self.assertSetEqual(set(range(300)), set(decompilePoints( - "81 2C" + # 300 points (0x12c) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] - 0)[0])) - self.assertSetEqual(set(range(399)), set(decompilePoints( - "81 8F" + # 399 points (0x18f) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] - " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] - 0)[0])) - - def test_decompilePoints_shouldGuardAgainstBadPointNumbers(self): - decompilePoints = GlyphVariation.decompilePoints_ - # 2 points; first run: [3, 9]. 
- numPointsInGlyph = 8 - self.assertRaises(TTLibError, decompilePoints, numPointsInGlyph, deHexStr("02 01 03 06"), 0) - - def test_decompilePoints_roundTrip(self): - numPointsInGlyph = 500 # greater than 255, so we also exercise code path for 16-bit encoding - compile = lambda points: GlyphVariation.compilePoints(points, numPointsInGlyph) - decompile = lambda data: set(GlyphVariation.decompilePoints_(numPointsInGlyph, data, 0)[0]) - for i in range(50): - points = set(random.sample(range(numPointsInGlyph), 30)) - self.assertSetEqual(points, decompile(compile(points)), - "failed round-trip decompile/compilePoints; points=%s" % points) - allPoints = set(range(numPointsInGlyph)) - self.assertSetEqual(allPoints, decompile(compile(allPoints))) - - def test_compileDeltas(self): - gvar = GlyphVariation({}, [(0,0), (1, 0), (2, 0), (3, 3)]) - points = {1, 2} - # deltaX for points: [1, 2]; deltaY for points: [0, 0] - self.assertEqual("01 01 02 81", hexencode(gvar.compileDeltas(points))) - - def test_compileDeltaValues(self): - compileDeltaValues = lambda values: hexencode(GlyphVariation.compileDeltaValues_(values)) - # zeroes - self.assertEqual("80", compileDeltaValues([0])) - self.assertEqual("BF", compileDeltaValues([0] * 64)) - self.assertEqual("BF 80", compileDeltaValues([0] * 65)) - self.assertEqual("BF A3", compileDeltaValues([0] * 100)) - self.assertEqual("BF BF BF BF", compileDeltaValues([0] * 256)) - # bytes - self.assertEqual("00 01", compileDeltaValues([1])) - self.assertEqual("06 01 02 03 7F 80 FF FE", compileDeltaValues([1, 2, 3, 127, -128, -1, -2])) - self.assertEqual("3F" + (64 * " 7F"), compileDeltaValues([127] * 64)) - self.assertEqual("3F" + (64 * " 7F") + " 00 7F", compileDeltaValues([127] * 65)) - # words - self.assertEqual("40 66 66", compileDeltaValues([0x6666])) - self.assertEqual("43 66 66 7F FF FF FF 80 00", compileDeltaValues([0x6666, 32767, -1, -32768])) - self.assertEqual("7F" + (64 * " 11 22"), compileDeltaValues([0x1122] * 64)) - 
self.assertEqual("7F" + (64 * " 11 22") + " 40 11 22", compileDeltaValues([0x1122] * 65)) - # bytes, zeroes, bytes: a single zero is more compact when encoded as part of the bytes run - self.assertEqual("04 7F 7F 00 7F 7F", compileDeltaValues([127, 127, 0, 127, 127])) - self.assertEqual("01 7F 7F 81 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 127, 127])) - self.assertEqual("01 7F 7F 82 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 127, 127])) - self.assertEqual("01 7F 7F 83 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 0, 127, 127])) - # bytes, zeroes - self.assertEqual("01 01 00", compileDeltaValues([1, 0])) - self.assertEqual("00 01 81", compileDeltaValues([1, 0, 0])) - # words, bytes, words: a single byte is more compact when encoded as part of the words run - self.assertEqual("42 66 66 00 02 77 77", compileDeltaValues([0x6666, 2, 0x7777])) - self.assertEqual("40 66 66 01 02 02 40 77 77", compileDeltaValues([0x6666, 2, 2, 0x7777])) - # words, zeroes, words - self.assertEqual("40 66 66 80 40 77 77", compileDeltaValues([0x6666, 0, 0x7777])) - self.assertEqual("40 66 66 81 40 77 77", compileDeltaValues([0x6666, 0, 0, 0x7777])) - self.assertEqual("40 66 66 82 40 77 77", compileDeltaValues([0x6666, 0, 0, 0, 0x7777])) - # words, zeroes, bytes - self.assertEqual("40 66 66 80 02 01 02 03", compileDeltaValues([0x6666, 0, 1, 2, 3])) - self.assertEqual("40 66 66 81 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 1, 2, 3])) - self.assertEqual("40 66 66 82 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 0, 1, 2, 3])) - # words, zeroes - self.assertEqual("40 66 66 80", compileDeltaValues([0x6666, 0])) - self.assertEqual("40 66 66 81", compileDeltaValues([0x6666, 0, 0])) - - def test_decompileDeltas(self): - decompileDeltas = GlyphVariation.decompileDeltas_ - # 83 = zero values (0x80), count = 4 (1 + 0x83 & 0x3F) - self.assertEqual(([0, 0, 0, 0], 1), decompileDeltas(4, deHexStr("83"), 0)) - # 41 01 02 FF FF = signed 16-bit values (0x40), count = 2 (1 + 0x41 & 0x3F) 
- self.assertEqual(([258, -1], 5), decompileDeltas(2, deHexStr("41 01 02 FF FF"), 0)) - # 01 81 07 = signed 8-bit values, count = 2 (1 + 0x01 & 0x3F) - self.assertEqual(([-127, 7], 3), decompileDeltas(2, deHexStr("01 81 07"), 0)) - # combination of all three encodings, preceded and followed by 4 bytes of unused data - data = deHexStr("DE AD BE EF 83 40 01 02 01 81 80 DE AD BE EF") - self.assertEqual(([0, 0, 0, 0, 258, -127, -128], 11), decompileDeltas(7, data, 4)) - - def test_decompileDeltas_roundTrip(self): - numDeltas = 30 - compile = GlyphVariation.compileDeltaValues_ - decompile = lambda data: GlyphVariation.decompileDeltas_(numDeltas, data, 0)[0] - for i in range(50): - deltas = random.sample(range(-128, 127), 10) - deltas.extend(random.sample(range(-32768, 32767), 10)) - deltas.extend([0] * 10) - random.shuffle(deltas) - self.assertListEqual(deltas, decompile(compile(deltas))) - - def test_getTupleSize(self): - getTupleSize = GlyphVariation.getTupleSize_ - numAxes = 3 - self.assertEqual(4 + numAxes * 2, getTupleSize(0x8042, numAxes)) - self.assertEqual(4 + numAxes * 4, getTupleSize(0x4077, numAxes)) - self.assertEqual(4, getTupleSize(0x2077, numAxes)) - self.assertEqual(4, getTupleSize(11, numAxes)) - - @staticmethod - def xml_lines(writer): - content = writer.file.getvalue().decode("utf-8") - return [line.strip() for line in content.splitlines()][1:] - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_e_a_d.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_h_e_a_d.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_e_a_d.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_h_e_a_d.py 2018-01-08 12:40:40.000000000 +0000 @@ -5,9 +5,11 @@ from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat from . 
import DefaultTable -import warnings +import logging +log = logging.getLogger(__name__) + headFormat = """ > # big endian tableVersion: 16.16F @@ -31,13 +33,13 @@ class table__h_e_a_d(DefaultTable.DefaultTable): - dependencies = ['maxp', 'loca'] + dependencies = ['maxp', 'loca', 'CFF '] def decompile(self, data, ttFont): dummy, rest = sstruct.unpack2(headFormat, data, self) if rest: # this is quite illegal, but there seem to be fonts out there that do this - warnings.warn("extra bytes at the end of 'head' table") + log.warning("extra bytes at the end of 'head' table") assert rest == "\0\0" # For timestamp fields, ignore the top four bytes. Some fonts have @@ -48,15 +50,20 @@ for stamp in 'created', 'modified': value = getattr(self, stamp) if value > 0xFFFFFFFF: - warnings.warn("'%s' timestamp out of range; ignoring top bytes" % stamp) + log.warning("'%s' timestamp out of range; ignoring top bytes", stamp) value &= 0xFFFFFFFF setattr(self, stamp, value) if value < 0x7C259DC0: # January 1, 1970 00:00:00 - warnings.warn("'%s' timestamp seems very low; regarding as unix timestamp" % stamp) + log.warning("'%s' timestamp seems very low; regarding as unix timestamp", stamp) value += 0x7C259DC0 setattr(self, stamp, value) def compile(self, ttFont): + if ttFont.recalcBBoxes: + # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc(). 
+ if 'CFF ' in ttFont: + topDict = ttFont['CFF '].cff.topDictIndex[0] + self.xMin, self.yMin, self.xMax, self.yMax = topDict.FontBBox if ttFont.recalcTimestamp: self.modified = timestampNow() data = sstruct.pack(headFormat, self) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_h_e_a.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_h_h_e_a.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_h_h_e_a.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,11 +2,15 @@ from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval +from fontTools.misc.fixedTools import ( + ensureVersionIsLong as fi2ve, versionToFixed as ve2fi) from . import DefaultTable +import math + hheaFormat = """ > # big endian - tableVersion: 16.16F + tableVersion: L ascent: h descent: h lineGap: h @@ -30,29 +34,26 @@ # Note: Keep in sync with table__v_h_e_a - dependencies = ['hmtx', 'glyf'] + dependencies = ['hmtx', 'glyf', 'CFF '] def decompile(self, data, ttFont): sstruct.unpack(hheaFormat, data, self) def compile(self, ttFont): - if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')): self.recalc(ttFont) + self.tableVersion = fi2ve(self.tableVersion) return sstruct.pack(hheaFormat, self) def recalc(self, ttFont): - hmtxTable = ttFont['hmtx'] + if 'hmtx' in ttFont: + hmtxTable = ttFont['hmtx'] + self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values()) + + boundsWidthDict = {} if 'glyf' in ttFont: glyfTable = ttFont['glyf'] - INFINITY = 100000 - advanceWidthMax = 0 - minLeftSideBearing = +INFINITY # arbitrary big number - minRightSideBearing = +INFINITY # arbitrary big number - xMaxExtent = -INFINITY # arbitrary big negative number - for name in ttFont.getGlyphOrder(): - width, lsb = hmtxTable[name] - advanceWidthMax = max(advanceWidthMax, width) 
g = glyfTable[name] if g.numberOfContours == 0: continue @@ -60,32 +61,48 @@ # Composite glyph without extents set. # Calculate those. g.recalcBounds(glyfTable) + boundsWidthDict[name] = g.xMax - g.xMin + elif 'CFF ' in ttFont: + topDict = ttFont['CFF '].cff.topDictIndex[0] + for name in ttFont.getGlyphOrder(): + cs = topDict.CharStrings[name] + bounds = cs.calcBounds() + if bounds is not None: + boundsWidthDict[name] = int( + math.ceil(bounds[2]) - math.floor(bounds[0])) + + if boundsWidthDict: + minLeftSideBearing = float('inf') + minRightSideBearing = float('inf') + xMaxExtent = -float('inf') + for name, boundsWidth in boundsWidthDict.items(): + advanceWidth, lsb = hmtxTable[name] + rsb = advanceWidth - lsb - boundsWidth + extent = lsb + boundsWidth minLeftSideBearing = min(minLeftSideBearing, lsb) - rsb = width - lsb - (g.xMax - g.xMin) minRightSideBearing = min(minRightSideBearing, rsb) - extent = lsb + (g.xMax - g.xMin) xMaxExtent = max(xMaxExtent, extent) - - if xMaxExtent == -INFINITY: - # No glyph has outlines. - minLeftSideBearing = 0 - minRightSideBearing = 0 - xMaxExtent = 0 - - self.advanceWidthMax = advanceWidthMax self.minLeftSideBearing = minLeftSideBearing self.minRightSideBearing = minRightSideBearing self.xMaxExtent = xMaxExtent - else: - # XXX CFF recalc... - pass + + else: # No glyph has outlines. 
+ self.minLeftSideBearing = 0 + self.minRightSideBearing = 0 + self.xMaxExtent = 0 def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(hheaFormat) for name in names: value = getattr(self, name) + if name == "tableVersion": + value = fi2ve(value) + value = "0x%08x" % value writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): + if name == "tableVersion": + setattr(self, name, ve2fi(attrs["value"])) + return setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_m_t_x.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_h_m_t_x.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_h_m_t_x.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_h_m_t_x.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,10 +1,15 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * +from fontTools import ttLib from fontTools.misc.textTools import safeEval from . import DefaultTable import sys +import struct import array -import warnings +import logging + + +log = logging.getLogger(__name__) class table__h_m_t_x(DefaultTable.DefaultTable): @@ -13,16 +18,22 @@ advanceName = 'width' sideBearingName = 'lsb' numberOfMetricsName = 'numberOfHMetrics' + longMetricFormat = 'Hh' def decompile(self, data, ttFont): numGlyphs = ttFont['maxp'].numGlyphs numberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName)) if numberOfMetrics > numGlyphs: - numberOfMetrics = numGlyphs # We warn later. - # Note: advanceWidth is unsigned, but we read/write as signed. 
- metrics = array.array("h", data[:4 * numberOfMetrics]) - if sys.byteorder != "big": - metrics.byteswap() + log.warning("The %s.%s exceeds the maxp.numGlyphs" % ( + self.headerTag, self.numberOfMetricsName)) + numberOfMetrics = numGlyphs + if len(data) < 4 * numberOfMetrics: + raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag) + # Note: advanceWidth is unsigned, but some font editors might + # read/write as signed. We can't be sure whether it was a mistake + # or not, so we read as unsigned but also issue a warning... + metricsFmt = ">" + self.longMetricFormat * numberOfMetrics + metrics = struct.unpack(metricsFmt, data[:4 * numberOfMetrics]) data = data[4 * numberOfMetrics:] numberOfSideBearings = numGlyphs - numberOfMetrics sideBearings = array.array("h", data[:2 * numberOfSideBearings]) @@ -31,21 +42,33 @@ if sys.byteorder != "big": sideBearings.byteswap() if data: - warnings.warn("too much 'hmtx'/'vmtx' table data") + log.warning("too much '%s' table data" % self.tableTag) self.metrics = {} glyphOrder = ttFont.getGlyphOrder() for i in range(numberOfMetrics): glyphName = glyphOrder[i] - self.metrics[glyphName] = list(metrics[i*2:i*2+2]) + advanceWidth, lsb = metrics[i*2:i*2+2] + if advanceWidth > 32767: + log.warning( + "Glyph %r has a huge advance %s (%d); is it intentional or " + "an (invalid) negative value?", glyphName, self.advanceName, + advanceWidth) + self.metrics[glyphName] = (advanceWidth, lsb) lastAdvance = metrics[-2] for i in range(numberOfSideBearings): glyphName = glyphOrder[i + numberOfMetrics] - self.metrics[glyphName] = [lastAdvance, sideBearings[i]] + self.metrics[glyphName] = (lastAdvance, sideBearings[i]) def compile(self, ttFont): metrics = [] + hasNegativeAdvances = False for glyphName in ttFont.getGlyphOrder(): - metrics.append(self.metrics[glyphName]) + advanceWidth, sideBearing = self.metrics[glyphName] + if advanceWidth < 0: + log.error("Glyph %r has negative advance %s" % ( + glyphName, self.advanceName)) + 
hasNegativeAdvances = True + metrics.append([advanceWidth, sideBearing]) lastAdvance = metrics[-1][0] lastIndex = len(metrics) while metrics[lastIndex-2][0] == lastAdvance: @@ -55,18 +78,24 @@ lastIndex = 1 break additionalMetrics = metrics[lastIndex:] - additionalMetrics = [sb for advance, sb in additionalMetrics] + additionalMetrics = [round(sb) for _, sb in additionalMetrics] metrics = metrics[:lastIndex] - setattr(ttFont[self.headerTag], self.numberOfMetricsName, len(metrics)) + numberOfMetrics = len(metrics) + setattr(ttFont[self.headerTag], self.numberOfMetricsName, numberOfMetrics) allMetrics = [] - for item in metrics: - allMetrics.extend(item) - allMetrics = array.array("h", allMetrics) - if sys.byteorder != "big": - allMetrics.byteswap() - data = allMetrics.tostring() - + for advance, sb in metrics: + allMetrics.extend([round(advance), round(sb)]) + metricsFmt = ">" + self.longMetricFormat * numberOfMetrics + try: + data = struct.pack(metricsFmt, *allMetrics) + except struct.error as e: + if "out of range" in str(e) and hasNegativeAdvances: + raise ttLib.TTLibError( + "'%s' table can't contain negative advance %ss" + % (self.tableTag, self.advanceName)) + else: + raise additionalMetrics = array.array("h", additionalMetrics) if sys.byteorder != "big": additionalMetrics.byteswap() @@ -88,8 +117,8 @@ if not hasattr(self, "metrics"): self.metrics = {} if name == "mtx": - self.metrics[attrs["name"]] = [safeEval(attrs[self.advanceName]), - safeEval(attrs[self.sideBearingName])] + self.metrics[attrs["name"]] = (safeEval(attrs[self.advanceName]), + safeEval(attrs[self.sideBearingName])) def __delitem__(self, glyphName): del self.metrics[glyphName] diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/H_V_A_R_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/H_V_A_R_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/H_V_A_R_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/H_V_A_R_.py 2018-01-08 12:40:40.000000000 
+0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_H_V_A_R_(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/__init__.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/__init__.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -# DON'T EDIT! This file is generated by MetaTools/buildTableList.py. -def _moduleFinderHint(): - """Dummy function to let modulefinder know what tables may be - dynamically imported. Generated by MetaTools/buildTableList.py. - - >>> _moduleFinderHint() - """ - from . import B_A_S_E_ - from . import C_B_D_T_ - from . import C_B_L_C_ - from . import C_F_F_ - from . import C_O_L_R_ - from . import C_P_A_L_ - from . import D_S_I_G_ - from . import E_B_D_T_ - from . import E_B_L_C_ - from . import F_F_T_M_ - from . import G_D_E_F_ - from . import G_M_A_P_ - from . import G_P_K_G_ - from . import G_P_O_S_ - from . import G_S_U_B_ - from . import J_S_T_F_ - from . import L_T_S_H_ - from . import M_A_T_H_ - from . import M_E_T_A_ - from . import O_S_2f_2 - from . import S_I_N_G_ - from . import S_V_G_ - from . import T_S_I_B_ - from . import T_S_I_D_ - from . import T_S_I_J_ - from . import T_S_I_P_ - from . import T_S_I_S_ - from . import T_S_I_V_ - from . import T_S_I__0 - from . import T_S_I__1 - from . import T_S_I__2 - from . import T_S_I__3 - from . import T_S_I__5 - from . import V_D_M_X_ - from . import V_O_R_G_ - from . import _a_v_a_r - from . import _c_m_a_p - from . import _c_v_t - from . import _f_e_a_t - from . import _f_p_g_m - from . import _f_v_a_r - from . import _g_a_s_p - from . 
import _g_l_y_f - from . import _g_v_a_r - from . import _h_d_m_x - from . import _h_e_a_d - from . import _h_h_e_a - from . import _h_m_t_x - from . import _k_e_r_n - from . import _l_o_c_a - from . import _l_t_a_g - from . import _m_a_x_p - from . import _m_e_t_a - from . import _n_a_m_e - from . import _p_o_s_t - from . import _p_r_e_p - from . import _s_b_i_x - from . import _v_h_e_a - from . import _v_m_t_x - -if __name__ == "__main__": - import doctest, sys - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_k_e_r_n.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_k_e_r_n.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_k_e_r_n.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_k_e_r_n.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,18 +2,24 @@ from fontTools.misc.py23 import * from fontTools.ttLib import getSearchRange from fontTools.misc.textTools import safeEval, readHex -from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from fontTools.misc.fixedTools import ( + fixedToFloat as fi2fl, + floatToFixed as fl2fi) from . 
import DefaultTable import struct +import sys import array -import warnings +import logging + + +log = logging.getLogger(__name__) class table__k_e_r_n(DefaultTable.DefaultTable): def getkern(self, format): for subtable in self.kernTables: - if subtable.version == format: + if subtable.format == format: return subtable return None # not found @@ -29,21 +35,23 @@ else: self.version = version data = data[4:] - tablesIndex = [] self.kernTables = [] for i in range(nTables): if self.version == 1.0: # Apple - length, coverage, tupleIndex = struct.unpack(">lHH", data[:8]) - version = coverage & 0xff + length, coverage, subtableFormat = struct.unpack( + ">LBB", data[:6]) else: - version, length = struct.unpack(">HH", data[:4]) - length = int(length) - if version not in kern_classes: - subtable = KernTable_format_unkown(version) + # in OpenType spec the "version" field refers to the common + # subtable header; the actual subtable format is stored in + # the 8-15 mask bits of "coverage" field. + # This "version" is always 0 so we ignore it here + _, length, subtableFormat, coverage = struct.unpack( + ">HHBB", data[:6]) + if subtableFormat not in kern_classes: + subtable = KernTable_format_unkown(subtableFormat) else: - subtable = kern_classes[version]() - subtable.apple = apple + subtable = kern_classes[subtableFormat](apple) subtable.decompile(data[:length], ttFont) self.kernTables.append(subtable) data = data[length:] @@ -55,7 +63,7 @@ nTables = 0 if self.version == 1.0: # AAT Apple's "new" format. 
- data = struct.pack(">ll", fl2fi(self.version, 16), nTables) + data = struct.pack(">LL", fl2fi(self.version, 16), nTables) else: data = struct.pack(">HH", self.version, nTables) if hasattr(self, "kernTables"): @@ -81,80 +89,142 @@ if format not in kern_classes: subtable = KernTable_format_unkown(format) else: - subtable = kern_classes[format]() + apple = self.version == 1.0 + subtable = kern_classes[format](apple) self.kernTables.append(subtable) subtable.fromXML(name, attrs, content, ttFont) class KernTable_format_0(object): + # 'version' is kept for backward compatibility + version = format = 0 + + def __init__(self, apple=False): + self.apple = apple + def decompile(self, data, ttFont): - version, length, coverage = (0,0,0) if not self.apple: - version, length, coverage = struct.unpack(">HHH", data[:6]) + version, length, subtableFormat, coverage = struct.unpack( + ">HHBB", data[:6]) + if version != 0: + from fontTools.ttLib import TTLibError + raise TTLibError( + "unsupported kern subtable version: %d" % version) + tupleIndex = None + # Should we also assert length == len(data)? 
data = data[6:] else: - version, length, coverage = struct.unpack(">LHH", data[:8]) + length, coverage, subtableFormat, tupleIndex = struct.unpack( + ">LBBH", data[:8]) data = data[8:] - self.version, self.coverage = int(version), int(coverage) + assert self.format == subtableFormat, "unsupported format" + self.coverage = coverage + self.tupleIndex = tupleIndex self.kernTable = kernTable = {} - nPairs, searchRange, entrySelector, rangeShift = struct.unpack(">HHHH", data[:8]) + nPairs, searchRange, entrySelector, rangeShift = struct.unpack( + ">HHHH", data[:8]) data = data[8:] nPairs = min(nPairs, len(data) // 6) datas = array.array("H", data[:6 * nPairs]) - if sys.byteorder != "big": + if sys.byteorder != "big": # pragma: no cover datas.byteswap() it = iter(datas) glyphOrder = ttFont.getGlyphOrder() for k in range(nPairs): left, right, value = next(it), next(it), next(it) - if value >= 32768: value -= 65536 + if value >= 32768: + value -= 65536 try: kernTable[(glyphOrder[left], glyphOrder[right])] = value except IndexError: - # Slower, but will not throw an IndexError on an invalid glyph id. - kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = value - if len(data) > 6 * nPairs: - warnings.warn("excess data in 'kern' subtable: %d bytes" % len(data)) + # Slower, but will not throw an IndexError on an invalid + # glyph id. + kernTable[( + ttFont.getGlyphName(left), + ttFont.getGlyphName(right))] = value + if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess + log.warning( + "excess data in 'kern' subtable: %d bytes", + len(data) - 6 * nPairs) def compile(self, ttFont): nPairs = len(self.kernTable) searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6) - data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift) + data = struct.pack( + ">HHHH", nPairs, searchRange, entrySelector, rangeShift) # yeehee! 
(I mean, turn names into indices) try: reverseOrder = ttFont.getReverseGlyphMap() - kernTable = sorted((reverseOrder[left], reverseOrder[right], value) for ((left,right),value) in self.kernTable.items()) + kernTable = sorted( + (reverseOrder[left], reverseOrder[right], value) + for ((left, right), value) in self.kernTable.items()) except KeyError: # Slower, but will not throw KeyError on invalid glyph id. getGlyphID = ttFont.getGlyphID - kernTable = sorted((getGlyphID(left), getGlyphID(right), value) for ((left,right),value) in self.kernTable.items()) + kernTable = sorted( + (getGlyphID(left), getGlyphID(right), value) + for ((left, right), value) in self.kernTable.items()) for left, right, value in kernTable: data = data + struct.pack(">HHh", left, right, value) - return struct.pack(">HHH", self.version, len(data) + 6, self.coverage) + data + + if not self.apple: + version = 0 + length = len(data) + 6 + header = struct.pack( + ">HHBB", version, length, self.format, self.coverage) + else: + if self.tupleIndex is None: + # sensible default when compiling a TTX from an old fonttools + # or when inserting a Windows-style format 0 subtable into an + # Apple version=1.0 kern table + log.warning("'tupleIndex' is None; default to 0") + self.tupleIndex = 0 + length = len(data) + 8 + header = struct.pack( + ">LBBH", length, self.coverage, self.format, self.tupleIndex) + return header + data def toXML(self, writer, ttFont): - writer.begintag("kernsubtable", coverage=self.coverage, format=0) + attrs = dict(coverage=self.coverage, format=self.format) + if self.apple: + if self.tupleIndex is None: + log.warning("'tupleIndex' is None; default to 0") + attrs["tupleIndex"] = 0 + else: + attrs["tupleIndex"] = self.tupleIndex + writer.begintag("kernsubtable", **attrs) writer.newline() items = sorted(self.kernTable.items()) for (left, right), value in items: writer.simpletag("pair", [ - ("l", left), - ("r", right), - ("v", value) - ]) + ("l", left), + ("r", right), + ("v", value) + 
]) writer.newline() writer.endtag("kernsubtable") writer.newline() def fromXML(self, name, attrs, content, ttFont): self.coverage = safeEval(attrs["coverage"]) - self.version = safeEval(attrs["format"]) + subtableFormat = safeEval(attrs["format"]) + if self.apple: + if "tupleIndex" in attrs: + self.tupleIndex = safeEval(attrs["tupleIndex"]) + else: + # previous fontTools versions didn't export tupleIndex + log.warning( + "Apple kern subtable is missing 'tupleIndex' attribute") + self.tupleIndex = None + else: + self.tupleIndex = None + assert subtableFormat == self.format, "unsupported format" if not hasattr(self, "kernTable"): self.kernTable = {} for element in content: diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_k_e_r_n_test.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_k_e_r_n_test.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_k_e_r_n_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_k_e_r_n_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -from __future__ import print_function, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib -import unittest -from ._k_e_r_n import KernTable_format_0 - -class MockFont(object): - - def getGlyphOrder(self): - return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"] - - def getGlyphName(self, glyphID): - return "glyph%.5d" % glyphID - -class KernTable_format_0_Test(unittest.TestCase): - - def test_decompileBadGlyphId(self): - subtable = KernTable_format_0() - subtable.apple = False - subtable.decompile( b'\x00' * 6 - + b'\x00' + b'\x02' + b'\x00' * 6 - + b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01' - + b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02', - MockFont()) - self.assertEqual(subtable[("glyph00001", "glyph00003")], 1) - self.assertEqual(subtable[("glyph00001", "glyph65535")], 2) - -if __name__ == "__main__": - unittest.main() diff -Nru 
fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_c_a_r.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_l_c_a_r.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_c_a_r.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_l_c_a_r.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table__l_c_a_r(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_o_c_a.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_l_o_c_a.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_o_c_a.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_l_o_c_a.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,7 +3,11 @@ from . import DefaultTable import sys import array -import warnings +import logging + + +log = logging.getLogger(__name__) + class table__l_o_c_a(DefaultTable.DefaultTable): @@ -25,7 +29,8 @@ l.append(locations[i] * 2) locations = l if len(locations) < (ttFont['maxp'].numGlyphs + 1): - warnings.warn("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d" % (len(locations) - 1, ttFont['maxp'].numGlyphs)) + log.warning("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d", + len(locations) - 1, ttFont['maxp'].numGlyphs) self.locations = locations def compile(self, ttFont): diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_t_a_g.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_l_t_a_g.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_t_a_g.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_l_t_a_g.py 2018-01-08 12:40:40.000000000 +0000 @@ -7,6 +7,21 @@ # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html class table__l_t_a_g(DefaultTable.DefaultTable): + def __init__(self, tag=None): + 
DefaultTable.DefaultTable.__init__(self, tag) + self.version, self.flags = 1, 0 + self.tags = [] + + def addTag(self, tag): + """Add 'tag' to the list of langauge tags if not already there. + + Returns the integer index of 'tag' in the list of all tags. + """ + try: + return self.tags.index(tag) + except ValueError: + self.tags.append(tag) + return len(self.tags) - 1 def decompile(self, data, ttFont): self.version, self.flags, numTags = struct.unpack(">LLL", data[:12]) @@ -28,7 +43,7 @@ stringPool = stringPool + tag offset = offset + 12 + len(self.tags) * 4 dataList.append(struct.pack(">HH", offset, len(tag))) - dataList.append(stringPool) + dataList.append(tobytes(stringPool)) return bytesjoin(dataList) def toXML(self, writer, ttFont): diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_t_a_g_test.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_l_t_a_g_test.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_l_t_a_g_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_l_t_a_g_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.xmlWriter import XMLWriter -import os -import struct -import unittest -from ._l_t_a_g import table__l_t_a_g - -class Test_l_t_a_g(unittest.TestCase): - - DATA_ = struct.pack(b">LLLHHHHHH", 1, 0, 3, 24 + 0, 2, 24 + 2, 7, 24 + 2, 2) + b"enzh-Hant" - TAGS_ = ["en", "zh-Hant", "zh"] - - def test_decompile_compile(self): - table = table__l_t_a_g() - table.decompile(self.DATA_, ttFont=None) - self.assertEqual(1, table.version) - self.assertEqual(0, table.flags) - self.assertEqual(self.TAGS_, table.tags) - self.assertEqual(self.DATA_, table.compile(ttFont=None)) - - def test_fromXML(self): - table = table__l_t_a_g() - table.fromXML("version", {"value": "1"}, content=None, ttFont=None) - table.fromXML("flags", {"value": "777"}, content=None, ttFont=None) - 
table.fromXML("LanguageTag", {"tag": "sr-Latn"}, content=None, ttFont=None) - table.fromXML("LanguageTag", {"tag": "fa"}, content=None, ttFont=None) - self.assertEqual(1, table.version) - self.assertEqual(777, table.flags) - self.assertEqual(["sr-Latn", "fa"], table.tags) - - def test_toXML(self): - writer = XMLWriter(BytesIO()) - table = table__l_t_a_g() - table.decompile(self.DATA_, ttFont=None) - table.toXML(writer, ttFont=None) - expected = os.linesep.join([ - '', - '', - '', - '', - '', - '' - ]) + os.linesep - self.assertEqual(expected.encode("utf_8"), writer.file.getvalue()) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_a_x_p.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_m_a_x_p.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_a_x_p.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_m_a_x_p.py 2018-01-08 12:40:40.000000000 +0000 @@ -73,12 +73,12 @@ maxCompositeContours = 0 maxComponentElements = 0 maxComponentDepth = 0 - allXMaxIsLsb = 1 + allXMinIsLsb = 1 for glyphName in ttFont.getGlyphOrder(): g = glyfTable[glyphName] if g.numberOfContours: if hmtxTable[glyphName][1] != g.xMin: - allXMaxIsLsb = 0 + allXMinIsLsb = 0 xMin = min(xMin, g.xMin) yMin = min(yMin, g.yMin) xMax = max(xMax, g.xMax) @@ -108,7 +108,7 @@ self.maxCompositePoints = maxCompositePoints self.maxCompositeContours = maxCompositeContours self.maxComponentDepth = maxComponentDepth - if allXMaxIsLsb: + if allXMinIsLsb: headTable.flags = headTable.flags | 0x2 else: headTable.flags = headTable.flags & ~0x2 diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_e_t_a.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_m_e_t_a.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_e_t_a.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_m_e_t_a.py 2018-01-08 12:40:40.000000000 +0000 @@ -16,15 +16,6 @@ numDataMaps: L """ -# According 
to Apple's spec, the dataMaps entries contain a dataOffset -# that is documented as "Offset from the beginning of the data section -# to the data for this tag". However, this is *not* the case with -# the fonts that Apple ships pre-installed on MacOS X Yosemite 10.10.4, -# and it also does not reflect how Apple's ftxdumperfuser tool is parsing -# the 'meta' table (tested ftxdumperfuser build 330, FontToolbox.framework -# build 187). Instead of what is claimed in the spec, the data maps contain -# a dataOffset relative to the very beginning of the 'meta' table. -# The dataOffset field of the 'meta' header apparently gets ignored. DATA_MAP_FORMAT = """ > # big endian @@ -35,7 +26,7 @@ class table__m_e_t_a(DefaultTable.DefaultTable): - def __init__(self, tag="meta"): + def __init__(self, tag=None): DefaultTable.DefaultTable.__init__(self, tag) self.data = {} @@ -54,6 +45,8 @@ tag = dataMap["tag"] offset = dataMap["dataOffset"] self.data[tag] = data[offset : offset + dataMap["dataLength"]] + if tag in ["dlng", "slng"]: + self.data[tag] = self.data[tag].decode("utf-8") def compile(self, ttFont): keys = sorted(self.data.keys()) @@ -68,7 +61,10 @@ dataMaps = [] dataBlocks = [] for tag in keys: - data = self.data[tag] + if tag in ["dlng", "slng"]: + data = self.data[tag].encode("utf-8") + else: + data = self.data[tag] dataMaps.append(sstruct.pack(DATA_MAP_FORMAT, { "tag": tag, "dataOffset": dataOffset, @@ -80,14 +76,24 @@ def toXML(self, writer, ttFont, progress=None): for tag in sorted(self.data.keys()): - writer.begintag("hexdata", tag=tag) - writer.newline() - writer.dumphex(self.data[tag]) - writer.endtag("hexdata") - writer.newline() + if tag in ["dlng", "slng"]: + writer.begintag("text", tag=tag) + writer.newline() + writer.write(self.data[tag]) + writer.newline() + writer.endtag("text") + writer.newline() + else: + writer.begintag("hexdata", tag=tag) + writer.newline() + writer.dumphex(self.data[tag]) + writer.endtag("hexdata") + writer.newline() def fromXML(self, 
name, attrs, content, ttFont): if name == "hexdata": self.data[attrs["tag"]] = readHex(content) + elif name == "text" and attrs["tag"] in ["dlng", "slng"]: + self.data[attrs["tag"]] = strjoin(content).strip() else: raise TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_e_t_a_test.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_m_e_t_a_test.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_e_t_a_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_m_e_t_a_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._m_e_t_a import table__m_e_t_a -import unittest - - -# From a real font on MacOS X, but substituted 'bild' tag by 'TEST', -# and shortened the payload. Note that from the 'meta' spec, one would -# expect that header.dataOffset is 0x0000001C (pointing to the beginning -# of the data section) and that dataMap[0].dataOffset should be 0 (relative -# to the beginning of the data section). However, in the fonts that Apple -# ships on MacOS X 10.10.4, dataMap[0].dataOffset is actually relative -# to the beginning of the 'meta' table, i.e. 0x0000001C again. While the -# following test data is invalid according to the 'meta' specification, -# it is reflecting the 'meta' table structure in all Apple-supplied fonts. 
-META_DATA = deHexStr( - "00 00 00 01 00 00 00 00 00 00 00 1C 00 00 00 01 " - "54 45 53 54 00 00 00 1C 00 00 00 04 CA FE BE EF") - - -class MetaTableTest(unittest.TestCase): - def test_decompile(self): - table = table__m_e_t_a() - table.decompile(META_DATA, ttFont={"meta": table}) - self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) - - def test_compile(self): - table = table__m_e_t_a() - table.data["TEST"] = b"\xCA\xFE\xBE\xEF" - self.assertEqual(META_DATA, table.compile(ttFont={"meta": table})) - - def test_toXML(self): - table = table__m_e_t_a() - table.data["TEST"] = b"\xCA\xFE\xBE\xEF" - writer = XMLWriter(BytesIO()) - table.toXML(writer, {"meta": table}) - xml = writer.file.getvalue().decode("utf-8") - self.assertEqual([ - '', - 'cafebeef', - '' - ], [line.strip() for line in xml.splitlines()][1:]) - - def test_fromXML(self): - table = table__m_e_t_a() - table.fromXML("hexdata", {"tag": "TEST"}, ['cafebeef'], ttFont=None) - self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_o_r_t.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_m_o_r_t.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_o_r_t.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_m_o_r_t.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6mort.html +class table__m_o_r_t(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_o_r_x.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_m_o_r_x.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_m_o_r_x.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_m_o_r_x.py 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html +class table__m_o_r_x(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/M_V_A_R_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/M_V_A_R_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/M_V_A_R_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/M_V_A_R_.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_M_V_A_R_(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_n_a_m_e.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_n_a_m_e.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_n_a_m_e.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_n_a_m_e.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,10 +1,17 @@ +# -*- coding: utf-8 -*- from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval from fontTools.misc.encodingTools import getEncoding +from fontTools.ttLib import newTable from . 
import DefaultTable import struct +import logging + + +log = logging.getLogger(__name__) nameRecordFormat = """ > # big endian @@ -20,22 +27,27 @@ class table__n_a_m_e(DefaultTable.DefaultTable): + dependencies = ["ltag"] def decompile(self, data, ttFont): - format, n, stringOffset = struct.unpack(">HHH", data[:6]) + format, n, stringOffset = struct.unpack(b">HHH", data[:6]) expectedStringOffset = 6 + n * nameRecordSize if stringOffset != expectedStringOffset: - # XXX we need a warn function - print("Warning: 'name' table stringOffset incorrect. Expected: %s; Actual: %s" % (expectedStringOffset, stringOffset)) + log.error( + "'name' table stringOffset incorrect. Expected: %s; Actual: %s", + expectedStringOffset, stringOffset) stringData = data[stringOffset:] data = data[6:] self.names = [] for i in range(n): if len(data) < 12: - # compensate for buggy font - break + log.error('skipping malformed name record #%d', i) + continue name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord()) name.string = stringData[name.offset:name.offset+name.length] + if name.offset + name.length > len(stringData): + log.error('skipping malformed name record #%d', i) + continue assert len(name.string) == name.length #if (name.platEncID, name.platformID) in ((0, 0), (1, 3)): # if len(name.string) % 2: @@ -55,7 +67,7 @@ format = 0 n = len(names) stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat) - data = struct.pack(">HHH", format, n, stringOffset) + data = struct.pack(b">HHH", format, n, stringOffset) lastoffset = 0 done = {} # remember the data so we can reuse the "pointers" for name in names: @@ -111,6 +123,186 @@ else: return None + def setName(self, string, nameID, platformID, platEncID, langID): + """ Set the 'string' for the name record identified by 'nameID', 'platformID', + 'platEncID' and 'langID'. If a record with that nameID doesn't exist, create it + and append to the name table. + + 'string' can be of type `str` (`unicode` in PY2) or `bytes`. 
In the latter case, + it is assumed to be already encoded with the correct plaform-specific encoding + identified by the (platformID, platEncID, langID) triplet. A warning is issued + to prevent unexpected results. + """ + if not hasattr(self, 'names'): + self.names = [] + if not isinstance(string, unicode): + if isinstance(string, bytes): + log.warning( + "name string is bytes, ensure it's correctly encoded: %r", string) + else: + raise TypeError( + "expected unicode or bytes, found %s: %r" % ( + type(string).__name__, string)) + namerecord = self.getName(nameID, platformID, platEncID, langID) + if namerecord: + namerecord.string = string + else: + self.names.append(makeName(string, nameID, platformID, platEncID, langID)) + + def _findUnusedNameID(self, minNameID=256): + """Finds an unused name id. + + The nameID is assigned in the range between 'minNameID' and 32767 (inclusive), + following the last nameID in the name table. + """ + names = getattr(self, 'names', []) + nameID = 1 + max([n.nameID for n in names] + [minNameID - 1]) + if nameID > 32767: + raise ValueError("nameID must be less than 32768") + return nameID + + def addMultilingualName(self, names, ttFont=None, nameID=None): + """Add a multilingual name, returning its name ID + + 'names' is a dictionary with the name in multiple languages, + such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}. + The keys can be arbitrary IETF BCP 47 language codes; + the values are Unicode strings. + + 'ttFont' is the TTFont to which the names are added, or None. + If present, the font's 'ltag' table can get populated + to store exotic language codes, which allows encoding + names that otherwise cannot get encoded at all. + + 'nameID' is the name ID to be used, or None to let the library + pick an unused name ID. + """ + if not hasattr(self, 'names'): + self.names = [] + if nameID is None: + nameID = self._findUnusedNameID() + # TODO: Should minimize BCP 47 language codes. 
+ # https://github.com/fonttools/fonttools/issues/930 + for lang, name in sorted(names.items()): + # Apple platforms have been recognizing Windows names + # since early OSX (~2001), so we only add names + # for the Macintosh platform when we cannot not make + # a Windows name. This can happen for exotic BCP47 + # language tags that have no Windows language code. + windowsName = _makeWindowsName(name, nameID, lang) + if windowsName is not None: + self.names.append(windowsName) + else: + macName = _makeMacName(name, nameID, lang, ttFont) + if macName is not None: + self.names.append(macName) + return nameID + + def addName(self, string, platforms=((1, 0, 0), (3, 1, 0x409)), minNameID=255): + """ Add a new name record containing 'string' for each (platformID, platEncID, + langID) tuple specified in the 'platforms' list. + + The nameID is assigned in the range between 'minNameID'+1 and 32767 (inclusive), + following the last nameID in the name table. + If no 'platforms' are specified, two English name records are added, one for the + Macintosh (platformID=0), and one for the Windows platform (3). + + The 'string' must be a Unicode string, so it can be encoded with different, + platform-specific encodings. + + Return the new nameID. 
+ """ + assert len(platforms) > 0, \ + "'platforms' must contain at least one (platformID, platEncID, langID) tuple" + if not hasattr(self, 'names'): + self.names = [] + if not isinstance(string, unicode): + raise TypeError( + "expected %s, found %s: %r" % ( + unicode.__name__, type(string).__name__,string )) + nameID = self._findUnusedNameID(minNameID + 1) + for platformID, platEncID, langID in platforms: + self.names.append(makeName(string, nameID, platformID, platEncID, langID)) + return nameID + + +def makeName(string, nameID, platformID, platEncID, langID): + name = NameRecord() + name.string, name.nameID, name.platformID, name.platEncID, name.langID = ( + string, nameID, platformID, platEncID, langID) + return name + + +def _makeWindowsName(name, nameID, language): + """Create a NameRecord for the Microsoft Windows platform + + 'language' is an arbitrary IETF BCP 47 language identifier such + as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. If Microsoft Windows + does not support the desired language, the result will be None. + Future versions of fonttools might return a NameRecord for the + OpenType 'name' table format 1, but this is not implemented yet. + """ + langID = _WINDOWS_LANGUAGE_CODES.get(language.lower()) + if langID is not None: + return makeName(name, nameID, 3, 1, langID) + else: + log.warning("cannot add Windows name in language %s " + "because fonttools does not yet support " + "name table format 1" % language) + return None + + +def _makeMacName(name, nameID, language, font=None): + """Create a NameRecord for Apple platforms + + 'language' is an arbitrary IETF BCP 47 language identifier such + as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we + create a Macintosh NameRecord that is understood by old applications + (platform ID 1 and an old-style Macintosh language enum). If this + is not possible, we create a Unicode NameRecord (platform ID 0) + whose language points to the font’s 'ltag' table. 
The latter + can encode any string in any language, but legacy applications + might not recognize the format (in which case they will ignore + those names). + + 'font' should be the TTFont for which you want to create a name. + If 'font' is None, we only return NameRecords for legacy Macintosh; + in that case, the result will be None for names that need to + be encoded with an 'ltag' table. + + See the section “The language identifier” in Apple’s specification: + https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html + """ + macLang = _MAC_LANGUAGE_CODES.get(language.lower()) + macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang) + if macLang is not None and macScript is not None: + encoding = getEncoding(1, macScript, macLang, default="ascii") + # Check if we can actually encode this name. If we can't, + # for example because we have no support for the legacy + # encoding, or because the name string contains Unicode + # characters that the legacy encoding cannot represent, + # we fall back to encoding the name in Unicode and put + # the language tag into the ltag table. 
+ try: + _ = tobytes(name, encoding, errors="strict") + return makeName(name, nameID, 1, macScript, macLang) + except UnicodeEncodeError: + pass + if font is not None: + ltag = font.tables.get("ltag") + if ltag is None: + ltag = font["ltag"] = newTable("ltag") + # 0 = Unicode; 4 = “Unicode 2.0 or later semantics (non-BMP characters allowed)” + # “The preferred platform-specific code for Unicode would be 3 or 4.” + # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html + return makeName(name, nameID, 0, 4, ltag.addTag(language)) + else: + log.warning("cannot store language %s into 'ltag' table " + "without having access to the TTFont object" % + language) + return None + + class NameRecord(object): def getEncoding(self, default='ascii'): @@ -125,10 +317,7 @@ return self.getEncoding(None) in ['utf_16_be', 'ucs2be', 'ascii', 'latin1'] def __str__(self): - try: - return self.toUnicode() - except UnicodeDecodeError: - return str(self.string) + return self.toStr(errors='backslashreplace') def isUnicode(self): return (self.platformID == 0 or @@ -198,6 +387,14 @@ """ return tobytes(self.string, encoding=self.getEncoding(), errors=errors) + def toStr(self, errors='strict'): + if str == bytes: + # python 2 + return self.toBytes(errors) + else: + # python 3 + return self.toUnicode(errors) + def toXML(self, writer, ttFont): try: unistr = self.toUnicode() @@ -260,3 +457,493 @@ def __repr__(self): return "" % ( self.nameID, self.platformID, self.langID) + + +# Windows language ID → IETF BCP-47 language tag +# +# While Microsoft indicates a region/country for all its language +# IDs, we follow Unicode practice by omitting “most likely subtags” +# as per Unicode CLDR. For example, English is simply “en” and not +# “en-Latn” because according to Unicode, the default script +# for English is Latin. 
+# +# http://www.unicode.org/cldr/charts/latest/supplemental/likely_subtags.html +# http://www.iana.org/assignments/language-subtag-registry/language-subtag-registry +_WINDOWS_LANGUAGES = { + 0x0436: 'af', + 0x041C: 'sq', + 0x0484: 'gsw', + 0x045E: 'am', + 0x1401: 'ar-DZ', + 0x3C01: 'ar-BH', + 0x0C01: 'ar', + 0x0801: 'ar-IQ', + 0x2C01: 'ar-JO', + 0x3401: 'ar-KW', + 0x3001: 'ar-LB', + 0x1001: 'ar-LY', + 0x1801: 'ary', + 0x2001: 'ar-OM', + 0x4001: 'ar-QA', + 0x0401: 'ar-SA', + 0x2801: 'ar-SY', + 0x1C01: 'aeb', + 0x3801: 'ar-AE', + 0x2401: 'ar-YE', + 0x042B: 'hy', + 0x044D: 'as', + 0x082C: 'az-Cyrl', + 0x042C: 'az', + 0x046D: 'ba', + 0x042D: 'eu', + 0x0423: 'be', + 0x0845: 'bn', + 0x0445: 'bn-IN', + 0x201A: 'bs-Cyrl', + 0x141A: 'bs', + 0x047E: 'br', + 0x0402: 'bg', + 0x0403: 'ca', + 0x0C04: 'zh-HK', + 0x1404: 'zh-MO', + 0x0804: 'zh', + 0x1004: 'zh-SG', + 0x0404: 'zh-TW', + 0x0483: 'co', + 0x041A: 'hr', + 0x101A: 'hr-BA', + 0x0405: 'cs', + 0x0406: 'da', + 0x048C: 'prs', + 0x0465: 'dv', + 0x0813: 'nl-BE', + 0x0413: 'nl', + 0x0C09: 'en-AU', + 0x2809: 'en-BZ', + 0x1009: 'en-CA', + 0x2409: 'en-029', + 0x4009: 'en-IN', + 0x1809: 'en-IE', + 0x2009: 'en-JM', + 0x4409: 'en-MY', + 0x1409: 'en-NZ', + 0x3409: 'en-PH', + 0x4809: 'en-SG', + 0x1C09: 'en-ZA', + 0x2C09: 'en-TT', + 0x0809: 'en-GB', + 0x0409: 'en', + 0x3009: 'en-ZW', + 0x0425: 'et', + 0x0438: 'fo', + 0x0464: 'fil', + 0x040B: 'fi', + 0x080C: 'fr-BE', + 0x0C0C: 'fr-CA', + 0x040C: 'fr', + 0x140C: 'fr-LU', + 0x180C: 'fr-MC', + 0x100C: 'fr-CH', + 0x0462: 'fy', + 0x0456: 'gl', + 0x0437: 'ka', + 0x0C07: 'de-AT', + 0x0407: 'de', + 0x1407: 'de-LI', + 0x1007: 'de-LU', + 0x0807: 'de-CH', + 0x0408: 'el', + 0x046F: 'kl', + 0x0447: 'gu', + 0x0468: 'ha', + 0x040D: 'he', + 0x0439: 'hi', + 0x040E: 'hu', + 0x040F: 'is', + 0x0470: 'ig', + 0x0421: 'id', + 0x045D: 'iu', + 0x085D: 'iu-Latn', + 0x083C: 'ga', + 0x0434: 'xh', + 0x0435: 'zu', + 0x0410: 'it', + 0x0810: 'it-CH', + 0x0411: 'ja', + 0x044B: 'kn', + 0x043F: 'kk', + 0x0453: 'km', + 
0x0486: 'quc', + 0x0487: 'rw', + 0x0441: 'sw', + 0x0457: 'kok', + 0x0412: 'ko', + 0x0440: 'ky', + 0x0454: 'lo', + 0x0426: 'lv', + 0x0427: 'lt', + 0x082E: 'dsb', + 0x046E: 'lb', + 0x042F: 'mk', + 0x083E: 'ms-BN', + 0x043E: 'ms', + 0x044C: 'ml', + 0x043A: 'mt', + 0x0481: 'mi', + 0x047A: 'arn', + 0x044E: 'mr', + 0x047C: 'moh', + 0x0450: 'mn', + 0x0850: 'mn-CN', + 0x0461: 'ne', + 0x0414: 'nb', + 0x0814: 'nn', + 0x0482: 'oc', + 0x0448: 'or', + 0x0463: 'ps', + 0x0415: 'pl', + 0x0416: 'pt', + 0x0816: 'pt-PT', + 0x0446: 'pa', + 0x046B: 'qu-BO', + 0x086B: 'qu-EC', + 0x0C6B: 'qu', + 0x0418: 'ro', + 0x0417: 'rm', + 0x0419: 'ru', + 0x243B: 'smn', + 0x103B: 'smj-NO', + 0x143B: 'smj', + 0x0C3B: 'se-FI', + 0x043B: 'se', + 0x083B: 'se-SE', + 0x203B: 'sms', + 0x183B: 'sma-NO', + 0x1C3B: 'sms', + 0x044F: 'sa', + 0x1C1A: 'sr-Cyrl-BA', + 0x0C1A: 'sr', + 0x181A: 'sr-Latn-BA', + 0x081A: 'sr-Latn', + 0x046C: 'nso', + 0x0432: 'tn', + 0x045B: 'si', + 0x041B: 'sk', + 0x0424: 'sl', + 0x2C0A: 'es-AR', + 0x400A: 'es-BO', + 0x340A: 'es-CL', + 0x240A: 'es-CO', + 0x140A: 'es-CR', + 0x1C0A: 'es-DO', + 0x300A: 'es-EC', + 0x440A: 'es-SV', + 0x100A: 'es-GT', + 0x480A: 'es-HN', + 0x080A: 'es-MX', + 0x4C0A: 'es-NI', + 0x180A: 'es-PA', + 0x3C0A: 'es-PY', + 0x280A: 'es-PE', + 0x500A: 'es-PR', + + # Microsoft has defined two different language codes for + # “Spanish with modern sorting” and “Spanish with traditional + # sorting”. This makes sense for collation APIs, and it would be + # possible to express this in BCP 47 language tags via Unicode + # extensions (eg., “es-u-co-trad” is “Spanish with traditional + # sorting”). However, for storing names in fonts, this distinction + # does not make sense, so we use “es” in both cases. 
+ 0x0C0A: 'es', + 0x040A: 'es', + + 0x540A: 'es-US', + 0x380A: 'es-UY', + 0x200A: 'es-VE', + 0x081D: 'sv-FI', + 0x041D: 'sv', + 0x045A: 'syr', + 0x0428: 'tg', + 0x085F: 'tzm', + 0x0449: 'ta', + 0x0444: 'tt', + 0x044A: 'te', + 0x041E: 'th', + 0x0451: 'bo', + 0x041F: 'tr', + 0x0442: 'tk', + 0x0480: 'ug', + 0x0422: 'uk', + 0x042E: 'hsb', + 0x0420: 'ur', + 0x0843: 'uz-Cyrl', + 0x0443: 'uz', + 0x042A: 'vi', + 0x0452: 'cy', + 0x0488: 'wo', + 0x0485: 'sah', + 0x0478: 'ii', + 0x046A: 'yo', +} + + +_MAC_LANGUAGES = { + 0: 'en', + 1: 'fr', + 2: 'de', + 3: 'it', + 4: 'nl', + 5: 'sv', + 6: 'es', + 7: 'da', + 8: 'pt', + 9: 'no', + 10: 'he', + 11: 'ja', + 12: 'ar', + 13: 'fi', + 14: 'el', + 15: 'is', + 16: 'mt', + 17: 'tr', + 18: 'hr', + 19: 'zh-Hant', + 20: 'ur', + 21: 'hi', + 22: 'th', + 23: 'ko', + 24: 'lt', + 25: 'pl', + 26: 'hu', + 27: 'es', + 28: 'lv', + 29: 'se', + 30: 'fo', + 31: 'fa', + 32: 'ru', + 33: 'zh', + 34: 'nl-BE', + 35: 'ga', + 36: 'sq', + 37: 'ro', + 38: 'cz', + 39: 'sk', + 40: 'sl', + 41: 'yi', + 42: 'sr', + 43: 'mk', + 44: 'bg', + 45: 'uk', + 46: 'be', + 47: 'uz', + 48: 'kk', + 49: 'az-Cyrl', + 50: 'az-Arab', + 51: 'hy', + 52: 'ka', + 53: 'mo', + 54: 'ky', + 55: 'tg', + 56: 'tk', + 57: 'mn-CN', + 58: 'mn', + 59: 'ps', + 60: 'ks', + 61: 'ku', + 62: 'sd', + 63: 'bo', + 64: 'ne', + 65: 'sa', + 66: 'mr', + 67: 'bn', + 68: 'as', + 69: 'gu', + 70: 'pa', + 71: 'or', + 72: 'ml', + 73: 'kn', + 74: 'ta', + 75: 'te', + 76: 'si', + 77: 'my', + 78: 'km', + 79: 'lo', + 80: 'vi', + 81: 'id', + 82: 'tl', + 83: 'ms', + 84: 'ms-Arab', + 85: 'am', + 86: 'ti', + 87: 'om', + 88: 'so', + 89: 'sw', + 90: 'rw', + 91: 'rn', + 92: 'ny', + 93: 'mg', + 94: 'eo', + 128: 'cy', + 129: 'eu', + 130: 'ca', + 131: 'la', + 132: 'qu', + 133: 'gn', + 134: 'ay', + 135: 'tt', + 136: 'ug', + 137: 'dz', + 138: 'jv', + 139: 'su', + 140: 'gl', + 141: 'af', + 142: 'br', + 143: 'iu', + 144: 'gd', + 145: 'gv', + 146: 'ga', + 147: 'to', + 148: 'el-polyton', + 149: 'kl', + 150: 'az', + 151: 'nn', +} + + 
+_WINDOWS_LANGUAGE_CODES = {lang.lower(): code for code, lang in _WINDOWS_LANGUAGES.items()} +_MAC_LANGUAGE_CODES = {lang.lower(): code for code, lang in _MAC_LANGUAGES.items()} + + +# MacOS language ID → MacOS script ID +# +# Note that the script ID is not sufficient to determine what encoding +# to use in TrueType files. For some languages, MacOS used a modification +# of a mainstream script. For example, an Icelandic name would be stored +# with smRoman in the TrueType naming table, but the actual encoding +# is a special Icelandic version of the normal Macintosh Roman encoding. +# As another example, Inuktitut uses an 8-bit encoding for Canadian Aboriginal +# Syllables but MacOS had run out of available script codes, so this was +# done as a (pretty radical) “modification” of Ethiopic. +# +# http://unicode.org/Public/MAPPINGS/VENDORS/APPLE/Readme.txt +_MAC_LANGUAGE_TO_SCRIPT = { + 0: 0, # langEnglish → smRoman + 1: 0, # langFrench → smRoman + 2: 0, # langGerman → smRoman + 3: 0, # langItalian → smRoman + 4: 0, # langDutch → smRoman + 5: 0, # langSwedish → smRoman + 6: 0, # langSpanish → smRoman + 7: 0, # langDanish → smRoman + 8: 0, # langPortuguese → smRoman + 9: 0, # langNorwegian → smRoman + 10: 5, # langHebrew → smHebrew + 11: 1, # langJapanese → smJapanese + 12: 4, # langArabic → smArabic + 13: 0, # langFinnish → smRoman + 14: 6, # langGreek → smGreek + 15: 0, # langIcelandic → smRoman (modified) + 16: 0, # langMaltese → smRoman + 17: 0, # langTurkish → smRoman (modified) + 18: 0, # langCroatian → smRoman (modified) + 19: 2, # langTradChinese → smTradChinese + 20: 4, # langUrdu → smArabic + 21: 9, # langHindi → smDevanagari + 22: 21, # langThai → smThai + 23: 3, # langKorean → smKorean + 24: 29, # langLithuanian → smCentralEuroRoman + 25: 29, # langPolish → smCentralEuroRoman + 26: 29, # langHungarian → smCentralEuroRoman + 27: 29, # langEstonian → smCentralEuroRoman + 28: 29, # langLatvian → smCentralEuroRoman + 29: 0, # langSami → smRoman + 30: 0, # 
langFaroese → smRoman (modified) + 31: 4, # langFarsi → smArabic (modified) + 32: 7, # langRussian → smCyrillic + 33: 25, # langSimpChinese → smSimpChinese + 34: 0, # langFlemish → smRoman + 35: 0, # langIrishGaelic → smRoman (modified) + 36: 0, # langAlbanian → smRoman + 37: 0, # langRomanian → smRoman (modified) + 38: 29, # langCzech → smCentralEuroRoman + 39: 29, # langSlovak → smCentralEuroRoman + 40: 0, # langSlovenian → smRoman (modified) + 41: 5, # langYiddish → smHebrew + 42: 7, # langSerbian → smCyrillic + 43: 7, # langMacedonian → smCyrillic + 44: 7, # langBulgarian → smCyrillic + 45: 7, # langUkrainian → smCyrillic (modified) + 46: 7, # langByelorussian → smCyrillic + 47: 7, # langUzbek → smCyrillic + 48: 7, # langKazakh → smCyrillic + 49: 7, # langAzerbaijani → smCyrillic + 50: 4, # langAzerbaijanAr → smArabic + 51: 24, # langArmenian → smArmenian + 52: 23, # langGeorgian → smGeorgian + 53: 7, # langMoldavian → smCyrillic + 54: 7, # langKirghiz → smCyrillic + 55: 7, # langTajiki → smCyrillic + 56: 7, # langTurkmen → smCyrillic + 57: 27, # langMongolian → smMongolian + 58: 7, # langMongolianCyr → smCyrillic + 59: 4, # langPashto → smArabic + 60: 4, # langKurdish → smArabic + 61: 4, # langKashmiri → smArabic + 62: 4, # langSindhi → smArabic + 63: 26, # langTibetan → smTibetan + 64: 9, # langNepali → smDevanagari + 65: 9, # langSanskrit → smDevanagari + 66: 9, # langMarathi → smDevanagari + 67: 13, # langBengali → smBengali + 68: 13, # langAssamese → smBengali + 69: 11, # langGujarati → smGujarati + 70: 10, # langPunjabi → smGurmukhi + 71: 12, # langOriya → smOriya + 72: 17, # langMalayalam → smMalayalam + 73: 16, # langKannada → smKannada + 74: 14, # langTamil → smTamil + 75: 15, # langTelugu → smTelugu + 76: 18, # langSinhalese → smSinhalese + 77: 19, # langBurmese → smBurmese + 78: 20, # langKhmer → smKhmer + 79: 22, # langLao → smLao + 80: 30, # langVietnamese → smVietnamese + 81: 0, # langIndonesian → smRoman + 82: 0, # langTagalog → smRoman + 83: 0, 
# langMalayRoman → smRoman + 84: 4, # langMalayArabic → smArabic + 85: 28, # langAmharic → smEthiopic + 86: 28, # langTigrinya → smEthiopic + 87: 28, # langOromo → smEthiopic + 88: 0, # langSomali → smRoman + 89: 0, # langSwahili → smRoman + 90: 0, # langKinyarwanda → smRoman + 91: 0, # langRundi → smRoman + 92: 0, # langNyanja → smRoman + 93: 0, # langMalagasy → smRoman + 94: 0, # langEsperanto → smRoman + 128: 0, # langWelsh → smRoman (modified) + 129: 0, # langBasque → smRoman + 130: 0, # langCatalan → smRoman + 131: 0, # langLatin → smRoman + 132: 0, # langQuechua → smRoman + 133: 0, # langGuarani → smRoman + 134: 0, # langAymara → smRoman + 135: 7, # langTatar → smCyrillic + 136: 4, # langUighur → smArabic + 137: 26, # langDzongkha → smTibetan + 138: 0, # langJavaneseRom → smRoman + 139: 0, # langSundaneseRom → smRoman + 140: 0, # langGalician → smRoman + 141: 0, # langAfrikaans → smRoman + 142: 0, # langBreton → smRoman (modified) + 143: 28, # langInuktitut → smEthiopic (modified) + 144: 0, # langScottishGaelic → smRoman (modified) + 145: 0, # langManxGaelic → smRoman (modified) + 146: 0, # langIrishGaelicScript → smRoman (modified) + 147: 0, # langTongan → smRoman + 148: 6, # langGreekAncient → smRoman + 149: 0, # langGreenlandic → smRoman + 150: 0, # langAzerbaijanRoman → smRoman + 151: 0, # langNynorsk → smRoman +} diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_n_a_m_e_test.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_n_a_m_e_test.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_n_a_m_e_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_n_a_m_e_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,143 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.xmlWriter import XMLWriter -import unittest -from ._n_a_m_e import table__n_a_m_e, NameRecord - - -def makeName(text, 
nameID, platformID, platEncID, langID): - name = NameRecord() - name.nameID, name.platformID, name.platEncID, name.langID = ( - nameID, platformID, platEncID, langID) - name.string = tobytes(text, encoding=name.getEncoding()) - return name - - -class NameTableTest(unittest.TestCase): - - def test_getDebugName(self): - table = table__n_a_m_e() - table.names = [ - makeName("Bold", 258, 1, 0, 0), # Mac, MacRoman, English - makeName("Gras", 258, 1, 0, 1), # Mac, MacRoman, French - makeName("Fett", 258, 1, 0, 2), # Mac, MacRoman, German - makeName("Sem Fracções", 292, 1, 0, 8) # Mac, MacRoman, Portuguese - ] - self.assertEqual("Bold", table.getDebugName(258)) - self.assertEqual("Sem Fracções", table.getDebugName(292)) - self.assertEqual(None, table.getDebugName(999)) - - -class NameRecordTest(unittest.TestCase): - - def test_toUnicode_utf16be(self): - name = makeName("Foo Bold", 111, 0, 2, 7) - self.assertEqual("utf_16_be", name.getEncoding()) - self.assertEqual("Foo Bold", name.toUnicode()) - - def test_toUnicode_macroman(self): - name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman - self.assertEqual("mac_roman", name.getEncoding()) - self.assertEqual("Foo Italic", name.toUnicode()) - - def test_toUnicode_macromanian(self): - name = makeName(b"Foo Italic\xfb", 222, 1, 0, 37) # Mac Romanian - self.assertEqual("mac_romanian", name.getEncoding()) - self.assertEqual("Foo Italic"+unichr(0x02DA), name.toUnicode()) - - def test_toUnicode_UnicodeDecodeError(self): - name = makeName(b"\1", 111, 0, 2, 7) - self.assertEqual("utf_16_be", name.getEncoding()) - self.assertRaises(UnicodeDecodeError, name.toUnicode) - - def toXML(self, name): - writer = XMLWriter(BytesIO()) - name.toXML(writer, ttFont=None) - xml = writer.file.getvalue().decode("utf_8").strip() - return xml.split(writer.newlinestr.decode("utf_8"))[1:] - - def test_toXML_utf16be(self): - name = makeName("Foo Bold", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Foo Bold', - '' - ], self.toXML(name)) - - def 
test_toXML_utf16be_odd_length1(self): - name = makeName(b"\0F\0o\0o\0", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Foo', - '' - ], self.toXML(name)) - - def test_toXML_utf16be_odd_length2(self): - name = makeName(b"\0Fooz", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Fooz', - '' - ], self.toXML(name)) - - def test_toXML_utf16be_double_encoded(self): - name = makeName(b"\0\0\0F\0\0\0o", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Fo', - '' - ], self.toXML(name)) - - def test_toXML_macroman(self): - name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman - self.assertEqual([ - '', - ' Foo Italic', - '' - ], self.toXML(name)) - - def test_toXML_macroman_actual_utf16be(self): - name = makeName("\0F\0o\0o", 222, 1, 0, 7) - self.assertEqual([ - '', - ' Foo', - '' - ], self.toXML(name)) - - def test_toXML_unknownPlatEncID_nonASCII(self): - name = makeName(b"B\x8arli", 333, 1, 9876, 7) # Unknown Mac encodingID - self.assertEqual([ - '', - ' BŠrli', - '' - ], self.toXML(name)) - - def test_toXML_unknownPlatEncID_ASCII(self): - name = makeName(b"Barli", 333, 1, 9876, 7) # Unknown Mac encodingID - self.assertEqual([ - '', - ' Barli', - '' - ], self.toXML(name)) - - def test_encoding_macroman_misc(self): - name = makeName('', 123, 1, 0, 17) # Mac Turkish - self.assertEqual(name.getEncoding(), "mac_turkish") - name.langID = 37 - self.assertEqual(name.getEncoding(), "mac_romanian") - name.langID = 45 # Other - self.assertEqual(name.getEncoding(), "mac_roman") - - def test_extended_mac_encodings(self): - name = makeName(b'\xfe', 123, 1, 1, 0) # Mac Japanese - self.assertEqual(name.toUnicode(), unichr(0x2122)) - - def test_extended_unknown(self): - name = makeName(b'\xfe', 123, 10, 11, 12) - self.assertEqual(name.getEncoding(), "ascii") - self.assertEqual(name.getEncoding(None), None) - self.assertEqual(name.getEncoding(default=None), None) - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_o_p_b_d.py 
fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_o_p_b_d.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_o_p_b_d.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_o_p_b_d.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html +class table__o_p_b_d(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/O_S_2f_2.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/O_S_2f_2.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/O_S_2f_2.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/O_S_2f_2.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,10 +2,12 @@ from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval, num2binary, binary2num -from . 
import DefaultTable -import warnings +from fontTools.ttLib.tables import DefaultTable +import logging +log = logging.getLogger(__name__) + # panose classification panoseFormat = """ @@ -101,6 +103,8 @@ """the OS/2 table""" + dependencies = ["head"] + def decompile(self, data, ttFont): dummy, data = sstruct.unpack2(OS2_format_0, data, self) @@ -116,13 +120,26 @@ from fontTools import ttLib raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version) if len(data): - warnings.warn("too much 'OS/2' table data") + log.warning("too much 'OS/2' table data") self.panose = sstruct.unpack(panoseFormat, self.panose, Panose()) def compile(self, ttFont): self.updateFirstAndLastCharIndex(ttFont) panose = self.panose + head = ttFont["head"] + if (self.fsSelection & 1) and not (head.macStyle & 1<<1): + log.warning("fsSelection bit 0 (italic) and " + "head table macStyle bit 1 (italic) should match") + if (self.fsSelection & 1<<5) and not (head.macStyle & 1): + log.warning("fsSelection bit 5 (bold) and " + "head table macStyle bit 0 (bold) should match") + if (self.fsSelection & 1<<6) and (self.fsSelection & 1 + (1<<5)): + log.warning("fsSelection bit 6 (regular) is set, " + "bits 0 (italic) and 5 (bold) must be clear") + if self.version < 4 and self.fsSelection & 0b1110000000: + log.warning("fsSelection bits 7, 8 and 9 are only defined in " + "OS/2 table version 4 and up: version %s", self.version) self.panose = sstruct.pack(panoseFormat, self.panose) if self.version == 0: data = sstruct.pack(OS2_format_0, self) @@ -132,8 +149,8 @@ data = sstruct.pack(OS2_format_2, self) elif self.version == 5: d = self.__dict__.copy() - d['usLowerOpticalPointSize'] = int(round(self.usLowerOpticalPointSize * 20)) - d['usUpperOpticalPointSize'] = int(round(self.usUpperOpticalPointSize * 20)) + d['usLowerOpticalPointSize'] = round(self.usLowerOpticalPointSize * 20) + d['usUpperOpticalPointSize'] = round(self.usUpperOpticalPointSize * 20) data = sstruct.pack(OS2_format_5, d) 
else: from fontTools import ttLib @@ -192,16 +209,18 @@ setattr(self, name, safeEval(attrs["value"])) def updateFirstAndLastCharIndex(self, ttFont): + if 'cmap' not in ttFont: + return codes = set() - for table in ttFont['cmap'].tables: + for table in getattr(ttFont['cmap'], 'tables', []): if table.isUnicode(): codes.update(table.cmap.keys()) if codes: minCode = min(codes) maxCode = max(codes) # USHORT cannot hold codepoints greater than 0xFFFF - self.usFirstCharIndex = 0xFFFF if minCode > 0xFFFF else minCode - self.usLastCharIndex = 0xFFFF if maxCode > 0xFFFF else maxCode + self.usFirstCharIndex = min(0xFFFF, minCode) + self.usLastCharIndex = min(0xFFFF, maxCode) # misspelled attributes kept for legacy reasons @@ -228,3 +247,275 @@ @fsLastCharIndex.setter def fsLastCharIndex(self, value): self.usLastCharIndex = value + + def getUnicodeRanges(self): + """ Return the set of 'ulUnicodeRange*' bits currently enabled. """ + bits = set() + ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2 + ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4 + for i in range(32): + if ul1 & (1 << i): + bits.add(i) + if ul2 & (1 << i): + bits.add(i + 32) + if ul3 & (1 << i): + bits.add(i + 64) + if ul4 & (1 << i): + bits.add(i + 96) + return bits + + def setUnicodeRanges(self, bits): + """ Set the 'ulUnicodeRange*' fields to the specified 'bits'. 
""" + ul1, ul2, ul3, ul4 = 0, 0, 0, 0 + for bit in bits: + if 0 <= bit < 32: + ul1 |= (1 << bit) + elif 32 <= bit < 64: + ul2 |= (1 << (bit - 32)) + elif 64 <= bit < 96: + ul3 |= (1 << (bit - 64)) + elif 96 <= bit < 123: + ul4 |= (1 << (bit - 96)) + else: + raise ValueError('expected 0 <= int <= 122, found: %r' % bit) + self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2 + self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4 + + def recalcUnicodeRanges(self, ttFont, pruneOnly=False): + """ Intersect the codepoints in the font's Unicode cmap subtables with + the Unicode block ranges defined in the OpenType specification (v1.7), + and set the respective 'ulUnicodeRange*' bits if there is at least ONE + intersection. + If 'pruneOnly' is True, only clear unused bits with NO intersection. + """ + unicodes = set() + for table in ttFont['cmap'].tables: + if table.isUnicode(): + unicodes.update(table.cmap.keys()) + if pruneOnly: + empty = intersectUnicodeRanges(unicodes, inverse=True) + bits = self.getUnicodeRanges() - empty + else: + bits = intersectUnicodeRanges(unicodes) + self.setUnicodeRanges(bits) + return bits + + +# Unicode ranges data from the OpenType OS/2 table specification v1.7 + +OS2_UNICODE_RANGES = ( + (('Basic Latin', (0x0000, 0x007F)),), + (('Latin-1 Supplement', (0x0080, 0x00FF)),), + (('Latin Extended-A', (0x0100, 0x017F)),), + (('Latin Extended-B', (0x0180, 0x024F)),), + (('IPA Extensions', (0x0250, 0x02AF)), + ('Phonetic Extensions', (0x1D00, 0x1D7F)), + ('Phonetic Extensions Supplement', (0x1D80, 0x1DBF))), + (('Spacing Modifier Letters', (0x02B0, 0x02FF)), + ('Modifier Tone Letters', (0xA700, 0xA71F))), + (('Combining Diacritical Marks', (0x0300, 0x036F)), + ('Combining Diacritical Marks Supplement', (0x1DC0, 0x1DFF))), + (('Greek and Coptic', (0x0370, 0x03FF)),), + (('Coptic', (0x2C80, 0x2CFF)),), + (('Cyrillic', (0x0400, 0x04FF)), + ('Cyrillic Supplement', (0x0500, 0x052F)), + ('Cyrillic Extended-A', (0x2DE0, 0x2DFF)), + ('Cyrillic Extended-B', 
(0xA640, 0xA69F))), + (('Armenian', (0x0530, 0x058F)),), + (('Hebrew', (0x0590, 0x05FF)),), + (('Vai', (0xA500, 0xA63F)),), + (('Arabic', (0x0600, 0x06FF)), + ('Arabic Supplement', (0x0750, 0x077F))), + (('NKo', (0x07C0, 0x07FF)),), + (('Devanagari', (0x0900, 0x097F)),), + (('Bengali', (0x0980, 0x09FF)),), + (('Gurmukhi', (0x0A00, 0x0A7F)),), + (('Gujarati', (0x0A80, 0x0AFF)),), + (('Oriya', (0x0B00, 0x0B7F)),), + (('Tamil', (0x0B80, 0x0BFF)),), + (('Telugu', (0x0C00, 0x0C7F)),), + (('Kannada', (0x0C80, 0x0CFF)),), + (('Malayalam', (0x0D00, 0x0D7F)),), + (('Thai', (0x0E00, 0x0E7F)),), + (('Lao', (0x0E80, 0x0EFF)),), + (('Georgian', (0x10A0, 0x10FF)), + ('Georgian Supplement', (0x2D00, 0x2D2F))), + (('Balinese', (0x1B00, 0x1B7F)),), + (('Hangul Jamo', (0x1100, 0x11FF)),), + (('Latin Extended Additional', (0x1E00, 0x1EFF)), + ('Latin Extended-C', (0x2C60, 0x2C7F)), + ('Latin Extended-D', (0xA720, 0xA7FF))), + (('Greek Extended', (0x1F00, 0x1FFF)),), + (('General Punctuation', (0x2000, 0x206F)), + ('Supplemental Punctuation', (0x2E00, 0x2E7F))), + (('Superscripts And Subscripts', (0x2070, 0x209F)),), + (('Currency Symbols', (0x20A0, 0x20CF)),), + (('Combining Diacritical Marks For Symbols', (0x20D0, 0x20FF)),), + (('Letterlike Symbols', (0x2100, 0x214F)),), + (('Number Forms', (0x2150, 0x218F)),), + (('Arrows', (0x2190, 0x21FF)), + ('Supplemental Arrows-A', (0x27F0, 0x27FF)), + ('Supplemental Arrows-B', (0x2900, 0x297F)), + ('Miscellaneous Symbols and Arrows', (0x2B00, 0x2BFF))), + (('Mathematical Operators', (0x2200, 0x22FF)), + ('Supplemental Mathematical Operators', (0x2A00, 0x2AFF)), + ('Miscellaneous Mathematical Symbols-A', (0x27C0, 0x27EF)), + ('Miscellaneous Mathematical Symbols-B', (0x2980, 0x29FF))), + (('Miscellaneous Technical', (0x2300, 0x23FF)),), + (('Control Pictures', (0x2400, 0x243F)),), + (('Optical Character Recognition', (0x2440, 0x245F)),), + (('Enclosed Alphanumerics', (0x2460, 0x24FF)),), + (('Box Drawing', (0x2500, 0x257F)),), + (('Block 
Elements', (0x2580, 0x259F)),), + (('Geometric Shapes', (0x25A0, 0x25FF)),), + (('Miscellaneous Symbols', (0x2600, 0x26FF)),), + (('Dingbats', (0x2700, 0x27BF)),), + (('CJK Symbols And Punctuation', (0x3000, 0x303F)),), + (('Hiragana', (0x3040, 0x309F)),), + (('Katakana', (0x30A0, 0x30FF)), + ('Katakana Phonetic Extensions', (0x31F0, 0x31FF))), + (('Bopomofo', (0x3100, 0x312F)), + ('Bopomofo Extended', (0x31A0, 0x31BF))), + (('Hangul Compatibility Jamo', (0x3130, 0x318F)),), + (('Phags-pa', (0xA840, 0xA87F)),), + (('Enclosed CJK Letters And Months', (0x3200, 0x32FF)),), + (('CJK Compatibility', (0x3300, 0x33FF)),), + (('Hangul Syllables', (0xAC00, 0xD7AF)),), + (('Non-Plane 0 *', (0xD800, 0xDFFF)),), + (('Phoenician', (0x10900, 0x1091F)),), + (('CJK Unified Ideographs', (0x4E00, 0x9FFF)), + ('CJK Radicals Supplement', (0x2E80, 0x2EFF)), + ('Kangxi Radicals', (0x2F00, 0x2FDF)), + ('Ideographic Description Characters', (0x2FF0, 0x2FFF)), + ('CJK Unified Ideographs Extension A', (0x3400, 0x4DBF)), + ('CJK Unified Ideographs Extension B', (0x20000, 0x2A6DF)), + ('Kanbun', (0x3190, 0x319F))), + (('Private Use Area (plane 0)', (0xE000, 0xF8FF)),), + (('CJK Strokes', (0x31C0, 0x31EF)), + ('CJK Compatibility Ideographs', (0xF900, 0xFAFF)), + ('CJK Compatibility Ideographs Supplement', (0x2F800, 0x2FA1F))), + (('Alphabetic Presentation Forms', (0xFB00, 0xFB4F)),), + (('Arabic Presentation Forms-A', (0xFB50, 0xFDFF)),), + (('Combining Half Marks', (0xFE20, 0xFE2F)),), + (('Vertical Forms', (0xFE10, 0xFE1F)), + ('CJK Compatibility Forms', (0xFE30, 0xFE4F))), + (('Small Form Variants', (0xFE50, 0xFE6F)),), + (('Arabic Presentation Forms-B', (0xFE70, 0xFEFF)),), + (('Halfwidth And Fullwidth Forms', (0xFF00, 0xFFEF)),), + (('Specials', (0xFFF0, 0xFFFF)),), + (('Tibetan', (0x0F00, 0x0FFF)),), + (('Syriac', (0x0700, 0x074F)),), + (('Thaana', (0x0780, 0x07BF)),), + (('Sinhala', (0x0D80, 0x0DFF)),), + (('Myanmar', (0x1000, 0x109F)),), + (('Ethiopic', (0x1200, 0x137F)), + ('Ethiopic 
Supplement', (0x1380, 0x139F)), + ('Ethiopic Extended', (0x2D80, 0x2DDF))), + (('Cherokee', (0x13A0, 0x13FF)),), + (('Unified Canadian Aboriginal Syllabics', (0x1400, 0x167F)),), + (('Ogham', (0x1680, 0x169F)),), + (('Runic', (0x16A0, 0x16FF)),), + (('Khmer', (0x1780, 0x17FF)), + ('Khmer Symbols', (0x19E0, 0x19FF))), + (('Mongolian', (0x1800, 0x18AF)),), + (('Braille Patterns', (0x2800, 0x28FF)),), + (('Yi Syllables', (0xA000, 0xA48F)), + ('Yi Radicals', (0xA490, 0xA4CF))), + (('Tagalog', (0x1700, 0x171F)), + ('Hanunoo', (0x1720, 0x173F)), + ('Buhid', (0x1740, 0x175F)), + ('Tagbanwa', (0x1760, 0x177F))), + (('Old Italic', (0x10300, 0x1032F)),), + (('Gothic', (0x10330, 0x1034F)),), + (('Deseret', (0x10400, 0x1044F)),), + (('Byzantine Musical Symbols', (0x1D000, 0x1D0FF)), + ('Musical Symbols', (0x1D100, 0x1D1FF)), + ('Ancient Greek Musical Notation', (0x1D200, 0x1D24F))), + (('Mathematical Alphanumeric Symbols', (0x1D400, 0x1D7FF)),), + (('Private Use (plane 15)', (0xF0000, 0xFFFFD)), + ('Private Use (plane 16)', (0x100000, 0x10FFFD))), + (('Variation Selectors', (0xFE00, 0xFE0F)), + ('Variation Selectors Supplement', (0xE0100, 0xE01EF))), + (('Tags', (0xE0000, 0xE007F)),), + (('Limbu', (0x1900, 0x194F)),), + (('Tai Le', (0x1950, 0x197F)),), + (('New Tai Lue', (0x1980, 0x19DF)),), + (('Buginese', (0x1A00, 0x1A1F)),), + (('Glagolitic', (0x2C00, 0x2C5F)),), + (('Tifinagh', (0x2D30, 0x2D7F)),), + (('Yijing Hexagram Symbols', (0x4DC0, 0x4DFF)),), + (('Syloti Nagri', (0xA800, 0xA82F)),), + (('Linear B Syllabary', (0x10000, 0x1007F)), + ('Linear B Ideograms', (0x10080, 0x100FF)), + ('Aegean Numbers', (0x10100, 0x1013F))), + (('Ancient Greek Numbers', (0x10140, 0x1018F)),), + (('Ugaritic', (0x10380, 0x1039F)),), + (('Old Persian', (0x103A0, 0x103DF)),), + (('Shavian', (0x10450, 0x1047F)),), + (('Osmanya', (0x10480, 0x104AF)),), + (('Cypriot Syllabary', (0x10800, 0x1083F)),), + (('Kharoshthi', (0x10A00, 0x10A5F)),), + (('Tai Xuan Jing Symbols', (0x1D300, 0x1D35F)),), + 
(('Cuneiform', (0x12000, 0x123FF)), + ('Cuneiform Numbers and Punctuation', (0x12400, 0x1247F))), + (('Counting Rod Numerals', (0x1D360, 0x1D37F)),), + (('Sundanese', (0x1B80, 0x1BBF)),), + (('Lepcha', (0x1C00, 0x1C4F)),), + (('Ol Chiki', (0x1C50, 0x1C7F)),), + (('Saurashtra', (0xA880, 0xA8DF)),), + (('Kayah Li', (0xA900, 0xA92F)),), + (('Rejang', (0xA930, 0xA95F)),), + (('Cham', (0xAA00, 0xAA5F)),), + (('Ancient Symbols', (0x10190, 0x101CF)),), + (('Phaistos Disc', (0x101D0, 0x101FF)),), + (('Carian', (0x102A0, 0x102DF)), + ('Lycian', (0x10280, 0x1029F)), + ('Lydian', (0x10920, 0x1093F))), + (('Domino Tiles', (0x1F030, 0x1F09F)), + ('Mahjong Tiles', (0x1F000, 0x1F02F))), +) + + +_unicodeRangeSets = [] + +def _getUnicodeRangeSets(): + # build the sets of codepoints for each unicode range bit, and cache result + if not _unicodeRangeSets: + for bit, blocks in enumerate(OS2_UNICODE_RANGES): + rangeset = set() + for _, (start, stop) in blocks: + rangeset.update(set(range(start, stop+1))) + if bit == 57: + # The spec says that bit 57 ("Non Plane 0") implies that there's + # at least one codepoint beyond the BMP; so I also include all + # the non-BMP codepoints here + rangeset.update(set(range(0x10000, 0x110000))) + _unicodeRangeSets.append(rangeset) + return _unicodeRangeSets + + +def intersectUnicodeRanges(unicodes, inverse=False): + """ Intersect a sequence of (int) Unicode codepoints with the Unicode block + ranges defined in the OpenType specification v1.7, and return the set of + 'ulUnicodeRanges' bits for which there is at least ONE intersection. + If 'inverse' is True, return the the bits for which there is NO intersection. + + >>> intersectUnicodeRanges([0x0410]) == {9} + True + >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122} + True + >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == ( + ... 
set(range(123)) - {9, 57, 122}) + True + """ + unicodes = set(unicodes) + uniranges = _getUnicodeRangeSets() + bits = set([ + bit for bit, unirange in enumerate(uniranges) + if not unirange.isdisjoint(unicodes) ^ inverse]) + return bits + + +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/otBase.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/otBase.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/otBase.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/otBase.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,7 +1,12 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from .DefaultTable import DefaultTable +import sys +import array import struct +import logging + +log = logging.getLogger(__name__) class OverflowErrorRecord(object): def __init__(self, overflowTuple): @@ -31,26 +36,10 @@ def decompile(self, data, font): from . import otTables - cachingStats = None if True else {} - class GlobalState(object): - def __init__(self, tableType, cachingStats): - self.tableType = tableType - self.cachingStats = cachingStats - globalState = GlobalState(tableType=self.tableTag, - cachingStats=cachingStats) - reader = OTTableReader(data, globalState) + reader = OTTableReader(data, tableTag=self.tableTag) tableClass = getattr(otTables, self.tableTag) self.table = tableClass() self.table.decompile(reader, font) - if cachingStats: - stats = sorted([(v, k) for k, v in cachingStats.items()]) - stats.reverse() - print("cachingsstats for ", self.tableTag) - for v, k in stats: - if v < 2: - break - print(v, k) - print("---", len(stats)) def compile(self, font): """ Create a top-level OTFWriter for the GPOS/GSUB table. @@ -73,15 +62,11 @@ If a lookup subtable overflows an offset, we have to start all over. 
""" - class GlobalState(object): - def __init__(self, tableType): - self.tableType = tableType - globalState = GlobalState(tableType=self.tableTag) overflowRecord = None while True: try: - writer = OTTableWriter(globalState) + writer = OTTableWriter(tableTag=self.tableTag) self.table.compile(writer, font) return writer.getAllData() @@ -91,7 +76,7 @@ raise # Oh well... overflowRecord = e.value - print("Attempting to fix OTLOffsetOverflowError", e) + log.info("Attempting to fix OTLOffsetOverflowError %s", e) lastItem = overflowRecord ok = 0 @@ -119,31 +104,29 @@ """Helper class to retrieve data from an OpenType table.""" - __slots__ = ('data', 'offset', 'pos', 'globalState', 'localState') + __slots__ = ('data', 'offset', 'pos', 'localState', 'tableTag') - def __init__(self, data, globalState={}, localState=None, offset=0): + def __init__(self, data, localState=None, offset=0, tableTag=None): self.data = data self.offset = offset self.pos = offset - self.globalState = globalState self.localState = localState + self.tableTag = tableTag def advance(self, count): self.pos += count + def seek(self, pos): self.pos = pos def copy(self): - other = self.__class__(self.data, self.globalState, self.localState, self.offset) + other = self.__class__(self.data, self.localState, self.offset, self.tableTag) other.pos = self.pos return other def getSubReader(self, offset): offset = self.offset + offset - cachingStats = self.globalState.cachingStats - if cachingStats is not None: - cachingStats[offset] = cachingStats.get(offset, 0) + 1 - return self.__class__(self.data, self.globalState, self.localState, offset) + return self.__class__(self.data, self.localState, offset, self.tableTag) def readUShort(self): pos = self.pos @@ -152,6 +135,22 @@ self.pos = newpos return value + def readUShortArray(self, count): + pos = self.pos + newpos = pos + count * 2 + value = array.array("H", self.data[pos:newpos]) + if sys.byteorder != "big": + value.byteswap() + self.pos = newpos + return value + 
+ def readInt8(self): + pos = self.pos + newpos = pos + 1 + value, = struct.unpack(">b", self.data[pos:newpos]) + self.pos = newpos + return value + def readShort(self): pos = self.pos newpos = pos + 2 @@ -166,6 +165,13 @@ self.pos = newpos return value + def readUInt8(self): + pos = self.pos + newpos = pos + 1 + value, = struct.unpack(">B", self.data[pos:newpos]) + self.pos = newpos + return value + def readUInt24(self): pos = self.pos newpos = pos + 3 @@ -184,7 +190,7 @@ pos = self.pos newpos = pos + 4 value = Tag(self.data[pos:newpos]) - assert len(value) == 4 + assert len(value) == 4, value self.pos = newpos return value @@ -211,11 +217,11 @@ """Helper class to gather and assemble data for OpenType tables.""" - def __init__(self, globalState, localState=None): + def __init__(self, localState=None, tableTag=None): self.items = [] self.pos = None - self.globalState = globalState self.localState = localState + self.tableTag = tableTag self.longOffset = False self.parent = None @@ -227,45 +233,19 @@ def __getitem__(self, name): return self.localState[name] - # assembler interface - - def getAllData(self): - """Assemble all data, including all subtables.""" - self._doneWriting() - tables, extTables = self._gatherTables() - tables.reverse() - extTables.reverse() - # Gather all data in two passes: the absolute positions of all - # subtable are needed before the actual data can be assembled. 
- pos = 0 - for table in tables: - table.pos = pos - pos = pos + table.getDataLength() - - for table in extTables: - table.pos = pos - pos = pos + table.getDataLength() - - data = [] - for table in tables: - tableData = table.getData() - data.append(tableData) - - for table in extTables: - tableData = table.getData() - data.append(tableData) + def __delitem__(self, name): + del self.localState[name] - return bytesjoin(data) + # assembler interface def getDataLength(self): """Return the length of this table in bytes, without subtables.""" l = 0 for item in self.items: - if hasattr(item, "getData") or hasattr(item, "getCountData"): - if item.longOffset: - l = l + 4 # sizeof(ULong) - else: - l = l + 2 # sizeof(UShort) + if hasattr(item, "getCountData"): + l += item.size + elif hasattr(item, "getData"): + l += 4 if item.longOffset else 2 else: l = l + len(item) return l @@ -286,36 +266,6 @@ items[i] = packUShort(item.pos - pos) except struct.error: # provide data to fix overflow problem. - # If the overflow is to a lookup, or from a lookup to a subtable, - # just report the current item. Otherwise... - if self.name not in [ 'LookupList', 'Lookup']: - # overflow is within a subTable. Life is more complicated. - # If we split the sub-table just before the current item, we may still suffer overflow. - # This is because duplicate table merging is done only within an Extension subTable tree; - # when we split the subtable in two, some items may no longer be duplicates. - # Get worst case by adding up all the item lengths, depth first traversal. - # and then report the first item that overflows a short. 
- def getDeepItemLength(table): - if hasattr(table, "getDataLength"): - length = 0 - for item in table.items: - length = length + getDeepItemLength(item) - else: - length = len(table) - return length - - length = self.getDataLength() - if hasattr(self, "sortCoverageLast") and item.name == "Coverage": - # Coverage is first in the item list, but last in the table list, - # The original overflow is really in the item list. Skip the Coverage - # table in the following test. - items = items[i+1:] - - for j in range(len(items)): - item = items[j] - length = length + getDeepItemLength(item) - if length > 65535: - break overflowErrorRecord = self.getOverflowErrorRecord(item) raise OTLOffsetOverflowError(overflowErrorRecord) @@ -327,13 +277,15 @@ return hash(self.items) def __ne__(self, other): - return not self.__eq__(other) + result = self.__eq__(other) + return result if result is NotImplemented else not result + def __eq__(self, other): if type(self) != type(other): return NotImplemented return self.items == other.items - def _doneWriting(self, internedTables=None): + def _doneWriting(self, internedTables): # Convert CountData references to data string items # collapse duplicate table references to a unique entry # "tables" are OTTableWriter objects. @@ -341,32 +293,29 @@ # For Extension Lookup types, we can # eliminate duplicates only within the tree under the Extension Lookup, # as offsets may exceed 64K even between Extension LookupTable subtables. - if internedTables is None: + isExtension = hasattr(self, "Extension") + + # Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level + # arrays (ScriptList, FeatureList, LookupList) point to the same, possibly + # empty, array. So, we don't share those. 
+ # See: https://github.com/behdad/fonttools/issues/518 + dontShare = hasattr(self, 'DontShare') + + if isExtension: internedTables = {} - items = self.items - iRange = list(range(len(items))) - if hasattr(self, "Extension"): - newTree = 1 - else: - newTree = 0 - for i in iRange: + items = self.items + for i in range(len(items)): item = items[i] if hasattr(item, "getCountData"): items[i] = item.getCountData() elif hasattr(item, "getData"): - if newTree: - item._doneWriting() - else: - item._doneWriting(internedTables) - internedItem = internedTables.get(item) - if internedItem: - items[i] = item = internedItem - else: - internedTables[item] = item + item._doneWriting(internedTables) + if not dontShare: + items[i] = item = internedTables.setdefault(item, item) self.items = tuple(items) - def _gatherTables(self, tables=None, extTables=None, done=None): + def _gatherTables(self, tables, extTables, done): # Convert table references in self.items tree to a flat # list of tables in depth-first traversal order. # "tables" are OTTableWriter objects. @@ -374,21 +323,21 @@ # resolve duplicate references to be the last reference in the list of tables. # For extension lookups, duplicate references can be merged only within the # writer tree under the extension lookup. - if tables is None: # init call for first time. - tables = [] - extTables = [] - done = {} - done[self] = 1 + done[id(self)] = True numItems = len(self.items) iRange = list(range(numItems)) iRange.reverse() - if hasattr(self, "Extension"): - appendExtensions = 1 - else: - appendExtensions = 0 + isExtension = hasattr(self, "Extension") + dontShare = hasattr(self, 'DontShare') + + selfTables = tables + + if isExtension: + assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables" + tables, extTables, done = extTables, None, {} # add Coverage table if it is sorted last. 
sortCoverageLast = 0 @@ -399,7 +348,7 @@ if hasattr(item, "name") and (item.name == "Coverage"): sortCoverageLast = 1 break - if item not in done: + if id(item) not in done: item._gatherTables(tables, extTables, done) else: # We're a new parent of item @@ -414,24 +363,50 @@ # we've already 'gathered' it above continue - if appendExtensions: - assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables" - newDone = {} - item._gatherTables(extTables, None, newDone) - - elif item not in done: + if id(item) not in done: item._gatherTables(tables, extTables, done) else: - # We're a new parent of item + # Item is already written out by other parent pass - tables.append(self) - return tables, extTables + selfTables.append(self) + + def getAllData(self): + """Assemble all data, including all subtables.""" + internedTables = {} + self._doneWriting(internedTables) + tables = [] + extTables = [] + done = {} + self._gatherTables(tables, extTables, done) + tables.reverse() + extTables.reverse() + # Gather all data in two passes: the absolute positions of all + # subtable are needed before the actual data can be assembled. + pos = 0 + for table in tables: + table.pos = pos + pos = pos + table.getDataLength() + + for table in extTables: + table.pos = pos + pos = pos + table.getDataLength() + + data = [] + for table in tables: + tableData = table.getData() + data.append(tableData) + + for table in extTables: + tableData = table.getData() + data.append(tableData) + + return bytesjoin(data) # interface for gathering data, as used by table.compile() def getSubWriter(self): - subwriter = self.__class__(self.globalState, self.localState) + subwriter = self.__class__(self.localState, self.tableTag) subwriter.parent = self # because some subtables have idential values, we discard # the duplicates under the getAllData method. Hence some # subtable writers can have more than one parent writer. 
@@ -439,14 +414,23 @@ return subwriter def writeUShort(self, value): - assert 0 <= value < 0x10000 + assert 0 <= value < 0x10000, value self.items.append(struct.pack(">H", value)) def writeShort(self, value): + assert -32768 <= value < 32768, value self.items.append(struct.pack(">h", value)) + def writeUInt8(self, value): + assert 0 <= value < 256, value + self.items.append(struct.pack(">B", value)) + + def writeInt8(self, value): + assert -128 <= value < 128, value + self.items.append(struct.pack(">b", value)) + def writeUInt24(self, value): - assert 0 <= value < 0x1000000 + assert 0 <= value < 0x1000000, value b = struct.pack(">L", value) self.items.append(b[1:]) @@ -458,14 +442,14 @@ def writeTag(self, tag): tag = Tag(tag).tobytes() - assert len(tag) == 4 + assert len(tag) == 4, tag self.items.append(tag) def writeSubTable(self, subWriter): self.items.append(subWriter) - def writeCountReference(self, table, name): - ref = CountReference(table, name) + def writeCountReference(self, table, name, size=2, value=None): + ref = CountReference(table, name, size=size, value=value) self.items.append(ref) return ref @@ -476,7 +460,7 @@ def writeData(self, data): self.items.append(data) - def getOverflowErrorRecord(self, item): + def getOverflowErrorRecord(self, item): LookupListIndex = SubTableIndex = itemName = itemIndex = None if self.name == 'LookupList': LookupListIndex = item.repeatIndex @@ -484,7 +468,7 @@ LookupListIndex = self.repeatIndex SubTableIndex = item.repeatIndex else: - itemName = item.name + itemName = getattr(item, 'name', '') if hasattr(item, 'repeatIndex'): itemIndex = item.repeatIndex if self.name == 'SubTable': @@ -494,10 +478,10 @@ LookupListIndex = self.parent.parent.repeatIndex SubTableIndex = self.parent.repeatIndex else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable. 
- itemName = ".".join([self.name, item.name]) + itemName = ".".join([self.name, itemName]) p1 = self.parent while p1 and p1.name not in ['ExtSubTable', 'SubTable']: - itemName = ".".join([p1.name, item.name]) + itemName = ".".join([p1.name, itemName]) p1 = p1.parent if p1: if p1.name == 'ExtSubTable': @@ -507,14 +491,17 @@ LookupListIndex = p1.parent.repeatIndex SubTableIndex = p1.repeatIndex - return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) ) + return OverflowErrorRecord( (self.tableTag, LookupListIndex, SubTableIndex, itemName, itemIndex) ) class CountReference(object): """A reference to a Count value, not a count of references.""" - def __init__(self, table, name): + def __init__(self, table, name, size=None, value=None): self.table = table self.name = name + self.size = size + if value is not None: + self.setValue(value) def setValue(self, value): table = self.table name = self.name @@ -523,13 +510,17 @@ else: assert table[name] == value, (name, table[name], value) def getCountData(self): - return packUShort(self.table[self.name]) + v = self.table[self.name] + if v is None: v = 0 + return {1:packUInt8, 2:packUShort, 4:packULong}[self.size](v) + +def packUInt8 (value): + return struct.pack(">B", value) def packUShort(value): return struct.pack(">H", value) - def packULong(value): assert 0 <= value < 0x100000000, value return struct.pack(">L", value) @@ -579,22 +570,50 @@ def getConverterByName(self, name): return self.convertersByName[name] + def populateDefaults(self, propagator=None): + for conv in self.getConverters(): + if conv.repeat: + if not hasattr(self, conv.name): + setattr(self, conv.name, []) + countValue = len(getattr(self, conv.name)) - conv.aux + try: + count_conv = self.getConverterByName(conv.repeat) + setattr(self, conv.repeat, countValue) + except KeyError: + # conv.repeat is a propagated count + if propagator and conv.repeat in propagator: + 
propagator[conv.repeat].setValue(countValue) + else: + if conv.aux and not eval(conv.aux, None, self.__dict__): + continue + if hasattr(self, conv.name): + continue # Warn if it should NOT be present?! + if hasattr(conv, 'writeNullOffset'): + setattr(self, conv.name, None) # Warn? + #elif not conv.isCount: + # # Warn? + # pass + def decompile(self, reader, font): self.readFormat(reader) table = {} self.__rawTable = table # for debugging - converters = self.getConverters() - for conv in converters: + for conv in self.getConverters(): if conv.name == "SubTable": - conv = conv.getConverter(reader.globalState.tableType, + conv = conv.getConverter(reader.tableTag, table["LookupType"]) if conv.name == "ExtSubTable": - conv = conv.getConverter(reader.globalState.tableType, + conv = conv.getConverter(reader.tableTag, table["ExtensionLookupType"]) if conv.name == "FeatureParams": conv = conv.getConverter(reader["FeatureTag"]) + if conv.name == "SubStruct": + conv = conv.getConverter(reader.tableTag, + table["MorphType"]) if conv.repeat: - if conv.repeat in table: + if isinstance(conv.repeat, int): + countValue = conv.repeat + elif conv.repeat in table: countValue = table[conv.repeat] else: # conv.repeat is a propagated count @@ -608,33 +627,52 @@ if conv.isPropagated: reader[conv.name] = table[conv.name] - self.postRead(table, font) + if hasattr(self, 'postRead'): + self.postRead(table, font) + else: + self.__dict__.update(table) del self.__rawTable # succeeded, get rid of debugging info def compile(self, writer, font): self.ensureDecompiled() - table = self.preWrite(font) + if hasattr(self, 'preWrite'): + table = self.preWrite(font) + else: + table = self.__dict__.copy() + if hasattr(self, 'sortCoverageLast'): writer.sortCoverageLast = 1 + if hasattr(self, 'DontShare'): + writer.DontShare = True + if hasattr(self.__class__, 'LookupType'): writer['LookupType'].setValue(self.__class__.LookupType) self.writeFormat(writer) for conv in self.getConverters(): - value = 
table.get(conv.name) + value = table.get(conv.name) # TODO Handle defaults instead of defaulting to None! if conv.repeat: if value is None: value = [] countValue = len(value) - conv.aux - if conv.repeat in table: - CountReference(table, conv.repeat).setValue(countValue) + if isinstance(conv.repeat, int): + assert len(value) == conv.repeat, 'expected %d values, got %d' % (conv.repeat, len(value)) + elif conv.repeat in table: + CountReference(table, conv.repeat, value=countValue) else: # conv.repeat is a propagated count writer[conv.repeat].setValue(countValue) - conv.writeArray(writer, font, table, value) + values = value + for i, value in enumerate(values): + try: + conv.write(writer, font, table, value, i) + except Exception as e: + name = value.__class__.__name__ if value is not None else conv.name + e.args = e.args + (name+'['+str(i)+']',) + raise elif conv.isCount: # Special-case Count values. # Assumption: a Count field will *always* precede @@ -643,18 +681,27 @@ # table. We will later store it here. # We add a reference: by the time the data is assembled # the Count value will be filled in. - ref = writer.writeCountReference(table, conv.name) + ref = writer.writeCountReference(table, conv.name, conv.staticSize) table[conv.name] = None if conv.isPropagated: writer[conv.name] = ref elif conv.isLookupType: - ref = writer.writeCountReference(table, conv.name) - table[conv.name] = None + # We make sure that subtables have the same lookup type, + # and that the type is the same as the one set on the + # Lookup object, if any is set. 
+ if conv.name not in table: + table[conv.name] = None + ref = writer.writeCountReference(table, conv.name, conv.staticSize, table[conv.name]) writer['LookupType'] = ref else: if conv.aux and not eval(conv.aux, None, table): continue - conv.write(writer, font, table, value) + try: + conv.write(writer, font, table, value) + except Exception as e: + name = value.__class__.__name__ if value is not None else conv.name + e.args = e.args + (name,) + raise if conv.isPropagated: writer[conv.name] = value @@ -664,12 +711,6 @@ def writeFormat(self, writer): pass - def postRead(self, table, font): - self.__dict__.update(table) - - def preWrite(self, font): - return self.__dict__.copy() - def toXML(self, xmlWriter, font, attrs=None, name=None): tableName = name if name else self.__class__.__name__ if attrs is None: @@ -688,7 +729,7 @@ # do it ourselves. I think I'm getting schizophrenic... for conv in self.getConverters(): if conv.repeat: - value = getattr(self, conv.name) + value = getattr(self, conv.name, []) for i in range(len(value)): item = value[i] conv.xmlWrite(xmlWriter, font, item, conv.name, @@ -696,7 +737,7 @@ else: if conv.aux and not eval(conv.aux, None, vars(self)): continue - value = getattr(self, conv.name) + value = getattr(self, conv.name, None) # TODO Handle defaults instead of defaulting to None! 
conv.xmlWrite(xmlWriter, font, value, conv.name, []) def fromXML(self, name, attrs, content, font): @@ -715,7 +756,9 @@ setattr(self, conv.name, value) def __ne__(self, other): - return not self.__eq__(other) + result = self.__eq__(other) + return result if result is NotImplemented else not result + def __eq__(self, other): if type(self) != type(other): return NotImplemented @@ -736,20 +779,19 @@ return NotImplemented def getConverters(self): - return self.converters[self.Format] + return self.converters.get(self.Format, []) def getConverterByName(self, name): return self.convertersByName[self.Format][name] def readFormat(self, reader): self.Format = reader.readUShort() - assert self.Format != 0, (self, reader.pos, len(reader.data)) def writeFormat(self, writer): writer.writeUShort(self.Format) def toXML(self, xmlWriter, font, attrs=None, name=None): - BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) + BaseTable.toXML(self, xmlWriter, font, attrs, name) # @@ -845,6 +887,18 @@ # see ValueRecordFactory + def __init__(self, valueFormat=None, src=None): + if valueFormat is not None: + for mask, name, isDevice, signed in valueRecordFormat: + if valueFormat & mask: + setattr(self, name, None if isDevice else 0) + if src is not None: + for key,val in src.__dict__.items(): + assert hasattr(self, key) + setattr(self, key, val) + elif src is not None: + self.__dict__ = src.__dict__.copy() + def getFormat(self): format = 0 for name in self.__dict__.keys(): @@ -870,7 +924,7 @@ xmlWriter.newline() for name, deviceRecord in deviceItems: if deviceRecord is not None: - deviceRecord.toXML(xmlWriter, font) + deviceRecord.toXML(xmlWriter, font, name=name) xmlWriter.endtag(valueName) xmlWriter.newline() else: @@ -894,7 +948,9 @@ setattr(self, name, value) def __ne__(self, other): - return not self.__eq__(other) + result = self.__eq__(other) + return result if result is NotImplemented else not result + def __eq__(self, other): if type(self) != type(other): 
return NotImplemented diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/otConverters.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/otConverters.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/otConverters.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/otConverters.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,9 +1,22 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi -from .otBase import ValueRecordFactory -import array +from fontTools.misc.fixedTools import ( + fixedToFloat as fi2fl, floatToFixed as fl2fi, ensureVersionIsLong as fi2ve, + versionToFixed as ve2fi) +from fontTools.misc.textTools import pad, safeEval +from fontTools.ttLib import getSearchRange +from .otBase import (CountReference, FormatSwitchingBaseTable, + OTTableReader, OTTableWriter, ValueRecordFactory) +from .otTables import (lookupTypes, AATStateTable, AATState, AATAction, + ContextualMorphAction, LigatureMorphAction, + MorxSubtable) +from functools import partial +import struct +import logging + + +log = logging.getLogger(__name__) +istuple = lambda t: isinstance(t, tuple) def buildConverters(tableSpec, tableNamespace): @@ -17,24 +30,37 @@ if name.startswith("ValueFormat"): assert tp == "uint16" converterClass = ValueFormat - elif name.endswith("Count") or name.endswith("LookupType"): - assert tp == "uint16" - converterClass = ComputedUShort + elif name.endswith("Count") or name in ("StructLength", "MorphType"): + converterClass = { + "uint8": ComputedUInt8, + "uint16": ComputedUShort, + "uint32": ComputedULong, + }[tp] elif name == "SubTable": converterClass = SubTable elif name == "ExtSubTable": converterClass = ExtSubTable + elif name == "SubStruct": + converterClass = SubStruct elif name == "FeatureParams": converterClass = FeatureParams + elif name in 
("CIDGlyphMapping", "GlyphCIDMapping"): + converterClass = StructWithLength else: - if not tp in converterMapping: + if not tp in converterMapping and '(' not in tp: tableName = tp converterClass = Struct else: - converterClass = converterMapping[tp] - tableClass = tableNamespace.get(tableName) - conv = converterClass(name, repeat, aux, tableClass) - if name in ["SubTable", "ExtSubTable"]: + converterClass = eval(tp, tableNamespace, converterMapping) + if tp in ('MortChain', 'MortSubtable', 'MorxChain'): + tableClass = tableNamespace.get(tp) + else: + tableClass = tableNamespace.get(tableName) + if tableClass is not None: + conv = converterClass(name, repeat, aux, tableClass=tableClass) + else: + conv = converterClass(name, repeat, aux) + if name in ["SubTable", "ExtSubTable", "SubStruct"]: conv.lookupTypes = tableNamespace['lookupTypes'] # also create reverse mapping for t in conv.lookupTypes.values(): @@ -54,15 +80,18 @@ class _MissingItem(tuple): __slots__ = () + try: from collections import UserList -except: +except ImportError: from UserList import UserList + class _LazyList(UserList): def __getslice__(self, i, j): return self.__getitem__(slice(i, j)) + def __getitem__(self, k): if isinstance(k, slice): indices = range(*k.indices(len(self))) @@ -74,19 +103,34 @@ self.data[k] = item return item + def __add__(self, other): + if isinstance(other, _LazyList): + other = list(other) + elif isinstance(other, list): + pass + else: + return NotImplemented + return list(self) + other + + def __radd__(self, other): + if not isinstance(other, list): + return NotImplemented + return other + list(self) + + class BaseConverter(object): """Base class for converter objects. 
Apart from the constructor, this is an abstract class.""" - def __init__(self, name, repeat, aux, tableClass): + def __init__(self, name, repeat, aux, tableClass=None): self.name = name self.repeat = repeat self.aux = aux self.tableClass = tableClass - self.isCount = name.endswith("Count") - self.isLookupType = name.endswith("LookupType") - self.isPropagated = name in ["ClassCount", "Class2Count", "FeatureTag", "SettingsCount", "AxisCount"] + self.isCount = name.endswith("Count") or name in ['DesignAxisRecordSize', 'ValueRecordSize'] + self.isLookupType = name.endswith("LookupType") or name == "MorphType" + self.isPropagated = name in ["ClassCount", "Class2Count", "FeatureTag", "SettingsCount", "VarRegionCount", "MappingCount", "RegionAxisCount", 'DesignAxisCount', 'DesignAxisRecordSize', 'AxisValueCount', 'ValueRecordSize'] def readArray(self, reader, font, tableDict, count): """Read an array of values from the reader.""" @@ -120,8 +164,8 @@ raise NotImplementedError(self) def writeArray(self, writer, font, tableDict, values): - for i in range(len(values)): - self.write(writer, font, tableDict, values[i], i) + for i, value in enumerate(values): + self.write(writer, font, tableDict, value, i) def write(self, writer, font, tableDict, value, repeatIndex=None): """Write a value to the writer.""" @@ -161,6 +205,11 @@ def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeULong(value) +class Flags32(ULong): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", "0x%08X" % value)]) + xmlWriter.newline() + class Short(IntValue): staticSize = 2 def read(self, reader, font, tableDict): @@ -175,6 +224,20 @@ def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUShort(value) +class Int8(IntValue): + staticSize = 1 + def read(self, reader, font, tableDict): + return reader.readInt8() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeInt8(value) + 
+class UInt8(IntValue): + staticSize = 1 + def read(self, reader, font, tableDict): + return reader.readUInt8() + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUInt8(value) + class UInt24(IntValue): staticSize = 3 def read(self, reader, font, tableDict): @@ -182,10 +245,18 @@ def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUInt24(value) -class ComputedUShort(UShort): +class ComputedInt(IntValue): def xmlWrite(self, xmlWriter, font, value, name, attrs): - xmlWriter.comment("%s=%s" % (name, value)) - xmlWriter.newline() + if value is not None: + xmlWriter.comment("%s=%s" % (name, value)) + xmlWriter.newline() + +class ComputedUInt8(ComputedInt, UInt8): + pass +class ComputedUShort(ComputedInt, UShort): + pass +class ComputedULong(ComputedInt, ULong): + pass class Tag(SimpleValue): staticSize = 4 @@ -198,9 +269,7 @@ staticSize = 2 def readArray(self, reader, font, tableDict, count): glyphOrder = font.getGlyphOrder() - gids = array.array("H", reader.readData(2 * count)) - if sys.byteorder != "big": - gids.byteswap() + gids = reader.readUShortArray(count) try: l = [glyphOrder[gid] for gid in gids] except IndexError: @@ -212,6 +281,22 @@ def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeUShort(font.getGlyphID(value)) + +class NameID(UShort): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + nameTable = font.get("name") if font else None + if nameTable: + name = nameTable.getDebugName(value) + xmlWriter.write(" ") + if name: + xmlWriter.comment(name) + else: + xmlWriter.comment("missing from name table") + log.warning("name id %d missing from name table" % value) + xmlWriter.newline() + + class FloatValue(SimpleValue): def xmlRead(self, attrs, content, font): return float(attrs["value"]) @@ -222,7 +307,7 @@ return reader.readUShort() / 10 def write(self, writer, font, tableDict, value, repeatIndex=None): - 
writer.writeUShort(int(round(value * 10))) + writer.writeUShort(round(value * 10)) class Fixed(FloatValue): staticSize = 4 @@ -231,33 +316,68 @@ def write(self, writer, font, tableDict, value, repeatIndex=None): writer.writeLong(fl2fi(value, 16)) +class F2Dot14(FloatValue): + staticSize = 2 + def read(self, reader, font, tableDict): + return fi2fl(reader.readShort(), 14) + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeShort(fl2fi(value, 14)) + class Version(BaseConverter): staticSize = 4 def read(self, reader, font, tableDict): value = reader.readLong() assert (value >> 16) == 1, "Unsupported version 0x%08x" % value - return fi2fl(value, 16) + return value def write(self, writer, font, tableDict, value, repeatIndex=None): - if value < 0x10000: - value = fl2fi(value, 16) - value = int(round(value)) + value = fi2ve(value) assert (value >> 16) == 1, "Unsupported version 0x%08x" % value writer.writeLong(value) def xmlRead(self, attrs, content, font): value = attrs["value"] - value = float(int(value, 0)) if value.startswith("0") else float(value) - if value >= 0x10000: - value = fi2fl(value, 16) + value = ve2fi(value) return value def xmlWrite(self, xmlWriter, font, value, name, attrs): - if value >= 0x10000: - value = fi2fl(value, 16) - if value % 1 != 0: - # Write as hex - value = "0x%08x" % fl2fi(value, 16) + value = fi2ve(value) + value = "0x%08x" % value xmlWriter.simpletag(name, attrs + [("value", value)]) xmlWriter.newline() + @staticmethod + def fromFloat(v): + return fl2fi(v, 16) + + +class Char64(SimpleValue): + """An ASCII string with up to 64 characters. + + Unused character positions are filled with 0x00 bytes. + Used in Apple AAT fonts in the `gcid` table. 
+ """ + staticSize = 64 + + def read(self, reader, font, tableDict): + data = reader.readData(self.staticSize) + zeroPos = data.find(b"\0") + if zeroPos >= 0: + data = data[:zeroPos] + s = tounicode(data, encoding="ascii", errors="replace") + if s != tounicode(data, encoding="ascii", errors="ignore"): + log.warning('replaced non-ASCII characters in "%s"' % + s) + return s + + def write(self, writer, font, tableDict, value, repeatIndex=None): + data = tobytes(value, encoding="ascii", errors="replace") + if data != tobytes(value, encoding="ascii", errors="ignore"): + log.warning('replacing non-ASCII characters in "%s"' % + value) + if len(data) > self.staticSize: + log.warning('truncating overlong "%s" to %d bytes' % + (value, self.staticSize)) + data = (data + b"\0" * self.staticSize)[:self.staticSize] + writer.writeData(data) + class Struct(BaseConverter): @@ -292,18 +412,73 @@ Format = attrs.get("Format") if Format is not None: table.Format = int(Format) + + noPostRead = not hasattr(table, 'postRead') + if noPostRead: + # TODO Cache table.hasPropagated. 
+ cleanPropagation = False + for conv in table.getConverters(): + if conv.isPropagated: + cleanPropagation = True + if not hasattr(font, '_propagator'): + font._propagator = {} + propagator = font._propagator + assert conv.name not in propagator, (conv.name, propagator) + setattr(table, conv.name, None) + propagator[conv.name] = CountReference(table.__dict__, conv.name) + for element in content: if isinstance(element, tuple): name, attrs, content = element table.fromXML(name, attrs, content, font) else: pass + + table.populateDefaults(propagator=getattr(font, '_propagator', None)) + + if noPostRead: + if cleanPropagation: + for conv in table.getConverters(): + if conv.isPropagated: + propagator = font._propagator + del propagator[conv.name] + if not propagator: + del font._propagator + return table def __repr__(self): return "Struct of " + repr(self.tableClass) +class StructWithLength(Struct): + def read(self, reader, font, tableDict): + pos = reader.pos + table = self.tableClass() + table.decompile(reader, font) + reader.seek(pos + table.StructLength) + return table + + def write(self, writer, font, tableDict, value, repeatIndex=None): + for convIndex, conv in enumerate(value.getConverters()): + if conv.name == "StructLength": + break + lengthIndex = len(writer.items) + convIndex + if isinstance(value, FormatSwitchingBaseTable): + lengthIndex += 1 # implicit Format field + deadbeef = {1:0xDE, 2:0xDEAD, 4:0xDEADBEEF}[conv.staticSize] + + before = writer.getDataLength() + value.StructLength = deadbeef + value.compile(writer, font) + length = writer.getDataLength() - before + lengthWriter = writer.getSubWriter() + conv.write(lengthWriter, font, tableDict, length) + assert(writer.items[lengthIndex] == + b"\xde\xad\xbe\xef"[:conv.staticSize]) + writer.items[lengthIndex] = lengthWriter.getAllData() + + class Table(Struct): longOffset = False @@ -322,11 +497,6 @@ offset = self.readOffset(reader) if offset == 0: return None - if offset <= 3: - # XXX hack to work around 
buggy pala.ttf - print("*** Warning: offset is not 0, yet suspiciously low (%s). table: %s" \ - % (offset, self.tableClass.__name__)) - return None table = self.tableClass() reader = reader.getSubReader(offset) if font.lazy: @@ -357,18 +527,31 @@ return reader.readULong() +# TODO Clean / merge the SubTable and SubStruct + +class SubStruct(Struct): + def getConverter(self, tableType, lookupType): + tableClass = self.lookupTypes[tableType][lookupType] + return self.__class__(self.name, self.repeat, self.aux, tableClass) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + super(SubStruct, self).xmlWrite(xmlWriter, font, value, None, attrs) + class SubTable(Table): def getConverter(self, tableType, lookupType): tableClass = self.lookupTypes[tableType][lookupType] return self.__class__(self.name, self.repeat, self.aux, tableClass) + def xmlWrite(self, xmlWriter, font, value, name, attrs): + super(SubTable, self).xmlWrite(xmlWriter, font, value, None, attrs) class ExtSubTable(LTable, SubTable): def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.Extension = 1 # actually, mere presence of the field flags it as an Ext Subtable writer. + writer.Extension = True # actually, mere presence of the field flags it as an Ext Subtable writer. 
Table.write(self, writer, font, tableDict, value, repeatIndex) + class FeatureParams(Table): def getConverter(self, featureTag): tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams) @@ -377,7 +560,7 @@ class ValueFormat(IntValue): staticSize = 2 - def __init__(self, name, repeat, aux, tableClass): + def __init__(self, name, repeat, aux, tableClass=None): BaseConverter.__init__(self, name, repeat, aux, tableClass) self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1") def read(self, reader, font, tableDict): @@ -408,6 +591,858 @@ return value +class AATLookup(BaseConverter): + BIN_SEARCH_HEADER_SIZE = 10 + + def __init__(self, name, repeat, aux, tableClass): + BaseConverter.__init__(self, name, repeat, aux, tableClass) + if issubclass(self.tableClass, SimpleValue): + self.converter = self.tableClass(name='Value', repeat=None, aux=None) + else: + self.converter = Table(name='Value', repeat=None, aux=None, tableClass=self.tableClass) + + def read(self, reader, font, tableDict): + format = reader.readUShort() + if format == 0: + return self.readFormat0(reader, font) + elif format == 2: + return self.readFormat2(reader, font) + elif format == 4: + return self.readFormat4(reader, font) + elif format == 6: + return self.readFormat6(reader, font) + elif format == 8: + return self.readFormat8(reader, font) + else: + assert False, "unsupported lookup format: %d" % format + + def write(self, writer, font, tableDict, value, repeatIndex=None): + values = list(sorted([(font.getGlyphID(glyph), val) + for glyph, val in value.items()])) + # TODO: Also implement format 4. + formats = list(sorted(filter(None, [ + self.buildFormat0(writer, font, values), + self.buildFormat2(writer, font, values), + self.buildFormat6(writer, font, values), + self.buildFormat8(writer, font, values), + ]))) + # We use the format ID as secondary sort key to make the output + # deterministic when multiple formats have same encoded size. 
+ dataSize, lookupFormat, writeMethod = formats[0] + pos = writer.getDataLength() + writeMethod() + actualSize = writer.getDataLength() - pos + assert actualSize == dataSize, ( + "AATLookup format %d claimed to write %d bytes, but wrote %d" % + (lookupFormat, dataSize, actualSize)) + + @staticmethod + def writeBinSearchHeader(writer, numUnits, unitSize): + writer.writeUShort(unitSize) + writer.writeUShort(numUnits) + searchRange, entrySelector, rangeShift = \ + getSearchRange(n=numUnits, itemSize=unitSize) + writer.writeUShort(searchRange) + writer.writeUShort(entrySelector) + writer.writeUShort(rangeShift) + + def buildFormat0(self, writer, font, values): + numGlyphs = len(font.getGlyphOrder()) + if len(values) != numGlyphs: + return None + valueSize = self.converter.staticSize + return (2 + numGlyphs * valueSize, 0, + lambda: self.writeFormat0(writer, font, values)) + + def writeFormat0(self, writer, font, values): + writer.writeUShort(0) + for glyphID_, value in values: + self.converter.write( + writer, font, tableDict=None, + value=value, repeatIndex=None) + + def buildFormat2(self, writer, font, values): + segStart, segValue = values[0] + segEnd = segStart + segments = [] + for glyphID, curValue in values[1:]: + if glyphID != segEnd + 1 or curValue != segValue: + segments.append((segStart, segEnd, segValue)) + segStart = segEnd = glyphID + segValue = curValue + else: + segEnd = glyphID + segments.append((segStart, segEnd, segValue)) + valueSize = self.converter.staticSize + numUnits, unitSize = len(segments) + 1, valueSize + 4 + return (2 + self.BIN_SEARCH_HEADER_SIZE + numUnits * unitSize, 2, + lambda: self.writeFormat2(writer, font, segments)) + + def writeFormat2(self, writer, font, segments): + writer.writeUShort(2) + valueSize = self.converter.staticSize + numUnits, unitSize = len(segments), valueSize + 4 + self.writeBinSearchHeader(writer, numUnits, unitSize) + for firstGlyph, lastGlyph, value in segments: + writer.writeUShort(lastGlyph) + 
writer.writeUShort(firstGlyph) + self.converter.write( + writer, font, tableDict=None, + value=value, repeatIndex=None) + writer.writeUShort(0xFFFF) + writer.writeUShort(0xFFFF) + writer.writeData(b'\x00' * valueSize) + + def buildFormat6(self, writer, font, values): + valueSize = self.converter.staticSize + numUnits, unitSize = len(values), valueSize + 2 + return (2 + self.BIN_SEARCH_HEADER_SIZE + (numUnits + 1) * unitSize, 6, + lambda: self.writeFormat6(writer, font, values)) + + def writeFormat6(self, writer, font, values): + writer.writeUShort(6) + valueSize = self.converter.staticSize + numUnits, unitSize = len(values), valueSize + 2 + self.writeBinSearchHeader(writer, numUnits, unitSize) + for glyphID, value in values: + writer.writeUShort(glyphID) + self.converter.write( + writer, font, tableDict=None, + value=value, repeatIndex=None) + writer.writeUShort(0xFFFF) + writer.writeData(b'\x00' * valueSize) + + def buildFormat8(self, writer, font, values): + minGlyphID, maxGlyphID = values[0][0], values[-1][0] + if len(values) != maxGlyphID - minGlyphID + 1: + return None + valueSize = self.converter.staticSize + return (6 + len(values) * valueSize, 8, + lambda: self.writeFormat8(writer, font, values)) + + def writeFormat8(self, writer, font, values): + firstGlyphID = values[0][0] + writer.writeUShort(8) + writer.writeUShort(firstGlyphID) + writer.writeUShort(len(values)) + for _, value in values: + self.converter.write( + writer, font, tableDict=None, + value=value, repeatIndex=None) + + def readFormat0(self, reader, font): + numGlyphs = len(font.getGlyphOrder()) + data = self.converter.readArray( + reader, font, tableDict=None, count=numGlyphs) + return {font.getGlyphName(k): value + for k, value in enumerate(data)} + + def readFormat2(self, reader, font): + mapping = {} + pos = reader.pos - 2 # start of table is at UShort for format + unitSize, numUnits = reader.readUShort(), reader.readUShort() + assert unitSize >= 4 + self.converter.staticSize, unitSize + 
for i in range(numUnits): + reader.seek(pos + i * unitSize + 12) + last = reader.readUShort() + first = reader.readUShort() + value = self.converter.read(reader, font, tableDict=None) + if last != 0xFFFF: + for k in range(first, last + 1): + mapping[font.getGlyphName(k)] = value + return mapping + + def readFormat4(self, reader, font): + mapping = {} + pos = reader.pos - 2 # start of table is at UShort for format + unitSize = reader.readUShort() + assert unitSize >= 6, unitSize + for i in range(reader.readUShort()): + reader.seek(pos + i * unitSize + 12) + last = reader.readUShort() + first = reader.readUShort() + offset = reader.readUShort() + if last != 0xFFFF: + dataReader = reader.getSubReader(0) # relative to current position + dataReader.seek(pos + offset) # relative to start of table + data = self.converter.readArray( + dataReader, font, tableDict=None, + count=last - first + 1) + for k, v in enumerate(data): + mapping[font.getGlyphName(first + k)] = v + return mapping + + def readFormat6(self, reader, font): + mapping = {} + pos = reader.pos - 2 # start of table is at UShort for format + unitSize = reader.readUShort() + assert unitSize >= 2 + self.converter.staticSize, unitSize + for i in range(reader.readUShort()): + reader.seek(pos + i * unitSize + 12) + glyphID = reader.readUShort() + value = self.converter.read( + reader, font, tableDict=None) + if glyphID != 0xFFFF: + mapping[font.getGlyphName(glyphID)] = value + return mapping + + def readFormat8(self, reader, font): + first = reader.readUShort() + count = reader.readUShort() + data = self.converter.readArray( + reader, font, tableDict=None, count=count) + return {font.getGlyphName(first + k): value + for (k, value) in enumerate(data)} + + def xmlRead(self, attrs, content, font): + value = {} + for element in content: + if isinstance(element, tuple): + name, a, eltContent = element + if name == "Lookup": + value[a["glyph"]] = self.converter.xmlRead(a, eltContent, font) + return value + + def 
xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + for glyph, value in sorted(value.items()): + self.converter.xmlWrite( + xmlWriter, font, value=value, + name="Lookup", attrs=[("glyph", glyph)]) + xmlWriter.endtag(name) + xmlWriter.newline() + + +# The AAT 'ankr' table has an unusual structure: An offset to an AATLookup +# followed by an offset to a glyph data table. Other than usual, the +# offsets in the AATLookup are not relative to the beginning of +# the beginning of the 'ankr' table, but relative to the glyph data table. +# So, to find the anchor data for a glyph, one needs to add the offset +# to the data table to the offset found in the AATLookup, and then use +# the sum of these two offsets to find the actual data. +class AATLookupWithDataOffset(BaseConverter): + def read(self, reader, font, tableDict): + lookupOffset = reader.readULong() + dataOffset = reader.readULong() + lookupReader = reader.getSubReader(lookupOffset) + lookup = AATLookup('DataOffsets', None, None, UShort) + offsets = lookup.read(lookupReader, font, tableDict) + result = {} + for glyph, offset in offsets.items(): + dataReader = reader.getSubReader(offset + dataOffset) + item = self.tableClass() + item.decompile(dataReader, font) + result[glyph] = item + return result + + def write(self, writer, font, tableDict, value, repeatIndex=None): + # We do not work with OTTableWriter sub-writers because + # the offsets in our AATLookup are relative to our data + # table, for which we need to provide an offset value itself. + # It might have been possible to somehow make a kludge for + # performing this indirect offset computation directly inside + # OTTableWriter. But this would have made the internal logic + # of OTTableWriter even more complex than it already is, + # so we decided to roll our own offset computation for the + # contents of the AATLookup and associated data table. 
+ offsetByGlyph, offsetByData, dataLen = {}, {}, 0 + compiledData = [] + for glyph in sorted(value, key=font.getGlyphID): + subWriter = OTTableWriter() + value[glyph].compile(subWriter, font) + data = subWriter.getAllData() + offset = offsetByData.get(data, None) + if offset == None: + offset = dataLen + dataLen = dataLen + len(data) + offsetByData[data] = offset + compiledData.append(data) + offsetByGlyph[glyph] = offset + # For calculating the offsets to our AATLookup and data table, + # we can use the regular OTTableWriter infrastructure. + lookupWriter = writer.getSubWriter() + lookupWriter.longOffset = True + lookup = AATLookup('DataOffsets', None, None, UShort) + lookup.write(lookupWriter, font, tableDict, offsetByGlyph, None) + + dataWriter = writer.getSubWriter() + dataWriter.longOffset = True + writer.writeSubTable(lookupWriter) + writer.writeSubTable(dataWriter) + for d in compiledData: + dataWriter.writeData(d) + + def xmlRead(self, attrs, content, font): + lookup = AATLookup('DataOffsets', None, None, self.tableClass) + return lookup.xmlRead(attrs, content, font) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + lookup = AATLookup('DataOffsets', None, None, self.tableClass) + lookup.xmlWrite(xmlWriter, font, value, name, attrs) + + +class MorxSubtableConverter(BaseConverter): + _PROCESSING_ORDERS = { + # bits 30 and 28 of morx.CoverageFlags; see morx spec + (False, False): "LayoutOrder", + (True, False): "ReversedLayoutOrder", + (False, True): "LogicalOrder", + (True, True): "ReversedLogicalOrder", + } + + _PROCESSING_ORDERS_REVERSED = { + val: key for key, val in _PROCESSING_ORDERS.items() + } + + def __init__(self, name, repeat, aux): + BaseConverter.__init__(self, name, repeat, aux) + + def _setTextDirectionFromCoverageFlags(self, flags, subtable): + if (flags & 0x20) != 0: + subtable.TextDirection = "Any" + elif (flags & 0x80) != 0: + subtable.TextDirection = "Vertical" + else: + subtable.TextDirection = "Horizontal" + + def read(self, 
reader, font, tableDict): + pos = reader.pos + m = MorxSubtable() + m.StructLength = reader.readULong() + flags = reader.readUInt8() + orderKey = ((flags & 0x40) != 0, (flags & 0x10) != 0) + m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey] + self._setTextDirectionFromCoverageFlags(flags, m) + m.Reserved = reader.readUShort() + m.Reserved |= (flags & 0xF) << 16 + m.MorphType = reader.readUInt8() + m.SubFeatureFlags = reader.readULong() + tableClass = lookupTypes["morx"].get(m.MorphType) + if tableClass is None: + assert False, ("unsupported 'morx' lookup type %s" % + m.MorphType) + # To decode AAT ligatures, we need to know the subtable size. + # The easiest way to pass this along is to create a new reader + # that works on just the subtable as its data. + headerLength = reader.pos - pos + data = reader.data[ + reader.pos + : reader.pos + m.StructLength - headerLength] + assert len(data) == m.StructLength - headerLength + subReader = OTTableReader(data=data, tableTag=reader.tableTag) + m.SubStruct = tableClass() + m.SubStruct.decompile(subReader, font) + reader.seek(pos + m.StructLength) + return m + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + xmlWriter.comment("StructLength=%d" % value.StructLength) + xmlWriter.newline() + xmlWriter.simpletag("TextDirection", value=value.TextDirection) + xmlWriter.newline() + xmlWriter.simpletag("ProcessingOrder", + value=value.ProcessingOrder) + xmlWriter.newline() + if value.Reserved != 0: + xmlWriter.simpletag("Reserved", + value="0x%04x" % value.Reserved) + xmlWriter.newline() + xmlWriter.comment("MorphType=%d" % value.MorphType) + xmlWriter.newline() + xmlWriter.simpletag("SubFeatureFlags", + value="0x%08x" % value.SubFeatureFlags) + xmlWriter.newline() + value.SubStruct.toXML(xmlWriter, font) + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + m = MorxSubtable() + covFlags = 0 + m.Reserved = 0 + for eltName, 
eltAttrs, eltContent in filter(istuple, content): + if eltName == "CoverageFlags": + # Only in XML from old versions of fonttools. + covFlags = safeEval(eltAttrs["value"]) + orderKey = ((covFlags & 0x40) != 0, + (covFlags & 0x10) != 0) + m.ProcessingOrder = self._PROCESSING_ORDERS[ + orderKey] + self._setTextDirectionFromCoverageFlags( + covFlags, m) + elif eltName == "ProcessingOrder": + m.ProcessingOrder = eltAttrs["value"] + assert m.ProcessingOrder in self._PROCESSING_ORDERS_REVERSED, "unknown ProcessingOrder: %s" % m.ProcessingOrder + elif eltName == "TextDirection": + m.TextDirection = eltAttrs["value"] + assert m.TextDirection in {"Horizontal", "Vertical", "Any"}, "unknown TextDirection %s" % m.TextDirection + elif eltName == "Reserved": + m.Reserved = safeEval(eltAttrs["value"]) + elif eltName == "SubFeatureFlags": + m.SubFeatureFlags = safeEval(eltAttrs["value"]) + elif eltName.endswith("Morph"): + m.fromXML(eltName, eltAttrs, eltContent, font) + else: + assert False, eltName + m.Reserved = (covFlags & 0xF) << 16 | m.Reserved + return m + + def write(self, writer, font, tableDict, value, repeatIndex=None): + covFlags = (value.Reserved & 0x000F0000) >> 16 + reverseOrder, logicalOrder = self._PROCESSING_ORDERS_REVERSED[ + value.ProcessingOrder] + covFlags |= 0x80 if value.TextDirection == "Vertical" else 0 + covFlags |= 0x40 if reverseOrder else 0 + covFlags |= 0x20 if value.TextDirection == "Any" else 0 + covFlags |= 0x10 if logicalOrder else 0 + value.CoverageFlags = covFlags + lengthIndex = len(writer.items) + before = writer.getDataLength() + value.StructLength = 0xdeadbeef + # The high nibble of value.Reserved is actuallly encoded + # into coverageFlags, so we need to clear it here. 
+ origReserved = value.Reserved # including high nibble + value.Reserved = value.Reserved & 0xFFFF # without high nibble + value.compile(writer, font) + value.Reserved = origReserved # restore original value + assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef" + length = writer.getDataLength() - before + writer.items[lengthIndex] = struct.pack(">L", length) + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6Tables.html#ExtendedStateHeader +# TODO: Untangle the implementation of the various lookup-specific formats. +class STXHeader(BaseConverter): + def __init__(self, name, repeat, aux, tableClass): + BaseConverter.__init__(self, name, repeat, aux, tableClass) + assert issubclass(self.tableClass, AATAction) + self.classLookup = AATLookup("GlyphClasses", None, None, UShort) + if issubclass(self.tableClass, ContextualMorphAction): + self.perGlyphLookup = AATLookup("PerGlyphLookup", + None, None, GlyphID) + else: + self.perGlyphLookup = None + + def read(self, reader, font, tableDict): + table = AATStateTable() + pos = reader.pos + classTableReader = reader.getSubReader(0) + stateArrayReader = reader.getSubReader(0) + entryTableReader = reader.getSubReader(0) + actionReader = None + ligaturesReader = None + table.GlyphClassCount = reader.readULong() + classTableReader.seek(pos + reader.readULong()) + stateArrayReader.seek(pos + reader.readULong()) + entryTableReader.seek(pos + reader.readULong()) + if self.perGlyphLookup is not None: + perGlyphTableReader = reader.getSubReader(0) + perGlyphTableReader.seek(pos + reader.readULong()) + if issubclass(self.tableClass, LigatureMorphAction): + actionReader = reader.getSubReader(0) + actionReader.seek(pos + reader.readULong()) + ligComponentReader = reader.getSubReader(0) + ligComponentReader.seek(pos + reader.readULong()) + ligaturesReader = reader.getSubReader(0) + ligaturesReader.seek(pos + reader.readULong()) + numLigComponents = (ligaturesReader.pos + - ligComponentReader.pos) // 2 + assert 
numLigComponents >= 0 + table.LigComponents = \ + ligComponentReader.readUShortArray(numLigComponents) + table.Ligatures = self._readLigatures(ligaturesReader, font) + table.GlyphClasses = self.classLookup.read(classTableReader, + font, tableDict) + numStates = int((entryTableReader.pos - stateArrayReader.pos) + / (table.GlyphClassCount * 2)) + for stateIndex in range(numStates): + state = AATState() + table.States.append(state) + for glyphClass in range(table.GlyphClassCount): + entryIndex = stateArrayReader.readUShort() + state.Transitions[glyphClass] = \ + self._readTransition(entryTableReader, + entryIndex, font, + actionReader) + if self.perGlyphLookup is not None: + table.PerGlyphLookups = self._readPerGlyphLookups( + table, perGlyphTableReader, font) + return table + + def _readTransition(self, reader, entryIndex, font, actionReader): + transition = self.tableClass() + entryReader = reader.getSubReader( + reader.pos + entryIndex * transition.staticSize) + transition.decompile(entryReader, font, actionReader) + return transition + + def _readLigatures(self, reader, font): + limit = len(reader.data) + numLigatureGlyphs = (limit - reader.pos) // 2 + return [font.getGlyphName(g) + for g in reader.readUShortArray(numLigatureGlyphs)] + + def _countPerGlyphLookups(self, table): + # Somewhat annoyingly, the morx table does not encode + # the size of the per-glyph table. So we need to find + # the maximum value that MorphActions use as index + # into this table. 
+ numLookups = 0 + for state in table.States: + for t in state.Transitions.values(): + if isinstance(t, ContextualMorphAction): + if t.MarkIndex != 0xFFFF: + numLookups = max( + numLookups, + t.MarkIndex + 1) + if t.CurrentIndex != 0xFFFF: + numLookups = max( + numLookups, + t.CurrentIndex + 1) + return numLookups + + def _readPerGlyphLookups(self, table, reader, font): + pos = reader.pos + lookups = [] + for _ in range(self._countPerGlyphLookups(table)): + lookupReader = reader.getSubReader(0) + lookupReader.seek(pos + reader.readULong()) + lookups.append( + self.perGlyphLookup.read(lookupReader, font, {})) + return lookups + + def write(self, writer, font, tableDict, value, repeatIndex=None): + glyphClassWriter = OTTableWriter() + self.classLookup.write(glyphClassWriter, font, tableDict, + value.GlyphClasses, repeatIndex=None) + glyphClassData = pad(glyphClassWriter.getAllData(), 4) + glyphClassCount = max(value.GlyphClasses.values()) + 1 + glyphClassTableOffset = 16 # size of STXHeader + if self.perGlyphLookup is not None: + glyphClassTableOffset += 4 + + actionData, actionIndex = None, None + if issubclass(self.tableClass, LigatureMorphAction): + glyphClassTableOffset += 12 + actionData, actionIndex = \ + self._compileLigActions(value, font) + actionData = pad(actionData, 4) + + stateArrayData, entryTableData = self._compileStates( + font, value.States, glyphClassCount, actionIndex) + stateArrayOffset = glyphClassTableOffset + len(glyphClassData) + entryTableOffset = stateArrayOffset + len(stateArrayData) + perGlyphOffset = entryTableOffset + len(entryTableData) + perGlyphData = \ + pad(self._compilePerGlyphLookups(value, font), 4) + ligComponentsData = self._compileLigComponents(value, font) + ligaturesData = self._compileLigatures(value, font) + if actionData is None: + actionOffset = None + ligComponentsOffset = None + ligaturesOffset = None + else: + assert len(perGlyphData) == 0 + actionOffset = entryTableOffset + len(entryTableData) + ligComponentsOffset 
= actionOffset + len(actionData) + ligaturesOffset = ligComponentsOffset + len(ligComponentsData) + writer.writeULong(glyphClassCount) + writer.writeULong(glyphClassTableOffset) + writer.writeULong(stateArrayOffset) + writer.writeULong(entryTableOffset) + if self.perGlyphLookup is not None: + writer.writeULong(perGlyphOffset) + if actionOffset is not None: + writer.writeULong(actionOffset) + writer.writeULong(ligComponentsOffset) + writer.writeULong(ligaturesOffset) + writer.writeData(glyphClassData) + writer.writeData(stateArrayData) + writer.writeData(entryTableData) + writer.writeData(perGlyphData) + if actionData is not None: + writer.writeData(actionData) + if ligComponentsData is not None: + writer.writeData(ligComponentsData) + if ligaturesData is not None: + writer.writeData(ligaturesData) + + def _compileStates(self, font, states, glyphClassCount, actionIndex): + stateArrayWriter = OTTableWriter() + entries, entryIDs = [], {} + for state in states: + for glyphClass in range(glyphClassCount): + transition = state.Transitions[glyphClass] + entryWriter = OTTableWriter() + transition.compile(entryWriter, font, + actionIndex) + entryData = entryWriter.getAllData() + assert len(entryData) == transition.staticSize, ( \ + "%s has staticSize %d, " + "but actually wrote %d bytes" % ( + repr(transition), + transition.staticSize, + len(entryData))) + entryIndex = entryIDs.get(entryData) + if entryIndex is None: + entryIndex = len(entries) + entryIDs[entryData] = entryIndex + entries.append(entryData) + stateArrayWriter.writeUShort(entryIndex) + stateArrayData = pad(stateArrayWriter.getAllData(), 4) + entryTableData = pad(bytesjoin(entries), 4) + return stateArrayData, entryTableData + + def _compilePerGlyphLookups(self, table, font): + if self.perGlyphLookup is None: + return b"" + numLookups = self._countPerGlyphLookups(table) + assert len(table.PerGlyphLookups) == numLookups, ( + "len(AATStateTable.PerGlyphLookups) is %d, " + "but the actions inside the table refer 
to %d" % + (len(table.PerGlyphLookups), numLookups)) + writer = OTTableWriter() + for lookup in table.PerGlyphLookups: + lookupWriter = writer.getSubWriter() + lookupWriter.longOffset = True + self.perGlyphLookup.write(lookupWriter, font, + {}, lookup, None) + writer.writeSubTable(lookupWriter) + return writer.getAllData() + + def _compileLigActions(self, table, font): + assert issubclass(self.tableClass, LigatureMorphAction) + actions = set() + for state in table.States: + for _glyphClass, trans in state.Transitions.items(): + actions.add(trans.compileLigActions()) + result, actionIndex = b"", {} + # Sort the compiled actions in decreasing order of + # length, so that the longer sequence come before the + # shorter ones. For each compiled action ABCD, its + # suffixes BCD, CD, and D do not be encoded separately + # (in case they occur); instead, we can just store an + # index that points into the middle of the longer + # sequence. Every compiled AAT ligature sequence is + # terminated with an end-of-sequence flag, which can + # only be set on the last element of the sequence. + # Therefore, it is sufficient to consider just the + # suffixes. 
+ for a in sorted(actions, key=lambda x:(-len(x), x)): + if a not in actionIndex: + for i in range(0, len(a), 4): + suffix = a[i:] + suffixIndex = (len(result) + i) // 4 + actionIndex.setdefault( + suffix, suffixIndex) + result += a + assert len(result) % self.tableClass.staticSize == 0 + return (result, actionIndex) + + def _compileLigComponents(self, table, font): + if not hasattr(table, "LigComponents"): + return None + writer = OTTableWriter() + for component in table.LigComponents: + writer.writeUShort(component) + return writer.getAllData() + + def _compileLigatures(self, table, font): + if not hasattr(table, "Ligatures"): + return None + writer = OTTableWriter() + for glyphName in table.Ligatures: + writer.writeUShort(font.getGlyphID(glyphName)) + return writer.getAllData() + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + xmlWriter.comment("GlyphClassCount=%s" %value.GlyphClassCount) + xmlWriter.newline() + for g, klass in sorted(value.GlyphClasses.items()): + xmlWriter.simpletag("GlyphClass", glyph=g, value=klass) + xmlWriter.newline() + for stateIndex, state in enumerate(value.States): + xmlWriter.begintag("State", index=stateIndex) + xmlWriter.newline() + for glyphClass, trans in sorted(state.Transitions.items()): + trans.toXML(xmlWriter, font=font, + attrs={"onGlyphClass": glyphClass}, + name="Transition") + xmlWriter.endtag("State") + xmlWriter.newline() + for i, lookup in enumerate(value.PerGlyphLookups): + xmlWriter.begintag("PerGlyphLookup", index=i) + xmlWriter.newline() + for glyph, val in sorted(lookup.items()): + xmlWriter.simpletag("Lookup", glyph=glyph, + value=val) + xmlWriter.newline() + xmlWriter.endtag("PerGlyphLookup") + xmlWriter.newline() + if hasattr(value, "LigComponents"): + xmlWriter.begintag("LigComponents") + xmlWriter.newline() + for i, val in enumerate(getattr(value, "LigComponents")): + xmlWriter.simpletag("LigComponent", index=i, + value=val) + 
xmlWriter.newline() + xmlWriter.endtag("LigComponents") + xmlWriter.newline() + self._xmlWriteLigatures(xmlWriter, font, value, name, attrs) + xmlWriter.endtag(name) + xmlWriter.newline() + + def _xmlWriteLigatures(self, xmlWriter, font, value, name, attrs): + if not hasattr(value, "Ligatures"): + return + xmlWriter.begintag("Ligatures") + xmlWriter.newline() + for i, g in enumerate(getattr(value, "Ligatures")): + xmlWriter.simpletag("Ligature", index=i, glyph=g) + xmlWriter.newline() + xmlWriter.endtag("Ligatures") + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + table = AATStateTable() + for eltName, eltAttrs, eltContent in filter(istuple, content): + if eltName == "GlyphClass": + glyph = eltAttrs["glyph"] + value = eltAttrs["value"] + table.GlyphClasses[glyph] = safeEval(value) + elif eltName == "State": + state = self._xmlReadState(eltAttrs, eltContent, font) + table.States.append(state) + elif eltName == "PerGlyphLookup": + lookup = self.perGlyphLookup.xmlRead( + eltAttrs, eltContent, font) + table.PerGlyphLookups.append(lookup) + elif eltName == "LigComponents": + table.LigComponents = \ + self._xmlReadLigComponents( + eltAttrs, eltContent, font) + elif eltName == "Ligatures": + table.Ligatures = \ + self._xmlReadLigatures( + eltAttrs, eltContent, font) + table.GlyphClassCount = max(table.GlyphClasses.values()) + 1 + return table + + def _xmlReadState(self, attrs, content, font): + state = AATState() + for eltName, eltAttrs, eltContent in filter(istuple, content): + if eltName == "Transition": + glyphClass = safeEval(eltAttrs["onGlyphClass"]) + transition = self.tableClass() + transition.fromXML(eltName, eltAttrs, + eltContent, font) + state.Transitions[glyphClass] = transition + return state + + def _xmlReadLigComponents(self, attrs, content, font): + ligComponents = [] + for eltName, eltAttrs, _eltContent in filter(istuple, content): + if eltName == "LigComponent": + ligComponents.append( + safeEval(eltAttrs["value"])) + return 
ligComponents + + def _xmlReadLigatures(self, attrs, content, font): + ligs = [] + for eltName, eltAttrs, _eltContent in filter(istuple, content): + if eltName == "Ligature": + ligs.append(eltAttrs["glyph"]) + return ligs + + +class CIDGlyphMap(BaseConverter): + def read(self, reader, font, tableDict): + numCIDs = reader.readUShort() + result = {} + for cid, glyphID in enumerate(reader.readUShortArray(numCIDs)): + if glyphID != 0xFFFF: + result[cid] = font.getGlyphName(glyphID) + return result + + def write(self, writer, font, tableDict, value, repeatIndex=None): + items = {cid: font.getGlyphID(glyph) + for cid, glyph in value.items()} + count = max(items) + 1 if items else 0 + writer.writeUShort(count) + for cid in range(count): + writer.writeUShort(items.get(cid, 0xFFFF)) + + def xmlRead(self, attrs, content, font): + result = {} + for eName, eAttrs, _eContent in filter(istuple, content): + if eName == "CID": + result[safeEval(eAttrs["cid"])] = \ + eAttrs["glyph"].strip() + return result + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + for cid, glyph in sorted(value.items()): + if glyph is not None and glyph != 0xFFFF: + xmlWriter.simpletag( + "CID", cid=cid, glyph=glyph) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + +class GlyphCIDMap(BaseConverter): + def read(self, reader, font, tableDict): + glyphOrder = font.getGlyphOrder() + count = reader.readUShort() + cids = reader.readUShortArray(count) + if count > len(glyphOrder): + log.warning("GlyphCIDMap has %d elements, " + "but the font has only %d glyphs; " + "ignoring the rest" % + (count, len(glyphOrder))) + result = {} + for glyphID in range(min(len(cids), len(glyphOrder))): + cid = cids[glyphID] + if cid != 0xFFFF: + result[glyphOrder[glyphID]] = cid + return result + + def write(self, writer, font, tableDict, value, repeatIndex=None): + items = {font.getGlyphID(g): cid + for g, cid in value.items() + if cid is 
not None and cid != 0xFFFF} + count = max(items) + 1 if items else 0 + writer.writeUShort(count) + for glyphID in range(count): + writer.writeUShort(items.get(glyphID, 0xFFFF)) + + def xmlRead(self, attrs, content, font): + result = {} + for eName, eAttrs, _eContent in filter(istuple, content): + if eName == "CID": + result[eAttrs["glyph"]] = \ + safeEval(eAttrs["value"]) + return result + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + for glyph, cid in sorted(value.items()): + if cid is not None and cid != 0xFFFF: + xmlWriter.simpletag( + "CID", glyph=glyph, value=cid) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + class DeltaValue(BaseConverter): def read(self, reader, font, tableDict): @@ -462,20 +1497,129 @@ return safeEval(attrs["value"]) +class VarIdxMapValue(BaseConverter): + + def read(self, reader, font, tableDict): + fmt = tableDict['EntryFormat'] + nItems = tableDict['MappingCount'] + + innerBits = 1 + (fmt & 0x000F) + innerMask = (1<> 4) + read = { + 1: reader.readUInt8, + 2: reader.readUShort, + 3: reader.readUInt24, + 4: reader.readULong, + }[entrySize] + + mapping = [] + for i in range(nItems): + raw = read() + idx = ((raw & outerMask) << outerShift) | (raw & innerMask) + mapping.append(idx) + + return mapping + + def write(self, writer, font, tableDict, value, repeatIndex=None): + fmt = tableDict['EntryFormat'] + mapping = value + writer['MappingCount'].setValue(len(mapping)) + + innerBits = 1 + (fmt & 0x000F) + innerMask = (1<> 4) + write = { + 1: writer.writeUInt8, + 2: writer.writeUShort, + 3: writer.writeUInt24, + 4: writer.writeULong, + }[entrySize] + + for idx in mapping: + raw = ((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask) + write(raw) + + +class VarDataValue(BaseConverter): + + def read(self, reader, font, tableDict): + values = [] + + regionCount = tableDict["VarRegionCount"] + shortCount = tableDict["NumShorts"] + + for i in 
range(min(regionCount, shortCount)): + values.append(reader.readShort()) + for i in range(min(regionCount, shortCount), regionCount): + values.append(reader.readInt8()) + for i in range(regionCount, shortCount): + reader.readInt8() + + return values + + def write(self, writer, font, tableDict, value, repeatIndex=None): + regionCount = tableDict["VarRegionCount"] + shortCount = tableDict["NumShorts"] + + for i in range(min(regionCount, shortCount)): + writer.writeShort(value[i]) + for i in range(min(regionCount, shortCount), regionCount): + writer.writeInt8(value[i]) + for i in range(regionCount, shortCount): + writer.writeInt8(0) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + return safeEval(attrs["value"]) + + converterMapping = { # type class - "int16": Short, + "int8": Int8, + "int16": Short, + "uint8": UInt8, + "uint8": UInt8, "uint16": UShort, "uint24": UInt24, "uint32": ULong, + "char64": Char64, + "Flags32": Flags32, "Version": Version, "Tag": Tag, "GlyphID": GlyphID, + "NameID": NameID, "DeciPoints": DeciPoints, "Fixed": Fixed, + "F2Dot14": F2Dot14, "struct": Struct, "Offset": Table, "LOffset": LTable, "ValueRecord": ValueRecord, "DeltaValue": DeltaValue, + "VarIdxMapValue": VarIdxMapValue, + "VarDataValue": VarDataValue, + + # AAT + "CIDGlyphMap": CIDGlyphMap, + "GlyphCIDMap": GlyphCIDMap, + "MortChain": StructWithLength, + "MortSubtable": StructWithLength, + "MorxChain": StructWithLength, + "MorxSubtable": MorxSubtableConverter, + + # "Template" types + "AATLookup": lambda C: partial(AATLookup, tableClass=C), + "AATLookupWithDataOffset": lambda C: partial(AATLookupWithDataOffset, tableClass=C), + "STXHeader": lambda C: partial(STXHeader, tableClass=C), + "OffsetTo": lambda C: partial(Table, tableClass=C), + "LOffsetTo": lambda C: partial(LTable, tableClass=C), } diff -Nru 
fonttools-3.0/Snippets/fontTools/ttLib/tables/otData.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/otData.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/otData.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/otData.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,3 +1,4 @@ +# coding: utf-8 from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * @@ -59,23 +60,23 @@ ('FeatureParamsSize', [ ('DeciPoints', 'DesignSize', None, None, 'The design size in 720/inch units (decipoints).'), ('uint16', 'SubfamilyID', None, None, 'Serves as an identifier that associates fonts in a subfamily.'), - ('uint16', 'SubfamilyNameID', None, None, 'Subfamily NameID.'), + ('NameID', 'SubfamilyNameID', None, None, 'Subfamily NameID.'), ('DeciPoints', 'RangeStart', None, None, 'Small end of recommended usage range (exclusive) in 720/inch units.'), ('DeciPoints', 'RangeEnd', None, None, 'Large end of recommended usage range (inclusive) in 720/inch units.'), ]), ('FeatureParamsStylisticSet', [ ('uint16', 'Version', None, None, 'Set to 0.'), - ('uint16', 'UINameID', None, None, 'UI NameID.'), + ('NameID', 'UINameID', None, None, 'UI NameID.'), ]), ('FeatureParamsCharacterVariants', [ ('uint16', 'Format', None, None, 'Set to 0.'), - ('uint16', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'), - ('uint16', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'), - ('uint16', 'SampleTextNameID', None, None, 'Sample text NameID.'), + ('NameID', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'), + ('NameID', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'), + ('NameID', 'SampleTextNameID', None, None, 'Sample text NameID.'), ('uint16', 'NumNamedParameters', None, None, 'Number of named parameters.'), - ('uint16', 'FirstParamUILabelNameID', None, None, 'First NameID of UI feature parameters.'), + ('NameID', 'FirstParamUILabelNameID', None, 
None, 'First NameID of UI feature parameters.'), ('uint16', 'CharCount', None, None, 'Count of characters this feature provides glyph variants for.'), ('uint24', 'Character', 'CharCount', 0, 'Unicode characters for which this feature provides glyph variants.'), ]), @@ -134,7 +135,7 @@ ('uint16', 'StartSize', None, None, 'Smallest size to correct-in ppem'), ('uint16', 'EndSize', None, None, 'Largest size to correct-in ppem'), ('uint16', 'DeltaFormat', None, None, 'Format of DeltaValue array data: 1, 2, or 3'), - ('DeltaValue', 'DeltaValue', '', 0, 'Array of compressed data'), + ('DeltaValue', 'DeltaValue', '', 'DeltaFormat in (1,2,3)', 'Array of compressed data'), ]), @@ -143,10 +144,11 @@ # ('GPOS', [ - ('Version', 'Version', None, None, 'Version of the GPOS table-initially = 0x00010000'), + ('Version', 'Version', None, None, 'Version of the GPOS table- 0x00010000 or 0x00010001'), ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GPOS table'), ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GPOS table'), ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GPOS table'), + ('LOffset', 'FeatureVariations', None, 'Version >= 0x00010001', 'Offset to FeatureVariations table-from beginning of GPOS table'), ]), ('SinglePosFormat1', [ @@ -443,10 +445,11 @@ # ('GSUB', [ - ('Version', 'Version', None, None, 'Version of the GSUB table-initially set to 0x00010000'), + ('Version', 'Version', None, None, 'Version of the GSUB table- 0x00010000 or 0x00010001'), ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GSUB table'), ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GSUB table'), ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GSUB table'), + ('LOffset', 'FeatureVariations', None, 'Version >= 0x00010001', 'Offset to FeatureVariations table-from beginning of GSUB table'), ]), 
('SingleSubstFormat1', [ @@ -639,12 +642,13 @@ # ('GDEF', [ - ('Version', 'Version', None, None, 'Version of the GDEF table-initially 0x00010000'), + ('Version', 'Version', None, None, 'Version of the GDEF table- 0x00010000, 0x00010002, or 0x00010003'), ('Offset', 'GlyphClassDef', None, None, 'Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)'), ('Offset', 'AttachList', None, None, 'Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)'), ('Offset', 'LigCaretList', None, None, 'Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)'), ('Offset', 'MarkAttachClassDef', None, None, 'Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)'), - ('Offset', 'MarkGlyphSetsDef', None, 'int(round(Version*0x10000)) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), + ('Offset', 'MarkGlyphSetsDef', None, 'Version >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), + ('LOffset', 'VarStore', None, 'Version >= 0x00010003', 'Offset to variation store (may be NULL)'), ]), ('AttachList', [ @@ -836,6 +840,193 @@ ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order'), ]), + + # + # STAT + # + ('STAT', [ + ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000, currently 0x00010002.'), + ('uint16', 'DesignAxisRecordSize', None, None, 'Size in bytes of each design axis record'), + ('uint16', 'DesignAxisCount', None, None, 'Number of design axis records'), + ('LOffsetTo(AxisRecordArray)', 'DesignAxisRecord', None, None, 'Offset in bytes from the beginning of the STAT table to the start of the design axes array'), + ('uint16', 'AxisValueCount', None, None, 'Number of axis value tables'), + 
('LOffsetTo(AxisValueArray)', 'AxisValueArray', None, None, 'Offset in bytes from the beginning of the STAT table to the start of the axes value offset array'), + ('NameID', 'ElidedFallbackNameID', None, 'Version >= 0x00010001', 'NameID to use when all style attributes are elided.'), + ]), + + ('AxisRecordArray', [ + ('AxisRecord', 'Axis', 'DesignAxisCount', 0, 'Axis records'), + ]), + + ('AxisRecord', [ + ('Tag', 'AxisTag', None, None, 'A tag identifying the axis of design variation'), + ('NameID', 'AxisNameID', None, None, 'The name ID for entries in the "name" table that provide a display string for this axis'), + ('uint16', 'AxisOrdering', None, None, 'A value that applications can use to determine primary sorting of face names, or for ordering of descriptors when composing family or face names'), + ('uint8', 'MoreBytes', 'DesignAxisRecordSize', -8, 'Extra bytes. Set to empty array.'), + ]), + + ('AxisValueArray', [ + ('Offset', 'AxisValue', 'AxisValueCount', 0, 'Axis values'), + ]), + + ('AxisValueFormat1', [ + ('uint16', 'Format', None, None, 'Format, = 1'), + ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'), + ('uint16', 'Flags', None, None, 'Flags.'), + ('NameID', 'ValueNameID', None, None, ''), + ('Fixed', 'Value', None, None, ''), + ]), + + ('AxisValueFormat2', [ + ('uint16', 'Format', None, None, 'Format, = 2'), + ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'), + ('uint16', 'Flags', None, None, 'Flags.'), + ('NameID', 'ValueNameID', None, None, ''), + ('Fixed', 'NominalValue', None, None, ''), + ('Fixed', 'RangeMinValue', None, None, ''), + ('Fixed', 'RangeMaxValue', None, None, ''), + ]), + + ('AxisValueFormat3', [ + ('uint16', 'Format', None, None, 'Format, = 3'), + ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying 
the axis of design variation to which the axis value record applies.'), + ('uint16', 'Flags', None, None, 'Flags.'), + ('NameID', 'ValueNameID', None, None, ''), + ('Fixed', 'Value', None, None, ''), + ('Fixed', 'LinkedValue', None, None, ''), + ]), + + ('AxisValueFormat4', [ + ('uint16', 'Format', None, None, 'Format, = 4'), + ('uint16', 'AxisCount', None, None, 'The total number of axes contributing to this axis-values combination.'), + ('uint16', 'Flags', None, None, 'Flags.'), + ('NameID', 'ValueNameID', None, None, ''), + ('struct', 'AxisValueRecord', 'AxisCount', 0, 'Array of AxisValue records that provide the combination of axis values, one for each contributing axis. '), + ]), + + ('AxisValueRecord', [ + ('uint16', 'AxisIndex', None, None, 'Index into the axis record array identifying the axis of design variation to which the axis value record applies.'), + ('Fixed', 'Value', None, None, 'A numeric value for this attribute value.'), + ]), + + + # + # Variation fonts + # + + # GSUB/GPOS FeatureVariations + + ('FeatureVariations', [ + ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000'), + ('uint32', 'FeatureVariationCount', None, None, 'Number of records in the FeatureVariationRecord array'), + ('struct', 'FeatureVariationRecord', 'FeatureVariationCount', 0, 'Array of FeatureVariationRecord'), + ]), + + ('FeatureVariationRecord', [ + ('LOffset', 'ConditionSet', None, None, 'Offset to a ConditionSet table, from beginning of the FeatureVariations table.'), + ('LOffset', 'FeatureTableSubstitution', None, None, 'Offset to a FeatureTableSubstitution table, from beginning of the FeatureVariations table'), + ]), + + ('ConditionSet', [ + ('uint16', 'ConditionCount', None, None, 'Number of condition tables in the ConditionTable array'), + ('LOffset', 'ConditionTable', 'ConditionCount', 0, 'Array of condition tables.'), + ]), + + ('ConditionTableFormat1', [ + ('uint16', 'Format', None, None, 'Format, = 1'), + ('uint16', 'AxisIndex', 
None, None, 'Index for the variation axis within the fvar table, base 0.'), + ('F2Dot14', 'FilterRangeMinValue', None, None, 'Minimum normalized axis value of the font variation instances that satisfy this condition.'), + ('F2Dot14', 'FilterRangeMaxValue', None, None, 'Maximum value that satisfies this condition.'), + ]), + + ('FeatureTableSubstitution', [ + ('Version', 'Version', None, None, 'Version of the table-initially set to 0x00010000'), + ('uint16', 'SubstitutionCount', None, None, 'Number of records in the FeatureVariationRecords array'), + ('FeatureTableSubstitutionRecord', 'SubstitutionRecord', 'SubstitutionCount', 0, 'Array of FeatureTableSubstitutionRecord'), + ]), + + ('FeatureTableSubstitutionRecord', [ + ('uint16', 'FeatureIndex', None, None, 'The feature table index to match.'), + ('LOffset', 'Feature', None, None, 'Offset to an alternate feature table, from start of the FeatureTableSubstitution table.'), + ]), + + # VariationStore + + ('VarRegionAxis', [ + ('F2Dot14', 'StartCoord', None, None, ''), + ('F2Dot14', 'PeakCoord', None, None, ''), + ('F2Dot14', 'EndCoord', None, None, ''), + ]), + + ('VarRegion', [ + ('struct', 'VarRegionAxis', 'RegionAxisCount', 0, ''), + ]), + + ('VarRegionList', [ + ('uint16', 'RegionAxisCount', None, None, ''), + ('uint16', 'RegionCount', None, None, ''), + ('VarRegion', 'Region', 'RegionCount', 0, ''), + ]), + + ('VarData', [ + ('uint16', 'ItemCount', None, None, ''), + ('uint16', 'NumShorts', None, None, ''), # Automatically computed + ('uint16', 'VarRegionCount', None, None, ''), + ('uint16', 'VarRegionIndex', 'VarRegionCount', 0, ''), + ('VarDataValue', 'Item', 'ItemCount', 0, ''), + ]), + + ('VarStore', [ + ('uint16', 'Format', None, None, 'Set to 1.'), + ('LOffset', 'VarRegionList', None, None, ''), + ('uint16', 'VarDataCount', None, None, ''), + ('LOffset', 'VarData', 'VarDataCount', 0, ''), + ]), + + # Variation helpers + + ('VarIdxMap', [ + ('uint16', 'EntryFormat', None, None, ''), # Automatically computed 
+ ('uint16', 'MappingCount', None, None, ''), # Automatically computed + ('VarIdxMapValue', 'mapping', '', 0, 'Array of compressed data'), + ]), + + # Glyph advance variations + + ('HVAR', [ + ('Version', 'Version', None, None, 'Version of the HVAR table-initially = 0x00010000'), + ('LOffset', 'VarStore', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'AdvWidthMap', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'LsbMap', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'RsbMap', None, None, ''), + ]), + ('VVAR', [ + ('Version', 'Version', None, None, 'Version of the VVAR table-initially = 0x00010000'), + ('LOffset', 'VarStore', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'AdvHeightMap', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'TsbMap', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'BsbMap', None, None, ''), + ('LOffsetTo(VarIdxMap)', 'VOrgMap', None, None, 'Vertical origin mapping.'), + ]), + + # Font-wide metrics variations + + ('MetricsValueRecord', [ + ('Tag', 'ValueTag', None, None, '4-byte font-wide measure identifier'), + ('uint32', 'VarIdx', None, None, 'Combined outer-inner variation index'), + ('uint8', 'MoreBytes', 'ValueRecordSize', -8, 'Extra bytes. 
Set to empty array.'), + ]), + + ('MVAR', [ + ('Version', 'Version', None, None, 'Version of the MVAR table-initially = 0x00010000'), + ('uint16', 'Reserved', None, None, 'Set to 0'), + ('uint16', 'ValueRecordSize', None, None, ''), + ('uint16', 'ValueRecordCount', None, None, ''), + ('Offset', 'VarStore', None, None, ''), + ('MetricsValueRecord', 'ValueRecord', 'ValueRecordCount', 0, ''), + ]), + + # # math # @@ -989,6 +1180,96 @@ ## Apple Advanced Typography (AAT) tables ## + ('AATLookupSegment', [ + ('uint16', 'lastGlyph', None, None, 'Last glyph index in this segment.'), + ('uint16', 'firstGlyph', None, None, 'First glyph index in this segment.'), + ('uint16', 'value', None, None, 'A 16-bit offset from the start of the table to the data.'), + ]), + + + # + # ankr + # + + ('ankr', [ + ('struct', 'AnchorPoints', None, None, 'Anchor points table.'), + ]), + + ('AnchorPointsFormat0', [ + ('uint16', 'Format', None, None, 'Format of the anchor points table, = 0.'), + ('uint16', 'Flags', None, None, 'Flags. Currenty unused, set to zero.'), + ('AATLookupWithDataOffset(AnchorGlyphData)', 'Anchors', None, None, 'Table of with anchor overrides for each glyph.'), + ]), + + ('AnchorGlyphData', [ + ('uint32', 'AnchorPointCount', None, None, 'Number of anchor points for this glyph.'), + ('struct', 'AnchorPoint', 'AnchorPointCount', 0, 'Individual anchor points.'), + ]), + + ('AnchorPoint', [ + ('int16', 'XCoordinate', None, None, 'X coordinate of this anchor point.'), + ('int16', 'YCoordinate', None, None, 'Y coordinate of this anchor point.'), + ]), + + # + # bsln + # + + ('bsln', [ + ('Version', 'Version', None, None, 'Version number of the AAT baseline table (0x00010000 for the initial version).'), + ('struct', 'Baseline', None, None, 'Baseline table.'), + ]), + + ('BaselineFormat0', [ + ('uint16', 'Format', None, None, 'Format of the baseline table, = 0.'), + ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. 
This value can be from 0 through 31.'), + ('uint16', 'Delta', 32, 0, u'These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.'), + ]), + + ('BaselineFormat1', [ + ('uint16', 'Format', None, None, 'Format of the baseline table, = 1.'), + ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'), + ('uint16', 'Delta', 32, 0, u'These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.'), + ('AATLookup(uint16)', 'BaselineValues', None, None, 'Lookup table that maps glyphs to their baseline values.'), + ]), + + ('BaselineFormat2', [ + ('uint16', 'Format', None, None, 'Format of the baseline table, = 1.'), + ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'), + ('GlyphID', 'StandardGlyph', None, None, 'Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.'), + ('uint16', 'ControlPoint', 32, 0, 'Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.'), + ]), + + ('BaselineFormat3', [ + ('uint16', 'Format', None, None, 'Format of the baseline table, = 1.'), + ('uint16', 'DefaultBaseline', None, None, 'Default baseline value for all glyphs. This value can be from 0 through 31.'), + ('GlyphID', 'StandardGlyph', None, None, 'Glyph index of the glyph in this font to be used to set the baseline values. 
This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.'), + ('uint16', 'ControlPoint', 32, 0, 'Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.'), + ('AATLookup(uint16)', 'BaselineValues', None, None, 'Lookup table that maps glyphs to their baseline values.'), + ]), + + + # + # cidg + # + + ('cidg', [ + ('struct', 'CIDGlyphMapping', None, None, 'CID-to-glyph mapping table.'), + ]), + + ('CIDGlyphMappingFormat0', [ + ('uint16', 'Format', None, None, 'Format of the CID-to-glyph mapping table, = 0.'), + ('uint16', 'DataFormat', None, None, 'Currenty unused, set to zero.'), + ('uint32', 'StructLength', None, None, 'Size of the table in bytes.'), + ('uint16', 'Registry', None, None, 'The registry ID.'), + ('char64', 'RegistryName', None, None, 'The registry name in ASCII; unused bytes should be set to 0.'), + ('uint16', 'Order', None, None, 'The order ID.'), + ('char64', 'OrderName', None, None, 'The order name in ASCII; unused bytes should be set to 0.'), + ('uint16', 'SupplementVersion', None, None, 'The supplement version.'), + ('CIDGlyphMap', 'Mapping', None, None, 'A mapping from CIDs to the glyphs in the font, starting with CID 0. 
If a CID from the identified collection has no glyph in the font, 0xFFFF is used'), + ]), + + # # feat # @@ -1010,7 +1291,7 @@ ('uint16', 'SettingsCount', None, None, 'The number of records in the setting name array.'), ('LOffset', 'Settings', None, None, 'Offset to setting table for this feature.'), ('uint16', 'FeatureFlags', None, None, 'Single-bit flags associated with the feature type.'), - ('uint16', 'FeatureNameID', None, None, 'The name table index for the feature name.'), + ('NameID', 'FeatureNameID', None, None, 'The name table index for the feature name.'), ]), ('Settings', [ @@ -1019,7 +1300,217 @@ ('Setting', [ ('uint16', 'SettingValue', None, None, 'The setting.'), - ('uint16', 'SettingNameID', None, None, 'The name table index for the setting name.'), + ('NameID', 'SettingNameID', None, None, 'The name table index for the setting name.'), + ]), + + + # + # gcid + # + + ('gcid', [ + ('struct', 'GlyphCIDMapping', None, None, 'Glyph to CID mapping table.'), + ]), + + ('GlyphCIDMappingFormat0', [ + ('uint16', 'Format', None, None, 'Format of the glyph-to-CID mapping table, = 0.'), + ('uint16', 'DataFormat', None, None, 'Currenty unused, set to zero.'), + ('uint32', 'StructLength', None, None, 'Size of the table in bytes.'), + ('uint16', 'Registry', None, None, 'The registry ID.'), + ('char64', 'RegistryName', None, None, 'The registry name in ASCII; unused bytes should be set to 0.'), + ('uint16', 'Order', None, None, 'The order ID.'), + ('char64', 'OrderName', None, None, 'The order name in ASCII; unused bytes should be set to 0.'), + ('uint16', 'SupplementVersion', None, None, 'The supplement version.'), + ('GlyphCIDMap', 'Mapping', None, None, 'The CIDs for the glyphs in the font, starting with glyph 0. 
If a glyph does not correspond to a CID in the identified collection, 0xFFFF is used'), + ]), + + + # + # lcar + # + + ('lcar', [ + ('Version', 'Version', None, None, 'Version number of the ligature caret table (0x00010000 for the initial version).'), + ('struct', 'LigatureCarets', None, None, 'Ligature carets table.'), + ]), + + ('LigatureCaretsFormat0', [ + ('uint16', 'Format', None, None, 'Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.'), + ('AATLookup(LigCaretDistances)', 'Carets', None, None, 'Lookup table associating ligature glyphs with their caret positions, in font unit distances.'), + ]), + + ('LigatureCaretsFormat1', [ + ('uint16', 'Format', None, None, 'Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.'), + ('AATLookup(LigCaretPoints)', 'Carets', None, None, 'Lookup table associating ligature glyphs with their caret positions, as control points.'), + ]), + + ('LigCaretDistances', [ + ('uint16', 'DivsionPointCount', None, None, 'Number of division points.'), + ('int16', 'DivisionPoint', 'DivsionPointCount', 0, 'Distance in font units through which a subdivision is made orthogonally to the baseline.'), + ]), + + ('LigCaretPoints', [ + ('uint16', 'DivsionPointCount', None, None, 'Number of division points.'), + ('int16', 'DivisionPoint', 'DivsionPointCount', 0, 'The number of the control point through which a subdivision is made orthogonally to the baseline.'), + ]), + + + # + # mort + # + + ('mort', [ + ('Version', 'Version', None, None, 'Version of the mort table.'), + ('uint32', 'MorphChainCount', None, None, 'Number of metamorphosis chains.'), + ('MortChain', 'MorphChain', 'MorphChainCount', 0, 'Array of metamorphosis chains.'), + ]), + + ('MortChain', [ + ('Flags32', 'DefaultFlags', None, None, 'The default specification 
for subtables.'), + ('uint32', 'StructLength', None, None, 'Total byte count, including this header; must be a multiple of 4.'), + ('uint16', 'MorphFeatureCount', None, None, 'Number of metamorphosis feature entries.'), + ('uint16', 'MorphSubtableCount', None, None, 'The number of subtables in the chain.'), + ('struct', 'MorphFeature', 'MorphFeatureCount', 0, 'Array of metamorphosis features.'), + ('MortSubtable', 'MorphSubtable', 'MorphSubtableCount', 0, 'Array of metamorphosis subtables.'), + ]), + + ('MortSubtable', [ + ('uint16', 'StructLength', None, None, 'Total subtable length, including this header.'), + ('uint8', 'CoverageFlags', None, None, 'Most significant byte of coverage flags.'), + ('uint8', 'MorphType', None, None, 'Subtable type.'), + ('Flags32', 'SubFeatureFlags', None, None, 'The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).'), + ('SubStruct', 'SubStruct', None, None, 'SubTable.'), + ]), + + # + # morx + # + + ('morx', [ + ('uint16', 'Version', None, None, 'Version of the morx table.'), + ('uint16', 'Reserved', None, None, 'Reserved (set to zero).'), + ('uint32', 'MorphChainCount', None, None, 'Number of extended metamorphosis chains.'), + ('MorxChain', 'MorphChain', 'MorphChainCount', 0, 'Array of extended metamorphosis chains.'), + ]), + + ('MorxChain', [ + ('Flags32', 'DefaultFlags', None, None, 'The default specification for subtables.'), + ('uint32', 'StructLength', None, None, 'Total byte count, including this header; must be a multiple of 4.'), + ('uint32', 'MorphFeatureCount', None, None, 'Number of feature subtable entries.'), + ('uint32', 'MorphSubtableCount', None, None, 'The number of subtables in the chain.'), + ('MorphFeature', 'MorphFeature', 'MorphFeatureCount', 0, 'Array of metamorphosis features.'), + ('MorxSubtable', 'MorphSubtable', 'MorphSubtableCount', 0, 'Array of extended metamorphosis subtables.'), + ]), + + ('MorphFeature', 
[ + ('uint16', 'FeatureType', None, None, 'The type of feature.'), + ('uint16', 'FeatureSetting', None, None, "The feature's setting (aka selector)."), + ('Flags32', 'EnableFlags', None, None, 'Flags for the settings that this feature and setting enables.'), + ('Flags32', 'DisableFlags', None, None, 'Complement of flags for the settings that this feature and setting disable.'), + ]), + + # Apple TrueType Reference Manual, chapter “The ‘morx’ table”, + # section “Metamorphosis Subtables”. + # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html + ('MorxSubtable', [ + ('uint32', 'StructLength', None, None, 'Total subtable length, including this header.'), + ('uint8', 'CoverageFlags', None, None, 'Most significant byte of coverage flags.'), + ('uint16', 'Reserved', None, None, 'Unused.'), + ('uint8', 'MorphType', None, None, 'Subtable type.'), + ('Flags32', 'SubFeatureFlags', None, None, 'The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).'), + ('SubStruct', 'SubStruct', None, None, 'SubTable.'), + ]), + + ('StateHeader', [ + ('uint32', 'ClassCount', None, None, 'Number of classes, which is the number of 16-bit entry indices in a single line in the state array.'), + ('uint32', 'MorphClass', None, None, 'Offset from the start of this state table header to the start of the class table.'), + ('uint32', 'StateArrayOffset', None, None, 'Offset from the start of this state table header to the start of the state array.'), + ('uint32', 'EntryTableOffset', None, None, 'Offset from the start of this state table header to the start of the entry table.'), + ]), + + ('RearrangementMorph', [ + ('STXHeader(RearrangementMorphAction)', 'StateTable', None, None, 'Finite-state transducer table for indic rearrangement.'), + ]), + + ('ContextualMorph', [ + ('STXHeader(ContextualMorphAction)', 'StateTable', None, None, 'Finite-state transducer for contextual glyph 
substitution.'), + ]), + + ('LigatureMorph', [ + ('STXHeader(LigatureMorphAction)', 'StateTable', None, None, 'Finite-state transducer for ligature substitution.'), + ]), + + ('NoncontextualMorph', [ + ('AATLookup(GlyphID)', 'Substitution', None, None, 'The noncontextual glyph substitution table.'), + ]), + + ('InsertionMorph', [ + ('struct', 'StateHeader', None, None, 'Header.'), + # TODO: Add missing parts. + ]), + + ('MorphClass', [ + ('uint16', 'FirstGlyph', None, None, 'Glyph index of the first glyph in the class table.'), + #('uint16', 'GlyphCount', None, None, 'Number of glyphs in class table.'), + #('uint8', 'GlyphClass', 'GlyphCount', 0, 'The class codes (indexed by glyph index minus firstGlyph). Class codes range from 0 to the value of stateSize minus 1.'), + ]), + + # If the 'morx' table version is 3 or greater, then the last subtable in the chain is followed by a subtableGlyphCoverageArray, as described below. + # ('Offset', 'MarkGlyphSetsDef', None, 'round(Version*0x10000) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), + + + # + # prop + # + + ('prop', [ + ('Fixed', 'Version', None, None, 'Version number of the AAT glyphs property table. Version 1.0 is the initial table version. Version 2.0, which is recognized by macOS 8.5 and later, adds support for the “attaches on right” bit. Version 3.0, which gets recognized by macOS X and iOS, adds support for the additional directional properties defined in Unicode 3.0.'), + ('struct', 'GlyphProperties', None, None, 'Glyph properties.'), + ]), + + ('GlyphPropertiesFormat0', [ + ('uint16', 'Format', None, None, 'Format, = 0.'), + ('uint16', 'DefaultProperties', None, None, 'Default properties applied to a glyph. 
Since there is no lookup table in prop format 0, the default properties get applied to every glyph in the font.'), + ]), + + ('GlyphPropertiesFormat1', [ + ('uint16', 'Format', None, None, 'Format, = 1.'), + ('uint16', 'DefaultProperties', None, None, 'Default properties applied to a glyph if that glyph is not present in the Properties lookup table.'), + ('AATLookup(uint16)', 'Properties', None, None, 'Lookup data associating glyphs with their properties.'), + ]), + + + # + # opbd + # + + ('opbd', [ + ('Version', 'Version', None, None, 'Version number of the optical bounds table (0x00010000 for the initial version).'), + ('struct', 'OpticalBounds', None, None, 'Optical bounds table.'), + ]), + + ('OpticalBoundsFormat0', [ + ('uint16', 'Format', None, None, 'Format of the optical bounds table, = 0.'), + ('AATLookup(OpticalBoundsDeltas)', 'OpticalBoundsDeltas', None, None, 'Lookup table associating glyphs with their optical bounds, given as deltas in font units.'), + ]), + + ('OpticalBoundsFormat1', [ + ('uint16', 'Format', None, None, 'Format of the optical bounds table, = 1.'), + ('AATLookup(OpticalBoundsPoints)', 'OpticalBoundsPoints', None, None, 'Lookup table associating glyphs with their optical bounds, given as references to control points.'), + ]), + + ('OpticalBoundsDeltas', [ + ('int16', 'Left', None, None, 'Delta value for the left-side optical edge.'), + ('int16', 'Top', None, None, 'Delta value for the top-side optical edge.'), + ('int16', 'Right', None, None, 'Delta value for the right-side optical edge.'), + ('int16', 'Bottom', None, None, 'Delta value for the bottom-side optical edge.'), + ]), + + ('OpticalBoundsPoints', [ + ('int16', 'Left', None, None, 'Control point index for the left-side optical edge, or -1 if this glyph has none.'), + ('int16', 'Top', None, None, 'Control point index for the top-side optical edge, or -1 if this glyph has none.'), + ('int16', 'Right', None, None, 'Control point index for the right-side optical edge, or -1 if this 
glyph has none.'), + ('int16', 'Bottom', None, None, 'Control point index for the bottom-side optical edge, or -1 if this glyph has none.'), ]), ] diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/otTables.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/otTables.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/otTables.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/otTables.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,14 +1,421 @@ +# coding: utf-8 """fontTools.ttLib.tables.otTables -- A collection of classes representing the various OpenType subtables. Most are constructed upon import from data in otData.py, all are populated with converter objects from otConverters.py. """ -from __future__ import print_function, division, absolute_import +from __future__ import print_function, division, absolute_import, unicode_literals from fontTools.misc.py23 import * +from fontTools.misc.textTools import safeEval from .otBase import BaseTable, FormatSwitchingBaseTable import operator -import warnings +import logging +import struct + + +log = logging.getLogger(__name__) + + +class AATStateTable(object): + def __init__(self): + self.GlyphClasses = {} # GlyphID --> GlyphClass + self.States = [] # List of AATState, indexed by state number + self.PerGlyphLookups = [] # [{GlyphID:GlyphID}, ...] 
+ + +class AATState(object): + def __init__(self): + self.Transitions = {} # GlyphClass --> AATAction + + +class AATAction(object): + _FLAGS = None + + def _writeFlagsToXML(self, xmlWriter): + flags = [f for f in self._FLAGS if self.__dict__[f]] + if flags: + xmlWriter.simpletag("Flags", value=",".join(flags)) + xmlWriter.newline() + if self.ReservedFlags != 0: + xmlWriter.simpletag( + "ReservedFlags", + value='0x%04X' % self.ReservedFlags) + xmlWriter.newline() + + def _setFlag(self, flag): + assert flag in self._FLAGS, "unsupported flag %s" % flag + self.__dict__[flag] = True + + +class RearrangementMorphAction(AATAction): + staticSize = 4 + _FLAGS = ["MarkFirst", "DontAdvance", "MarkLast"] + + _VERBS = { + 0: "no change", + 1: "Ax ⇒ xA", + 2: "xD ⇒ Dx", + 3: "AxD ⇒ DxA", + 4: "ABx ⇒ xAB", + 5: "ABx ⇒ xBA", + 6: "xCD ⇒ CDx", + 7: "xCD ⇒ DCx", + 8: "AxCD ⇒ CDxA", + 9: "AxCD ⇒ DCxA", + 10: "ABxD ⇒ DxAB", + 11: "ABxD ⇒ DxBA", + 12: "ABxCD ⇒ CDxAB", + 13: "ABxCD ⇒ CDxBA", + 14: "ABxCD ⇒ DCxAB", + 15: "ABxCD ⇒ DCxBA", + } + + def __init__(self): + self.NewState = 0 + self.Verb = 0 + self.MarkFirst = False + self.DontAdvance = False + self.MarkLast = False + self.ReservedFlags = 0 + + def compile(self, writer, font, actionIndex): + assert actionIndex is None + writer.writeUShort(self.NewState) + assert self.Verb >= 0 and self.Verb <= 15, self.Verb + flags = self.Verb | self.ReservedFlags + if self.MarkFirst: flags |= 0x8000 + if self.DontAdvance: flags |= 0x4000 + if self.MarkLast: flags |= 0x2000 + writer.writeUShort(flags) + + def decompile(self, reader, font, actionReader): + assert actionReader is None + self.NewState = reader.readUShort() + flags = reader.readUShort() + self.Verb = flags & 0xF + self.MarkFirst = bool(flags & 0x8000) + self.DontAdvance = bool(flags & 0x4000) + self.MarkLast = bool(flags & 0x2000) + self.ReservedFlags = flags & 0x1FF0 + + def toXML(self, xmlWriter, font, attrs, name): + xmlWriter.begintag(name, **attrs) + xmlWriter.newline() + 
xmlWriter.simpletag("NewState", value=self.NewState) + xmlWriter.newline() + self._writeFlagsToXML(xmlWriter) + xmlWriter.simpletag("Verb", value=self.Verb) + verbComment = self._VERBS.get(self.Verb) + if verbComment is not None: + xmlWriter.comment(verbComment) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + self.NewState = self.Verb = self.ReservedFlags = 0 + self.MarkFirst = self.DontAdvance = self.MarkLast = False + content = [t for t in content if isinstance(t, tuple)] + for eltName, eltAttrs, eltContent in content: + if eltName == "NewState": + self.NewState = safeEval(eltAttrs["value"]) + elif eltName == "Verb": + self.Verb = safeEval(eltAttrs["value"]) + elif eltName == "ReservedFlags": + self.ReservedFlags = safeEval(eltAttrs["value"]) + elif eltName == "Flags": + for flag in eltAttrs["value"].split(","): + self._setFlag(flag.strip()) + + +class ContextualMorphAction(AATAction): + staticSize = 8 + _FLAGS = ["SetMark", "DontAdvance"] + + def __init__(self): + self.NewState = 0 + self.SetMark, self.DontAdvance = False, False + self.ReservedFlags = 0 + self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF + + def compile(self, writer, font, actionIndex): + assert actionIndex is None + writer.writeUShort(self.NewState) + flags = self.ReservedFlags + if self.SetMark: flags |= 0x8000 + if self.DontAdvance: flags |= 0x4000 + writer.writeUShort(flags) + writer.writeUShort(self.MarkIndex) + writer.writeUShort(self.CurrentIndex) + + def decompile(self, reader, font, actionReader): + assert actionReader is None + self.NewState = reader.readUShort() + flags = reader.readUShort() + self.SetMark = bool(flags & 0x8000) + self.DontAdvance = bool(flags & 0x4000) + self.ReservedFlags = flags & 0x3FFF + self.MarkIndex = reader.readUShort() + self.CurrentIndex = reader.readUShort() + + def toXML(self, xmlWriter, font, attrs, name): + xmlWriter.begintag(name, **attrs) + xmlWriter.newline() + 
xmlWriter.simpletag("NewState", value=self.NewState) + xmlWriter.newline() + self._writeFlagsToXML(xmlWriter) + xmlWriter.simpletag("MarkIndex", value=self.MarkIndex) + xmlWriter.newline() + xmlWriter.simpletag("CurrentIndex", + value=self.CurrentIndex) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + self.NewState = self.ReservedFlags = 0 + self.SetMark = self.DontAdvance = False + self.MarkIndex, self.CurrentIndex = 0xFFFF, 0xFFFF + content = [t for t in content if isinstance(t, tuple)] + for eltName, eltAttrs, eltContent in content: + if eltName == "NewState": + self.NewState = safeEval(eltAttrs["value"]) + elif eltName == "Flags": + for flag in eltAttrs["value"].split(","): + self._setFlag(flag.strip()) + elif eltName == "ReservedFlags": + self.ReservedFlags = safeEval(eltAttrs["value"]) + elif eltName == "MarkIndex": + self.MarkIndex = safeEval(eltAttrs["value"]) + elif eltName == "CurrentIndex": + self.CurrentIndex = safeEval(eltAttrs["value"]) + + +class LigAction(object): + def __init__(self): + self.Store = False + # GlyphIndexDelta is a (possibly negative) delta that gets + # added to the glyph ID at the top of the AAT runtime + # execution stack. It is *not* a byte offset into the + # morx table. The result of the addition, which is performed + # at run time by the shaping engine, is an index into + # the ligature components table. See 'morx' specification. + # In the AAT specification, this field is called Offset; + # but its meaning is quite different from other offsets + # in either AAT or OpenType, so we use a different name. 
+ self.GlyphIndexDelta = 0 + + +class LigatureMorphAction(AATAction): + staticSize = 6 + _FLAGS = ["SetComponent", "DontAdvance"] + + def __init__(self): + self.NewState = 0 + self.SetComponent, self.DontAdvance = False, False + self.ReservedFlags = 0 + self.Actions = [] + + def compile(self, writer, font, actionIndex): + assert actionIndex is not None + writer.writeUShort(self.NewState) + flags = self.ReservedFlags + if self.SetComponent: flags |= 0x8000 + if self.DontAdvance: flags |= 0x4000 + if len(self.Actions) > 0: flags |= 0x2000 + writer.writeUShort(flags) + if len(self.Actions) > 0: + actions = self.compileLigActions() + writer.writeUShort(actionIndex[actions]) + else: + writer.writeUShort(0) + + def decompile(self, reader, font, actionReader): + assert actionReader is not None + self.NewState = reader.readUShort() + flags = reader.readUShort() + self.SetComponent = bool(flags & 0x8000) + self.DontAdvance = bool(flags & 0x4000) + performAction = bool(flags & 0x2000) + # As of 2017-09-12, the 'morx' specification says that + # the reserved bitmask in ligature subtables is 0x3FFF. + # However, the specification also defines a flag 0x2000, + # so the reserved value should actually be 0x1FFF. + # TODO: Report this specification bug to Apple. 
+ self.ReservedFlags = flags & 0x1FFF + actionIndex = reader.readUShort() + if performAction: + self.Actions = self._decompileLigActions( + actionReader, actionIndex) + else: + self.Actions = [] + + def compileLigActions(self): + result = [] + for i, action in enumerate(self.Actions): + last = (i == len(self.Actions) - 1) + value = action.GlyphIndexDelta & 0x3FFFFFFF + value |= 0x80000000 if last else 0 + value |= 0x40000000 if action.Store else 0 + result.append(struct.pack(">L", value)) + return bytesjoin(result) + + def _decompileLigActions(self, actionReader, actionIndex): + actions = [] + last = False + reader = actionReader.getSubReader( + actionReader.pos + actionIndex * 4) + while not last: + value = reader.readULong() + last = bool(value & 0x80000000) + action = LigAction() + actions.append(action) + action.Store = bool(value & 0x40000000) + delta = value & 0x3FFFFFFF + if delta >= 0x20000000: # sign-extend 30-bit value + delta = -0x40000000 + delta + action.GlyphIndexDelta = delta + return actions + + def fromXML(self, name, attrs, content, font): + self.NewState = self.ReservedFlags = 0 + self.SetComponent = self.DontAdvance = False + self.ReservedFlags = 0 + self.Actions = [] + content = [t for t in content if isinstance(t, tuple)] + for eltName, eltAttrs, eltContent in content: + if eltName == "NewState": + self.NewState = safeEval(eltAttrs["value"]) + elif eltName == "Flags": + for flag in eltAttrs["value"].split(","): + self._setFlag(flag.strip()) + elif eltName == "ReservedFlags": + self.ReservedFlags = safeEval(eltAttrs["value"]) + elif eltName == "Action": + action = LigAction() + flags = eltAttrs.get("Flags", "").split(",") + flags = [f.strip() for f in flags] + action.Store = "Store" in flags + action.GlyphIndexDelta = safeEval( + eltAttrs["GlyphIndexDelta"]) + self.Actions.append(action) + + def toXML(self, xmlWriter, font, attrs, name): + xmlWriter.begintag(name, **attrs) + xmlWriter.newline() + xmlWriter.simpletag("NewState", 
value=self.NewState) + xmlWriter.newline() + self._writeFlagsToXML(xmlWriter) + for action in self.Actions: + attribs = [("GlyphIndexDelta", action.GlyphIndexDelta)] + if action.Store: + attribs.append(("Flags", "Store")) + xmlWriter.simpletag("Action", attribs) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + +class InsertionMorphAction(AATAction): + staticSize = 8 + + _FLAGS = ["SetMark", "DontAdvance", + "CurrentIsKashidaLike", "MarkedIsKashidaLike", + "CurrentInsertBefore", "MarkedInsertBefore"] + + def __init__(self): + self.NewState = 0 + for flag in self._FLAGS: + setattr(self, flag, False) + self.ReservedFlags = 0 + self.CurrentInsertionAction, self.MarkedInsertionAction = [], [] + + def compile(self, writer, font, actionIndex): + assert actionIndex is not None + writer.writeUShort(self.NewState) + flags = self.ReservedFlags + if self.SetMark: flags |= 0x8000 + if self.DontAdvance: flags |= 0x4000 + if self.CurrentIsKashidaLike: flags |= 0x2000 + if self.MarkedIsKashidaLike: flags |= 0x1000 + if self.CurrentInsertBefore: flags |= 0x0800 + if self.MarkedInsertBefore: flags |= 0x0400 + flags |= len(self.CurrentInsertionAction) << 5 + flags |= len(self.MarkedInsertionAction) + writer.writeUShort(flags) + if len(self.CurrentInsertionAction) > 0: + currentIndex = actionIndex[ + tuple(self.CurrentInsertionAction)] + else: + currentIndex = 0xFFFF + writer.writeUShort(currentIndex) + if len(self.MarkedInsertionAction) > 0: + markedIndex = actionIndex[ + tuple(self.MarkedInsertionAction)] + else: + markedIndex = 0xFFFF + writer.writeUShort(markedIndex) + + def decompile(self, reader, font, actionReader): + assert actionReader is not None + self.NewState = reader.readUShort() + flags = reader.readUShort() + self.SetMark = bool(flags & 0x8000) + self.DontAdvance = bool(flags & 0x4000) + self.CurrentIsKashidaLike = bool(flags & 0x2000) + self.MarkedIsKashidaLike = bool(flags & 0x1000) + self.CurrentInsertBefore = bool(flags & 0x0800) + 
self.MarkedInsertBefore = bool(flags & 0x0400) + self.CurrentInsertionAction = self._decompileInsertionAction( + actionReader, font, + index=reader.readUShort(), + count=((flags & 0x03E0) >> 5)) + self.MarkedInsertionAction = self._decompileInsertionAction( + actionReader, font, + index=reader.readUShort(), + count=(flags & 0x001F)) + + def _decompileInsertionAction(self, actionReader, font, index, count): + if index == 0xFFFF or count == 0: + return [] + reader = actionReader.getSubReader( + actionReader.pos + index * 2) + return [font.getGlyphName(glyphID) + for glyphID in reader.readUShortArray(count)] + + def toXML(self, xmlWriter, font, attrs, name): + xmlWriter.begintag(name, **attrs) + xmlWriter.newline() + xmlWriter.simpletag("NewState", value=self.NewState) + xmlWriter.newline() + self._writeFlagsToXML(xmlWriter) + for g in self.CurrentInsertionAction: + xmlWriter.simpletag("CurrentInsertionAction", glyph=g) + xmlWriter.newline() + for g in self.MarkedInsertionAction: + xmlWriter.simpletag("MarkedInsertionAction", glyph=g) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + self.__init__() + content = [t for t in content if isinstance(t, tuple)] + for eltName, eltAttrs, eltContent in content: + if eltName == "NewState": + self.NewState = safeEval(eltAttrs["value"]) + elif eltName == "Flags": + for flag in eltAttrs["value"].split(","): + self._setFlag(flag.strip()) + elif eltName == "CurrentInsertionAction": + self.CurrentInsertionAction.append( + eltAttrs["glyph"]) + elif eltName == "MarkedInsertionAction": + self.MarkedInsertionAction.append( + eltAttrs["glyph"]) + else: + assert False, eltName class FeatureParams(BaseTable): @@ -33,6 +440,10 @@ # manual implementation to get rid of glyphID dependencies + def populateDefaults(self, propagator=None): + if not hasattr(self, 'glyphs'): + self.glyphs = [] + def postRead(self, rawTable, font): if self.Format == 1: # TODO only allow glyphs 
that are valid? @@ -46,7 +457,7 @@ # this when writing font out. sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex) if ranges != sorted_ranges: - warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") + log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.") ranges = sorted_ranges del sorted_ranges for r in ranges: @@ -57,21 +468,21 @@ try: startID = font.getGlyphID(start, requireReal=True) except KeyError: - warnings.warn("Coverage table has start glyph ID out of range: %s." % start) + log.warning("Coverage table has start glyph ID out of range: %s.", start) continue try: endID = font.getGlyphID(end, requireReal=True) + 1 except KeyError: # Apparently some tools use 65535 to "match all" the range if end != 'glyph65535': - warnings.warn("Coverage table has end glyph ID out of range: %s." % end) + log.warning("Coverage table has end glyph ID out of range: %s.", end) # NOTE: We clobber out-of-range things here. There are legit uses for those, # but none that we have seen in the wild. 
endID = len(glyphOrder) glyphs.extend(glyphOrder[glyphID] for glyphID in range(startID, endID)) else: - assert 0, "unknown format: %s" % self.Format - del self.Format # Don't need this anymore + self.glyphs = [] + log.warning("Unknown Coverage format: %s" % self.Format) def preWrite(self, font): glyphs = getattr(self, "glyphs", None) @@ -107,7 +518,7 @@ ranges[i] = r index = index + end - start + 1 if brokenOrder: - warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") + log.warning("GSUB/GPOS Coverage is not sorted by glyph ids.") ranges.sort(key=lambda a: a.StartID) for r in ranges: del r.StartID @@ -131,8 +542,79 @@ glyphs.append(attrs["value"]) +class VarIdxMap(BaseTable): + + def populateDefaults(self, propagator=None): + if not hasattr(self, 'mapping'): + self.mapping = [] + + def postRead(self, rawTable, font): + assert (rawTable['EntryFormat'] & 0xFFC0) == 0 + self.mapping = rawTable['mapping'] + + def preWrite(self, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = self.mapping = [] + rawTable = { 'mapping': mapping } + rawTable['MappingCount'] = len(mapping) + + # TODO Remove this abstraction/optimization and move it varLib.builder? 
+ + ored = 0 + for idx in mapping: + ored |= idx + + inner = ored & 0xFFFF + innerBits = 0 + while inner: + innerBits += 1 + inner >>= 1 + innerBits = max(innerBits, 1) + assert innerBits <= 16 + + ored = (ored >> (16-innerBits)) | (ored & ((1<> 16), + ('inner', value & 0xFFFF), + ) + xmlWriter.simpletag("Map", attrs) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = [] + self.mapping = mapping + outer = safeEval(attrs['outer']) + inner = safeEval(attrs['inner']) + assert inner <= 0xFFFF + mapping.append((outer << 16) | inner) + + class SingleSubst(FormatSwitchingBaseTable): + def populateDefaults(self, propagator=None): + if not hasattr(self, 'mapping'): + self.mapping = {} + def postRead(self, rawTable, font): mapping = {} input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) @@ -151,7 +633,6 @@ else: assert 0, "unknown format: %s" % self.Format self.mapping = mapping - del self.Format # Don't need this anymore def preWrite(self, font): mapping = getattr(self, "mapping", None) @@ -172,7 +653,11 @@ if (inID + delta) % 65536 != outID: break else: - format = 1 + if delta is None: + # the mapping is empty, better use format 2 + format = 2 + else: + format = 1 rawTable = {} self.Format = format @@ -203,8 +688,89 @@ mapping[attrs["in"]] = attrs["out"] +class MultipleSubst(FormatSwitchingBaseTable): + + def populateDefaults(self, propagator=None): + if not hasattr(self, 'mapping'): + self.mapping = {} + + def postRead(self, rawTable, font): + mapping = {} + if self.Format == 1: + glyphs = _getGlyphsFromCoverageTable(rawTable["Coverage"]) + subst = [s.Substitute for s in rawTable["Sequence"]] + mapping = dict(zip(glyphs, subst)) + else: + assert 0, "unknown format: %s" % self.Format + self.mapping = mapping + + def preWrite(self, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = self.mapping = {} + cov = Coverage() + cov.glyphs = 
sorted(list(mapping.keys()), key=font.getGlyphID) + self.Format = 1 + rawTable = { + "Coverage": cov, + "Sequence": [self.makeSequence_(mapping[glyph]) + for glyph in cov.glyphs], + } + return rawTable + + def toXML2(self, xmlWriter, font): + items = sorted(self.mapping.items()) + for inGlyph, outGlyphs in items: + out = ",".join(outGlyphs) + xmlWriter.simpletag("Substitution", + [("in", inGlyph), ("out", out)]) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + mapping = getattr(self, "mapping", None) + if mapping is None: + mapping = {} + self.mapping = mapping + + # TTX v3.0 and earlier. + if name == "Coverage": + self.old_coverage_ = [] + for element in content: + if not isinstance(element, tuple): + continue + element_name, element_attrs, _ = element + if element_name == "Glyph": + self.old_coverage_.append(element_attrs["value"]) + return + if name == "Sequence": + index = int(attrs.get("index", len(mapping))) + glyph = self.old_coverage_[index] + glyph_mapping = mapping[glyph] = [] + for element in content: + if not isinstance(element, tuple): + continue + element_name, element_attrs, _ = element + if element_name == "Substitute": + glyph_mapping.append(element_attrs["value"]) + return + + # TTX v3.1 and later. + outGlyphs = attrs["out"].split(",") if attrs["out"] else [] + mapping[attrs["in"]] = [g.strip() for g in outGlyphs] + + @staticmethod + def makeSequence_(g): + seq = Sequence() + seq.Substitute = g + return seq + + class ClassDef(FormatSwitchingBaseTable): + def populateDefaults(self, propagator=None): + if not hasattr(self, 'classDefs'): + self.classDefs = {} + def postRead(self, rawTable, font): classDefs = {} glyphOrder = font.getGlyphOrder() @@ -215,17 +781,19 @@ try: startID = font.getGlyphID(start, requireReal=True) except KeyError: - warnings.warn("ClassDef table has start glyph ID out of range: %s." 
% start) + log.warning("ClassDef table has start glyph ID out of range: %s.", start) startID = len(glyphOrder) endID = startID + len(classList) if endID > len(glyphOrder): - warnings.warn("ClassDef table has entries for out of range glyph IDs: %s,%s." % (start, len(classList))) + log.warning("ClassDef table has entries for out of range glyph IDs: %s,%s.", + start, len(classList)) # NOTE: We clobber out-of-range things here. There are legit uses for those, # but none that we have seen in the wild. endID = len(glyphOrder) for glyphID, cls in zip(range(startID, endID), classList): - classDefs[glyphOrder[glyphID]] = cls + if cls: + classDefs[glyphOrder[glyphID]] = cls elif self.Format == 2: records = rawTable["ClassRangeRecord"] @@ -236,37 +804,37 @@ try: startID = font.getGlyphID(start, requireReal=True) except KeyError: - warnings.warn("ClassDef table has start glyph ID out of range: %s." % start) + log.warning("ClassDef table has start glyph ID out of range: %s.", start) continue try: endID = font.getGlyphID(end, requireReal=True) + 1 except KeyError: # Apparently some tools use 65535 to "match all" the range if end != 'glyph65535': - warnings.warn("ClassDef table has end glyph ID out of range: %s." % end) + log.warning("ClassDef table has end glyph ID out of range: %s.", end) # NOTE: We clobber out-of-range things here. There are legit uses for those, # but none that we have seen in the wild. 
endID = len(glyphOrder) for glyphID in range(startID, endID): - classDefs[glyphOrder[glyphID]] = cls + if cls: + classDefs[glyphOrder[glyphID]] = cls else: assert 0, "unknown format: %s" % self.Format self.classDefs = classDefs - del self.Format # Don't need this anymore - def preWrite(self, font): + def _getClassRanges(self, font): classDefs = getattr(self, "classDefs", None) if classDefs is None: - classDefs = self.classDefs = {} - items = list(classDefs.items()) - format = 2 - rawTable = {"ClassRangeRecord": []} + self.classDefs = {} + return getGlyphID = font.getGlyphID - for i in range(len(items)): - glyphName, cls = items[i] - items[i] = getGlyphID(glyphName), glyphName, cls - items.sort() + items = [] + for glyphName, cls in classDefs.items(): + if not cls: + continue + items.append((getGlyphID(glyphName), glyphName, cls)) if items: + items.sort() last, lastName, lastCls = items[0] ranges = [[lastCls, last, lastName]] for glyphID, glyphName, cls in items[1:]: @@ -277,7 +845,13 @@ lastName = glyphName lastCls = cls ranges[-1].extend([last, lastName]) + return ranges + def preWrite(self, font): + format = 2 + rawTable = {"ClassRangeRecord": []} + ranges = self._getClassRanges(font) + if ranges: startGlyph = ranges[0][1] endGlyph = ranges[-1][3] glyphCount = endGlyph - startGlyph + 1 @@ -320,19 +894,21 @@ class AlternateSubst(FormatSwitchingBaseTable): + def populateDefaults(self, propagator=None): + if not hasattr(self, 'alternates'): + self.alternates = {} + def postRead(self, rawTable, font): alternates = {} if self.Format == 1: input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) alts = rawTable["AlternateSet"] - if len(input) != len(alts): - assert len(input) == len(alts) - for i in range(len(input)): - alternates[input[i]] = alts[i].Alternate + assert len(input) == len(alts) + for inp,alt in zip(input,alts): + alternates[inp] = alt.Alternate else: assert 0, "unknown format: %s" % self.Format self.alternates = alternates - del self.Format # Don't need 
this anymore def preWrite(self, font): self.Format = 1 @@ -348,7 +924,7 @@ cov.glyphs = [ item[1] for item in items] alternates = [] setList = [ item[-1] for item in items] - for set in setList: + for set in setList: alts = AlternateSet() alts.Alternate = set alternates.append(alts) @@ -388,6 +964,10 @@ class LigatureSubst(FormatSwitchingBaseTable): + def populateDefaults(self, propagator=None): + if not hasattr(self, 'ligatures'): + self.ligatures = {} + def postRead(self, rawTable, font): ligatures = {} if self.Format == 1: @@ -399,13 +979,27 @@ else: assert 0, "unknown format: %s" % self.Format self.ligatures = ligatures - del self.Format # Don't need this anymore def preWrite(self, font): self.Format = 1 ligatures = getattr(self, "ligatures", None) if ligatures is None: ligatures = self.ligatures = {} + + if ligatures and isinstance(next(iter(ligatures)), tuple): + # New high-level API in v3.1 and later. Note that we just support compiling this + # for now. We don't load to this API, and don't do XML with it. + + # ligatures is map from components-sequence to lig-glyph + newLigatures = dict() + for comps,lig in sorted(ligatures.items(), key=lambda item: (-len(item[0]), item[0])): + ligature = Ligature() + ligature.Component = comps[1:] + ligature.CompCount = len(comps) + ligature.LigGlyph = lig + newLigatures.setdefault(comps[0], []).append(ligature) + ligatures = newLigatures + items = list(ligatures.items()) for i in range(len(items)): glyphName, set = items[i] @@ -459,7 +1053,6 @@ ligs.append(lig) -# # For each subtable format there is a class. However, we don't really distinguish # between "field name" and "format name": often these are the same. Yet there's # a whole bunch of fields with different names. 
The following dict is a mapping @@ -542,6 +1135,7 @@ return ok lookup = lookups[lookupIndex] + lookup.LookupType = extType for si in range(len(lookup.SubTable)): subTable = lookup.SubTable[si] extSubTableClass = lookupTypes[overflowRecord.tableType][extType] @@ -609,6 +1203,73 @@ return ok +def splitPairPos(oldSubTable, newSubTable, overflowRecord): + st = oldSubTable + ok = False + newSubTable.Format = oldSubTable.Format + if oldSubTable.Format == 1 and len(oldSubTable.PairSet) > 1: + for name in 'ValueFormat1', 'ValueFormat2': + setattr(newSubTable, name, getattr(oldSubTable, name)) + + # Move top half of coverage to new subtable + + newSubTable.Coverage = oldSubTable.Coverage.__class__() + + coverage = oldSubTable.Coverage.glyphs + records = oldSubTable.PairSet + + oldCount = len(oldSubTable.PairSet) // 2 + + oldSubTable.Coverage.glyphs = coverage[:oldCount] + oldSubTable.PairSet = records[:oldCount] + + newSubTable.Coverage.glyphs = coverage[oldCount:] + newSubTable.PairSet = records[oldCount:] + + oldSubTable.PairSetCount = len(oldSubTable.PairSet) + newSubTable.PairSetCount = len(newSubTable.PairSet) + + ok = True + + elif oldSubTable.Format == 2 and len(oldSubTable.Class1Record) > 1: + if not hasattr(oldSubTable, 'Class2Count'): + oldSubTable.Class2Count = len(oldSubTable.Class1Record[0].Class2Record) + for name in 'Class2Count', 'ClassDef2', 'ValueFormat1', 'ValueFormat2': + setattr(newSubTable, name, getattr(oldSubTable, name)) + + # The two subtables will still have the same ClassDef2 and the table + # sharing will still cause the sharing to overflow. As such, disable + # sharing on the one that is serialized second (that's oldSubTable). 
+ oldSubTable.DontShare = True + + # Move top half of class numbers to new subtable + + newSubTable.Coverage = oldSubTable.Coverage.__class__() + newSubTable.ClassDef1 = oldSubTable.ClassDef1.__class__() + + coverage = oldSubTable.Coverage.glyphs + classDefs = oldSubTable.ClassDef1.classDefs + records = oldSubTable.Class1Record + + oldCount = len(oldSubTable.Class1Record) // 2 + newGlyphs = set(k for k,v in classDefs.items() if v >= oldCount) + + oldSubTable.Coverage.glyphs = [g for g in coverage if g not in newGlyphs] + oldSubTable.ClassDef1.classDefs = {k:v for k,v in classDefs.items() if v < oldCount} + oldSubTable.Class1Record = records[:oldCount] + + newSubTable.Coverage.glyphs = [g for g in coverage if g in newGlyphs] + newSubTable.ClassDef1.classDefs = {k:(v-oldCount) for k,v in classDefs.items() if v > oldCount} + newSubTable.Class1Record = records[oldCount:] + + oldSubTable.Class1Count = len(oldSubTable.Class1Record) + newSubTable.Class1Count = len(newSubTable.Class1Record) + + ok = True + + return ok + + splitTable = { 'GSUB': { # 1: splitSingleSubst, # 2: splitMultipleSubst, @@ -621,7 +1282,7 @@ }, 'GPOS': { # 1: splitSinglePos, -# 2: splitPairPos, + 2: splitPairPos, # 3: splitCursivePos, # 4: splitMarkBasePos, # 5: splitMarkLigPos, @@ -643,6 +1304,11 @@ subIndex = overflowRecord.SubTableIndex subtable = lookup.SubTable[subIndex] + # First, try not sharing anything for this subtable... + if not hasattr(subtable, "DontShare"): + subtable.DontShare = True + return True + if hasattr(subtable, 'ExtSubTable'): # We split the subtable of the Extension table, and add a new Extension table # to contain the new subtable. 
@@ -650,7 +1316,7 @@ subTableType = subtable.ExtSubTable.__class__.LookupType extSubTable = subtable subtable = extSubTable.ExtSubTable - newExtSubTableClass = lookupTypes[overflowRecord.tableType][subtable.__class__.LookupType] + newExtSubTableClass = lookupTypes[overflowRecord.tableType][extSubTable.__class__.LookupType] newExtSubTable = newExtSubTableClass() newExtSubTable.Format = extSubTable.Format lookup.SubTable.insert(subIndex + 1, newExtSubTable) @@ -696,12 +1362,14 @@ if name not in namespace: # the class doesn't exist yet, so the base implementation is used. cls = type(name, (baseClass,), {}) + if name in ('GSUB', 'GPOS'): + cls.DontShare = True namespace[name] = cls for base, alts in _equivalents.items(): base = namespace[base] for alt in alts: - namespace[alt] = type(alt, (base,), {}) + namespace[alt] = base global lookupTypes lookupTypes = { @@ -726,6 +1394,17 @@ 8: ChainContextPos, 9: ExtensionPos, }, + 'mort': { + 4: NoncontextualMorph, + }, + 'morx': { + 0: RearrangementMorph, + 1: ContextualMorph, + 2: LigatureMorph, + # 3: Reserved, + 4: NoncontextualMorph, + # 5: InsertionMorph, + }, } lookupTypes['JSTF'] = lookupTypes['GPOS'] # JSTF contains GPOS for lookupEnum in lookupTypes.values(): diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_p_o_s_t.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_p_o_s_t.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_p_o_s_t.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_p_o_s_t.py 2018-01-08 12:40:40.000000000 +0000 @@ -91,9 +91,7 @@ self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs) for glyphID in range(numGlyphs): index = indices[glyphID] - if index > 32767: # reserved for future use; ignore - name = "" - elif index > 257: + if index > 257: try: name = extraNames[index-258] except IndexError: @@ -156,7 +154,8 @@ assert len(glyphOrder) == numGlyphs indices = array.array("H") extraDict = {} - extraNames = self.extraNames + extraNames = 
self.extraNames = [ + n for n in self.extraNames if n not in standardGlyphOrder] for i in range(len(extraNames)): extraDict[extraNames[i]] = i for glyphID in range(numGlyphs): @@ -171,7 +170,6 @@ index = standardGlyphOrder.index(psName) else: index = 258 + len(extraNames) - assert index < 32768, "Too many glyph names for 'post' table format 2" extraDict[psName] = len(extraNames) extraNames.append(psName) indices.append(index) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_p_r_o_p.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_p_r_o_p.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_p_r_o_p.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_p_r_o_p.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6prop.html +class table__p_r_o_p(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_s_b_i_x.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_s_b_i_x.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_s_b_i_x.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_s_b_i_x.py 2018-01-08 12:40:40.000000000 +0000 @@ -31,8 +31,9 @@ class table__s_b_i_x(DefaultTable.DefaultTable): - def __init__(self, tag): - self.tableTag = tag + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) self.version = 1 self.flags = 1 self.numStrikes = 0 diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/S__i_l_f.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/S__i_l_f.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/S__i_l_f.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/S__i_l_f.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,877 @@ +from __future__ import 
print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from itertools import * +from . import DefaultTable +from . import grUtils +from array import array +import struct, operator, warnings, re, sys + +Silf_hdr_format = ''' + > + version: 16.16F +''' + +Silf_hdr_format_3 = ''' + > + version: 16.16F + compilerVersion: L + numSilf: H + x + x +''' + +Silf_part1_format_v3 = ''' + > + ruleVersion: 16.16F + passOffset: H + pseudosOffset: H +''' + +Silf_part1_format = ''' + > + maxGlyphID: H + extraAscent: h + extraDescent: h + numPasses: B + iSubst: B + iPos: B + iJust: B + iBidi: B + flags: B + maxPreContext: B + maxPostContext: B + attrPseudo: B + attrBreakWeight: B + attrDirectionality: B + attrMirroring: B + attrSkipPasses: B + numJLevels: B +''' + +Silf_justify_format = ''' + > + attrStretch: B + attrShrink: B + attrStep: B + attrWeight: B + runto: B + x + x + x +''' + +Silf_part2_format = ''' + > + numLigComp: H + numUserDefn: B + maxCompPerLig: B + direction: B + attCollisions: B + x + x + x + numCritFeatures: B +''' + +Silf_pseudomap_format = ''' + > + unicode: L + nPseudo: H +''' + +Silf_classmap_format = ''' + > + numClass: H + numLinear: H +''' + +Silf_lookupclass_format = ''' + > + numIDs: H + searchRange: H + entrySelector: H + rangeShift: H +''' + +Silf_lookuppair_format = ''' + > + glyphId: H + index: H +''' + +Silf_pass_format = ''' + > + flags: B + maxRuleLoop: B + maxRuleContext: B + maxBackup: B + numRules: H + fsmOffset: H + pcCode: L + rcCode: L + aCode: L + oDebug: L + numRows: H + numTransitional: H + numSuccess: H + numColumns: H +''' + +aCode_info = ( + ("NOP", 0), + ("PUSH_BYTE", "b"), + ("PUSH_BYTE_U", "B"), + ("PUSH_SHORT", ">h"), + ("PUSH_SHORT_U", ">H"), + ("PUSH_LONG", ">L"), + ("ADD", 0), + ("SUB", 0), + ("MUL", 0), + ("DIV", 0), + ("MIN", 0), + ("MAX", 0), + ("NEG", 0), + ("TRUNC8", 0), + ("TRUNC16", 0), + ("COND", 0), + ("AND", 0), # 
x10 + ("OR", 0), + ("NOT", 0), + ("EQUAL", 0), + ("NOT_EQ", 0), + ("LESS", 0), + ("GTR", 0), + ("LESS_EQ", 0), + ("GTR_EQ", 0), + ("NEXT", 0), + ("NEXT_N", "b"), + ("COPY_NEXT", 0), + ("PUT_GLYPH_8BIT_OBS", "B"), + ("PUT_SUBS_8BIT_OBS", "bBB"), + ("PUT_COPY", "b"), + ("INSERT", 0), + ("DELETE", 0), # x20 + ("ASSOC", -1), + ("CNTXT_ITEM", "bB"), + ("ATTR_SET", "B"), + ("ATTR_ADD", "B"), + ("ATTR_SUB", "B"), + ("ATTR_SET_SLOT", "B"), + ("IATTR_SET_SLOT", "BB"), + ("PUSH_SLOT_ATTR", "Bb"), + ("PUSH_GLYPH_ATTR_OBS", "Bb"), + ("PUSH_GLYPH_METRIC", "Bbb"), + ("PUSH_FEAT", "Bb"), + ("PUSH_ATT_TO_GATTR_OBS", "Bb"), + ("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"), + ("PUSH_ISLOT_ATTR", "Bbb"), + ("PUSH_IGLYPH_ATTR", "Bbb"), + ("POP_RET", 0), # x30 + ("RET_ZERO", 0), + ("RET_TRUE", 0), + ("IATTR_SET", "BB"), + ("IATTR_ADD", "BB"), + ("IATTR_SUB", "BB"), + ("PUSH_PROC_STATE", "B"), + ("PUSH_VERSION", 0), + ("PUT_SUBS", ">bHH"), + ("PUT_SUBS2", 0), + ("PUT_SUBS3", 0), + ("PUT_GLYPH", ">H"), + ("PUSH_GLYPH_ATTR", ">Hb"), + ("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"), + ("BITOR", 0), + ("BITAND", 0), + ("BITNOT", 0), # x40 + ("BITSET", ">HH"), + ("SET_FEAT", "Bb") +) +aCode_map = dict([(x[0], (i, x[1])) for i,x in enumerate(aCode_info)]) + +def disassemble(aCode): + codelen = len(aCode) + pc = 0 + res = [] + while pc < codelen: + opcode = byteord(aCode[pc:pc+1]) + if opcode > len(aCode_info): + instr = aCode_info[0] + else: + instr = aCode_info[opcode] + pc += 1 + if instr[1] != 0 and pc >= codelen : return res + if instr[1] == -1: + count = byteord(aCode[pc]) + fmt = "%dB" % count + pc += 1 + elif instr[1] == 0: + fmt = "" + else : + fmt = instr[1] + if fmt == "": + res.append(instr[0]) + continue + parms = struct.unpack_from(fmt, aCode[pc:]) + res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")") + pc += struct.calcsize(fmt) + return res + +instre = re.compile("^\s*([^(]+)\s*(?:\(([^)]+)\))?") +def assemble(instrs): + res = [] + for inst in instrs: + m = instre.match(inst) + if not m or 
not m.group(1) in aCode_map: + continue + opcode, parmfmt = aCode_map[m.group(1)] + res.append(struct.pack("B", opcode)) + if m.group(2): + if parmfmt == 0: + continue + parms = [int(x) for x in re.split(",\s*", m.group(2))] + if parmfmt == -1: + l = len(parms) + res.append(struct.pack(("%dB" % (l+1)), l, *parms)) + else: + res.append(struct.pack(parmfmt, *parms)) + return b"".join(res) + +def writecode(tag, writer, instrs): + writer.begintag(tag) + writer.newline() + for l in disassemble(instrs): + writer.write(l) + writer.newline() + writer.endtag(tag) + writer.newline() + +def readcode(content): + res = [] + for e in content_string(content).split('\n'): + e = e.strip() + if not len(e): continue + res.append(e) + return assemble(res) + +attrs_info=('flags', 'extraAscent', 'extraDescent', 'maxGlyphID', + 'numLigComp', 'numUserDefn', 'maxCompPerLig', 'direction', 'lbGID') +attrs_passindexes = ('iSubst', 'iPos', 'iJust', 'iBidi') +attrs_contexts = ('maxPreContext', 'maxPostContext') +attrs_attributes = ('attrPseudo', 'attrBreakWeight', 'attrDirectionality', + 'attrMirroring', 'attrSkipPasses', 'attCollisions') +pass_attrs_info = ('flags', 'maxRuleLoop', 'maxRuleContext', 'maxBackup', + 'minRulePreContext', 'maxRulePreContext', 'collisionThreshold') +pass_attrs_fsm = ('numRows', 'numTransitional', 'numSuccess', 'numColumns') + +def writesimple(tag, self, writer, *attrkeys): + attrs = dict([(k, getattr(self, k)) for k in attrkeys]) + writer.simpletag(tag, **attrs) + writer.newline() + +def getSimple(self, attrs, *attr_list): + for k in attr_list: + if k in attrs: + setattr(self, k, int(safeEval(attrs[k]))) + +def content_string(contents): + res = "" + for element in contents: + if isinstance(element, tuple): continue + res += element + return res.strip() + +def wrapline(writer, dat, length=80): + currline = "" + for d in dat: + if len(currline) > length: + writer.write(currline[:-1]) + writer.newline() + currline = "" + currline += d + " " + if len(currline): + 
writer.write(currline[:-1]) + writer.newline() + +class _Object() : + pass + +class table_S__i_l_f(DefaultTable.DefaultTable): + '''Silf table support''' + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.silfs = [] + + def decompile(self, data, ttFont): + sstruct.unpack2(Silf_hdr_format, data, self) + if self.version >= 5.0: + (data, self.scheme) = grUtils.decompress(data) + sstruct.unpack2(Silf_hdr_format_3, data, self) + base = sstruct.calcsize(Silf_hdr_format_3) + elif self.version < 3.0: + self.numSilf = struct.unpack('>H', data[4:6]) + self.scheme = 0 + self.compilerVersion = 0 + base = 8 + else: + self.scheme = 0 + sstruct.unpack2(Silf_hdr_format_3, data, self) + base = sstruct.calcsize(Silf_hdr_format_3) + + silfoffsets = struct.unpack_from(('>%dL' % self.numSilf), data[base:]) + for offset in silfoffsets: + s = Silf() + self.silfs.append(s) + s.decompile(data[offset:], ttFont, self.version) + + def compile(self, ttFont): + self.numSilf = len(self.silfs) + if self.version < 3.0: + hdr = sstruct.pack(Silf_hdr_format, self) + hdr += struct.pack(">HH", self.numSilf, 0) + else: + hdr = sstruct.pack(Silf_hdr_format_3, self) + offset = len(hdr) + 4 * self.numSilf + data = "" + for s in self.silfs: + hdr += struct.pack(">L", offset) + subdata = s.compile(ttFont, self.version) + offset += len(subdata) + data += subdata + if self.version >= 5.0: + return grUtils.compress(self.scheme, hdr+data) + return hdr+data + + def toXML(self, writer, ttFont): + writer.comment('Attributes starting with _ are informative only') + writer.newline() + writer.simpletag('version', version=self.version, + compilerVersion=self.compilerVersion, compressionScheme=self.scheme) + writer.newline() + for s in self.silfs: + writer.begintag('silf') + writer.newline() + s.toXML(writer, ttFont, self.version) + writer.endtag('silf') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'version': + 
self.scheme=int(safeEval(attrs['compressionScheme'])) + self.version = float(safeEval(attrs['version'])) + self.compilerVersion = int(safeEval(attrs['compilerVersion'])) + return + if name == 'silf': + s = Silf() + self.silfs.append(s) + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + s.fromXML(tag, attrs, subcontent, ttFont, self.version) + +class Silf(object): + '''A particular Silf subtable''' + + def __init__(self): + self.passes = [] + self.scriptTags = [] + self.critFeatures = [] + self.jLevels = [] + self.pMap = {} + + def decompile(self, data, ttFont, version=2.0): + if version >= 3.0 : + _, data = sstruct.unpack2(Silf_part1_format_v3, data, self) + _, data = sstruct.unpack2(Silf_part1_format, data, self) + for jlevel in range(self.numJLevels): + j, data = sstruct.unpack2(Silf_justify_format, data, _Object()) + self.jLevels.append(j) + _, data = sstruct.unpack2(Silf_part2_format, data, self) + if self.numCritFeatures: + self.critFeatures = struct.unpack_from(('>%dH' % self.numCritFeatures), data) + data = data[self.numCritFeatures * 2 + 1:] + (numScriptTag,) = struct.unpack_from('B', data) + if numScriptTag: + self.scriptTags = [struct.unpack("4s", data[x:x+4])[0] for x in range(1, 1 + 4 * numScriptTag, 4)] + data = data[1 + 4 * numScriptTag:] + (self.lbGID,) = struct.unpack('>H', data[:2]) + if self.numPasses: + self.oPasses = struct.unpack(('>%dL' % (self.numPasses+1)), data[2:6+4*self.numPasses]) + data = data[6 + 4 * self.numPasses:] + (numPseudo,) = struct.unpack(">H", data[:2]) + for i in range(numPseudo): + if version >= 3.0: + pseudo = sstruct.unpack(Silf_pseudomap_format, data[8+6*i:14+6*i], _Object()) + else: + pseudo = struct.unpack('>HH', data[8+4*i:12+4*i], _Object()) + self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo) + data = data[8 + 6 * numPseudo:] + currpos = (sstruct.calcsize(Silf_part1_format) + + sstruct.calcsize(Silf_justify_format) * self.numJLevels + + 
sstruct.calcsize(Silf_part2_format) + 2 * self.numCritFeatures + + 1 + 1 + 4 * numScriptTag + 6 + 4 * self.numPasses + 8 + 6 * numPseudo) + if version >= 3.0: + currpos += sstruct.calcsize(Silf_part1_format_v3) + self.classes = Classes() + self.classes.decompile(data, ttFont, version) + for i in range(self.numPasses): + p = Pass() + self.passes.append(p) + p.decompile(data[self.oPasses[i]-currpos:self.oPasses[i+1]-currpos], + ttFont, version) + + def compile(self, ttFont, version=2.0): + self.numPasses = len(self.passes) + self.numJLevels = len(self.jLevels) + self.numCritFeatures = len(self.critFeatures) + numPseudo = len(self.pMap) + data = "" + if version >= 3.0: + hdroffset = sstruct.calcsize(Silf_part1_format_v3) + else: + hdroffset = 0 + data += sstruct.pack(Silf_part1_format, self) + for j in self.jLevels: + data += sstruct.pack(Silf_justify_format, j) + data += sstruct.pack(Silf_part2_format, self) + if self.numCritFeatures: + data += struct.pack((">%dH" % self.numCritFeaturs), *self.critFeatures) + data += struct.pack("BB", 0, len(self.scriptTags)) + if len(self.scriptTags): + tdata = [struct.pack("4s", x) for x in self.scriptTags] + data += "".join(tdata) + data += struct.pack(">H", self.lbGID) + self.passOffset = len(data) + + data1 = grUtils.bininfo(numPseudo, 6) + currpos = hdroffset + len(data) + 4 * (self.numPasses + 1) + self.pseudosOffset = currpos + len(data1) + for u, p in sorted(self.pMap.items()): + data1 += struct.pack((">LH" if version >= 3.0 else ">HH"), + u, ttFont.getGlyphID(p)) + data1 += self.classes.compile(ttFont, version) + currpos += len(data1) + data2 = "" + datao = "" + for i, p in enumerate(self.passes): + base = currpos + len(data2) + datao += struct.pack(">L", base) + data2 += p.compile(ttFont, base, version) + datao += struct.pack(">L", currpos + len(data2)) + + if version >= 3.0: + data3 = sstruct.pack(Silf_part1_format_v3, self) + else: + data3 = "" + return data3 + data + datao + data1 + data2 + + + def toXML(self, writer, 
ttFont, version=2.0): + if version >= 3.0: + writer.simpletag('version', ruleVersion=self.ruleVersion) + writer.newline() + writesimple('info', self, writer, *attrs_info) + writesimple('passindexes', self, writer, *attrs_passindexes) + writesimple('contexts', self, writer, *attrs_contexts) + writesimple('attributes', self, writer, *attrs_attributes) + if len(self.jLevels): + writer.begintag('justifications') + writer.newline() + jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format) + for i, j in enumerate(self.jLevels): + attrs = dict([(k, getattr(j, k)) for k in jnames]) + writer.simpletag('justify', **attrs) + writer.newline() + writer.endtag('justifications') + writer.newline() + if len(self.critFeatures): + writer.begintag('critFeatures') + writer.newline() + writer.write(" ".join(map(str, self.critFeatures))) + writer.newline() + writer.endtag('critFeatures') + writer.newline() + if len(self.scriptTags): + writer.begintag('scriptTags') + writer.newline() + writer.write(" ".join(self.scriptTags)) + writer.newline() + writer.endtag('scriptTags') + writer.newline() + if self.pMap: + writer.begintag('pseudoMap') + writer.newline() + for k, v in sorted(self.pMap.items()): + writer.simpletag('pseudo', unicode=hex(k), pseudo=v) + writer.newline() + writer.endtag('pseudoMap') + writer.newline() + self.classes.toXML(writer, ttFont, version) + if len(self.passes): + writer.begintag('passes') + writer.newline() + for i, p in enumerate(self.passes): + writer.begintag('pass', _index=i) + writer.newline() + p.toXML(writer, ttFont, version) + writer.endtag('pass') + writer.newline() + writer.endtag('passes') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont, version=2.0): + if name == 'version': + self.ruleVersion = float(safeEval(attrs.get('ruleVersion', "0"))) + if name == 'info': + getSimple(self, attrs, *attrs_info) + elif name == 'passindexes': + getSimple(self, attrs, *attrs_passindexes) + elif name == 'contexts': + getSimple(self, attrs, 
*attrs_contexts) + elif name == 'attributes': + getSimple(self, attrs, *attrs_attributes) + elif name == 'justifications': + for element in content: + if not isinstance(element, tuple): continue + (tag, attrs, subcontent) = element + if tag == 'justify': + j = _Object() + for k, v in attrs.items(): + setattr(j, k, int(v)) + self.jLevels.append(j) + elif name == 'critFeatures': + self.critFeatures = [] + element = content_string(content) + self.critFeatures.extend(map(int, element.split())) + elif name == 'scriptTags': + self.scriptTags = [] + element = content_string(content) + for n in element.split(): + self.scriptTags.append(n) + elif name == 'pseudoMap': + self.pMap = {} + for element in content: + if not isinstance(element, tuple): continue + (tag, attrs, subcontent) = element + if tag == 'pseudo': + k = int(attrs['unicode'], 16) + v = attrs['pseudo'] + self.pMap[k] = v + elif name == 'classes': + self.classes = Classes() + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + self.classes.fromXML(tag, attrs, subcontent, ttFont, version) + elif name == 'passes': + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + if tag == 'pass': + p = Pass() + for e in subcontent: + if not isinstance(e, tuple): continue + p.fromXML(e[0], e[1], e[2], ttFont, version) + self.passes.append(p) + + +class Classes(object): + + def __init__(self): + self.linear = [] + self.nonLinear = [] + + def decompile(self, data, ttFont, version=2.0): + sstruct.unpack2(Silf_classmap_format, data, self) + if version >= 4.0 : + oClasses = struct.unpack((">%dL" % (self.numClass+1)), + data[4:8+4*self.numClass]) + else: + oClasses = struct.unpack((">%dH" % (self.numClass+1)), + data[4:6+2*self.numClass]) + for s,e in zip(oClasses[:self.numLinear], oClasses[1:self.numLinear+1]): + self.linear.append(map(ttFont.getGlyphName, + struct.unpack((">%dH" % ((e-s)/2)), data[s:e]))) + for s,e in 
zip(oClasses[self.numLinear:self.numClass], + oClasses[self.numLinear+1:self.numClass+1]): + nonLinids = [struct.unpack(">HH", data[x:x+4]) for x in range(s+8, e, 4)] + nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids]) + self.nonLinear.append(nonLin) + + def compile(self, ttFont, version=2.0): + data = "" + oClasses = [] + if version >= 4.0: + offset = 8 + 4 * (len(self.linear) + len(self.nonLinear)) + else: + offset = 6 + 2 * (len(self.linear) + len(self.nonLinear)) + for l in self.linear: + oClasses.append(len(data) + offset) + gs = map(ttFont.getGlyphID, l) + data += struct.pack((">%dH" % len(l)), *gs) + for l in self.nonLinear: + oClasses.append(len(data) + offset) + gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()] + data += grUtils.bininfo(len(gs)) + data += "".join([struct.pack(">HH", *x) for x in sorted(gs)]) + oClasses.append(len(data) + offset) + self.numClass = len(oClasses) - 1 + self.numLinear = len(self.linear) + return sstruct.pack(Silf_classmap_format, self) + \ + struct.pack(((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)), + *oClasses) + data + + def toXML(self, writer, ttFont, version=2.0): + writer.begintag('classes') + writer.newline() + writer.begintag('linearClasses') + writer.newline() + for i,l in enumerate(self.linear): + writer.begintag('linear', _index=i) + writer.newline() + wrapline(writer, l) + writer.endtag('linear') + writer.newline() + writer.endtag('linearClasses') + writer.newline() + writer.begintag('nonLinearClasses') + writer.newline() + for i, l in enumerate(self.nonLinear): + writer.begintag('nonLinear', _index=i + self.numLinear) + writer.newline() + for inp, ind in l.items(): + writer.simpletag('map', glyph=inp, index=ind) + writer.newline() + writer.endtag('nonLinear') + writer.newline() + writer.endtag('nonLinearClasses') + writer.newline() + writer.endtag('classes') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont, version=2.0): + if name == 'linearClasses': + for 
element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + if tag == 'linear': + l = content_string(subcontent).split() + self.linear.append(l) + elif name == 'nonLinearClasses': + for element in content: + if not isinstance(element, tuple): continue + tag, attrs, subcontent = element + if tag =='nonLinear': + l = {} + for e in subcontent: + if not isinstance(e, tuple): continue + tag, attrs, subsubcontent = e + if tag == 'map': + l[attrs['glyph']] = int(safeEval(attrs['index'])) + self.nonLinear.append(l) + +class Pass(object): + + def __init__(self): + self.colMap = {} + self.rules = [] + self.rulePreContexts = [] + self.ruleSortKeys = [] + self.ruleConstraints = [] + self.passConstraints = "" + self.actions = [] + self.stateTrans = [] + self.startStates = [] + + def decompile(self, data, ttFont, version=2.0): + _, data = sstruct.unpack2(Silf_pass_format, data, self) + (numRange, _, _, _) = struct.unpack(">4H", data[:8]) + data = data[8:] + for i in range(numRange): + (first, last, col) = struct.unpack(">3H", data[6*i:6*i+6]) + for g in range(first, last+1): + self.colMap[ttFont.getGlyphName(g)] = col + data = data[6*numRange:] + oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data) + data = data[2+2*self.numSuccess:] + rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data) + self.rules = [rules[s:e] for (s,e) in zip(oRuleMap, oRuleMap[1:])] + data = data[2*oRuleMap[-1]:] + (self.minRulePreContext, self.maxRulePreContext) = struct.unpack('BB', data[:2]) + numStartStates = self.maxRulePreContext - self.minRulePreContext + 1 + self.startStates = struct.unpack((">%dH" % numStartStates), + data[2:2 + numStartStates * 2]) + data = data[2+numStartStates*2:] + self.ruleSortKeys = struct.unpack((">%dH" % self.numRules), data[:2 * self.numRules]) + data = data[2*self.numRules:] + self.rulePreContexts = struct.unpack(("%dB" % self.numRules), data[:self.numRules]) + data = data[self.numRules:] + 
(self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3]) + oConstraints = list(struct.unpack((">%dH" % (self.numRules + 1)), + data[3:5 + self.numRules * 2])) + data = data[5 + self.numRules * 2:] + oActions = list(struct.unpack((">%dH" % (self.numRules + 1)), + data[:2 + self.numRules * 2])) + data = data[2 * self.numRules + 2:] + for i in range(self.numTransitional): + a = array("H", data[i*self.numColumns*2:(i+1)*self.numColumns*2]) + a.byteswap() + self.stateTrans.append(a) + data = data[self.numTransitional * self.numColumns * 2 + 1:] + self.passConstraints = data[:pConstraint] + data = data[pConstraint:] + for i in range(len(oConstraints)-2,-1,-1): + if oConstraints[i] == 0 : + oConstraints[i] = oConstraints[i+1] + self.ruleConstraints = [(data[s:e] if (e-s > 1) else "") for (s,e) in zip(oConstraints, oConstraints[1:])] + data = data[oConstraints[-1]:] + self.actions = [(data[s:e] if (e-s > 1) else "") for (s,e) in zip(oActions, oActions[1:])] + data = data[oActions[-1]:] + # not using debug + + def compile(self, ttFont, base, version=2.0): + # build it all up backwards + oActions = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.actions + [""], (0, []))[1] + oConstraints = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.ruleConstraints + [""], (1, []))[1] + constraintCode = "\000" + "".join(self.ruleConstraints) + transes = [] + for t in self.stateTrans: + t.byteswap() + transes.append(t.tostring()) + t.byteswap() + if not len(transes): + self.startStates = [0] + oRuleMap = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.rules+[[]], (0, []))[1] + passRanges = [] + gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()]) + for e in grUtils.entries(gidcolmap, sameval = True): + if e[1]: + passRanges.append((e[0], e[0]+e[1]-1, e[2][0])) + self.numRules = len(self.actions) + self.fsmOffset = (sstruct.calcsize(Silf_pass_format) + 8 + len(passRanges) * 6 + + len(oRuleMap) * 2 + 2 * oRuleMap[-1] + 2 + + 2 * 
len(self.startStates) + 3 * self.numRules + 3 + + 4 * self.numRules + 4) + self.pcCode = self.fsmOffset + 2*self.numTransitional*self.numColumns + 1 + base + self.rcCode = self.pcCode + len(self.passConstraints) + self.aCode = self.rcCode + len(constraintCode) + self.oDebug = 0 + # now generate output + data = sstruct.pack(Silf_pass_format, self) + data += grUtils.bininfo(len(passRanges), 6) + data += "".join(struct.pack(">3H", *p) for p in passRanges) + data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap) + flatrules = reduce(lambda a,x: a+x, self.rules, []) + data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules) + data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext) + data += struct.pack((">%dH" % len(self.startStates)), *self.startStates) + data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys) + data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts) + data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints)) + data += struct.pack((">%dH" % (self.numRules+1)), *oConstraints) + data += struct.pack((">%dH" % (self.numRules+1)), *oActions) + return data + "".join(transes) + struct.pack("B", 0) + \ + self.passConstraints + constraintCode + "".join(self.actions) + + def toXML(self, writer, ttFont, version=2.0): + writesimple('info', self, writer, *pass_attrs_info) + writesimple('fsminfo', self, writer, *pass_attrs_fsm) + writer.begintag('colmap') + writer.newline() + wrapline(writer, ["{}={}".format(*x) for x in sorted(self.colMap.items(), + key=lambda x:ttFont.getGlyphID(x[0]))]) + writer.endtag('colmap') + writer.newline() + writer.begintag('staterulemap') + writer.newline() + for i, r in enumerate(self.rules): + writer.simpletag('state', number = self.numRows - self.numSuccess + i, + rules = " ".join(map(str, r))) + writer.newline() + writer.endtag('staterulemap') + writer.newline() + writer.begintag('rules') + writer.newline() + for i in range(len(self.actions)): + writer.begintag('rule', 
index=i, precontext=self.rulePreContexts[i], + sortkey=self.ruleSortKeys[i]) + writer.newline() + if len(self.ruleConstraints[i]): + writecode('constraint', writer, self.ruleConstraints[i]) + writecode('action', writer, self.actions[i]) + writer.endtag('rule') + writer.newline() + writer.endtag('rules') + writer.newline() + if len(self.passConstraints): + writecode('passConstraint', writer, self.passConstraints) + if len(self.stateTrans): + writer.begintag('fsm') + writer.newline() + writer.begintag('starts') + writer.write(" ".join(map(str, self.startStates))) + writer.endtag('starts') + writer.newline() + for i, s in enumerate(self.stateTrans): + writer.begintag('row', _i=i) + # no newlines here + writer.write(" ".join(map(str, s))) + writer.endtag('row') + writer.newline() + writer.endtag('fsm') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont, version=2.0): + if name == 'info': + getSimple(self, attrs, *pass_attrs_info) + elif name == 'fsminfo': + getSimple(self, attrs, *pass_attrs_fsm) + elif name == 'colmap': + e = content_string(content) + for w in e.split(): + x = w.split('=') + if len(x) != 2 or x[0] == '' or x[1] == '': continue + self.colMap[x[0]] = int(x[1]) + elif name == 'staterulemap': + for e in content: + if not isinstance(e, tuple): continue + tag, a, c = e + if tag == 'state': + self.rules.append(map(int, a['rules'].split(" "))) + elif name == 'rules': + for element in content: + if not isinstance(element, tuple): continue + tag, a, c = element + if tag != 'rule': continue + self.rulePreContexts.append(int(a['precontext'])) + self.ruleSortKeys.append(int(a['sortkey'])) + con = "" + act = "" + for e in c: + if not isinstance(e, tuple): continue + tag, a, subc = e + if tag == 'constraint': + con = readcode(subc) + elif tag == 'action': + act = readcode(subc) + self.actions.append(act) + self.ruleConstraints.append(con) + elif name == 'passConstraint': + self.passConstraints = readcode(content) + elif name == 'fsm': + for 
element in content: + if not isinstance(element, tuple): continue + tag, a, c = element + if tag == 'row': + s = array('H') + e = content_string(c) + s.extend(map(int, e.split())) + self.stateTrans.append(s) + elif tag == 'starts': + s = [] + e = content_string(c) + s.extend(map(int, e.split())) + self.startStates = s + diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/S__i_l_l.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/S__i_l_l.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/S__i_l_l.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/S__i_l_l.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,79 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from . import DefaultTable +from . import grUtils +import struct + +Sill_hdr = ''' + > + version: 16.16F +''' + +class table_S__i_l_l(DefaultTable.DefaultTable): + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.langs = {} + + def decompile(self, data, ttFont): + (_, data) = sstruct.unpack2(Sill_hdr, data, self) + numLangs, = struct.unpack('>H', data[:2]) + data = data[8:] + maxsetting = 0 + langinfo = [] + for i in range(numLangs): + (langcode, numsettings, offset) = struct.unpack(">4sHH", + data[i * 8:(i+1) * 8]) + offset = int(offset / 8) - (numLangs + 1) + langcode = langcode.replace(b'\000', b'') + langinfo.append((langcode, numsettings, offset)) + maxsetting = max(maxsetting, offset + numsettings) + data = data[numLangs * 8:] + finfo = [] + for i in range(maxsetting): + (fid, val, _) = struct.unpack(">LHH", data[i * 8:(i+1) * 8]) + finfo.append((fid, val)) + self.langs = {} + for c, n, o in langinfo: + self.langs[c] = [] + for i in range(o, o+n): + self.langs[c].append(finfo[i]) + + def compile(self, ttFont): + ldat = "" + fdat = "" + offset = 0 + for c, inf in 
sorted(self.langs.items()): + ldat += struct.pack(">4sHH", c.encode('utf8'), len(inf), 8 * (offset + len(self.langs) + 1)) + for fid, val in inf: + fdat += struct.pack(">LHH", fid, val, 0) + offset += len(inf) + return sstruct.pack(Sill_hdr, self) + grUtils.bininfo(len(self.langs)) + \ + ldat + fdat + + def toXML(self, writer, ttFont): + writer.simpletag('version', version=self.version) + writer.newline() + for c, inf in sorted(self.langs.items()): + writer.begintag('lang', name=c) + writer.newline() + for fid, val in inf: + writer.simpletag('feature', fid=grUtils.num2tag(fid), val=val) + writer.newline() + writer.endtag('lang') + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'version': + self.version = float(safeEval(attrs['version'])) + elif name == 'lang': + c = attrs['name'] + self.langs[c] = [] + for element in content: + if not isinstance(element, tuple): continue + tag, a, subcontent = element + if tag == 'feature': + self.langs[c].append((grUtils.tag2num(a['fid']), + int(safeEval(a['val'])))) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/S_T_A_T_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/S_T_A_T_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/S_T_A_T_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/S_T_A_T_.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_S_T_A_T_(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/S_V_G_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/S_V_G_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/S_V_G_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/S_V_G_.py 2018-01-08 12:40:40.000000000 +0000 @@ -8,6 +8,11 @@ import xml.etree.ElementTree as ET import struct import re +import logging 
+ + +log = logging.getLogger(__name__) + __doc__=""" Compiles/decompiles version 0 and 1 SVG tables from/to XML. @@ -94,6 +99,10 @@ class table_S_V_G_(DefaultTable.DefaultTable): + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.colorPalettes = None + def decompile(self, data, ttFont): self.docList = None self.colorPalettes = None @@ -101,10 +110,15 @@ self.version = struct.unpack(">H", data[pos:pos+2])[0] if self.version == 1: + # This is pre-standardization version of the table; and obsolete. But we decompile it for now. + # https://wiki.mozilla.org/SVGOpenTypeFonts self.decompile_format_1(data, ttFont) else: if self.version != 0: - print("Unknown SVG table version '%s'. Decompiling as version 0." % (self.version)) + log.warning( + "Unknown SVG table version '%s'. Decompiling as version 0.", self.version) + # This is the standardized version of the table; and current. + # https://www.microsoft.com/typography/otspec/svg.htm self.decompile_format_0(data, ttFont) def decompile_format_0(self, data, ttFont): @@ -141,10 +155,8 @@ pos += 4 def decompile_format_1(self, data, ttFont): - pos = 2 - self.numEntries = struct.unpack(">H", data[pos:pos+2])[0] - pos += 2 - self.decompileEntryList(data, pos) + self.offsetToSVGDocIndex = 2 + self.decompileEntryList(data) def decompileEntryList(self, data): # data starts with the first entry of the entry list. 
@@ -273,7 +285,7 @@ writer.newline() for uiNameID in self.colorPalettes.colorParamUINameIDs: writer.begintag("colorParamUINameID") - writer.writeraw(str(uiNameID)) + writer._writeraw(str(uiNameID)) writer.endtag("colorParamUINameID") writer.newline() for colorPalette in self.colorPalettes.colorPaletteList: @@ -294,10 +306,6 @@ writer.endtag("colorPalettes") writer.newline() - else: - writer.begintag("colorPalettes") - writer.endtag("colorPalettes") - writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "svgDoc": @@ -314,7 +322,7 @@ if self.colorPalettes.numColorParams == 0: self.colorPalettes = None else: - print("Unknown", name, content) + log.warning("Unknown %s %s", name, content) class DocumentIndexEntry(object): def __init__(self): @@ -335,7 +343,7 @@ def fromXML(self, name, attrs, content, ttFont): for element in content: - if isinstance(element, type("")): + if not isinstance(element, tuple): continue name, attrib, content = element if name == "colorParamUINameID": @@ -344,7 +352,7 @@ elif name == "colorPalette": colorPalette = ColorPalette() self.colorPaletteList.append(colorPalette) - colorPalette.fromXML((name, attrib, content), ttFont) + colorPalette.fromXML(name, attrib, content, ttFont) self.numColorParams = len(self.colorParamUINameIDs) self.numColorPalettes = len(self.colorPaletteList) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_t_r_a_k.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_t_r_a_k.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_t_r_a_k.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_t_r_a_k.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,314 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi +from fontTools.misc.textTools import safeEval +from fontTools.ttLib import TTLibError 
+from . import DefaultTable +import struct +try: + from collections.abc import MutableMapping +except ImportError: + from UserDict import DictMixin as MutableMapping + + +# Apple's documentation of 'trak': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6trak.html + +TRAK_HEADER_FORMAT = """ + > # big endian + version: 16.16F + format: H + horizOffset: H + vertOffset: H + reserved: H +""" + +TRAK_HEADER_FORMAT_SIZE = sstruct.calcsize(TRAK_HEADER_FORMAT) + + +TRACK_DATA_FORMAT = """ + > # big endian + nTracks: H + nSizes: H + sizeTableOffset: L +""" + +TRACK_DATA_FORMAT_SIZE = sstruct.calcsize(TRACK_DATA_FORMAT) + + +TRACK_TABLE_ENTRY_FORMAT = """ + > # big endian + track: 16.16F + nameIndex: H + offset: H +""" + +TRACK_TABLE_ENTRY_FORMAT_SIZE = sstruct.calcsize(TRACK_TABLE_ENTRY_FORMAT) + + +# size values are actually '16.16F' fixed-point values, but here I do the +# fixedToFloat conversion manually instead of relying on sstruct +SIZE_VALUE_FORMAT = ">l" +SIZE_VALUE_FORMAT_SIZE = struct.calcsize(SIZE_VALUE_FORMAT) + +# per-Size values are in 'FUnits', i.e. 
16-bit signed integers +PER_SIZE_VALUE_FORMAT = ">h" +PER_SIZE_VALUE_FORMAT_SIZE = struct.calcsize(PER_SIZE_VALUE_FORMAT) + + +class table__t_r_a_k(DefaultTable.DefaultTable): + dependencies = ['name'] + + def compile(self, ttFont): + dataList = [] + offset = TRAK_HEADER_FORMAT_SIZE + for direction in ('horiz', 'vert'): + trackData = getattr(self, direction + 'Data', TrackData()) + offsetName = direction + 'Offset' + # set offset to 0 if None or empty + if not trackData: + setattr(self, offsetName, 0) + continue + # TrackData table format must be longword aligned + alignedOffset = (offset + 3) & ~3 + padding, offset = b"\x00"*(alignedOffset - offset), alignedOffset + setattr(self, offsetName, offset) + + data = trackData.compile(offset) + offset += len(data) + dataList.append(padding + data) + + self.reserved = 0 + tableData = bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + dataList) + return tableData + + def decompile(self, data, ttFont): + sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self) + for direction in ('horiz', 'vert'): + trackData = TrackData() + offset = getattr(self, direction + 'Offset') + if offset != 0: + trackData.decompile(data, offset) + setattr(self, direction + 'Data', trackData) + + def toXML(self, writer, ttFont, progress=None): + writer.simpletag('version', value=self.version) + writer.newline() + writer.simpletag('format', value=self.format) + writer.newline() + for direction in ('horiz', 'vert'): + dataName = direction + 'Data' + writer.begintag(dataName) + writer.newline() + trackData = getattr(self, dataName, TrackData()) + trackData.toXML(writer, ttFont) + writer.endtag(dataName) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == 'version': + self.version = safeEval(attrs['value']) + elif name == 'format': + self.format = safeEval(attrs['value']) + elif name in ('horizData', 'vertData'): + trackData = TrackData() + setattr(self, name, trackData) + for element in content: + if 
not isinstance(element, tuple): + continue + name, attrs, content_ = element + trackData.fromXML(name, attrs, content_, ttFont) + + +class TrackData(MutableMapping): + + def __init__(self, initialdata={}): + self._map = dict(initialdata) + + def compile(self, offset): + nTracks = len(self) + sizes = self.sizes() + nSizes = len(sizes) + + # offset to the start of the size subtable + offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE*nTracks + trackDataHeader = sstruct.pack( + TRACK_DATA_FORMAT, + {'nTracks': nTracks, 'nSizes': nSizes, 'sizeTableOffset': offset}) + + entryDataList = [] + perSizeDataList = [] + # offset to per-size tracking values + offset += SIZE_VALUE_FORMAT_SIZE*nSizes + # sort track table entries by track value + for track, entry in sorted(self.items()): + assert entry.nameIndex is not None + entry.track = track + entry.offset = offset + entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)] + # sort per-size values by size + for size, value in sorted(entry.items()): + perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)] + offset += PER_SIZE_VALUE_FORMAT_SIZE*nSizes + # sort size values + sizeDataList = [struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes)] + + data = bytesjoin([trackDataHeader] + entryDataList + sizeDataList + perSizeDataList) + return data + + def decompile(self, data, offset): + # initial offset is from the start of trak table to the current TrackData + trackDataHeader = data[offset:offset+TRACK_DATA_FORMAT_SIZE] + if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE: + raise TTLibError('not enough data to decompile TrackData header') + sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self) + offset += TRACK_DATA_FORMAT_SIZE + + nSizes = self.nSizes + sizeTableOffset = self.sizeTableOffset + sizeTable = [] + for i in range(nSizes): + sizeValueData = data[sizeTableOffset:sizeTableOffset+SIZE_VALUE_FORMAT_SIZE] + if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE: + raise 
TTLibError('not enough data to decompile TrackData size subtable') + sizeValue, = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData) + sizeTable.append(fi2fl(sizeValue, 16)) + sizeTableOffset += SIZE_VALUE_FORMAT_SIZE + + for i in range(self.nTracks): + entry = TrackTableEntry() + entryData = data[offset:offset+TRACK_TABLE_ENTRY_FORMAT_SIZE] + if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE: + raise TTLibError('not enough data to decompile TrackTableEntry record') + sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry) + perSizeOffset = entry.offset + for j in range(nSizes): + size = sizeTable[j] + perSizeValueData = data[perSizeOffset:perSizeOffset+PER_SIZE_VALUE_FORMAT_SIZE] + if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE: + raise TTLibError('not enough data to decompile per-size track values') + perSizeValue, = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData) + entry[size] = perSizeValue + perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE + self[entry.track] = entry + offset += TRACK_TABLE_ENTRY_FORMAT_SIZE + + def toXML(self, writer, ttFont, progress=None): + nTracks = len(self) + nSizes = len(self.sizes()) + writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes)) + writer.newline() + for track, entry in sorted(self.items()): + assert entry.nameIndex is not None + entry.track = track + entry.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name != 'trackEntry': + return + entry = TrackTableEntry() + entry.fromXML(name, attrs, content, ttFont) + self[entry.track] = entry + + def sizes(self): + if not self: + return frozenset() + tracks = list(self.tracks()) + sizes = self[tracks.pop(0)].sizes() + for track in tracks: + entrySizes = self[track].sizes() + if sizes != entrySizes: + raise TTLibError( + "'trak' table entries must specify the same sizes: " + "%s != %s" % (sorted(sizes), sorted(entrySizes))) + return frozenset(sizes) + + def __getitem__(self, track): + return self._map[track] + + def __delitem__(self, 
track): + del self._map[track] + + def __setitem__(self, track, entry): + self._map[track] = entry + + def __len__(self): + return len(self._map) + + def __iter__(self): + return iter(self._map) + + def keys(self): + return self._map.keys() + + tracks = keys + + def __repr__(self): + return "TrackData({})".format(self._map if self else "") + + +class TrackTableEntry(MutableMapping): + + def __init__(self, values={}, nameIndex=None): + self.nameIndex = nameIndex + self._map = dict(values) + + def toXML(self, writer, ttFont, progress=None): + name = ttFont["name"].getDebugName(self.nameIndex) + writer.begintag( + "trackEntry", + (('value', self.track), ('nameIndex', self.nameIndex))) + writer.newline() + if name: + writer.comment(name) + writer.newline() + for size, perSizeValue in sorted(self.items()): + writer.simpletag("track", size=size, value=perSizeValue) + writer.newline() + writer.endtag("trackEntry") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.track = safeEval(attrs['value']) + self.nameIndex = safeEval(attrs['nameIndex']) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, _ = element + if name != 'track': + continue + size = safeEval(attrs['size']) + self[size] = safeEval(attrs['value']) + + def __getitem__(self, size): + return self._map[size] + + def __delitem__(self, size): + del self._map[size] + + def __setitem__(self, size, value): + self._map[size] = value + + def __len__(self): + return len(self._map) + + def __iter__(self): + return iter(self._map) + + def keys(self): + return self._map.keys() + + sizes = keys + + def __repr__(self): + return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.nameIndex == other.nameIndex and dict(self) == dict(other) + + def __ne__(self, other): + result = self.__eq__(other) + return result if result is 
NotImplemented else not result diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__0.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I__0.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__0.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I__0.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,9 +1,16 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI0 is the index table containing the lengths and offsets for the glyph +programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained +in the TSI1 table. +""" from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from . import DefaultTable import struct -tsi0Format = '>HHl' +tsi0Format = '>HHL' def fixlongs(glyphID, textLength, textOffset): return int(glyphID), int(textLength), textOffset @@ -22,7 +29,7 @@ indices.append((glyphID, textLength, textOffset)) data = data[size:] assert len(data) == 0 - assert indices[-5] == (0XFFFE, 0, -1409540300), "bad magic number" # 0xABFC1F34 + assert indices[-5] == (0XFFFE, 0, 0xABFC1F34), "bad magic number" self.indices = indices[:-5] self.extra_indices = indices[-4:] @@ -30,11 +37,11 @@ if not hasattr(self, "indices"): # We have no corresponding table (TSI1 or TSI3); let's return # no data, which effectively means "ignore us". 
- return "" + return b"" data = b"" for index, textLength, textOffset in self.indices: data = data + struct.pack(tsi0Format, index, textLength, textOffset) - data = data + struct.pack(tsi0Format, 0XFFFE, 0, -1409540300) # 0xABFC1F34 + data = data + struct.pack(tsi0Format, 0XFFFE, 0, 0xABFC1F34) for index, textLength, textOffset in self.extra_indices: data = data + struct.pack(tsi0Format, index, textLength, textOffset) return data diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__1.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I__1.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__1.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I__1.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,40 +1,81 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI1 contains the text of the glyph programs in the form of low-level assembly +code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'. +""" from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from . import DefaultTable +from fontTools.misc.loggingTools import LogMixin + -class table_T_S_I__1(DefaultTable.DefaultTable): +class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable): extras = {0xfffa: "ppgm", 0xfffb: "cvt", 0xfffc: "reserved", 0xfffd: "fpgm"} indextable = "TSI0" def decompile(self, data, ttFont): + totalLength = len(data) indextable = ttFont[self.indextable] - self.glyphPrograms = {} - for i in range(len(indextable.indices)): - glyphID, textLength, textOffset = indextable.indices[i] - if textLength == 0x8000: - # Ugh. Hi Beat! - textLength = indextable.indices[i+1][1] - if textLength > 0x8000: - pass # XXX Hmmm. 
- text = data[textOffset:textOffset+textLength] - assert len(text) == textLength - if text: - self.glyphPrograms[ttFont.getGlyphName(glyphID)] = text - - self.extraPrograms = {} - for i in range(len(indextable.extra_indices)): - extraCode, textLength, textOffset = indextable.extra_indices[i] - if textLength == 0x8000: - if self.extras[extraCode] == "fpgm": # this is the last one - textLength = len(data) - textOffset + for indices, isExtra in zip( + (indextable.indices, indextable.extra_indices), (False, True)): + programs = {} + for i, (glyphID, textLength, textOffset) in enumerate(indices): + if isExtra: + name = self.extras[glyphID] else: - textLength = indextable.extra_indices[i+1][1] - text = data[textOffset:textOffset+textLength] - assert len(text) == textLength - if text: - self.extraPrograms[self.extras[extraCode]] = text + name = ttFont.getGlyphName(glyphID) + if textOffset > totalLength: + self.log.warning("textOffset > totalLength; %r skipped" % name) + continue + if textLength < 0x8000: + # If the length stored in the record is less than 32768, then use + # that as the length of the record. + pass + elif textLength == 0x8000: + # If the length is 32768, compute the actual length as follows: + isLast = i == (len(indices)-1) + if isLast: + if isExtra: + # For the last "extra" record (the very last record of the + # table), the length is the difference between the total + # length of the TSI1 table and the textOffset of the final + # record. + nextTextOffset = totalLength + else: + # For the last "normal" record (the last record just prior + # to the record containing the "magic number"), the length + # is the difference between the textOffset of the record + # following the "magic number" (0xFFFE) record (i.e. the + # first "extra" record), and the textOffset of the last + # "normal" record. 
+ nextTextOffset = indextable.extra_indices[0][2] + else: + # For all other records with a length of 0x8000, the length is + # the difference between the textOffset of the record in + # question and the textOffset of the next record. + nextTextOffset = indices[i+1][2] + assert nextTextOffset >= textOffset, "entries not sorted by offset" + if nextTextOffset > totalLength: + self.log.warning( + "nextTextOffset > totalLength; %r truncated" % name) + nextTextOffset = totalLength + textLength = nextTextOffset - textOffset + else: + from fontTools import ttLib + raise ttLib.TTLibError( + "%r textLength (%d) must not be > 32768" % (name, textLength)) + text = data[textOffset:textOffset+textLength] + assert len(text) == textLength + text = tounicode(text, encoding='utf-8') + if text: + programs[name] = text + if isExtra: + self.extraPrograms = programs + else: + self.glyphPrograms = programs def compile(self, ttFont): if not hasattr(self, "glyphPrograms"): @@ -50,12 +91,12 @@ data = data + b"\015" # align on 2-byte boundaries, fill with return chars. Yum. name = glyphNames[i] if name in self.glyphPrograms: - text = tobytes(self.glyphPrograms[name]) + text = tobytes(self.glyphPrograms[name], encoding="utf-8") else: text = b"" textLength = len(text) if textLength >= 0x8000: - textLength = 0x8000 # XXX ??? + textLength = 0x8000 indices.append((i, textLength, len(data))) data = data + text @@ -66,12 +107,12 @@ data = data + b"\015" # align on 2-byte boundaries, fill with return chars. code, name = codes[i] if name in self.extraPrograms: - text = tobytes(self.extraPrograms[name]) + text = tobytes(self.extraPrograms[name], encoding="utf-8") else: text = b"" textLength = len(text) if textLength >= 0x8000: - textLength = 0x8000 # XXX ??? 
+ textLength = 0x8000 extra_indices.append((code, textLength, len(data))) data = data + text indextable.set(indices, extra_indices) @@ -86,7 +127,7 @@ continue writer.begintag("glyphProgram", name=name) writer.newline() - writer.write_noindent(text.replace(b"\r", b"\n")) + writer.write_noindent(text.replace("\r", "\n")) writer.newline() writer.endtag("glyphProgram") writer.newline() @@ -98,7 +139,7 @@ continue writer.begintag("extraProgram", name=name) writer.newline() - writer.write_noindent(text.replace(b"\r", b"\n")) + writer.write_noindent(text.replace("\r", "\n")) writer.newline() writer.endtag("extraProgram") writer.newline() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__2.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I__2.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__2.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I__2.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,3 +1,10 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI2 is the index table containing the lengths and offsets for the glyph +programs that are contained in the TSI3 table. It uses the same format as +the TSI0 table. +""" from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools import ttLib diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__3.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I__3.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__3.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I__3.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,3 +1,8 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI3 contains the text of the glyph programs in the form of 'VTTTalk' code. 
+""" from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools import ttLib diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__5.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I__5.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I__5.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I__5.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,3 +1,8 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI5 contains the VTT character groups. +""" from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools.misc.textTools import safeEval @@ -23,7 +28,7 @@ glyphNames = ttFont.getGlyphOrder() a = array.array("H") for i in range(len(glyphNames)): - a.append(self.glyphGrouping[glyphNames[i]]) + a.append(self.glyphGrouping.get(glyphNames[i], 0)) if sys.byteorder != "big": a.byteswap() return a.tostring() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_B_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_B_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_B_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_B_.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from . 
import asciiTable +from .T_S_I_V_ import table_T_S_I_V_ -class table_T_S_I_B_(asciiTable.asciiTable): +class table_T_S_I_B_(table_T_S_I_V_): pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_D_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_D_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_D_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_D_.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from . import asciiTable +from .T_S_I_V_ import table_T_S_I_V_ -class table_T_S_I_D_(asciiTable.asciiTable): +class table_T_S_I_D_(table_T_S_I_V_): pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_J_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_J_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_J_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_J_.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from . import asciiTable +from .T_S_I_V_ import table_T_S_I_V_ -class table_T_S_I_J_(asciiTable.asciiTable): +class table_T_S_I_J_(table_T_S_I_V_): pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_P_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_P_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_P_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_P_.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from . 
import asciiTable +from .T_S_I_V_ import table_T_S_I_V_ -class table_T_S_I_P_(asciiTable.asciiTable): +class table_T_S_I_P_(table_T_S_I_V_): pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_S_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_S_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_S_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_S_.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,6 +1,6 @@ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * -from . import asciiTable +from .T_S_I_V_ import table_T_S_I_V_ -class table_T_S_I_S_(asciiTable.asciiTable): +class table_T_S_I_S_(table_T_S_I_V_): pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_V_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_V_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_S_I_V_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_S_I_V_.py 2018-01-08 12:40:40.000000000 +0000 @@ -3,4 +3,19 @@ from . import asciiTable class table_T_S_I_V_(asciiTable.asciiTable): - pass + + def toXML(self, writer, ttFont): + data = tostr(self.data) + # removing null bytes. XXX needed?? 
+ data = data.split('\0') + data = strjoin(data) + writer.begintag("source") + writer.newline() + writer.write_noindent(data.replace("\r", "\n")) + writer.newline() + writer.endtag("source") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + lines = strjoin(content).split("\n") + self.data = tobytes("\r".join(lines[1:-1])) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/T_T_F_A_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_T_F_A_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/T_T_F_A_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/T_T_F_A_.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,6 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from . import asciiTable + +class table_T_T_F_A_(asciiTable.asciiTable): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/ttProgram.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/ttProgram.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/ttProgram.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/ttProgram.py 2018-01-08 12:40:40.000000000 +0000 @@ -5,6 +5,10 @@ from fontTools.misc.textTools import num2binary, binary2num, readHex import array import re +import logging + + +log = logging.getLogger(__name__) # first, the list of instructions that eat bytes or words from the instruction stream @@ -190,6 +194,8 @@ _pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/") +_indentRE = re.compile("^FDEF|IF|ELSE\[ \]\t.+") +_unindentRE = re.compile("^ELSE|ENDF|EIF\[ \]\t.+") def _skipWhite(data, pos): m = _whiteRE.match(data, pos) @@ -218,43 +224,74 @@ self._assemble() return self.bytecode.tostring() - def getAssembly(self, preserve=False): + def getAssembly(self, preserve=True): if not hasattr(self, "assembly"): self._disassemble(preserve=preserve) return self.assembly def toXML(self, writer, ttFont): if not 
hasattr (ttFont, "disassembleInstructions") or ttFont.disassembleInstructions: - assembly = self.getAssembly() - writer.begintag("assembly") - writer.newline() - i = 0 - nInstr = len(assembly) - while i < nInstr: - instr = assembly[i] - writer.write(instr) + try: + assembly = self.getAssembly() + except: + import traceback + tmp = StringIO() + traceback.print_exc(file=tmp) + msg = "An exception occurred during the decompilation of glyph program:\n\n" + msg += tmp.getvalue() + log.error(msg) + writer.begintag("bytecode") writer.newline() - m = _pushCountPat.match(instr) - i = i + 1 - if m: - nValues = int(m.group(1)) - line = [] - j = 0 - for j in range(nValues): - if j and not (j % 25): - writer.write(' '.join(line)) - writer.newline() - line = [] - line.append(assembly[i+j]) - writer.write(' '.join(line)) + writer.comment(msg.strip()) + writer.newline() + writer.dumphex(self.getBytecode()) + writer.endtag("bytecode") + writer.newline() + else: + if not assembly: + return + writer.begintag("assembly") + writer.newline() + i = 0 + indent = 0 + nInstr = len(assembly) + while i < nInstr: + instr = assembly[i] + if _unindentRE.match(instr): + indent -= 1 + writer.write(writer.indentwhite * indent) + writer.write(instr) writer.newline() - i = i + j + 1 - writer.endtag("assembly") + m = _pushCountPat.match(instr) + i = i + 1 + if m: + nValues = int(m.group(1)) + line = [] + j = 0 + for j in range(nValues): + if j and not (j % 25): + writer.write(writer.indentwhite * indent) + writer.write(' '.join(line)) + writer.newline() + line = [] + line.append(assembly[i+j]) + writer.write(writer.indentwhite * indent) + writer.write(' '.join(line)) + writer.newline() + i = i + j + 1 + if _indentRE.match(instr): + indent += 1 + writer.endtag("assembly") + writer.newline() else: + bytecode = self.getBytecode() + if not bytecode: + return writer.begintag("bytecode") writer.newline() - writer.dumphex(self.getBytecode()) + writer.dumphex(bytecode) writer.endtag("bytecode") + 
writer.newline() def fromXML(self, name, attrs, content, ttFont): if name == "assembly": @@ -266,7 +303,7 @@ self.fromBytecode(readHex(content)) def _assemble(self): - assembly = self.assembly + assembly = getattr(self, 'assembly', []) if isinstance(assembly, type([])): assembly = ' '.join(assembly) bytecode = [] @@ -391,7 +428,7 @@ def _disassemble(self, preserve=False): assembly = [] i = 0 - bytecode = self.bytecode + bytecode = getattr(self, 'bytecode', []) numBytecode = len(bytecode) while i < numBytecode: op = bytecode[i] @@ -477,6 +514,15 @@ __nonzero__ = __bool__ + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result + def _test(): """ diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/TupleVariation.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/TupleVariation.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/TupleVariation.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/TupleVariation.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,623 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +from fontTools.misc.textTools import safeEval +import array +import io +import logging +import struct +import sys + + +# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm + +EMBEDDED_PEAK_TUPLE = 0x8000 +INTERMEDIATE_REGION = 0x4000 +PRIVATE_POINT_NUMBERS = 0x2000 + +DELTAS_ARE_ZERO = 0x80 +DELTAS_ARE_WORDS = 0x40 +DELTA_RUN_COUNT_MASK = 0x3f + +POINTS_ARE_WORDS = 0x80 +POINT_RUN_COUNT_MASK = 0x7f + +TUPLES_SHARE_POINT_NUMBERS = 0x8000 +TUPLE_COUNT_MASK = 0x0fff +TUPLE_INDEX_MASK = 0x0fff + +log = logging.getLogger(__name__) + + +class TupleVariation(object): + def __init__(self, axes, 
coordinates): + self.axes = axes.copy() + self.coordinates = coordinates[:] + + def __repr__(self): + axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])) + return "" % (axes, self.coordinates) + + def __eq__(self, other): + return self.coordinates == other.coordinates and self.axes == other.axes + + def getUsedPoints(self): + result = set() + for i, point in enumerate(self.coordinates): + if point is not None: + result.add(i) + return result + + def hasImpact(self): + """Returns True if this TupleVariation has any visible impact. + + If the result is False, the TupleVariation can be omitted from the font + without making any visible difference. + """ + for c in self.coordinates: + if c is not None: + return True + return False + + def toXML(self, writer, axisTags): + writer.begintag("tuple") + writer.newline() + for axis in axisTags: + value = self.axes.get(axis) + if value is not None: + minValue, value, maxValue = (float(v) for v in value) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + if minValue == defaultMinValue and maxValue == defaultMaxValue: + writer.simpletag("coord", axis=axis, value=value) + else: + writer.simpletag("coord", axis=axis, value=value, min=minValue, max=maxValue) + writer.newline() + wrote_any_deltas = False + for i, delta in enumerate(self.coordinates): + if type(delta) == tuple and len(delta) == 2: + writer.simpletag("delta", pt=i, x=delta[0], y=delta[1]) + writer.newline() + wrote_any_deltas = True + elif type(delta) == int: + writer.simpletag("delta", cvt=i, value=delta) + writer.newline() + wrote_any_deltas = True + elif delta is not None: + log.error("bad delta format") + writer.comment("bad delta #%d" % i) + writer.newline() + wrote_any_deltas = True + if not wrote_any_deltas: + writer.comment("no deltas") + writer.newline() + writer.endtag("tuple") + writer.newline() + + def fromXML(self, name, attrs, _content): 
+ if name == "coord": + axis = attrs["axis"] + value = float(attrs["value"]) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + minValue = float(attrs.get("min", defaultMinValue)) + maxValue = float(attrs.get("max", defaultMaxValue)) + self.axes[axis] = (minValue, value, maxValue) + elif name == "delta": + if "pt" in attrs: + point = safeEval(attrs["pt"]) + x = safeEval(attrs["x"]) + y = safeEval(attrs["y"]) + self.coordinates[point] = (x, y) + elif "cvt" in attrs: + cvt = safeEval(attrs["cvt"]) + value = safeEval(attrs["value"]) + self.coordinates[cvt] = value + else: + log.warning("bad delta format: %s" % + ", ".join(sorted(attrs.keys()))) + + def compile(self, axisTags, sharedCoordIndices, sharedPoints): + tupleData = [] + + assert all(tag in axisTags for tag in self.axes.keys()), ("Unknown axis tag found.", self.axes.keys(), axisTags) + + coord = self.compileCoord(axisTags) + if coord in sharedCoordIndices: + flags = sharedCoordIndices[coord] + else: + flags = EMBEDDED_PEAK_TUPLE + tupleData.append(coord) + + intermediateCoord = self.compileIntermediateCoord(axisTags) + if intermediateCoord is not None: + flags |= INTERMEDIATE_REGION + tupleData.append(intermediateCoord) + + points = self.getUsedPoints() + if sharedPoints == points: + # Only use the shared points if they are identical to the actually used points + auxData = self.compileDeltas(sharedPoints) + usesSharedPoints = True + else: + flags |= PRIVATE_POINT_NUMBERS + numPointsInGlyph = len(self.coordinates) + auxData = self.compilePoints(points, numPointsInGlyph) + self.compileDeltas(points) + usesSharedPoints = False + + tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(tupleData) + return (tupleData, auxData, usesSharedPoints) + + def compileCoord(self, axisTags): + result = [] + for axis in axisTags: + _minValue, value, _maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + result.append(struct.pack(">h", 
floatToFixed(value, 14))) + return bytesjoin(result) + + def compileIntermediateCoord(self, axisTags): + needed = False + for axis in axisTags: + minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + if (minValue != defaultMinValue) or (maxValue != defaultMaxValue): + needed = True + break + if not needed: + return None + minCoords = [] + maxCoords = [] + for axis in axisTags: + minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) + minCoords.append(struct.pack(">h", floatToFixed(minValue, 14))) + maxCoords.append(struct.pack(">h", floatToFixed(maxValue, 14))) + return bytesjoin(minCoords + maxCoords) + + @staticmethod + def decompileCoord_(axisTags, data, offset): + coord = {} + pos = offset + for axis in axisTags: + coord[axis] = fixedToFloat(struct.unpack(">h", data[pos:pos+2])[0], 14) + pos += 2 + return coord, pos + + @staticmethod + def compilePoints(points, numPointsInGlyph): + # If the set consists of all points in the glyph, it gets encoded with + # a special encoding: a single zero byte. + if len(points) == numPointsInGlyph: + return b"\0" + + # In the 'gvar' table, the packing of point numbers is a little surprising. + # It consists of multiple runs, each being a delta-encoded list of integers. + # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as + # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1. + # There are two types of runs, with values being either 8 or 16 bit unsigned + # integers. + points = list(points) + points.sort() + numPoints = len(points) + + # The binary representation starts with the total number of points in the set, + # encoded into one or two bytes depending on the value. 
+ if numPoints < 0x80: + result = [bytechr(numPoints)] + else: + result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)] + + MAX_RUN_LENGTH = 127 + pos = 0 + lastValue = 0 + while pos < numPoints: + run = io.BytesIO() + runLength = 0 + useByteEncoding = None + while pos < numPoints and runLength <= MAX_RUN_LENGTH: + curValue = points[pos] + delta = curValue - lastValue + if useByteEncoding is None: + useByteEncoding = 0 <= delta <= 0xff + if useByteEncoding and (delta > 0xff or delta < 0): + # we need to start a new run (which will not use byte encoding) + break + # TODO This never switches back to a byte-encoding from a short-encoding. + # That's suboptimal. + if useByteEncoding: + run.write(bytechr(delta)) + else: + run.write(bytechr(delta >> 8)) + run.write(bytechr(delta & 0xff)) + lastValue = curValue + pos += 1 + runLength += 1 + if useByteEncoding: + runHeader = bytechr(runLength - 1) + else: + runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS) + result.append(runHeader) + result.append(run.getvalue()) + + return bytesjoin(result) + + @staticmethod + def decompilePoints_(numPoints, data, offset, tableTag): + """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)""" + assert tableTag in ('cvar', 'gvar') + pos = offset + numPointsInData = byteord(data[pos]) + pos += 1 + if (numPointsInData & POINTS_ARE_WORDS) != 0: + numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | byteord(data[pos]) + pos += 1 + if numPointsInData == 0: + return (range(numPoints), pos) + + result = [] + while len(result) < numPointsInData: + runHeader = byteord(data[pos]) + pos += 1 + numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1 + point = 0 + if (runHeader & POINTS_ARE_WORDS) != 0: + points = array.array("H") + pointsSize = numPointsInRun * 2 + else: + points = array.array("B") + pointsSize = numPointsInRun + points.fromstring(data[pos:pos+pointsSize]) + if sys.byteorder != "big": + points.byteswap() + + assert len(points) 
== numPointsInRun + pos += pointsSize + + result.extend(points) + + # Convert relative to absolute + absolute = [] + current = 0 + for delta in result: + current += delta + absolute.append(current) + result = absolute + del absolute + + badPoints = {str(p) for p in result if p < 0 or p >= numPoints} + if badPoints: + log.warning("point %s out of range in '%s' table" % + (",".join(sorted(badPoints)), tableTag)) + return (result, pos) + + def compileDeltas(self, points): + deltaX = [] + deltaY = [] + for p in sorted(list(points)): + c = self.coordinates[p] + if type(c) is tuple and len(c) == 2: + deltaX.append(c[0]) + deltaY.append(c[1]) + elif type(c) is int: + deltaX.append(c) + elif c is not None: + raise ValueError("invalid type of delta: %s" % type(c)) + return self.compileDeltaValues_(deltaX) + self.compileDeltaValues_(deltaY) + + @staticmethod + def compileDeltaValues_(deltas): + """[value1, value2, value3, ...] --> bytestring + + Emits a sequence of runs. Each run starts with a + byte-sized header whose 6 least significant bits + (header & 0x3F) indicate how many values are encoded + in this run. The stored length is the actual length + minus one; run lengths are thus in the range [1..64]. + If the header byte has its most significant bit (0x80) + set, all values in this run are zero, and no data + follows. Otherwise, the header byte is followed by + ((header & 0x3F) + 1) signed values. If (header & + 0x40) is clear, the delta values are stored as signed + bytes; if (header & 0x40) is set, the delta values are + signed 16-bit integers. + """ # Explaining the format because the 'gvar' spec is hard to understand. 
+ stream = io.BytesIO() + pos = 0 + while pos < len(deltas): + value = deltas[pos] + if value == 0: + pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, stream) + elif value >= -128 and value <= 127: + pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, stream) + else: + pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, stream) + return stream.getvalue() + + @staticmethod + def encodeDeltaRunAsZeroes_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64 and deltas[pos] == 0: + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(DELTAS_ARE_ZERO | (runLength - 1))) + return pos + + @staticmethod + def encodeDeltaRunAsBytes_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64: + value = deltas[pos] + if value < -128 or value > 127: + break + # Within a byte-encoded run of deltas, a single zero + # is best stored literally as 0x00 value. However, + # if are two or more zeroes in a sequence, it is + # better to start a new run. For example, the sequence + # of deltas [15, 15, 0, 15, 15] becomes 6 bytes + # (04 0F 0F 00 0F 0F) when storing the zero value + # literally, but 7 bytes (01 0F 0F 80 01 0F 0F) + # when starting a new run. + if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0: + break + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(runLength - 1)) + for i in range(offset, pos): + stream.write(struct.pack('b', round(deltas[i]))) + return pos + + @staticmethod + def encodeDeltaRunAsWords_(deltas, offset, stream): + runLength = 0 + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and runLength < 64: + value = deltas[pos] + # Within a word-encoded run of deltas, it is easiest + # to start a new run (with a different encoding) + # whenever we encounter a zero value. 
For example, + # the sequence [0x6666, 0, 0x7777] needs 7 bytes when + # storing the zero literally (42 66 66 00 00 77 77), + # and equally 7 bytes when starting a new run + # (40 66 66 80 40 77 77). + if value == 0: + break + + # Within a word-encoded run of deltas, a single value + # in the range (-128..127) should be encoded literally + # because it is more compact. For example, the sequence + # [0x6666, 2, 0x7777] becomes 7 bytes when storing + # the value literally (42 66 66 00 02 77 77), but 8 bytes + # when starting a new run (40 66 66 00 02 40 77 77). + isByteEncodable = lambda value: value >= -128 and value <= 127 + if isByteEncodable(value) and pos+1 < numDeltas and isByteEncodable(deltas[pos+1]): + break + pos += 1 + runLength += 1 + assert runLength >= 1 and runLength <= 64 + stream.write(bytechr(DELTAS_ARE_WORDS | (runLength - 1))) + for i in range(offset, pos): + stream.write(struct.pack('>h', round(deltas[i]))) + return pos + + @staticmethod + def decompileDeltas_(numDeltas, data, offset): + """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)""" + result = [] + pos = offset + while len(result) < numDeltas: + runHeader = byteord(data[pos]) + pos += 1 + numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1 + if (runHeader & DELTAS_ARE_ZERO) != 0: + result.extend([0] * numDeltasInRun) + else: + if (runHeader & DELTAS_ARE_WORDS) != 0: + deltas = array.array("h") + deltasSize = numDeltasInRun * 2 + else: + deltas = array.array("b") + deltasSize = numDeltasInRun + deltas.fromstring(data[pos:pos+deltasSize]) + if sys.byteorder != "big": + deltas.byteswap() + assert len(deltas) == numDeltasInRun + pos += deltasSize + result.extend(deltas) + assert len(result) == numDeltas + return (result, pos) + + @staticmethod + def getTupleSize_(flags, axisCount): + size = 4 + if (flags & EMBEDDED_PEAK_TUPLE) != 0: + size += axisCount * 2 + if (flags & INTERMEDIATE_REGION) != 0: + size += axisCount * 4 + return size + + +def decompileSharedTuples(axisTags, 
sharedTupleCount, data, offset): + result = [] + for _ in range(sharedTupleCount): + t, offset = TupleVariation.decompileCoord_(axisTags, data, offset) + result.append(t) + return result + + +def compileSharedTuples(axisTags, variations): + coordCount = {} + for var in variations: + coord = var.compileCoord(axisTags) + coordCount[coord] = coordCount.get(coord, 0) + 1 + sharedCoords = [(count, coord) + for (coord, count) in coordCount.items() if count > 1] + sharedCoords.sort(reverse=True) + MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1 + sharedCoords = sharedCoords[:MAX_NUM_SHARED_COORDS] + return [c[1] for c in sharedCoords] # Strip off counts. + + +def compileTupleVariationStore(variations, pointCount, + axisTags, sharedTupleIndices, + useSharedPoints=True): + variations = [v for v in variations if v.hasImpact()] + if len(variations) == 0: + return (0, b"", b"") + + # Each glyph variation tuples modifies a set of control points. To + # indicate which exact points are getting modified, a single tuple + # can either refer to a shared set of points, or the tuple can + # supply its private point numbers. Because the impact of sharing + # can be positive (no need for a private point list) or negative + # (need to supply 0,0 deltas for unused points), it is not obvious + # how to determine which tuples should take their points from the + # shared pool versus have their own. Perhaps we should resort to + # brute force, and try all combinations? However, if a glyph has n + # variation tuples, we would need to try 2^n combinations (because + # each tuple may or may not be part of the shared set). How many + # variations tuples do glyphs have? + # + # Skia.ttf: {3: 1, 5: 11, 6: 41, 7: 62, 8: 387, 13: 1, 14: 3} + # JamRegular.ttf: {3: 13, 4: 122, 5: 1, 7: 4, 8: 1, 9: 1, 10: 1} + # BuffaloGalRegular.ttf: {1: 16, 2: 13, 4: 2, 5: 4, 6: 19, 7: 1, 8: 3, 9: 8} + # (Reading example: In Skia.ttf, 41 glyphs have 6 variation tuples). + # + + # Is this even worth optimizing? 
If we never use a shared point + # list, the private lists will consume 112K for Skia, 5K for + # BuffaloGalRegular, and 15K for JamRegular. If we always use a + # shared point list, the shared lists will consume 16K for Skia, + # 3K for BuffaloGalRegular, and 10K for JamRegular. However, in + # the latter case the delta arrays will become larger, but I + # haven't yet measured by how much. From gut feeling (which may be + # wrong), the optimum is to share some but not all points; + # however, then we would need to try all combinations. + # + # For the time being, we try two variants and then pick the better one: + # (a) each tuple supplies its own private set of points; + # (b) all tuples refer to a shared set of points, which consists of + # "every control point in the glyph that has explicit deltas". + usedPoints = set() + for v in variations: + usedPoints |= v.getUsedPoints() + tuples = [] + data = [] + someTuplesSharePoints = False + sharedPointVariation = None # To keep track of a variation that uses shared points + for v in variations: + privateTuple, privateData, _ = v.compile( + axisTags, sharedTupleIndices, sharedPoints=None) + sharedTuple, sharedData, usesSharedPoints = v.compile( + axisTags, sharedTupleIndices, sharedPoints=usedPoints) + if useSharedPoints and (len(sharedTuple) + len(sharedData)) < (len(privateTuple) + len(privateData)): + tuples.append(sharedTuple) + data.append(sharedData) + someTuplesSharePoints |= usesSharedPoints + sharedPointVariation = v + else: + tuples.append(privateTuple) + data.append(privateData) + if someTuplesSharePoints: + # Use the last of the variations that share points for compiling the packed point data + data = sharedPointVariation.compilePoints(usedPoints, len(sharedPointVariation.coordinates)) + bytesjoin(data) + tupleVariationCount = TUPLES_SHARE_POINT_NUMBERS | len(tuples) + else: + data = bytesjoin(data) + tupleVariationCount = len(tuples) + tuples = bytesjoin(tuples) + return tupleVariationCount, tuples, data 
+ + +def decompileTupleVariationStore(tableTag, axisTags, + tupleVariationCount, pointCount, sharedTuples, + data, pos, dataPos): + numAxes = len(axisTags) + result = [] + if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0: + sharedPoints, dataPos = TupleVariation.decompilePoints_( + pointCount, data, dataPos, tableTag) + else: + sharedPoints = [] + for _ in range(tupleVariationCount & TUPLE_COUNT_MASK): + dataSize, flags = struct.unpack(">HH", data[pos:pos+4]) + tupleSize = TupleVariation.getTupleSize_(flags, numAxes) + tupleData = data[pos : pos + tupleSize] + pointDeltaData = data[dataPos : dataPos + dataSize] + result.append(decompileTupleVariation_( + pointCount, sharedTuples, sharedPoints, + tableTag, axisTags, tupleData, pointDeltaData)) + pos += tupleSize + dataPos += dataSize + return result + + +def decompileTupleVariation_(pointCount, sharedTuples, sharedPoints, + tableTag, axisTags, data, tupleData): + assert tableTag in ("cvar", "gvar"), tableTag + flags = struct.unpack(">H", data[2:4])[0] + pos = 4 + if (flags & EMBEDDED_PEAK_TUPLE) == 0: + peak = sharedTuples[flags & TUPLE_INDEX_MASK] + else: + peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos) + if (flags & INTERMEDIATE_REGION) != 0: + start, pos = TupleVariation.decompileCoord_(axisTags, data, pos) + end, pos = TupleVariation.decompileCoord_(axisTags, data, pos) + else: + start, end = inferRegion_(peak) + axes = {} + for axis in axisTags: + region = start[axis], peak[axis], end[axis] + if region != (0.0, 0.0, 0.0): + axes[axis] = region + pos = 0 + if (flags & PRIVATE_POINT_NUMBERS) != 0: + points, pos = TupleVariation.decompilePoints_( + pointCount, tupleData, pos, tableTag) + else: + points = sharedPoints + + deltas = [None] * pointCount + + if tableTag == "cvar": + deltas_cvt, pos = TupleVariation.decompileDeltas_( + len(points), tupleData, pos) + for p, delta in zip(points, deltas_cvt): + if 0 <= p < pointCount: + deltas[p] = delta + + elif tableTag == "gvar": + deltas_x, 
pos = TupleVariation.decompileDeltas_( + len(points), tupleData, pos) + deltas_y, pos = TupleVariation.decompileDeltas_( + len(points), tupleData, pos) + for p, x, y in zip(points, deltas_x, deltas_y): + if 0 <= p < pointCount: + deltas[p] = (x, y) + + return TupleVariation(axes, deltas) + + +def inferRegion_(peak): + """Infer start and end for a (non-intermediate) region + + This helper function computes the applicability region for + variation tuples whose INTERMEDIATE_REGION flag is not set in the + TupleVariationHeader structure. Variation tuples apply only to + certain regions of the variation space; outside that region, the + tuple has no effect. To make the binary encoding more compact, + TupleVariationHeaders can omit the intermediateStartTuple and + intermediateEndTuple fields. + """ + start, end = {}, {} + for (axis, value) in peak.items(): + start[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 + end[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 + return (start, end) diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/V_D_M_X_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/V_D_M_X_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/V_D_M_X_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/V_D_M_X_.py 2018-01-08 12:40:40.000000000 +0000 @@ -176,10 +176,10 @@ writer.comment("recs=%d, startsz=%d, endsz=%d" % (recs, startsz, endsz)) writer.newline() - for yPelHeight in group.keys(): - yMax, yMin = group[yPelHeight] + for yPelHeight, (yMax, yMin) in sorted(group.items()): writer.simpletag( - "record", yPelHeight=yPelHeight, yMax=yMax, yMin=yMin) + "record", + [('yPelHeight', yPelHeight), ('yMax', yMax), ('yMin', yMin)]) writer.newline() writer.endtag("group") writer.newline() diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/_v_h_e_a.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_v_h_e_a.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/_v_h_e_a.py 2015-08-31 17:57:15.000000000 
+0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/_v_h_e_a.py 2018-01-08 12:40:40.000000000 +0000 @@ -2,11 +2,15 @@ from fontTools.misc.py23 import * from fontTools.misc import sstruct from fontTools.misc.textTools import safeEval +from fontTools.misc.fixedTools import ( + ensureVersionIsLong as fi2ve, versionToFixed as ve2fi) from . import DefaultTable +import math + vheaFormat = """ > # big endian - tableVersion: 16.16F + tableVersion: L ascent: h descent: h lineGap: h @@ -16,7 +20,7 @@ yMaxExtent: h caretSlopeRise: h caretSlopeRun: h - reserved0: h + caretOffset: h reserved1: h reserved2: h reserved3: h @@ -29,29 +33,26 @@ # Note: Keep in sync with table__h_h_e_a - dependencies = ['vmtx', 'glyf'] + dependencies = ['vmtx', 'glyf', 'CFF '] def decompile(self, data, ttFont): sstruct.unpack(vheaFormat, data, self) def compile(self, ttFont): - if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ')): self.recalc(ttFont) + self.tableVersion = fi2ve(self.tableVersion) return sstruct.pack(vheaFormat, self) def recalc(self, ttFont): - vtmxTable = ttFont['vmtx'] + if 'vmtx' in ttFont: + vmtxTable = ttFont['vmtx'] + self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values()) + + boundsHeightDict = {} if 'glyf' in ttFont: glyfTable = ttFont['glyf'] - INFINITY = 100000 - advanceHeightMax = 0 - minTopSideBearing = +INFINITY # arbitrary big number - minBottomSideBearing = +INFINITY # arbitrary big number - yMaxExtent = -INFINITY # arbitrary big negative number - for name in ttFont.getGlyphOrder(): - height, tsb = vtmxTable[name] - advanceHeightMax = max(advanceHeightMax, height) g = glyfTable[name] if g.numberOfContours == 0: continue @@ -59,32 +60,57 @@ # Composite glyph without extents set. # Calculate those. 
g.recalcBounds(glyfTable) + boundsHeightDict[name] = g.yMax - g.yMin + elif 'CFF ' in ttFont: + topDict = ttFont['CFF '].cff.topDictIndex[0] + for name in ttFont.getGlyphOrder(): + cs = topDict.CharStrings[name] + bounds = cs.calcBounds() + if bounds is not None: + boundsHeightDict[name] = int( + math.ceil(bounds[3]) - math.floor(bounds[1])) + + if boundsHeightDict: + minTopSideBearing = float('inf') + minBottomSideBearing = float('inf') + yMaxExtent = -float('inf') + for name, boundsHeight in boundsHeightDict.items(): + advanceHeight, tsb = vmtxTable[name] + bsb = advanceHeight - tsb - boundsHeight + extent = tsb + boundsHeight minTopSideBearing = min(minTopSideBearing, tsb) - bsb = height - tsb - (g.yMax - g.yMin) minBottomSideBearing = min(minBottomSideBearing, bsb) - extent = tsb + (g.yMax - g.yMin) yMaxExtent = max(yMaxExtent, extent) - - if yMaxExtent == -INFINITY: - # No glyph has outlines. - minTopSideBearing = 0 - minBottomSideBearing = 0 - yMaxExtent = 0 - - self.advanceHeightMax = advanceHeightMax self.minTopSideBearing = minTopSideBearing self.minBottomSideBearing = minBottomSideBearing self.yMaxExtent = yMaxExtent - else: - # XXX CFF recalc... - pass + + else: # No glyph has outlines. 
+ self.minTopSideBearing = 0 + self.minBottomSideBearing = 0 + self.yMaxExtent = 0 def toXML(self, writer, ttFont): formatstring, names, fixes = sstruct.getformat(vheaFormat) for name in names: value = getattr(self, name) + if name == "tableVersion": + value = fi2ve(value) + value = "0x%08x" % value writer.simpletag(name, value=value) writer.newline() def fromXML(self, name, attrs, content, ttFont): + if name == "tableVersion": + setattr(self, name, ve2fi(attrs["value"])) + return setattr(self, name, safeEval(attrs["value"])) + + # reserved0 is caretOffset for legacy reasons + @property + def reserved0(self): + return self.caretOffset + + @reserved0.setter + def reserved0(self, value): + self.caretOffset = value diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/tables/V_V_A_R_.py fonttools-3.21.2/Snippets/fontTools/ttLib/tables/V_V_A_R_.py --- fonttools-3.0/Snippets/fontTools/ttLib/tables/V_V_A_R_.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/tables/V_V_A_R_.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from .otBase import BaseTTXConverter + + +class table_V_V_A_R_(BaseTTXConverter): + pass diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/testdata/TestOTF-Regular.otx fonttools-3.21.2/Snippets/fontTools/ttLib/testdata/TestOTF-Regular.otx --- fonttools-3.0/Snippets/fontTools/ttLib/testdata/TestOTF-Regular.otx 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/testdata/TestOTF-Regular.otx 1970-01-01 00:00:00.000000000 +0000 @@ -1,519 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Copyright (c) 2015 by FontTools. No rights reserved. 
- - - Test OTF - - - Regular - - - FontTools: Test OTF: 2015 - - - Test OTF - - - Version 1.000 - - - TestOTF-Regular - - - Test OTF is not a trademark of FontTools. - - - FontTools - - - FontTools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - - - Test TTF - - - Copyright (c) 2015 by FontTools. No rights reserved. - - - Test OTF - - - Regular - - - FontTools: Test OTF: 2015 - - - Test OTF - - - Version 1.000 - - - TestOTF-Regular - - - Test OTF is not a trademark of FontTools. - - - FontTools - - - FontTools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 131 122 -131 hlineto - return - - - - - - 500 450 hmoveto - 750 -400 -750 vlineto - 50 50 rmoveto - 650 300 -650 vlineto - endchar - - - 0 endchar - - - 250 endchar - - - 723 55 hmoveto - -107 callsubr - 241 -122 rmoveto - -107 callsubr - 241 -122 rmoveto - -107 callsubr - endchar - - - 241 55 hmoveto - -107 callsubr - endchar - - - 250 endchar - - - - - - - - - - - - - - - - - - - - - - - - diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/testdata/TestTTF-Regular.ttx fonttools-3.21.2/Snippets/fontTools/ttLib/testdata/TestTTF-Regular.ttx --- fonttools-3.0/Snippets/fontTools/ttLib/testdata/TestTTF-Regular.ttx 2015-08-31 17:57:15.000000000 +0000 +++ 
fonttools-3.21.2/Snippets/fontTools/ttLib/testdata/TestTTF-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 @@ -1,553 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - SVTCA[0] /* SetFPVectorToAxis */ - - - - - - SVTCA[0] /* SetFPVectorToAxis */ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - SVTCA[0] /* SetFPVectorToAxis */ - SVTCA[1] /* SetFPVectorToAxis */ - - - - - - - - - - - - - SVTCA[0] /* SetFPVectorToAxis */ - SVTCA[1] /* SetFPVectorToAxis */ - - - - - - - - - - - - SVTCA[0] /* SetFPVectorToAxis */ - SVTCA[1] /* SetFPVectorToAxis */ - - - - - - - - - - Copyright (c) 2015 by FontTools. No rights reserved. - - - Test TTF - - - Regular - - - FontTools: Test TTF: 2015 - - - Test TTF - - - Version 1.000 - - - TestTTF-Regular - - - Test TTF is not a trademark of FontTools. - - - FontTools - - - FontTools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - - - Test TTF - - - Copyright (c) 2015 by FontTools. No rights reserved. - - - Test TTF - - - Regular - - - FontTools: Test TTF: 2015 - - - Test TTF - - - Version 1.000 - - - TestTTF-Regular - - - Test TTF is not a trademark of FontTools. 
- - - FontTools - - - FontTools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools - - - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/testdata/test_woff2_metadata.xml fonttools-3.21.2/Snippets/fontTools/ttLib/testdata/test_woff2_metadata.xml --- fonttools-3.0/Snippets/fontTools/ttLib/testdata/test_woff2_metadata.xml 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/testdata/test_woff2_metadata.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,103 +0,0 @@ - - - - - - - - - - - Description without language. - - - Description with "en" language. - - - Description with "fr" language. - - - - - License without language. - - - License with "en" language. - - - License with "fr" language. - - - - - Copyright without language. - - - Copyright with "en" language. - - - Copyright with "fr" language. - - - - - Trademark without language. - - - Trademark with "en" language. - - - Trademark with "fr" language. 
- - - - - Extension 1 - Name Without Language - Extension 1 - Name With "en" Language - Extension 1 - Name With "fr" Language - - Extension 1 - Item 1 - Name Without Language - Extension 1 - Item 1 - Name With "en" Language - Extension 1 - Item 1 - Name With "fr" Language - Extension 1 - Item 1 - Value Without Language - Extension 1 - Item 1 - Value With "en" Language - Extension 1 - Item 1 - Value With "fr" Language - - - Extension 1 - Item 2 - Name Without Language - Extension 1 - Item 2 - Name With "en" Language - Extension 1 - Item 2 - Name With "fr" Language - Extension 1 - Item 2 - Value Without Language - Extension 1 - Item 2 - Value With "en" Language - Extension 1 - Item 2 - Value With "fr" Language - - - - Extension 2 - Name Without Language - Extension 2 - Name With "en" Language - Extension 2 - Name With "fr" Language - - Extension 2 - Item 1 - Name Without Language - Extension 2 - Item 1 - Name With "en" Language - Extension 2 - Item 1 - Name With "fr" Language - Extension 2 - Item 1 - Value Without Language - Extension 2 - Item 1 - Value With "en" Language - Extension 2 - Item 1 - Value With "fr" Language - - - Extension 2 - Item 2 - Name Without Language - Extension 2 - Item 2 - Name With "en" Language - Extension 2 - Item 2 - Name With "fr" Language - Extension 2 - Item 2 - Value Without Language - Extension 2 - Item 2 - Value With "en" Language - Extension 2 - Item 2 - Value With "fr" Language - - - Extension 2 - Item 3 - Name Without Language - Extension 2 - Item 3 - Name With "en" Language - Extension 2 - Item 3 - Name With "fr" Language - Extension 2 - Item 3 - Value Without Language - Extension 2 - Item 3 - Value With "en" Language - - - diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/woff2.py fonttools-3.21.2/Snippets/fontTools/ttLib/woff2.py --- fonttools-3.0/Snippets/fontTools/ttLib/woff2.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/woff2.py 2018-01-08 12:40:40.000000000 +0000 @@ -13,6 +13,10 @@ 
WOFFFlavorData, sfntDirectoryFormat, sfntDirectorySize, SFNTDirectoryEntry, sfntDirectoryEntrySize, calcChecksum) from fontTools.ttLib.tables import ttProgram +import logging + + +log = logging.getLogger(__name__) haveBrotli = False try: @@ -28,8 +32,9 @@ def __init__(self, file, checkChecksums=1, fontNumber=-1): if not haveBrotli: - print('The WOFF2 decoder requires the Brotli Python extension, available at:\n' - 'https://github.com/google/brotli', file=sys.stderr) + log.error( + 'The WOFF2 decoder requires the Brotli Python extension, available at: ' + 'https://github.com/google/brotli') raise ImportError("No module named brotli") self.file = file @@ -106,7 +111,8 @@ self.ttFont['loca'] = WOFF2LocaTable() glyfTable = self.ttFont['glyf'] = WOFF2GlyfTable() glyfTable.reconstruct(data, self.ttFont) - glyfTable.padding = padding + if padding: + glyfTable.padding = padding data = glyfTable.compile(self.ttFont) return data @@ -132,8 +138,9 @@ def __init__(self, file, numTables, sfntVersion="\000\001\000\000", flavor=None, flavorData=None): if not haveBrotli: - print('The WOFF2 encoder requires the Brotli Python extension, available at:\n' - 'https://github.com/google/brotli', file=sys.stderr) + log.error( + 'The WOFF2 encoder requires the Brotli Python extension, available at: ' + 'https://github.com/google/brotli') raise ImportError("No module named brotli") self.file = file @@ -226,7 +233,14 @@ """ if self.sfntVersion == "OTTO": return - for tag in ('maxp', 'head', 'loca', 'glyf'): + + # make up glyph names required to decompile glyf table + self._decompileTable('maxp') + numGlyphs = self.ttFont['maxp'].numGlyphs + glyphOrder = ['.notdef'] + ["glyph%.5d" % i for i in range(1, numGlyphs)] + self.ttFont.setGlyphOrder(glyphOrder) + + for tag in ('head', 'loca', 'glyf'): self._decompileTable(tag) self.ttFont['glyf'].padding = padding for tag in ('glyf', 'loca'): diff -Nru fonttools-3.0/Snippets/fontTools/ttLib/woff2_test.py 
fonttools-3.21.2/Snippets/fontTools/ttLib/woff2_test.py --- fonttools-3.0/Snippets/fontTools/ttLib/woff2_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttLib/woff2_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,747 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools import ttLib -from .woff2 import (WOFF2Reader, woff2DirectorySize, woff2DirectoryFormat, - woff2FlagsSize, woff2UnknownTagSize, woff2Base128MaxSize, WOFF2DirectoryEntry, - getKnownTagIndex, packBase128, base128Size, woff2UnknownTagIndex, - WOFF2FlavorData, woff2TransformedTableTags, WOFF2GlyfTable, WOFF2LocaTable, - WOFF2Writer) -import unittest -import sstruct -import os -import random -import copy -from collections import OrderedDict - -haveBrotli = False -try: - import brotli - haveBrotli = True -except ImportError: - pass - - -# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires -# deprecation warnings if a program uses the old name. 
-if not hasattr(unittest.TestCase, 'assertRaisesRegex'): - unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - -current_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) -data_dir = os.path.join(current_dir, 'testdata') -TTX = os.path.join(data_dir, 'TestTTF-Regular.ttx') -OTX = os.path.join(data_dir, 'TestOTF-Regular.otx') -METADATA = os.path.join(data_dir, 'test_woff2_metadata.xml') - -TT_WOFF2 = BytesIO() -CFF_WOFF2 = BytesIO() - - -def setUpModule(): - if not haveBrotli: - raise unittest.SkipTest("No module named brotli") - assert os.path.exists(TTX) - assert os.path.exists(OTX) - # import TT-flavoured test font and save it as WOFF2 - ttf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - ttf.importXML(TTX, quiet=True) - ttf.flavor = "woff2" - ttf.save(TT_WOFF2, reorderTables=None) - # import CFF-flavoured test font and save it as WOFF2 - otf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - otf.importXML(OTX, quiet=True) - otf.flavor = "woff2" - otf.save(CFF_WOFF2, reorderTables=None) - - -class WOFF2ReaderTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.file = BytesIO(CFF_WOFF2.getvalue()) - cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - cls.font.importXML(OTX, quiet=True) - - def setUp(self): - self.file.seek(0) - - def test_bad_signature(self): - with self.assertRaisesRegex(ttLib.TTLibError, 'bad signature'): - WOFF2Reader(BytesIO(b"wOFF")) - - def test_not_enough_data_header(self): - incomplete_header = self.file.read(woff2DirectorySize - 1) - with self.assertRaisesRegex(ttLib.TTLibError, 'not enough data'): - WOFF2Reader(BytesIO(incomplete_header)) - - def test_incorrect_compressed_size(self): - data = self.file.read(woff2DirectorySize) - header = sstruct.unpack(woff2DirectoryFormat, data) - header['totalCompressedSize'] = 0 - data = sstruct.pack(woff2DirectoryFormat, header) - with self.assertRaises(brotli.error): - WOFF2Reader(BytesIO(data + 
self.file.read())) - - def test_incorrect_uncompressed_size(self): - decompress_backup = brotli.decompress - brotli.decompress = lambda data: b"" # return empty byte string - with self.assertRaisesRegex(ttLib.TTLibError, 'unexpected size for decompressed'): - WOFF2Reader(self.file) - brotli.decompress = decompress_backup - - def test_incorrect_file_size(self): - data = self.file.read(woff2DirectorySize) - header = sstruct.unpack(woff2DirectoryFormat, data) - header['length'] -= 1 - data = sstruct.pack(woff2DirectoryFormat, header) - with self.assertRaisesRegex( - ttLib.TTLibError, "doesn't match the actual file size"): - WOFF2Reader(BytesIO(data + self.file.read())) - - def test_num_tables(self): - tags = [t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')] - data = self.file.read(woff2DirectorySize) - header = sstruct.unpack(woff2DirectoryFormat, data) - self.assertEqual(header['numTables'], len(tags)) - - def test_table_tags(self): - tags = set([t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')]) - reader = WOFF2Reader(self.file) - self.assertEqual(set(reader.keys()), tags) - - def test_get_normal_tables(self): - woff2Reader = WOFF2Reader(self.file) - specialTags = woff2TransformedTableTags + ('head', 'GlyphOrder', 'DSIG') - for tag in [t for t in self.font.keys() if t not in specialTags]: - origData = self.font.getTableData(tag) - decompressedData = woff2Reader[tag] - self.assertEqual(origData, decompressedData) - - def test_reconstruct_unknown(self): - reader = WOFF2Reader(self.file) - with self.assertRaisesRegex(ttLib.TTLibError, 'transform for table .* unknown'): - reader.reconstructTable('ZZZZ') - - -class WOFF2ReaderTTFTest(WOFF2ReaderTest): - """ Tests specific to TT-flavored fonts. 
""" - - @classmethod - def setUpClass(cls): - cls.file = BytesIO(TT_WOFF2.getvalue()) - cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - cls.font.importXML(TTX, quiet=True) - - def setUp(self): - self.file.seek(0) - - def test_reconstruct_glyf(self): - woff2Reader = WOFF2Reader(self.file) - reconstructedData = woff2Reader['glyf'] - self.assertEqual(self.font.getTableData('glyf'), reconstructedData) - - def test_reconstruct_loca(self): - woff2Reader = WOFF2Reader(self.file) - reconstructedData = woff2Reader['loca'] - self.assertEqual(self.font.getTableData('loca'), reconstructedData) - self.assertTrue(hasattr(woff2Reader.tables['glyf'], 'data')) - - def test_reconstruct_loca_not_match_orig_size(self): - reader = WOFF2Reader(self.file) - reader.tables['loca'].origLength -= 1 - with self.assertRaisesRegex( - ttLib.TTLibError, "'loca' table doesn't match original size"): - reader.reconstructTable('loca') - - -def normalise_table(font, tag, padding=4): - """ Return normalised table data. Keep 'font' instance unmodified. 
""" - assert tag in ('glyf', 'loca', 'head') - assert tag in font - if tag == 'head': - origHeadFlags = font['head'].flags - font['head'].flags |= (1 << 11) - tableData = font['head'].compile(font) - if font.sfntVersion in ("\x00\x01\x00\x00", "true"): - assert {'glyf', 'loca', 'head'}.issubset(font.keys()) - origIndexFormat = font['head'].indexToLocFormat - if hasattr(font['loca'], 'locations'): - origLocations = font['loca'].locations[:] - else: - origLocations = [] - glyfTable = ttLib.getTableClass('glyf')() - glyfTable.decompile(font.getTableData('glyf'), font) - glyfTable.padding = padding - if tag == 'glyf': - tableData = glyfTable.compile(font) - elif tag == 'loca': - glyfTable.compile(font) - tableData = font['loca'].compile(font) - if tag == 'head': - glyfTable.compile(font) - font['loca'].compile(font) - tableData = font['head'].compile(font) - font['head'].indexToLocFormat = origIndexFormat - font['loca'].set(origLocations) - if tag == 'head': - font['head'].flags = origHeadFlags - return tableData - - -def normalise_font(font, padding=4): - """ Return normalised font data. Keep 'font' instance unmodified. 
""" - # drop DSIG but keep a copy - DSIG_copy = copy.deepcopy(font['DSIG']) - del font['DSIG'] - # ovverride TTFont attributes - origFlavor = font.flavor - origRecalcBBoxes = font.recalcBBoxes - origRecalcTimestamp = font.recalcTimestamp - origLazy = font.lazy - font.flavor = None - font.recalcBBoxes = False - font.recalcTimestamp = False - font.lazy = True - # save font to temporary stream - infile = BytesIO() - font.save(infile) - infile.seek(0) - # reorder tables alphabetically - outfile = BytesIO() - reader = ttLib.sfnt.SFNTReader(infile) - writer = ttLib.sfnt.SFNTWriter( - outfile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) - for tag in sorted(reader.keys()): - if tag in woff2TransformedTableTags + ('head',): - writer[tag] = normalise_table(font, tag, padding) - else: - writer[tag] = reader[tag] - writer.close() - # restore font attributes - font['DSIG'] = DSIG_copy - font.flavor = origFlavor - font.recalcBBoxes = origRecalcBBoxes - font.recalcTimestamp = origRecalcTimestamp - font.lazy = origLazy - return outfile.getvalue() - - -class WOFF2DirectoryEntryTest(unittest.TestCase): - - def setUp(self): - self.entry = WOFF2DirectoryEntry() - - def test_not_enough_data_table_flags(self): - with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'flags'"): - self.entry.fromString(b"") - - def test_not_enough_data_table_tag(self): - incompleteData = bytearray([0x3F, 0, 0, 0]) - with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'tag'"): - self.entry.fromString(bytes(incompleteData)) - - def test_table_reserved_flags(self): - with self.assertRaisesRegex(ttLib.TTLibError, "bits 6-7 are reserved"): - self.entry.fromString(bytechr(0xC0)) - - def test_loca_zero_transformLength(self): - data = bytechr(getKnownTagIndex('loca')) # flags - data += packBase128(random.randint(1, 100)) # origLength - data += packBase128(1) # non-zero transformLength - with self.assertRaisesRegex( - ttLib.TTLibError, "transformLength of the 
'loca' table must be 0"): - self.entry.fromString(data) - - def test_fromFile(self): - unknownTag = Tag('ZZZZ') - data = bytechr(getKnownTagIndex(unknownTag)) - data += unknownTag.tobytes() - data += packBase128(random.randint(1, 100)) - expectedPos = len(data) - f = BytesIO(data + b'\0'*100) - self.entry.fromFile(f) - self.assertEqual(f.tell(), expectedPos) - - def test_transformed_toString(self): - self.entry.tag = Tag('glyf') - self.entry.flags = getKnownTagIndex(self.entry.tag) - self.entry.origLength = random.randint(101, 200) - self.entry.length = random.randint(1, 100) - expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength) + - base128Size(self.entry.length)) - data = self.entry.toString() - self.assertEqual(len(data), expectedSize) - - def test_known_toString(self): - self.entry.tag = Tag('head') - self.entry.flags = getKnownTagIndex(self.entry.tag) - self.entry.origLength = 54 - expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength)) - data = self.entry.toString() - self.assertEqual(len(data), expectedSize) - - def test_unknown_toString(self): - self.entry.tag = Tag('ZZZZ') - self.entry.flags = woff2UnknownTagIndex - self.entry.origLength = random.randint(1, 100) - expectedSize = (woff2FlagsSize + woff2UnknownTagSize + - base128Size(self.entry.origLength)) - data = self.entry.toString() - self.assertEqual(len(data), expectedSize) - - -class DummyReader(WOFF2Reader): - - def __init__(self, file, checkChecksums=1, fontNumber=-1): - self.file = file - for attr in ('majorVersion', 'minorVersion', 'metaOffset', 'metaLength', - 'metaOrigLength', 'privLength', 'privOffset'): - setattr(self, attr, 0) - - -class WOFF2FlavorDataTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - assert os.path.exists(METADATA) - with open(METADATA, 'rb') as f: - cls.xml_metadata = f.read() - cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) - # make random byte strings; font data must be 4-byte aligned - 
cls.fontdata = bytes(bytearray(random.sample(range(0, 256), 80))) - cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) - - def setUp(self): - self.file = BytesIO(self.fontdata) - self.file.seek(0, 2) - - def test_get_metaData_no_privData(self): - self.file.write(self.compressed_metadata) - reader = DummyReader(self.file) - reader.metaOffset = len(self.fontdata) - reader.metaLength = len(self.compressed_metadata) - reader.metaOrigLength = len(self.xml_metadata) - flavorData = WOFF2FlavorData(reader) - self.assertEqual(self.xml_metadata, flavorData.metaData) - - def test_get_privData_no_metaData(self): - self.file.write(self.privData) - reader = DummyReader(self.file) - reader.privOffset = len(self.fontdata) - reader.privLength = len(self.privData) - flavorData = WOFF2FlavorData(reader) - self.assertEqual(self.privData, flavorData.privData) - - def test_get_metaData_and_privData(self): - self.file.write(self.compressed_metadata + self.privData) - reader = DummyReader(self.file) - reader.metaOffset = len(self.fontdata) - reader.metaLength = len(self.compressed_metadata) - reader.metaOrigLength = len(self.xml_metadata) - reader.privOffset = reader.metaOffset + reader.metaLength - reader.privLength = len(self.privData) - flavorData = WOFF2FlavorData(reader) - self.assertEqual(self.xml_metadata, flavorData.metaData) - self.assertEqual(self.privData, flavorData.privData) - - def test_get_major_minorVersion(self): - reader = DummyReader(self.file) - reader.majorVersion = reader.minorVersion = 1 - flavorData = WOFF2FlavorData(reader) - self.assertEqual(flavorData.majorVersion, 1) - self.assertEqual(flavorData.minorVersion, 1) - - -class WOFF2WriterTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") - cls.font.importXML(OTX, quiet=True) - cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] - cls.numTables = len(cls.tags) - cls.file = 
BytesIO(CFF_WOFF2.getvalue()) - cls.file.seek(0, 2) - cls.length = (cls.file.tell() + 3) & ~3 - cls.setUpFlavorData() - - @classmethod - def setUpFlavorData(cls): - assert os.path.exists(METADATA) - with open(METADATA, 'rb') as f: - cls.xml_metadata = f.read() - cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) - cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) - - def setUp(self): - self.file.seek(0) - self.writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion) - - def test_DSIG_dropped(self): - self.writer['DSIG'] = b"\0" - self.assertEqual(len(self.writer.tables), 0) - self.assertEqual(self.writer.numTables, self.numTables-1) - - def test_no_rewrite_table(self): - self.writer['ZZZZ'] = b"\0" - with self.assertRaisesRegex(ttLib.TTLibError, "cannot rewrite"): - self.writer['ZZZZ'] = b"\0" - - def test_num_tables(self): - self.writer['ABCD'] = b"\0" - with self.assertRaisesRegex(ttLib.TTLibError, "wrong number of tables"): - self.writer.close() - - def test_required_tables(self): - font = ttLib.TTFont(flavor="woff2") - with self.assertRaisesRegex(ttLib.TTLibError, "missing required table"): - font.save(BytesIO()) - - def test_head_transform_flag(self): - headData = self.font.getTableData('head') - origFlags = byteord(headData[16]) - woff2font = ttLib.TTFont(self.file) - newHeadData = woff2font.getTableData('head') - modifiedFlags = byteord(newHeadData[16]) - self.assertNotEqual(origFlags, modifiedFlags) - restoredFlags = modifiedFlags & ~0x08 # turn off bit 11 - self.assertEqual(origFlags, restoredFlags) - - def test_tables_sorted_alphabetically(self): - expected = sorted([t for t in self.tags if t != 'DSIG']) - woff2font = ttLib.TTFont(self.file) - self.assertEqual(expected, list(woff2font.reader.keys())) - - def test_checksums(self): - normFile = BytesIO(normalise_font(self.font, padding=4)) - normFile.seek(0) - normFont = ttLib.TTFont(normFile, checkChecksums=2) - w2font = ttLib.TTFont(self.file) 
- # force reconstructing glyf table using 4-byte padding - w2font.reader.padding = 4 - for tag in [t for t in self.tags if t != 'DSIG']: - w2data = w2font.reader[tag] - normData = normFont.reader[tag] - if tag == "head": - w2data = w2data[:8] + b'\0\0\0\0' + w2data[12:] - normData = normData[:8] + b'\0\0\0\0' + normData[12:] - w2CheckSum = ttLib.sfnt.calcChecksum(w2data) - normCheckSum = ttLib.sfnt.calcChecksum(normData) - self.assertEqual(w2CheckSum, normCheckSum) - normCheckSumAdjustment = normFont['head'].checkSumAdjustment - self.assertEqual(normCheckSumAdjustment, w2font['head'].checkSumAdjustment) - - def test_calcSFNTChecksumsLengthsAndOffsets(self): - normFont = ttLib.TTFont(BytesIO(normalise_font(self.font, padding=4))) - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer._normaliseGlyfAndLoca(padding=4) - self.writer._setHeadTransformFlag() - self.writer.tables = OrderedDict(sorted(self.writer.tables.items())) - self.writer._calcSFNTChecksumsLengthsAndOffsets() - for tag, entry in normFont.reader.tables.items(): - self.assertEqual(entry.offset, self.writer.tables[tag].origOffset) - self.assertEqual(entry.length, self.writer.tables[tag].origLength) - self.assertEqual(entry.checkSum, self.writer.tables[tag].checkSum) - - def test_bad_sfntVersion(self): - for i in range(self.numTables): - self.writer[bytechr(65 + i)*4] = b"\0" - self.writer.sfntVersion = 'ZZZZ' - with self.assertRaisesRegex(ttLib.TTLibError, "bad sfntVersion"): - self.writer.close() - - def test_calcTotalSize_no_flavorData(self): - expected = self.length - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_calcTotalSize_with_metaData(self): - expected = self.length + len(self.compressed_metadata) - flavorData = self.writer.flavorData = WOFF2FlavorData() - 
flavorData.metaData = self.xml_metadata - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_calcTotalSize_with_privData(self): - expected = self.length + len(self.privData) - flavorData = self.writer.flavorData = WOFF2FlavorData() - flavorData.privData = self.privData - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_calcTotalSize_with_metaData_and_privData(self): - metaDataLength = (len(self.compressed_metadata) + 3) & ~3 - expected = self.length + metaDataLength + len(self.privData) - flavorData = self.writer.flavorData = WOFF2FlavorData() - flavorData.metaData = self.xml_metadata - flavorData.privData = self.privData - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_getVersion(self): - # no version - self.assertEqual((0, 0), self.writer._getVersion()) - # version from head.fontRevision - fontRevision = self.font['head'].fontRevision - versionTuple = tuple(int(i) for i in str(fontRevision).split(".")) - entry = self.writer.tables['head'] = ttLib.getTableClass('head')() - entry.data = self.font.getTableData('head') - self.assertEqual(versionTuple, self.writer._getVersion()) - # version from writer.flavorData - flavorData = self.writer.flavorData = WOFF2FlavorData() - flavorData.majorVersion, flavorData.minorVersion = (10, 11) - self.assertEqual((10, 11), self.writer._getVersion()) - - -class WOFF2WriterTTFTest(WOFF2WriterTest): - - @classmethod - def setUpClass(cls): - cls.font = 
ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") - cls.font.importXML(TTX, quiet=True) - cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] - cls.numTables = len(cls.tags) - cls.file = BytesIO(TT_WOFF2.getvalue()) - cls.file.seek(0, 2) - cls.length = (cls.file.tell() + 3) & ~3 - cls.setUpFlavorData() - - def test_normaliseGlyfAndLoca(self): - normTables = {} - for tag in ('head', 'loca', 'glyf'): - normTables[tag] = normalise_table(self.font, tag, padding=4) - for tag in self.tags: - tableData = self.font.getTableData(tag) - self.writer[tag] = tableData - if tag in normTables: - self.assertNotEqual(tableData, normTables[tag]) - self.writer._normaliseGlyfAndLoca(padding=4) - self.writer._setHeadTransformFlag() - for tag in normTables: - self.assertEqual(self.writer.tables[tag].data, normTables[tag]) - - -class WOFF2LocaTableTest(unittest.TestCase): - - def setUp(self): - self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - font['head'] = ttLib.getTableClass('head') - font['loca'] = WOFF2LocaTable() - font['glyf'] = WOFF2GlyfTable() - - def test_compile_short_loca(self): - locaTable = self.font['loca'] - locaTable.set(list(range(0, 0x20000, 2))) - self.font['glyf'].indexFormat = 0 - locaData = locaTable.compile(self.font) - self.assertEqual(len(locaData), 0x20000) - - def test_compile_short_loca_overflow(self): - locaTable = self.font['loca'] - locaTable.set(list(range(0x20000 + 1))) - self.font['glyf'].indexFormat = 0 - with self.assertRaisesRegex( - ttLib.TTLibError, "indexFormat is 0 but local offsets > 0x20000"): - locaTable.compile(self.font) - - def test_compile_short_loca_not_multiples_of_2(self): - locaTable = self.font['loca'] - locaTable.set([1, 3, 5, 7]) - self.font['glyf'].indexFormat = 0 - with self.assertRaisesRegex(ttLib.TTLibError, "offsets not multiples of 2"): - locaTable.compile(self.font) - - def test_compile_long_loca(self): - locaTable = self.font['loca'] - 
locaTable.set(list(range(0x20001))) - self.font['glyf'].indexFormat = 1 - locaData = locaTable.compile(self.font) - self.assertEqual(len(locaData), 0x20001 * 4) - - def test_compile_set_indexToLocFormat_0(self): - locaTable = self.font['loca'] - # offsets are all multiples of 2 and max length is < 0x10000 - locaTable.set(list(range(0, 0x20000, 2))) - locaTable.compile(self.font) - newIndexFormat = self.font['head'].indexToLocFormat - self.assertEqual(0, newIndexFormat) - - def test_compile_set_indexToLocFormat_1(self): - locaTable = self.font['loca'] - # offsets are not multiples of 2 - locaTable.set(list(range(10))) - locaTable.compile(self.font) - newIndexFormat = self.font['head'].indexToLocFormat - self.assertEqual(1, newIndexFormat) - # max length is >= 0x10000 - locaTable.set(list(range(0, 0x20000 + 1, 2))) - locaTable.compile(self.font) - newIndexFormat = self.font['head'].indexToLocFormat - self.assertEqual(1, newIndexFormat) - - -class WOFF2GlyfTableTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - font.importXML(TTX, quiet=True) - cls.tables = {} - cls.transformedTags = ('maxp', 'head', 'loca', 'glyf') - for tag in reversed(cls.transformedTags): # compile in inverse order - cls.tables[tag] = font.getTableData(tag) - infile = BytesIO(TT_WOFF2.getvalue()) - reader = WOFF2Reader(infile) - cls.transformedGlyfData = reader.tables['glyf'].loadData( - reader.transformBuffer) - - def setUp(self): - self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - font['head'] = ttLib.getTableClass('head')() - font['maxp'] = ttLib.getTableClass('maxp')() - font['loca'] = WOFF2LocaTable() - font['glyf'] = WOFF2GlyfTable() - for tag in self.transformedTags: - font[tag].decompile(self.tables[tag], font) - - def test_reconstruct_glyf_padded_4(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.padding = 4 - data = 
glyfTable.compile(self.font) - normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) - self.assertEqual(normGlyfData, data) - - def test_reconstruct_glyf_padded_2(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.padding = 2 - data = glyfTable.compile(self.font) - normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) - self.assertEqual(normGlyfData, data) - - def test_reconstruct_glyf_unpadded(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - data = glyfTable.compile(self.font) - self.assertEqual(self.tables['glyf'], data) - - def test_reconstruct_glyf_incorrect_glyphOrder(self): - glyfTable = WOFF2GlyfTable() - badGlyphOrder = self.font.getGlyphOrder()[:-1] - self.font.setGlyphOrder(badGlyphOrder) - with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): - glyfTable.reconstruct(self.transformedGlyfData, self.font) - - def test_reconstruct_glyf_missing_glyphOrder(self): - glyfTable = WOFF2GlyfTable() - del self.font.glyphOrder - numGlyphs = self.font['maxp'].numGlyphs - del self.font['maxp'] - glyfTable.reconstruct(self.transformedGlyfData, self.font) - expected = [".notdef"] - expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)]) - self.assertEqual(expected, glyfTable.glyphOrder) - - def test_reconstruct_loca_padded_4(self): - locaTable = self.font['loca'] = WOFF2LocaTable() - glyfTable = self.font['glyf'] = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.padding = 4 - glyfTable.compile(self.font) - data = locaTable.compile(self.font) - normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) - self.assertEqual(normLocaData, data) - - def test_reconstruct_loca_padded_2(self): - locaTable = self.font['loca'] = WOFF2LocaTable() - glyfTable = self.font['glyf'] = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - 
glyfTable.padding = 2 - glyfTable.compile(self.font) - data = locaTable.compile(self.font) - normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) - self.assertEqual(normLocaData, data) - - def test_reconstruct_loca_unpadded(self): - locaTable = self.font['loca'] = WOFF2LocaTable() - glyfTable = self.font['glyf'] = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.compile(self.font) - data = locaTable.compile(self.font) - self.assertEqual(self.tables['loca'], data) - - def test_reconstruct_glyf_header_not_enough_data(self): - with self.assertRaisesRegex(ttLib.TTLibError, "not enough 'glyf' data"): - WOFF2GlyfTable().reconstruct(b"", self.font) - - def test_reconstruct_glyf_table_incorrect_size(self): - msg = "incorrect size of transformed 'glyf'" - with self.assertRaisesRegex(ttLib.TTLibError, msg): - WOFF2GlyfTable().reconstruct(self.transformedGlyfData + b"\x00", self.font) - with self.assertRaisesRegex(ttLib.TTLibError, msg): - WOFF2GlyfTable().reconstruct(self.transformedGlyfData[:-1], self.font) - - def test_transform_glyf(self): - glyfTable = self.font['glyf'] - data = glyfTable.transform(self.font) - self.assertEqual(self.transformedGlyfData, data) - - def test_transform_glyf_incorrect_glyphOrder(self): - glyfTable = self.font['glyf'] - badGlyphOrder = self.font.getGlyphOrder()[:-1] - del glyfTable.glyphOrder - self.font.setGlyphOrder(badGlyphOrder) - with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): - glyfTable.transform(self.font) - glyfTable.glyphOrder = badGlyphOrder - with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): - glyfTable.transform(self.font) - - def test_transform_glyf_missing_glyphOrder(self): - glyfTable = self.font['glyf'] - del glyfTable.glyphOrder - del self.font.glyphOrder - numGlyphs = self.font['maxp'].numGlyphs - del self.font['maxp'] - glyfTable.transform(self.font) - expected = [".notdef"] - expected.extend(["glyph%.5d" % i for i in 
range(1, numGlyphs)]) - self.assertEqual(expected, glyfTable.glyphOrder) - - def test_roundtrip_glyf_reconstruct_and_transform(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - data = glyfTable.transform(self.font) - self.assertEqual(self.transformedGlyfData, data) - - def test_roundtrip_glyf_transform_and_reconstruct(self): - glyfTable = self.font['glyf'] - transformedData = glyfTable.transform(self.font) - newGlyfTable = WOFF2GlyfTable() - newGlyfTable.reconstruct(transformedData, self.font) - newGlyfTable.padding = 4 - reconstructedData = newGlyfTable.compile(self.font) - normGlyfData = normalise_table(self.font, 'glyf', newGlyfTable.padding) - self.assertEqual(normGlyfData, reconstructedData) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Snippets/fontTools/ttx.py fonttools-3.21.2/Snippets/fontTools/ttx.py --- fonttools-3.0/Snippets/fontTools/ttx.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/ttx.py 2018-01-08 12:40:40.000000000 +0000 @@ -1,22 +1,23 @@ """\ usage: ttx [options] inputfile1 [... inputfileN] - TTX %s -- From OpenType To XML And Back + TTX -- From OpenType To XML And Back If an input file is a TrueType or OpenType font file, it will be - dumped to an TTX file (an XML-based text format). - If an input file is a TTX file, it will be compiled to a TrueType - or OpenType font file. + decompiled to a TTX file (an XML-based text format). + If an input file is a TTX file, it will be compiled to whatever + format the data is in, a TrueType or OpenType/CFF font file. Output files are created so they are unique: an existing file is never overwritten. General options: - -h Help: print this message + -h Help: print this message. + --version: show version and exit. -d Specify a directory where the output files are to be created. -o Specify a file to write the output to. A special - value of of - would use the standard output. 
+ value of - would use the standard output. -f Overwrite existing output file(s), ie. don't append numbers. -v Verbose: more messages will be written to stdout about what is being done. @@ -56,10 +57,13 @@ If no export format is specified 'raw' format is used. -e Don't ignore decompilation errors, but show a full traceback and abort. - -y Select font number for TrueType Collection, + -y Select font number for TrueType Collection (.ttc/.otc), starting from 0. --unicodedata Use custom database file to write character names in the comments of the cmap TTX output. + --newline Control how line endings are written in the XML + file. It can be 'LF', 'CR', or 'CRLF'. If not specified, the + default platform-specific line endings are used. Compile options: -m Merge with TrueType-input-file: specify a TrueType or OpenType @@ -69,6 +73,11 @@ file as-is. --recalc-timestamp Set font 'modified' timestamp to current time. By default, the modification time of the TTX file will be used. + --flavor Specify flavor of output font file. May be 'woff' + or 'woff2'. Note that WOFF2 requires the Brotli Python extension, + available at https://github.com/google/brotli + --with-zopfli Use Zopfli instead of Zlib to compress WOFF. 
The Python + extension is available at https://pypi.python.org/pypi/zopfli """ @@ -78,34 +87,19 @@ from fontTools.misc.macCreatorType import getMacCreatorAndType from fontTools.unicode import setUnicodeData from fontTools.misc.timeTools import timestampSinceEpoch +from fontTools.misc.loggingTools import Timer +from fontTools.misc.cliTools import makeOutputFileName import os import sys import getopt import re +import logging -def usage(): - from fontTools import version - print(__doc__ % version) - sys.exit(2) +log = logging.getLogger("fontTools.ttx") -numberAddedRE = re.compile("#\d+$") opentypeheaderRE = re.compile('''sfntVersion=['"]OTTO["']''') -def makeOutputFileName(input, outputDir, extension, overWrite=False): - dirName, fileName = os.path.split(input) - fileName, ext = os.path.splitext(fileName) - if outputDir: - dirName = outputDir - fileName = numberAddedRE.split(fileName)[0] - output = os.path.join(dirName, fileName + extension) - n = 1 - if not overWrite: - while os.path.exists(output): - output = os.path.join(dirName, fileName + "#" + repr(n) + extension) - n = n + 1 - return output - class Options(object): @@ -123,7 +117,10 @@ ignoreDecompileErrors = True bitmapGlyphDataFormat = 'raw' unicodedata = None + newlinestr = None recalcTimestamp = False + flavor = None + useZopfli = False def __init__(self, rawOptions, numFiles): self.onlyTables = [] @@ -132,13 +129,15 @@ for option, value in rawOptions: # general options if option == "-h": + print(__doc__) + sys.exit(0) + elif option == "--version": from fontTools import version - print(__doc__ % version) + print(version) sys.exit(0) elif option == "-d": if not os.path.isdir(value): - print("The -d option value must be an existing directory") - sys.exit(2) + raise getopt.GetoptError("The -d option value must be an existing directory") self.outputDir = value elif option == "-o": self.outputFile = value @@ -152,8 +151,12 @@ elif option == "-l": self.listTables = True elif option == "-t": + # pad with space if 
table tag length is less than 4 + value = value.ljust(4) self.onlyTables.append(value) elif option == "-x": + # pad with space if table tag length is less than 4 + value = value.ljust(4) self.skipTables.append(value) elif option == "-s": self.splitTables = True @@ -162,8 +165,8 @@ elif option == "-z": validOptions = ('raw', 'row', 'bitwise', 'extfile') if value not in validOptions: - print("-z does not allow %s as a format. Use %s" % (option, validOptions)) - sys.exit(2) + raise getopt.GetoptError( + "-z does not allow %s as a format. Use %s" % (option, validOptions)) self.bitmapGlyphDataFormat = value elif option == "-y": self.fontNumber = int(value) @@ -178,14 +181,41 @@ self.ignoreDecompileErrors = False elif option == "--unicodedata": self.unicodedata = value + elif option == "--newline": + validOptions = ('LF', 'CR', 'CRLF') + if value == "LF": + self.newlinestr = "\n" + elif value == "CR": + self.newlinestr = "\r" + elif value == "CRLF": + self.newlinestr = "\r\n" + else: + raise getopt.GetoptError( + "Invalid choice for --newline: %r (choose from %s)" + % (value, ", ".join(map(repr, validOptions)))) elif option == "--recalc-timestamp": self.recalcTimestamp = True - if self.onlyTables and self.skipTables: - print("-t and -x options are mutually exclusive") + elif option == "--flavor": + self.flavor = value + elif option == "--with-zopfli": + self.useZopfli = True + if self.verbose and self.quiet: + raise getopt.GetoptError("-q and -v options are mutually exclusive") + if self.verbose: + self.logLevel = logging.DEBUG + elif self.quiet: + self.logLevel = logging.WARNING + else: + self.logLevel = logging.INFO + if self.mergeFile and self.flavor: + raise getopt.GetoptError("-m and --flavor options are mutually exclusive") sys.exit(2) + if self.onlyTables and self.skipTables: + raise getopt.GetoptError("-t and -x options are mutually exclusive") if self.mergeFile and numFiles > 1: - print("Must specify exactly one TTX source file when using -m") - sys.exit(2) + 
raise getopt.GetoptError("Must specify exactly one TTX source file when using -m") + if self.flavor != 'woff' and self.useZopfli: + raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'") def ttList(input, output, options): @@ -213,45 +243,43 @@ ttf.close() +@Timer(log, 'Done dumping TTX in %(time).3f seconds') def ttDump(input, output, options): - if not options.quiet: - print('Dumping "%s" to "%s"...' % (input, output)) + log.info('Dumping "%s" to "%s"...', input, output) if options.unicodedata: setUnicodeData(options.unicodedata) - ttf = TTFont(input, 0, verbose=options.verbose, allowVID=options.allowVID, - quiet=options.quiet, + ttf = TTFont(input, 0, allowVID=options.allowVID, ignoreDecompileErrors=options.ignoreDecompileErrors, fontNumber=options.fontNumber) ttf.saveXML(output, - quiet=options.quiet, tables=options.onlyTables, skipTables=options.skipTables, splitTables=options.splitTables, disassembleInstructions=options.disassembleInstructions, - bitmapGlyphDataFormat=options.bitmapGlyphDataFormat) + bitmapGlyphDataFormat=options.bitmapGlyphDataFormat, + newlinestr=options.newlinestr) ttf.close() +@Timer(log, 'Done compiling TTX in %(time).3f seconds') def ttCompile(input, output, options): - if not options.quiet: - print('Compiling "%s" to "%s"...' % (input, output)) - ttf = TTFont(options.mergeFile, + log.info('Compiling "%s" to "%s"...' 
% (input, output)) + if options.useZopfli: + from fontTools.ttLib import sfnt + sfnt.USE_ZOPFLI = True + ttf = TTFont(options.mergeFile, flavor=options.flavor, recalcBBoxes=options.recalcBBoxes, recalcTimestamp=options.recalcTimestamp, - verbose=options.verbose, allowVID=options.allowVID) - ttf.importXML(input, quiet=options.quiet) + allowVID=options.allowVID) + ttf.importXML(input) - if not options.recalcTimestamp: + if not options.recalcTimestamp and 'head' in ttf: # use TTX file modification time for head "modified" timestamp mtime = os.path.getmtime(input) ttf['head'].modified = timestampSinceEpoch(mtime) ttf.save(output) - if options.verbose: - import time - print("finished at", time.strftime("%H:%M:%S", time.localtime(time.time()))) - def guessFileType(fileName): base, ext = os.path.splitext(fileName) @@ -259,12 +287,15 @@ f = open(fileName, "rb") except IOError: return None + header = f.read(256) + f.close() + if header.startswith(b'\xef\xbb\xbf>> script("a") + 'Latn' + >>> script(",") + 'Zyyy' + >>> script(unichr(0x10FFFF)) + 'Zzzz' + """ + code = byteord(char) + # 'bisect_right(a, x, lo=0, hi=len(a))' returns an insertion point which + # comes after (to the right of) any existing entries of x in a, and it + # partitions array a into two halves so that, for the left side + # all(val <= x for val in a[lo:i]), and for the right side + # all(val > x for val in a[i:hi]). + # Our 'SCRIPT_RANGES' is a sorted list of ranges (only their starting + # breakpoints); we want to use `bisect_right` to look up the range that + # contains the given codepoint: i.e. whose start is less than or equal + # to the codepoint. Thus, we subtract -1 from the index returned. + i = bisect_right(Scripts.RANGES, code) + return Scripts.VALUES[i-1] + + +def script_extension(char): + """ Return the script extension property assigned to the Unicode character + 'char' as a set of string. 
+ + >>> script_extension("a") == {'Latn'} + True + >>> script_extension(unichr(0x060C)) == {'Arab', 'Syrc', 'Thaa'} + True + >>> script_extension(unichr(0x10FFFF)) == {'Zzzz'} + True + """ + code = byteord(char) + i = bisect_right(ScriptExtensions.RANGES, code) + value = ScriptExtensions.VALUES[i-1] + if value is None: + # code points not explicitly listed for Script Extensions + # have as their value the corresponding Script property value + return {script(char)} + return value + + +def script_name(code, default=KeyError): + """ Return the long, human-readable script name given a four-letter + Unicode script code. + + If no matching name is found, a KeyError is raised by default. + + You can use the 'default' argument to return a fallback value (e.g. + 'Unknown' or None) instead of throwing an error. + """ + try: + return str(Scripts.NAMES[code].replace("_", " ")) + except KeyError: + if isinstance(default, type) and issubclass(default, KeyError): + raise + return default + + +_normalize_re = re.compile(r"[-_ ]+") + + +def _normalize_property_name(string): + """Remove case, strip space, '-' and '_' for loose matching.""" + return _normalize_re.sub("", string).lower() + + +_SCRIPT_CODES = {_normalize_property_name(v): k + for k, v in Scripts.NAMES.items()} + + +def script_code(script_name, default=KeyError): + """Returns the four-letter Unicode script code from its long name + + If no matching script code is found, a KeyError is raised by default. + + You can use the 'default' argument to return a fallback string (e.g. + 'Zzzz' or None) instead of throwing an error. + """ + normalized_name = _normalize_property_name(script_name) + try: + return _SCRIPT_CODES[normalized_name] + except KeyError: + if isinstance(default, type) and issubclass(default, KeyError): + raise + return default + + +def block(char): + """ Return the block property assigned to the Unicode character 'char' + as a string. 
+ + >>> block("a") + 'Basic Latin' + >>> block(unichr(0x060C)) + 'Arabic' + >>> block(unichr(0xEFFFF)) + 'No_Block' + """ + code = byteord(char) + i = bisect_right(Blocks.RANGES, code) + return Blocks.VALUES[i-1] diff -Nru fonttools-3.0/Snippets/fontTools/unicode.py fonttools-3.21.2/Snippets/fontTools/unicode.py --- fonttools-3.0/Snippets/fontTools/unicode.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/unicode.py 2018-01-08 12:40:40.000000000 +0000 @@ -30,7 +30,12 @@ class _UnicodeBuiltin(object): def __getitem__(self, charCode): - import unicodedata + try: + # use unicodedata backport to python2, if available: + # https://github.com/mikekap/unicodedata2 + import unicodedata2 as unicodedata + except ImportError: + import unicodedata try: return unicodedata.name(unichr(charCode)) except ValueError: diff -Nru fonttools-3.0/Snippets/fontTools/varLib/builder.py fonttools-3.21.2/Snippets/fontTools/varLib/builder.py --- fonttools-3.0/Snippets/fontTools/varLib/builder.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/builder.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,98 @@ +from __future__ import print_function, division, absolute_import +from fontTools import ttLib +from fontTools.ttLib.tables import otTables as ot + +# VariationStore + +def buildVarRegionAxis(axisSupport): + self = ot.VarRegionAxis() + self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport] + return self + +def buildVarRegion(support, axisTags): + assert all(tag in axisTags for tag in support.keys()), ("Unknown axis tag found.", support, axisTags) + self = ot.VarRegion() + self.VarRegionAxis = [] + for tag in axisTags: + self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0,0,0)))) + self.VarRegionAxisCount = len(self.VarRegionAxis) + return self + +def buildVarRegionList(supports, axisTags): + self = ot.VarRegionList() + self.RegionAxisCount = len(axisTags) + self.Region = [] + for support in 
supports: + self.Region.append(buildVarRegion(support, axisTags)) + self.RegionCount = len(self.Region) + return self + + +def _reorderItem(lst, narrows): + out = [] + count = len(lst) + for i in range(count): + if i not in narrows: + out.append(lst[i]) + for i in range(count): + if i in narrows: + out.append(lst[i]) + return out + +def varDataCalculateNumShorts(self, optimize=True): + count = self.VarRegionCount + items = self.Item + narrows = set(range(count)) + for item in items: + wides = [i for i in narrows if not (-128 <= item[i] <= 127)] + narrows.difference_update(wides) + if not narrows: + break + if optimize: + # Reorder columns such that all SHORT columns come before UINT8 + self.VarRegionIndex = _reorderItem(self.VarRegionIndex, narrows) + for i in range(self.ItemCount): + items[i] = _reorderItem(items[i], narrows) + self.NumShorts = count - len(narrows) + else: + wides = set(range(count)) - narrows + self.NumShorts = 1+max(wides) if wides else 0 + return self + +def buildVarData(varRegionIndices, items, optimize=True): + self = ot.VarData() + self.VarRegionIndex = list(varRegionIndices) + regionCount = self.VarRegionCount = len(self.VarRegionIndex) + records = self.Item = [] + if items: + for item in items: + assert len(item) == regionCount + records.append(list(item)) + self.ItemCount = len(self.Item) + varDataCalculateNumShorts(self, optimize=optimize) + return self + + +def buildVarStore(varRegionList, varDataList): + self = ot.VarStore() + self.Format = 1 + self.VarRegionList = varRegionList + self.VarData = list(varDataList) + self.VarDataCount = len(self.VarData) + return self + + +# Variation helpers + +def buildVarIdxMap(varIdxes): + # TODO Change VarIdxMap mapping to hold separate outer,inner indices + self = ot.VarIdxMap() + self.mapping = list(varIdxes) + return self + +def buildVarDevTable(varIdx): + self = ot.Device() + self.DeltaFormat = 0x8000 + self.StartSize = varIdx >> 16 + self.EndSize = varIdx & 0xFFFF + return self diff -Nru 
fonttools-3.0/Snippets/fontTools/varLib/designspace.py fonttools-3.21.2/Snippets/fontTools/varLib/designspace.py --- fonttools-3.0/Snippets/fontTools/varLib/designspace.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/designspace.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,113 @@ +"""Rudimentary support for loading MutatorMath .designspace files.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +try: + import xml.etree.cElementTree as ET +except ImportError: + import xml.etree.ElementTree as ET + +__all__ = ['load', 'loads'] + +namespaces = {'xml': '{http://www.w3.org/XML/1998/namespace}'} + + +def _xml_parse_location(et): + loc = {} + for dim in et.find('location'): + assert dim.tag == 'dimension' + name = dim.attrib['name'] + value = float(dim.attrib['xvalue']) + assert name not in loc + loc[name] = value + return loc + + +def _load_item(et): + item = dict(et.attrib) + for element in et: + if element.tag == 'location': + value = _xml_parse_location(et) + else: + value = {} + if 'copy' in element.attrib: + value['copy'] = bool(int(element.attrib['copy'])) + # TODO load more?! 
+ item[element.tag] = value + return item + + +def _xml_parse_axis_or_map(element): + dic = {} + for name in element.attrib: + if name in ['name', 'tag']: + dic[name] = element.attrib[name] + else: + dic[name] = float(element.attrib[name]) + return dic + + +def _load_axis(et): + item = _xml_parse_axis_or_map(et) + maps = [] + labelnames = {} + for element in et: + assert element.tag in ['labelname', 'map'] + if element.tag == 'labelname': + lang = element.attrib["{0}lang".format(namespaces['xml'])] + labelnames[lang] = element.text + elif element.tag == 'map': + maps.append(_xml_parse_axis_or_map(element)) + if labelnames: + item['labelname'] = labelnames + if maps: + item['map'] = maps + return item + + +def _load(et): + designspace = {} + ds = et.getroot() + + axes_element = ds.find('axes') + if axes_element is not None: + axes = [] + for et in axes_element: + axes.append(_load_axis(et)) + designspace['axes'] = axes + + sources_element = ds.find('sources') + if sources_element is not None: + sources = [] + for et in sources_element: + sources.append(_load_item(et)) + designspace['sources'] = sources + + instances_element = ds.find('instances') + if instances_element is not None: + instances = [] + for et in instances_element: + instances.append(_load_item(et)) + designspace['instances'] = instances + + return designspace + + +def load(filename): + """Load designspace from a file name or object. 
+ Returns a dictionary containing three (optional) items: + - list of "axes" + - list of "sources" (aka masters) + - list of "instances" + """ + return _load(ET.parse(filename)) + + +def loads(string): + """Load designspace from a string.""" + return _load(ET.fromstring(string)) + +if __name__ == '__main__': + import sys + from pprint import pprint + for f in sys.argv[1:]: + pprint(load(f)) diff -Nru fonttools-3.0/Snippets/fontTools/varLib/__init__.py fonttools-3.21.2/Snippets/fontTools/varLib/__init__.py --- fonttools-3.0/Snippets/fontTools/varLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,754 @@ +""" +Module for dealing with 'gvar'-style font variations, also known as run-time +interpolation. + +The ideas here are very similar to MutatorMath. There is even code to read +MutatorMath .designspace files in the varLib.designspace module. + +For now, if you run this file on a designspace file, it tries to find +ttf-interpolatable files for the masters and build a variable-font from +them. Such ttf-interpolatable and designspace files can be generated from +a Glyphs source, eg., using noto-source as an example: + + $ fontmake -o ttf-interpolatable -g NotoSansArabic-MM.glyphs + +Then you can make a variable-font this way: + + $ fonttools varLib master_ufo/NotoSansArabic.designspace + +API *will* change in near future. 
+""" +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.arrayTools import Vector +from fontTools.ttLib import TTFont, newTable +from fontTools.ttLib.tables._n_a_m_e import NameRecord +from fontTools.ttLib.tables._f_v_a_r import Axis, NamedInstance +from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates +from fontTools.ttLib.tables.ttProgram import Program +from fontTools.ttLib.tables.TupleVariation import TupleVariation +from fontTools.ttLib.tables import otTables as ot +from fontTools.varLib import builder, designspace, models, varStore +from fontTools.varLib.merger import VariationMerger, _all_equal +from fontTools.varLib.mvar import MVAR_ENTRIES +from fontTools.varLib.iup import iup_delta_optimize +from collections import OrderedDict +import os.path +import logging +from pprint import pformat + +log = logging.getLogger("fontTools.varLib") + + +class VarLibError(Exception): + pass + +# +# Creation routines +# + +def _add_fvar(font, axes, instances): + """ + Add 'fvar' table to font. + + axes is an ordered dictionary of DesignspaceAxis objects. + + instances is list of dictionary objects with 'location', 'stylename', + and possibly 'postscriptfontname' entries. + """ + + assert axes + assert isinstance(axes, OrderedDict) + + log.info("Generating fvar") + + fvar = newTable('fvar') + nameTable = font['name'] + + for a in axes.values(): + axis = Axis() + axis.axisTag = Tag(a.tag) + # TODO Skip axes that have no variation. 
+ axis.minValue, axis.defaultValue, axis.maxValue = a.minimum, a.default, a.maximum + axis.axisNameID = nameTable.addName(tounicode(a.labelname['en'])) + # TODO: + # Replace previous line with the following when the following issues are resolved: + # https://github.com/fonttools/fonttools/issues/930 + # https://github.com/fonttools/fonttools/issues/931 + # axis.axisNameID = nameTable.addMultilingualName(a.labelname, font) + fvar.axes.append(axis) + + for instance in instances: + coordinates = instance['location'] + name = tounicode(instance['stylename']) + psname = instance.get('postscriptfontname') + + inst = NamedInstance() + inst.subfamilyNameID = nameTable.addName(name) + if psname is not None: + psname = tounicode(psname) + inst.postscriptNameID = nameTable.addName(psname) + inst.coordinates = {axes[k].tag:axes[k].map_backward(v) for k,v in coordinates.items()} + #inst.coordinates = {axes[k].tag:v for k,v in coordinates.items()} + fvar.instances.append(inst) + + assert "fvar" not in font + font['fvar'] = fvar + + return fvar + +def _add_avar(font, axes): + """ + Add 'avar' table to font. + + axes is an ordered dictionary of DesignspaceAxis objects. + """ + + assert axes + assert isinstance(axes, OrderedDict) + + log.info("Generating avar") + + avar = newTable('avar') + + interesting = False + for axis in axes.values(): + # Currently, some rasterizers require that the default value maps + # (-1 to -1, 0 to 0, and 1 to 1) be present for all the segment + # maps, even when the default normalization mapping for the axis + # was not modified. + # https://github.com/googlei18n/fontmake/issues/295 + # https://github.com/fonttools/fonttools/issues/1011 + # TODO(anthrotype) revert this (and 19c4b37) when issue is fixed + curve = avar.segments[axis.tag] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} + if not axis.map: + continue + + items = sorted(axis.map.items()) + keys = [item[0] for item in items] + vals = [item[1] for item in items] + + # Current avar requirements. 
We don't have to enforce + # these on the designer and can deduce some ourselves, + # but for now just enforce them. + assert axis.minimum == min(keys) + assert axis.maximum == max(keys) + assert axis.default in keys + # No duplicates + assert len(set(keys)) == len(keys) + assert len(set(vals)) == len(vals) + # Ascending values + assert sorted(vals) == vals + + keys_triple = (axis.minimum, axis.default, axis.maximum) + vals_triple = tuple(axis.map_forward(v) for v in keys_triple) + + keys = [models.normalizeValue(v, keys_triple) for v in keys] + vals = [models.normalizeValue(v, vals_triple) for v in vals] + + if all(k == v for k, v in zip(keys, vals)): + continue + interesting = True + + curve.update(zip(keys, vals)) + + assert 0.0 in curve and curve[0.0] == 0.0 + assert -1.0 not in curve or curve[-1.0] == -1.0 + assert +1.0 not in curve or curve[+1.0] == +1.0 + # curve.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}) + + assert "avar" not in font + if not interesting: + log.info("No need for avar") + avar = None + else: + font['avar'] = avar + + return avar + +def _add_stat(font, axes): + + nameTable = font['name'] + + assert "STAT" not in font + STAT = font["STAT"] = newTable('STAT') + stat = STAT.table = ot.STAT() + stat.Version = 0x00010000 + + axisRecords = [] + for i,a in enumerate(axes.values()): + axis = ot.AxisRecord() + axis.AxisTag = Tag(a.tag) + # Meh. Reuse fvar nameID! 
+ axis.AxisNameID = nameTable.addName(tounicode(a.labelname['en'])) + axis.AxisOrdering = i + axisRecords.append(axis) + + axisRecordArray = ot.AxisRecordArray() + axisRecordArray.Axis = axisRecords + # XXX these should not be hard-coded but computed automatically + stat.DesignAxisRecordSize = 8 + stat.DesignAxisCount = len(axisRecords) + stat.DesignAxisRecord = axisRecordArray + +# TODO Move to glyf or gvar table proper +def _GetCoordinates(font, glyphName): + """font, glyphName --> glyph coordinates as expected by "gvar" table + + The result includes four "phantom points" for the glyph metrics, + as mandated by the "gvar" spec. + """ + glyf = font["glyf"] + if glyphName not in glyf.glyphs: return None + glyph = glyf[glyphName] + if glyph.isComposite(): + coord = GlyphCoordinates([(getattr(c, 'x', 0),getattr(c, 'y', 0)) for c in glyph.components]) + control = (glyph.numberOfContours,[c.glyphName for c in glyph.components]) + else: + allData = glyph.getCoordinates(glyf) + coord = allData[0] + control = (glyph.numberOfContours,)+allData[1:] + + # Add phantom points for (left, right, top, bottom) positions. + horizontalAdvanceWidth, leftSideBearing = font["hmtx"].metrics[glyphName] + if not hasattr(glyph, 'xMin'): + glyph.recalcBounds(glyf) + leftSideX = glyph.xMin - leftSideBearing + rightSideX = leftSideX + horizontalAdvanceWidth + # XXX these are incorrect. Load vmtx and fix. + topSideY = glyph.yMax + bottomSideY = -glyph.yMin + coord = coord.copy() + coord.extend([(leftSideX, 0), + (rightSideX, 0), + (0, topSideY), + (0, bottomSideY)]) + + return coord, control + +# TODO Move to glyf or gvar table proper +def _SetCoordinates(font, glyphName, coord): + glyf = font["glyf"] + assert glyphName in glyf.glyphs + glyph = glyf[glyphName] + + # Handle phantom points for (left, right, top, bottom) positions. 
+ assert len(coord) >= 4 + if not hasattr(glyph, 'xMin'): + glyph.recalcBounds(glyf) + leftSideX = coord[-4][0] + rightSideX = coord[-3][0] + topSideY = coord[-2][1] + bottomSideY = coord[-1][1] + + for _ in range(4): + del coord[-1] + + if glyph.isComposite(): + assert len(coord) == len(glyph.components) + for p,comp in zip(coord, glyph.components): + if hasattr(comp, 'x'): + comp.x,comp.y = p + elif glyph.numberOfContours is 0: + assert len(coord) == 0 + else: + assert len(coord) == len(glyph.coordinates) + glyph.coordinates = coord + + glyph.recalcBounds(glyf) + + horizontalAdvanceWidth = round(rightSideX - leftSideX) + leftSideBearing = round(glyph.xMin - leftSideX) + # XXX Handle vertical + font["hmtx"].metrics[glyphName] = horizontalAdvanceWidth, leftSideBearing + +def _add_gvar(font, model, master_ttfs, tolerance=0.5, optimize=True): + + assert tolerance >= 0 + + log.info("Generating gvar") + assert "gvar" not in font + gvar = font["gvar"] = newTable('gvar') + gvar.version = 1 + gvar.reserved = 0 + gvar.variations = {} + + for glyph in font.getGlyphOrder(): + + allData = [_GetCoordinates(m, glyph) for m in master_ttfs] + allCoords = [d[0] for d in allData] + allControls = [d[1] for d in allData] + control = allControls[0] + if (any(c != control for c in allControls)): + log.warning("glyph %s has incompatible masters; skipping" % glyph) + continue + del allControls + + # Update gvar + gvar.variations[glyph] = [] + deltas = model.getDeltas(allCoords) + supports = model.supports + assert len(deltas) == len(supports) + + # Prepare for IUP optimization + origCoords = deltas[0] + endPts = control[1] if control[0] >= 1 else list(range(len(control[1]))) + + for i,(delta,support) in enumerate(zip(deltas[1:], supports[1:])): + if all(abs(v) <= tolerance for v in delta.array): + continue + var = TupleVariation(support, delta) + if optimize: + delta_opt = iup_delta_optimize(delta, origCoords, endPts, tolerance=tolerance) + + if None in delta_opt: + # Use "optimized" 
version only if smaller... + var_opt = TupleVariation(support, delta_opt) + + axis_tags = sorted(support.keys()) # Shouldn't matter that this is different from fvar...? + tupleData, auxData, _ = var.compile(axis_tags, [], None) + unoptimized_len = len(tupleData) + len(auxData) + tupleData, auxData, _ = var_opt.compile(axis_tags, [], None) + optimized_len = len(tupleData) + len(auxData) + + if optimized_len < unoptimized_len: + var = var_opt + + gvar.variations[glyph].append(var) + +def _remove_TTHinting(font): + for tag in ("cvar", "cvt ", "fpgm", "prep"): + if tag in font: + del font[tag] + for attr in ("maxTwilightPoints", "maxStorage", "maxFunctionDefs", "maxInstructionDefs", "maxStackElements", "maxSizeOfInstructions"): + setattr(font["maxp"], attr, 0) + font["maxp"].maxZones = 1 + font["glyf"].removeHinting() + # TODO: Modify gasp table to deactivate gridfitting for all ranges? + +def _merge_TTHinting(font, model, master_ttfs, tolerance=0.5): + + log.info("Merging TT hinting") + assert "cvar" not in font + + # Check that the existing hinting is compatible + + # fpgm and prep table + + for tag in ("fpgm", "prep"): + all_pgms = [m[tag].program for m in master_ttfs if tag in m] + if len(all_pgms) == 0: + continue + if tag in font: + font_pgm = font[tag].program + else: + font_pgm = Program() + if any(pgm != font_pgm for pgm in all_pgms): + log.warning("Masters have incompatible %s tables, hinting is discarded." % tag) + _remove_TTHinting(font) + return + + # glyf table + + for name, glyph in font["glyf"].glyphs.items(): + all_pgms = [ + m["glyf"][name].program + for m in master_ttfs + if hasattr(m["glyf"][name], "program") + ] + if not any(all_pgms): + continue + glyph.expand(font["glyf"]) + if hasattr(glyph, "program"): + font_pgm = glyph.program + else: + font_pgm = Program() + if any(pgm != font_pgm for pgm in all_pgms if pgm): + log.warning("Masters have incompatible glyph programs in glyph '%s', hinting is discarded." 
% name) + _remove_TTHinting(font) + return + + # cvt table + + all_cvs = [Vector(m["cvt "].values) for m in master_ttfs if "cvt " in m] + + if len(all_cvs) == 0: + # There is no cvt table to make a cvar table from, we're done here. + return + + if len(all_cvs) != len(master_ttfs): + log.warning("Some masters have no cvt table, hinting is discarded.") + _remove_TTHinting(font) + return + + num_cvt0 = len(all_cvs[0]) + if (any(len(c) != num_cvt0 for c in all_cvs)): + log.warning("Masters have incompatible cvt tables, hinting is discarded.") + _remove_TTHinting(font) + return + + # We can build the cvar table now. + + cvar = font["cvar"] = newTable('cvar') + cvar.version = 1 + cvar.variations = [] + + deltas = model.getDeltas(all_cvs) + supports = model.supports + for i,(delta,support) in enumerate(zip(deltas[1:], supports[1:])): + delta = [round(d) for d in delta] + if all(abs(v) <= tolerance for v in delta): + continue + var = TupleVariation(support, delta) + cvar.variations.append(var) + +def _add_HVAR(font, model, master_ttfs, axisTags): + + log.info("Generating HVAR") + + hAdvanceDeltas = {} + metricses = [m["hmtx"].metrics for m in master_ttfs] + for glyph in font.getGlyphOrder(): + hAdvances = [metrics[glyph][0] for metrics in metricses] + # TODO move round somewhere else? + hAdvanceDeltas[glyph] = tuple(round(d) for d in model.getDeltas(hAdvances)[1:]) + + # We only support the direct mapping right now. 
+ + supports = model.supports[1:] + varTupleList = builder.buildVarRegionList(supports, axisTags) + varTupleIndexes = list(range(len(supports))) + n = len(supports) + items = [] + zeroes = [0]*n + for glyphName in font.getGlyphOrder(): + items.append(hAdvanceDeltas.get(glyphName, zeroes)) + while items and items[-1] is zeroes: + del items[-1] + + advanceMapping = None + # Add indirect mapping to save on duplicates + uniq = set(items) + # TODO Improve heuristic + if (len(items) - len(uniq)) * len(varTupleIndexes) > len(items): + newItems = sorted(uniq) + mapper = {v:i for i,v in enumerate(newItems)} + mapping = [mapper[item] for item in items] + while len(mapping) > 1 and mapping[-1] == mapping[-2]: + del mapping[-1] + advanceMapping = builder.buildVarIdxMap(mapping) + items = newItems + del mapper, mapping, newItems + del uniq + + varData = builder.buildVarData(varTupleIndexes, items) + varstore = builder.buildVarStore(varTupleList, [varData]) + + assert "HVAR" not in font + HVAR = font["HVAR"] = newTable('HVAR') + hvar = HVAR.table = ot.HVAR() + hvar.Version = 0x00010000 + hvar.VarStore = varstore + hvar.AdvWidthMap = advanceMapping + hvar.LsbMap = hvar.RsbMap = None + +def _add_MVAR(font, model, master_ttfs, axisTags): + + log.info("Generating MVAR") + + store_builder = varStore.OnlineVarStoreBuilder(axisTags) + store_builder.setModel(model) + + records = [] + lastTableTag = None + fontTable = None + tables = None + for tag, (tableTag, itemName) in sorted(MVAR_ENTRIES.items(), key=lambda kv: kv[1]): + if tableTag != lastTableTag: + tables = fontTable = None + if tableTag in font: + # TODO Check all masters have same table set? 
+ fontTable = font[tableTag] + tables = [master[tableTag] for master in master_ttfs] + lastTableTag = tableTag + if tables is None: + continue + + # TODO support gasp entries + + master_values = [getattr(table, itemName) for table in tables] + if _all_equal(master_values): + base, varIdx = master_values[0], None + else: + base, varIdx = store_builder.storeMasters(master_values) + setattr(fontTable, itemName, base) + + if varIdx is None: + continue + log.info(' %s: %s.%s %s', tag, tableTag, itemName, master_values) + rec = ot.MetricsValueRecord() + rec.ValueTag = tag + rec.VarIdx = varIdx + records.append(rec) + + assert "MVAR" not in font + if records: + MVAR = font["MVAR"] = newTable('MVAR') + mvar = MVAR.table = ot.MVAR() + mvar.Version = 0x00010000 + mvar.Reserved = 0 + mvar.VarStore = store_builder.finish() + # XXX these should not be hard-coded but computed automatically + mvar.ValueRecordSize = 8 + mvar.ValueRecordCount = len(records) + mvar.ValueRecord = sorted(records, key=lambda r: r.ValueTag) + + +def _merge_OTL(font, model, master_fonts, axisTags): + + log.info("Merging OpenType Layout tables") + merger = VariationMerger(model, axisTags, font) + + merger.mergeTables(font, master_fonts, ['GPOS']) + store = merger.store_builder.finish() + try: + GDEF = font['GDEF'].table + assert GDEF.Version <= 0x00010002 + except KeyError: + font['GDEF']= newTable('GDEF') + GDEFTable = font["GDEF"] = newTable('GDEF') + GDEF = GDEFTable.table = ot.GDEF() + GDEF.Version = 0x00010003 + GDEF.VarStore = store + + + +# Pretty much all of this file should be redesigned and moved inot submodules... +# Such a mess right now, but kludging along... 
+class _DesignspaceAxis(object): + + def __repr__(self): + return repr(self.__dict__) + + @staticmethod + def _map(v, map): + keys = map.keys() + if not keys: + return v + if v in keys: + return map[v] + k = min(keys) + if v < k: + return v + map[k] - k + k = max(keys) + if v > k: + return v + map[k] - k + # Interpolate + a = max(k for k in keys if k < v) + b = min(k for k in keys if k > v) + va = map[a] + vb = map[b] + return va + (vb - va) * (v - a) / (b - a) + + def map_forward(self, v): + if self.map is None: return v + return self._map(v, self.map) + + def map_backward(self, v): + if self.map is None: return v + map = {v:k for k,v in self.map.items()} + return self._map(v, map) + + +def load_designspace(designspace_filename): + + ds = designspace.load(designspace_filename) + axes = ds.get('axes') + masters = ds.get('sources') + if not masters: + raise VarLibError("no sources found in .designspace") + instances = ds.get('instances', []) + + standard_axis_map = OrderedDict([ + ('weight', ('wght', {'en':'Weight'})), + ('width', ('wdth', {'en':'Width'})), + ('slant', ('slnt', {'en':'Slant'})), + ('optical', ('opsz', {'en':'Optical Size'})), + ]) + + + # Setup axes + axis_objects = OrderedDict() + if axes is not None: + for axis_dict in axes: + axis_name = axis_dict.get('name') + if not axis_name: + axis_name = axis_dict['name'] = axis_dict['tag'] + if 'map' not in axis_dict: + axis_dict['map'] = None + else: + axis_dict['map'] = {m['input']:m['output'] for m in axis_dict['map']} + + if axis_name in standard_axis_map: + if 'tag' not in axis_dict: + axis_dict['tag'] = standard_axis_map[axis_name][0] + if 'labelname' not in axis_dict: + axis_dict['labelname'] = standard_axis_map[axis_name][1].copy() + + axis = _DesignspaceAxis() + for item in ['name', 'tag', 'minimum', 'default', 'maximum', 'map']: + assert item in axis_dict, 'Axis does not have "%s"' % item + if 'labelname' not in axis_dict: + axis_dict['labelname'] = {'en': axis_name} + axis.__dict__ = axis_dict + 
axis_objects[axis_name] = axis + else: + # No element. Guess things... + base_idx = None + for i,m in enumerate(masters): + if 'info' in m and m['info']['copy']: + assert base_idx is None + base_idx = i + assert base_idx is not None, "Cannot find 'base' master; Either add element to .designspace document, or add element to one of the sources in the .designspace document." + + master_locs = [o['location'] for o in masters] + base_loc = master_locs[base_idx] + axis_names = set(base_loc.keys()) + assert all(name in standard_axis_map for name in axis_names), "Non-standard axis found and there exist no element." + + for name,(tag,labelname) in standard_axis_map.items(): + if name not in axis_names: + continue + + axis = _DesignspaceAxis() + axis.name = name + axis.tag = tag + axis.labelname = labelname.copy() + axis.default = base_loc[name] + axis.minimum = min(m[name] for m in master_locs if name in m) + axis.maximum = max(m[name] for m in master_locs if name in m) + axis.map = None + # TODO Fill in weight / width mapping from OS/2 table? Need loading fonts... + axis_objects[name] = axis + del base_idx, base_loc, axis_names, master_locs + axes = axis_objects + del axis_objects + log.info("Axes:\n%s", pformat(axes)) + + + # Check all master and instance locations are valid and fill in defaults + for obj in masters+instances: + obj_name = obj.get('name', obj.get('stylename', '')) + loc = obj['location'] + for axis_name in loc.keys(): + assert axis_name in axes, "Location axis '%s' unknown for '%s'." 
% (axis_name, obj_name) + for axis_name,axis in axes.items(): + if axis_name not in loc: + loc[axis_name] = axis.default + else: + v = axis.map_backward(loc[axis_name]) + assert axis.minimum <= v <= axis.maximum, "Location for axis '%s' (mapped to %s) out of range for '%s' [%s..%s]" % (axis_name, v, obj_name, axis.minimum, axis.maximum) + + + # Normalize master locations + + normalized_master_locs = [o['location'] for o in masters] + log.info("Internal master locations:\n%s", pformat(normalized_master_locs)) + + # TODO This mapping should ideally be moved closer to logic in _add_fvar/avar + internal_axis_supports = {} + for axis in axes.values(): + triple = (axis.minimum, axis.default, axis.maximum) + internal_axis_supports[axis.name] = [axis.map_forward(v) for v in triple] + log.info("Internal axis supports:\n%s", pformat(internal_axis_supports)) + + normalized_master_locs = [models.normalizeLocation(m, internal_axis_supports) for m in normalized_master_locs] + log.info("Normalized master locations:\n%s", pformat(normalized_master_locs)) + + + # Find base master + base_idx = None + for i,m in enumerate(normalized_master_locs): + if all(v == 0 for v in m.values()): + assert base_idx is None + base_idx = i + assert base_idx is not None, "Base master not found; no master at default location?" + log.info("Index of base master: %s", base_idx) + + return axes, internal_axis_supports, base_idx, normalized_master_locs, masters, instances + + +def build(designspace_filename, master_finder=lambda s:s): + """ + Build variation font from a designspace file. + + If master_finder is set, it should be a callable that takes master + filename as found in designspace file and map it to master font + binary as to be opened (eg. .ttf or .otf). 
+ """ + + axes, internal_axis_supports, base_idx, normalized_master_locs, masters, instances = load_designspace(designspace_filename) + + log.info("Building variable font") + log.info("Loading master fonts") + basedir = os.path.dirname(designspace_filename) + master_ttfs = [master_finder(os.path.join(basedir, m['filename'])) for m in masters] + master_fonts = [TTFont(ttf_path) for ttf_path in master_ttfs] + # Reload base font as target font + vf = TTFont(master_ttfs[base_idx]) + + # TODO append masters as named-instances as well; needs .designspace change. + fvar = _add_fvar(vf, axes, instances) + _add_stat(vf, axes) + _add_avar(vf, axes) + del instances + + # Map from axis names to axis tags... + normalized_master_locs = [{axes[k].tag:v for k,v in loc.items()} for loc in normalized_master_locs] + #del axes + # From here on, we use fvar axes only + axisTags = [axis.axisTag for axis in fvar.axes] + + # Assume single-model for now. + model = models.VariationModel(normalized_master_locs, axisOrder=axisTags) + assert 0 == model.mapping[base_idx] + + log.info("Building variations tables") + _add_MVAR(vf, model, master_fonts, axisTags) + _add_HVAR(vf, model, master_fonts, axisTags) + _merge_OTL(vf, model, master_fonts, axisTags) + if 'glyf' in vf: + _add_gvar(vf, model, master_fonts) + _merge_TTHinting(vf, model, master_fonts) + + return vf, model, master_ttfs + + +def main(args=None): + from argparse import ArgumentParser + from fontTools import configLogger + + parser = ArgumentParser(prog='varLib') + parser.add_argument('designspace') + options = parser.parse_args(args) + + # TODO: allow user to configure logging via command-line options + configLogger(level="INFO") + + designspace_filename = options.designspace + finder = lambda s: s.replace('master_ufo', 'master_ttf_interpolatable').replace('.ufo', '.ttf') + outfile = os.path.splitext(designspace_filename)[0] + '-VF.ttf' + + vf, model, master_ttfs = build(designspace_filename, finder) + + log.info("Saving variation 
font %s", outfile) + vf.save(outfile) + + +if __name__ == "__main__": + import sys + if len(sys.argv) > 1: + sys.exit(main()) + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/varLib/interpolatable.py fonttools-3.21.2/Snippets/fontTools/varLib/interpolatable.py --- fonttools-3.0/Snippets/fontTools/varLib/interpolatable.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/interpolatable.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,181 @@ +""" +Tool to find wrong contour order between different masters, and +other interpolatability (or lack thereof) issues. + +Call as: +$ fonttools varLib.interpolatable font1 font2 ... +""" + +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +from fontTools.pens.basePen import AbstractPen, BasePen +from fontTools.pens.recordingPen import RecordingPen +from fontTools.pens.statisticsPen import StatisticsPen +import itertools + + +class PerContourPen(BasePen): + def __init__(self, Pen, glyphset=None): + BasePen.__init__(self, glyphset) + self._glyphset = glyphset + self._Pen = Pen + self._pen = None + self.value = [] + def _moveTo(self, p0): + self._newItem() + self._pen.moveTo(p0) + def _lineTo(self, p1): + self._pen.lineTo(p1) + def _qCurveToOne(self, p1, p2): + self._pen.qCurveTo(p1, p2) + def _curveToOne(self, p1, p2, p3): + self._pen.curveTo(p1, p2, p3) + def _closePath(self): + self._pen.closePath() + self._pen = None + def _endPath(self): + self._pen.endPath() + self._pen = None + + def _newItem(self): + self._pen = pen = self._Pen() + self.value.append(pen) + +class PerContourOrComponentPen(PerContourPen): + + def addComponent(self, glyphName, transformation): + self._newItem() + self.value[-1].addComponent(glyphName, transformation) + + +def _vdiff(v0, v1): + return tuple(b-a for a,b in zip(v0,v1)) +def _vlen(vec): + v = 0 + for x in vec: + v += x*x + return v + +def _matching_cost(G, 
matching): + return sum(G[i][j] for i,j in enumerate(matching)) + +def min_cost_perfect_bipartite_matching(G): + n = len(G) + try: + from scipy.optimize import linear_sum_assignment + rows, cols = linear_sum_assignment(G) + assert (rows == list(range(n))).all() + return list(cols), _matching_cost(G, cols) + except ImportError: + pass + + try: + from munkres import Munkres + cols = [None] * n + for row,col in Munkres().compute(G): + cols[row] = col + return cols, _matching_cost(G, cols) + except ImportError: + pass + + if n > 6: + raise Exception("Install Python module 'munkres' or 'scipy >= 0.17.0'") + + # Otherwise just brute-force + permutations = itertools.permutations(range(n)) + best = list(next(permutations)) + best_cost = _matching_cost(G, best) + for p in permutations: + cost = _matching_cost(G, p) + if cost < best_cost: + best, best_cost = list(p), cost + return best, best_cost + + +def test(glyphsets, glyphs=None, names=None): + + if names is None: + names = glyphsets + if glyphs is None: + glyphs = glyphsets[0].keys() + + hist = [] + for glyph_name in glyphs: + #print() + #print(glyph_name) + + try: + allVectors = [] + for glyphset,name in zip(glyphsets, names): + #print('.', end='') + glyph = glyphset[glyph_name] + + perContourPen = PerContourOrComponentPen(RecordingPen, glyphset=glyphset) + glyph.draw(perContourPen) + contourPens = perContourPen.value + del perContourPen + + contourVectors = [] + allVectors.append(contourVectors) + for contour in contourPens: + stats = StatisticsPen(glyphset=glyphset) + contour.replay(stats) + size = abs(stats.area) ** .5 * .5 + vector = ( + int(size), + int(stats.meanX), + int(stats.meanY), + int(stats.stddevX * 2), + int(stats.stddevY * 2), + int(stats.correlation * size), + ) + contourVectors.append(vector) + #print(vector) + + # Check each master against the next one in the list. 
+ for i,(m0,m1) in enumerate(zip(allVectors[:-1],allVectors[1:])): + if len(m0) != len(m1): + print('%s: %s+%s: Glyphs not compatible!!!!!' % (glyph_name, names[i], names[i+1])) + continue + if not m0: + continue + costs = [[_vlen(_vdiff(v0,v1)) for v1 in m1] for v0 in m0] + matching, matching_cost = min_cost_perfect_bipartite_matching(costs) + if matching != list(range(len(m0))): + print('%s: %s+%s: Glyph has wrong contour/component order: %s' % (glyph_name, names[i], names[i+1], matching)) #, m0, m1) + break + upem = 2048 + item_cost = round((matching_cost / len(m0) / len(m0[0])) ** .5 / upem * 100) + hist.append(item_cost) + threshold = 7 + if item_cost >= threshold: + print('%s: %s+%s: Glyph has very high cost: %d%%' % (glyph_name, names[i], names[i+1], item_cost)) + + + except ValueError as e: + print('%s: %s: math error %s; skipping glyph.' % (glyph_name, name, e)) + print(contour.value) + #raise + #for x in hist: + # print(x) + +def main(args): + filenames = args + glyphs = None + #glyphs = ['uni08DB', 'uniFD76'] + #glyphs = ['uni08DE', 'uni0034'] + #glyphs = ['uni08DE', 'uni0034', 'uni0751', 'uni0753', 'uni0754', 'uni08A4', 'uni08A4.fina', 'uni08A5.fina'] + + from os.path import basename + names = [basename(filename).rsplit('.', 1)[0] for filename in filenames] + + from fontTools.ttLib import TTFont + fonts = [TTFont(filename) for filename in filenames] + + glyphsets = [font.getGlyphSet() for font in fonts] + test(glyphsets, glyphs=glyphs, names=names) + +if __name__ == '__main__': + import sys + main(sys.argv[1:]) diff -Nru fonttools-3.0/Snippets/fontTools/varLib/interpolate_layout.py fonttools-3.21.2/Snippets/fontTools/varLib/interpolate_layout.py --- fonttools-3.0/Snippets/fontTools/varLib/interpolate_layout.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/interpolate_layout.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,91 @@ +""" +Interpolate OpenType Layout tables (GDEF / GPOS / GSUB). 
+""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.varLib import models, VarLibError, load_designspace +from fontTools.varLib.merger import InstancerMerger +import os.path +import logging +from pprint import pformat + +log = logging.getLogger("fontTools.varLib.interpolate_layout") + + +def interpolate_layout(designspace_filename, loc, master_finder=lambda s:s, mapped=False): + """ + Interpolate GPOS from a designspace file and location. + + If master_finder is set, it should be a callable that takes master + filename as found in designspace file and map it to master font + binary as to be opened (eg. .ttf or .otf). + + If mapped is False (default), then location is mapped using the + map element of the axes in designspace file. If mapped is True, + it is assumed that location is in designspace's internal space and + no mapping is performed. + """ + + axes, internal_axis_supports, base_idx, normalized_master_locs, masters, instances = load_designspace(designspace_filename) + + + log.info("Building interpolated font") + log.info("Loading master fonts") + basedir = os.path.dirname(designspace_filename) + master_ttfs = [master_finder(os.path.join(basedir, m['filename'])) for m in masters] + master_fonts = [TTFont(ttf_path) for ttf_path in master_ttfs] + + #font = master_fonts[base_idx] + font = TTFont(master_ttfs[base_idx]) + + log.info("Location: %s", pformat(loc)) + if not mapped: + loc = {name:axes[name].map_forward(v) for name,v in loc.items()} + log.info("Internal location: %s", pformat(loc)) + loc = models.normalizeLocation(loc, internal_axis_supports) + log.info("Normalized location: %s", pformat(loc)) + + # Assume single-model for now. 
+ model = models.VariationModel(normalized_master_locs) + assert 0 == model.mapping[base_idx] + + merger = InstancerMerger(font, model, loc) + + log.info("Building interpolated tables") + merger.mergeTables(font, master_fonts, ['GPOS']) + return font + + +def main(args=None): + from fontTools import configLogger + + import sys + if args is None: + args = sys.argv[1:] + + designspace_filename = args[0] + locargs = args[1:] + outfile = os.path.splitext(designspace_filename)[0] + '-instance.ttf' + + # TODO: allow user to configure logging via command-line options + configLogger(level="INFO") + + finder = lambda s: s.replace('master_ufo', 'master_ttf_interpolatable').replace('.ufo', '.ttf') + + loc = {} + for arg in locargs: + tag,val = arg.split('=') + loc[tag] = float(val) + + font = interpolate_layout(designspace_filename, loc, finder) + log.info("Saving font %s", outfile) + font.save(outfile) + + +if __name__ == "__main__": + import sys + if len(sys.argv) > 1: + sys.exit(main()) + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/varLib/iup.py fonttools-3.21.2/Snippets/fontTools/varLib/iup.py --- fonttools-3.0/Snippets/fontTools/varLib/iup.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/iup.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,305 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * + + +def iup_segment(coords, rc1, rd1, rc2, rd2): + # rc1 = reference coord 1 + # rd1 = reference delta 1 + out_arrays = [None, None] + for j in 0,1: + out_arrays[j] = out = [] + x1, x2, d1, d2 = rc1[j], rc2[j], rd1[j], rd2[j] + + + if x1 == x2: + n = len(coords) + if d1 == d2: + out.extend([d1]*n) + else: + out.extend([0]*n) + continue + + if x1 > x2: + x1, x2 = x2, x1 + d1, d2 = d2, d1 + + # x1 < x2 + scale = (d2 - d1) / (x2 - x1) + for pair in coords: + x = pair[j] + + if x <= x1: + d = d1 + elif x 
>= x2: + d = d2 + else: + # Interpolate + d = d1 + (x - x1) * scale + + out.append(d) + + return zip(*out_arrays) + +def iup_contour(delta, coords): + assert len(delta) == len(coords) + if None not in delta: + return delta + + n = len(delta) + # indices of points with explicit deltas + indices = [i for i,v in enumerate(delta) if v is not None] + if not indices: + # All deltas are None. Return 0,0 for all. + return [(0,0)]*n + + out = [] + it = iter(indices) + start = next(it) + if start != 0: + # Initial segment that wraps around + i1, i2, ri1, ri2 = 0, start, start, indices[-1] + out.extend(iup_segment(coords[i1:i2], coords[ri1], delta[ri1], coords[ri2], delta[ri2])) + out.append(delta[start]) + for end in it: + if end - start > 1: + i1, i2, ri1, ri2 = start+1, end, start, end + out.extend(iup_segment(coords[i1:i2], coords[ri1], delta[ri1], coords[ri2], delta[ri2])) + out.append(delta[end]) + start = end + if start != n-1: + # Final segment that wraps around + i1, i2, ri1, ri2 = start+1, n, start, indices[0] + out.extend(iup_segment(coords[i1:i2], coords[ri1], delta[ri1], coords[ri2], delta[ri2])) + + assert len(delta) == len(out), (len(delta), len(out)) + return out + +def iup_delta(delta, coords, ends): + assert sorted(ends) == ends and len(coords) == (ends[-1]+1 if ends else 0) + 4 + n = len(coords) + ends = ends + [n-4, n-3, n-2, n-1] + out = [] + start = 0 + for end in ends: + end += 1 + contour = iup_contour(delta[start:end], coords[start:end]) + out.extend(contour) + start = end + + return out + +# Optimizer + +def can_iup_in_between(deltas, coords, i, j, tolerance): + assert j - i >= 2 + interp = list(iup_segment(coords[i+1:j], coords[i], deltas[i], coords[j], deltas[j])) + deltas = deltas[i+1:j] + + assert len(deltas) == len(interp) + + return all(abs(complex(x-p, y-q)) <= tolerance for (x,y),(p,q) in zip(deltas, interp)) + +def _iup_contour_bound_forced_set(delta, coords, tolerance=0): + """The forced set is a conservative set of points on the contour 
that must be encoded + explicitly (ie. cannot be interpolated). Calculating this set allows for significantly + speeding up the dynamic-programming, as well as resolve circularity in DP. + + The set is precise; that is, if an index is in the returned set, then there is no way + that IUP can generate delta for that point, given coords and delta. + """ + assert len(delta) == len(coords) + + forced = set() + # Track "last" and "next" points on the contour as we sweep. + nd, nc = delta[0], coords[0] + ld, lc = delta[-1], coords[-1] + for i in range(len(delta)-1, -1, -1): + d, c = ld, lc + ld, lc = delta[i-1], coords[i-1] + + for j in (0,1): # For X and for Y + cj = c[j] + dj = d[j] + lcj = lc[j] + ldj = ld[j] + ncj = nc[j] + ndj = nd[j] + + if lcj <= ncj: + c1, c2 = lcj, ncj + d1, d2 = ldj, ndj + else: + c1, c2 = ncj, lcj + d1, d2 = ndj, ldj + + # If coordinate for current point is between coordinate of adjacent + # points on the two sides, but the delta for current point is NOT + # between delta for those adjacent points (considering tolerance + # allowance), then there is no way that current point can be IUP-ed. + # Mark it forced. + force = False + if c1 <= cj <= c2: + if not (min(d1,d2)-tolerance <= dj <= max(d1,d2)+tolerance): + force = True + else: # cj < c1 or c2 < cj + if c1 == c2: + if d1 == d2: + if abs(dj - d1) > tolerance: + force = True + else: + if abs(dj) > tolerance: + # Disabled the following because the "d1 == d2" does + # check does not take tolerance into consideration... + pass # force = True + elif d1 != d2: + if cj < c1: + if dj != d1 and ((dj-tolerance < d1) != (d1 < d2)): + force = True + else: # c2 < cj + if d2 != dj and ((d2 < dj+tolerance) != (d1 < d2)): + force = True + + if force: + forced.add(i) + break + + nd, nc = d, c + + return forced + +def _iup_contour_optimize_dp(delta, coords, forced={}, tolerance=0, lookback=None): + """Straightforward Dynamic-Programming. 
For each index i, find least-costly encoding of + points i to n-1 where i is explicitly encoded. We find this by considering all next + explicit points j and check whether interpolation can fill points between i and j. + + Note that solution always encodes last point explicitly. Higher-level is responsible + for removing that restriction. + + As major speedup, we stop looking further whenever we see a "forced" point.""" + + n = len(delta) + if lookback is None: + lookback = n + costs = {-1:0} + chain = {-1:None} + for i in range(0, n): + best_cost = costs[i-1] + 1 + + costs[i] = best_cost + chain[i] = i - 1 + + if i - 1 in forced: + continue + + for j in range(i-2, max(i-lookback, -2), -1): + + cost = costs[j] + 1 + + if cost < best_cost and can_iup_in_between(delta, coords, j, i, tolerance): + costs[i] = best_cost = cost + chain[i] = j + + if j in forced: + break + + return chain, costs + +def _rot_list(l, k): + """Rotate list by k items forward. Ie. item at position 0 will be + at position k in returned list. Negative k is allowed.""" + n = len(l) + k %= n + if not k: return l + return l[n-k:] + l[:n-k] + +def _rot_set(s, k, n): + k %= n + if not k: return s + return {(v + k) % n for v in s} + +def iup_contour_optimize(delta, coords, tolerance=0.): + n = len(delta) + + # Get the easy cases out of the way: + + # If all are within tolerance distance of 0, encode nothing: + if all(abs(complex(*p)) <= tolerance for p in delta): + return [None] * n + + # If there's exactly one point, return it: + if n == 1: + return delta + + # If all deltas are exactly the same, return just one (the first one): + d0 = delta[0] + if all(d0 == d for d in delta): + return [d0] + [None] * (n-1) + + # Else, solve the general problem using Dynamic Programming. + + forced = _iup_contour_bound_forced_set(delta, coords, tolerance) + # The _iup_contour_optimize_dp() routine returns the optimal encoding + # solution given the constraint that the last point is always encoded. 
+ # To remove this constraint, we use two different methods, depending on + # whether forced set is non-empty or not: + + if forced: + # Forced set is non-empty: rotate the contour start point + # such that the last point in the list is a forced point. + k = (n-1) - max(forced) + assert k >= 0 + + delta = _rot_list(delta, k) + coords = _rot_list(coords, k) + forced = _rot_set(forced, k, n) + + chain, costs = _iup_contour_optimize_dp(delta, coords, forced, tolerance) + + # Assemble solution. + solution = set() + i = n - 1 + while i is not None: + solution.add(i) + i = chain[i] + assert forced <= solution, (forced, solution) + delta = [delta[i] if i in solution else None for i in range(n)] + + delta = _rot_list(delta, -k) + else: + # Repeat the contour an extra time, solve the 2*n case, then look for solutions of the + # circular n-length problem in the solution for 2*n linear case. I cannot prove that + # this always produces the optimal solution... + chain, costs = _iup_contour_optimize_dp(delta+delta, coords+coords, forced, tolerance, n) + best_sol, best_cost = None, n+1 + + for start in range(n-1, 2*n-1): + # Assemble solution. 
+ solution = set() + i = start + while i > start - n: + solution.add(i % n) + i = chain[i] + if i == start - n: + cost = costs[start] - costs[start - n] + if cost <= best_cost: + best_sol, best_cost = solution, cost + + delta = [delta[i] if i in best_sol else None for i in range(n)] + + + return delta + +def iup_delta_optimize(delta, coords, ends, tolerance=0.): + assert sorted(ends) == ends and len(coords) == (ends[-1]+1 if ends else 0) + 4 + n = len(coords) + ends = ends + [n-4, n-3, n-2, n-1] + out = [] + start = 0 + for end in ends: + contour = iup_contour_optimize(delta[start:end+1], coords[start:end+1], tolerance) + assert len(contour) == end - start + 1 + out.extend(contour) + start = end+1 + + return out diff -Nru fonttools-3.0/Snippets/fontTools/varLib/__main__.py fonttools-3.21.2/Snippets/fontTools/varLib/__main__.py --- fonttools-3.0/Snippets/fontTools/varLib/__main__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/__main__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +from fontTools.varLib import main + +if __name__ == '__main__': + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/fontTools/varLib/merger.py fonttools-3.21.2/Snippets/fontTools/varLib/merger.py --- fonttools-3.0/Snippets/fontTools/varLib/merger.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/merger.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,829 @@ +""" +Merge OpenType Layout tables (GDEF / GPOS / GSUB). 
+""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc import classifyTools +from fontTools.ttLib.tables import otTables as ot +from fontTools.ttLib.tables import otBase as otBase +from fontTools.ttLib.tables.DefaultTable import DefaultTable +from fontTools.varLib import builder, varStore +from fontTools.varLib.varStore import VarStoreInstancer +from functools import reduce + + +class Merger(object): + + def __init__(self, font=None): + self.font = font + + @classmethod + def merger(celf, clazzes, attrs=(None,)): + assert celf != Merger, 'Subclass Merger instead.' + if 'mergers' not in celf.__dict__: + celf.mergers = {} + if type(clazzes) == type: + clazzes = (clazzes,) + if type(attrs) == str: + attrs = (attrs,) + def wrapper(method): + assert method.__name__ == 'merge' + done = [] + for clazz in clazzes: + if clazz in done: continue # Support multiple names of a clazz + done.append(clazz) + mergers = celf.mergers.setdefault(clazz, {}) + for attr in attrs: + assert attr not in mergers, \ + "Oops, class '%s' has merge function for '%s' defined already." 
% (clazz.__name__, attr) + mergers[attr] = method + return None + return wrapper + + @classmethod + def mergersFor(celf, thing, _default={}): + typ = type(thing) + + for celf in celf.mro(): + + mergers = getattr(celf, 'mergers', None) + if mergers is None: + break; + + m = celf.mergers.get(typ, None) + if m is not None: + return m + + return _default + + def mergeObjects(self, out, lst, exclude=()): + keys = sorted(vars(out).keys()) + assert all(keys == sorted(vars(v).keys()) for v in lst), \ + (keys, [sorted(vars(v).keys()) for v in lst]) + mergers = self.mergersFor(out) + defaultMerger = mergers.get('*', self.__class__.mergeThings) + try: + for key in keys: + if key in exclude: continue + value = getattr(out, key) + values = [getattr(table, key) for table in lst] + mergerFunc = mergers.get(key, defaultMerger) + mergerFunc(self, value, values) + except Exception as e: + e.args = e.args + ('.'+key,) + raise + + def mergeLists(self, out, lst): + count = len(out) + assert all(count == len(v) for v in lst), (count, [len(v) for v in lst]) + for i,(value,values) in enumerate(zip(out, zip(*lst))): + try: + self.mergeThings(value, values) + except Exception as e: + e.args = e.args + ('[%d]' % i,) + raise + + def mergeThings(self, out, lst): + clazz = type(out) + try: + assert all(type(item) == clazz for item in lst), (out, lst) + mergerFunc = self.mergersFor(out).get(None, None) + if mergerFunc is not None: + mergerFunc(self, out, lst) + elif hasattr(out, '__dict__'): + self.mergeObjects(out, lst) + elif isinstance(out, list): + self.mergeLists(out, lst) + else: + assert all(out == v for v in lst), (out, lst) + except Exception as e: + e.args = e.args + (clazz.__name__,) + raise + + def mergeTables(self, font, master_ttfs, tables): + + for tag in tables: + if tag not in font: continue + self.mergeThings(font[tag], [m[tag] for m in master_ttfs]) + +# +# Aligning merger +# +class AligningMerger(Merger): + pass + +def _SinglePosUpgradeToFormat2(self): + if self.Format == 2: 
return self + + ret = ot.SinglePos() + ret.Format = 2 + ret.Coverage = self.Coverage + ret.ValueFormat = self.ValueFormat + ret.Value = [self.Value for g in ret.Coverage.glyphs] + ret.ValueCount = len(ret.Value) + + return ret + +def _merge_GlyphOrders(font, lst, values_lst=None, default=None): + """Takes font and list of glyph lists (must be sorted by glyph id), and returns + two things: + - Combined glyph list, + - If values_lst is None, return input glyph lists, but padded with None when a glyph + was missing in a list. Otherwise, return values_lst list-of-list, padded with None + to match combined glyph lists. + """ + if values_lst is None: + dict_sets = [set(l) for l in lst] + else: + dict_sets = [{g:v for g,v in zip(l,vs)} for l,vs in zip(lst,values_lst)] + combined = set() + combined.update(*dict_sets) + + sortKey = font.getReverseGlyphMap().__getitem__ + order = sorted(combined, key=sortKey) + # Make sure all input glyphsets were in proper order + assert all(sorted(vs, key=sortKey) == vs for vs in lst) + del combined + + paddedValues = None + if values_lst is None: + padded = [[glyph if glyph in dict_set else default + for glyph in order] + for dict_set in dict_sets] + else: + assert len(lst) == len(values_lst) + padded = [[dict_set[glyph] if glyph in dict_set else default + for glyph in order] + for dict_set in dict_sets] + return order, padded + +def _Lookup_SinglePos_get_effective_value(subtables, glyph): + for self in subtables: + if self is None or \ + type(self) != ot.SinglePos or \ + self.Coverage is None or \ + glyph not in self.Coverage.glyphs: + continue + if self.Format == 1: + return self.Value + elif self.Format == 2: + return self.Value[self.Coverage.glyphs.index(glyph)] + else: + assert 0 + return None + +def _Lookup_PairPos_get_effective_value_pair(subtables, firstGlyph, secondGlyph): + for self in subtables: + if self is None or \ + type(self) != ot.PairPos or \ + self.Coverage is None or \ + firstGlyph not in self.Coverage.glyphs: + 
continue + if self.Format == 1: + ps = self.PairSet[self.Coverage.glyphs.index(firstGlyph)] + pvr = ps.PairValueRecord + for rec in pvr: # TODO Speed up + if rec.SecondGlyph == secondGlyph: + return rec + continue + elif self.Format == 2: + klass1 = self.ClassDef1.classDefs.get(firstGlyph, 0) + klass2 = self.ClassDef2.classDefs.get(secondGlyph, 0) + return self.Class1Record[klass1].Class2Record[klass2] + else: + assert 0 + return None + +@AligningMerger.merger(ot.SinglePos) +def merge(merger, self, lst): + self.ValueFormat = valueFormat = reduce(int.__or__, [l.ValueFormat for l in lst], 0) + assert len(lst) == 1 or (valueFormat & ~0xF == 0), valueFormat + + # If all have same coverage table and all are format 1, + if all(v.Format == 1 for v in lst) and all(self.Coverage.glyphs == v.Coverage.glyphs for v in lst): + self.Value = otBase.ValueRecord(valueFormat) + merger.mergeThings(self.Value, [v.Value for v in lst]) + self.ValueFormat = self.Value.getFormat() + return + + # Upgrade everything to Format=2 + self.Format = 2 + lst = [_SinglePosUpgradeToFormat2(v) for v in lst] + + # Align them + glyphs, padded = _merge_GlyphOrders(merger.font, + [v.Coverage.glyphs for v in lst], + [v.Value for v in lst]) + + self.Coverage.glyphs = glyphs + self.Value = [otBase.ValueRecord(valueFormat) for g in glyphs] + self.ValueCount = len(self.Value) + + for i,values in enumerate(padded): + for j,glyph in enumerate(glyphs): + if values[j] is not None: continue + # Fill in value from other subtables + # Note!!! This *might* result in behavior change if ValueFormat2-zeroedness + # is different between used subtable and current subtable! + # TODO(behdad) Check and warn if that happens? + v = _Lookup_SinglePos_get_effective_value(merger.lookup_subtables[i], glyph) + if v is None: + v = otBase.ValueRecord(valueFormat) + values[j] = v + + merger.mergeLists(self.Value, padded) + + # Merge everything else; though, there shouldn't be anything else. 
:) + merger.mergeObjects(self, lst, + exclude=('Format', 'Coverage', 'Value', 'ValueCount')) + self.ValueFormat = reduce(int.__or__, [v.getFormat() for v in self.Value], 0) + +@AligningMerger.merger(ot.PairSet) +def merge(merger, self, lst): + # Align them + glyphs, padded = _merge_GlyphOrders(merger.font, + [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst], + [vs.PairValueRecord for vs in lst]) + + self.PairValueRecord = pvrs = [] + for glyph in glyphs: + pvr = ot.PairValueRecord() + pvr.SecondGlyph = glyph + pvr.Value1 = otBase.ValueRecord(merger.valueFormat1) if merger.valueFormat1 else None + pvr.Value2 = otBase.ValueRecord(merger.valueFormat2) if merger.valueFormat2 else None + pvrs.append(pvr) + self.PairValueCount = len(self.PairValueRecord) + + for i,values in enumerate(padded): + for j,glyph in enumerate(glyphs): + # Fill in value from other subtables + v = ot.PairValueRecord() + v.SecondGlyph = glyph + if values[j] is not None: + vpair = values[j] + else: + vpair = _Lookup_PairPos_get_effective_value_pair(merger.lookup_subtables[i], self._firstGlyph, glyph) + if vpair is None: + v1, v2 = None, None + else: + v1, v2 = vpair.Value1, vpair.Value2 + v.Value1 = otBase.ValueRecord(merger.valueFormat1, src=v1) if merger.valueFormat1 else None + v.Value2 = otBase.ValueRecord(merger.valueFormat2, src=v2) if merger.valueFormat2 else None + values[j] = v + del self._firstGlyph + + merger.mergeLists(self.PairValueRecord, padded) + +def _PairPosFormat1_merge(self, lst, merger): + assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.PairSet]), "Report bug against fonttools." + + # Merge everything else; makes sure Format is the same. 
+ merger.mergeObjects(self, lst, + exclude=('Coverage', + 'PairSet', 'PairSetCount', + 'ValueFormat1', 'ValueFormat2')) + + empty = ot.PairSet() + empty.PairValueRecord = [] + empty.PairValueCount = 0 + + # Align them + glyphs, padded = _merge_GlyphOrders(merger.font, + [v.Coverage.glyphs for v in lst], + [v.PairSet for v in lst], + default=empty) + + self.Coverage.glyphs = glyphs + self.PairSet = [ot.PairSet() for g in glyphs] + self.PairSetCount = len(self.PairSet) + for glyph, ps in zip(glyphs, self.PairSet): + ps._firstGlyph = glyph + + merger.mergeLists(self.PairSet, padded) + +def _ClassDef_invert(self, allGlyphs=None): + + if isinstance(self, dict): + classDefs = self + else: + classDefs = self.classDefs if self and self.classDefs else {} + m = max(classDefs.values()) if classDefs else 0 + + ret = [] + for _ in range(m + 1): + ret.append(set()) + + for k,v in classDefs.items(): + ret[v].add(k) + + # Class-0 is special. It's "everything else". + if allGlyphs is None: + ret[0] = None + else: + # Limit all classes to glyphs in allGlyphs. + # Collect anything without a non-zero class into class=zero. 
+ ret[0] = class0 = set(allGlyphs) + for s in ret[1:]: + s.intersection_update(class0) + class0.difference_update(s) + + return ret + +def _ClassDef_merge_classify(lst, allGlyphs=None): + self = ot.ClassDef() + self.classDefs = classDefs = {} + + classifier = classifyTools.Classifier() + for l in lst: + sets = _ClassDef_invert(l, allGlyphs=allGlyphs) + if allGlyphs is None: + sets = sets[1:] + classifier.update(sets) + classes = classifier.getClasses() + + if allGlyphs is None: + classes.insert(0, set()) + + for i,classSet in enumerate(classes): + if i == 0: + continue + for g in classSet: + classDefs[g] = i + + return self, classes + +def _ClassDef_calculate_Format(self, font): + fmt = 2 + ranges = self._getClassRanges(font) + if ranges: + startGlyph = ranges[0][1] + endGlyph = ranges[-1][3] + glyphCount = endGlyph - startGlyph + 1 + if len(ranges) * 3 >= glyphCount + 1: + # Format 1 is more compact + fmt = 1 + self.Format = fmt + +def _PairPosFormat2_align_matrices(self, lst, font, transparent=False): + + matrices = [l.Class1Record for l in lst] + + # Align first classes + self.ClassDef1, classes = _ClassDef_merge_classify([l.ClassDef1 for l in lst], allGlyphs=set(self.Coverage.glyphs)) + _ClassDef_calculate_Format(self.ClassDef1, font) + self.Class1Count = len(classes) + new_matrices = [] + for l,matrix in zip(lst, matrices): + nullRow = None + coverage = set(l.Coverage.glyphs) + classDef1 = l.ClassDef1.classDefs + class1Records = [] + for classSet in classes: + exemplarGlyph = next(iter(classSet)) + if exemplarGlyph not in coverage: + if nullRow is None: + nullRow = ot.Class1Record() + class2records = nullRow.Class2Record = [] + # TODO: When merger becomes selfless, revert e6125b353e1f54a0280ded5434b8e40d042de69f + for _ in range(l.Class2Count): + if transparent: + rec2 = None + else: + rec2 = ot.Class2Record() + rec2.Value1 = otBase.ValueRecord(self.ValueFormat1) if self.ValueFormat1 else None + rec2.Value2 = otBase.ValueRecord(self.ValueFormat2) if 
self.ValueFormat2 else None + class2records.append(rec2) + rec1 = nullRow + else: + klass = classDef1.get(exemplarGlyph, 0) + rec1 = matrix[klass] # TODO handle out-of-range? + class1Records.append(rec1) + new_matrices.append(class1Records) + matrices = new_matrices + del new_matrices + + # Align second classes + self.ClassDef2, classes = _ClassDef_merge_classify([l.ClassDef2 for l in lst]) + _ClassDef_calculate_Format(self.ClassDef2, font) + self.Class2Count = len(classes) + new_matrices = [] + for l,matrix in zip(lst, matrices): + classDef2 = l.ClassDef2.classDefs + class1Records = [] + for rec1old in matrix: + oldClass2Records = rec1old.Class2Record + rec1new = ot.Class1Record() + class2Records = rec1new.Class2Record = [] + for classSet in classes: + if not classSet: # class=0 + rec2 = oldClass2Records[0] + else: + exemplarGlyph = next(iter(classSet)) + klass = classDef2.get(exemplarGlyph, 0) + rec2 = oldClass2Records[klass] + class2Records.append(rec2) + class1Records.append(rec1new) + new_matrices.append(class1Records) + matrices = new_matrices + del new_matrices + + return matrices + +def _PairPosFormat2_merge(self, lst, merger): + assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.Class1Record]), "Report bug against fonttools." + + merger.mergeObjects(self, lst, + exclude=('Coverage', + 'ClassDef1', 'Class1Count', + 'ClassDef2', 'Class2Count', + 'Class1Record', + 'ValueFormat1', 'ValueFormat2')) + + # Align coverages + glyphs, _ = _merge_GlyphOrders(merger.font, + [v.Coverage.glyphs for v in lst]) + self.Coverage.glyphs = glyphs + + # Currently, if the coverage of PairPosFormat2 subtables are different, + # we do NOT bother walking down the subtable list when filling in new + # rows for alignment. As such, this is only correct if current subtable + # is the last subtable in the lookup. Ensure that. + # + # Note that our canonicalization process merges trailing PairPosFormat2's, + # so in reality this is rare. 
+ for l,subtables in zip(lst,merger.lookup_subtables): + if l.Coverage.glyphs != glyphs: + assert l == subtables[-1] + + matrices = _PairPosFormat2_align_matrices(self, lst, merger.font) + + self.Class1Record = list(matrices[0]) # TODO move merger to be selfless + merger.mergeLists(self.Class1Record, matrices) + +@AligningMerger.merger(ot.PairPos) +def merge(merger, self, lst): + merger.valueFormat1 = self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0) + merger.valueFormat2 = self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0) + + if self.Format == 1: + _PairPosFormat1_merge(self, lst, merger) + elif self.Format == 2: + _PairPosFormat2_merge(self, lst, merger) + else: + assert False + + del merger.valueFormat1, merger.valueFormat2 + + # Now examine the list of value records, and update to the union of format values, + # as merge might have created new values. + vf1 = 0 + vf2 = 0 + if self.Format == 1: + for pairSet in self.PairSet: + for pairValueRecord in pairSet.PairValueRecord: + pv1 = pairValueRecord.Value1 + if pv1 is not None: + vf1 |= pv1.getFormat() + pv2 = pairValueRecord.Value2 + if pv2 is not None: + vf2 |= pv2.getFormat() + elif self.Format == 2: + for class1Record in self.Class1Record: + for class2Record in class1Record.Class2Record: + pv1 = class2Record.Value1 + if pv1 is not None: + vf1 |= pv1.getFormat() + pv2 = class2Record.Value2 + if pv2 is not None: + vf2 |= pv2.getFormat() + self.ValueFormat1 = vf1 + self.ValueFormat2 = vf2 + + +def _PairSet_flatten(lst, font): + self = ot.PairSet() + self.Coverage = ot.Coverage() + self.Coverage.Format = 1 + + # Align them + glyphs, padded = _merge_GlyphOrders(font, + [[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst], + [vs.PairValueRecord for vs in lst]) + + self.Coverage.glyphs = glyphs + self.PairValueRecord = pvrs = [] + for values in zip(*padded): + for v in values: + if v is not None: + pvrs.append(v) + break + else: + assert False + self.PairValueCount 
= len(self.PairValueRecord) + + return self + +def _Lookup_PairPosFormat1_subtables_flatten(lst, font): + assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.PairSet]), "Report bug against fonttools." + + self = ot.PairPos() + self.Format = 1 + self.Coverage = ot.Coverage() + self.Coverage.Format = 1 + self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0) + self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0) + + # Align them + glyphs, padded = _merge_GlyphOrders(font, + [v.Coverage.glyphs for v in lst], + [v.PairSet for v in lst]) + + self.Coverage.glyphs = glyphs + self.PairSet = [_PairSet_flatten([v for v in values if v is not None], font) + for values in zip(*padded)] + self.PairSetCount = len(self.PairSet) + return self + +def _Lookup_PairPosFormat2_subtables_flatten(lst, font): + assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.Class1Record]), "Report bug against fonttools." + + self = ot.PairPos() + self.Format = 2 + self.Coverage = ot.Coverage() + self.Coverage.Format = 1 + self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0) + self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0) + + # Align them + glyphs, _ = _merge_GlyphOrders(font, + [v.Coverage.glyphs for v in lst]) + self.Coverage.glyphs = glyphs + + matrices = _PairPosFormat2_align_matrices(self, lst, font, transparent=True) + + matrix = self.Class1Record = [] + for rows in zip(*matrices): + row = ot.Class1Record() + matrix.append(row) + row.Class2Record = [] + row = row.Class2Record + for cols in zip(*list(r.Class2Record for r in rows)): + col = next(iter(c for c in cols if c is not None)) + row.append(col) + + return self + +def _Lookup_PairPos_subtables_canonicalize(lst, font): + """Merge multiple Format1 subtables at the beginning of lst, + and merge multiple consecutive Format2 subtables that have the same + Class2 (ie. were split because of offset overflows). 
Returns new list.""" + lst = list(lst) + + l = len(lst) + i = 0 + while i < l and lst[i].Format == 1: + i += 1 + lst[:i] = [_Lookup_PairPosFormat1_subtables_flatten(lst[:i], font)] + + l = len(lst) + i = l + while i > 0 and lst[i - 1].Format == 2: + i -= 1 + lst[i:] = [_Lookup_PairPosFormat2_subtables_flatten(lst[i:], font)] + + return lst + +@AligningMerger.merger(ot.Lookup) +def merge(merger, self, lst): + subtables = merger.lookup_subtables = [l.SubTable for l in lst] + + # Remove Extension subtables + for l,sts in list(zip(lst,subtables))+[(self,self.SubTable)]: + if not sts: + continue + if sts[0].__class__.__name__.startswith('Extension'): + assert _all_equal([st.__class__ for st in sts]) + assert _all_equal([st.ExtensionLookupType for st in sts]) + l.LookupType = sts[0].ExtensionLookupType + new_sts = [st.ExtSubTable for st in sts] + del sts[:] + sts.extend(new_sts) + + isPairPos = self.SubTable and isinstance(self.SubTable[0], ot.PairPos) + + if isPairPos: + + # AFDKO and feaLib sometimes generate two Format1 subtables instead of one. + # Merge those before continuing. + # https://github.com/fonttools/fonttools/issues/719 + self.SubTable = _Lookup_PairPos_subtables_canonicalize(self.SubTable, merger.font) + subtables = merger.lookup_subtables = [_Lookup_PairPos_subtables_canonicalize(st, merger.font) for st in subtables] + + merger.mergeLists(self.SubTable, subtables) + self.SubTableCount = len(self.SubTable) + + if isPairPos: + # If format-1 subtable created during canonicalization is empty, remove it. + assert len(self.SubTable) >= 1 and self.SubTable[0].Format == 1 + if not self.SubTable[0].Coverage.glyphs: + self.SubTable.pop(0) + self.SubTableCount -= 1 + + # If format-2 subtable created during canonicalization is empty, remove it. 
+ assert len(self.SubTable) >= 1 and self.SubTable[-1].Format == 2 + if not self.SubTable[-1].Coverage.glyphs: + self.SubTable.pop(-1) + self.SubTableCount -= 1 + + merger.mergeObjects(self, lst, exclude=['SubTable', 'SubTableCount']) + + del merger.lookup_subtables + + +# +# InstancerMerger +# + +class InstancerMerger(AligningMerger): + """A merger that takes multiple master fonts, and instantiates + an instance.""" + + def __init__(self, font, model, location): + Merger.__init__(self, font) + self.model = model + self.location = location + self.scalars = model.getScalars(location) + +@InstancerMerger.merger(ot.Anchor) +def merge(merger, self, lst): + XCoords = [a.XCoordinate for a in lst] + YCoords = [a.YCoordinate for a in lst] + model = merger.model + scalars = merger.scalars + self.XCoordinate = round(model.interpolateFromMastersAndScalars(XCoords, scalars)) + self.YCoordinate = round(model.interpolateFromMastersAndScalars(YCoords, scalars)) + +@InstancerMerger.merger(otBase.ValueRecord) +def merge(merger, self, lst): + model = merger.model + scalars = merger.scalars + # TODO Handle differing valueformats + for name, tableName in [('XAdvance','XAdvDevice'), + ('YAdvance','YAdvDevice'), + ('XPlacement','XPlaDevice'), + ('YPlacement','YPlaDevice')]: + + assert not hasattr(self, tableName) + + if hasattr(self, name): + values = [getattr(a, name, 0) for a in lst] + value = round(model.interpolateFromMastersAndScalars(values, scalars)) + setattr(self, name, value) + + +# +# MutatorMerger +# + +class MutatorMerger(AligningMerger): + """A merger that takes a variable font, and instantiates + an instance.""" + + def __init__(self, font, location): + Merger.__init__(self, font) + self.location = location + + store = None + if 'GDEF' in font: + gdef = font['GDEF'].table + if gdef.Version >= 0x00010003: + store = gdef.VarStore + + self.instancer = VarStoreInstancer(store, font['fvar'].axes, location) + + def instantiate(self): + font = self.font + + 
self.mergeTables(font, [font], ['GPOS']) + + if 'GDEF' in font: + gdef = font['GDEF'].table + if gdef.Version >= 0x00010003: + del gdef.VarStore + gdef.Version = 0x00010002 + if gdef.MarkGlyphSetsDef is None: + del gdef.MarkGlyphSetsDef + gdef.Version = 0x00010000 + if not (gdef.LigCaretList or + gdef.MarkAttachClassDef or + gdef.GlyphClassDef or + gdef.AttachList or + (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)): + del font['GDEF'] + +@MutatorMerger.merger(ot.Anchor) +def merge(merger, self, lst): + if self.Format != 3: + return + + instancer = merger.instancer + for v in "XY": + tableName = v+'DeviceTable' + if not hasattr(self, tableName): + continue + dev = getattr(self, tableName) + delattr(self, tableName) + if dev is None: + continue + + assert dev.DeltaFormat == 0x8000 + varidx = (dev.StartSize << 16) + dev.EndSize + delta = round(instancer[varidx]) + + attr = v+'Coordinate' + setattr(self, attr, getattr(self, attr) + delta) + + self.Format = 1 + +@MutatorMerger.merger(otBase.ValueRecord) +def merge(merger, self, lst): + + # All other structs are merged with self pointing to a copy of base font, + # except for ValueRecords which are sometimes created later and initialized + # to have 0/None members. Hence the copy. 
+ self.__dict__ = lst[0].__dict__.copy() + + instancer = merger.instancer + # TODO Handle differing valueformats + for name, tableName in [('XAdvance','XAdvDevice'), + ('YAdvance','YAdvDevice'), + ('XPlacement','XPlaDevice'), + ('YPlacement','YPlaDevice')]: + + if not hasattr(self, tableName): + continue + dev = getattr(self, tableName) + delattr(self, tableName) + if dev is None: + continue + + assert dev.DeltaFormat == 0x8000 + varidx = (dev.StartSize << 16) + dev.EndSize + delta = round(instancer[varidx]) + + setattr(self, name, getattr(self, name) + delta) + + +# +# VariationMerger +# + +class VariationMerger(AligningMerger): + """A merger that takes multiple master fonts, and builds a + variable font.""" + + def __init__(self, model, axisTags, font): + Merger.__init__(self, font) + self.model = model + self.store_builder = varStore.OnlineVarStoreBuilder(axisTags) + self.store_builder.setModel(model) + +def _all_equal(lst): + if not lst: + return True + it = iter(lst) + v0 = next(it) + for v in it: + if v0 != v: + return False + return True + +def buildVarDevTable(store_builder, master_values): + if _all_equal(master_values): + return master_values[0], None + base, varIdx = store_builder.storeMasters(master_values) + return base, builder.buildVarDevTable(varIdx) + +@VariationMerger.merger(ot.Anchor) +def merge(merger, self, lst): + assert self.Format == 1 + self.XCoordinate, XDeviceTable = buildVarDevTable(merger.store_builder, [a.XCoordinate for a in lst]) + self.YCoordinate, YDeviceTable = buildVarDevTable(merger.store_builder, [a.YCoordinate for a in lst]) + if XDeviceTable or YDeviceTable: + self.Format = 3 + self.XDeviceTable = XDeviceTable + self.YDeviceTable = YDeviceTable + +@VariationMerger.merger(otBase.ValueRecord) +def merge(merger, self, lst): + for name, tableName in [('XAdvance','XAdvDevice'), + ('YAdvance','YAdvDevice'), + ('XPlacement','XPlaDevice'), + ('YPlacement','YPlaDevice')]: + + if hasattr(self, name): + value, deviceTable = 
buildVarDevTable(merger.store_builder, + [getattr(a, name, 0) for a in lst]) + setattr(self, name, value) + if deviceTable: + setattr(self, tableName, deviceTable) diff -Nru fonttools-3.0/Snippets/fontTools/varLib/models.py fonttools-3.21.2/Snippets/fontTools/varLib/models.py --- fonttools-3.0/Snippets/fontTools/varLib/models.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/models.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,316 @@ +"""Variation fonts interpolation models.""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + +__all__ = ['normalizeValue', 'normalizeLocation', 'supportScalar', 'VariationModel'] + +def normalizeValue(v, triple): + """Normalizes value based on a min/default/max triple. + >>> normalizeValue(400, (100, 400, 900)) + 0.0 + >>> normalizeValue(100, (100, 400, 900)) + -1.0 + >>> normalizeValue(650, (100, 400, 900)) + 0.5 + """ + lower, default, upper = triple + assert lower <= default <= upper, "invalid axis values: %3.3f, %3.3f %3.3f"%(lower, default, upper) + v = max(min(v, upper), lower) + if v == default: + v = 0. + elif v < default: + v = (v - default) / (default - lower) + else: + v = (v - default) / (upper - default) + return v + +def normalizeLocation(location, axes): + """Normalizes location based on axis min/default/max values from axes. 
+ >>> axes = {"wght": (100, 400, 900)} + >>> normalizeLocation({"wght": 400}, axes) + {'wght': 0.0} + >>> normalizeLocation({"wght": 100}, axes) + {'wght': -1.0} + >>> normalizeLocation({"wght": 900}, axes) + {'wght': 1.0} + >>> normalizeLocation({"wght": 650}, axes) + {'wght': 0.5} + >>> normalizeLocation({"wght": 1000}, axes) + {'wght': 1.0} + >>> normalizeLocation({"wght": 0}, axes) + {'wght': -1.0} + >>> axes = {"wght": (0, 0, 1000)} + >>> normalizeLocation({"wght": 0}, axes) + {'wght': 0.0} + >>> normalizeLocation({"wght": -1}, axes) + {'wght': 0.0} + >>> normalizeLocation({"wght": 1000}, axes) + {'wght': 1.0} + >>> normalizeLocation({"wght": 500}, axes) + {'wght': 0.5} + >>> normalizeLocation({"wght": 1001}, axes) + {'wght': 1.0} + >>> axes = {"wght": (0, 1000, 1000)} + >>> normalizeLocation({"wght": 0}, axes) + {'wght': -1.0} + >>> normalizeLocation({"wght": -1}, axes) + {'wght': -1.0} + >>> normalizeLocation({"wght": 500}, axes) + {'wght': -0.5} + >>> normalizeLocation({"wght": 1000}, axes) + {'wght': 0.0} + >>> normalizeLocation({"wght": 1001}, axes) + {'wght': 0.0} + """ + out = {} + for tag,triple in axes.items(): + v = location.get(tag, triple[1]) + out[tag] = normalizeValue(v, triple) + return out + +def supportScalar(location, support, ot=True): + """Returns the scalar multiplier at location, for a master + with support. If ot is True, then a peak value of zero + for support of an axis means "axis does not participate". That + is how OpenType Variation Font technology works. 
+ >>> supportScalar({}, {}) + 1.0 + >>> supportScalar({'wght':.2}, {}) + 1.0 + >>> supportScalar({'wght':.2}, {'wght':(0,2,3)}) + 0.1 + >>> supportScalar({'wght':2.5}, {'wght':(0,2,4)}) + 0.75 + >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}) + 0.75 + >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False) + 0.375 + >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}) + 0.75 + >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}) + 0.75 + """ + scalar = 1. + for axis,(lower,peak,upper) in support.items(): + if ot: + # OpenType-specific case handling + if peak == 0.: + continue + if lower > peak or peak > upper: + continue + if lower < 0. and upper > 0.: + continue + v = location.get(axis, 0.) + else: + assert axis in location + v = location[axis] + if v == peak: + continue + if v <= lower or upper <= v: + scalar = 0. + break; + if v < peak: + scalar *= (v - lower) / (peak - lower) + else: # v > peak + scalar *= (v - upper) / (peak - upper) + return scalar + + +class VariationModel(object): + + """ + Locations must be in normalized space. Ie. base master + is at origin (0). 
+ >>> from pprint import pprint + >>> locations = [ \ + {'wght':100}, \ + {'wght':-100}, \ + {'wght':-180}, \ + {'wdth':+.3}, \ + {'wght':+120,'wdth':.3}, \ + {'wght':+120,'wdth':.2}, \ + {}, \ + {'wght':+180,'wdth':.3}, \ + {'wght':+180}, \ + ] + >>> model = VariationModel(locations, axisOrder=['wght']) + >>> pprint(model.locations) + [{}, + {'wght': -100}, + {'wght': -180}, + {'wght': 100}, + {'wght': 180}, + {'wdth': 0.3}, + {'wdth': 0.3, 'wght': 180}, + {'wdth': 0.3, 'wght': 120}, + {'wdth': 0.2, 'wght': 120}] + >>> pprint(model.deltaWeights) + [{}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0, 4: 1.0, 5: 1.0}, + {0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.25}, + {0: 1.0, + 3: 0.75, + 4: 0.25, + 5: 0.6666666666666667, + 6: 0.16666666666666669, + 7: 0.6666666666666667}] + """ + + def __init__(self, locations, axisOrder=[]): + locations = [{k:v for k,v in loc.items() if v != 0.} for loc in locations] + keyFunc = self.getMasterLocationsSortKeyFunc(locations, axisOrder=axisOrder) + axisPoints = keyFunc.axisPoints + self.locations = sorted(locations, key=keyFunc) + # TODO Assert that locations are unique. + self.mapping = [self.locations.index(l) for l in locations] # Mapping from user's master order to our master order + self.reverseMapping = [locations.index(l) for l in self.locations] # Reverse of above + + self._computeMasterSupports(axisPoints) + + @staticmethod + def getMasterLocationsSortKeyFunc(locations, axisOrder=[]): + assert {} in locations, "Base master not found." 
+ axisPoints = {} + for loc in locations: + if len(loc) != 1: + continue + axis = next(iter(loc)) + value = loc[axis] + if axis not in axisPoints: + axisPoints[axis] = {0.} + assert value not in axisPoints[axis] + axisPoints[axis].add(value) + + def getKey(axisPoints, axisOrder): + def sign(v): + return -1 if v < 0 else +1 if v > 0 else 0 + def key(loc): + rank = len(loc) + onPointAxes = [axis for axis,value in loc.items() if value in axisPoints[axis]] + orderedAxes = [axis for axis in axisOrder if axis in loc] + orderedAxes.extend([axis for axis in sorted(loc.keys()) if axis not in axisOrder]) + return ( + rank, # First, order by increasing rank + -len(onPointAxes), # Next, by decreasing number of onPoint axes + tuple(axisOrder.index(axis) if axis in axisOrder else 0x10000 for axis in orderedAxes), # Next, by known axes + tuple(orderedAxes), # Next, by all axes + tuple(sign(loc[axis]) for axis in orderedAxes), # Next, by signs of axis values + tuple(abs(loc[axis]) for axis in orderedAxes), # Next, by absolute value of axis values + ) + return key + + ret = getKey(axisPoints, axisOrder) + ret.axisPoints = axisPoints + return ret + + @staticmethod + def lowerBound(value, lst): + if any(v < value for v in lst): + return max(v for v in lst if v < value) + else: + return value + @staticmethod + def upperBound(value, lst): + if any(v > value for v in lst): + return min(v for v in lst if v > value) + else: + return value + + def _computeMasterSupports(self, axisPoints): + supports = [] + deltaWeights = [] + locations = self.locations + for i,loc in enumerate(locations): + box = {} + + # Account for axisPoints first + for axis,values in axisPoints.items(): + if not axis in loc: + continue + locV = loc[axis] + box[axis] = (self.lowerBound(locV, values), locV, self.upperBound(locV, values)) + + locAxes = set(loc.keys()) + # Walk over previous masters now + for j,m in enumerate(locations[:i]): + # Master with extra axes do not participte + if not 
set(m.keys()).issubset(locAxes): + continue + # If it's NOT in the current box, it does not participate + relevant = True + for axis, (lower,_,upper) in box.items(): + if axis in m and not (lower < m[axis] < upper): + relevant = False + break + if not relevant: + continue + # Split the box for new master + for axis,val in m.items(): + assert axis in box + lower,locV,upper = box[axis] + if val < locV: + lower = val + elif locV < val: + upper = val + box[axis] = (lower,locV,upper) + supports.append(box) + + deltaWeight = {} + # Walk over previous masters now, populate deltaWeight + for j,m in enumerate(locations[:i]): + scalar = supportScalar(loc, supports[j]) + if scalar: + deltaWeight[j] = scalar + deltaWeights.append(deltaWeight) + + self.supports = supports + self.deltaWeights = deltaWeights + + def getDeltas(self, masterValues): + assert len(masterValues) == len(self.deltaWeights) + mapping = self.reverseMapping + out = [] + for i,weights in enumerate(self.deltaWeights): + delta = masterValues[mapping[i]] + for j,weight in weights.items(): + delta -= out[j] * weight + out.append(delta) + return out + + def getScalars(self, loc): + return [supportScalar(loc, support) for support in self.supports] + + @staticmethod + def interpolateFromDeltasAndScalars(deltas, scalars): + v = None + assert len(deltas) == len(scalars) + for i,(delta,scalar) in enumerate(zip(deltas, scalars)): + if not scalar: continue + contribution = delta * scalar + if v is None: + v = contribution + else: + v += contribution + return v + + def interpolateFromDeltas(self, loc, deltas): + scalars = self.getScalars(loc) + return self.interpolateFromDeltasAndScalars(deltas, scalars) + + def interpolateFromMasters(self, loc, masterValues): + deltas = self.getDeltas(masterValues) + return self.interpolateFromDeltas(loc, deltas) + + def interpolateFromMastersAndScalars(self, masterValues, scalars): + deltas = self.getDeltas(masterValues) + return self.interpolateFromDeltasAndScalars(deltas, scalars) + 
+ +if __name__ == "__main__": + import doctest, sys + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/varLib/mutator.py fonttools-3.21.2/Snippets/fontTools/varLib/mutator.py --- fonttools-3.0/Snippets/fontTools/varLib/mutator.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/mutator.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,158 @@ +""" +Instantiate a variation font. Run, eg: + +$ python mutator.py ./NotoSansArabic-VF.ttf wght=140 wdth=85 +""" +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import floatToFixedToFloat +from fontTools.ttLib import TTFont +from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates +from fontTools.varLib import _GetCoordinates, _SetCoordinates, _DesignspaceAxis +from fontTools.varLib.models import supportScalar, normalizeLocation +from fontTools.varLib.merger import MutatorMerger +from fontTools.varLib.varStore import VarStoreInstancer +from fontTools.varLib.mvar import MVAR_ENTRIES +from fontTools.varLib.iup import iup_delta +import os.path +import logging + + +log = logging.getLogger("fontTools.varlib.mutator") + + +def instantiateVariableFont(varfont, location, inplace=False): + """ Generate a static instance from a variable TTFont and a dictionary + defining the desired location along the variable font's axes. + The location values must be specified as user-space coordinates, e.g.: + + {'wght': 400, 'wdth': 100} + + By default, a new TTFont object is returned. If ``inplace`` is True, the + input varfont is modified and reduced to a static font. 
+ """ + if not inplace: + # make a copy to leave input varfont unmodified + stream = BytesIO() + varfont.save(stream) + stream.seek(0) + varfont = TTFont(stream) + + fvar = varfont['fvar'] + axes = {a.axisTag:(a.minValue,a.defaultValue,a.maxValue) for a in fvar.axes} + loc = normalizeLocation(location, axes) + if 'avar' in varfont: + maps = varfont['avar'].segments + loc = {k:_DesignspaceAxis._map(v, maps[k]) for k,v in loc.items()} + # Quantize to F2Dot14, to avoid surprise interpolations. + loc = {k:floatToFixedToFloat(v, 14) for k,v in loc.items()} + # Location is normalized now + log.info("Normalized location: %s", loc) + + log.info("Mutating glyf/gvar tables") + gvar = varfont['gvar'] + glyf = varfont['glyf'] + # get list of glyph names in gvar sorted by component depth + glyphnames = sorted( + gvar.variations.keys(), + key=lambda name: ( + glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth + if glyf[name].isComposite() else 0, + name)) + for glyphname in glyphnames: + variations = gvar.variations[glyphname] + coordinates,_ = _GetCoordinates(varfont, glyphname) + origCoords, endPts = None, None + for var in variations: + scalar = supportScalar(loc, var.axes) + if not scalar: continue + delta = var.coordinates + if None in delta: + if origCoords is None: + origCoords,control = _GetCoordinates(varfont, glyphname) + endPts = control[1] if control[0] >= 1 else list(range(len(control[1]))) + delta = iup_delta(delta, origCoords, endPts) + coordinates += GlyphCoordinates(delta) * scalar + _SetCoordinates(varfont, glyphname, coordinates) + + if 'cvar' in varfont: + log.info("Mutating cvt/cvar tables") + cvar = varfont['cvar'] + cvt = varfont['cvt '] + deltas = {} + for var in cvar.variations: + scalar = supportScalar(loc, var.axes) + if not scalar: continue + for i, c in enumerate(var.coordinates): + if c is not None: + deltas[i] = deltas.get(i, 0) + scalar * c + for i, delta in deltas.items(): + cvt[i] += round(delta) + + if 'MVAR' in varfont: + 
log.info("Mutating MVAR table") + mvar = varfont['MVAR'].table + varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc) + records = mvar.ValueRecord + for rec in records: + mvarTag = rec.ValueTag + if mvarTag not in MVAR_ENTRIES: + continue + tableTag, itemName = MVAR_ENTRIES[mvarTag] + delta = round(varStoreInstancer[rec.VarIdx]) + if not delta: + continue + setattr(varfont[tableTag], itemName, + getattr(varfont[tableTag], itemName) + delta) + + if 'GDEF' in varfont: + log.info("Mutating GDEF/GPOS/GSUB tables") + merger = MutatorMerger(varfont, loc) + + log.info("Building interpolated tables") + merger.instantiate() + + log.info("Removing variable tables") + for tag in ('avar','cvar','fvar','gvar','HVAR','MVAR','VVAR','STAT'): + if tag in varfont: + del varfont[tag] + + return varfont + + +def main(args=None): + from fontTools import configLogger + + if args is None: + import sys + args = sys.argv[1:] + + varfilename = args[0] + locargs = args[1:] + outfile = os.path.splitext(varfilename)[0] + '-instance.ttf' + + # TODO Allow to specify logging verbosity as command line option + configLogger(level=logging.INFO) + + loc = {} + for arg in locargs: + tag,val = arg.split('=') + assert len(tag) <= 4 + loc[tag.ljust(4)] = float(val) + log.info("Location: %s", loc) + + log.info("Loading variable font") + varfont = TTFont(varfilename) + + instantiateVariableFont(varfont, loc, inplace=True) + + log.info("Saving instance font %s", outfile) + varfont.save(outfile) + + +if __name__ == "__main__": + import sys + if len(sys.argv) > 1: + sys.exit(main()) + import doctest + sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Snippets/fontTools/varLib/mvar.py fonttools-3.21.2/Snippets/fontTools/varLib/mvar.py --- fonttools-3.0/Snippets/fontTools/varLib/mvar.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/mvar.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,44 @@ +from __future__ import print_function, division, 
absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * + +MVAR_ENTRIES = { + 'hasc': ('OS/2', 'sTypoAscender'), # horizontal ascender + 'hdsc': ('OS/2', 'sTypoDescender'), # horizontal descender + 'hlgp': ('OS/2', 'sTypoLineGap'), # horizontal line gap + 'hcla': ('OS/2', 'usWinAscent'), # horizontal clipping ascent + 'hcld': ('OS/2', 'usWinDescent'), # horizontal clipping descent + 'vasc': ('vhea', 'ascent'), # vertical ascender + 'vdsc': ('vhea', 'descent'), # vertical descender + 'vlgp': ('vhea', 'lineGap'), # vertical line gap + 'hcrs': ('hhea', 'caretSlopeRise'), # horizontal caret rise + 'hcrn': ('hhea', 'caretSlopeRun'), # horizontal caret run + 'hcof': ('hhea', 'caretOffset'), # horizontal caret offset + 'vcrs': ('vhea', 'caretSlopeRise'), # vertical caret rise + 'vcrn': ('vhea', 'caretSlopeRun'), # vertical caret run + 'vcof': ('vhea', 'caretOffset'), # vertical caret offset + 'xhgt': ('OS/2', 'sxHeight'), # x height + 'cpht': ('OS/2', 'sCapHeight'), # cap height + 'sbxs': ('OS/2', 'ySubscriptXSize'), # subscript em x size + 'sbys': ('OS/2', 'ySubscriptYSize'), # subscript em y size + 'sbxo': ('OS/2', 'ySubscriptXOffset'), # subscript em x offset + 'sbyo': ('OS/2', 'ySubscriptYOffset'), # subscript em y offset + 'spxs': ('OS/2', 'ySuperscriptXSize'), # superscript em x size + 'spys': ('OS/2', 'ySuperscriptYSize'), # superscript em y size + 'spxo': ('OS/2', 'ySuperscriptXOffset'), # superscript em x offset + 'spyo': ('OS/2', 'ySuperscriptYOffset'), # superscript em y offset + 'strs': ('OS/2', 'yStrikeoutSize'), # strikeout size + 'stro': ('OS/2', 'yStrikeoutPosition'), # strikeout offset + 'unds': ('post', 'underlineThickness'), # underline size + 'undo': ('post', 'underlinePosition'), # underline offset + #'gsp0': ('gasp', 'gaspRange[0].rangeMaxPPEM'), # gaspRange[0] + #'gsp1': ('gasp', 'gaspRange[1].rangeMaxPPEM'), # gaspRange[1] + #'gsp2': ('gasp', 'gaspRange[2].rangeMaxPPEM'), # gaspRange[2] + #'gsp3': ('gasp', 
'gaspRange[3].rangeMaxPPEM'), # gaspRange[3] + #'gsp4': ('gasp', 'gaspRange[4].rangeMaxPPEM'), # gaspRange[4] + #'gsp5': ('gasp', 'gaspRange[5].rangeMaxPPEM'), # gaspRange[5] + #'gsp6': ('gasp', 'gaspRange[6].rangeMaxPPEM'), # gaspRange[6] + #'gsp7': ('gasp', 'gaspRange[7].rangeMaxPPEM'), # gaspRange[7] + #'gsp8': ('gasp', 'gaspRange[8].rangeMaxPPEM'), # gaspRange[8] + #'gsp9': ('gasp', 'gaspRange[9].rangeMaxPPEM'), # gaspRange[9] +} diff -Nru fonttools-3.0/Snippets/fontTools/varLib/varStore.py fonttools-3.21.2/Snippets/fontTools/varLib/varStore.py --- fonttools-3.0/Snippets/fontTools/varLib/varStore.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/varLib/varStore.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,100 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.varLib.models import supportScalar +from fontTools.varLib.builder import (buildVarRegionList, buildVarStore, + buildVarRegion, buildVarData, + varDataCalculateNumShorts) + + +def _getLocationKey(loc): + return tuple(sorted(loc.items(), key=lambda kv: kv[0])) + + +class OnlineVarStoreBuilder(object): + + def __init__(self, axisTags): + self._axisTags = axisTags + self._regionMap = {} + self._regionList = buildVarRegionList([], axisTags) + self._store = buildVarStore(self._regionList, []) + + def setModel(self, model): + self._model = model + + regionMap = self._regionMap + regionList = self._regionList + + regions = model.supports[1:] + regionIndices = [] + for region in regions: + key = _getLocationKey(region) + idx = regionMap.get(key) + if idx is None: + varRegion = buildVarRegion(region, self._axisTags) + idx = regionMap[key] = len(regionList.Region) + regionList.Region.append(varRegion) + regionIndices.append(idx) + + data = self._data = buildVarData(regionIndices, [], optimize=False) + self._outer = len(self._store.VarData) + self._store.VarData.append(data) + 
+ def finish(self, optimize=True): + self._regionList.RegionCount = len(self._regionList.Region) + self._store.VarDataCount = len(self._store.VarData) + for data in self._store.VarData: + data.ItemCount = len(data.Item) + varDataCalculateNumShorts(data, optimize) + return self._store + + def storeMasters(self, master_values): + deltas = [round(d) for d in self._model.getDeltas(master_values)] + base = deltas.pop(0) + inner = len(self._data.Item) + self._data.Item.append(deltas) + # TODO Check for full data array? + return base, (self._outer << 16) + inner + + +def VarRegion_get_support(self, fvar_axes): + return {fvar_axes[i].axisTag: (reg.StartCoord,reg.PeakCoord,reg.EndCoord) + for i,reg in enumerate(self.VarRegionAxis)} + +class VarStoreInstancer(object): + + def __init__(self, varstore, fvar_axes, location={}): + self.fvar_axes = fvar_axes + assert varstore is None or varstore.Format == 1 + self._varData = varstore.VarData if varstore else [] + self._regions = varstore.VarRegionList.Region if varstore else [] + self.setLocation(location) + + def setLocation(self, location): + self.location = dict(location) + self._clearCaches() + + def _clearCaches(self): + self._scalars = {} + + def _getScalar(self, regionIdx): + scalar = self._scalars.get(regionIdx) + if scalar is None: + support = VarRegion_get_support(self._regions[regionIdx], self.fvar_axes) + scalar = supportScalar(self.location, support) + self._scalars[regionIdx] = scalar + return scalar + + def __getitem__(self, varidx): + + major, minor = varidx >> 16, varidx & 0xFFFF + + varData = self._varData + scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex] + + deltas = varData[major].Item[minor] + delta = 0. 
+ for d,s in zip(deltas, scalars): + delta += d * s + return delta + diff -Nru fonttools-3.0/Snippets/fontTools/voltLib/ast.py fonttools-3.21.2/Snippets/fontTools/voltLib/ast.py --- fonttools-3.0/Snippets/fontTools/voltLib/ast.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/voltLib/ast.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,257 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.voltLib.error import VoltLibError + + +class Statement(object): + def __init__(self, location): + self.location = location + + def build(self, builder): + pass + + +class Expression(object): + def __init__(self, location): + self.location = location + + def build(self, builder): + pass + + +class Block(Statement): + def __init__(self, location): + Statement.__init__(self, location) + self.statements = [] + + def build(self, builder): + for s in self.statements: + s.build(builder) + + +class VoltFile(Block): + def __init__(self): + Block.__init__(self, location=None) + + +class LookupBlock(Block): + def __init__(self, location, name): + Block.__init__(self, location) + self.name = name + + def build(self, builder): + builder.start_lookup_block(self.location, self.name) + Block.build(self, builder) + builder.end_lookup_block() + + +class GlyphDefinition(Statement): + def __init__(self, location, name, gid, gunicode, gtype, components): + Statement.__init__(self, location) + self.name = name + self.id = gid + self.unicode = gunicode + self.type = gtype + self.components = components + + +class GroupDefinition(Statement): + def __init__(self, location, name, enum): + Statement.__init__(self, location) + self.name = name + self.enum = enum + self.glyphs_ = None + + def glyphSet(self, groups=None): + if groups is not None and self.name in groups: + raise VoltLibError( + 'Group "%s" contains itself.' 
% (self.name), + self.location) + if self.glyphs_ is None: + if groups is None: + groups = set({self.name}) + else: + groups.add(self.name) + self.glyphs_ = self.enum.glyphSet(groups) + return self.glyphs_ + + +class GlyphName(Expression): + """A single glyph name, such as cedilla.""" + def __init__(self, location, glyph): + Expression.__init__(self, location) + self.glyph = glyph + + def glyphSet(self): + return frozenset((self.glyph,)) + + +class Enum(Expression): + """An enum""" + def __init__(self, location, enum): + Expression.__init__(self, location) + self.enum = enum + + def __iter__(self): + for e in self.glyphSet(): + yield e + + def glyphSet(self, groups=None): + glyphs = set() + for element in self.enum: + if isinstance(element, (GroupName, Enum)): + glyphs = glyphs.union(element.glyphSet(groups)) + else: + glyphs = glyphs.union(element.glyphSet()) + return frozenset(glyphs) + + +class GroupName(Expression): + """A glyph group""" + def __init__(self, location, group, parser): + Expression.__init__(self, location) + self.group = group + self.parser_ = parser + + def glyphSet(self, groups=None): + group = self.parser_.resolve_group(self.group) + if group is not None: + self.glyphs_ = group.glyphSet(groups) + return self.glyphs_ + else: + raise VoltLibError( + 'Group "%s" is used but undefined.' 
% (self.group), + self.location) + + +class Range(Expression): + """A glyph range""" + def __init__(self, location, start, end, parser): + Expression.__init__(self, location) + self.start = start + self.end = end + self.parser = parser + + def glyphSet(self): + glyphs = self.parser.glyph_range(self.start, self.end) + return frozenset(glyphs) + + +class ScriptDefinition(Statement): + def __init__(self, location, name, tag, langs): + Statement.__init__(self, location) + self.name = name + self.tag = tag + self.langs = langs + + +class LangSysDefinition(Statement): + def __init__(self, location, name, tag, features): + Statement.__init__(self, location) + self.name = name + self.tag = tag + self.features = features + + +class FeatureDefinition(Statement): + def __init__(self, location, name, tag, lookups): + Statement.__init__(self, location) + self.name = name + self.tag = tag + self.lookups = lookups + + +class LookupDefinition(Statement): + def __init__(self, location, name, process_base, process_marks, direction, + reversal, comments, context, sub, pos): + Statement.__init__(self, location) + self.name = name + self.process_base = process_base + self.process_marks = process_marks + self.direction = direction + self.reversal = reversal + self.comments = comments + self.context = context + self.sub = sub + self.pos = pos + + +class SubstitutionDefinition(Statement): + def __init__(self, location, mapping): + Statement.__init__(self, location) + self.mapping = mapping + + +class SubstitutionSingleDefinition(SubstitutionDefinition): + def __init__(self, location, mapping): + SubstitutionDefinition.__init__(self, location, mapping) + + +class SubstitutionMultipleDefinition(SubstitutionDefinition): + def __init__(self, location, mapping): + SubstitutionDefinition.__init__(self, location, mapping) + + +class SubstitutionLigatureDefinition(SubstitutionDefinition): + def __init__(self, location, mapping): + SubstitutionDefinition.__init__(self, location, mapping) + + 
+class SubstitutionReverseChainingSingleDefinition(SubstitutionDefinition): + def __init__(self, location, mapping): + SubstitutionDefinition.__init__(self, location, mapping) + + +class PositionAttachDefinition(Statement): + def __init__(self, location, coverage, coverage_to): + Statement.__init__(self, location) + self.coverage = coverage + self.coverage_to = coverage_to + + +class PositionAttachCursiveDefinition(Statement): + def __init__(self, location, coverages_exit, coverages_enter): + Statement.__init__(self, location) + self.coverages_exit = coverages_exit + self.coverages_enter = coverages_enter + + +class PositionAdjustPairDefinition(Statement): + def __init__(self, location, coverages_1, coverages_2, adjust_pair): + Statement.__init__(self, location) + self.coverages_1 = coverages_1 + self.coverages_2 = coverages_2 + self.adjust_pair = adjust_pair + + +class PositionAdjustSingleDefinition(Statement): + def __init__(self, location, adjust_single): + Statement.__init__(self, location) + self.adjust_single = adjust_single + + +class ContextDefinition(Statement): + def __init__(self, location, ex_or_in, left=[], right=[]): + Statement.__init__(self, location) + self.ex_or_in = ex_or_in + self.left = left + self.right = right + + +class AnchorDefinition(Statement): + def __init__(self, location, name, gid, glyph_name, component, locked, + pos): + Statement.__init__(self, location) + self.name = name + self.gid = gid + self.glyph_name = glyph_name + self.component = component + self.locked = locked + self.pos = pos + + +class SettingDefinition(Statement): + def __init__(self, location, name, value): + Statement.__init__(self, location) + self.name = name + self.value = value diff -Nru fonttools-3.0/Snippets/fontTools/voltLib/error.py fonttools-3.21.2/Snippets/fontTools/voltLib/error.py --- fonttools-3.0/Snippets/fontTools/voltLib/error.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/voltLib/error.py 2018-01-08 12:40:40.000000000 
+0000 @@ -0,0 +1,16 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals + + +class VoltLibError(Exception): + def __init__(self, message, location): + Exception.__init__(self, message) + self.location = location + + def __str__(self): + message = Exception.__str__(self) + if self.location: + path, line, column = self.location + return "%s:%d:%d: %s" % (path, line, column, message) + else: + return message diff -Nru fonttools-3.0/Snippets/fontTools/voltLib/__init__.py fonttools-3.21.2/Snippets/fontTools/voltLib/__init__.py --- fonttools-3.0/Snippets/fontTools/voltLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/voltLib/__init__.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +"""fontTools.voltLib -- a package for dealing with Visual OpenType Layout Tool +(VOLT) files.""" + +# See +# http://www.microsoft.com/typography/VOLT.mspx diff -Nru fonttools-3.0/Snippets/fontTools/voltLib/lexer.py fonttools-3.21.2/Snippets/fontTools/voltLib/lexer.py --- fonttools-3.0/Snippets/fontTools/voltLib/lexer.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/voltLib/lexer.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,98 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.voltLib.error import VoltLibError + +class Lexer(object): + NUMBER = "NUMBER" + STRING = "STRING" + NAME = "NAME" + NEWLINE = "NEWLINE" + + CHAR_WHITESPACE_ = " \t" + CHAR_NEWLINE_ = "\r\n" + CHAR_DIGIT_ = "0123456789" + CHAR_UC_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + CHAR_LC_LETTER_ = "abcdefghijklmnopqrstuvwxyz" + CHAR_UNDERSCORE_ = "_" + CHAR_PERIOD_ = "." 
+ CHAR_NAME_START_ = CHAR_UC_LETTER_ + CHAR_LC_LETTER_ + CHAR_PERIOD_ + \ + CHAR_UNDERSCORE_ + CHAR_NAME_CONTINUATION_ = CHAR_NAME_START_ + CHAR_DIGIT_ + + def __init__(self, text, filename): + self.filename_ = filename + self.line_ = 1 + self.pos_ = 0 + self.line_start_ = 0 + self.text_ = text + self.text_length_ = len(text) + + def __iter__(self): + return self + + def next(self): # Python 2 + return self.__next__() + + def __next__(self): # Python 3 + while True: + token_type, token, location = self.next_() + if token_type not in {Lexer.NEWLINE}: + return (token_type, token, location) + + def next_(self): + self.scan_over_(Lexer.CHAR_WHITESPACE_) + column = self.pos_ - self.line_start_ + 1 + location = (self.filename_, self.line_, column) + start = self.pos_ + text = self.text_ + limit = len(text) + if start >= limit: + raise StopIteration() + cur_char = text[start] + next_char = text[start + 1] if start + 1 < limit else None + + if cur_char == "\n": + self.pos_ += 1 + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == "\r": + self.pos_ += (2 if next_char == "\n" else 1) + self.line_ += 1 + self.line_start_ = self.pos_ + return (Lexer.NEWLINE, None, location) + if cur_char == '"': + self.pos_ += 1 + self.scan_until_('"\r\n') + if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': + self.pos_ += 1 + return (Lexer.STRING, text[start + 1:self.pos_ - 1], location) + else: + raise VoltLibError("Expected '\"' to terminate string", + location) + if cur_char in Lexer.CHAR_NAME_START_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) + token = text[start:self.pos_] + return (Lexer.NAME, token, location) + if cur_char in Lexer.CHAR_DIGIT_: + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) + if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: + self.pos_ += 1 + self.scan_over_(Lexer.CHAR_DIGIT_) + return (Lexer.NUMBER, 
int(text[start:self.pos_], 10), location) + raise VoltLibError("Unexpected character: '%s'" % cur_char, + location) + + def scan_over_(self, valid): + p = self.pos_ + while p < self.text_length_ and self.text_[p] in valid: + p += 1 + self.pos_ = p + + def scan_until_(self, stop_at): + p = self.pos_ + while p < self.text_length_ and self.text_[p] not in stop_at: + p += 1 + self.pos_ = p diff -Nru fonttools-3.0/Snippets/fontTools/voltLib/parser.py fonttools-3.21.2/Snippets/fontTools/voltLib/parser.py --- fonttools-3.0/Snippets/fontTools/voltLib/parser.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/fontTools/voltLib/parser.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,644 @@ +from __future__ import ( + print_function, division, absolute_import, unicode_literals) +from collections import OrderedDict +import fontTools.voltLib.ast as ast +from fontTools.voltLib.lexer import Lexer +from fontTools.voltLib.error import VoltLibError +from io import open + +PARSE_FUNCS = { + "DEF_GLYPH": "parse_def_glyph_", + "DEF_GROUP": "parse_def_group_", + "DEF_SCRIPT": "parse_def_script_", + "DEF_LOOKUP": "parse_def_lookup_", + "DEF_ANCHOR": "parse_def_anchor_", + "GRID_PPEM": "parse_ppem_", + "PRESENTATION_PPEM": "parse_ppem_", + "PPOSITIONING_PPEM": "parse_ppem_", + "COMPILER_USEEXTENSIONLOOKUPS": "parse_compiler_flag_", + "COMPILER_USEPAIRPOSFORMAT2": "parse_compiler_flag_", + "CMAP_FORMAT": "parse_cmap_format", +} + + +class Parser(object): + def __init__(self, path): + self.doc_ = ast.VoltFile() + self.glyphs_ = OrderedSymbolTable() + self.groups_ = SymbolTable() + self.anchors_ = {} # dictionary of SymbolTable() keyed by glyph + self.scripts_ = SymbolTable() + self.langs_ = SymbolTable() + self.lookups_ = SymbolTable() + self.next_token_type_, self.next_token_ = (None, None) + self.next_token_location_ = None + with open(path, "r") as f: + self.lexer_ = Lexer(f.read(), path) + self.advance_lexer_() + + def parse(self): + statements = self.doc_.statements + 
while self.next_token_type_ is not None: + self.advance_lexer_() + if self.cur_token_ in PARSE_FUNCS.keys(): + func = getattr(self, PARSE_FUNCS[self.cur_token_]) + statements.append(func()) + elif self.is_cur_keyword_("END"): + if self.next_token_type_ is not None: + raise VoltLibError("Expected the end of the file", + self.cur_token_location_) + return self.doc_ + else: + raise VoltLibError( + "Expected " + ", ".join(sorted(PARSE_FUNCS.keys())), + self.cur_token_location_) + return self.doc_ + + def parse_def_glyph_(self): + assert self.is_cur_keyword_("DEF_GLYPH") + location = self.cur_token_location_ + name = self.expect_string_() + self.expect_keyword_("ID") + gid = self.expect_number_() + if gid < 0: + raise VoltLibError("Invalid glyph ID", self.cur_token_location_) + gunicode = None + if self.next_token_ == "UNICODE": + self.expect_keyword_("UNICODE") + gunicode = [self.expect_number_()] + if gunicode[0] < 0: + raise VoltLibError("Invalid glyph UNICODE", + self.cur_token_location_) + elif self.next_token_ == "UNICODEVALUES": + self.expect_keyword_("UNICODEVALUES") + gunicode = self.parse_unicode_values_() + gtype = None + if self.next_token_ == "TYPE": + self.expect_keyword_("TYPE") + gtype = self.expect_name_() + assert gtype in ("BASE", "LIGATURE", "MARK") + components = None + if self.next_token_ == "COMPONENTS": + self.expect_keyword_("COMPONENTS") + components = self.expect_number_() + self.expect_keyword_("END_GLYPH") + if self.glyphs_.resolve(name) is not None: + raise VoltLibError( + 'Glyph "%s" (gid %i) already defined' % (name, gid), + location + ) + def_glyph = ast.GlyphDefinition(location, name, gid, + gunicode, gtype, components) + self.glyphs_.define(name, def_glyph) + return def_glyph + + def parse_def_group_(self): + assert self.is_cur_keyword_("DEF_GROUP") + location = self.cur_token_location_ + name = self.expect_string_() + enum = None + if self.next_token_ == "ENUM": + self.expect_keyword_("ENUM") + enum = self.parse_enum_() + 
self.expect_keyword_("END_GROUP") + if self.groups_.resolve(name) is not None: + raise VoltLibError( + 'Glyph group "%s" already defined, ' + 'group names are case insensitive' % name, + location + ) + def_group = ast.GroupDefinition(location, name, enum) + self.groups_.define(name, def_group) + return def_group + + def parse_def_script_(self): + assert self.is_cur_keyword_("DEF_SCRIPT") + location = self.cur_token_location_ + name = None + if self.next_token_ == "NAME": + self.expect_keyword_("NAME") + name = self.expect_string_() + self.expect_keyword_("TAG") + tag = self.expect_string_() + if self.scripts_.resolve(tag) is not None: + raise VoltLibError( + 'Script "%s" already defined, ' + 'script tags are case insensitive' % tag, + location + ) + self.langs_.enter_scope() + langs = [] + while self.next_token_ != "END_SCRIPT": + self.advance_lexer_() + lang = self.parse_langsys_() + self.expect_keyword_("END_LANGSYS") + if self.langs_.resolve(lang.tag) is not None: + raise VoltLibError( + 'Language "%s" already defined in script "%s", ' + 'language tags are case insensitive' % (lang.tag, tag), + location + ) + self.langs_.define(lang.tag, lang) + langs.append(lang) + self.expect_keyword_("END_SCRIPT") + self.langs_.exit_scope() + def_script = ast.ScriptDefinition(location, name, tag, langs) + self.scripts_.define(tag, def_script) + return def_script + + def parse_langsys_(self): + assert self.is_cur_keyword_("DEF_LANGSYS") + location = self.cur_token_location_ + name = None + if self.next_token_ == "NAME": + self.expect_keyword_("NAME") + name = self.expect_string_() + self.expect_keyword_("TAG") + tag = self.expect_string_() + features = [] + while self.next_token_ != "END_LANGSYS": + self.advance_lexer_() + feature = self.parse_feature_() + self.expect_keyword_("END_FEATURE") + features.append(feature) + def_langsys = ast.LangSysDefinition(location, name, tag, features) + return def_langsys + + def parse_feature_(self): + assert 
self.is_cur_keyword_("DEF_FEATURE") + location = self.cur_token_location_ + self.expect_keyword_("NAME") + name = self.expect_string_() + self.expect_keyword_("TAG") + tag = self.expect_string_() + lookups = [] + while self.next_token_ != "END_FEATURE": + # self.advance_lexer_() + self.expect_keyword_("LOOKUP") + lookup = self.expect_string_() + lookups.append(lookup) + feature = ast.FeatureDefinition(location, name, tag, lookups) + return feature + + def parse_def_lookup_(self): + assert self.is_cur_keyword_("DEF_LOOKUP") + location = self.cur_token_location_ + name = self.expect_string_() + if not name[0].isalpha(): + raise VoltLibError( + 'Lookup name "%s" must start with a letter' % name, + location + ) + if self.lookups_.resolve(name) is not None: + raise VoltLibError( + 'Lookup "%s" already defined, ' + 'lookup names are case insensitive' % name, + location + ) + process_base = True + if self.next_token_ == "PROCESS_BASE": + self.advance_lexer_() + elif self.next_token_ == "SKIP_BASE": + self.advance_lexer_() + process_base = False + process_marks = True + if self.next_token_ == "PROCESS_MARKS": + self.advance_lexer_() + if self.next_token_ == "MARK_GLYPH_SET": + self.advance_lexer_() + process_marks = self.expect_string_() + elif self.next_token_type_ == Lexer.STRING: + process_marks = self.expect_string_() + elif self.next_token_ == "ALL": + self.advance_lexer_() + else: + raise VoltLibError( + "Expected ALL, MARK_GLYPH_SET or an ID. 
" + "Got %s" % (self.next_token_type_), + location) + elif self.next_token_ == "SKIP_MARKS": + self.advance_lexer_() + process_marks = False + direction = None + if self.next_token_ == "DIRECTION": + self.expect_keyword_("DIRECTION") + direction = self.expect_name_() + assert direction in ("LTR", "RTL") + reversal = None + if self.next_token_ == "REVERSAL": + self.expect_keyword_("REVERSAL") + reversal = True + comments = None + if self.next_token_ == "COMMENTS": + self.expect_keyword_("COMMENTS") + comments = self.expect_string_() + context = [] + while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"): + context = self.parse_context_() + as_pos_or_sub = self.expect_name_() + sub = None + pos = None + if as_pos_or_sub == "AS_SUBSTITUTION": + sub = self.parse_substitution_(reversal) + elif as_pos_or_sub == "AS_POSITION": + pos = self.parse_position_() + else: + raise VoltLibError( + "Expected AS_SUBSTITUTION or AS_POSITION. " + "Got %s" % (as_pos_or_sub), + location) + def_lookup = ast.LookupDefinition( + location, name, process_base, process_marks, direction, reversal, + comments, context, sub, pos) + self.lookups_.define(name, def_lookup) + return def_lookup + + def parse_context_(self): + location = self.cur_token_location_ + contexts = [] + while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"): + side = None + coverage = None + ex_or_in = self.expect_name_() + # side_contexts = [] # XXX + if self.next_token_ != "END_CONTEXT": + left = [] + right = [] + while self.next_token_ in ("LEFT", "RIGHT"): + side = self.expect_name_() + coverage = self.parse_coverage_() + if side == "LEFT": + left.append(coverage) + else: + right.append(coverage) + self.expect_keyword_("END_CONTEXT") + context = ast.ContextDefinition(location, ex_or_in, left, + right) + contexts.append(context) + else: + self.expect_keyword_("END_CONTEXT") + return contexts + + def parse_substitution_(self, reversal): + assert self.is_cur_keyword_("AS_SUBSTITUTION") + location = 
self.cur_token_location_ + src = [] + dest = [] + if self.next_token_ != "SUB": + raise VoltLibError("Expected SUB", location) + while self.next_token_ == "SUB": + self.expect_keyword_("SUB") + src.append(self.parse_coverage_()) + self.expect_keyword_("WITH") + dest.append(self.parse_coverage_()) + self.expect_keyword_("END_SUB") + self.expect_keyword_("END_SUBSTITUTION") + max_src = max([len(cov) for cov in src]) + max_dest = max([len(cov) for cov in dest]) + # many to many or mixed is invalid + if ((max_src > 1 and max_dest > 1) or + (reversal and (max_src > 1 or max_dest > 1))): + raise VoltLibError( + "Invalid substitution type", + location) + mapping = OrderedDict(zip(tuple(src), tuple(dest))) + if max_src == 1 and max_dest == 1: + if reversal: + sub = ast.SubstitutionReverseChainingSingleDefinition( + location, mapping) + else: + sub = ast.SubstitutionSingleDefinition(location, mapping) + elif max_src == 1 and max_dest > 1: + sub = ast.SubstitutionMultipleDefinition(location, mapping) + elif max_src > 1 and max_dest == 1: + sub = ast.SubstitutionLigatureDefinition(location, mapping) + return sub + + def parse_position_(self): + assert self.is_cur_keyword_("AS_POSITION") + location = self.cur_token_location_ + pos_type = self.expect_name_() + if pos_type not in ( + "ATTACH", "ATTACH_CURSIVE", "ADJUST_PAIR", "ADJUST_SINGLE"): + raise VoltLibError( + "Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE", + location) + if pos_type == "ATTACH": + position = self.parse_attach_() + elif pos_type == "ATTACH_CURSIVE": + position = self.parse_attach_cursive_() + elif pos_type == "ADJUST_PAIR": + position = self.parse_adjust_pair_() + elif pos_type == "ADJUST_SINGLE": + position = self.parse_adjust_single_() + self.expect_keyword_("END_POSITION") + return position + + def parse_attach_(self): + assert self.is_cur_keyword_("ATTACH") + location = self.cur_token_location_ + coverage = self.parse_coverage_() + coverage_to = [] + self.expect_keyword_("TO") + while 
self.next_token_ != "END_ATTACH": + cov = self.parse_coverage_() + self.expect_keyword_("AT") + self.expect_keyword_("ANCHOR") + anchor_name = self.expect_string_() + coverage_to.append((cov, anchor_name)) + self.expect_keyword_("END_ATTACH") + position = ast.PositionAttachDefinition( + location, coverage, coverage_to) + return position + + def parse_attach_cursive_(self): + assert self.is_cur_keyword_("ATTACH_CURSIVE") + location = self.cur_token_location_ + coverages_exit = [] + coverages_enter = [] + while self.next_token_ != "ENTER": + self.expect_keyword_("EXIT") + coverages_exit.append(self.parse_coverage_()) + while self.next_token_ != "END_ATTACH": + self.expect_keyword_("ENTER") + coverages_enter.append(self.parse_coverage_()) + self.expect_keyword_("END_ATTACH") + position = ast.PositionAttachCursiveDefinition( + location, coverages_exit, coverages_enter) + return position + + def parse_adjust_pair_(self): + assert self.is_cur_keyword_("ADJUST_PAIR") + location = self.cur_token_location_ + coverages_1 = [] + coverages_2 = [] + adjust_pair = {} + while self.next_token_ == "FIRST": + self.advance_lexer_() + coverage_1 = self.parse_coverage_() + coverages_1.append(coverage_1) + while self.next_token_ == "SECOND": + self.advance_lexer_() + coverage_2 = self.parse_coverage_() + coverages_2.append(coverage_2) + while self.next_token_ != "END_ADJUST": + id_1 = self.expect_number_() + id_2 = self.expect_number_() + self.expect_keyword_("BY") + pos_1 = self.parse_pos_() + pos_2 = self.parse_pos_() + adjust_pair[(id_1, id_2)] = (pos_1, pos_2) + self.expect_keyword_("END_ADJUST") + position = ast.PositionAdjustPairDefinition( + location, coverages_1, coverages_2, adjust_pair) + return position + + def parse_adjust_single_(self): + assert self.is_cur_keyword_("ADJUST_SINGLE") + location = self.cur_token_location_ + adjust_single = [] + while self.next_token_ != "END_ADJUST": + coverages = self.parse_coverage_() + self.expect_keyword_("BY") + pos = self.parse_pos_() + 
adjust_single.append((coverages, pos)) + self.expect_keyword_("END_ADJUST") + position = ast.PositionAdjustSingleDefinition( + location, adjust_single) + return position + + def parse_def_anchor_(self): + assert self.is_cur_keyword_("DEF_ANCHOR") + location = self.cur_token_location_ + name = self.expect_string_() + self.expect_keyword_("ON") + gid = self.expect_number_() + self.expect_keyword_("GLYPH") + glyph_name = self.expect_name_() + # check for duplicate anchor names on this glyph + if (glyph_name in self.anchors_ + and self.anchors_[glyph_name].resolve(name) is not None): + raise VoltLibError( + 'Anchor "%s" already defined, ' + 'anchor names are case insensitive' % name, + location + ) + self.expect_keyword_("COMPONENT") + component = self.expect_number_() + if self.next_token_ == "LOCKED": + locked = True + self.advance_lexer_() + else: + locked = False + self.expect_keyword_("AT") + pos = self.parse_pos_() + self.expect_keyword_("END_ANCHOR") + anchor = ast.AnchorDefinition(location, name, gid, glyph_name, + component, locked, pos) + if glyph_name not in self.anchors_: + self.anchors_[glyph_name] = SymbolTable() + self.anchors_[glyph_name].define(name, anchor) + return anchor + + def parse_adjust_by_(self): + self.advance_lexer_() + assert self.is_cur_keyword_("ADJUST_BY") + adjustment = self.expect_number_() + self.expect_keyword_("AT") + size = self.expect_number_() + return adjustment, size + + def parse_pos_(self): + # VOLT syntax doesn't seem to take device Y advance + self.advance_lexer_() + location = self.cur_token_location_ + assert self.is_cur_keyword_("POS"), location + adv = None + dx = None + dy = None + adv_adjust_by = {} + dx_adjust_by = {} + dy_adjust_by = {} + if self.next_token_ == "ADV": + self.advance_lexer_() + adv = self.expect_number_() + while self.next_token_ == "ADJUST_BY": + adjustment, size = self.parse_adjust_by_() + adv_adjust_by[size] = adjustment + if self.next_token_ == "DX": + self.advance_lexer_() + dx = 
self.expect_number_() + while self.next_token_ == "ADJUST_BY": + adjustment, size = self.parse_adjust_by_() + dx_adjust_by[size] = adjustment + if self.next_token_ == "DY": + self.advance_lexer_() + dy = self.expect_number_() + while self.next_token_ == "ADJUST_BY": + adjustment, size = self.parse_adjust_by_() + dy_adjust_by[size] = adjustment + self.expect_keyword_("END_POS") + return (adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by) + + def parse_unicode_values_(self): + location = self.cur_token_location_ + try: + unicode_values = self.expect_string_().split(",") + unicode_values = [ + int(uni[2:], 16) + for uni in unicode_values if uni != ""] + except ValueError as err: + raise VoltLibError(str(err), location) + return unicode_values if unicode_values != [] else None + + def parse_enum_(self): + assert self.is_cur_keyword_("ENUM") + location = self.cur_token_location_ + enum = self.parse_coverage_() + self.expect_keyword_("END_ENUM") + return enum + + def parse_coverage_(self): + coverage = [] + location = self.cur_token_location_ + while self.next_token_ in ("GLYPH", "GROUP", "RANGE", "ENUM"): + if self.next_token_ == "ENUM": + self.advance_lexer_() + enum = self.parse_enum_() + coverage.append(enum) + elif self.next_token_ == "GLYPH": + self.expect_keyword_("GLYPH") + name = self.expect_string_() + coverage.append(name) + elif self.next_token_ == "GROUP": + self.expect_keyword_("GROUP") + name = self.expect_string_() + # resolved_group = self.groups_.resolve(name) + group = (name,) + coverage.append(group) + # if resolved_group is not None: + # coverage.extend(resolved_group.enum) + # # TODO: check that group exists after all groups are defined + # else: + # group = (name,) + # coverage.append(group) + # # raise VoltLibError( + # # 'Glyph group "%s" is not defined' % name, + # # location) + elif self.next_token_ == "RANGE": + self.expect_keyword_("RANGE") + start = self.expect_string_() + self.expect_keyword_("TO") + end = self.expect_string_() + 
coverage.append((start, end)) + return tuple(coverage) + + def resolve_group(self, group_name): + return self.groups_.resolve(group_name) + + def glyph_range(self, start, end): + rng = self.glyphs_.range(start, end) + return frozenset(rng) + + def parse_ppem_(self): + location = self.cur_token_location_ + ppem_name = self.cur_token_ + value = self.expect_number_() + setting = ast.SettingDefinition(location, ppem_name, value) + return setting + + def parse_compiler_flag_(self): + location = self.cur_token_location_ + flag_name = self.cur_token_ + value = True + setting = ast.SettingDefinition(location, flag_name, value) + return setting + + def parse_cmap_format(self): + location = self.cur_token_location_ + name = self.cur_token_ + value = (self.expect_number_(), self.expect_number_(), + self.expect_number_()) + setting = ast.SettingDefinition(location, name, value) + return setting + + def is_cur_keyword_(self, k): + return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k) + + def expect_string_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.STRING: + raise VoltLibError("Expected a string", self.cur_token_location_) + return self.cur_token_ + + def expect_keyword_(self, keyword): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword: + return self.cur_token_ + raise VoltLibError("Expected \"%s\"" % keyword, + self.cur_token_location_) + + def expect_name_(self): + self.advance_lexer_() + if self.cur_token_type_ is Lexer.NAME: + return self.cur_token_ + raise VoltLibError("Expected a name", self.cur_token_location_) + + def expect_number_(self): + self.advance_lexer_() + if self.cur_token_type_ is not Lexer.NUMBER: + raise VoltLibError("Expected a number", self.cur_token_location_) + return self.cur_token_ + + def advance_lexer_(self): + self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( + self.next_token_type_, self.next_token_, self.next_token_location_) + try: + 
(self.next_token_type_, self.next_token_, + self.next_token_location_) = self.lexer_.next() + except StopIteration: + self.next_token_type_, self.next_token_ = (None, None) + + +class SymbolTable(object): + def __init__(self): + self.scopes_ = [{}] + + def enter_scope(self): + self.scopes_.append({}) + + def exit_scope(self): + self.scopes_.pop() + + def define(self, name, item): + self.scopes_[-1][name] = item + + def resolve(self, name, case_insensitive=True): + for scope in reversed(self.scopes_): + item = scope.get(name) + if item: + return item + if case_insensitive: + for key in scope: + if key.lower() == name.lower(): + return scope[key] + return None + + +class OrderedSymbolTable(SymbolTable): + def __init__(self): + self.scopes_ = [OrderedDict()] + + def enter_scope(self): + self.scopes_.append(OrderedDict()) + + def resolve(self, name, case_insensitive=False): + SymbolTable.resolve(self, name, case_insensitive=case_insensitive) + + def range(self, start, end): + for scope in reversed(self.scopes_): + if start in scope and end in scope: + start_idx = list(scope.keys()).index(start) + end_idx = list(scope.keys()).index(end) + return list(scope.keys())[start_idx:end_idx + 1] + return None diff -Nru fonttools-3.0/Snippets/interpolate.py fonttools-3.21.2/Snippets/interpolate.py --- fonttools-3.0/Snippets/interpolate.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/interpolate.py 2018-01-08 12:40:40.000000000 +0000 @@ -26,8 +26,8 @@ from fontTools.ttLib import TTFont from fontTools.ttLib.tables._n_a_m_e import NameRecord from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis, NamedInstance -from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r, GlyphVariation -import warnings +from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r, TupleVariation +import logging def AddFontVariations(font): @@ -75,13 +75,13 @@ thinCoord = GetCoordinates(thin, glyphName) blackCoord = GetCoordinates(black, glyphName) if not regularCoord or 
not blackCoord or not thinCoord: - warnings.warn("glyph %s not present in all input fonts" % - glyphName) + logging.warning("glyph %s not present in all input fonts", + glyphName) continue if (len(regularCoord) != len(blackCoord) or len(regularCoord) != len(thinCoord)): - warnings.warn("glyph %s has not the same number of " - "control points in all input fonts" % glyphName) + logging.warning("glyph %s has not the same number of " + "control points in all input fonts", glyphName) continue thinDelta = [] blackDelta = [] @@ -89,8 +89,8 @@ zip(regularCoord, blackCoord, thinCoord): thinDelta.append(((thinX - regX, thinY - regY))) blackDelta.append((blackX - regX, blackY - regY)) - thinVar = GlyphVariation({"wght": (-1.0, -1.0, 0.0)}, thinDelta) - blackVar = GlyphVariation({"wght": (0.0, 1.0, 1.0)}, blackDelta) + thinVar = TupleVariation({"wght": (-1.0, -1.0, 0.0)}, thinDelta) + blackVar = TupleVariation({"wght": (0.0, 1.0, 1.0)}, blackDelta) gvar.variations[glyphName] = [thinVar, blackVar] @@ -129,6 +129,7 @@ def main(): + logging.basicConfig(format="%(levelname)s: %(message)s") thin = TTFont("/tmp/Roboto/Roboto-Thin.ttf") regular = TTFont("/tmp/Roboto/Roboto-Regular.ttf") black = TTFont("/tmp/Roboto/Roboto-Black.ttf") @@ -139,4 +140,5 @@ if __name__ == "__main__": - main() + import sys + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/merge_woff_metadata.py fonttools-3.21.2/Snippets/merge_woff_metadata.py --- fonttools-3.0/Snippets/merge_woff_metadata.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/merge_woff_metadata.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,43 @@ +from __future__ import print_function +import sys +import os +from fontTools.ttx import makeOutputFileName +from fontTools.ttLib import TTFont + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + if len(args) < 2: + print("usage: merge_woff_metadata.py METADATA.xml " + "INPUT.woff [OUTPUT.woff]", file=sys.stderr) + return 1 + + metadata_file = args[0] + with 
open(metadata_file, 'rb') as f: + metadata = f.read() + + infile = args[1] + if len(args) > 2: + outfile = args[2] + else: + filename, ext = os.path.splitext(infile) + outfile = makeOutputFileName(filename, None, ext) + + font = TTFont(infile) + + if font.flavor not in ("woff", "woff2"): + print("Input file is not a WOFF or WOFF2 font", file=sys.stderr) + return 1 + + data = font.flavorData + + # this sets the new WOFF metadata + data.metaData = metadata + + font.save(outfile) + + +if __name__ == "__main__": + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/otf2ttf.py fonttools-3.21.2/Snippets/otf2ttf.py --- fonttools-3.0/Snippets/otf2ttf.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/otf2ttf.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,91 @@ +#!/usr/bin/env python +from __future__ import print_function, division, absolute_import +import sys +from fontTools.ttLib import TTFont, newTable +from cu2qu.pens import Cu2QuPen +from fontTools.pens.ttGlyphPen import TTGlyphPen +from fontTools.ttx import makeOutputFileName +import argparse + + +# default approximation error, measured in UPEM +MAX_ERR = 1.0 + +# default 'post' table format +POST_FORMAT = 2.0 + +# assuming the input contours' direction is correctly set (counter-clockwise), +# we just flip it to clockwise +REVERSE_DIRECTION = True + + +def glyphs_to_quadratic( + glyphs, max_err=MAX_ERR, reverse_direction=REVERSE_DIRECTION): + quadGlyphs = {} + for gname in glyphs.keys(): + glyph = glyphs[gname] + ttPen = TTGlyphPen(glyphs) + cu2quPen = Cu2QuPen(ttPen, max_err, + reverse_direction=reverse_direction) + glyph.draw(cu2quPen) + quadGlyphs[gname] = ttPen.glyph() + return quadGlyphs + + +def otf_to_ttf(ttFont, post_format=POST_FORMAT, **kwargs): + assert ttFont.sfntVersion == "OTTO" + assert "CFF " in ttFont + + glyphOrder = ttFont.getGlyphOrder() + + ttFont["loca"] = newTable("loca") + ttFont["glyf"] = glyf = newTable("glyf") + glyf.glyphOrder = glyphOrder + glyf.glyphs = 
glyphs_to_quadratic(ttFont.getGlyphSet(), **kwargs) + del ttFont["CFF "] + + ttFont["maxp"] = maxp = newTable("maxp") + maxp.tableVersion = 0x00010000 + maxp.maxZones = 1 + maxp.maxTwilightPoints = 0 + maxp.maxStorage = 0 + maxp.maxFunctionDefs = 0 + maxp.maxInstructionDefs = 0 + maxp.maxStackElements = 0 + maxp.maxSizeOfInstructions = 0 + maxp.maxComponentElements = max( + len(g.components if hasattr(g, 'components') else []) + for g in glyf.glyphs.values()) + + post = ttFont["post"] + post.formatType = post_format + post.extraNames = [] + post.mapping = {} + post.glyphOrder = glyphOrder + + ttFont.sfntVersion = "\000\001\000\000" + + +def main(args=None): + parser = argparse.ArgumentParser() + parser.add_argument("input", metavar="INPUT") + parser.add_argument("-o", "--output") + parser.add_argument("-e", "--max-error", type=float, default=MAX_ERR) + parser.add_argument("--post-format", type=float, default=POST_FORMAT) + parser.add_argument( + "--keep-direction", dest='reverse_direction', action='store_false') + options = parser.parse_args(args) + + output = options.output or makeOutputFileName(options.input, + outputDir=None, + extension='.ttf') + font = TTFont(options.input) + otf_to_ttf(font, + post_format=options.post_format, + max_err=options.max_error, + reverse_direction=options.reverse_direction) + font.save(output) + + +if __name__ == "__main__": + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/README fonttools-3.21.2/Snippets/README --- fonttools-3.0/Snippets/README 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/README 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -This directory includes snippets that people might useful to get ideas -from. The contents will come and go, don't rely on them being there or -having a certain API. If you need it, copy it and modify it. 
diff -Nru fonttools-3.0/Snippets/README.md fonttools-3.21.2/Snippets/README.md --- fonttools-3.0/Snippets/README.md 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/README.md 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ +This directory includes snippets that people might useful to get ideas from. +The contents will come and go, don't rely on them being there or having a certain API. +If you need it, copy it and modify it. + +If you do and think your work is useful for others, please add a link to it here: + +* https://github.com/twardoch/fonttools-utils +* https://github.com/twardoch/ttfdiet +* https://github.com/googlei18n/nototools +* https://github.com/googlefonts/fontbakery +* https://github.com/Typefounding/setUseTypoMetricsFalse diff -Nru fonttools-3.0/Snippets/svg2glif.py fonttools-3.21.2/Snippets/svg2glif.py --- fonttools-3.0/Snippets/svg2glif.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Snippets/svg2glif.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,126 @@ +#!/usr/bin/env python +""" Convert SVG paths to UFO glyphs. """ + +from __future__ import print_function, absolute_import + +__requires__ = ["FontTools", "ufoLib"] + +from fontTools.misc.py23 import SimpleNamespace +from fontTools.svgLib import SVGPath + +from ufoLib.pointPen import SegmentToPointPen +from ufoLib.glifLib import writeGlyphToString + + +__all__ = ["svg2glif"] + + +def svg2glif(svg, name, width=0, height=0, unicodes=None, transform=None, + version=2): + """ Convert an SVG outline to a UFO glyph with given 'name', advance + 'width' and 'height' (int), and 'unicodes' (list of int). + Return the resulting string in GLIF format (default: version 2). + If 'transform' is provided, apply a transformation matrix before the + conversion (must be tuple of 6 floats, or a FontTools Transform object). 
+ """ + glyph = SimpleNamespace(width=width, height=height, unicodes=unicodes) + outline = SVGPath.fromstring(svg, transform=transform) + + # writeGlyphToString takes a callable (usually a glyph's drawPoints + # method) that accepts a PointPen, however SVGPath currently only has + # a draw method that accepts a segment pen. We need to wrap the call + # with a converter pen. + def drawPoints(pointPen): + pen = SegmentToPointPen(pointPen) + outline.draw(pen) + + return writeGlyphToString(name, + glyphObject=glyph, + drawPointsFunc=drawPoints, + formatVersion=version) + + +def parse_args(args): + import argparse + + def split(arg): + return arg.replace(",", " ").split() + + def unicode_hex_list(arg): + try: + return [int(unihex, 16) for unihex in split(arg)] + except ValueError: + msg = "Invalid unicode hexadecimal value: %r" % arg + raise argparse.ArgumentTypeError(msg) + + def transform_list(arg): + try: + return [float(n) for n in split(arg)] + except ValueError: + msg = "Invalid transformation matrix: %r" % arg + raise argparse.ArgumentTypeError(msg) + + parser = argparse.ArgumentParser( + description="Convert SVG outlines to UFO glyphs (.glif)") + parser.add_argument( + "infile", metavar="INPUT.svg", help="Input SVG file containing " + ' elements with "d" attributes.') + parser.add_argument( + "outfile", metavar="OUTPUT.glif", help="Output GLIF file (default: " + "print to stdout)", nargs='?') + parser.add_argument( + "-n", "--name", help="The glyph name (default: input SVG file " + "basename, without the .svg extension)") + parser.add_argument( + "-w", "--width", help="The glyph advance width (default: 0)", + type=int, default=0) + parser.add_argument( + "-H", "--height", help="The glyph vertical advance (optional if " + '"width" is defined)', type=int, default=0) + parser.add_argument( + "-u", "--unicodes", help="List of Unicode code points as hexadecimal " + 'numbers (e.g. 
-u "0041 0042")', + type=unicode_hex_list) + parser.add_argument( + "-t", "--transform", help="Transformation matrix as a list of six " + 'float values (e.g. -t "0.1 0 0 -0.1 -50 200")', type=transform_list) + parser.add_argument( + "-f", "--format", help="UFO GLIF format version (default: 2)", + type=int, choices=(1, 2), default=2) + + return parser.parse_args(args) + + +def main(args=None): + from io import open + + options = parse_args(args) + + svg_file = options.infile + + if options.name: + name = options.name + else: + import os + name = os.path.splitext(os.path.basename(svg_file))[0] + + with open(svg_file, "r", encoding="utf-8") as f: + svg = f.read() + + glif = svg2glif(svg, name, + width=options.width, + height=options.height, + unicodes=options.unicodes, + transform=options.transform, + version=options.format) + + if options.outfile is None: + print(glif) + else: + with open(options.outfile, 'w', encoding='utf-8') as f: + f.write(glif) + + +if __name__ == "__main__": + import sys + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/woff2_compress.py fonttools-3.21.2/Snippets/woff2_compress.py --- fonttools-3.0/Snippets/woff2_compress.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/woff2_compress.py 2018-01-08 12:40:40.000000000 +0000 @@ -13,7 +13,7 @@ args = sys.argv[1:] if len(args) < 1: print("One argument, the input filename, must be provided.", file=sys.stderr) - sys.exit(1) + return 1 filename = args[0] outfilename = makeOutputFileName(filename, outputDir=None, extension='.woff2') @@ -26,4 +26,4 @@ if __name__ == '__main__': - main() + sys.exit(main()) diff -Nru fonttools-3.0/Snippets/woff2_decompress.py fonttools-3.21.2/Snippets/woff2_decompress.py --- fonttools-3.0/Snippets/woff2_decompress.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Snippets/woff2_decompress.py 2018-01-08 12:40:40.000000000 +0000 @@ -23,7 +23,7 @@ args = sys.argv[1:] if len(args) < 1: print("One argument, the input filename, must be 
provided.", file=sys.stderr) - sys.exit(1) + return 1 filename = args[0] outfilename = make_output_name(filename) @@ -36,4 +36,4 @@ if __name__ == '__main__': - main() + sys.exit(main()) diff -Nru fonttools-3.0/Tests/afmLib/afmLib_test.py fonttools-3.21.2/Tests/afmLib/afmLib_test.py --- fonttools-3.0/Tests/afmLib/afmLib_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/afmLib/afmLib_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,55 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import unittest +import os +from fontTools import afmLib + + +CWD = os.path.abspath(os.path.dirname(__file__)) +DATADIR = os.path.join(CWD, 'data') +AFM = os.path.join(DATADIR, 'TestAFM.afm') + + +class AFMTest(unittest.TestCase): + + def test_read_afm(self): + afm = afmLib.AFM(AFM) + self.assertEqual(sorted(afm.kernpairs()), + sorted([('V', 'A'), ('T', 'comma'), ('V', 'd'), ('T', 'c'), ('T', 'period')])) + self.assertEqual(afm['V', 'A'], -60) + self.assertEqual(afm['V', 'd'], 30) + self.assertEqual(afm['A'], (65, 668, (8, -25, 660, 666))) + + def test_write_afm(self): + afm = afmLib.AFM(AFM) + newAfm, afmData = self.write(afm) + self.assertEqual(afm.kernpairs(), newAfm.kernpairs()) + self.assertEqual(afm.chars(), newAfm.chars()) + self.assertEqual(afm.comments(), newAfm.comments()[1:]) # skip the "generated by afmLib" comment + for pair in afm.kernpairs(): + self.assertEqual(afm[pair], newAfm[pair]) + for char in afm.chars(): + self.assertEqual(afm[char], newAfm[char]) + with open(AFM, 'r') as f: + originalLines = f.read().splitlines() + newLines = afmData.splitlines() + del newLines[1] # remove the "generated by afmLib" comment + self.assertEqual(originalLines, newLines) + + @staticmethod + def write(afm, sep='\r'): + temp = os.path.join(DATADIR, 'temp.afm') + try: + afm.write(temp, sep) + with open(temp, 'r') as f: + afmData = f.read() + afm = afmLib.AFM(temp) + finally: + if os.path.exists(temp): + 
os.remove(temp) + return afm, afmData + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/afmLib/data/TestAFM.afm fonttools-3.21.2/Tests/afmLib/data/TestAFM.afm --- fonttools-3.0/Tests/afmLib/data/TestAFM.afm 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/afmLib/data/TestAFM.afm 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ +StartFontMetrics 2.0 +Comment UniqueID 2123703 +Comment Panose 2 0 6 3 3 0 0 2 0 4 +FontName TestFont-Regular +FullName TestFont-Regular +FamilyName TestFont +Weight Regular +ItalicAngle 0.00 +IsFixedPitch false +FontBBox -94 -317 1316 1009 +UnderlinePosition -296 +UnderlineThickness 111 +Version 001.000 +Notice [c] Copyright 2017. All Rights Reserved. +EncodingScheme FontSpecific +CapHeight 700 +XHeight 500 +Ascender 750 +Descender -250 +StdHW 181 +StdVW 194 +StartCharMetrics 4 +C 32 ; WX 200 ; N space ; B 0 0 0 0 ; +C 65 ; WX 668 ; N A ; B 8 -25 660 666 ; +C 66 ; WX 543 ; N B ; B 36 0 522 666 ; +C 67 ; WX 582 ; N C ; B 24 -21 564 687 ; +EndCharMetrics +StartKernData +StartKernPairs 5 +KPX T c 30 +KPX T comma -100 +KPX T period -100 +KPX V A -60 +KPX V d 30 +EndKernPairs +EndKernData +EndFontMetrics diff -Nru fonttools-3.0/Tests/agl_test.py fonttools-3.21.2/Tests/agl_test.py --- fonttools-3.0/Tests/agl_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/agl_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, absolute_import, + unicode_literals) +from fontTools.misc.py23 import * +from fontTools import agl +import unittest + + +class AglToUnicodeTest(unittest.TestCase): + def test_spec_examples(self): + # https://github.com/adobe-type-tools/agl-specification#3-examples + # + # TODO: Currently, we only handle AGLFN instead of legacy AGL names. + # Therefore, the test cases below use Iogonek instead of Lcommaaccent. 
+ # Change Iogonek to Lcommaaccent as soon as the implementation has + # been fixed to also support legacy AGL names. + # https://github.com/fonttools/fonttools/issues/775 + self.assertEqual(agl.toUnicode("Iogonek"), "Į") + self.assertEqual(agl.toUnicode("uni20AC0308"), "\u20AC\u0308") + self.assertEqual(agl.toUnicode("u1040C"), "\U0001040C") + self.assertEqual(agl.toUnicode("uniD801DC0C"), "") + self.assertEqual(agl.toUnicode("uni20ac"), "") + self.assertEqual( + agl.toUnicode("Iogonek_uni20AC0308_u1040C.alternate"), + "\u012E\u20AC\u0308\U0001040C") + self.assertEqual(agl.toUnicode("Iogonek_uni012E_u012E"), "ĮĮĮ") + self.assertEqual(agl.toUnicode("foo"), "") + self.assertEqual(agl.toUnicode(".notdef"), "") + + def test_aglfn(self): + self.assertEqual(agl.toUnicode("longs_t"), "ſt") + self.assertEqual(agl.toUnicode("f_f_i.alt123"), "ffi") + + def test_uniABCD(self): + self.assertEqual(agl.toUnicode("uni0041"), "A") + self.assertEqual(agl.toUnicode("uni0041_uni0042_uni0043"), "ABC") + self.assertEqual(agl.toUnicode("uni004100420043"), "ABC") + self.assertEqual(agl.toUnicode("uni"), "") + self.assertEqual(agl.toUnicode("uni41"), "") + self.assertEqual(agl.toUnicode("uni004101"), "") + self.assertEqual(agl.toUnicode("uniDC00"), "") + + def test_uABCD(self): + self.assertEqual(agl.toUnicode("u0041"), "A") + self.assertEqual(agl.toUnicode("u00041"), "A") + self.assertEqual(agl.toUnicode("u000041"), "A") + self.assertEqual(agl.toUnicode("u0000041"), "") + self.assertEqual(agl.toUnicode("u0041_uni0041_A.alt"), "AAA") + + def test_union(self): + # Interesting test case because "uni" is a prefix of "union". + self.assertEqual(agl.toUnicode("union"), "∪") + # U+222A U+FE00 is a Standardized Variant for UNION WITH SERIFS. 
+ self.assertEqual(agl.toUnicode("union_uniFE00"), "\u222A\uFE00") + + def test_dingbats(self): + self.assertEqual(agl.toUnicode("a20", isZapfDingbats=True), "✔") + self.assertEqual(agl.toUnicode("a20.alt", isZapfDingbats=True), "✔") + self.assertEqual(agl.toUnicode("a206", isZapfDingbats=True), "❰") + self.assertEqual(agl.toUnicode("a20", isZapfDingbats=False), "") + self.assertEqual(agl.toUnicode("a0", isZapfDingbats=True), "") + self.assertEqual(agl.toUnicode("a207", isZapfDingbats=True), "") + self.assertEqual(agl.toUnicode("abcdef", isZapfDingbats=True), "") + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/cffLib/cffLib_test.py fonttools-3.21.2/Tests/cffLib/cffLib_test.py --- fonttools-3.0/Tests/cffLib/cffLib_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/cffLib/cffLib_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ +from __future__ import print_function, division, absolute_import +from fontTools.cffLib import TopDict, PrivateDict, CharStrings +from fontTools.misc.testTools import parseXML +import unittest + + +class TopDictTest(unittest.TestCase): + + def test_recalcFontBBox(self): + topDict = TopDict() + topDict.CharStrings = CharStrings(None, None, None, PrivateDict(), None, None) + topDict.CharStrings.fromXML(None, None, parseXML(""" + + endchar + + + 100 -100 rmoveto 200 hlineto 200 vlineto -200 hlineto endchar + + + 0 0 rmoveto 200 hlineto 200 vlineto -200 hlineto endchar + + + -55.1 -55.1 rmoveto 110.2 hlineto 110.2 vlineto -110.2 hlineto endchar + + """)) + + topDict.recalcFontBBox() + self.assertEqual(topDict.FontBBox, [-56, -100, 300, 200]) + + def test_recalcFontBBox_empty(self): + topDict = TopDict() + topDict.CharStrings = CharStrings(None, None, None, PrivateDict(), None, None) + topDict.CharStrings.fromXML(None, None, parseXML(""" + + endchar + + + 123 endchar + + """)) + + topDict.recalcFontBBox() + self.assertEqual(topDict.FontBBox, [0, 0, 0, 0]) + + +if 
__name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/cffLib/specializer_test.py fonttools-3.21.2/Tests/cffLib/specializer_test.py --- fonttools-3.0/Tests/cffLib/specializer_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/cffLib/specializer_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,918 @@ +from __future__ import print_function, division, absolute_import +from fontTools.cffLib.specializer import (programToString, stringToProgram, + generalizeProgram, specializeProgram) +import unittest + +# TODO +# https://github.com/fonttools/fonttools/pull/959#commitcomment-22059841 +# Maybe we should make these data driven. Each entry will have an input string, +# and a generalized and specialized. For the latter two, if they are None, they +# are considered equal to the input. Then we can do roundtripping tests as well... +# There are a few other places (aosp tests for example) where we generate tests +# from data. + + +def get_generalized_charstr(charstr, **kwargs): + return programToString(generalizeProgram(stringToProgram(charstr), **kwargs)) + + +def get_specialized_charstr(charstr, **kwargs): + return programToString(specializeProgram(stringToProgram(charstr), **kwargs)) + + +class CFFGeneralizeProgramTest(unittest.TestCase): + + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + +# no arguments/operands + def test_rmoveto_none(self): + test_charstr = 'rmoveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_hmoveto_none(self): + test_charstr = 'hmoveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_vmoveto_none(self): + test_charstr = 'vmoveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_rlineto_none(self): + test_charstr = 'rlineto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_hlineto_none(self): + test_charstr = 'hlineto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_vlineto_none(self): + test_charstr = 'vlineto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_rrcurveto_none(self): + test_charstr = 'rrcurveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_hhcurveto_none(self): + test_charstr = 'hhcurveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_vvcurveto_none(self): + test_charstr = 'vvcurveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_hvcurveto_none(self): + test_charstr = 'hvcurveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_vhcurveto_none(self): + test_charstr = 'vhcurveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def test_rcurveline_none(self): + test_charstr = 'rcurveline' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + + def 
test_rlinecurve_none(self): + test_charstr = 'rlinecurve' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_generalized_charstr(test_charstr) + +# rmoveto + def test_rmoveto_zero(self): + test_charstr = '0 0 rmoveto' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rmoveto_zero_width(self): + test_charstr = '100 0 0 rmoveto' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rmoveto(self): + test_charstr = '.55 -.8 rmoveto' + xpct_charstr = '0.55 -0.8 rmoveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rmoveto_width(self): + test_charstr = '100.5 50 -5.8 rmoveto' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# hmoveto + def test_hmoveto_zero(self): + test_charstr = '0 hmoveto' + xpct_charstr = '0 0 rmoveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hmoveto_zero_width(self): + test_charstr = '100 0 hmoveto' + xpct_charstr = '100 0 0 rmoveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hmoveto(self): + test_charstr = '.67 hmoveto' + xpct_charstr = '0.67 0 rmoveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hmoveto_width(self): + test_charstr = '100 -70 hmoveto' + xpct_charstr = '100 -70 0 rmoveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# vmoveto + def test_vmoveto_zero(self): + test_charstr = '0 vmoveto' + xpct_charstr = '0 0 rmoveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vmoveto_zero_width(self): + test_charstr = '100 0 vmoveto' + xpct_charstr = '100 0 0 rmoveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vmoveto(self): + test_charstr = '-.24 vmoveto' + xpct_charstr = '0 -0.24 rmoveto' 
+ self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vmoveto_width(self): + test_charstr = '100 44 vmoveto' + xpct_charstr = '100 0 44 rmoveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# rlineto + def test_rlineto_zero(self): + test_charstr = '0 0 rlineto' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rlineto_zero_mult(self): + test_charstr = '0 0 0 0 0 0 rlineto' + xpct_charstr = ('0 0 rlineto '*3).rstrip() + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rlineto(self): + test_charstr = '.55 -.8 rlineto' + xpct_charstr = '0.55 -0.8 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rlineto_mult(self): + test_charstr = '.55 -.8 .55 -.8 .55 -.8 rlineto' + xpct_charstr = ('0.55 -0.8 rlineto '*3).rstrip() + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# hlineto + def test_hlineto_zero(self): + test_charstr = '0 hlineto' + xpct_charstr = '0 0 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hlineto_zero_mult(self): + test_charstr = '0 0 0 0 hlineto' + xpct_charstr = ('0 0 rlineto '*4).rstrip() + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hlineto(self): + test_charstr = '.67 hlineto' + xpct_charstr = '0.67 0 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hlineto_mult(self): + test_charstr = '.67 -6.0 .67 hlineto' + xpct_charstr = '0.67 0 rlineto 0 -6.0 rlineto 0.67 0 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# vlineto + def test_vlineto_zero(self): + test_charstr = '0 vlineto' + xpct_charstr = '0 0 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vlineto_zero_mult(self): + test_charstr = '0 0 0 vlineto' + xpct_charstr 
= ('0 0 rlineto '*3).rstrip() + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vlineto(self): + test_charstr = '-.24 vlineto' + xpct_charstr = '0 -0.24 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vlineto_mult(self): + test_charstr = '-.24 +50 30 -4 vlineto' + xpct_charstr = '0 -0.24 rlineto 50 0 rlineto 0 30 rlineto -4 0 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# rrcurveto + def test_rrcurveto(self): + test_charstr = '-1 56 -2 57 -1 57 rrcurveto' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_mult(self): + test_charstr = '-30 8 -36 15 -37 22 44 54 31 61 22 68 rrcurveto' + xpct_charstr = '-30 8 -36 15 -37 22 rrcurveto 44 54 31 61 22 68 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_d3947b8(self): + test_charstr = '1 2 3 4 5 0 rrcurveto' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_v0_0h_h0(self): + test_charstr = '0 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto' + xpct_charstr = '0 10 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_h0_0h_h0(self): + test_charstr = '10 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto' + xpct_charstr = '10 0 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_00_0h_h0(self): + test_charstr = '0 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto' + xpct_charstr = '0 0 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_r0_0h_h0(self): + test_charstr = '10 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 
rrcurveto' + xpct_charstr = '10 10 1 2 0 0 rrcurveto 0 0 1 2 0 1 rrcurveto 0 1 3 4 0 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_v0_0v_v0(self): + test_charstr = '0 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto' + xpct_charstr = '0 10 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_h0_0v_v0(self): + test_charstr = '10 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto' + xpct_charstr = '10 0 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_00_0v_v0(self): + test_charstr = '0 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto' + xpct_charstr = '0 0 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_r0_0v_v0(self): + test_charstr = '10 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto' + xpct_charstr = '10 10 1 2 0 0 rrcurveto 0 0 1 2 1 0 rrcurveto 1 0 3 4 0 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# hhcurveto + def test_hhcurveto_4(self): + test_charstr = '10 30 0 10 hhcurveto' + xpct_charstr = '10 0 30 0 10 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_5(self): + test_charstr = '40 -38 -60 41 -91 hhcurveto' + xpct_charstr = '-38 40 -60 41 -91 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_mult_4_4(self): + test_charstr = '43 23 25 18 29 56 42 -84 hhcurveto' + xpct_charstr = '43 0 23 25 18 0 rrcurveto 29 0 56 42 -84 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_mult_5_4(self): + test_charstr = '43 23 25 18 29 56 42 -84 79 hhcurveto' + xpct_charstr = '23 43 25 18 29 0 
rrcurveto 56 0 42 -84 79 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_mult_4_4_4(self): + test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 hhcurveto' + xpct_charstr = '1 0 2 3 4 0 rrcurveto 5 0 6 7 8 0 rrcurveto 9 0 10 11 12 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_mult_5_4_4(self): + test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 13 hhcurveto' + xpct_charstr = '2 1 3 4 5 0 rrcurveto 6 0 7 8 9 0 rrcurveto 10 0 11 12 13 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# vvcurveto + def test_vvcurveto_4(self): + test_charstr = '61 6 52 68 vvcurveto' + xpct_charstr = '0 61 6 52 0 68 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_5(self): + test_charstr = '61 38 35 56 72 vvcurveto' + xpct_charstr = '61 38 35 56 0 72 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_mult_4_4(self): + test_charstr = '-84 -88 -30 -90 -13 19 23 -11 vvcurveto' + xpct_charstr = '0 -84 -88 -30 0 -90 rrcurveto 0 -13 19 23 0 -11 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_mult_5_4(self): + test_charstr = '43 12 17 32 65 68 -6 52 61 vvcurveto' + xpct_charstr = '43 12 17 32 0 65 rrcurveto 0 68 -6 52 0 61 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_mult_4_4_4(self): + test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 vvcurveto' + xpct_charstr = '0 1 2 3 0 4 rrcurveto 0 5 6 7 0 8 rrcurveto 0 9 10 11 0 12 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_mult_5_4_4(self): + test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 13 vvcurveto' + xpct_charstr = '1 2 3 4 0 5 rrcurveto 0 6 7 8 0 9 rrcurveto 0 10 11 12 0 13 rrcurveto' + 
self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# hvcurveto + def test_hvcurveto_4(self): + test_charstr = '1 2 3 4 hvcurveto' + xpct_charstr = '1 0 2 3 0 4 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_5(self): + test_charstr = '57 44 22 40 34 hvcurveto' + xpct_charstr = '57 0 44 22 34 40 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_4(self): + test_charstr = '65 33 -19 -45 -45 -29 -25 -71 hvcurveto' + xpct_charstr = '65 0 33 -19 0 -45 rrcurveto 0 -45 -29 -25 -71 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_5(self): + test_charstr = '97 69 41 86 58 -36 34 -64 11 hvcurveto' + xpct_charstr = '97 0 69 41 0 86 rrcurveto 0 58 -36 34 -64 11 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_4_4(self): + test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 hvcurveto' + xpct_charstr = '1 0 2 3 0 4 rrcurveto 0 5 6 7 8 0 rrcurveto 9 0 10 11 0 12 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_4_5(self): + test_charstr = '-124 -79 104 165 163 82 102 124 56 43 -25 -37 35 hvcurveto' + xpct_charstr = '-124 0 -79 104 0 165 rrcurveto 0 163 82 102 124 0 rrcurveto 56 0 43 -25 35 -37 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_4_4_4(self): + test_charstr = '32 25 22 32 31 -25 22 -32 -32 -25 -22 -31 -32 25 -22 32 hvcurveto' + xpct_charstr = '32 0 25 22 0 32 rrcurveto 0 31 -25 22 -32 0 rrcurveto -32 0 -25 -22 0 -31 rrcurveto 0 -32 25 -22 32 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_4_4_4_5(self): + test_charstr = '-170 -128 111 195 234 172 151 178 182 95 -118 -161 -130 -71 -77 -63 -55 -19 38 79 20 hvcurveto' + xpct_charstr = '-170 0 -128 111 
0 195 rrcurveto 0 234 172 151 178 0 rrcurveto 182 0 95 -118 0 -161 rrcurveto 0 -130 -71 -77 -63 0 rrcurveto -55 0 -19 38 20 79 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# vhcurveto + def test_vhcurveto_4(self): + test_charstr = '-57 43 -30 53 vhcurveto' + xpct_charstr = '0 -57 43 -30 53 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_5(self): + test_charstr = '41 -27 19 -46 11 vhcurveto' + xpct_charstr = '0 41 -27 19 -46 11 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4_4(self): + test_charstr = '1 2 3 4 5 6 7 8 vhcurveto' + xpct_charstr = '0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4_5(self): + test_charstr = '-64 -23 -25 -45 -30 -24 14 33 -19 vhcurveto' + xpct_charstr = '0 -64 -23 -25 -45 0 rrcurveto -30 0 -24 14 -19 33 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4_4_4(self): + test_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 vhcurveto' + xpct_charstr = '0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto 0 9 10 11 12 0 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4_4_5(self): + test_charstr = '108 59 81 98 99 59 -81 -108 -100 -46 -66 -63 -47 vhcurveto' + xpct_charstr = '0 108 59 81 98 0 rrcurveto 99 0 59 -81 0 -108 rrcurveto 0 -100 -46 -66 -63 -47 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4_4_4_5(self): + test_charstr = '60 -26 37 -43 -33 -28 -22 -36 -37 27 -20 32 3 4 0 1 3 vhcurveto' + xpct_charstr = '0 60 -26 37 -43 0 rrcurveto -33 0 -28 -22 0 -36 rrcurveto 0 -37 27 -20 32 0 rrcurveto 3 0 4 0 3 1 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# rcurveline + def test_rcurveline_6_2(self): + 
test_charstr = '21 -76 21 -72 24 -73 31 -100 rcurveline' + xpct_charstr = '21 -76 21 -72 24 -73 rrcurveto 31 -100 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rcurveline_6_6_2(self): + test_charstr = '-73 80 -80 121 -49 96 60 65 55 41 54 17 -8 78 rcurveline' + xpct_charstr = '-73 80 -80 121 -49 96 rrcurveto 60 65 55 41 54 17 rrcurveto -8 78 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rcurveline_6_6_6_2(self): + test_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 47 -89 63 -98 52 -59 91 8 rcurveline' + xpct_charstr = '1 64 10 51 29 39 rrcurveto 15 21 15 20 15 18 rrcurveto 47 -89 63 -98 52 -59 rrcurveto 91 8 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rcurveline_6_6_6_6_2(self): + test_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 46 -88 63 -97 52 -59 -38 -57 -49 -62 -52 -54 96 -8 rcurveline' + xpct_charstr = '1 64 10 51 29 39 rrcurveto 15 21 15 20 15 18 rrcurveto 46 -88 63 -97 52 -59 rrcurveto -38 -57 -49 -62 -52 -54 rrcurveto 96 -8 rlineto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# rlinecurve + def test_rlinecurve_2_6(self): + test_charstr = '21 -76 21 -72 24 -73 31 -100 rlinecurve' + xpct_charstr = '21 -76 rlineto 21 -72 24 -73 31 -100 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rlinecurve_2_2_6(self): + test_charstr = '-73 80 -80 121 -49 96 60 65 55 41 rlinecurve' + xpct_charstr = '-73 80 rlineto -80 121 rlineto -49 96 60 65 55 41 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rlinecurve_2_2_2_6(self): + test_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 rlinecurve' + xpct_charstr = '1 64 rlineto 10 51 rlineto 29 39 rlineto 15 21 15 20 15 18 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + def test_rlinecurve_2_2_2_2_6(self): + test_charstr = '1 
64 10 51 29 39 15 21 15 20 15 18 46 -88 rlinecurve' + xpct_charstr = '1 64 rlineto 10 51 rlineto 29 39 rlineto 15 21 rlineto 15 20 15 18 46 -88 rrcurveto' + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# hstem/vstem + def test_hstem_vstem(self): + test_charstr = '95 0 58 542 60 hstem 89 65 344 67 vstem 89 45 rmoveto' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# hstemhm/vstemhm + def test_hstemhm_vstemhm(self): + test_charstr = '-16 577 60 24 60 hstemhm 98 55 236 55 vstemhm 343 577 rmoveto' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# hintmask/cntrmask + def test_hintmask_cntrmask(self): + test_charstr = '52 80 153 61 4 83 -71.5 71.5 hintmask 11011100 94 119 216 119 216 119 cntrmask 1110000 154 -12 rmoveto' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# endchar + def test_endchar(self): + test_charstr = '-255 319 rmoveto 266 57 rlineto endchar' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + +# xtra + def test_xtra(self): + test_charstr = '-255 319 rmoveto 266 57 rlineto xtra 90 34' + xpct_charstr = test_charstr + self.assertEqual(get_generalized_charstr(test_charstr), xpct_charstr) + + +class CFFSpecializeProgramTest(unittest.TestCase): + + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + +# no arguments/operands + def test_rmoveto_none(self): + test_charstr = 'rmoveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_hmoveto_none(self): + test_charstr = 'hmoveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_vmoveto_none(self): + test_charstr = 'vmoveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_rlineto_none(self): + test_charstr = 'rlineto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_hlineto_none(self): + test_charstr = 'hlineto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_vlineto_none(self): + test_charstr = 'vlineto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_rrcurveto_none(self): + test_charstr = 'rrcurveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_hhcurveto_none(self): + test_charstr = 'hhcurveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_vvcurveto_none(self): + test_charstr = 'vvcurveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_hvcurveto_none(self): + test_charstr = 'hvcurveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_vhcurveto_none(self): + test_charstr = 'vhcurveto' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def test_rcurveline_none(self): + test_charstr = 'rcurveline' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + + def 
test_rlinecurve_none(self): + test_charstr = 'rlinecurve' + with self.assertRaisesRegex(ValueError, r'\[\]'): + get_specialized_charstr(test_charstr) + +# rmoveto + def test_rmoveto_zero(self): + test_charstr = '0 0 rmoveto' + xpct_charstr = '0 hmoveto' + self.assertEqual(get_specialized_charstr(test_charstr, + generalizeFirst=False), xpct_charstr) + + def test_rmoveto_zero_mult(self): + test_charstr = '0 0 rmoveto '*3 + xpct_charstr = '0 hmoveto' + self.assertEqual(get_specialized_charstr(test_charstr, + generalizeFirst=False), xpct_charstr) + + def test_rmoveto_zero_width(self): + test_charstr = '100 0 0 rmoveto' + xpct_charstr = '100 0 hmoveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rmoveto(self): + test_charstr = '.55 -.8 rmoveto' + xpct_charstr = '0.55 -0.8 rmoveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rmoveto_mult(self): + test_charstr = '55 -8 rmoveto '*3 + xpct_charstr = '165 -24 rmoveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rmoveto_width(self): + test_charstr = '100.5 50 -5.8 rmoveto' + xpct_charstr = test_charstr + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + +# rlineto + def test_rlineto_zero(self): + test_charstr = '0 0 rlineto' + xpct_charstr = '' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rlineto_zero_mult(self): + test_charstr = '0 0 rlineto '*3 + xpct_charstr = '' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rlineto(self): + test_charstr = '.55 -.8 rlineto' + xpct_charstr = '0.55 -0.8 rlineto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rlineto_mult(self): + test_charstr = '.55 -.8 rlineto '*3 + xpct_charstr = '0.55 -0.8 0.55 -0.8 0.55 -0.8 rlineto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hlineto(self): + test_charstr 
= '.67 0 rlineto' + xpct_charstr = '0.67 hlineto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hlineto_zero_mult(self): + test_charstr = '62 0 rlineto '*3 + xpct_charstr = '186 hlineto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hlineto_mult(self): + test_charstr = '.67 0 rlineto 0 -6.0 rlineto .67 0 rlineto' + xpct_charstr = '0.67 -6.0 0.67 hlineto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vlineto(self): + test_charstr = '0 -.24 rlineto' + xpct_charstr = '-0.24 vlineto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vlineto_zero_mult(self): + test_charstr = '0 -24 rlineto '*3 + xpct_charstr = '-72 vlineto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vlineto_mult(self): + test_charstr = '0 -.24 rlineto +50 0 rlineto 0 30 rlineto -4 0 rlineto' + xpct_charstr = '-0.24 50 30 -4 vlineto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_0lineto_peephole(self): + test_charstr = '1 2 0 0 3 4 rlineto' + xpct_charstr = '1 2 3 4 rlineto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hlineto_peephole(self): + test_charstr = '1 2 5 0 3 4 rlineto' + xpct_charstr = test_charstr + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vlineto_peephole(self): + test_charstr = '1 2 0 5 3 4 rlineto' + xpct_charstr = test_charstr + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + +# rrcurveto + def test_rrcurveto(self): + test_charstr = '-1 56 -2 57 -1 57 rrcurveto' + xpct_charstr = test_charstr + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_mult(self): + test_charstr = '-30 8 -36 15 -37 22 rrcurveto 44 54 31 61 22 68 rrcurveto' + xpct_charstr = '-30 8 -36 15 -37 22 44 54 31 61 22 68 rrcurveto' + 
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_d3947b8(self): + test_charstr = '1 2 3 4 5 0 rrcurveto' + xpct_charstr = '2 1 3 4 5 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_4(self): + test_charstr = '10 0 30 0 10 0 rrcurveto' + xpct_charstr = '10 30 0 10 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_5(self): + test_charstr = '-38 40 -60 41 -91 0 rrcurveto' + xpct_charstr = '40 -38 -60 41 -91 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_mult_4_4(self): + test_charstr = '43 0 23 25 18 0 rrcurveto 29 0 56 42 -84 0 rrcurveto' + xpct_charstr = '43 23 25 18 29 56 42 -84 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_mult_5_4(self): + test_charstr = '23 43 25 18 29 0 rrcurveto 56 0 42 -84 79 0 rrcurveto' + xpct_charstr = '43 23 25 18 29 56 42 -84 79 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_mult_4_4_4(self): + test_charstr = '1 0 2 3 4 0 rrcurveto 5 0 6 7 8 0 rrcurveto 9 0 10 11 12 0 rrcurveto' + xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_mult_5_4_4(self): + test_charstr = '2 1 3 4 5 0 rrcurveto 6 0 7 8 9 0 rrcurveto 10 0 11 12 13 0 rrcurveto' + xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 13 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_4(self): + test_charstr = '0 61 6 52 0 68 rrcurveto' + xpct_charstr = '61 6 52 68 vvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_5(self): + test_charstr = '61 38 35 56 0 72 rrcurveto' + xpct_charstr = '61 38 35 56 72 vvcurveto' + 
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_mult_4_4(self): + test_charstr = '0 -84 -88 -30 0 -90 rrcurveto 0 -13 19 23 0 -11 rrcurveto' + xpct_charstr = '-84 -88 -30 -90 -13 19 23 -11 vvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_mult_5_4(self): + test_charstr = '43 12 17 32 0 65 rrcurveto 0 68 -6 52 0 61 rrcurveto' + xpct_charstr = '43 12 17 32 65 68 -6 52 61 vvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_mult_4_4_4(self): + test_charstr = '0 1 2 3 0 4 rrcurveto 0 5 6 7 0 8 rrcurveto 0 9 10 11 0 12 rrcurveto' + xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 vvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_mult_5_4_4(self): + test_charstr = '1 2 3 4 0 5 rrcurveto 0 6 7 8 0 9 rrcurveto 0 10 11 12 0 13 rrcurveto' + xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 13 vvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4(self): + test_charstr = '1 0 2 3 0 4 rrcurveto' + xpct_charstr = '1 2 3 4 hvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_5(self): + test_charstr = '57 0 44 22 34 40 rrcurveto' + xpct_charstr = '57 44 22 40 34 hvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_4(self): + test_charstr = '65 0 33 -19 0 -45 rrcurveto 0 -45 -29 -25 -71 0 rrcurveto' + xpct_charstr = '65 33 -19 -45 -45 -29 -25 -71 hvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_5(self): + test_charstr = '97 0 69 41 0 86 rrcurveto 0 58 -36 34 -64 11 rrcurveto' + xpct_charstr = '97 69 41 86 58 -36 34 -64 11 hvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_4_4(self): + test_charstr = '1 0 2 3 0 4 
rrcurveto 0 5 6 7 8 0 rrcurveto 9 0 10 11 0 12 rrcurveto' + xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 hvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_4_5(self): + test_charstr = '-124 0 -79 104 0 165 rrcurveto 0 163 82 102 124 0 rrcurveto 56 0 43 -25 35 -37 rrcurveto' + xpct_charstr = '-124 -79 104 165 163 82 102 124 56 43 -25 -37 35 hvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_4_4_4(self): + test_charstr = '32 0 25 22 0 32 rrcurveto 0 31 -25 22 -32 0 rrcurveto -32 0 -25 -22 0 -31 rrcurveto 0 -32 25 -22 32 0 rrcurveto' + xpct_charstr = '32 25 22 32 31 -25 22 -32 -32 -25 -22 -31 -32 25 -22 32 hvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_4_4_4_4_5(self): + test_charstr = '-170 0 -128 111 0 195 rrcurveto 0 234 172 151 178 0 rrcurveto 182 0 95 -118 0 -161 rrcurveto 0 -130 -71 -77 -63 0 rrcurveto -55 0 -19 38 20 79 rrcurveto' + xpct_charstr = '-170 -128 111 195 234 172 151 178 182 95 -118 -161 -130 -71 -77 -63 -55 -19 38 79 20 hvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4(self): + test_charstr = '0 -57 43 -30 53 0 rrcurveto' + xpct_charstr = '-57 43 -30 53 vhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_5(self): + test_charstr = '0 41 -27 19 -46 11 rrcurveto' + xpct_charstr = '41 -27 19 -46 11 vhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4_4(self): + test_charstr = '0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto' + xpct_charstr = '1 2 3 4 5 6 7 8 vhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4_5(self): + test_charstr = '0 -64 -23 -25 -45 0 rrcurveto -30 0 -24 14 -19 33 rrcurveto' + xpct_charstr = '-64 -23 -25 -45 -30 -24 14 33 -19 vhcurveto' + 
self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4_4_4(self): + test_charstr = '0 1 2 3 4 0 rrcurveto 5 0 6 7 0 8 rrcurveto 0 9 10 11 12 0 rrcurveto' + xpct_charstr = '1 2 3 4 5 6 7 8 9 10 11 12 vhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4_4_5(self): + test_charstr = '0 108 59 81 98 0 rrcurveto 99 0 59 -81 0 -108 rrcurveto 0 -100 -46 -66 -63 -47 rrcurveto' + xpct_charstr = '108 59 81 98 99 59 -81 -108 -100 -46 -66 -63 -47 vhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_4_4_4_5(self): + test_charstr = '0 60 -26 37 -43 0 rrcurveto -33 0 -28 -22 0 -36 rrcurveto 0 -37 27 -20 32 0 rrcurveto 3 0 4 0 3 1 rrcurveto' + xpct_charstr = '60 -26 37 -43 -33 -28 -22 -36 -37 27 -20 32 3 4 0 1 3 vhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_v0_0h_h0(self): + test_charstr = '0 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto' + xpct_charstr = '10 1 2 0 0 1 2 1 1 3 4 0 vhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_h0_0h_h0(self): + test_charstr = '10 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto' + xpct_charstr = '10 1 2 0 hhcurveto 0 1 2 1 1 3 4 0 hvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_00_0h_h0(self): + test_charstr = '0 0 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto' + xpct_charstr = '1 2 rlineto 0 1 2 1 1 3 4 0 hvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_r0_0h_h0(self): + test_charstr = '10 10 1 2 0 0 0 0 1 2 0 1 0 1 3 4 0 0 rrcurveto' + xpct_charstr = '10 10 1 2 0 0 1 2 1 1 3 4 0 vvcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_v0_0v_v0(self): + test_charstr = '0 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto' + xpct_charstr = 
'10 1 2 0 vhcurveto 0 1 2 1 1 3 4 0 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_h0_0v_v0(self): + test_charstr = '10 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto' + xpct_charstr = '10 1 2 0 0 1 2 1 1 3 4 0 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_00_0v_v0(self): + test_charstr = '0 0 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto' + xpct_charstr = '1 2 rlineto 0 1 2 1 1 3 4 0 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rrcurveto_r0_0v_v0(self): + test_charstr = '10 10 1 2 0 0 0 0 1 2 1 0 1 0 3 4 0 0 rrcurveto' + xpct_charstr = '10 10 1 2 0 0 1 2 1 1 3 4 0 hhcurveto' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hhcurveto_peephole(self): + test_charstr = '1 2 3 4 5 6 1 2 3 4 5 0 1 2 3 4 5 6 rrcurveto' + xpct_charstr = test_charstr + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vvcurveto_peephole(self): + test_charstr = '1 2 3 4 5 6 1 2 3 4 0 6 1 2 3 4 5 6 rrcurveto' + xpct_charstr = test_charstr + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_hvcurveto_peephole(self): + test_charstr = '1 2 3 4 5 6 1 0 3 4 5 6 1 2 3 4 5 6 rrcurveto' + xpct_charstr = test_charstr + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_vhcurveto_peephole(self): + test_charstr = '1 2 3 4 5 6 0 2 3 4 5 6 1 2 3 4 5 6 rrcurveto' + xpct_charstr = test_charstr + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rcurveline_6_2(self): + test_charstr = '21 -76 21 -72 24 -73 rrcurveto 31 -100 rlineto' + xpct_charstr = '21 -76 21 -72 24 -73 31 -100 rcurveline' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rcurveline_6_6_2(self): + test_charstr = '-73 80 -80 121 -49 96 rrcurveto 60 65 55 41 54 17 rrcurveto -8 78 
rlineto' + xpct_charstr = '-73 80 -80 121 -49 96 60 65 55 41 54 17 -8 78 rcurveline' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rcurveline_6_6_6_2(self): + test_charstr = '1 64 10 51 29 39 rrcurveto 15 21 15 20 15 18 rrcurveto 47 -89 63 -98 52 -59 rrcurveto 91 8 rlineto' + xpct_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 47 -89 63 -98 52 -59 91 8 rcurveline' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rlinecurve_2_6(self): + test_charstr = '21 -76 rlineto 21 -72 24 -73 31 -100 rrcurveto' + xpct_charstr = '21 -76 21 -72 24 -73 31 -100 rlinecurve' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rlinecurve_2_2_6(self): + test_charstr = '-73 80 rlineto -80 121 rlineto -49 96 60 65 55 41 rrcurveto' + xpct_charstr = '-73 80 -80 121 -49 96 60 65 55 41 rlinecurve' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + def test_rlinecurve_2_2_2_6(self): + test_charstr = '1 64 rlineto 10 51 rlineto 29 39 rlineto 15 21 15 20 15 18 rrcurveto' + xpct_charstr = '1 64 10 51 29 39 15 21 15 20 15 18 rlinecurve' + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + +# maxstack CFF=48 + def test_maxstack(self): + operands = '1 2 3 4 5 6 ' + operator = 'rrcurveto ' + test_charstr = (operands + operator)*9 + xpct_charstr = (operands + operator + operands*8 + operator).rstrip() + self.assertEqual(get_specialized_charstr(test_charstr), xpct_charstr) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/encodings/codecs_test.py fonttools-3.21.2/Tests/encodings/codecs_test.py --- fonttools-3.0/Tests/encodings/codecs_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/encodings/codecs_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,26 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * 
+import unittest +import fontTools.encodings.codecs # Not to be confused with "import codecs" + +class ExtendedCodecsTest(unittest.TestCase): + + def test_decode_mac_japanese(self): + self.assertEqual(b'x\xfe\xfdy'.decode("x_mac_japanese_ttx"), + unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)) + + def test_encode_mac_japanese(self): + self.assertEqual(b'x\xfe\xfdy', + (unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)).encode("x_mac_japanese_ttx")) + + def test_decode_mac_trad_chinese(self): + self.assertEqual(b'\x80'.decode("x_mac_trad_chinese_ttx"), + unichr(0x5C)) + + def test_decode_mac_romanian(self): + self.assertEqual(b'x\xfb'.decode("mac_romanian"), + unichr(0x78)+unichr(0x02DA)) + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/feaLib/builder_test.py fonttools-3.21.2/Tests/feaLib/builder_test.py --- fonttools-3.0/Tests/feaLib/builder_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/builder_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,492 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.feaLib.builder import Builder, addOpenTypeFeatures, \ + addOpenTypeFeaturesFromString +from fontTools.feaLib.error import FeatureLibError +from fontTools.ttLib import TTFont +from fontTools.feaLib.parser import Parser +from fontTools.feaLib import ast +from fontTools.feaLib.lexer import Lexer +import difflib +import os +import shutil +import sys +import tempfile +import unittest + + +def makeTTFont(): + glyphs = """ + .notdef space slash fraction semicolon period comma ampersand + quotedblleft quotedblright quoteleft quoteright + zero one two three four five six seven eight nine + zero.oldstyle one.oldstyle two.oldstyle three.oldstyle + four.oldstyle five.oldstyle six.oldstyle seven.oldstyle + eight.oldstyle nine.oldstyle onequarter onehalf threequarters + 
onesuperior twosuperior threesuperior ordfeminine ordmasculine + A B C D E F G H I J K L M N O P Q R S T U V W X Y Z + a b c d e f g h i j k l m n o p q r s t u v w x y z + A.sc B.sc C.sc D.sc E.sc F.sc G.sc H.sc I.sc J.sc K.sc L.sc M.sc + N.sc O.sc P.sc Q.sc R.sc S.sc T.sc U.sc V.sc W.sc X.sc Y.sc Z.sc + A.alt1 A.alt2 A.alt3 B.alt1 B.alt2 B.alt3 C.alt1 C.alt2 C.alt3 + a.alt1 a.alt2 a.alt3 a.end b.alt c.mid d.alt d.mid + e.begin e.mid e.end m.begin n.end s.end z.end + Eng Eng.alt1 Eng.alt2 Eng.alt3 + A.swash B.swash C.swash D.swash E.swash F.swash G.swash H.swash + I.swash J.swash K.swash L.swash M.swash N.swash O.swash P.swash + Q.swash R.swash S.swash T.swash U.swash V.swash W.swash X.swash + Y.swash Z.swash + f_l c_h c_k c_s c_t f_f f_f_i f_f_l f_i o_f_f_i s_t f_i.begin + a_n_d T_h T_h.swash germandbls ydieresis yacute breve + grave acute dieresis macron circumflex cedilla umlaut ogonek caron + damma hamza sukun kasratan lam_meem_jeem noon.final noon.initial + by feature lookup sub table + """.split() + font = TTFont() + font.setGlyphOrder(glyphs) + return font + + +class BuilderTest(unittest.TestCase): + # Feature files in data/*.fea; output gets compared to data/*.ttx. 
+ TEST_FEATURE_FILES = """ + Attach enum markClass language_required + GlyphClassDef LigatureCaretByIndex LigatureCaretByPos + lookup lookupflag feature_aalt ignore_pos + GPOS_1 GPOS_1_zero GPOS_2 GPOS_2b GPOS_3 GPOS_4 GPOS_5 GPOS_6 GPOS_8 + GSUB_2 GSUB_3 GSUB_6 GSUB_8 + spec4h1 spec4h2 spec5d1 spec5d2 spec5fi1 spec5fi2 spec5fi3 spec5fi4 + spec5f_ii_1 spec5f_ii_2 spec5f_ii_3 spec5f_ii_4 + spec5h1 spec6b_ii spec6d2 spec6e spec6f + spec6h_ii spec6h_iii_1 spec6h_iii_3d spec8a spec8b spec8c + spec9a spec9b spec9c1 spec9c2 spec9c3 spec9d spec9e spec9f spec9g + spec10 + bug453 bug457 bug463 bug501 bug502 bug504 bug505 bug506 bug509 + bug512 bug514 bug568 bug633 + name size size2 multiple_feature_blocks omitted_GlyphClassDef + ZeroValue_SinglePos_horizontal ZeroValue_SinglePos_vertical + ZeroValue_PairPos_horizontal ZeroValue_PairPos_vertical + ZeroValue_ChainSinglePos_horizontal ZeroValue_ChainSinglePos_vertical + """.split() + + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + @staticmethod + def getpath(testfile): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", testfile) + + def temp_path(self, suffix): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + self.num_tempfiles += 1 + return os.path.join(self.tempdir, + "tmp%d%s" % (self.num_tempfiles, suffix)) + + def read_ttx(self, path): + lines = [] + with open(path, "r", encoding="utf-8") as ttx: + for line in ttx.readlines(): + # Elide ttFont attributes because ttLibVersion may change, + # and use os-native line separators so we can run difflib. 
+ if line.startswith("" + os.linesep) + else: + lines.append(line.rstrip() + os.linesep) + return lines + + def expect_ttx(self, font, expected_ttx): + path = self.temp_path(suffix=".ttx") + font.saveXML(path, tables=['head', 'name', 'BASE', 'GDEF', 'GSUB', + 'GPOS', 'OS/2', 'hhea', 'vhea']) + actual = self.read_ttx(path) + expected = self.read_ttx(expected_ttx) + if actual != expected: + for line in difflib.unified_diff( + expected, actual, fromfile=expected_ttx, tofile=path): + sys.stderr.write(line) + self.fail("TTX output is different from expected") + + def build(self, featureFile): + font = makeTTFont() + addOpenTypeFeaturesFromString(font, featureFile) + return font + + def check_feature_file(self, name): + font = makeTTFont() + addOpenTypeFeatures(font, self.getpath("%s.fea" % name)) + self.expect_ttx(font, self.getpath("%s.ttx" % name)) + # Make sure we can produce binary OpenType tables, not just XML. + for tag in ('GDEF', 'GSUB', 'GPOS'): + if tag in font: + font[tag].compile(font) + + def check_fea2fea_file(self, name, base=None, parser=Parser): + font = makeTTFont() + fname = (name + ".fea") if '.' not in name else name + p = parser(self.getpath(fname), glyphNames=font.getGlyphOrder()) + doc = p.parse() + actual = self.normal_fea(doc.asFea().split("\n")) + with open(self.getpath(base or fname), "r", encoding="utf-8") as ofile: + expected = self.normal_fea(ofile.readlines()) + + if expected != actual: + fname = name.rsplit(".", 1)[0] + ".fea" + for line in difflib.unified_diff( + expected, actual, + fromfile=fname + " (expected)", + tofile=fname + " (actual)"): + sys.stderr.write(line+"\n") + self.fail("Fea2Fea output is different from expected. 
" + "Generated:\n{}\n".format("\n".join(actual))) + + def normal_fea(self, lines): + output = [] + skip = 0 + for l in lines: + l = l.strip() + if l.startswith("#test-fea2fea:"): + if len(l) > 15: + output.append(l[15:].strip()) + skip = 1 + x = l.find("#") + if x >= 0: + l = l[:x].strip() + if not len(l): + continue + if skip > 0: + skip = skip - 1 + continue + output.append(l) + return output + + def test_alternateSubst_multipleSubstitutionsForSameGlyph(self): + self.assertRaisesRegex( + FeatureLibError, + "Already defined alternates for glyph \"A\"", + self.build, + "feature test {" + " sub A from [A.alt1 A.alt2];" + " sub B from [B.alt1 B.alt2 B.alt3];" + " sub A from [A.alt1 A.alt2];" + "} test;") + + def test_multipleSubst_multipleSubstitutionsForSameGlyph(self): + self.assertRaisesRegex( + FeatureLibError, + "Already defined substitution for glyph \"f_f_i\"", + self.build, + "feature test {" + " sub f_f_i by f f i;" + " sub c_t by c t;" + " sub f_f_i by f f i;" + "} test;") + + def test_pairPos_redefinition(self): + self.assertRaisesRegex( + FeatureLibError, + r"Already defined position for pair A B " + "at .*:2:[0-9]+", # :2: = line 2 + self.build, + "feature test {\n" + " pos A B 123;\n" # line 2 + " pos A B 456;\n" + "} test;\n") + + def test_singleSubst_multipleSubstitutionsForSameGlyph(self): + self.assertRaisesRegex( + FeatureLibError, + 'Already defined rule for replacing glyph "e" by "E.sc"', + self.build, + "feature test {" + " sub [a-z] by [A.sc-Z.sc];" + " sub e by e.fina;" + "} test;") + + def test_singlePos_redefinition(self): + self.assertRaisesRegex( + FeatureLibError, + "Already defined different position for glyph \"A\"", + self.build, "feature test { pos A 123; pos A 456; } test;") + + def test_feature_outside_aalt(self): + self.assertRaisesRegex( + FeatureLibError, + 'Feature references are only allowed inside "feature aalt"', + self.build, "feature test { feature test; } test;") + + def test_feature_undefinedReference(self): + 
self.assertRaisesRegex( + FeatureLibError, 'Feature none has not been defined', + self.build, "feature aalt { feature none; } aalt;") + + def test_GlyphClassDef_conflictingClasses(self): + self.assertRaisesRegex( + FeatureLibError, "Glyph X was assigned to a different class", + self.build, + "table GDEF {" + " GlyphClassDef [a b], [X], , ;" + " GlyphClassDef [a b X], , , ;" + "} GDEF;") + + def test_languagesystem(self): + builder = Builder(makeTTFont(), (None, None)) + builder.add_language_system(None, 'latn', 'FRA') + builder.add_language_system(None, 'cyrl', 'RUS') + builder.start_feature(location=None, name='test') + self.assertEqual(builder.language_systems, + {('latn', 'FRA'), ('cyrl', 'RUS')}) + + def test_languagesystem_duplicate(self): + self.assertRaisesRegex( + FeatureLibError, + '"languagesystem cyrl RUS" has already been specified', + self.build, "languagesystem cyrl RUS; languagesystem cyrl RUS;") + + def test_languagesystem_none_specified(self): + builder = Builder(makeTTFont(), (None, None)) + builder.start_feature(location=None, name='test') + self.assertEqual(builder.language_systems, {('DFLT', 'dflt')}) + + def test_languagesystem_DFLT_dflt_not_first(self): + self.assertRaisesRegex( + FeatureLibError, + "If \"languagesystem DFLT dflt\" is present, " + "it must be the first of the languagesystem statements", + self.build, "languagesystem latn TRK; languagesystem DFLT dflt;") + + def test_script(self): + builder = Builder(makeTTFont(), (None, None)) + builder.start_feature(location=None, name='test') + builder.set_script(location=None, script='cyrl') + self.assertEqual(builder.language_systems, {('cyrl', 'dflt')}) + + def test_script_in_aalt_feature(self): + self.assertRaisesRegex( + FeatureLibError, + "Script statements are not allowed within \"feature aalt\"", + self.build, "feature aalt { script latn; } aalt;") + + def test_script_in_size_feature(self): + self.assertRaisesRegex( + FeatureLibError, + "Script statements are not allowed within 
\"feature size\"", + self.build, "feature size { script latn; } size;") + + def test_language(self): + builder = Builder(makeTTFont(), (None, None)) + builder.add_language_system(None, 'latn', 'FRA ') + builder.start_feature(location=None, name='test') + builder.set_script(location=None, script='cyrl') + builder.set_language(location=None, language='RUS ', + include_default=False, required=False) + self.assertEqual(builder.language_systems, {('cyrl', 'RUS ')}) + builder.set_language(location=None, language='BGR ', + include_default=True, required=False) + self.assertEqual(builder.language_systems, + {('cyrl', 'BGR ')}) + builder.start_feature(location=None, name='test2') + self.assertRaisesRegex( + FeatureLibError, + "Need non-DFLT script when using non-dflt language " + "\(was: \"FRA \"\)", + builder.set_language, None, 'FRA ', True, False) + + def test_language_in_aalt_feature(self): + self.assertRaisesRegex( + FeatureLibError, + "Language statements are not allowed within \"feature aalt\"", + self.build, "feature aalt { language FRA; } aalt;") + + def test_language_in_size_feature(self): + self.assertRaisesRegex( + FeatureLibError, + "Language statements are not allowed within \"feature size\"", + self.build, "feature size { language FRA; } size;") + + def test_language_required_duplicate(self): + self.assertRaisesRegex( + FeatureLibError, + r"Language FRA \(script latn\) has already specified " + "feature scmp as its required feature", + self.build, + "feature scmp {" + " script latn;" + " language FRA required;" + " language DEU required;" + " substitute [a-z] by [A.sc-Z.sc];" + "} scmp;" + "feature test {" + " script latn;" + " language FRA required;" + " substitute [a-z] by [A.sc-Z.sc];" + "} test;") + + def test_lookup_already_defined(self): + self.assertRaisesRegex( + FeatureLibError, + "Lookup \"foo\" has already been defined", + self.build, "lookup foo {} foo; lookup foo {} foo;") + + def test_lookup_multiple_flags(self): + self.assertRaisesRegex( + 
FeatureLibError, + "Within a named lookup block, all rules must be " + "of the same lookup type and flag", + self.build, + "lookup foo {" + " lookupflag 1;" + " sub f i by f_i;" + " lookupflag 2;" + " sub f f i by f_f_i;" + "} foo;") + + def test_lookup_multiple_types(self): + self.assertRaisesRegex( + FeatureLibError, + "Within a named lookup block, all rules must be " + "of the same lookup type and flag", + self.build, + "lookup foo {" + " sub f f i by f_f_i;" + " sub A from [A.alt1 A.alt2];" + "} foo;") + + def test_lookup_inside_feature_aalt(self): + self.assertRaisesRegex( + FeatureLibError, + "Lookup blocks cannot be placed inside 'aalt' features", + self.build, "feature aalt {lookup L {} L;} aalt;") + + def test_extensions(self): + class ast_BaseClass(ast.MarkClass): + def asFea(self, indent=""): + return "" + + class ast_BaseClassDefinition(ast.MarkClassDefinition): + def asFea(self, indent=""): + return "" + + class ast_MarkBasePosStatement(ast.MarkBasePosStatement): + def asFea(self, indent=""): + if isinstance(self.base, ast.MarkClassName): + res = "" + for bcd in self.base.markClass.definitions: + if res != "": + res += "\n{}".format(indent) + res += "pos base {} {}".format(bcd.glyphs.asFea(), bcd.anchor.asFea()) + for m in self.marks: + res += " mark @{}".format(m.name) + res += ";" + else: + res = "pos base {}".format(self.base.asFea()) + for a, m in self.marks: + res += " {} mark @{}".format(a.asFea(), m.name) + res += ";" + return res + + class testAst(object): + MarkBasePosStatement = ast_MarkBasePosStatement + def __getattr__(self, name): + return getattr(ast, name) + + class testParser(Parser): + def parse_position_base_(self, enumerated, vertical): + location = self.cur_token_location_ + self.expect_keyword_("base") + if enumerated: + raise FeatureLibError( + '"enumerate" is not allowed with ' + 'mark-to-base attachment positioning', + location) + base = self.parse_glyphclass_(accept_glyphname=True) + if self.next_token_ == "<": + marks = 
self.parse_anchor_marks_() + else: + marks = [] + while self.next_token_ == "mark": + self.expect_keyword_("mark") + m = self.expect_markClass_reference_() + marks.append(m) + self.expect_symbol_(";") + return self.ast.MarkBasePosStatement(location, base, marks) + + def parseBaseClass(self): + if not hasattr(self.doc_, 'baseClasses'): + self.doc_.baseClasses = {} + location = self.cur_token_location_ + glyphs = self.parse_glyphclass_(accept_glyphname=True) + anchor = self.parse_anchor_() + name = self.expect_class_name_() + self.expect_symbol_(";") + baseClass = self.doc_.baseClasses.get(name) + if baseClass is None: + baseClass = ast_BaseClass(name) + self.doc_.baseClasses[name] = baseClass + self.glyphclasses_.define(name, baseClass) + bcdef = ast_BaseClassDefinition(location, baseClass, anchor, glyphs) + baseClass.addDefinition(bcdef) + return bcdef + + extensions = { + 'baseClass' : lambda s : s.parseBaseClass() + } + ast = testAst() + + self.check_fea2fea_file( + "baseClass.feax", base="baseClass.fea", parser=testParser) + + def test_markClass_same_glyph_redefined(self): + self.assertRaisesRegex( + FeatureLibError, + "Glyph acute already defined", + self.build, + "markClass [acute] @TOP_MARKS;"*2) + + def test_markClass_same_glyph_multiple_classes(self): + self.assertRaisesRegex( + FeatureLibError, + 'Glyph uni0327 cannot be in both @ogonek and @cedilla', + self.build, + "feature mark {" + " markClass [uni0327 uni0328] @ogonek;" + " pos base [a] mark @ogonek;" + " markClass [uni0327] @cedilla;" + " pos base [a] mark @cedilla;" + "} mark;") + + +def generate_feature_file_test(name): + return lambda self: self.check_feature_file(name) + + +for name in BuilderTest.TEST_FEATURE_FILES: + setattr(BuilderTest, "test_FeatureFile_%s" % name, + generate_feature_file_test(name)) + + +def generate_fea2fea_file_test(name): + return lambda self: self.check_fea2fea_file(name) + + +for name in BuilderTest.TEST_FEATURE_FILES: + setattr(BuilderTest, 
"test_Fea2feaFile_{}".format(name), + generate_fea2fea_file_test(name)) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/feaLib/data/Attach.fea fonttools-3.21.2/Tests/feaLib/data/Attach.fea --- fonttools-3.0/Tests/feaLib/data/Attach.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/Attach.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +table GDEF { + Attach [a e] 7; + Attach a 23; + Attach a 23; +} GDEF; diff -Nru fonttools-3.0/Tests/feaLib/data/Attach.ttx fonttools-3.21.2/Tests/feaLib/data/Attach.ttx --- fonttools-3.0/Tests/feaLib/data/Attach.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/Attach.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/baseClass.fea fonttools-3.21.2/Tests/feaLib/data/baseClass.fea --- fonttools-3.0/Tests/feaLib/data/baseClass.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/baseClass.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ +languagesystem DFLT dflt; + +markClass [acute] @TOP_MARKS; + +feature test { + pos base [a] mark @TOP_MARKS; + pos base b mark @TOP_MARKS; +} test; + + diff -Nru fonttools-3.0/Tests/feaLib/data/baseClass.feax fonttools-3.21.2/Tests/feaLib/data/baseClass.feax --- fonttools-3.0/Tests/feaLib/data/baseClass.feax 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/baseClass.feax 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ +languagesystem DFLT dflt; + +markClass [acute] @TOP_MARKS; +baseClass [a] @BASE_TOPS; +baseClass b @BASE_TOPS; + +feature test { + pos base @BASE_TOPS mark @TOP_MARKS; +} test; + diff -Nru fonttools-3.0/Tests/feaLib/data/bug453.fea fonttools-3.21.2/Tests/feaLib/data/bug453.fea --- fonttools-3.0/Tests/feaLib/data/bug453.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug453.fea 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ +# https://github.com/behdad/fonttools/issues/453 +feature mark { + lookup mark1 { + markClass [acute] @TOP_MARKS; + pos base [e] mark @TOP_MARKS; + } mark1; + lookup mark2 { + markClass [acute] @TOP_MARKS_2; + pos base [e] mark @TOP_MARKS_2; + } mark2; +} mark; diff -Nru fonttools-3.0/Tests/feaLib/data/bug453.ttx fonttools-3.21.2/Tests/feaLib/data/bug453.ttx --- fonttools-3.0/Tests/feaLib/data/bug453.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug453.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug457.fea fonttools-3.21.2/Tests/feaLib/data/bug457.fea --- fonttools-3.0/Tests/feaLib/data/bug457.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug457.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +@group = [A \sub \lookup \feature \by \table]; + +feature liga { + sub @group by G; +} liga; diff -Nru fonttools-3.0/Tests/feaLib/data/bug457.ttx fonttools-3.21.2/Tests/feaLib/data/bug457.ttx --- fonttools-3.0/Tests/feaLib/data/bug457.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug457.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug463.fea fonttools-3.21.2/Tests/feaLib/data/bug463.fea --- fonttools-3.0/Tests/feaLib/data/bug463.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug463.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,6 @@ +# https://github.com/behdad/fonttools/issues/463 +feature ordn { + @DIGIT = [zero one two three four five six seven eight nine]; + sub @DIGIT [A a]' by ordfeminine; + sub 
@DIGIT [O o]' by ordmasculine; +} ordn; diff -Nru fonttools-3.0/Tests/feaLib/data/bug463.ttx fonttools-3.21.2/Tests/feaLib/data/bug463.ttx --- fonttools-3.0/Tests/feaLib/data/bug463.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug463.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,103 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug501.fea fonttools-3.21.2/Tests/feaLib/data/bug501.fea --- fonttools-3.0/Tests/feaLib/data/bug501.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug501.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +# https://github.com/behdad/fonttools/issues/501 +languagesystem DFLT dflt; +feature test { + lookup L { + script grek; + pos T 100; + } L; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/bug501.ttx fonttools-3.21.2/Tests/feaLib/data/bug501.ttx --- fonttools-3.0/Tests/feaLib/data/bug501.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug501.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug502.fea fonttools-3.21.2/Tests/feaLib/data/bug502.fea --- fonttools-3.0/Tests/feaLib/data/bug502.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug502.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +# https://github.com/behdad/fonttools/issues/502 +feature aalt { + sub A by A.alt1; + sub Eng by Eng.alt1; + sub Eng by Eng.alt2; + sub Eng by Eng.alt3; +} aalt; diff -Nru fonttools-3.0/Tests/feaLib/data/bug502.ttx fonttools-3.21.2/Tests/feaLib/data/bug502.ttx --- fonttools-3.0/Tests/feaLib/data/bug502.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug502.ttx 
2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug504.fea fonttools-3.21.2/Tests/feaLib/data/bug504.fea --- fonttools-3.0/Tests/feaLib/data/bug504.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug504.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +# https://github.com/behdad/fonttools/issues/504 + +feature test { + sub [a b c d] by [T E S T]; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/bug504.ttx fonttools-3.21.2/Tests/feaLib/data/bug504.ttx --- fonttools-3.0/Tests/feaLib/data/bug504.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug504.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug505.fea fonttools-3.21.2/Tests/feaLib/data/bug505.fea --- fonttools-3.0/Tests/feaLib/data/bug505.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug505.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ +# https://github.com/behdad/fonttools/issues/505 + +languagesystem armn dflt; +languagesystem avst dflt; +languagesystem bali dflt; +languagesystem bamu dflt; + +feature test { + script linb; + script vai; + sub T by t; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/bug505.ttx fonttools-3.21.2/Tests/feaLib/data/bug505.ttx --- fonttools-3.0/Tests/feaLib/data/bug505.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug505.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug506.fea fonttools-3.21.2/Tests/feaLib/data/bug506.fea --- fonttools-3.0/Tests/feaLib/data/bug506.fea 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/feaLib/data/bug506.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,4 @@ +# https://github.com/behdad/fonttools/issues/506 +feature test { + sub f' i' by f_i; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/bug506.ttx fonttools-3.21.2/Tests/feaLib/data/bug506.ttx --- fonttools-3.0/Tests/feaLib/data/bug506.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug506.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,66 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug509.fea fonttools-3.21.2/Tests/feaLib/data/bug509.fea --- fonttools-3.0/Tests/feaLib/data/bug509.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug509.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +@LETTER_A = [A A.sc A.alt1]; +feature test { + ignore sub A; + sub @LETTER_A' by a; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/bug509.ttx fonttools-3.21.2/Tests/feaLib/data/bug509.ttx --- fonttools-3.0/Tests/feaLib/data/bug509.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug509.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug512.fea fonttools-3.21.2/Tests/feaLib/data/bug512.fea --- fonttools-3.0/Tests/feaLib/data/bug512.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug512.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,6 @@ +feature test { + sub G' by G.swash; + sub H' by H.swash; + sub G' by g; + sub H' by H.swash; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/bug512.ttx fonttools-3.21.2/Tests/feaLib/data/bug512.ttx --- fonttools-3.0/Tests/feaLib/data/bug512.ttx 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/feaLib/data/bug512.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug514.fea fonttools-3.21.2/Tests/feaLib/data/bug514.fea --- fonttools-3.0/Tests/feaLib/data/bug514.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug514.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ +# The many chain targets for this feature should get combined into +# two separate SinglePos lookups: {A:-40, B:-40, C:-40} and {A:-111}. +# https://github.com/fonttools/fonttools/issues/514 +# +# makeotf produces {A:-40, B:-40, C:-40} and {A:-111, B:-40} which +# is redundant. https://github.com/adobe-type-tools/afdko/issues/169 +feature test { + pos X [A-B]' -40 B' -40 A' -40 Y; + pos X A' -111 Y; + pos X B' -40 A' -111 [A-C]' -40 Y; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/bug514.ttx fonttools-3.21.2/Tests/feaLib/data/bug514.ttx --- fonttools-3.0/Tests/feaLib/data/bug514.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug514.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,154 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug568.fea fonttools-3.21.2/Tests/feaLib/data/bug568.fea --- fonttools-3.0/Tests/feaLib/data/bug568.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug568.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ +# https://github.com/behdad/fonttools/issues/568 + +feature tst1 { + script latn; + pos T 
-20; +} tst1; + +feature tst2 { + script cyrl; + pos T -80; +} tst2; diff -Nru fonttools-3.0/Tests/feaLib/data/bug568.ttx fonttools-3.21.2/Tests/feaLib/data/bug568.ttx --- fonttools-3.0/Tests/feaLib/data/bug568.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug568.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/bug633.fea fonttools-3.21.2/Tests/feaLib/data/bug633.fea --- fonttools-3.0/Tests/feaLib/data/bug633.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug633.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ +# https://github.com/fonttools/fonttools/issues/633 + +@public.kern1.K = [K X]; +@public.kern2.O = [C O]; +@public.kern2.V = [V W]; + +feature kern { + pos @public.kern1.K @public.kern2.O -20; + pos @public.kern1.K @public.kern2.V 0; +} kern; diff -Nru fonttools-3.0/Tests/feaLib/data/bug633.ttx fonttools-3.21.2/Tests/feaLib/data/bug633.ttx --- fonttools-3.0/Tests/feaLib/data/bug633.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/bug633.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/enum.fea fonttools-3.21.2/Tests/feaLib/data/enum.fea --- fonttools-3.0/Tests/feaLib/data/enum.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/enum.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +languagesystem DFLT dflt; + +feature kern { + @Y_LC = [y yacute ydieresis]; + @SMALL_PUNC = [comma semicolon period]; + enum pos @Y_LC @SMALL_PUNC -100; +} kern; diff -Nru fonttools-3.0/Tests/feaLib/data/enum.ttx fonttools-3.21.2/Tests/feaLib/data/enum.ttx --- fonttools-3.0/Tests/feaLib/data/enum.ttx 
1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/enum.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/feature_aalt.fea fonttools-3.21.2/Tests/feaLib/data/feature_aalt.fea --- fonttools-3.0/Tests/feaLib/data/feature_aalt.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/feature_aalt.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,29 @@ +languagesystem DFLT dflt; + +feature aalt { + feature sups; + feature frac; + feature ordn; +} aalt; + +feature sups { + sub one by onesuperior; + sub two by twosuperior; + sub three by threesuperior; +} sups; + +feature frac { + sub one slash four by onequarter; + sub one slash two by onehalf; + sub three slash four by threequarters; +} frac; + +feature ordn { + sub [zero one two three four five six seven eight nine] [A a]' by ordfeminine; + sub [zero one two three four five six seven eight nine] [O o]' by ordmasculine; +} ordn; + +feature liga { + sub f i by f_i; + sub f l by f_l; +} liga; diff -Nru fonttools-3.0/Tests/feaLib/data/feature_aalt.ttx fonttools-3.21.2/Tests/feaLib/data/feature_aalt.ttx --- fonttools-3.0/Tests/feaLib/data/feature_aalt.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/feature_aalt.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,184 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GlyphClassDef.fea fonttools-3.21.2/Tests/feaLib/data/GlyphClassDef.fea --- 
fonttools-3.0/Tests/feaLib/data/GlyphClassDef.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GlyphClassDef.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +table GDEF { + GlyphClassDef [a], [b], [c], [d]; + GlyphClassDef [e], [f], [g], [h]; + GlyphClassDef [i], [j], [k], [l]; +} GDEF; diff -Nru fonttools-3.0/Tests/feaLib/data/GlyphClassDef.ttx fonttools-3.21.2/Tests/feaLib/data/GlyphClassDef.ttx --- fonttools-3.0/Tests/feaLib/data/GlyphClassDef.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GlyphClassDef.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_1.fea fonttools-3.21.2/Tests/feaLib/data/GPOS_1.fea --- fonttools-3.0/Tests/feaLib/data/GPOS_1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_1.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,42 @@ +languagesystem DFLT dflt; + +@sevenEightNine = [seven eight nine]; + +feature kern { + pos zero 0; + + pos [one two three] <-80 0 -160 0>; + pos A <1 2 3 4 >; + pos B <1 2 3 4 >; + pos C <1 2 3 4 >; + pos four 400; + pos four.oldstyle 401; + pos five <-80 0 -160 0>; + pos six -200; + pos @sevenEightNine -100; + + pos P <1 0 800 0>; + pos Q <1 0 801 0>; + pos R <1 0 802 0>; + pos S <1 1 803 0>; + pos T <1 1 804 0>; + pos U <1 1 805 0>; + + # The AFDKO makeotf tool accepts re-definitions of previously defined + # single adjustment positionings, provided the re-definition is using + # the same value. We replicate this behavior. + pos four 400; + pos four <0 0 400 0>; + pos nine -100; +} kern; + +# According to the OpenType Feature File specification section 2.e.iv, +# the following should be interpreted as vertical advance adjustment +# because -100 (a value record format A) appears within a ‘vkrn’ feature. 
+# However, the AFDKO makeotf tool v2.0.90 (built on Nov 19, 2015) still +# makes it a horizontal advance adjustment. In our implementation, +# we follow the specification, so we produce different output than makeotf. +# https://github.com/adobe-type-tools/afdko/issues/85 +feature vkrn { + pos A -100; +} vkrn; diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_1.ttx fonttools-3.21.2/Tests/feaLib/data/GPOS_1.ttx --- fonttools-3.0/Tests/feaLib/data/GPOS_1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,180 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_1_zero.fea fonttools-3.21.2/Tests/feaLib/data/GPOS_1_zero.fea --- fonttools-3.0/Tests/feaLib/data/GPOS_1_zero.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_1_zero.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +# https://github.com/behdad/fonttools/issues/471 +feature test { + pos zero 0; + pos four 500; +} test; \ No newline at end of file diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_1_zero.ttx fonttools-3.21.2/Tests/feaLib/data/GPOS_1_zero.ttx --- fonttools-3.0/Tests/feaLib/data/GPOS_1_zero.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_1_zero.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_2b.fea fonttools-3.21.2/Tests/feaLib/data/GPOS_2b.fea --- fonttools-3.0/Tests/feaLib/data/GPOS_2b.fea 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/feaLib/data/GPOS_2b.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +@PUNC = [comma semicolon period]; + +feature kern { + pos [A] @PUNC 1; + pos [B C] [comma] 2; + pos [D E F] [comma] 3; + pos [D E F] [semicolon period] 4; + pos [G] @PUNC <5 5 5 5>; +} kern; diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_2b.ttx fonttools-3.21.2/Tests/feaLib/data/GPOS_2b.ttx --- fonttools-3.0/Tests/feaLib/data/GPOS_2b.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_2b.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,127 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_2.fea fonttools-3.21.2/Tests/feaLib/data/GPOS_2.fea --- fonttools-3.0/Tests/feaLib/data/GPOS_2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_2.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ +languagesystem DFLT dflt; + +# Mixes kerning between single glyphs, and class-based kerning. 
+# https://github.com/behdad/fonttools/issues/456 +lookup MixedKerning { + pos v v 14; + pos [D O Q] [T V W] -26; +} MixedKerning; + +lookup GlyphKerning { + pos T one 100; + pos T two 200; + pos T two.oldstyle 200; + pos T three 300; + pos T four 400; + pos X a 100; + pos X b 200; + pos Y a 100; + pos Y b 200; + pos Y c <3 3 3 3>; +} GlyphKerning; + +feature kern { + lookup GlyphKerning; + lookup MixedKerning; +} kern; + +feature vkrn { + pos T one 100; +} vkrn; diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_2.ttx fonttools-3.21.2/Tests/feaLib/data/GPOS_2.ttx --- fonttools-3.0/Tests/feaLib/data/GPOS_2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,184 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_3.fea fonttools-3.21.2/Tests/feaLib/data/GPOS_3.fea --- fonttools-3.0/Tests/feaLib/data/GPOS_3.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_3.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ +languagesystem DFLT dflt; + +anchorDef 3 4 contourpoint 2 ANCH342; + +feature kern { + pos cursive zero ; + pos cursive one ; + pos cursive two ; + pos cursive three ; + pos cursive four ; + pos cursive five > ; +} kern; diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_3.ttx fonttools-3.21.2/Tests/feaLib/data/GPOS_3.ttx --- fonttools-3.0/Tests/feaLib/data/GPOS_3.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_3.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,114 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_4.fea fonttools-3.21.2/Tests/feaLib/data/GPOS_4.fea --- fonttools-3.0/Tests/feaLib/data/GPOS_4.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_4.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ +languagesystem DFLT dflt; + +markClass [acute grave] @TOP_MARKS; +markClass macron @TOP_MARKS; +markClass [cedilla] @BOTTOM_MARKS; +markClass [ogonek] @SIDE_MARKS; + +feature test { + pos base a mark @TOP_MARKS mark @BOTTOM_MARKS; + pos base [b c] mark @BOTTOM_MARKS; + pos base d mark @SIDE_MARKS; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_4.ttx fonttools-3.21.2/Tests/feaLib/data/GPOS_4.ttx --- fonttools-3.0/Tests/feaLib/data/GPOS_4.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_4.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,147 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_5.fea fonttools-3.21.2/Tests/feaLib/data/GPOS_5.fea --- fonttools-3.0/Tests/feaLib/data/GPOS_5.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_5.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ +markClass [acute grave] @TOP_MARKS; +markClass macron @TOP_MARKS; +markClass [cedilla] @BOTTOM_MARKS; +markClass [ogonek] @OGONEK; + +feature test { + + pos ligature [c_t s_t] mark @TOP_MARKS mark @BOTTOM_MARKS + ligComponent mark @TOP_MARKS mark @BOTTOM_MARKS mark @OGONEK; + + pos ligature f_l mark @TOP_MARKS mark @BOTTOM_MARKS + ligComponent mark @TOP_MARKS mark @BOTTOM_MARKS; + + pos ligature [f_f_l] mark @TOP_MARKS mark 
@BOTTOM_MARKS + ligComponent mark @TOP_MARKS mark @BOTTOM_MARKS + ligComponent mark @TOP_MARKS mark @BOTTOM_MARKS; + +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_5.ttx fonttools-3.21.2/Tests/feaLib/data/GPOS_5.ttx --- fonttools-3.0/Tests/feaLib/data/GPOS_5.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_5.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,229 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_6.fea fonttools-3.21.2/Tests/feaLib/data/GPOS_6.fea --- fonttools-3.0/Tests/feaLib/data/GPOS_6.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_6.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ +languagesystem DFLT dflt; + +markClass [acute grave] @TOP_MARKS; +markClass macron @TOP_MARKS; +markClass [cedilla] @BOTTOM_MARKS; + +feature test { + pos mark [acute grave macron ogonek] mark @TOP_MARKS mark @BOTTOM_MARKS; + pos mark [dieresis caron] mark @TOP_MARKS; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_6.ttx fonttools-3.21.2/Tests/feaLib/data/GPOS_6.ttx --- fonttools-3.0/Tests/feaLib/data/GPOS_6.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_6.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,162 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_8.fea fonttools-3.21.2/Tests/feaLib/data/GPOS_8.fea --- fonttools-3.0/Tests/feaLib/data/GPOS_8.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_8.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ +languagesystem DFLT dflt; + +lookup ChainedSinglePos { + pos A one' 1 two' 2 one' -1 two' -2; +} ChainedSinglePos; + +lookup L1 { + pos one 100; +} L1; + +lookup L2 { + pos two 200; +} L2; + +lookup ChainedContextualPos { + pos [A a] [B b] I' lookup L1 N' lookup L2 P' [Y y] [Z z]; +} ChainedContextualPos; + +feature test { + lookup ChainedSinglePos; + lookup ChainedContextualPos; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/GPOS_8.ttx fonttools-3.21.2/Tests/feaLib/data/GPOS_8.ttx --- fonttools-3.0/Tests/feaLib/data/GPOS_8.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GPOS_8.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,176 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GSUB_2.fea fonttools-3.21.2/Tests/feaLib/data/GSUB_2.fea --- fonttools-3.0/Tests/feaLib/data/GSUB_2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GSUB_2.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ +feature f1 { + sub c_t by c t; + sub f_i by f i; + sub f_f_i by f f i; +} f1; + + +# Even if it has exactly the same content as feature f1, +# the lookup should not be shared. 
+feature f2 { + sub c_t by c t; + sub f_i by f i; + sub f_f_i by f f i; +} f2; diff -Nru fonttools-3.0/Tests/feaLib/data/GSUB_2.ttx fonttools-3.21.2/Tests/feaLib/data/GSUB_2.ttx --- fonttools-3.0/Tests/feaLib/data/GSUB_2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GSUB_2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GSUB_3.fea fonttools-3.21.2/Tests/feaLib/data/GSUB_3.fea --- fonttools-3.0/Tests/feaLib/data/GSUB_3.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GSUB_3.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ +feature f1 { + sub A from [A.alt1 A.alt2]; + sub B from [B.alt1 B.alt2 B.alt3]; + sub C from [C.alt1]; +} f1; + + +# Even if it has exactly the same content as feature f1, +# the lookup should not be shared. +feature f2 { + sub A from [A.alt1 A.alt2]; + sub B from [B.alt1 B.alt2 B.alt3]; + sub C from [C.alt1]; +} f2; diff -Nru fonttools-3.0/Tests/feaLib/data/GSUB_3.ttx fonttools-3.21.2/Tests/feaLib/data/GSUB_3.ttx --- fonttools-3.0/Tests/feaLib/data/GSUB_3.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GSUB_3.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,81 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GSUB_6.fea fonttools-3.21.2/Tests/feaLib/data/GSUB_6.fea --- fonttools-3.0/Tests/feaLib/data/GSUB_6.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GSUB_6.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,28 @@ +lookup ChainedSingleSubst { + sub [one two] three A' by A.sc; + sub [B-D]' seven [eight nine] by [B.sc-D.sc]; +} ChainedSingleSubst; + +lookup ChainedMultipleSubst { + sub [A-C a-c] [D d] E c_t' V [W w] 
[X-Z x-z] by c t; +} ChainedMultipleSubst; + +lookup ChainedAlternateSubst { + sub [space comma semicolon] e' from [e e.begin]; +} ChainedAlternateSubst; + +lookup ChainedLigatureSubst { + sub A [C c]' [T t]' Z by c_t; +} ChainedLigatureSubst; + +lookup ChainedContextualSubst { + sub A D E c_t' lookup ChainedMultipleSubst V W X; +} ChainedContextualSubst; + +feature test { + lookup ChainedSingleSubst; + lookup ChainedMultipleSubst; + lookup ChainedAlternateSubst; + lookup ChainedLigatureSubst; + lookup ChainedContextualSubst; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/GSUB_6.ttx fonttools-3.21.2/Tests/feaLib/data/GSUB_6.ttx --- fonttools-3.0/Tests/feaLib/data/GSUB_6.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GSUB_6.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,267 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/GSUB_8.fea fonttools-3.21.2/Tests/feaLib/data/GSUB_8.fea --- fonttools-3.0/Tests/feaLib/data/GSUB_8.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GSUB_8.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ +languagesystem DFLT dflt; + +feature test { + rsub [a A] [b B] [c C] q' [d D] [e E] [f F] by Q; + rsub [a A] [b B] [c C] [s-z]' [d D] [e E] [f F] by [S-Z]; + + # Having no context for a reverse chaining substitution rule + # is a little degenerate (we define a chain without linking it + # to anything else), but makeotf accepts this. 
+ rsub p by P; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/GSUB_8.ttx fonttools-3.21.2/Tests/feaLib/data/GSUB_8.ttx --- fonttools-3.0/Tests/feaLib/data/GSUB_8.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/GSUB_8.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,129 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/ignore_pos.fea fonttools-3.21.2/Tests/feaLib/data/ignore_pos.fea --- fonttools-3.0/Tests/feaLib/data/ignore_pos.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ignore_pos.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +feature test { + ignore pos f [a e] d'; + ignore pos a d' d; + pos [a e n] d' -20; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/ignore_pos.ttx fonttools-3.21.2/Tests/feaLib/data/ignore_pos.ttx --- fonttools-3.0/Tests/feaLib/data/ignore_pos.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ignore_pos.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,101 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/include/include1.fea fonttools-3.21.2/Tests/feaLib/data/include/include1.fea --- fonttools-3.0/Tests/feaLib/data/include/include1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/include/include1.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,3 @@ +I1a +include(../include0.fea) +I1b diff -Nru fonttools-3.0/Tests/feaLib/data/include/include3.fea fonttools-3.21.2/Tests/feaLib/data/include/include3.fea --- fonttools-3.0/Tests/feaLib/data/include/include3.fea 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/include/include3.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,4 @@ +I3a +include(subdir/include2.fea); +I3b + diff -Nru fonttools-3.0/Tests/feaLib/data/include/include4.fea fonttools-3.21.2/Tests/feaLib/data/include/include4.fea --- fonttools-3.0/Tests/feaLib/data/include/include4.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/include/include4.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,4 @@ +I4a +include(include3.fea); +I4b + diff -Nru fonttools-3.0/Tests/feaLib/data/include/include5.fea fonttools-3.21.2/Tests/feaLib/data/include/include5.fea --- fonttools-3.0/Tests/feaLib/data/include/include5.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/include/include5.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,3 @@ +I5a +include(include4.fea); +I5b diff -Nru fonttools-3.0/Tests/feaLib/data/include/include6.fea fonttools-3.21.2/Tests/feaLib/data/include/include6.fea --- fonttools-3.0/Tests/feaLib/data/include/include6.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/include/include6.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,3 @@ +I6a +include(include5.fea); +I6b diff -Nru fonttools-3.0/Tests/feaLib/data/include/includemissingfile.fea fonttools-3.21.2/Tests/feaLib/data/include/includemissingfile.fea --- fonttools-3.0/Tests/feaLib/data/include/includemissingfile.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/include/includemissingfile.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +include(missingfile.fea); diff -Nru fonttools-3.0/Tests/feaLib/data/include/includeself.fea fonttools-3.21.2/Tests/feaLib/data/include/includeself.fea --- fonttools-3.0/Tests/feaLib/data/include/includeself.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/include/includeself.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ 
+include(includeself.fea); diff -Nru fonttools-3.0/Tests/feaLib/data/include/subdir/include2.fea fonttools-3.21.2/Tests/feaLib/data/include/subdir/include2.fea --- fonttools-3.0/Tests/feaLib/data/include/subdir/include2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/include/subdir/include2.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,3 @@ +I2a +include(include1.fea); +I2b diff -Nru fonttools-3.0/Tests/feaLib/data/include0.fea fonttools-3.21.2/Tests/feaLib/data/include0.fea --- fonttools-3.0/Tests/feaLib/data/include0.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/include0.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +I0 diff -Nru fonttools-3.0/Tests/feaLib/data/language_required.fea fonttools-3.21.2/Tests/feaLib/data/language_required.fea --- fonttools-3.0/Tests/feaLib/data/language_required.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/language_required.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ +languagesystem latn DEU; +languagesystem latn FRA; +languagesystem latn ITA; + +feature hlig { + script latn; + language DEU exclude_dflt required; + sub D E U by D.sc; + + language FRA exclude_dflt; + sub F R A by F.sc; +} hlig; + +feature liga { + script latn; + language ITA exclude_dflt required; + sub I T A by I.sc; +} liga; + +feature scmp { + sub [a-z] by [A.sc-Z.sc]; +} scmp; diff -Nru fonttools-3.0/Tests/feaLib/data/language_required.ttx fonttools-3.21.2/Tests/feaLib/data/language_required.ttx --- fonttools-3.0/Tests/feaLib/data/language_required.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/language_required.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,139 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/feaLib/data/LigatureCaretByIndex.fea fonttools-3.21.2/Tests/feaLib/data/LigatureCaretByIndex.fea --- fonttools-3.0/Tests/feaLib/data/LigatureCaretByIndex.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/LigatureCaretByIndex.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ +table GDEF { + LigatureCaretByIndex [c_t s_t] 11; + + # The OpenType Feature File specification does not define what should + # happen when there are multiple LigatureCaretByIndex statements for + # the same glyph. Our behavior matches that of Adobe makeotf v2.0.90. + # https://github.com/adobe-type-tools/afdko/issues/95 + LigatureCaretByIndex o_f_f_i 66 33; + LigatureCaretByIndex o_f_f_i 55; +} GDEF; diff -Nru fonttools-3.0/Tests/feaLib/data/LigatureCaretByIndex.ttx fonttools-3.21.2/Tests/feaLib/data/LigatureCaretByIndex.ttx --- fonttools-3.0/Tests/feaLib/data/LigatureCaretByIndex.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/LigatureCaretByIndex.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/LigatureCaretByPos.fea fonttools-3.21.2/Tests/feaLib/data/LigatureCaretByPos.fea --- fonttools-3.0/Tests/feaLib/data/LigatureCaretByPos.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/LigatureCaretByPos.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ +table GDEF { + LigatureCaretByPos [c_h c_k] 500; + + # The OpenType Feature File specification does not define what should + # happen when there are multiple LigatureCaretByPos statements for + # the same glyph. Our behavior matches that of Adobe makeotf v2.0.90. 
+ # https://github.com/adobe-type-tools/afdko/issues/95 + LigatureCaretByPos o_f_f_i 700 300; + LigatureCaretByPos o_f_f_i 900; +} GDEF; diff -Nru fonttools-3.0/Tests/feaLib/data/LigatureCaretByPos.ttx fonttools-3.21.2/Tests/feaLib/data/LigatureCaretByPos.ttx --- fonttools-3.0/Tests/feaLib/data/LigatureCaretByPos.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/LigatureCaretByPos.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/lookup.fea fonttools-3.21.2/Tests/feaLib/data/lookup.fea --- fonttools-3.0/Tests/feaLib/data/lookup.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/lookup.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,24 @@ +# Three features. In the output, they should all point to the same lookup. + +lookup SomeLookup { + sub f f i by f_f_i; + sub f i by f_i; +} SomeLookup; + +feature tst1 { + lookup SomeLookup; +} tst1; + +feature tst2 { + lookup SomeLookup; +} tst2; + +feature tst3 { + lookup EmbeddedLookup { + sub A by A.sc; + } EmbeddedLookup; +} tst3; + +feature tst4 { + lookup EmbeddedLookup; +} tst4; diff -Nru fonttools-3.0/Tests/feaLib/data/lookupflag.fea fonttools-3.21.2/Tests/feaLib/data/lookupflag.fea --- fonttools-3.0/Tests/feaLib/data/lookupflag.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/lookupflag.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,97 @@ +languagesystem DFLT dflt; + +@TOP_MARKS = [acute grave macron]; +markClass [cedilla ogonek] @BOTTOM_MARKS; +@FRENCH_MARKS = [acute grave cedilla dieresis circumflex]; +@MARKS_WITH_DUPLICATES = [breve caron umlaut breve caron umlaut]; + +lookup A { + lookupflag RightToLeft; + pos one 1; +} A; + +lookup B { + lookupflag IgnoreBaseGlyphs; + pos two 2; +} B; + +lookup C { + lookupflag IgnoreLigatures; + pos four 4; +} C; + +lookup D { +#test-fea2fea: lookupflag 
RightToLeft IgnoreBaseGlyphs IgnoreLigatures; + lookupflag 7; + pos seven 7; +} D; + +lookup E { + lookupflag IgnoreMarks; + pos eight 8; +} E; + +lookup F { + lookupflag MarkAttachmentType @TOP_MARKS; + pos F 1; +} F; + +lookup G { + lookupflag MarkAttachmentType @BOTTOM_MARKS; + pos G 1; +} G; + +lookup H { + lookupflag IgnoreLigatures MarkAttachmentType @TOP_MARKS; + pos H 1; +} H; + +lookup I { + lookupflag UseMarkFilteringSet @TOP_MARKS; + pos I 1; +} I; + +lookup J { + # @FRENCH_MARKS overlaps with @TOP_MARKS. + # Other than with MarkAttachmentType, the same glyph may appear + # in multiple glyphsets for UseMarkFilteringSet. Make sure that + # our implementation can handle this. + lookupflag UseMarkFilteringSet @FRENCH_MARKS; + pos J 1; +} J; + +lookup K { + lookupflag IgnoreLigatures UseMarkFilteringSet @TOP_MARKS; + pos K 1; +} K; + +lookup L { + pos L 1; +} L; + +lookup M { + lookupflag UseMarkFilteringSet @MARKS_WITH_DUPLICATES; + pos M 1; +} M; + +lookup N { + lookupflag MarkAttachmentType @MARKS_WITH_DUPLICATES; + pos N 1; +} N; + +feature test { + lookup A; + lookup B; + lookup C; + lookup D; + lookup E; + lookup F; + lookup G; + lookup H; + lookup I; + lookup J; + lookup K; + lookup L; + lookup M; + lookup N; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/lookupflag.ttx fonttools-3.21.2/Tests/feaLib/data/lookupflag.ttx --- fonttools-3.0/Tests/feaLib/data/lookupflag.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/lookupflag.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,259 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/lookup.ttx fonttools-3.21.2/Tests/feaLib/data/lookup.ttx --- fonttools-3.0/Tests/feaLib/data/lookup.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/lookup.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,78 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/markClass.fea fonttools-3.21.2/Tests/feaLib/data/markClass.fea --- fonttools-3.0/Tests/feaLib/data/markClass.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/markClass.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ +languagesystem DFLT dflt; + +markClass [acute] @TOP_MARKS; + +feature foo { + markClass [grave] @TOP_MARKS; + markClass cedilla @BOTTOM_MARKS; +} foo; + +feature bar { + markClass [dieresis breve] @TOP_MARKS; +} bar; diff -Nru fonttools-3.0/Tests/feaLib/data/markClass.ttx fonttools-3.21.2/Tests/feaLib/data/markClass.ttx --- fonttools-3.0/Tests/feaLib/data/markClass.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/markClass.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/mini.fea fonttools-3.21.2/Tests/feaLib/data/mini.fea --- fonttools-3.0/Tests/feaLib/data/mini.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/mini.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,19 @@ +# Example file from OpenType Feature File specification, section 1. 
+# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +# Script and language coverage +languagesystem DFLT dflt; +languagesystem latn dflt; + +# Ligature formation +feature liga { + substitute f i by f_i; + substitute f l by f_l; +} liga; + +# Kerning +feature kern { + position A Y -100; + position a y -80; + position s f' <0 0 10 0> t; +} kern; diff -Nru fonttools-3.0/Tests/feaLib/data/multiple_feature_blocks.fea fonttools-3.21.2/Tests/feaLib/data/multiple_feature_blocks.fea --- fonttools-3.0/Tests/feaLib/data/multiple_feature_blocks.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/multiple_feature_blocks.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ +languagesystem DFLT dflt; +languagesystem latn dflt; +languagesystem latn TRK; + +feature liga { + lookup liga_fl { + sub f l by f_l; + } liga_fl; +} liga; + +feature liga { + lookup liga_fi { + sub f i by f_i; + } liga_fi; + + script latn; + language TRK exclude_dflt; +} liga; diff -Nru fonttools-3.0/Tests/feaLib/data/multiple_feature_blocks.ttx fonttools-3.21.2/Tests/feaLib/data/multiple_feature_blocks.ttx --- fonttools-3.0/Tests/feaLib/data/multiple_feature_blocks.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/multiple_feature_blocks.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,82 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/name.fea fonttools-3.21.2/Tests/feaLib/data/name.fea --- fonttools-3.0/Tests/feaLib/data/name.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/name.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,23 @@ +table name { +#test-fea2fea: + nameid 1 "Ignored-1"; +#test-fea2fea: + nameid 2 "Ignored-2"; +#test-fea2fea: + nameid 3 "Ignored-3"; +#test-fea2fea: + nameid 4 "Ignored-4"; +#test-fea2fea: + nameid 5 "Ignored-5"; +#test-fea2fea: + nameid 6 
"Ignored-6"; +#test-fea2fea: nameid 7 "Test7"; + nameid 7 3 "Test7"; + nameid 8 1 "Test8"; +#test-fea2fea: nameid 9 "Test9"; + nameid 9 3 1 0x0409 "Test9"; + nameid 10 1 "Test10"; +#test-fea2fea: nameid 11 1 "Test11"; + nameid 11 1 0 0 "Test11"; +} name; + diff -Nru fonttools-3.0/Tests/feaLib/data/name.ttx fonttools-3.21.2/Tests/feaLib/data/name.ttx --- fonttools-3.0/Tests/feaLib/data/name.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/name.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + Test7 + + + Test8 + + + Test9 + + + Test10 + + + Test11 + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/omitted_GlyphClassDef.fea fonttools-3.21.2/Tests/feaLib/data/omitted_GlyphClassDef.fea --- fonttools-3.0/Tests/feaLib/data/omitted_GlyphClassDef.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/omitted_GlyphClassDef.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,6 @@ +@BASE = [f i]; +@MARKS = [acute grave]; + +table GDEF { + GlyphClassDef @BASE, , @MARKS, ; +} GDEF; diff -Nru fonttools-3.0/Tests/feaLib/data/omitted_GlyphClassDef.ttx fonttools-3.21.2/Tests/feaLib/data/omitted_GlyphClassDef.ttx --- fonttools-3.0/Tests/feaLib/data/omitted_GlyphClassDef.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/omitted_GlyphClassDef.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/size2.fea fonttools-3.21.2/Tests/feaLib/data/size2.fea --- fonttools-3.0/Tests/feaLib/data/size2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/size2.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,3 @@ +feature size { + parameters 10.0 0; +} size; diff -Nru fonttools-3.0/Tests/feaLib/data/size2.ttx fonttools-3.21.2/Tests/feaLib/data/size2.ttx --- fonttools-3.0/Tests/feaLib/data/size2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/size2.ttx 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/size.fea fonttools-3.21.2/Tests/feaLib/data/size.fea --- fonttools-3.0/Tests/feaLib/data/size.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/size.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,4 @@ +feature size { +#test-fea2fea: parameters 10.0 0; + parameters 10.0 0 0 0; +} size; diff -Nru fonttools-3.0/Tests/feaLib/data/size.ttx fonttools-3.21.2/Tests/feaLib/data/size.ttx --- fonttools-3.0/Tests/feaLib/data/size.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/size.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec10.fea fonttools-3.21.2/Tests/feaLib/data/spec10.fea --- fonttools-3.0/Tests/feaLib/data/spec10.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec10.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ +# OpenType Feature File specification, section 10. 
+# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +anon sbit { + /* sbit table specifications */ + 72 % dpi + sizes { + 10, 12, 14 source { + all "Generic/JGeneric" + } + } +} sbit; diff -Nru fonttools-3.0/Tests/feaLib/data/spec10.ttx fonttools-3.21.2/Tests/feaLib/data/spec10.ttx --- fonttools-3.0/Tests/feaLib/data/spec10.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec10.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,4 @@ + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec4h1.fea fonttools-3.21.2/Tests/feaLib/data/spec4h1.fea --- fonttools-3.0/Tests/feaLib/data/spec4h1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec4h1.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,64 @@ +# OpenType Feature File specification, section 4.h, example 1. +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +languagesystem DFLT dflt; +languagesystem latn dflt; +languagesystem latn DEU; +languagesystem latn TRK; +languagesystem cyrl dflt; + +feature smcp { + sub [a-z] by [A.sc-Z.sc]; + + # Since all the rules in this feature are of the same type, they + # will be grouped in a single lookup. Since no script or language + # keyword has been specified yet, the lookup will be registered + # for this feature under all the language systems. +} smcp; + +feature liga { + sub f f by f_f; + sub f i by f_i; + sub f l by f_l; + + # Since all the rules in this feature are of the same type, they + # will be grouped in a single lookup. Since no script or language + # keyword has been specified yet, the lookup will be registered + # for this feature under all the language systems. + + script latn; + language dflt; + # lookupflag 0; (implicit) + sub c t by c_t; + sub c s by c_s; + + # The rules above will be placed in a lookup that is registered + # for all the specified languages for the script latn, but not any + # other scripts. 
+ + language DEU; + # script latn; (stays the same) + # lookupflag 0; (stays the same) + sub c h by c_h; + sub c k by c_k; + + # The rules above will be placed in a lookup that is registered + # only under the script latn, language DEU. + + language TRK; + + # This will inherit both the top level default rules - the rules + # defined before the first 'script' statement, and the + # script-level default rules for 'latn': all the lookups of this + # feature defined after the 'script latn' statement, and before + # the language DEU statement. If TRK were not named here, it + # would not inherit the default rules for the script latn. +} liga; + +# TODO(sascha): Uncomment once we support 'pos' statements. +# feature kern { +# pos a y -150; +# # [more pos statements] +# # All the rules in this feature will be grouped in a single lookup +# # that is is registered under all the language systems. +# } kern; diff -Nru fonttools-3.0/Tests/feaLib/data/spec4h1.ttx fonttools-3.21.2/Tests/feaLib/data/spec4h1.ttx --- fonttools-3.0/Tests/feaLib/data/spec4h1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec4h1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,169 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec4h2.fea fonttools-3.21.2/Tests/feaLib/data/spec4h2.fea --- fonttools-3.0/Tests/feaLib/data/spec4h2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec4h2.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,40 @@ +# OpenType Feature File specification, section 4.h, example 2. 
+# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +languagesystem DFLT dflt; +languagesystem latn dflt; +languagesystem latn DEU; +languagesystem cyrl dflt; +languagesystem cyrl SRB; +languagesystem grek dflt; + +feature liga { + # start of default rules that are applied under all language systems. + lookup HAS_I { + sub f f i by f_f_i; + sub f i by f_i; + } HAS_I; + + lookup NO_I { + sub f f l by f_f_l; + sub f f by f_f; + } NO_I; + + # end of default rules that are applied under all language systems. + + script latn; + language dflt; + # default lookup for latn included under all languages for the latn script + sub f l by f_l; + + language DEU; + # default lookups included under the DEU language + sub s s by germandbls; # This is also included. + + language TRK exclude_dflt; # default lookups are excluded. + lookup NO_I; # Only this lookup is included under the TRK language + + script cyrl; + language SRB; + sub c t by c_t; # this rule will apply only under script cyrl language SRB. +} liga; diff -Nru fonttools-3.0/Tests/feaLib/data/spec4h2.ttx fonttools-3.21.2/Tests/feaLib/data/spec4h2.ttx --- fonttools-3.0/Tests/feaLib/data/spec4h2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec4h2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,180 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5d1.fea fonttools-3.21.2/Tests/feaLib/data/spec5d1.fea --- fonttools-3.0/Tests/feaLib/data/spec5d1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5d1.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,23 @@ +# OpenType Feature File specification, section 5.d, example 1. 
+# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +feature F1 { + sub [one one.oldstyle] [slash fraction] [two two.oldstyle] by onehalf; +} F1; + +# Since the OpenType specification does not allow ligature substitutions +# to be specified on target sequences that contain glyph classes, the +# implementation software will enumerate all specific glyph sequences +# if glyph classes are detected in . Thus, the above +# example produces an identical representation in the font as if all +# the sequences were manually enumerated by the font editor: +feature F2 { + sub one slash two by onehalf; + sub one.oldstyle slash two by onehalf; + sub one fraction two by onehalf; + sub one.oldstyle fraction two by onehalf; + sub one slash two.oldstyle by onehalf; + sub one.oldstyle slash two.oldstyle by onehalf; + sub one fraction two.oldstyle by onehalf; + sub one.oldstyle fraction two.oldstyle by onehalf; +} F2; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5d1.ttx fonttools-3.21.2/Tests/feaLib/data/spec5d1.ttx --- fonttools-3.0/Tests/feaLib/data/spec5d1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5d1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,81 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5d2.fea fonttools-3.21.2/Tests/feaLib/data/spec5d2.fea --- fonttools-3.0/Tests/feaLib/data/spec5d2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5d2.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ +# OpenType Feature File specification, section 5.d, example 2. +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +# A contiguous set of ligature rules does not need to be ordered in +# any particular way by the font editor; the implementation software +# must do the appropriate sorting. 
+ +# So: +feature F1 { + sub f f by f_f; + sub f i by f_i; + sub f f i by f_f_i; + sub o f f i by o_f_f_i; +} F1; + +# will produce an identical representation in the font as: +feature F2 { + sub o f f i by o_f_f_i; + sub f f i by f_f_i; + sub f f by f_f; + sub f i by f_i; +} F2; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5d2.ttx fonttools-3.21.2/Tests/feaLib/data/spec5d2.ttx --- fonttools-3.0/Tests/feaLib/data/spec5d2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5d2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5fi1.fea fonttools-3.21.2/Tests/feaLib/data/spec5fi1.fea --- fonttools-3.0/Tests/feaLib/data/spec5fi1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5fi1.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ +# OpenType Feature File specification, section 5.f.i, example 1 +# "Specifying a Chain Sub rule and marking sub-runs" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +languagesystem latn dflt; + +lookup CNTXT_LIGS { + sub f i by f_i; + sub c t by c_t; +} CNTXT_LIGS; + +lookup CNTXT_SUB { + sub n by n.end; + sub s by s.end; +} CNTXT_SUB; + +feature test { + sub [a e i o u] f' lookup CNTXT_LIGS i' n' lookup CNTXT_SUB; + sub [a e i o u] c' lookup CNTXT_LIGS t' s' lookup CNTXT_SUB; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5fi1.ttx fonttools-3.21.2/Tests/feaLib/data/spec5fi1.ttx --- fonttools-3.0/Tests/feaLib/data/spec5fi1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5fi1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,122 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5fi2.fea fonttools-3.21.2/Tests/feaLib/data/spec5fi2.fea --- fonttools-3.0/Tests/feaLib/data/spec5fi2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5fi2.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ +# OpenType Feature File specification, section 5.f.i, example 2 +# "Specifying a Chain Sub rule and marking sub-runs" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +languagesystem latn dflt; + +feature test { +#test-fea2fea: lookupflag RightToLeft IgnoreBaseGlyphs IgnoreLigatures; + lookupflag 7; + sub [a e n] d' by d.alt; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5fi2.ttx fonttools-3.21.2/Tests/feaLib/data/spec5fi2.ttx --- fonttools-3.0/Tests/feaLib/data/spec5fi2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5fi2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,66 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5fi3.fea fonttools-3.21.2/Tests/feaLib/data/spec5fi3.fea --- fonttools-3.0/Tests/feaLib/data/spec5fi3.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5fi3.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# OpenType Feature File specification, section 5.f.i, example 3 +# "Specifying a Chain Sub rule and marking sub-runs" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +languagesystem latn dflt; + +feature test { + sub [A-Z] [A.sc-Z.sc]' by [a-z]; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5fi3.ttx fonttools-3.21.2/Tests/feaLib/data/spec5fi3.ttx --- fonttools-3.0/Tests/feaLib/data/spec5fi3.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5fi3.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,139 @@ + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5fi4.fea fonttools-3.21.2/Tests/feaLib/data/spec5fi4.fea --- fonttools-3.0/Tests/feaLib/data/spec5fi4.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5fi4.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# OpenType Feature File specification, section 5.f.i, example 4 +# "Specifying a Chain Sub rule and marking sub-runs" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +languagesystem latn dflt; + +feature test { + sub [e e.begin]' t' c by ampersand; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5fi4.ttx fonttools-3.21.2/Tests/feaLib/data/spec5fi4.ttx --- fonttools-3.0/Tests/feaLib/data/spec5fi4.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5fi4.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,73 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5f_ii_1.fea fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_1.fea --- fonttools-3.0/Tests/feaLib/data/spec5f_ii_1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_1.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# OpenType Feature File specification, section 5.f.ii, example 1 +# "Specifying exceptions to the Chain Sub rule" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +feature test { + ignore sub f [a e] d'; + ignore sub a d' d; + sub [a e n] d' by d.alt; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5f_ii_1.ttx fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_1.ttx --- 
fonttools-3.0/Tests/feaLib/data/spec5f_ii_1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5f_ii_2.fea fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_2.fea --- fonttools-3.0/Tests/feaLib/data/spec5f_ii_2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_2.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# OpenType Feature File specification, section 5.f.ii, example 2 +# "Specifying exceptions to the Chain Sub rule" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +feature test { + @LETTER = [a-z]; + ignore sub @LETTER f' i'; + sub f' i' by f_i.begin; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5f_ii_2.ttx fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_2.ttx --- fonttools-3.0/Tests/feaLib/data/spec5f_ii_2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,106 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5f_ii_3.fea fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_3.fea --- fonttools-3.0/Tests/feaLib/data/spec5f_ii_3.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_3.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# OpenType Feature File specification, section 5.f.ii, example 3 +# "Specifying exceptions to the Chain Sub rule" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +feature test { + @LETTER = 
[a-z]; + ignore sub @LETTER a' n' d', a' n' d' @LETTER; + sub a' n' d' by a_n_d; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5f_ii_3.ttx fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_3.ttx --- fonttools-3.0/Tests/feaLib/data/spec5f_ii_3.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_3.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,155 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5f_ii_4.fea fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_4.fea --- fonttools-3.0/Tests/feaLib/data/spec5f_ii_4.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_4.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,23 @@ +# OpenType Feature File specification, section 5.f.ii, example 4 +# "Specifying exceptions to the Chain Sub rule" +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +@LETTER = [A-Z a-z]; + +feature cswh { + + # --- Glyph classes used in this feature: + @BEGINNINGS = [A-N P-Z T_h m]; + @BEGINNINGS_SWASH = [A.swash-N.swash P.swash-Z.swash T_h.swash m.begin]; + @ENDINGS = [a e z]; + @ENDINGS_SWASH = [a.end e.end z.end]; + + # --- Beginning-of-word swashes: + ignore sub @LETTER @BEGINNINGS'; + sub @BEGINNINGS' by @BEGINNINGS_SWASH; + + # --- End-of-word swashes: + ignore sub @ENDINGS' @LETTER; + sub @ENDINGS' by @ENDINGS_SWASH; + +} cswh; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5f_ii_4.ttx fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_4.ttx --- fonttools-3.0/Tests/feaLib/data/spec5f_ii_4.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5f_ii_4.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,285 @@ + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec5h1.fea fonttools-3.21.2/Tests/feaLib/data/spec5h1.fea --- fonttools-3.0/Tests/feaLib/data/spec5h1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5h1.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# OpenType Feature File specification, section 5.h, example 1. +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +languagesystem DFLT dflt; + +feature test { +#test-fea2fea: rsub [a e n] d' by d.alt; + reversesub [a e n] d' by d.alt; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec5h1.ttx fonttools-3.21.2/Tests/feaLib/data/spec5h1.ttx --- fonttools-3.0/Tests/feaLib/data/spec5h1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec5h1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec6b_ii.fea fonttools-3.21.2/Tests/feaLib/data/spec6b_ii.fea --- fonttools-3.0/Tests/feaLib/data/spec6b_ii.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6b_ii.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ +# OpenType Feature File specification, section 6.b.ii: +# [GPOS LookupType 2] Enumerating pairs +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +@Y_LC = [y yacute ydieresis]; +@SMALL_PUNC = 
[comma semicolon period]; + +feature kern { + enum pos @Y_LC semicolon -80; # specific pairs + pos f quoteright 30; # specific pair + pos @Y_LC @SMALL_PUNC -100; # class pair +} kern; diff -Nru fonttools-3.0/Tests/feaLib/data/spec6b_ii.ttx fonttools-3.21.2/Tests/feaLib/data/spec6b_ii.ttx --- fonttools-3.0/Tests/feaLib/data/spec6b_ii.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6b_ii.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec6d2.fea fonttools-3.21.2/Tests/feaLib/data/spec6d2.fea --- fonttools-3.0/Tests/feaLib/data/spec6d2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6d2.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,15 @@ +# OpenType Feature File specification, section 6.d, example 1: +# [GPOS LookupType 4] Mark-to-Base attachment positioning +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +languagesystem DFLT dflt; + +markClass [acute grave] @TOP_MARKS; +markClass [dieresis umlaut] @TOP_MARKS; +markClass [cedilla] @BOTTOM_MARKS; + +feature test { + pos base [e o] mark @TOP_MARKS mark @BOTTOM_MARKS; +#test-fea2fea: pos base [a u] mark @TOP_MARKS mark @BOTTOM_MARKS; + position base [a u] mark @TOP_MARKS mark @BOTTOM_MARKS; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec6d2.ttx fonttools-3.21.2/Tests/feaLib/data/spec6d2.ttx --- fonttools-3.0/Tests/feaLib/data/spec6d2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6d2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,152 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec6e.fea fonttools-3.21.2/Tests/feaLib/data/spec6e.fea --- fonttools-3.0/Tests/feaLib/data/spec6e.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6e.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ +languagesystem DFLT dflt; + +markClass sukun @TOP_MARKS; +markClass kasratan @BOTTOM_MARKS; + +feature test { + pos ligature lam_meem_jeem mark @TOP_MARKS # mark above lam + ligComponent mark @BOTTOM_MARKS # mark below meem + ligComponent ; # jeem has no marks +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec6e.ttx fonttools-3.21.2/Tests/feaLib/data/spec6e.ttx --- fonttools-3.0/Tests/feaLib/data/spec6e.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6e.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec6f.fea fonttools-3.21.2/Tests/feaLib/data/spec6f.fea --- fonttools-3.0/Tests/feaLib/data/spec6f.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6f.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,6 @@ +languagesystem DFLT dflt; + +feature test { + markClass damma @MARK_CLASS_1; + pos mark hamza mark @MARK_CLASS_1; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec6f.ttx fonttools-3.21.2/Tests/feaLib/data/spec6f.ttx --- fonttools-3.0/Tests/feaLib/data/spec6f.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6f.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/feaLib/data/spec6h_ii.fea fonttools-3.21.2/Tests/feaLib/data/spec6h_ii.fea --- fonttools-3.0/Tests/feaLib/data/spec6h_ii.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6h_ii.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ +# OpenType Feature File specification, section 6.h.ii: +# Specifying Contextual Positioning with explicit lookup references +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +languagesystem DFLT dflt; + +markClass [acute grave] @ALL_MARKS; + +lookup CNTXT_PAIR_POS { + pos T o -10; + pos T c -12; +} CNTXT_PAIR_POS; + +lookup CNTXT_MARK_TO_BASE { + pos base o mark @ALL_MARKS; + pos base c mark @ALL_MARKS; +} CNTXT_MARK_TO_BASE; + +feature test { + pos T' lookup CNTXT_PAIR_POS [o c]' @ALL_MARKS' lookup CNTXT_MARK_TO_BASE; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec6h_iii_1.fea fonttools-3.21.2/Tests/feaLib/data/spec6h_iii_1.fea --- fonttools-3.0/Tests/feaLib/data/spec6h_iii_1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6h_iii_1.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# OpenType Feature File specification, section 6.h.iii, example 1: +# Specifying Contextual Positioning with in-line single positioning rules +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +languagesystem DFLT dflt; + +feature test { + pos [quoteleft quotedblleft] [Y T]' <0 0 20 0> [quoteright quotedblright]; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec6h_iii_1.ttx fonttools-3.21.2/Tests/feaLib/data/spec6h_iii_1.ttx --- fonttools-3.0/Tests/feaLib/data/spec6h_iii_1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6h_iii_1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/feaLib/data/spec6h_iii_3d.fea fonttools-3.21.2/Tests/feaLib/data/spec6h_iii_3d.fea --- fonttools-3.0/Tests/feaLib/data/spec6h_iii_3d.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6h_iii_3d.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ +# OpenType Feature File specification, section 6.h.iii, example 3d: +# Specifying Contextual Positioning with in-line single positioning rules +# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html + +feature test { + pos L' quoteright' -150; +} test; diff -Nru fonttools-3.0/Tests/feaLib/data/spec6h_iii_3d.ttx fonttools-3.21.2/Tests/feaLib/data/spec6h_iii_3d.ttx --- fonttools-3.0/Tests/feaLib/data/spec6h_iii_3d.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6h_iii_3d.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec6h_ii.ttx fonttools-3.21.2/Tests/feaLib/data/spec6h_ii.ttx --- fonttools-3.0/Tests/feaLib/data/spec6h_ii.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec6h_ii.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,147 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec8a.fea fonttools-3.21.2/Tests/feaLib/data/spec8a.fea --- fonttools-3.0/Tests/feaLib/data/spec8a.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec8a.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ +languagesystem DFLT dflt; +languagesystem latn dflt; +languagesystem latn TRK; +languagesystem cyrl dflt; + +feature aalt { 
+ feature salt; + feature smcp; + sub d by d.alt; +} aalt; + +feature smcp { + sub [a-c] by [A.sc-C.sc]; + sub f i by f_i; # not considered for aalt +} smcp; + +feature salt { + sub a from [a.alt1 a.alt2 a.alt3]; + sub e [c d e]' f by [c.mid d.mid e.mid]; + sub b by b.alt; +} salt; diff -Nru fonttools-3.0/Tests/feaLib/data/spec8a.ttx fonttools-3.21.2/Tests/feaLib/data/spec8a.ttx --- fonttools-3.0/Tests/feaLib/data/spec8a.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec8a.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,200 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec8b.fea fonttools-3.21.2/Tests/feaLib/data/spec8b.fea --- fonttools-3.0/Tests/feaLib/data/spec8b.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec8b.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ +feature size { + parameters 10.0 3 80 139; +# 10.0 - design size, 3 - subfamily identifier, 80 - range start (exclusive, decipoints) +# 139 - range end (inclusive, decipoints) + sizemenuname "Win MinionPro Size Name"; + sizemenuname 1 "Mac MinionPro Size Name"; + # The specification says: sizemenuname 1 21 0 "Mac MinionPro Size Name"; + # which means Macintosh platform, MacOS Thai encoding, English language. + # Since fonttools currently does not support the MacOS Thai encoding, + # we use instead MacOS Roman encoding (0), Swedish language (5) for our test. 
+ sizemenuname 1 0 5 "Mac MinionPro Size Name"; +} size; diff -Nru fonttools-3.0/Tests/feaLib/data/spec8b.ttx fonttools-3.21.2/Tests/feaLib/data/spec8b.ttx --- fonttools-3.0/Tests/feaLib/data/spec8b.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec8b.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,53 @@ + + + + + + Win MinionPro Size Name + + + Mac MinionPro Size Name + + + Mac MinionPro Size Name + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec8c.fea fonttools-3.21.2/Tests/feaLib/data/spec8c.fea --- fonttools-3.0/Tests/feaLib/data/spec8c.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec8c.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,13 @@ +feature ss01 { + featureNames { + name "Feature description for MS Platform, script Unicode, language English"; +# With no platform ID, script ID, or language ID specified, the implementation assumes (3,1,0x409). +#test-fea2fea: name 3 1 1041 "Feature description for MS Platform, script Unicode, language Japanese"; + name 3 1 0x411 "Feature description for MS Platform, script Unicode, language Japanese"; + name 1 "Feature description for Apple Platform, script Roman, language unspecified"; +# With only the platform ID specified, the implementation assumes script and language = Latin. For Apple this is (1,0,0). 
+ name 1 1 12 "Feature description for Apple Platform, script Japanese, language Japanese"; + }; +# --- rules for this feature --- + sub A by B; +} ss01; diff -Nru fonttools-3.0/Tests/feaLib/data/spec8c.ttx fonttools-3.21.2/Tests/feaLib/data/spec8c.ttx --- fonttools-3.0/Tests/feaLib/data/spec8c.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec8c.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,62 @@ + + + + + + Feature description for MS Platform, script Unicode, language English + + + Feature description for MS Platform, script Unicode, language Japanese + + + Feature description for Apple Platform, script Roman, language unspecified + + + Feature description for Apple Platform, script Japanese, language Japanese + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec9a.fea fonttools-3.21.2/Tests/feaLib/data/spec9a.fea --- fonttools-3.0/Tests/feaLib/data/spec9a.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9a.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,4 @@ +table BASE { + HorizAxis.BaseTagList ideo romn; + HorizAxis.BaseScriptList latn romn -120 0, cyrl romn -120 0, grek romn -120 0, hani ideo -120 0, kana ideo -120 0, hang ideo -120 0; +} BASE; diff -Nru fonttools-3.0/Tests/feaLib/data/spec9a.ttx fonttools-3.21.2/Tests/feaLib/data/spec9a.ttx --- fonttools-3.0/Tests/feaLib/data/spec9a.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9a.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,114 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec9b.fea fonttools-3.21.2/Tests/feaLib/data/spec9b.fea --- fonttools-3.0/Tests/feaLib/data/spec9b.fea 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9b.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,13 @@ +@BASE = [f i]; +@LIGATURES = [c_s c_t f_i f_f_i s_t]; +@MARKS = [acute grave]; +@COMPONENT = [noon.final noon.initial]; + +table GDEF { + GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENT; + Attach noon.final 5; + Attach noon.initial 4; + LigatureCaretByPos f_i 400 380; + LigatureCaretByPos [c_t s_t] 500; + LigatureCaretByIndex f_f_i 23 46; +} GDEF; diff -Nru fonttools-3.0/Tests/feaLib/data/spec9b.ttx fonttools-3.21.2/Tests/feaLib/data/spec9b.ttx --- fonttools-3.0/Tests/feaLib/data/spec9b.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9b.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec9c1.fea fonttools-3.21.2/Tests/feaLib/data/spec9c1.fea --- fonttools-3.0/Tests/feaLib/data/spec9c1.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9c1.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,4 @@ +table head { +#test-fea2fea: FontRevision 1.100; + FontRevision 1.1; +} head; diff -Nru fonttools-3.0/Tests/feaLib/data/spec9c1.ttx fonttools-3.21.2/Tests/feaLib/data/spec9c1.ttx --- fonttools-3.0/Tests/feaLib/data/spec9c1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9c1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec9c2.fea fonttools-3.21.2/Tests/feaLib/data/spec9c2.fea --- fonttools-3.0/Tests/feaLib/data/spec9c2.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9c2.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,3 @@ +table head { + FontRevision 1.001; +} head; diff -Nru 
fonttools-3.0/Tests/feaLib/data/spec9c2.ttx fonttools-3.21.2/Tests/feaLib/data/spec9c2.ttx --- fonttools-3.0/Tests/feaLib/data/spec9c2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9c2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec9c3.fea fonttools-3.21.2/Tests/feaLib/data/spec9c3.fea --- fonttools-3.0/Tests/feaLib/data/spec9c3.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9c3.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,3 @@ +table head { + FontRevision 1.500; +} head; diff -Nru fonttools-3.0/Tests/feaLib/data/spec9c3.ttx fonttools-3.21.2/Tests/feaLib/data/spec9c3.ttx --- fonttools-3.0/Tests/feaLib/data/spec9c3.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9c3.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec9d.fea fonttools-3.21.2/Tests/feaLib/data/spec9d.fea --- fonttools-3.0/Tests/feaLib/data/spec9d.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9d.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,6 @@ +table hhea { + CaretOffset -50; + Ascender 800; + Descender 200; + LineGap 200; +} hhea; diff -Nru fonttools-3.0/Tests/feaLib/data/spec9d.ttx fonttools-3.21.2/Tests/feaLib/data/spec9d.ttx --- fonttools-3.0/Tests/feaLib/data/spec9d.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9d.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec9e.fea fonttools-3.21.2/Tests/feaLib/data/spec9e.fea --- fonttools-3.0/Tests/feaLib/data/spec9e.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9e.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,4 @@ +table 
name { + nameid 9 "Joachim M\00fcller-Lanc\00e9"; # Windows (Unicode) + nameid 9 1 "Joachim M\9fller-Lanc\8e"; # Macintosh (Mac Roman) +} name; diff -Nru fonttools-3.0/Tests/feaLib/data/spec9e.ttx fonttools-3.21.2/Tests/feaLib/data/spec9e.ttx --- fonttools-3.0/Tests/feaLib/data/spec9e.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9e.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,13 @@ + + + + + + Joachim Müller-Lancé + + + Joachim Müller-Lancé + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec9f.fea fonttools-3.21.2/Tests/feaLib/data/spec9f.fea --- fonttools-3.0/Tests/feaLib/data/spec9f.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9f.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,19 @@ +table OS/2 { + FSType 4; + Panose 2 15 0 0 2 2 8 2 9 4; + TypoAscender 800; + TypoDescender -200; # Note that TypoDescender is negative for descent below the baseline. + winAscent 832; + winDescent 321; # Note that winDescent is positive for descent below the baseline. 
+ UnicodeRange 0 1 9 55 59 60; +# 0 - Basic Latin, 1 - Latin-1 Supplement +# 9 - Cyrillic, 55 - CJK Compatibility +# 59 - CJK Unified Ideographs, 60 - Private Use Area + CodePageRange 1252 1251 932; +# 1252 - Latin 1, 1251 - Cyrllic, 932 - JIS/Japan + XHeight 400; + CapHeight 600; + WeightClass 800; + WidthClass 3; + Vendor "ADBE"; +} OS/2; diff -Nru fonttools-3.0/Tests/feaLib/data/spec9f.ttx fonttools-3.21.2/Tests/feaLib/data/spec9f.ttx --- fonttools-3.0/Tests/feaLib/data/spec9f.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9f.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,57 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/spec9g.fea fonttools-3.21.2/Tests/feaLib/data/spec9g.fea --- fonttools-3.0/Tests/feaLib/data/spec9g.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9g.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ +table vhea { + VertTypoAscender 500; + VertTypoDescender -500; + VertTypoLineGap 1000; +} vhea; diff -Nru fonttools-3.0/Tests/feaLib/data/spec9g.ttx fonttools-3.21.2/Tests/feaLib/data/spec9g.ttx --- fonttools-3.0/Tests/feaLib/data/spec9g.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/spec9g.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_ChainSinglePos_horizontal.fea fonttools-3.21.2/Tests/feaLib/data/ZeroValue_ChainSinglePos_horizontal.fea --- fonttools-3.0/Tests/feaLib/data/ZeroValue_ChainSinglePos_horizontal.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_ChainSinglePos_horizontal.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# For contextual positioning statements with in-line single positioning rules, +# zeroes should get compiled to ValueRecord format 0. 
+# https://github.com/fonttools/fonttools/issues/633 + +# Zero value in a horizontal context. +feature kern { + pos A G' 0 A; # value format A + pos B G' <0 0 0 0> B; # value format B +} kern; diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_ChainSinglePos_horizontal.ttx fonttools-3.21.2/Tests/feaLib/data/ZeroValue_ChainSinglePos_horizontal.ttx --- fonttools-3.0/Tests/feaLib/data/ZeroValue_ChainSinglePos_horizontal.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_ChainSinglePos_horizontal.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,89 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_ChainSinglePos_vertical.fea fonttools-3.21.2/Tests/feaLib/data/ZeroValue_ChainSinglePos_vertical.fea --- fonttools-3.0/Tests/feaLib/data/ZeroValue_ChainSinglePos_vertical.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_ChainSinglePos_vertical.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# For contextual positioning statements with in-line single positioning rules, +# zeroes should get compiled to ValueRecord format 0. +# https://github.com/fonttools/fonttools/issues/633 + +# Zero value in a vertical context. 
+feature vkrn { + pos A G' 0 A; # value format A + pos B G' <0 0 0 0> B; # value format B +} vkrn; diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_ChainSinglePos_vertical.ttx fonttools-3.21.2/Tests/feaLib/data/ZeroValue_ChainSinglePos_vertical.ttx --- fonttools-3.0/Tests/feaLib/data/ZeroValue_ChainSinglePos_vertical.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_ChainSinglePos_vertical.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,89 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_PairPos_horizontal.fea fonttools-3.21.2/Tests/feaLib/data/ZeroValue_PairPos_horizontal.fea --- fonttools-3.0/Tests/feaLib/data/ZeroValue_PairPos_horizontal.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_PairPos_horizontal.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# For PairPos statements in horizontal compilation contexts, +# zero values should get compiled to ValueRecord format 4. 
+# https://github.com/fonttools/fonttools/issues/633 +feature kern { + pos A 0 A 0; + pos A 0 B <0 0 0 0>; + pos B <0 0 0 0> A 0; + pos B <0 0 0 0> B <0 0 0 0>; +} kern; diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_PairPos_horizontal.ttx fonttools-3.21.2/Tests/feaLib/data/ZeroValue_PairPos_horizontal.ttx --- fonttools-3.0/Tests/feaLib/data/ZeroValue_PairPos_horizontal.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_PairPos_horizontal.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_PairPos_vertical.fea fonttools-3.21.2/Tests/feaLib/data/ZeroValue_PairPos_vertical.fea --- fonttools-3.0/Tests/feaLib/data/ZeroValue_PairPos_vertical.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_PairPos_vertical.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ +# For PairPos statements in vertical compilation contexts, +# zero values should get compiled to ValueRecord format 8. 
+# https://github.com/fonttools/fonttools/issues/633 +feature vkrn { + pos A 0 A 0; + pos A 0 B <0 0 0 0>; + pos B <0 0 0 0> A 0; + pos B <0 0 0 0> B <0 0 0 0>; +} vkrn; diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_PairPos_vertical.ttx fonttools-3.21.2/Tests/feaLib/data/ZeroValue_PairPos_vertical.ttx --- fonttools-3.0/Tests/feaLib/data/ZeroValue_PairPos_vertical.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_PairPos_vertical.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_SinglePos_horizontal.fea fonttools-3.21.2/Tests/feaLib/data/ZeroValue_SinglePos_horizontal.fea --- fonttools-3.0/Tests/feaLib/data/ZeroValue_SinglePos_horizontal.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_SinglePos_horizontal.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +# For SinglePos statements, zeroes should get compiled to ValueRecord format 0. +# https://github.com/fonttools/fonttools/issues/633 + +# Zero value in a horizontal context. 
+feature kern { + pos A 0; # format A + pos B <0 0 0 0>; # format B +} kern; diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_SinglePos_horizontal.ttx fonttools-3.21.2/Tests/feaLib/data/ZeroValue_SinglePos_horizontal.ttx --- fonttools-3.0/Tests/feaLib/data/ZeroValue_SinglePos_horizontal.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_SinglePos_horizontal.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_SinglePos_vertical.fea fonttools-3.21.2/Tests/feaLib/data/ZeroValue_SinglePos_vertical.fea --- fonttools-3.0/Tests/feaLib/data/ZeroValue_SinglePos_vertical.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_SinglePos_vertical.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +# For SinglePos statements, zeroes should get compiled to ValueRecord format 0. +# https://github.com/fonttools/fonttools/issues/633 + +# Zero value in a vertical context. 
+feature vkrn { + pos A 0; # format A + pos B <0 0 0 0>; # format B +} vkrn; diff -Nru fonttools-3.0/Tests/feaLib/data/ZeroValue_SinglePos_vertical.ttx fonttools-3.21.2/Tests/feaLib/data/ZeroValue_SinglePos_vertical.ttx --- fonttools-3.0/Tests/feaLib/data/ZeroValue_SinglePos_vertical.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/data/ZeroValue_SinglePos_vertical.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/feaLib/error_test.py fonttools-3.21.2/Tests/feaLib/error_test.py --- fonttools-3.0/Tests/feaLib/error_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/error_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,19 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.error import FeatureLibError +import unittest + + +class FeatureLibErrorTest(unittest.TestCase): + def test_str(self): + err = FeatureLibError("Squeak!", ("foo.fea", 23, 42)) + self.assertEqual(str(err), "foo.fea:23:42: Squeak!") + + def test_str_nolocation(self): + err = FeatureLibError("Squeak!", None) + self.assertEqual(str(err), "Squeak!") + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/feaLib/lexer_test.py fonttools-3.21.2/Tests/feaLib/lexer_test.py --- fonttools-3.0/Tests/feaLib/lexer_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/lexer_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,181 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.error import FeatureLibError +from fontTools.feaLib.lexer import IncludingLexer, Lexer +import os +import unittest + + +def lex(s): + return [(typ, tok) for (typ, tok, _) in Lexer(s, "test.fea")] + +class LexerTest(unittest.TestCase): 
+ def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_empty(self): + self.assertEqual(lex(""), []) + self.assertEqual(lex(" \t "), []) + + def test_name(self): + self.assertEqual(lex("a17"), [(Lexer.NAME, "a17")]) + self.assertEqual(lex(".notdef"), [(Lexer.NAME, ".notdef")]) + self.assertEqual(lex("two.oldstyle"), [(Lexer.NAME, "two.oldstyle")]) + self.assertEqual(lex("_"), [(Lexer.NAME, "_")]) + self.assertEqual(lex("\\table"), [(Lexer.NAME, "\\table")]) + self.assertEqual(lex("a+*:^~!"), [(Lexer.NAME, "a+*:^~!")]) + self.assertEqual(lex("with-dash"), [(Lexer.NAME, "with-dash")]) + + def test_cid(self): + self.assertEqual(lex("\\0 \\987"), [(Lexer.CID, 0), (Lexer.CID, 987)]) + + def test_glyphclass(self): + self.assertEqual(lex("@Vowel.sc"), [(Lexer.GLYPHCLASS, "Vowel.sc")]) + self.assertRaisesRegex(FeatureLibError, + "Expected glyph class", lex, "@(a)") + self.assertRaisesRegex(FeatureLibError, + "Expected glyph class", lex, "@ A") + self.assertRaisesRegex(FeatureLibError, + "not be longer than 63 characters", + lex, "@" + ("A" * 64)) + self.assertRaisesRegex(FeatureLibError, + "Glyph class names must consist of", + lex, "@Ab:c") + + def test_include(self): + self.assertEqual(lex("include (~/foo/bar baz.fea);"), [ + (Lexer.NAME, "include"), + (Lexer.FILENAME, "~/foo/bar baz.fea"), + (Lexer.SYMBOL, ";") + ]) + self.assertEqual(lex("include # Comment\n (foo) \n;"), [ + (Lexer.NAME, "include"), + (Lexer.COMMENT, "# Comment"), + (Lexer.FILENAME, "foo"), + (Lexer.SYMBOL, ";") + ]) + self.assertRaises(FeatureLibError, lex, "include blah") + self.assertRaises(FeatureLibError, lex, "include (blah") + + def test_number(self): + self.assertEqual(lex("123 -456"), + [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)]) + 
self.assertEqual(lex("0xCAFED00D"), [(Lexer.NUMBER, 0xCAFED00D)]) + self.assertEqual(lex("0xcafed00d"), [(Lexer.NUMBER, 0xCAFED00D)]) + + def test_float(self): + self.assertEqual(lex("1.23 -4.5"), + [(Lexer.FLOAT, 1.23), (Lexer.FLOAT, -4.5)]) + + def test_symbol(self): + self.assertEqual(lex("a'"), [(Lexer.NAME, "a"), (Lexer.SYMBOL, "'")]) + self.assertEqual(lex("-A-B"), + [(Lexer.SYMBOL, "-"), (Lexer.NAME, "A-B")]) + self.assertEqual( + lex("foo - -2"), + [(Lexer.NAME, "foo"), (Lexer.SYMBOL, "-"), (Lexer.NUMBER, -2)]) + + def test_comment(self): + self.assertEqual(lex("# Comment\n#"), + [(Lexer.COMMENT, "# Comment"), (Lexer.COMMENT, "#")]) + + def test_string(self): + self.assertEqual(lex('"foo" "bar"'), + [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")]) + self.assertEqual(lex('"foo \nbar\r baz \r\nqux\n\n "'), + [(Lexer.STRING, "foo bar baz qux ")]) + # The lexer should preserve escape sequences because they have + # different interpretations depending on context. For better + # or for worse, that is how the OpenType Feature File Syntax + # has been specified; see section 9.e (name table) for examples. 
+ self.assertEqual(lex(r'"M\00fcller-Lanc\00e9"'), # 'nameid 9' + [(Lexer.STRING, r"M\00fcller-Lanc\00e9")]) + self.assertEqual(lex(r'"M\9fller-Lanc\8e"'), # 'nameid 9 1' + [(Lexer.STRING, r"M\9fller-Lanc\8e")]) + self.assertRaises(FeatureLibError, lex, '"foo\n bar') + + def test_bad_character(self): + self.assertRaises(FeatureLibError, lambda: lex("123 \u0001")) + + def test_newline(self): + def lines(s): + return [loc[1] for (_, _, loc) in Lexer(s, "test.fea")] + self.assertEqual(lines("FOO\n\nBAR\nBAZ"), [1, 3, 4]) # Unix + self.assertEqual(lines("FOO\r\rBAR\rBAZ"), [1, 3, 4]) # Macintosh + self.assertEqual(lines("FOO\r\n\r\n BAR\r\nBAZ"), [1, 3, 4]) # Windows + self.assertEqual(lines("FOO\n\rBAR\r\nBAZ"), [1, 3, 4]) # mixed + + def test_location(self): + def locs(s): + return ["%s:%d:%d" % loc for (_, _, loc) in Lexer(s, "test.fea")] + self.assertEqual(locs("a b # Comment\n12 @x"), [ + "test.fea:1:1", "test.fea:1:3", "test.fea:1:5", "test.fea:2:1", + "test.fea:2:4" + ]) + + def test_scan_over_(self): + lexer = Lexer("abbacabba12", "test.fea") + self.assertEqual(lexer.pos_, 0) + lexer.scan_over_("xyz") + self.assertEqual(lexer.pos_, 0) + lexer.scan_over_("abc") + self.assertEqual(lexer.pos_, 9) + lexer.scan_over_("abc") + self.assertEqual(lexer.pos_, 9) + lexer.scan_over_("0123456789") + self.assertEqual(lexer.pos_, 11) + + def test_scan_until_(self): + lexer = Lexer("foo'bar", "test.fea") + self.assertEqual(lexer.pos_, 0) + lexer.scan_until_("'") + self.assertEqual(lexer.pos_, 3) + lexer.scan_until_("'") + self.assertEqual(lexer.pos_, 3) + + +class IncludingLexerTest(unittest.TestCase): + @staticmethod + def getpath(filename): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", filename) + + def test_include(self): + lexer = IncludingLexer(self.getpath("include/include4.fea")) + result = ['%s %s:%d' % (token, os.path.split(loc[0])[1], loc[1]) + for _, token, loc in lexer] + self.assertEqual(result, [ + "I4a include4.fea:1", + "I3a 
include3.fea:1", + "I2a include2.fea:1", + "I1a include1.fea:1", + "I0 include0.fea:1", + "I1b include1.fea:3", + "; include2.fea:2", + "I2b include2.fea:3", + "; include3.fea:2", + "I3b include3.fea:3", + "; include4.fea:2", + "I4b include4.fea:3" + ]) + + def test_include_limit(self): + lexer = IncludingLexer(self.getpath("include/include6.fea")) + self.assertRaises(FeatureLibError, lambda: list(lexer)) + + def test_include_self(self): + lexer = IncludingLexer(self.getpath("include/includeself.fea")) + self.assertRaises(FeatureLibError, lambda: list(lexer)) + + def test_include_missing_file(self): + lexer = IncludingLexer(self.getpath("include/includemissingfile.fea")) + self.assertRaises(FeatureLibError, lambda: list(lexer)) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/feaLib/parser_test.py fonttools-3.21.2/Tests/feaLib/parser_test.py --- fonttools-3.0/Tests/feaLib/parser_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/feaLib/parser_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1580 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.feaLib.error import FeatureLibError +from fontTools.feaLib.parser import Parser, SymbolTable +from fontTools.misc.py23 import * +import warnings +import fontTools.feaLib.ast as ast +import os +import unittest + + +def glyphstr(glyphs): + def f(x): + if len(x) == 1: + return list(x)[0] + else: + return '[%s]' % ' '.join(sorted(list(x))) + return ' '.join(f(g.glyphSet()) for g in glyphs) + + +def mapping(s): + b = [] + for a in s.glyphs: + b.extend(a.glyphSet()) + c = [] + for a in s.replacements: + c.extend(a.glyphSet()) + if len(c) == 1: + c = c * len(b) + return dict(zip(b, c)) + + +GLYPHNAMES = (""" + .notdef space A B C D E F G H I J K L M N O P Q R S T U V W X Y Z + A.sc B.sc C.sc D.sc E.sc F.sc G.sc H.sc I.sc J.sc K.sc L.sc M.sc + N.sc O.sc 
P.sc Q.sc R.sc S.sc T.sc U.sc V.sc W.sc X.sc Y.sc Z.sc + A.swash B.swash X.swash Y.swash Z.swash + a b c d e f g h i j k l m n o p q r s t u v w x y z + a.sc b.sc c.sc d.sc e.sc f.sc g.sc h.sc i.sc j.sc k.sc l.sc m.sc + n.sc o.sc p.sc q.sc r.sc s.sc t.sc u.sc v.sc w.sc x.sc y.sc z.sc + a.swash b.swash x.swash y.swash z.swash + foobar foo.09 foo.1234 foo.9876 +""").split() + ["foo.%d" % i for i in range(1, 200)] + + +class ParserTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_glyphMap_deprecated(self): + glyphMap = {'a': 0, 'b': 1, 'c': 2} + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + parser = Parser(UnicodeIO(), glyphMap=glyphMap) + + self.assertEqual(len(w), 1) + self.assertEqual(w[-1].category, UserWarning) + self.assertIn("deprecated", str(w[-1].message)) + self.assertEqual(parser.glyphNames_, {'a', 'b', 'c'}) + + self.assertRaisesRegex( + TypeError, "mutually exclusive", + Parser, UnicodeIO(), ("a",), glyphMap={"a": 0}) + + self.assertRaisesRegex( + TypeError, "unsupported keyword argument", + Parser, UnicodeIO(), foo="bar") + + def test_comments(self): + doc = self.parse( + """ # Initial + feature test { + sub A by B; # simple + } test;""") + c1 = doc.statements[0] + c2 = doc.statements[1].statements[1] + self.assertEqual(type(c1), ast.Comment) + self.assertEqual(c1.text, "# Initial") + self.assertEqual(str(c1), "# Initial") + self.assertEqual(type(c2), ast.Comment) + self.assertEqual(c2.text, "# simple") + self.assertEqual(doc.statements[1].name, "test") + + def test_anchor_format_a(self): + doc = self.parse( + "feature test {" + " pos cursive A ;" + "} test;") + anchor = doc.statements[0].statements[0].entryAnchor + 
self.assertEqual(type(anchor), ast.Anchor) + self.assertEqual(anchor.x, 120) + self.assertEqual(anchor.y, -20) + self.assertIsNone(anchor.contourpoint) + self.assertIsNone(anchor.xDeviceTable) + self.assertIsNone(anchor.yDeviceTable) + + def test_anchor_format_b(self): + doc = self.parse( + "feature test {" + " pos cursive A ;" + "} test;") + anchor = doc.statements[0].statements[0].entryAnchor + self.assertEqual(type(anchor), ast.Anchor) + self.assertEqual(anchor.x, 120) + self.assertEqual(anchor.y, -20) + self.assertEqual(anchor.contourpoint, 5) + self.assertIsNone(anchor.xDeviceTable) + self.assertIsNone(anchor.yDeviceTable) + + def test_anchor_format_c(self): + doc = self.parse( + "feature test {" + " pos cursive A " + " >" + " ;" + "} test;") + anchor = doc.statements[0].statements[0].entryAnchor + self.assertEqual(type(anchor), ast.Anchor) + self.assertEqual(anchor.x, 120) + self.assertEqual(anchor.y, -20) + self.assertIsNone(anchor.contourpoint) + self.assertEqual(anchor.xDeviceTable, ((11, 111), (12, 112))) + self.assertIsNone(anchor.yDeviceTable) + + def test_anchor_format_d(self): + doc = self.parse( + "feature test {" + " pos cursive A ;" + "} test;") + anchor = doc.statements[0].statements[0].exitAnchor + self.assertIsNone(anchor) + + def test_anchor_format_e(self): + doc = self.parse( + "feature test {" + " anchorDef 120 -20 contourpoint 7 Foo;" + " pos cursive A ;" + "} test;") + anchor = doc.statements[0].statements[1].entryAnchor + self.assertEqual(type(anchor), ast.Anchor) + self.assertEqual(anchor.x, 120) + self.assertEqual(anchor.y, -20) + self.assertEqual(anchor.contourpoint, 7) + self.assertIsNone(anchor.xDeviceTable) + self.assertIsNone(anchor.yDeviceTable) + + def test_anchor_format_e_undefined(self): + self.assertRaisesRegex( + FeatureLibError, 'Unknown anchor "UnknownName"', self.parse, + "feature test {" + " position cursive A ;" + "} test;") + + def test_anchordef(self): + [foo] = self.parse("anchorDef 123 456 foo;").statements + 
self.assertEqual(type(foo), ast.AnchorDefinition) + self.assertEqual(foo.name, "foo") + self.assertEqual(foo.x, 123) + self.assertEqual(foo.y, 456) + self.assertEqual(foo.contourpoint, None) + + def test_anchordef_contourpoint(self): + [foo] = self.parse("anchorDef 123 456 contourpoint 5 foo;").statements + self.assertEqual(type(foo), ast.AnchorDefinition) + self.assertEqual(foo.name, "foo") + self.assertEqual(foo.x, 123) + self.assertEqual(foo.y, 456) + self.assertEqual(foo.contourpoint, 5) + + def test_anon(self): + anon = self.parse("anon TEST { # a\nfoo\n } TEST; # qux").statements[0] + self.assertIsInstance(anon, ast.AnonymousBlock) + self.assertEqual(anon.tag, "TEST") + self.assertEqual(anon.content, "foo\n ") + + def test_anonymous(self): + anon = self.parse("anonymous TEST {\nbar\n} TEST;").statements[0] + self.assertIsInstance(anon, ast.AnonymousBlock) + self.assertEqual(anon.tag, "TEST") + # feature file spec requires passing the final end-of-line + self.assertEqual(anon.content, "bar\n") + + def test_anon_missingBrace(self): + self.assertRaisesRegex( + FeatureLibError, "Expected '} TEST;' to terminate anonymous block", + self.parse, "anon TEST { \n no end in sight") + + def test_attach(self): + doc = self.parse("table GDEF {Attach [a e] 2;} GDEF;") + s = doc.statements[0].statements[0] + self.assertIsInstance(s, ast.AttachStatement) + self.assertEqual(glyphstr([s.glyphs]), "[a e]") + self.assertEqual(s.contourPoints, {2}) + + def test_feature_block(self): + [liga] = self.parse("feature liga {} liga;").statements + self.assertEqual(liga.name, "liga") + self.assertFalse(liga.use_extension) + + def test_feature_block_useExtension(self): + [liga] = self.parse("feature liga useExtension {} liga;").statements + self.assertEqual(liga.name, "liga") + self.assertTrue(liga.use_extension) + + def test_feature_comment(self): + [liga] = self.parse("feature liga { # Comment\n } liga;").statements + [comment] = liga.statements + self.assertIsInstance(comment, 
ast.Comment) + self.assertEqual(comment.text, "# Comment") + + def test_feature_reference(self): + doc = self.parse("feature aalt { feature salt; } aalt;") + ref = doc.statements[0].statements[0] + self.assertIsInstance(ref, ast.FeatureReferenceStatement) + self.assertEqual(ref.featureName, "salt") + + def test_FeatureNames_bad(self): + self.assertRaisesRegex( + FeatureLibError, 'Expected "name"', + self.parse, "feature ss01 { featureNames { feature test; } ss01;") + + def test_FeatureNames_comment(self): + [feature] = self.parse( + "feature ss01 { featureNames { # Comment\n }; } ss01;").statements + [featureNames] = feature.statements + self.assertIsInstance(featureNames, ast.FeatureNamesBlock) + [comment] = featureNames.statements + self.assertIsInstance(comment, ast.Comment) + self.assertEqual(comment.text, "# Comment") + + def test_FeatureNames_emptyStatements(self): + [feature] = self.parse( + "feature ss01 { featureNames { ;;; }; } ss01;").statements + [featureNames] = feature.statements + self.assertIsInstance(featureNames, ast.FeatureNamesBlock) + self.assertEqual(featureNames.statements, []) + + def test_FontRevision(self): + doc = self.parse("table head {FontRevision 2.5;} head;") + s = doc.statements[0].statements[0] + self.assertIsInstance(s, ast.FontRevisionStatement) + self.assertEqual(s.revision, 2.5) + + def test_FontRevision_negative(self): + self.assertRaisesRegex( + FeatureLibError, "Font revision numbers must be positive", + self.parse, "table head {FontRevision -17.2;} head;") + + def test_glyphclass(self): + [gc] = self.parse("@dash = [endash emdash figuredash];").statements + self.assertEqual(gc.name, "dash") + self.assertEqual(gc.glyphSet(), ("endash", "emdash", "figuredash")) + + def test_glyphclass_glyphNameTooLong(self): + self.assertRaisesRegex( + FeatureLibError, "must not be longer than 63 characters", + self.parse, "@GlyphClass = [%s];" % ("G" * 64)) + + def test_glyphclass_bad(self): + self.assertRaisesRegex( + FeatureLibError, + 
"Expected glyph name, glyph range, or glyph class reference", + self.parse, "@bad = [a 123];") + + def test_glyphclass_duplicate(self): + # makeotf accepts this, so we should too + ab, xy = self.parse("@dup = [a b]; @dup = [x y];").statements + self.assertEqual(glyphstr([ab]), "[a b]") + self.assertEqual(glyphstr([xy]), "[x y]") + + def test_glyphclass_empty(self): + [gc] = self.parse("@empty_set = [];").statements + self.assertEqual(gc.name, "empty_set") + self.assertEqual(gc.glyphSet(), tuple()) + + def test_glyphclass_equality(self): + [foo, bar] = self.parse("@foo = [a b]; @bar = @foo;").statements + self.assertEqual(foo.glyphSet(), ("a", "b")) + self.assertEqual(bar.glyphSet(), ("a", "b")) + + def test_glyphclass_from_markClass(self): + doc = self.parse( + "markClass [acute grave] @TOP_MARKS;" + "markClass cedilla @BOTTOM_MARKS;" + "@MARKS = [@TOP_MARKS @BOTTOM_MARKS ogonek];" + "@ALL = @MARKS;") + self.assertEqual(doc.statements[-1].glyphSet(), + ("acute", "grave", "cedilla", "ogonek")) + + def test_glyphclass_range_cid(self): + [gc] = self.parse(r"@GlyphClass = [\999-\1001];").statements + self.assertEqual(gc.name, "GlyphClass") + self.assertEqual(gc.glyphSet(), ("cid00999", "cid01000", "cid01001")) + + def test_glyphclass_range_cid_bad(self): + self.assertRaisesRegex( + FeatureLibError, + "Bad range: start should be less than limit", + self.parse, r"@bad = [\998-\995];") + + def test_glyphclass_range_uppercase(self): + [gc] = self.parse("@swashes = [X.swash-Z.swash];").statements + self.assertEqual(gc.name, "swashes") + self.assertEqual(gc.glyphSet(), ("X.swash", "Y.swash", "Z.swash")) + + def test_glyphclass_range_lowercase(self): + [gc] = self.parse("@defg.sc = [d.sc-g.sc];").statements + self.assertEqual(gc.name, "defg.sc") + self.assertEqual(gc.glyphSet(), ("d.sc", "e.sc", "f.sc", "g.sc")) + + def test_glyphclass_range_dash(self): + glyphNames = "A-foo.sc B-foo.sc C-foo.sc".split() + [gc] = self.parse("@range = [A-foo.sc-C-foo.sc];", 
glyphNames).statements + self.assertEqual(gc.glyphSet(), ("A-foo.sc", "B-foo.sc", "C-foo.sc")) + + def test_glyphclass_range_dash_with_space(self): + gn = "A-foo.sc B-foo.sc C-foo.sc".split() + [gc] = self.parse("@range = [A-foo.sc - C-foo.sc];", gn).statements + self.assertEqual(gc.glyphSet(), ("A-foo.sc", "B-foo.sc", "C-foo.sc")) + + def test_glyphclass_glyph_name_should_win_over_range(self): + # The OpenType Feature File Specification v1.20 makes it clear + # that if a dashed name could be interpreted either as a glyph name + # or as a range, then the semantics should be the single dashed name. + glyphNames = ( + "A-foo.sc-C-foo.sc A-foo.sc B-foo.sc C-foo.sc".split()) + [gc] = self.parse("@range = [A-foo.sc-C-foo.sc];", glyphNames).statements + self.assertEqual(gc.glyphSet(), ("A-foo.sc-C-foo.sc",)) + + def test_glyphclass_range_dash_ambiguous(self): + glyphNames = "A B C A-B B-C".split() + self.assertRaisesRegex( + FeatureLibError, + 'Ambiguous glyph range "A-B-C"; ' + 'please use "A - B-C" or "A-B - C" to clarify what you mean', + self.parse, r"@bad = [A-B-C];", glyphNames) + + def test_glyphclass_range_digit1(self): + [gc] = self.parse("@range = [foo.2-foo.5];").statements + self.assertEqual(gc.glyphSet(), ("foo.2", "foo.3", "foo.4", "foo.5")) + + def test_glyphclass_range_digit2(self): + [gc] = self.parse("@range = [foo.09-foo.11];").statements + self.assertEqual(gc.glyphSet(), ("foo.09", "foo.10", "foo.11")) + + def test_glyphclass_range_digit3(self): + [gc] = self.parse("@range = [foo.123-foo.125];").statements + self.assertEqual(gc.glyphSet(), ("foo.123", "foo.124", "foo.125")) + + def test_glyphclass_range_bad(self): + self.assertRaisesRegex( + FeatureLibError, + "Bad range: \"a\" and \"foobar\" should have the same length", + self.parse, "@bad = [a-foobar];") + self.assertRaisesRegex( + FeatureLibError, "Bad range: \"A.swash-z.swash\"", + self.parse, "@bad = [A.swash-z.swash];") + self.assertRaisesRegex( + FeatureLibError, "Start of range must be 
smaller than its end", + self.parse, "@bad = [B.swash-A.swash];") + self.assertRaisesRegex( + FeatureLibError, "Bad range: \"foo.1234-foo.9876\"", + self.parse, "@bad = [foo.1234-foo.9876];") + + def test_glyphclass_range_mixed(self): + [gc] = self.parse("@range = [a foo.09-foo.11 X.sc-Z.sc];").statements + self.assertEqual(gc.glyphSet(), ( + "a", "foo.09", "foo.10", "foo.11", "X.sc", "Y.sc", "Z.sc" + )) + + def test_glyphclass_reference(self): + [vowels_lc, vowels_uc, vowels] = self.parse( + "@Vowels.lc = [a e i o u]; @Vowels.uc = [A E I O U];" + "@Vowels = [@Vowels.lc @Vowels.uc y Y];").statements + self.assertEqual(vowels_lc.glyphSet(), tuple("aeiou")) + self.assertEqual(vowels_uc.glyphSet(), tuple("AEIOU")) + self.assertEqual(vowels.glyphSet(), tuple("aeiouAEIOUyY")) + self.assertEqual(vowels.asFea(), + "@Vowels = [@Vowels.lc @Vowels.uc y Y];") + self.assertRaisesRegex( + FeatureLibError, "Unknown glyph class @unknown", + self.parse, "@bad = [@unknown];") + + def test_glyphclass_scoping(self): + [foo, liga, smcp] = self.parse( + "@foo = [a b];" + "feature liga { @bar = [@foo l]; } liga;" + "feature smcp { @bar = [@foo s]; } smcp;" + ).statements + self.assertEqual(foo.glyphSet(), ("a", "b")) + self.assertEqual(liga.statements[0].glyphSet(), ("a", "b", "l")) + self.assertEqual(smcp.statements[0].glyphSet(), ("a", "b", "s")) + + def test_glyphclass_scoping_bug496(self): + # https://github.com/behdad/fonttools/issues/496 + f1, f2 = self.parse( + "feature F1 { lookup L { @GLYPHCLASS = [A B C];} L; } F1;" + "feature F2 { sub @GLYPHCLASS by D; } F2;" + ).statements + self.assertEqual(list(f2.statements[0].glyphs[0].glyphSet()), + ["A", "B", "C"]) + + def test_GlyphClassDef(self): + doc = self.parse("table GDEF {GlyphClassDef [b],[l],[m],[C c];} GDEF;") + s = doc.statements[0].statements[0] + self.assertIsInstance(s, ast.GlyphClassDefStatement) + self.assertEqual(glyphstr([s.baseGlyphs]), "b") + self.assertEqual(glyphstr([s.ligatureGlyphs]), "l") + 
self.assertEqual(glyphstr([s.markGlyphs]), "m") + self.assertEqual(glyphstr([s.componentGlyphs]), "[C c]") + + def test_GlyphClassDef_noCLassesSpecified(self): + doc = self.parse("table GDEF {GlyphClassDef ,,,;} GDEF;") + s = doc.statements[0].statements[0] + self.assertIsNone(s.baseGlyphs) + self.assertIsNone(s.ligatureGlyphs) + self.assertIsNone(s.markGlyphs) + self.assertIsNone(s.componentGlyphs) + + def test_ignore_pos(self): + doc = self.parse("feature test {ignore pos e t' c, q u' u' x;} test;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.IgnorePosStatement) + [(pref1, glyphs1, suff1), (pref2, glyphs2, suff2)] = sub.chainContexts + self.assertEqual(glyphstr(pref1), "e") + self.assertEqual(glyphstr(glyphs1), "t") + self.assertEqual(glyphstr(suff1), "c") + self.assertEqual(glyphstr(pref2), "q") + self.assertEqual(glyphstr(glyphs2), "u u") + self.assertEqual(glyphstr(suff2), "x") + + def test_ignore_position(self): + doc = self.parse( + "feature test {" + " ignore position f [a e] d' [a u]' [e y];" + "} test;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.IgnorePosStatement) + [(prefix, glyphs, suffix)] = sub.chainContexts + self.assertEqual(glyphstr(prefix), "f [a e]") + self.assertEqual(glyphstr(glyphs), "d [a u]") + self.assertEqual(glyphstr(suffix), "[e y]") + + def test_ignore_position_with_lookup(self): + self.assertRaisesRegex( + FeatureLibError, + 'No lookups can be specified for "ignore pos"', + self.parse, + "lookup L { pos [A A.sc] -100; } L;" + "feature test { ignore pos f' i', A' lookup L; } test;") + + def test_ignore_sub(self): + doc = self.parse("feature test {ignore sub e t' c, q u' u' x;} test;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.IgnoreSubstStatement) + [(pref1, glyphs1, suff1), (pref2, glyphs2, suff2)] = sub.chainContexts + self.assertEqual(glyphstr(pref1), "e") + self.assertEqual(glyphstr(glyphs1), "t") + self.assertEqual(glyphstr(suff1), "c") + 
self.assertEqual(glyphstr(pref2), "q") + self.assertEqual(glyphstr(glyphs2), "u u") + self.assertEqual(glyphstr(suff2), "x") + + def test_ignore_substitute(self): + doc = self.parse( + "feature test {" + " ignore substitute f [a e] d' [a u]' [e y];" + "} test;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.IgnoreSubstStatement) + [(prefix, glyphs, suffix)] = sub.chainContexts + self.assertEqual(glyphstr(prefix), "f [a e]") + self.assertEqual(glyphstr(glyphs), "d [a u]") + self.assertEqual(glyphstr(suffix), "[e y]") + + def test_ignore_substitute_with_lookup(self): + self.assertRaisesRegex( + FeatureLibError, + 'No lookups can be specified for "ignore sub"', + self.parse, + "lookup L { sub [A A.sc] by a; } L;" + "feature test { ignore sub f' i', A' lookup L; } test;") + + def test_language(self): + doc = self.parse("feature test {language DEU;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU ") + self.assertTrue(s.include_default) + self.assertFalse(s.required) + + def test_language_exclude_dflt(self): + doc = self.parse("feature test {language DEU exclude_dflt;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU ") + self.assertFalse(s.include_default) + self.assertFalse(s.required) + + def test_language_exclude_dflt_required(self): + doc = self.parse("feature test {" + " language DEU exclude_dflt required;" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU ") + self.assertFalse(s.include_default) + self.assertTrue(s.required) + + def test_language_include_dflt(self): + doc = self.parse("feature test {language DEU include_dflt;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU ") + 
self.assertTrue(s.include_default) + self.assertFalse(s.required) + + def test_language_include_dflt_required(self): + doc = self.parse("feature test {" + " language DEU include_dflt required;" + "} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.LanguageStatement) + self.assertEqual(s.language, "DEU ") + self.assertTrue(s.include_default) + self.assertTrue(s.required) + + def test_language_DFLT(self): + self.assertRaisesRegex( + FeatureLibError, + '"DFLT" is not a valid language tag; use "dflt" instead', + self.parse, "feature test { language DFLT; } test;") + + def test_ligatureCaretByIndex_glyphClass(self): + doc = self.parse("table GDEF{LigatureCaretByIndex [c_t f_i] 2;}GDEF;") + s = doc.statements[0].statements[0] + self.assertIsInstance(s, ast.LigatureCaretByIndexStatement) + self.assertEqual(glyphstr([s.glyphs]), "[c_t f_i]") + self.assertEqual(s.carets, [2]) + + def test_ligatureCaretByIndex_singleGlyph(self): + doc = self.parse("table GDEF{LigatureCaretByIndex f_f_i 3 7;}GDEF;") + s = doc.statements[0].statements[0] + self.assertIsInstance(s, ast.LigatureCaretByIndexStatement) + self.assertEqual(glyphstr([s.glyphs]), "f_f_i") + self.assertEqual(s.carets, [3, 7]) + + def test_ligatureCaretByPos_glyphClass(self): + doc = self.parse("table GDEF {LigatureCaretByPos [c_t f_i] 7;} GDEF;") + s = doc.statements[0].statements[0] + self.assertIsInstance(s, ast.LigatureCaretByPosStatement) + self.assertEqual(glyphstr([s.glyphs]), "[c_t f_i]") + self.assertEqual(s.carets, [7]) + + def test_ligatureCaretByPos_singleGlyph(self): + doc = self.parse("table GDEF {LigatureCaretByPos f_i 400 380;} GDEF;") + s = doc.statements[0].statements[0] + self.assertIsInstance(s, ast.LigatureCaretByPosStatement) + self.assertEqual(glyphstr([s.glyphs]), "f_i") + self.assertEqual(s.carets, [400, 380]) + + def test_lookup_block(self): + [lookup] = self.parse("lookup Ligatures {} Ligatures;").statements + self.assertEqual(lookup.name, "Ligatures") + 
self.assertFalse(lookup.use_extension) + + def test_lookup_block_useExtension(self): + [lookup] = self.parse("lookup Foo useExtension {} Foo;").statements + self.assertEqual(lookup.name, "Foo") + self.assertTrue(lookup.use_extension) + + def test_lookup_block_name_mismatch(self): + self.assertRaisesRegex( + FeatureLibError, 'Expected "Foo"', + self.parse, "lookup Foo {} Bar;") + + def test_lookup_block_with_horizontal_valueRecordDef(self): + doc = self.parse("feature liga {" + " lookup look {" + " valueRecordDef 123 foo;" + " } look;" + "} liga;") + [liga] = doc.statements + [look] = liga.statements + [foo] = look.statements + self.assertEqual(foo.value.xAdvance, 123) + self.assertIsNone(foo.value.yAdvance) + + def test_lookup_block_with_vertical_valueRecordDef(self): + doc = self.parse("feature vkrn {" + " lookup look {" + " valueRecordDef 123 foo;" + " } look;" + "} vkrn;") + [vkrn] = doc.statements + [look] = vkrn.statements + [foo] = look.statements + self.assertIsNone(foo.value.xAdvance) + self.assertEqual(foo.value.yAdvance, 123) + + def test_lookup_comment(self): + [lookup] = self.parse("lookup L { # Comment\n } L;").statements + [comment] = lookup.statements + self.assertIsInstance(comment, ast.Comment) + self.assertEqual(comment.text, "# Comment") + + def test_lookup_reference(self): + [foo, bar] = self.parse("lookup Foo {} Foo;" + "feature Bar {lookup Foo;} Bar;").statements + [ref] = bar.statements + self.assertEqual(type(ref), ast.LookupReferenceStatement) + self.assertEqual(ref.lookup, foo) + + def test_lookup_reference_to_lookup_inside_feature(self): + [qux, bar] = self.parse("feature Qux {lookup Foo {} Foo;} Qux;" + "feature Bar {lookup Foo;} Bar;").statements + [foo] = qux.statements + [ref] = bar.statements + self.assertIsInstance(ref, ast.LookupReferenceStatement) + self.assertEqual(ref.lookup, foo) + + def test_lookup_reference_unknown(self): + self.assertRaisesRegex( + FeatureLibError, 'Unknown lookup "Huh"', + self.parse, "feature liga {lookup 
Huh;} liga;") + + def parse_lookupflag_(self, s): + return self.parse("lookup L {%s} L;" % s).statements[0].statements[-1] + + def test_lookupflag_format_A(self): + flag = self.parse_lookupflag_("lookupflag RightToLeft IgnoreMarks;") + self.assertIsInstance(flag, ast.LookupFlagStatement) + self.assertEqual(flag.value, 9) + self.assertIsNone(flag.markAttachment) + self.assertIsNone(flag.markFilteringSet) + + def test_lookupflag_format_A_MarkAttachmentType(self): + flag = self.parse_lookupflag_( + "@TOP_MARKS = [acute grave macron];" + "lookupflag RightToLeft MarkAttachmentType @TOP_MARKS;") + self.assertIsInstance(flag, ast.LookupFlagStatement) + self.assertEqual(flag.value, 1) + self.assertIsInstance(flag.markAttachment, ast.GlyphClassName) + self.assertEqual(flag.markAttachment.glyphSet(), + ("acute", "grave", "macron")) + self.assertIsNone(flag.markFilteringSet) + + def test_lookupflag_format_A_UseMarkFilteringSet(self): + flag = self.parse_lookupflag_( + "@BOTTOM_MARKS = [cedilla ogonek];" + "lookupflag UseMarkFilteringSet @BOTTOM_MARKS IgnoreLigatures;") + self.assertIsInstance(flag, ast.LookupFlagStatement) + self.assertEqual(flag.value, 4) + self.assertIsNone(flag.markAttachment) + self.assertIsInstance(flag.markFilteringSet, ast.GlyphClassName) + self.assertEqual(flag.markFilteringSet.glyphSet(), + ("cedilla", "ogonek")) + + def test_lookupflag_format_B(self): + flag = self.parse_lookupflag_("lookupflag 7;") + self.assertIsInstance(flag, ast.LookupFlagStatement) + self.assertEqual(flag.value, 7) + self.assertIsNone(flag.markAttachment) + self.assertIsNone(flag.markFilteringSet) + + def test_lookupflag_repeated(self): + self.assertRaisesRegex( + FeatureLibError, + 'RightToLeft can be specified only once', + self.parse, + "feature test {lookupflag RightToLeft RightToLeft;} test;") + + def test_lookupflag_unrecognized(self): + self.assertRaisesRegex( + FeatureLibError, + '"IgnoreCookies" is not a recognized lookupflag', + self.parse, "feature test {lookupflag 
IgnoreCookies;} test;") + + def test_gpos_type_1_glyph(self): + doc = self.parse("feature kern {pos one <1 2 3 4>;} kern;") + pos = doc.statements[0].statements[0] + self.assertIsInstance(pos, ast.SinglePosStatement) + [(glyphs, value)] = pos.pos + self.assertEqual(glyphstr([glyphs]), "one") + self.assertEqual(value.makeString(vertical=False), "<1 2 3 4>") + + def test_gpos_type_1_glyphclass_horizontal(self): + doc = self.parse("feature kern {pos [one two] -300;} kern;") + pos = doc.statements[0].statements[0] + self.assertIsInstance(pos, ast.SinglePosStatement) + [(glyphs, value)] = pos.pos + self.assertEqual(glyphstr([glyphs]), "[one two]") + self.assertEqual(value.makeString(vertical=False), "-300") + + def test_gpos_type_1_glyphclass_vertical(self): + doc = self.parse("feature vkrn {pos [one two] -300;} vkrn;") + pos = doc.statements[0].statements[0] + self.assertIsInstance(pos, ast.SinglePosStatement) + [(glyphs, value)] = pos.pos + self.assertEqual(glyphstr([glyphs]), "[one two]") + self.assertEqual(value.makeString(vertical=True), "-300") + + def test_gpos_type_1_multiple(self): + doc = self.parse("feature f {pos one'1 two'2 [five six]'56;} f;") + pos = doc.statements[0].statements[0] + self.assertIsInstance(pos, ast.SinglePosStatement) + [(glyphs1, val1), (glyphs2, val2), (glyphs3, val3)] = pos.pos + self.assertEqual(glyphstr([glyphs1]), "one") + self.assertEqual(val1.makeString(vertical=False), "1") + self.assertEqual(glyphstr([glyphs2]), "two") + self.assertEqual(val2.makeString(vertical=False), "2") + self.assertEqual(glyphstr([glyphs3]), "[five six]") + self.assertEqual(val3.makeString(vertical=False), "56") + self.assertEqual(pos.prefix, []) + self.assertEqual(pos.suffix, []) + + def test_gpos_type_1_enumerated(self): + self.assertRaisesRegex( + FeatureLibError, + '"enumerate" is only allowed with pair positionings', + self.parse, "feature test {enum pos T 100;} test;") + self.assertRaisesRegex( + FeatureLibError, + '"enumerate" is only allowed with 
pair positionings', + self.parse, "feature test {enumerate pos T 100;} test;") + + def test_gpos_type_1_chained(self): + doc = self.parse("feature kern {pos [A B] [T Y]' 20 comma;} kern;") + pos = doc.statements[0].statements[0] + self.assertIsInstance(pos, ast.SinglePosStatement) + [(glyphs, value)] = pos.pos + self.assertEqual(glyphstr([glyphs]), "[T Y]") + self.assertEqual(value.makeString(vertical=False), "20") + self.assertEqual(glyphstr(pos.prefix), "[A B]") + self.assertEqual(glyphstr(pos.suffix), "comma") + + def test_gpos_type_2_format_a(self): + doc = self.parse("feature kern {" + " pos [T V] -60 [a b c] <1 2 3 4>;" + "} kern;") + pos = doc.statements[0].statements[0] + self.assertEqual(type(pos), ast.PairPosStatement) + self.assertFalse(pos.enumerated) + self.assertEqual(glyphstr([pos.glyphs1]), "[T V]") + self.assertEqual(pos.valuerecord1.makeString(vertical=False), "-60") + self.assertEqual(glyphstr([pos.glyphs2]), "[a b c]") + self.assertEqual(pos.valuerecord2.makeString(vertical=False), + "<1 2 3 4>") + + def test_gpos_type_2_format_a_enumerated(self): + doc = self.parse("feature kern {" + " enum pos [T V] -60 [a b c] <1 2 3 4>;" + "} kern;") + pos = doc.statements[0].statements[0] + self.assertEqual(type(pos), ast.PairPosStatement) + self.assertTrue(pos.enumerated) + self.assertEqual(glyphstr([pos.glyphs1]), "[T V]") + self.assertEqual(pos.valuerecord1.makeString(vertical=False), "-60") + self.assertEqual(glyphstr([pos.glyphs2]), "[a b c]") + self.assertEqual(pos.valuerecord2.makeString(vertical=False), + "<1 2 3 4>") + + def test_gpos_type_2_format_a_with_null(self): + doc = self.parse("feature kern {" + " pos [T V] <1 2 3 4> [a b c] ;" + "} kern;") + pos = doc.statements[0].statements[0] + self.assertEqual(type(pos), ast.PairPosStatement) + self.assertFalse(pos.enumerated) + self.assertEqual(glyphstr([pos.glyphs1]), "[T V]") + self.assertEqual(pos.valuerecord1.makeString(vertical=False), + "<1 2 3 4>") + self.assertEqual(glyphstr([pos.glyphs2]), 
"[a b c]") + self.assertIsNone(pos.valuerecord2) + + def test_gpos_type_2_format_b(self): + doc = self.parse("feature kern {" + " pos [T V] [a b c] <1 2 3 4>;" + "} kern;") + pos = doc.statements[0].statements[0] + self.assertEqual(type(pos), ast.PairPosStatement) + self.assertFalse(pos.enumerated) + self.assertEqual(glyphstr([pos.glyphs1]), "[T V]") + self.assertEqual(pos.valuerecord1.makeString(vertical=False), + "<1 2 3 4>") + self.assertEqual(glyphstr([pos.glyphs2]), "[a b c]") + self.assertIsNone(pos.valuerecord2) + + def test_gpos_type_2_format_b_enumerated(self): + doc = self.parse("feature kern {" + " enumerate position [T V] [a b c] <1 2 3 4>;" + "} kern;") + pos = doc.statements[0].statements[0] + self.assertEqual(type(pos), ast.PairPosStatement) + self.assertTrue(pos.enumerated) + self.assertEqual(glyphstr([pos.glyphs1]), "[T V]") + self.assertEqual(pos.valuerecord1.makeString(vertical=False), + "<1 2 3 4>") + self.assertEqual(glyphstr([pos.glyphs2]), "[a b c]") + self.assertIsNone(pos.valuerecord2) + + def test_gpos_type_3(self): + doc = self.parse("feature kern {" + " position cursive A ;" + "} kern;") + pos = doc.statements[0].statements[0] + self.assertEqual(type(pos), ast.CursivePosStatement) + self.assertEqual(pos.glyphclass.glyphSet(), ("A",)) + self.assertEqual((pos.entryAnchor.x, pos.entryAnchor.y), (12, -2)) + self.assertEqual((pos.exitAnchor.x, pos.exitAnchor.y), (2, 3)) + + def test_gpos_type_3_enumerated(self): + self.assertRaisesRegex( + FeatureLibError, + '"enumerate" is not allowed with cursive attachment positioning', + self.parse, + "feature kern {" + " enumerate position cursive A ;" + "} kern;") + + def test_gpos_type_4(self): + doc = self.parse( + "markClass [acute grave] @TOP_MARKS;" + "markClass [dieresis umlaut] @TOP_MARKS;" + "markClass [cedilla] @BOTTOM_MARKS;" + "feature test {" + " position base [a e o u] " + " mark @TOP_MARKS " + " mark @BOTTOM_MARKS;" + "} test;") + pos = doc.statements[-1].statements[0] + 
self.assertEqual(type(pos), ast.MarkBasePosStatement) + self.assertEqual(pos.base.glyphSet(), ("a", "e", "o", "u")) + (a1, m1), (a2, m2) = pos.marks + self.assertEqual((a1.x, a1.y, m1.name), (250, 450, "TOP_MARKS")) + self.assertEqual((a2.x, a2.y, m2.name), (210, -10, "BOTTOM_MARKS")) + + def test_gpos_type_4_enumerated(self): + self.assertRaisesRegex( + FeatureLibError, + '"enumerate" is not allowed with ' + 'mark-to-base attachment positioning', + self.parse, + "feature kern {" + " markClass cedilla @BOTTOM_MARKS;" + " enumerate position base A mark @BOTTOM_MARKS;" + "} kern;") + + def test_gpos_type_4_not_markClass(self): + self.assertRaisesRegex( + FeatureLibError, "@MARKS is not a markClass", self.parse, + "@MARKS = [acute grave];" + "feature test {" + " position base [a e o u] mark @MARKS;" + "} test;") + + def test_gpos_type_5(self): + doc = self.parse( + "markClass [grave acute] @TOP_MARKS;" + "markClass [cedilla] @BOTTOM_MARKS;" + "feature test {" + " position " + " ligature [a_f_f_i o_f_f_i] " + " mark @TOP_MARKS " + " mark @BOTTOM_MARKS " + " ligComponent " + " mark @TOP_MARKS " + " ligComponent " + " " + " ligComponent " + " mark @BOTTOM_MARKS;" + "} test;") + pos = doc.statements[-1].statements[0] + self.assertEqual(type(pos), ast.MarkLigPosStatement) + self.assertEqual(pos.ligatures.glyphSet(), ("a_f_f_i", "o_f_f_i")) + [(a11, m11), (a12, m12)], [(a2, m2)], [], [(a4, m4)] = pos.marks + self.assertEqual((a11.x, a11.y, m11.name), (50, 600, "TOP_MARKS")) + self.assertEqual((a12.x, a12.y, m12.name), (50, -10, "BOTTOM_MARKS")) + self.assertEqual((a2.x, a2.y, m2.name), (30, 800, "TOP_MARKS")) + self.assertEqual((a4.x, a4.y, m4.name), (30, -10, "BOTTOM_MARKS")) + + def test_gpos_type_5_enumerated(self): + self.assertRaisesRegex( + FeatureLibError, + '"enumerate" is not allowed with ' + 'mark-to-ligature attachment positioning', + self.parse, + "feature test {" + " markClass cedilla @MARKS;" + " enumerate position " + " ligature f_i mark @MARKS" + " 
ligComponent ;" + "} test;") + + def test_gpos_type_5_not_markClass(self): + self.assertRaisesRegex( + FeatureLibError, "@MARKS is not a markClass", self.parse, + "@MARKS = [acute grave];" + "feature test {" + " position ligature f_i mark @MARKS;" + "} test;") + + def test_gpos_type_6(self): + doc = self.parse( + "markClass damma @MARK_CLASS_1;" + "feature test {" + " position mark hamza mark @MARK_CLASS_1;" + "} test;") + pos = doc.statements[-1].statements[0] + self.assertEqual(type(pos), ast.MarkMarkPosStatement) + self.assertEqual(pos.baseMarks.glyphSet(), ("hamza",)) + [(a1, m1)] = pos.marks + self.assertEqual((a1.x, a1.y, m1.name), (221, 301, "MARK_CLASS_1")) + + def test_gpos_type_6_enumerated(self): + self.assertRaisesRegex( + FeatureLibError, + '"enumerate" is not allowed with ' + 'mark-to-mark attachment positioning', + self.parse, + "markClass damma @MARK_CLASS_1;" + "feature test {" + " enum pos mark hamza mark @MARK_CLASS_1;" + "} test;") + + def test_gpos_type_6_not_markClass(self): + self.assertRaisesRegex( + FeatureLibError, "@MARKS is not a markClass", self.parse, + "@MARKS = [acute grave];" + "feature test {" + " position mark cedilla mark @MARKS;" + "} test;") + + def test_gpos_type_8(self): + doc = self.parse( + "lookup L1 {pos one 100;} L1; lookup L2 {pos two 200;} L2;" + "feature test {" + " pos [A a] [B b] I' lookup L1 [N n]' lookup L2 P' [Y y] [Z z];" + "} test;") + lookup1, lookup2 = doc.statements[0:2] + pos = doc.statements[-1].statements[0] + self.assertEqual(type(pos), ast.ChainContextPosStatement) + self.assertEqual(glyphstr(pos.prefix), "[A a] [B b]") + self.assertEqual(glyphstr(pos.glyphs), "I [N n] P") + self.assertEqual(glyphstr(pos.suffix), "[Y y] [Z z]") + self.assertEqual(pos.lookups, [lookup1, lookup2, None]) + + def test_gpos_type_8_lookup_with_values(self): + self.assertRaisesRegex( + FeatureLibError, + 'If "lookup" is present, no values must be specified', + self.parse, + "lookup L1 {pos one 100;} L1;" + "feature test {" + " 
pos A' lookup L1 B' 20;" + "} test;") + + def test_markClass(self): + doc = self.parse("markClass [acute grave] @MARKS;") + mc = doc.statements[0] + self.assertIsInstance(mc, ast.MarkClassDefinition) + self.assertEqual(mc.markClass.name, "MARKS") + self.assertEqual(mc.glyphSet(), ("acute", "grave")) + self.assertEqual((mc.anchor.x, mc.anchor.y), (350, 3)) + + def test_nameid_windows_utf16(self): + doc = self.parse( + r'table name { nameid 9 "M\00fcller-Lanc\00e9"; } name;') + name = doc.statements[0].statements[0] + self.assertIsInstance(name, ast.NameRecord) + self.assertEqual(name.nameID, 9) + self.assertEqual(name.platformID, 3) + self.assertEqual(name.platEncID, 1) + self.assertEqual(name.langID, 0x0409) + self.assertEqual(name.string, "Müller-Lancé") + self.assertEqual(name.asFea(), r'nameid 9 "M\00fcller-Lanc\00e9";') + + def test_nameid_windows_utf16_backslash(self): + doc = self.parse(r'table name { nameid 9 "Back\005cslash"; } name;') + name = doc.statements[0].statements[0] + self.assertEqual(name.string, r"Back\slash") + self.assertEqual(name.asFea(), r'nameid 9 "Back\005cslash";') + + def test_nameid_windows_utf16_quotation_mark(self): + doc = self.parse( + r'table name { nameid 9 "Quotation \0022Mark\0022"; } name;') + name = doc.statements[0].statements[0] + self.assertEqual(name.string, 'Quotation "Mark"') + self.assertEqual(name.asFea(), r'nameid 9 "Quotation \0022Mark\0022";') + + def test_nameid_windows_utf16_surroates(self): + doc = self.parse(r'table name { nameid 9 "Carrot \D83E\DD55"; } name;') + name = doc.statements[0].statements[0] + self.assertEqual(name.string, r"Carrot 🥕") + self.assertEqual(name.asFea(), r'nameid 9 "Carrot \d83e\dd55";') + + def test_nameid_mac_roman(self): + doc = self.parse( + r'table name { nameid 9 1 "Joachim M\9fller-Lanc\8e"; } name;') + name = doc.statements[0].statements[0] + self.assertIsInstance(name, ast.NameRecord) + self.assertEqual(name.nameID, 9) + self.assertEqual(name.platformID, 1) + 
self.assertEqual(name.platEncID, 0) + self.assertEqual(name.langID, 0) + self.assertEqual(name.string, "Joachim Müller-Lancé") + self.assertEqual(name.asFea(), + r'nameid 9 1 "Joachim M\9fller-Lanc\8e";') + + def test_nameid_mac_croatian(self): + doc = self.parse( + r'table name { nameid 9 1 0 18 "Jovica Veljovi\e6"; } name;') + name = doc.statements[0].statements[0] + self.assertEqual(name.nameID, 9) + self.assertEqual(name.platformID, 1) + self.assertEqual(name.platEncID, 0) + self.assertEqual(name.langID, 18) + self.assertEqual(name.string, "Jovica Veljović") + self.assertEqual(name.asFea(), r'nameid 9 1 0 18 "Jovica Veljovi\e6";') + + def test_nameid_unsupported_platform(self): + self.assertRaisesRegex( + FeatureLibError, "Expected platform id 1 or 3", + self.parse, 'table name { nameid 9 666 "Foo"; } name;') + + def test_rsub_format_a(self): + doc = self.parse("feature test {rsub a [b B] c' d [e E] by C;} test;") + rsub = doc.statements[0].statements[0] + self.assertEqual(type(rsub), ast.ReverseChainSingleSubstStatement) + self.assertEqual(glyphstr(rsub.old_prefix), "a [B b]") + self.assertEqual(rsub.glyphs[0].glyphSet(), ("c",)) + self.assertEqual(rsub.replacements[0].glyphSet(), ("C",)) + self.assertEqual(glyphstr(rsub.old_suffix), "d [E e]") + + def test_rsub_format_a_cid(self): + doc = self.parse(r"feature test {rsub \1 [\2 \3] \4' \5 by \6;} test;") + rsub = doc.statements[0].statements[0] + self.assertEqual(type(rsub), ast.ReverseChainSingleSubstStatement) + self.assertEqual(glyphstr(rsub.old_prefix), + "cid00001 [cid00002 cid00003]") + self.assertEqual(rsub.glyphs[0].glyphSet(), ("cid00004",)) + self.assertEqual(rsub.replacements[0].glyphSet(), ("cid00006",)) + self.assertEqual(glyphstr(rsub.old_suffix), "cid00005") + + def test_rsub_format_b(self): + doc = self.parse( + "feature smcp {" + " reversesub A B [one.fitted one.oldstyle]' C [d D] by one;" + "} smcp;") + rsub = doc.statements[0].statements[0] + self.assertEqual(type(rsub), 
ast.ReverseChainSingleSubstStatement) + self.assertEqual(glyphstr(rsub.old_prefix), "A B") + self.assertEqual(glyphstr(rsub.old_suffix), "C [D d]") + self.assertEqual(mapping(rsub), { + "one.fitted": "one", + "one.oldstyle": "one" + }) + + def test_rsub_format_c(self): + doc = self.parse( + "feature test {" + " reversesub BACK TRACK [a-d]' LOOK AHEAD by [A.sc-D.sc];" + "} test;") + rsub = doc.statements[0].statements[0] + self.assertEqual(type(rsub), ast.ReverseChainSingleSubstStatement) + self.assertEqual(glyphstr(rsub.old_prefix), "BACK TRACK") + self.assertEqual(glyphstr(rsub.old_suffix), "LOOK AHEAD") + self.assertEqual(mapping(rsub), { + "a": "A.sc", + "b": "B.sc", + "c": "C.sc", + "d": "D.sc" + }) + + def test_rsub_from(self): + self.assertRaisesRegex( + FeatureLibError, + 'Reverse chaining substitutions do not support "from"', + self.parse, "feature test {rsub a from [a.1 a.2 a.3];} test;") + + def test_rsub_nonsingle(self): + self.assertRaisesRegex( + FeatureLibError, + "In reverse chaining single substitutions, only a single glyph " + "or glyph class can be replaced", + self.parse, "feature test {rsub c d by c_d;} test;") + + def test_rsub_multiple_replacement_glyphs(self): + self.assertRaisesRegex( + FeatureLibError, + 'In reverse chaining single substitutions, the replacement ' + '\(after "by"\) must be a single glyph or glyph class', + self.parse, "feature test {rsub f_i by f i;} test;") + + def test_script(self): + doc = self.parse("feature test {script cyrl;} test;") + s = doc.statements[0].statements[0] + self.assertEqual(type(s), ast.ScriptStatement) + self.assertEqual(s.script, "cyrl") + + def test_script_dflt(self): + self.assertRaisesRegex( + FeatureLibError, + '"dflt" is not a valid script tag; use "DFLT" instead', + self.parse, "feature test {script dflt;} test;") + + def test_sub_single_format_a(self): # GSUB LookupType 1 + doc = self.parse("feature smcp {substitute a by a.sc;} smcp;") + sub = doc.statements[0].statements[0] + 
self.assertIsInstance(sub, ast.SingleSubstStatement) + self.assertEqual(glyphstr(sub.prefix), "") + self.assertEqual(mapping(sub), {"a": "a.sc"}) + self.assertEqual(glyphstr(sub.suffix), "") + + def test_sub_single_format_a_chained(self): # chain to GSUB LookupType 1 + doc = self.parse("feature test {sub [A a] d' [C] by d.alt;} test;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.SingleSubstStatement) + self.assertEqual(mapping(sub), {"d": "d.alt"}) + self.assertEqual(glyphstr(sub.prefix), "[A a]") + self.assertEqual(glyphstr(sub.suffix), "C") + + def test_sub_single_format_a_cid(self): # GSUB LookupType 1 + doc = self.parse(r"feature smcp {substitute \12345 by \78987;} smcp;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.SingleSubstStatement) + self.assertEqual(glyphstr(sub.prefix), "") + self.assertEqual(mapping(sub), {"cid12345": "cid78987"}) + self.assertEqual(glyphstr(sub.suffix), "") + + def test_sub_single_format_b(self): # GSUB LookupType 1 + doc = self.parse( + "feature smcp {" + " substitute [one.fitted one.oldstyle] by one;" + "} smcp;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.SingleSubstStatement) + self.assertEqual(mapping(sub), { + "one.fitted": "one", + "one.oldstyle": "one" + }) + self.assertEqual(glyphstr(sub.prefix), "") + self.assertEqual(glyphstr(sub.suffix), "") + + def test_sub_single_format_b_chained(self): # chain to GSUB LookupType 1 + doc = self.parse( + "feature smcp {" + " substitute PRE FIX [one.fitted one.oldstyle]' SUF FIX by one;" + "} smcp;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.SingleSubstStatement) + self.assertEqual(mapping(sub), { + "one.fitted": "one", + "one.oldstyle": "one" + }) + self.assertEqual(glyphstr(sub.prefix), "PRE FIX") + self.assertEqual(glyphstr(sub.suffix), "SUF FIX") + + def test_sub_single_format_c(self): # GSUB LookupType 1 + doc = self.parse( + "feature smcp {" + " substitute [a-d] by 
[A.sc-D.sc];" + "} smcp;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.SingleSubstStatement) + self.assertEqual(mapping(sub), { + "a": "A.sc", + "b": "B.sc", + "c": "C.sc", + "d": "D.sc" + }) + self.assertEqual(glyphstr(sub.prefix), "") + self.assertEqual(glyphstr(sub.suffix), "") + + def test_sub_single_format_c_chained(self): # chain to GSUB LookupType 1 + doc = self.parse( + "feature smcp {" + " substitute [a-d]' X Y [Z z] by [A.sc-D.sc];" + "} smcp;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.SingleSubstStatement) + self.assertEqual(mapping(sub), { + "a": "A.sc", + "b": "B.sc", + "c": "C.sc", + "d": "D.sc" + }) + self.assertEqual(glyphstr(sub.prefix), "") + self.assertEqual(glyphstr(sub.suffix), "X Y [Z z]") + + def test_sub_single_format_c_different_num_elements(self): + self.assertRaisesRegex( + FeatureLibError, + 'Expected a glyph class with 4 elements after "by", ' + 'but found a glyph class with 26 elements', + self.parse, "feature smcp {sub [a-d] by [A.sc-Z.sc];} smcp;") + + def test_sub_with_values(self): + self.assertRaisesRegex( + FeatureLibError, + "Substitution statements cannot contain values", + self.parse, "feature smcp {sub A' 20 by A.sc;} smcp;") + + def test_substitute_multiple(self): # GSUB LookupType 2 + doc = self.parse("lookup Look {substitute f_f_i by f f i;} Look;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.MultipleSubstStatement) + self.assertEqual(sub.glyph, "f_f_i") + self.assertEqual(sub.replacement, ("f", "f", "i")) + + def test_substitute_multiple_chained(self): # chain to GSUB LookupType 2 + doc = self.parse("lookup L {sub [A-C] f_f_i' [X-Z] by f f i;} L;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.MultipleSubstStatement) + self.assertEqual(sub.glyph, "f_f_i") + self.assertEqual(sub.replacement, ("f", "f", "i")) + + def test_split_marked_glyphs_runs(self): + self.assertRaisesRegex( + FeatureLibError, + 
"Unsupported contextual target sequence", + self.parse, "feature test{" + " ignore pos a' x x A';" + "} test;") + self.assertRaisesRegex( + FeatureLibError, + "Unsupported contextual target sequence", + self.parse, "lookup shift {" + " pos a <0 -10 0 0>;" + " pos A <0 10 0 0>;" + "} shift;" + "feature test {" + " sub a' lookup shift x x A' lookup shift;" + "} test;") + self.assertRaisesRegex( + FeatureLibError, + "Unsupported contextual target sequence", + self.parse, "feature test {" + " ignore sub a' x x A';" + "} test;") + self.assertRaisesRegex( + FeatureLibError, + "Unsupported contextual target sequence", + self.parse, "lookup upper {" + " sub a by A;" + "} upper;" + "lookup lower {" + " sub A by a;" + "} lower;" + "feature test {" + " sub a' lookup upper x x A' lookup lower;" + "} test;") + + def test_substitute_mix_single_multiple(self): + doc = self.parse("lookup Look {" + " sub f_f by f f;" + " sub f by f;" + " sub f_f_i by f f i;" + "} Look;") + statements = doc.statements[0].statements + for sub in statements: + self.assertIsInstance(sub, ast.MultipleSubstStatement) + self.assertEqual(statements[1].glyph, "f") + self.assertEqual(statements[1].replacement, ["f"]) + + def test_substitute_from(self): # GSUB LookupType 3 + doc = self.parse("feature test {" + " substitute a from [a.1 a.2 a.3];" + "} test;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.AlternateSubstStatement) + self.assertEqual(glyphstr(sub.prefix), "") + self.assertEqual(glyphstr([sub.glyph]), "a") + self.assertEqual(glyphstr(sub.suffix), "") + self.assertEqual(glyphstr([sub.replacement]), "[a.1 a.2 a.3]") + + def test_substitute_from_chained(self): # chain to GSUB LookupType 3 + doc = self.parse("feature test {" + " substitute A B a' [Y y] Z from [a.1 a.2 a.3];" + "} test;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.AlternateSubstStatement) + self.assertEqual(glyphstr(sub.prefix), "A B") + self.assertEqual(glyphstr([sub.glyph]), 
"a") + self.assertEqual(glyphstr(sub.suffix), "[Y y] Z") + self.assertEqual(glyphstr([sub.replacement]), "[a.1 a.2 a.3]") + + def test_substitute_from_cid(self): # GSUB LookupType 3 + doc = self.parse(r"feature test {" + r" substitute \7 from [\111 \222];" + r"} test;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.AlternateSubstStatement) + self.assertEqual(glyphstr(sub.prefix), "") + self.assertEqual(glyphstr([sub.glyph]), "cid00007") + self.assertEqual(glyphstr(sub.suffix), "") + self.assertEqual(glyphstr([sub.replacement]), "[cid00111 cid00222]") + + def test_substitute_from_glyphclass(self): # GSUB LookupType 3 + doc = self.parse("feature test {" + " @Ampersands = [ampersand.1 ampersand.2];" + " substitute ampersand from @Ampersands;" + "} test;") + [glyphclass, sub] = doc.statements[0].statements + self.assertIsInstance(sub, ast.AlternateSubstStatement) + self.assertEqual(glyphstr(sub.prefix), "") + self.assertEqual(glyphstr([sub.glyph]), "ampersand") + self.assertEqual(glyphstr(sub.suffix), "") + self.assertEqual(glyphstr([sub.replacement]), + "[ampersand.1 ampersand.2]") + + def test_substitute_ligature(self): # GSUB LookupType 4 + doc = self.parse("feature liga {substitute f f i by f_f_i;} liga;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.LigatureSubstStatement) + self.assertEqual(glyphstr(sub.glyphs), "f f i") + self.assertEqual(sub.replacement, "f_f_i") + self.assertEqual(glyphstr(sub.prefix), "") + self.assertEqual(glyphstr(sub.suffix), "") + + def test_substitute_ligature_chained(self): # chain to GSUB LookupType 4 + doc = self.parse("feature F {substitute A B f' i' Z by f_i;} F;") + sub = doc.statements[0].statements[0] + self.assertIsInstance(sub, ast.LigatureSubstStatement) + self.assertEqual(glyphstr(sub.glyphs), "f i") + self.assertEqual(sub.replacement, "f_i") + self.assertEqual(glyphstr(sub.prefix), "A B") + self.assertEqual(glyphstr(sub.suffix), "Z") + + def 
test_substitute_lookups(self): # GSUB LookupType 6 + doc = Parser(self.getpath("spec5fi1.fea"), GLYPHNAMES).parse() + [_, _, _, langsys, ligs, sub, feature] = doc.statements + self.assertEqual(feature.statements[0].lookups, [ligs, None, sub]) + self.assertEqual(feature.statements[1].lookups, [ligs, None, sub]) + + def test_substitute_missing_by(self): + self.assertRaisesRegex( + FeatureLibError, + 'Expected "by", "from" or explicit lookup references', + self.parse, "feature liga {substitute f f i;} liga;") + + def test_subtable(self): + doc = self.parse("feature test {subtable;} test;") + s = doc.statements[0].statements[0] + self.assertIsInstance(s, ast.SubtableStatement) + + def test_table_badEnd(self): + self.assertRaisesRegex( + FeatureLibError, 'Expected "GDEF"', self.parse, + "table GDEF {LigatureCaretByPos f_i 400;} ABCD;") + + def test_table_comment(self): + for table in "BASE GDEF OS/2 head hhea name vhea".split(): + doc = self.parse("table %s { # Comment\n } %s;" % (table, table)) + comment = doc.statements[0].statements[0] + self.assertIsInstance(comment, ast.Comment) + self.assertEqual(comment.text, "# Comment") + + def test_table_unsupported(self): + self.assertRaisesRegex( + FeatureLibError, '"table Foo" is not supported', self.parse, + "table Foo {LigatureCaretByPos f_i 400;} Foo;") + + def test_valuerecord_format_a_horizontal(self): + doc = self.parse("feature liga {valueRecordDef 123 foo;} liga;") + value = doc.statements[0].statements[0].value + self.assertIsNone(value.xPlacement) + self.assertIsNone(value.yPlacement) + self.assertEqual(value.xAdvance, 123) + self.assertIsNone(value.yAdvance) + self.assertIsNone(value.xPlaDevice) + self.assertIsNone(value.yPlaDevice) + self.assertIsNone(value.xAdvDevice) + self.assertIsNone(value.yAdvDevice) + self.assertEqual(value.makeString(vertical=False), "123") + + def test_valuerecord_format_a_vertical(self): + doc = self.parse("feature vkrn {valueRecordDef 123 foo;} vkrn;") + value = 
doc.statements[0].statements[0].value + self.assertIsNone(value.xPlacement) + self.assertIsNone(value.yPlacement) + self.assertIsNone(value.xAdvance) + self.assertEqual(value.yAdvance, 123) + self.assertIsNone(value.xPlaDevice) + self.assertIsNone(value.yPlaDevice) + self.assertIsNone(value.xAdvDevice) + self.assertIsNone(value.yAdvDevice) + self.assertEqual(value.makeString(vertical=True), "123") + + def test_valuerecord_format_a_zero_horizontal(self): + doc = self.parse("feature liga {valueRecordDef 0 foo;} liga;") + value = doc.statements[0].statements[0].value + self.assertIsNone(value.xPlacement) + self.assertIsNone(value.yPlacement) + self.assertEqual(value.xAdvance, 0) + self.assertIsNone(value.yAdvance) + self.assertIsNone(value.xPlaDevice) + self.assertIsNone(value.yPlaDevice) + self.assertIsNone(value.xAdvDevice) + self.assertIsNone(value.yAdvDevice) + self.assertEqual(value.makeString(vertical=False), "0") + + def test_valuerecord_format_a_zero_vertical(self): + doc = self.parse("feature vkrn {valueRecordDef 0 foo;} vkrn;") + value = doc.statements[0].statements[0].value + self.assertIsNone(value.xPlacement) + self.assertIsNone(value.yPlacement) + self.assertIsNone(value.xAdvance) + self.assertEqual(value.yAdvance, 0) + self.assertIsNone(value.xPlaDevice) + self.assertIsNone(value.yPlaDevice) + self.assertIsNone(value.xAdvDevice) + self.assertIsNone(value.yAdvDevice) + self.assertEqual(value.makeString(vertical=True), "0") + + def test_valuerecord_format_a_vertical_contexts_(self): + for tag in "vkrn vpal vhal valt".split(): + doc = self.parse( + "feature %s {valueRecordDef 77 foo;} %s;" % (tag, tag)) + value = doc.statements[0].statements[0].value + if value.yAdvance != 77: + self.fail(msg="feature %s should be a vertical context " + "for ValueRecord format A" % tag) + + def test_valuerecord_format_b(self): + doc = self.parse("feature liga {valueRecordDef <1 2 3 4> foo;} liga;") + value = doc.statements[0].statements[0].value + 
self.assertEqual(value.xPlacement, 1) + self.assertEqual(value.yPlacement, 2) + self.assertEqual(value.xAdvance, 3) + self.assertEqual(value.yAdvance, 4) + self.assertIsNone(value.xPlaDevice) + self.assertIsNone(value.yPlaDevice) + self.assertIsNone(value.xAdvDevice) + self.assertIsNone(value.yAdvDevice) + self.assertEqual(value.makeString(vertical=False), "<1 2 3 4>") + + def test_valuerecord_format_b_zero(self): + doc = self.parse("feature liga {valueRecordDef <0 0 0 0> foo;} liga;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 0) + self.assertEqual(value.yPlacement, 0) + self.assertEqual(value.xAdvance, 0) + self.assertEqual(value.yAdvance, 0) + self.assertIsNone(value.xPlaDevice) + self.assertIsNone(value.yPlaDevice) + self.assertIsNone(value.xAdvDevice) + self.assertIsNone(value.yAdvDevice) + self.assertEqual(value.makeString(vertical=False), "<0 0 0 0>") + + def test_valuerecord_format_c(self): + doc = self.parse( + "feature liga {" + " valueRecordDef <" + " 1 2 3 4" + " " + " " + " " + " " + " > foo;" + "} liga;") + value = doc.statements[0].statements[0].value + self.assertEqual(value.xPlacement, 1) + self.assertEqual(value.yPlacement, 2) + self.assertEqual(value.xAdvance, 3) + self.assertEqual(value.yAdvance, 4) + self.assertEqual(value.xPlaDevice, ((8, 88),)) + self.assertEqual(value.yPlaDevice, ((11, 111), (12, 112))) + self.assertIsNone(value.xAdvDevice) + self.assertEqual(value.yAdvDevice, ((33, -113), (44, -114), (55, 115))) + self.assertEqual(value.makeString(vertical=False), + "<1 2 3 4 " + " >") + + def test_valuerecord_format_d(self): + doc = self.parse("feature test {valueRecordDef foo;} test;") + value = doc.statements[0].statements[0].value + self.assertIsNone(value) + + def test_valuerecord_named(self): + doc = self.parse("valueRecordDef <1 2 3 4> foo;" + "feature liga {valueRecordDef bar;} liga;") + value = doc.statements[1].statements[0].value + self.assertEqual(value.xPlacement, 1) + 
self.assertEqual(value.yPlacement, 2) + self.assertEqual(value.xAdvance, 3) + self.assertEqual(value.yAdvance, 4) + + def test_valuerecord_named_unknown(self): + self.assertRaisesRegex( + FeatureLibError, "Unknown valueRecordDef \"unknown\"", + self.parse, "valueRecordDef foo;") + + def test_valuerecord_scoping(self): + [foo, liga, smcp] = self.parse( + "valueRecordDef 789 foo;" + "feature liga {valueRecordDef bar;} liga;" + "feature smcp {valueRecordDef bar;} smcp;" + ).statements + self.assertEqual(foo.value.xAdvance, 789) + self.assertEqual(liga.statements[0].value.xAdvance, 789) + self.assertEqual(smcp.statements[0].value.xAdvance, 789) + + def test_valuerecord_device_value_out_of_range(self): + self.assertRaisesRegex( + FeatureLibError, r"Device value out of valid range \(-128..127\)", + self.parse, + "valueRecordDef <1 2 3 4 " + " > foo;") + + def test_languagesystem(self): + [langsys] = self.parse("languagesystem latn DEU;").statements + self.assertEqual(langsys.script, "latn") + self.assertEqual(langsys.language, "DEU ") + self.assertRaisesRegex( + FeatureLibError, + 'For script "DFLT", the language must be "dflt"', + self.parse, "languagesystem DFLT DEU;") + self.assertRaisesRegex( + FeatureLibError, + '"dflt" is not a valid script tag; use "DFLT" instead', + self.parse, "languagesystem dflt dflt;") + self.assertRaisesRegex( + FeatureLibError, + '"DFLT" is not a valid language tag; use "dflt" instead', + self.parse, "languagesystem latn DFLT;") + self.assertRaisesRegex( + FeatureLibError, "Expected ';'", + self.parse, "languagesystem latn DEU") + self.assertRaisesRegex( + FeatureLibError, "longer than 4 characters", + self.parse, "languagesystem foobar DEU;") + self.assertRaisesRegex( + FeatureLibError, "longer than 4 characters", + self.parse, "languagesystem latn FOOBAR;") + + def test_empty_statement_ignored(self): + doc = self.parse("feature test {;} test;") + self.assertFalse(doc.statements[0].statements) + doc = self.parse(";;;") + 
self.assertFalse(doc.statements) + for table in "BASE GDEF OS/2 head hhea name vhea".split(): + doc = self.parse("table %s { ;;; } %s;" % (table, table)) + self.assertEqual(doc.statements[0].statements, []) + + def parse(self, text, glyphNames=GLYPHNAMES): + featurefile = UnicodeIO(text) + p = Parser(featurefile, glyphNames) + return p.parse() + + @staticmethod + def getpath(testfile): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", testfile) + + +class SymbolTableTest(unittest.TestCase): + def test_scopes(self): + symtab = SymbolTable() + symtab.define("foo", 23) + self.assertEqual(symtab.resolve("foo"), 23) + symtab.enter_scope() + self.assertEqual(symtab.resolve("foo"), 23) + symtab.define("foo", 42) + self.assertEqual(symtab.resolve("foo"), 42) + symtab.exit_scope() + self.assertEqual(symtab.resolve("foo"), 23) + + def test_resolve_undefined(self): + self.assertEqual(SymbolTable().resolve("abc"), None) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/merge_test.py fonttools-3.21.2/Tests/merge_test.py --- fonttools-3.0/Tests/merge_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/merge_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,118 @@ +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.merge import * +import unittest + + +class MergeIntegrationTest(unittest.TestCase): + # TODO + pass + +class gaspMergeUnitTest(unittest.TestCase): + def setUp(self): + self.merger = Merger() + + self.table1 = ttLib.newTable('gasp') + self.table1.version = 1 + self.table1.gaspRange = { + 0x8: 0xA , + 0x10: 0x5, + } + + self.table2 = ttLib.newTable('gasp') + self.table2.version = 1 + self.table2.gaspRange = { + 0x6: 0xB , + 0xFF: 0x4, + } + + self.result = ttLib.newTable('gasp') + + def test_gasp_merge_basic(self): + result = self.result.merge(self.merger, [self.table1, self.table2]) + self.assertEqual(result, self.table1) + + result = 
self.result.merge(self.merger, [self.table2, self.table1]) + self.assertEqual(result, self.table2) + + def test_gasp_merge_notImplemented(self): + result = self.result.merge(self.merger, [NotImplemented, self.table1]) + self.assertEqual(result, NotImplemented) + + result = self.result.merge(self.merger, [self.table1, NotImplemented]) + self.assertEqual(result, self.table1) + + +class CmapMergeUnitTest(unittest.TestCase): + def setUp(self): + self.merger = Merger() + self.table1 = ttLib.newTable('cmap') + self.table2 = ttLib.newTable('cmap') + self.mergedTable = ttLib.newTable('cmap') + pass + + def tearDown(self): + pass + + + def makeSubtable(self, format, platformID, platEncID, cmap): + module = ttLib.getTableModule('cmap') + subtable = module.cmap_classes[format](format) + (subtable.platformID, + subtable.platEncID, + subtable.language, + subtable.cmap) = (platformID, platEncID, 0, cmap) + return subtable + + # 4-3-1 table merged with 12-3-10 table with no dupes with codepoints outside BMP + def test_cmap_merge_no_dupes(self): + table1 = self.table1 + table2 = self.table2 + mergedTable = self.mergedTable + + cmap1 = {0x2603: 'SNOWMAN'} + table1.tables = [self.makeSubtable(4,3,1, cmap1)] + + cmap2 = {0x26C4: 'SNOWMAN WITHOUT SNOW'} + cmap2Extended = {0x1F93C: 'WRESTLERS'} + cmap2Extended.update(cmap2) + table2.tables = [self.makeSubtable(4,3,1, cmap2), self.makeSubtable(12,3,10, cmap2Extended)] + + self.merger.alternateGlyphsPerFont = [{},{}] + mergedTable.merge(self.merger, [table1, table2]) + + expectedCmap = cmap2.copy() + expectedCmap.update(cmap1) + expectedCmapExtended = cmap2Extended.copy() + expectedCmapExtended.update(cmap1) + self.assertEqual(mergedTable.numSubTables, 2) + self.assertEqual([(table.format, table.platformID, table.platEncID, table.language) for table in mergedTable.tables], + [(4,3,1,0),(12,3,10,0)]) + self.assertEqual(mergedTable.tables[0].cmap, expectedCmap) + self.assertEqual(mergedTable.tables[1].cmap, expectedCmapExtended) + + # 
Tests Issue #322 + def test_cmap_merge_three_dupes(self): + table1 = self.table1 + table2 = self.table2 + mergedTable = self.mergedTable + + cmap1 = {0x20: 'space#0', 0xA0: 'space#0'} + table1.tables = [self.makeSubtable(4,3,1,cmap1)] + cmap2 = {0x20: 'space#1', 0xA0: 'uni00A0#1'} + table2.tables = [self.makeSubtable(4,3,1,cmap2)] + + self.merger.duplicateGlyphsPerFont = [{},{}] + mergedTable.merge(self.merger, [table1, table2]) + + expectedCmap = cmap1.copy() + self.assertEqual(mergedTable.numSubTables, 1) + table = mergedTable.tables[0] + self.assertEqual((table.format, table.platformID, table.platEncID, table.language), (4,3,1,0)) + self.assertEqual(table.cmap, expectedCmap) + self.assertEqual(self.merger.duplicateGlyphsPerFont, [{}, {'space#0': 'space#1'}]) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/misc/arrayTools_test.py fonttools-3.21.2/Tests/misc/arrayTools_test.py --- fonttools-3.0/Tests/misc/arrayTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/arrayTools_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,85 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.arrayTools import ( + calcBounds, calcIntBounds, updateBounds, pointInRect, pointsInRect, + vectorLength, asInt16, normRect, scaleRect, offsetRect, insetRect, + sectRect, unionRect, rectCenter, intRect) +import math + + +def test_calcBounds(): + assert calcBounds([]) == (0, 0, 0, 0) + assert calcBounds( + [(0, 40), (0, 100), (50, 50), (80, 10)]) == (0, 10, 80, 100) + + +def test_calcIntBounds(): + assert calcIntBounds( + [(0.1, 40.1), (0.1, 100.1), (49.9, 49.9), (79.5, 9.5)] + ) == (0, 10, 80, 100) + + +def test_updateBounds(): + assert updateBounds((0, 0, 0, 0), (100, 100)) == (0, 0, 100, 100) + + +def test_pointInRect(): + assert pointInRect((50, 50), (0, 0, 100, 100)) + assert pointInRect((0, 0), (0, 0, 100, 100)) + assert 
pointInRect((100, 100), (0, 0, 100, 100)) + assert not pointInRect((101, 100), (0, 0, 100, 100)) + + +def test_pointsInRect(): + assert pointsInRect([], (0, 0, 100, 100)) == [] + assert pointsInRect( + [(50, 50), (0, 0), (100, 100), (101, 100)], + (0, 0, 100, 100)) == [True, True, True, False] + + +def test_vectorLength(): + assert vectorLength((1, 1)) == math.sqrt(2) + + +def test_asInt16(): + assert asInt16([0, 0.1, 0.5, 0.9]) == [0, 0, 1, 1] + + +def test_normRect(): + assert normRect((0, 10, 100, 200)) == (0, 10, 100, 200) + assert normRect((100, 200, 0, 10)) == (0, 10, 100, 200) + + +def test_scaleRect(): + assert scaleRect((10, 20, 50, 150), 1.5, 2) == (15.0, 40, 75.0, 300) + + +def test_offsetRect(): + assert offsetRect((10, 20, 30, 40), 5, 6) == (15, 26, 35, 46) + + +def test_insetRect(): + assert insetRect((10, 20, 50, 60), 5, 10) == (15, 30, 45, 50) + assert insetRect((10, 20, 50, 60), -5, -10) == (5, 10, 55, 70) + + +def test_sectRect(): + intersects, rect = sectRect((0, 10, 20, 30), (0, 40, 20, 50)) + assert not intersects + + intersects, rect = sectRect((0, 10, 20, 30), (5, 20, 35, 50)) + assert intersects + assert rect == (5, 20, 20, 30) + + +def test_unionRect(): + assert unionRect((0, 10, 20, 30), (0, 40, 20, 50)) == (0, 10, 20, 50) + + +def test_rectCenter(): + assert rectCenter((0, 0, 100, 200)) == (50.0, 100.0) + assert rectCenter((0, 0, 100, 199.0)) == (50.0, 99.5) + + +def test_intRect(): + assert intRect((0.9, 2.9, 3.1, 4.1)) == (0, 2, 4, 5) diff -Nru fonttools-3.0/Tests/misc/bezierTools_test.py fonttools-3.21.2/Tests/misc/bezierTools_test.py --- fonttools-3.0/Tests/misc/bezierTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/bezierTools_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,133 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.bezierTools import ( + calcQuadraticBounds, calcCubicBounds, splitLine, splitQuadratic, + 
splitCubic, splitQuadraticAtT, splitCubicAtT, solveCubic) +import pytest + + +def test_calcQuadraticBounds(): + assert calcQuadraticBounds( + (0, 0), (50, 100), (100, 0)) == (0, 0, 100, 50.0) + assert calcQuadraticBounds( + (0, 0), (100, 0), (100, 100)) == (0.0, 0.0, 100, 100) + + +def test_calcCubicBounds(): + assert calcCubicBounds( + (0, 0), (25, 100), (75, 100), (100, 0)) == ((0, 0, 100, 75.0)) + assert calcCubicBounds( + (0, 0), (50, 0), (100, 50), (100, 100)) == (0.0, 0.0, 100, 100) + assert calcCubicBounds( + (50, 0), (0, 100), (100, 100), (50, 0) + ) == pytest.approx((35.566243, 0.000000, 64.433757, 75.000000)) + + +def test_splitLine(): + assert splitLine( + (0, 0), (100, 100), where=50, isHorizontal=True + ) == [((0, 0), (50.0, 50.0)), ((50.0, 50.0), (100, 100))] + assert splitLine( + (0, 0), (100, 100), where=100, isHorizontal=True + ) == [((0, 0), (100, 100))] + assert splitLine( + (0, 0), (100, 100), where=0, isHorizontal=True + ) == [((0, 0), (0, 0)), ((0, 0), (100, 100))] + assert splitLine( + (0, 0), (100, 100), where=0, isHorizontal=False + ) == [((0, 0), (0, 0)), ((0, 0), (100, 100))] + assert splitLine( + (100, 0), (0, 0), where=50, isHorizontal=False + ) == [((100, 0), (50, 0)), ((50, 0), (0, 0))] + assert splitLine( + (0, 100), (0, 0), where=50, isHorizontal=True + ) == [((0, 100), (0, 50)), ((0, 50), (0, 0))] + assert splitLine( + (0, 100), (100, 100), where=50, isHorizontal=True + ) == [((0, 100), (100, 100))] + + +def assert_curves_approx_equal(actual_curves, expected_curves): + assert len(actual_curves) == len(expected_curves) + for acurve, ecurve in zip(actual_curves, expected_curves): + assert len(acurve) == len(ecurve) + for apt, ept in zip(acurve, ecurve): + assert apt == pytest.approx(ept) + + +def test_splitQuadratic(): + assert splitQuadratic( + (0, 0), (50, 100), (100, 0), where=150, isHorizontal=False + ) == [((0, 0), (50, 100), (100, 0))] + assert splitQuadratic( + (0, 0), (50, 100), (100, 0), where=50, isHorizontal=False + ) == 
[((0, 0), (25, 50), (50, 50)), + ((50, 50), (75, 50), (100, 0))] + assert splitQuadratic( + (0, 0), (50, 100), (100, 0), where=25, isHorizontal=False + ) == [((0, 0), (12.5, 25), (25, 37.5)), + ((25, 37.5), (62.5, 75), (100, 0))] + assert_curves_approx_equal( + splitQuadratic( + (0, 0), (50, 100), (100, 0), where=25, isHorizontal=True), + [((0, 0), (7.32233, 14.64466), (14.64466, 25)), + ((14.64466, 25), (50, 75), (85.3553, 25)), + ((85.3553, 25), (92.6777, 14.64466), (100, -7.10543e-15))]) + # XXX I'm not at all sure if the following behavior is desirable + assert splitQuadratic( + (0, 0), (50, 100), (100, 0), where=50, isHorizontal=True + ) == [((0, 0), (25, 50), (50, 50)), + ((50, 50), (50, 50), (50, 50)), + ((50, 50), (75, 50), (100, 0))] + + +def test_splitCubic(): + assert splitCubic( + (0, 0), (25, 100), (75, 100), (100, 0), where=150, isHorizontal=False + ) == [((0, 0), (25, 100), (75, 100), (100, 0))] + assert splitCubic( + (0, 0), (25, 100), (75, 100), (100, 0), where=50, isHorizontal=False + ) == [((0, 0), (12.5, 50), (31.25, 75), (50, 75)), + ((50, 75), (68.75, 75), (87.5, 50), (100, 0))] + assert_curves_approx_equal( + splitCubic( + (0, 0), (25, 100), (75, 100), (100, 0), where=25, + isHorizontal=True), + [((0, 0), (2.293792, 9.17517), (4.798045, 17.5085), (7.47414, 25)), + ((7.47414, 25), (31.2886, 91.6667), (68.7114, 91.6667), + (92.5259, 25)), + ((92.5259, 25), (95.202, 17.5085), (97.7062, 9.17517), + (100, 1.77636e-15))]) + + +def test_splitQuadraticAtT(): + assert splitQuadraticAtT( + (0, 0), (50, 100), (100, 0), 0.5 + ) == [((0, 0), (25, 50), (50, 50)), + ((50, 50), (75, 50), (100, 0))] + assert splitQuadraticAtT( + (0, 0), (50, 100), (100, 0), 0.5, 0.75 + ) == [((0, 0), (25, 50), (50, 50)), + ((50, 50), (62.5, 50), (75, 37.5)), + ((75, 37.5), (87.5, 25), (100, 0))] + + +def test_splitCubicAtT(): + assert splitCubicAtT( + (0, 0), (25, 100), (75, 100), (100, 0), 0.5 + ) == [((0, 0), (12.5, 50), (31.25, 75), (50, 75)), + ((50, 75), (68.75, 75), 
(87.5, 50), (100, 0))] + assert splitCubicAtT( + (0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75 + ) == [((0, 0), (12.5, 50), (31.25, 75), (50, 75)), + ((50, 75), (59.375, 75), (68.75, 68.75), (77.34375, 56.25)), + ((77.34375, 56.25), (85.9375, 43.75), (93.75, 25), (100, 0))] + + +def test_solveCubic(): + assert solveCubic(1, 1, -6, 0) == [-3.0, -0.0, 2.0] + assert solveCubic(-10.0, -9.0, 48.0, -29.0) == [-2.9, 1.0, 1.0] + assert solveCubic(-9.875, -9.0, 47.625, -28.75) == [-2.911392, 1.0, 1.0] + assert solveCubic(1.0, -4.5, 6.75, -3.375) == [1.5, 1.5, 1.5] + assert solveCubic(-12.0, 18.0, -9.0, 1.50023651123) == [0.5, 0.5, 0.5] + assert solveCubic(9.0, 0.0, 0.0, -7.62939453125e-05) == [-0.0, -0.0, -0.0] diff -Nru fonttools-3.0/Tests/misc/classifyTools_test.py fonttools-3.21.2/Tests/misc/classifyTools_test.py --- fonttools-3.0/Tests/misc/classifyTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/classifyTools_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.classifyTools import classify + + +def test_classify(): + assert classify([]) == ([], {}) + assert classify([[]]) == ([], {}) + assert classify([[], []]) == ([], {}) + assert classify([[1]]) == ([{1}], {1: {1}}) + assert classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}}) + assert classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) + assert classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) + assert classify([[1,2],[2,4]]) == ( + [{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}}) + assert classify([[1,2],[2,4,5]]) == ( + [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) + assert classify([[1,2],[2,4,5]], sort=False) == ( + [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) + assert classify([[1,2,9],[2,4,5]], sort=False) == ( + [{1, 9}, {4, 5}, {2}], + {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5}, 9: {1, 9}}) + assert 
classify([[1,2,9,15],[2,4,5]], sort=False) == ( + [{1, 9, 15}, {4, 5}, {2}], + {1: {1, 9, 15}, 2: {2}, 4: {4, 5}, 5: {4, 5}, 9: {1, 9, 15}, + 15: {1, 9, 15}}) + classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False) + assert set([frozenset(c) for c in classes]) == set( + [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})]) + assert mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}} diff -Nru fonttools-3.0/Tests/misc/eexec_test.py fonttools-3.21.2/Tests/misc/eexec_test.py --- fonttools-3.0/Tests/misc/eexec_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/eexec_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.eexec import decrypt, encrypt + + +def test_decrypt(): + testStr = b"\0\0asdadads asds\265" + decryptedStr, R = decrypt(testStr, 12321) + assert decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' + assert R == 36142 + + +def test_encrypt(): + testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1' + encryptedStr, R = encrypt(testStr, 12321) + assert encryptedStr == b"\0\0asdadads asds\265" + assert R == 36142 diff -Nru fonttools-3.0/Tests/misc/encodingTools_test.py fonttools-3.21.2/Tests/misc/encodingTools_test.py --- fonttools-3.0/Tests/misc/encodingTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/encodingTools_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,32 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +import unittest +from fontTools.misc.encodingTools import getEncoding + +class EncodingTest(unittest.TestCase): + + def test_encoding_unicode(self): + + self.assertEqual(getEncoding(3, 0, None), "utf_16_be") # MS Symbol is Unicode as well + self.assertEqual(getEncoding(3, 1, None), "utf_16_be") + self.assertEqual(getEncoding(3, 10, None), 
"utf_16_be") + self.assertEqual(getEncoding(0, 3, None), "utf_16_be") + + def test_encoding_macroman_misc(self): + self.assertEqual(getEncoding(1, 0, 17), "mac_turkish") + self.assertEqual(getEncoding(1, 0, 37), "mac_romanian") + self.assertEqual(getEncoding(1, 0, 45), "mac_roman") + + def test_extended_mac_encodings(self): + encoding = getEncoding(1, 1, 0) # Mac Japanese + decoded = b'\xfe'.decode(encoding) + self.assertEqual(decoded, unichr(0x2122)) + + def test_extended_unknown(self): + self.assertEqual(getEncoding(10, 11, 12), None) + self.assertEqual(getEncoding(10, 11, 12, "ascii"), "ascii") + self.assertEqual(getEncoding(10, 11, 12, default="ascii"), "ascii") + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/misc/fixedTools_test.py fonttools-3.21.2/Tests/misc/fixedTools_test.py --- fonttools-3.0/Tests/misc/fixedTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/fixedTools_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,43 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.fixedTools import fixedToFloat, floatToFixed +import unittest + + +class FixedToolsTest(unittest.TestCase): + + def test_roundtrip(self): + for bits in range(0, 15): + for value in range(-(2**(bits+1)), 2**(bits+1)): + self.assertEqual(value, floatToFixed(fixedToFloat(value, bits), bits)) + + def test_fixedToFloat_precision14(self): + self.assertEqual(0.8, fixedToFloat(13107, 14)) + self.assertEqual(0.0, fixedToFloat(0, 14)) + self.assertEqual(1.0, fixedToFloat(16384, 14)) + self.assertEqual(-1.0, fixedToFloat(-16384, 14)) + self.assertEqual(0.99994, fixedToFloat(16383, 14)) + self.assertEqual(-0.99994, fixedToFloat(-16383, 14)) + + def test_fixedToFloat_precision6(self): + self.assertAlmostEqual(-9.98, fixedToFloat(-639, 6)) + self.assertAlmostEqual(-10.0, fixedToFloat(-640, 6)) + self.assertAlmostEqual(9.98, fixedToFloat(639, 
6)) + self.assertAlmostEqual(10.0, fixedToFloat(640, 6)) + + def test_floatToFixed_precision14(self): + self.assertEqual(13107, floatToFixed(0.8, 14)) + self.assertEqual(16384, floatToFixed(1.0, 14)) + self.assertEqual(16384, floatToFixed(1, 14)) + self.assertEqual(-16384, floatToFixed(-1.0, 14)) + self.assertEqual(-16384, floatToFixed(-1, 14)) + self.assertEqual(0, floatToFixed(0, 14)) + + def test_fixedToFloat_return_float(self): + value = fixedToFloat(16384, 14) + self.assertIsInstance(value, float) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/misc/loggingTools_test.py fonttools-3.21.2/Tests/misc/loggingTools_test.py --- fonttools-3.0/Tests/misc/loggingTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/loggingTools_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,172 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.loggingTools import ( + LevelFormatter, Timer, configLogger, ChannelsFilter, LogMixin) +import logging +import textwrap +import time +import re +import pytest + + +def logger_name_generator(): + basename = "fontTools.test#" + num = 1 + while True: + yield basename+str(num) + num += 1 + +unique_logger_name = logger_name_generator() + + +@pytest.fixture +def logger(): + log = logging.getLogger(next(unique_logger_name)) + configLogger(logger=log, level="DEBUG", stream=StringIO()) + return log + + +def test_LevelFormatter(): + stream = StringIO() + handler = logging.StreamHandler(stream) + formatter = LevelFormatter( + fmt={ + '*': '[%(levelname)s] %(message)s', + 'DEBUG': '%(name)s [%(levelname)s] %(message)s', + 'INFO': '%(message)s', + }) + handler.setFormatter(formatter) + name = next(unique_logger_name) + log = logging.getLogger(name) + log.setLevel(logging.DEBUG) + log.addHandler(handler) + + log.debug("this uses a custom format string") + log.info("this also uses a custom format 
string") + log.warning("this one uses the default format string") + + assert stream.getvalue() == textwrap.dedent("""\ + %s [DEBUG] this uses a custom format string + this also uses a custom format string + [WARNING] this one uses the default format string + """ % name) + + +class TimerTest(object): + + def test_split(self): + timer = Timer() + time.sleep(0.01) + fist_lap = timer.split() + assert timer.elapsed == fist_lap + time.sleep(0.1) + second_lap = timer.split() + assert second_lap > fist_lap + assert timer.elapsed == second_lap + + def test_time(self): + timer = Timer() + time.sleep(0.01) + overall_time = timer.time() + assert overall_time > 0 + + def test_context_manager(self): + with Timer() as t: + time.sleep(0.01) + assert t.elapsed > 0 + + def test_using_logger(self, logger): + with Timer(logger, 'do something'): + time.sleep(0.01) + + assert re.match( + "Took [0-9]\.[0-9]{3}s to do something", + logger.handlers[0].stream.getvalue()) + + def test_using_logger_calling_instance(self, logger): + timer = Timer(logger) + with timer(): + time.sleep(0.01) + + assert re.match( + "elapsed time: [0-9]\.[0-9]{3}s", + logger.handlers[0].stream.getvalue()) + + # do it again but with custom level + with timer('redo it', level=logging.WARNING): + time.sleep(0.02) + + assert re.search( + "WARNING: Took [0-9]\.[0-9]{3}s to redo it", + logger.handlers[0].stream.getvalue()) + + def test_function_decorator(self, logger): + timer = Timer(logger) + + @timer() + def test1(): + time.sleep(0.01) + @timer('run test 2', level=logging.INFO) + def test2(): + time.sleep(0.02) + + test1() + + assert re.match( + "Took [0-9]\.[0-9]{3}s to run 'test1'", + logger.handlers[0].stream.getvalue()) + + test2() + + assert re.search( + "Took [0-9]\.[0-9]{3}s to run test 2", + logger.handlers[0].stream.getvalue()) + + +def test_ChannelsFilter(logger): + n = logger.name + filtr = ChannelsFilter(n+".A.B", n+".C.D") + handler = logger.handlers[0] + handler.addFilter(filtr) + stream = handler.stream 
+ + logging.getLogger(n+".A.B").debug('this record passes through') + assert 'this record passes through' in stream.getvalue() + + logging.getLogger(n+'.A.B.C').debug('records from children also pass') + assert 'records from children also pass' in stream.getvalue() + + logging.getLogger(n+'.C.D').debug('this one as well') + assert 'this one as well' in stream.getvalue() + + logging.getLogger(n+'.A.B.').debug('also this one') + assert 'also this one' in stream.getvalue() + + before = stream.getvalue() + logging.getLogger(n+'.A.F').debug('but this one does not!') + assert before == stream.getvalue() + + logging.getLogger(n+'.C.DE').debug('neither this one!') + assert before == stream.getvalue() + + +def test_LogMixin(): + + class Base(object): + pass + + class A(LogMixin, Base): + pass + + class B(A): + pass + + a = A() + b = B() + + assert hasattr(a, 'log') + assert hasattr(b, 'log') + assert isinstance(a.log, logging.Logger) + assert isinstance(b.log, logging.Logger) + assert a.log.name == "loggingTools_test.A" + assert b.log.name == "loggingTools_test.B" diff -Nru fonttools-3.0/Tests/misc/macRes_test.py fonttools-3.21.2/Tests/misc/macRes_test.py --- fonttools-3.0/Tests/misc/macRes_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/macRes_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,97 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import sys +import os +import tempfile +import unittest +from fontTools.misc.textTools import deHexStr +from fontTools.misc.macRes import ResourceReader + + +# test resource data in DeRez notation +""" +data 'TEST' (128, "name1") { $"4865 6C6C 6F" }; /* Hello */ +data 'TEST' (129, "name2") { $"576F 726C 64" }; /* World */ +data 'test' (130, "name3") { $"486F 7720 6172 6520 796F 753F" }; /* How are you? 
*/ +""" +# the same data, compiled using Rez +# $ /usr/bin/Rez testdata.rez -o compiled +# $ hexdump -v compiled/..namedfork/rsrc +TEST_RSRC_FORK = deHexStr( + "00 00 01 00 00 00 01 22 00 00 00 22 00 00 00 64 " # 0x00000000 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000010 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000020 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000030 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000040 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000050 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000060 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000070 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000080 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000090 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000A0 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000B0 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000C0 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000D0 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000E0 + "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x000000F0 + "00 00 00 05 48 65 6c 6c 6f 00 00 00 05 57 6f 72 " # 0x00000100 + "6c 64 00 00 00 0c 48 6f 77 20 61 72 65 20 79 6f " # 0x00000110 + "75 3f 00 00 00 00 00 00 00 00 00 00 00 00 00 00 " # 0x00000120 + "00 00 00 00 00 00 00 00 00 00 00 1c 00 52 00 01 " # 0x00000130 + "54 45 53 54 00 01 00 12 74 65 73 74 00 00 00 2a " # 0x00000140 + "00 80 00 00 00 00 00 00 00 00 00 00 00 81 00 06 " # 0x00000150 + "00 00 00 09 00 00 00 00 00 82 00 0c 00 00 00 12 " # 0x00000160 + "00 00 00 00 05 6e 61 6d 65 31 05 6e 61 6d 65 32 " # 0x00000170 + "05 6e 61 6d 65 33 " # 0x00000180 +) + + +class ResourceReaderTest(unittest.TestCase): + + def test_read_file(self): + infile = BytesIO(TEST_RSRC_FORK) + reader = ResourceReader(infile) + resources = [res for typ in reader.keys() for res in reader[typ]] + 
self.assertExpected(resources) + + def test_read_datafork(self): + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.write(TEST_RSRC_FORK) + try: + reader = ResourceReader(tmp.name) + resources = [res for typ in reader.keys() for res in reader[typ]] + reader.close() + self.assertExpected(resources) + finally: + os.remove(tmp.name) + + def test_read_namedfork_rsrc(self): + if sys.platform != 'darwin': + self.skipTest('Not supported on "%s"' % sys.platform) + tmp = tempfile.NamedTemporaryFile(delete=False) + tmp.close() + try: + with open(tmp.name + '/..namedfork/rsrc', 'wb') as fork: + fork.write(TEST_RSRC_FORK) + reader = ResourceReader(tmp.name) + resources = [res for typ in reader.keys() for res in reader[typ]] + reader.close() + self.assertExpected(resources) + finally: + os.remove(tmp.name) + + def assertExpected(self, resources): + self.assertRezEqual(resources[0], 'TEST', b'Hello', 128, 'name1') + self.assertRezEqual(resources[1], 'TEST', b'World', 129, 'name2') + self.assertRezEqual( + resources[2], 'test', b'How are you?', 130, 'name3') + + def assertRezEqual(self, res, type_, data, id, name): + self.assertEqual(res.type, type_) + self.assertEqual(res.data, data) + self.assertEqual(res.id, id) + self.assertEqual(res.name, name) + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/misc/psCharStrings_test.py fonttools-3.21.2/Tests/misc/psCharStrings_test.py --- fonttools-3.0/Tests/misc/psCharStrings_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/psCharStrings_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,32 @@ +from __future__ import print_function, division, absolute_import +from fontTools.cffLib import PrivateDict +from fontTools.cffLib.specializer import stringToProgram +from fontTools.misc.psCharStrings import T2CharString +import unittest + + +class T2CharStringTest(unittest.TestCase): + + @classmethod + def stringToT2CharString(cls, string): + return 
T2CharString(program=stringToProgram(string), private=PrivateDict()) + + def test_calcBounds_empty(self): + cs = self.stringToT2CharString("endchar") + bounds = cs.calcBounds() + self.assertEqual(bounds, None) + + def test_calcBounds_line(self): + cs = self.stringToT2CharString("100 100 rmoveto 40 10 rlineto -20 50 rlineto endchar") + bounds = cs.calcBounds() + self.assertEqual(bounds, (100, 100, 140, 160)) + + def test_calcBounds_curve(self): + cs = self.stringToT2CharString("100 100 rmoveto -50 -150 200 0 -50 150 rrcurveto endchar") + bounds = cs.calcBounds() + self.assertEqual(bounds, (91.90524980688875, -12.5, 208.09475019311125, 100)) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/misc/py23_test.py fonttools-3.21.2/Tests/misc/py23_test.py --- fonttools-3.0/Tests/misc/py23_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/py23_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,485 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +import filecmp +import tempfile +from subprocess import check_call +import sys +import os +import unittest + +from fontTools.misc.py23 import ( + round2, round3, isclose, redirect_stdout, redirect_stderr) + + +PIPE_SCRIPT = """\ +import sys +binary_stdin = open(sys.stdin.fileno(), mode='rb', closefd=False) +binary_stdout = open(sys.stdout.fileno(), mode='wb', closefd=False) +binary_stdout.write(binary_stdin.read()) +""" + +# the string contains a mix of line endings, plus the Win "EOF" charater (0x1A) +# 'hello\rworld\r\n\x1a\r\n' +TEST_BIN_DATA = deHexStr( + "68 65 6c 6c 6f 0d 77 6f 72 6c 64 0d 0a 1a 0d 0a" +) + +class OpenFuncWrapperTest(unittest.TestCase): + + @staticmethod + def make_temp(data): + with tempfile.NamedTemporaryFile(delete=False) as f: + f.write(tobytes(data)) + return f.name + + def diff_piped(self, data, import_statement): + script 
= self.make_temp("\n".join([import_statement, PIPE_SCRIPT])) + datafile = self.make_temp(data) + try: + with open(datafile, 'rb') as infile, \ + tempfile.NamedTemporaryFile(delete=False) as outfile: + env = dict(os.environ) + env["PYTHONPATH"] = os.pathsep.join(sys.path) + check_call( + [sys.executable, script], stdin=infile, stdout=outfile, + env=env) + result = not filecmp.cmp(infile.name, outfile.name, shallow=False) + finally: + os.remove(script) + os.remove(datafile) + os.remove(outfile.name) + return result + + def test_binary_pipe_py23_open_wrapper(self): + if self.diff_piped( + TEST_BIN_DATA, "from fontTools.misc.py23 import open"): + self.fail("Input and output data differ!") + + def test_binary_pipe_built_in_io_open(self): + if sys.version_info.major < 3 and sys.platform == 'win32': + # On Windows Python 2.x, the piped input and output data are + # expected to be different when using io.open, because of issue + # https://bugs.python.org/issue10841. + expected = True + else: + expected = False + result = self.diff_piped(TEST_BIN_DATA, "from io import open") + self.assertEqual(result, expected) + + +class Round2Test(unittest.TestCase): + """ + Test cases taken from cpython 2.7 test suite: + + https://github.com/python/cpython/blob/2.7/Lib/test/test_float.py#L748 + + Excludes the test cases that are not supported when using the `decimal` + module's `quantize` method. + """ + + def test_second_argument_type(self): + # floats should be illegal + self.assertRaises(TypeError, round2, 3.14159, 2.0) + + def test_halfway_cases(self): + # Halfway cases need special attention, since the current + # implementation has to deal with them specially. Note that + # 2.x rounds halfway values up (i.e., away from zero) while + # 3.x does round-half-to-even. 
+ self.assertAlmostEqual(round2(0.125, 2), 0.13) + self.assertAlmostEqual(round2(0.375, 2), 0.38) + self.assertAlmostEqual(round2(0.625, 2), 0.63) + self.assertAlmostEqual(round2(0.875, 2), 0.88) + self.assertAlmostEqual(round2(-0.125, 2), -0.13) + self.assertAlmostEqual(round2(-0.375, 2), -0.38) + self.assertAlmostEqual(round2(-0.625, 2), -0.63) + self.assertAlmostEqual(round2(-0.875, 2), -0.88) + + self.assertAlmostEqual(round2(0.25, 1), 0.3) + self.assertAlmostEqual(round2(0.75, 1), 0.8) + self.assertAlmostEqual(round2(-0.25, 1), -0.3) + self.assertAlmostEqual(round2(-0.75, 1), -0.8) + + self.assertEqual(round2(-6.5, 0), -7.0) + self.assertEqual(round2(-5.5, 0), -6.0) + self.assertEqual(round2(-1.5, 0), -2.0) + self.assertEqual(round2(-0.5, 0), -1.0) + self.assertEqual(round2(0.5, 0), 1.0) + self.assertEqual(round2(1.5, 0), 2.0) + self.assertEqual(round2(2.5, 0), 3.0) + self.assertEqual(round2(3.5, 0), 4.0) + self.assertEqual(round2(4.5, 0), 5.0) + self.assertEqual(round2(5.5, 0), 6.0) + self.assertEqual(round2(6.5, 0), 7.0) + + # same but without an explicit second argument; in 3.x these + # will give integers + self.assertEqual(round2(-6.5), -7.0) + self.assertEqual(round2(-5.5), -6.0) + self.assertEqual(round2(-1.5), -2.0) + self.assertEqual(round2(-0.5), -1.0) + self.assertEqual(round2(0.5), 1.0) + self.assertEqual(round2(1.5), 2.0) + self.assertEqual(round2(2.5), 3.0) + self.assertEqual(round2(3.5), 4.0) + self.assertEqual(round2(4.5), 5.0) + self.assertEqual(round2(5.5), 6.0) + self.assertEqual(round2(6.5), 7.0) + + self.assertEqual(round2(-25.0, -1), -30.0) + self.assertEqual(round2(-15.0, -1), -20.0) + self.assertEqual(round2(-5.0, -1), -10.0) + self.assertEqual(round2(5.0, -1), 10.0) + self.assertEqual(round2(15.0, -1), 20.0) + self.assertEqual(round2(25.0, -1), 30.0) + self.assertEqual(round2(35.0, -1), 40.0) + self.assertEqual(round2(45.0, -1), 50.0) + self.assertEqual(round2(55.0, -1), 60.0) + self.assertEqual(round2(65.0, -1), 70.0) + 
self.assertEqual(round2(75.0, -1), 80.0) + self.assertEqual(round2(85.0, -1), 90.0) + self.assertEqual(round2(95.0, -1), 100.0) + self.assertEqual(round2(12325.0, -1), 12330.0) + self.assertEqual(round2(0, -1), 0.0) + + self.assertEqual(round2(350.0, -2), 400.0) + self.assertEqual(round2(450.0, -2), 500.0) + + self.assertAlmostEqual(round2(0.5e21, -21), 1e21) + self.assertAlmostEqual(round2(1.5e21, -21), 2e21) + self.assertAlmostEqual(round2(2.5e21, -21), 3e21) + self.assertAlmostEqual(round2(5.5e21, -21), 6e21) + self.assertAlmostEqual(round2(8.5e21, -21), 9e21) + + self.assertAlmostEqual(round2(-1.5e22, -22), -2e22) + self.assertAlmostEqual(round2(-0.5e22, -22), -1e22) + self.assertAlmostEqual(round2(0.5e22, -22), 1e22) + self.assertAlmostEqual(round2(1.5e22, -22), 2e22) + + +class Round3Test(unittest.TestCase): + """ Same as above but results adapted for Python 3 round() """ + + def test_second_argument_type(self): + # floats should be illegal + self.assertRaises(TypeError, round3, 3.14159, 2.0) + + # None should be allowed + self.assertEqual(round3(1.0, None), 1) + # the following would raise an error with the built-in Python3.5 round: + # TypeError: 'NoneType' object cannot be interpreted as an integer + self.assertEqual(round3(1, None), 1) + + def test_halfway_cases(self): + self.assertAlmostEqual(round3(0.125, 2), 0.12) + self.assertAlmostEqual(round3(0.375, 2), 0.38) + self.assertAlmostEqual(round3(0.625, 2), 0.62) + self.assertAlmostEqual(round3(0.875, 2), 0.88) + self.assertAlmostEqual(round3(-0.125, 2), -0.12) + self.assertAlmostEqual(round3(-0.375, 2), -0.38) + self.assertAlmostEqual(round3(-0.625, 2), -0.62) + self.assertAlmostEqual(round3(-0.875, 2), -0.88) + + self.assertAlmostEqual(round3(0.25, 1), 0.2) + self.assertAlmostEqual(round3(0.75, 1), 0.8) + self.assertAlmostEqual(round3(-0.25, 1), -0.2) + self.assertAlmostEqual(round3(-0.75, 1), -0.8) + + self.assertEqual(round3(-6.5, 0), -6.0) + self.assertEqual(round3(-5.5, 0), -6.0) + 
self.assertEqual(round3(-1.5, 0), -2.0) + self.assertEqual(round3(-0.5, 0), 0.0) + self.assertEqual(round3(0.5, 0), 0.0) + self.assertEqual(round3(1.5, 0), 2.0) + self.assertEqual(round3(2.5, 0), 2.0) + self.assertEqual(round3(3.5, 0), 4.0) + self.assertEqual(round3(4.5, 0), 4.0) + self.assertEqual(round3(5.5, 0), 6.0) + self.assertEqual(round3(6.5, 0), 6.0) + + # same but without an explicit second argument; in 2.x these + # will give floats + self.assertEqual(round3(-6.5), -6) + self.assertEqual(round3(-5.5), -6) + self.assertEqual(round3(-1.5), -2.0) + self.assertEqual(round3(-0.5), 0) + self.assertEqual(round3(0.5), 0) + self.assertEqual(round3(1.5), 2) + self.assertEqual(round3(2.5), 2) + self.assertEqual(round3(3.5), 4) + self.assertEqual(round3(4.5), 4) + self.assertEqual(round3(5.5), 6) + self.assertEqual(round3(6.5), 6) + + # no ndigits and input is already an integer: output == input + rv = round3(1) + self.assertEqual(rv, 1) + self.assertTrue(isinstance(rv, int)) + rv = round3(1.0) + self.assertEqual(rv, 1) + self.assertTrue(isinstance(rv, int)) + + self.assertEqual(round3(-25.0, -1), -20.0) + self.assertEqual(round3(-15.0, -1), -20.0) + self.assertEqual(round3(-5.0, -1), 0.0) + self.assertEqual(round3(5.0, -1), 0.0) + self.assertEqual(round3(15.0, -1), 20.0) + self.assertEqual(round3(25.0, -1), 20.0) + self.assertEqual(round3(35.0, -1), 40.0) + self.assertEqual(round3(45.0, -1), 40.0) + self.assertEqual(round3(55.0, -1), 60.0) + self.assertEqual(round3(65.0, -1), 60.0) + self.assertEqual(round3(75.0, -1), 80.0) + self.assertEqual(round3(85.0, -1), 80.0) + self.assertEqual(round3(95.0, -1), 100.0) + self.assertEqual(round3(12325.0, -1), 12320.0) + self.assertEqual(round3(0, -1), 0.0) + + self.assertEqual(round3(350.0, -2), 400.0) + self.assertEqual(round3(450.0, -2), 400.0) + + self.assertAlmostEqual(round3(0.5e21, -21), 0.0) + self.assertAlmostEqual(round3(1.5e21, -21), 2e21) + self.assertAlmostEqual(round3(2.5e21, -21), 2e21) + 
self.assertAlmostEqual(round3(5.5e21, -21), 6e21) + self.assertAlmostEqual(round3(8.5e21, -21), 8e21) + + self.assertAlmostEqual(round3(-1.5e22, -22), -2e22) + self.assertAlmostEqual(round3(-0.5e22, -22), 0.0) + self.assertAlmostEqual(round3(0.5e22, -22), 0.0) + self.assertAlmostEqual(round3(1.5e22, -22), 2e22) + + +NAN = float('nan') +INF = float('inf') +NINF = float('-inf') + + +class IsCloseTests(unittest.TestCase): + """ + Tests taken from Python 3.5 test_math.py: + https://hg.python.org/cpython/file/v3.5.2/Lib/test/test_math.py + """ + isclose = staticmethod(isclose) + + def assertIsClose(self, a, b, *args, **kwargs): + self.assertTrue( + self.isclose(a, b, *args, **kwargs), + msg="%s and %s should be close!" % (a, b)) + + def assertIsNotClose(self, a, b, *args, **kwargs): + self.assertFalse( + self.isclose(a, b, *args, **kwargs), + msg="%s and %s should not be close!" % (a, b)) + + def assertAllClose(self, examples, *args, **kwargs): + for a, b in examples: + self.assertIsClose(a, b, *args, **kwargs) + + def assertAllNotClose(self, examples, *args, **kwargs): + for a, b in examples: + self.assertIsNotClose(a, b, *args, **kwargs) + + def test_negative_tolerances(self): + # ValueError should be raised if either tolerance is less than zero + with self.assertRaises(ValueError): + self.assertIsClose(1, 1, rel_tol=-1e-100) + with self.assertRaises(ValueError): + self.assertIsClose(1, 1, rel_tol=1e-100, abs_tol=-1e10) + + def test_identical(self): + # identical values must test as close + identical_examples = [ + (2.0, 2.0), + (0.1e200, 0.1e200), + (1.123e-300, 1.123e-300), + (12345, 12345.0), + (0.0, -0.0), + (345678, 345678)] + self.assertAllClose(identical_examples, rel_tol=0.0, abs_tol=0.0) + + def test_eight_decimal_places(self): + # examples that are close to 1e-8, but not 1e-9 + eight_decimal_places_examples = [ + (1e8, 1e8 + 1), + (-1e-8, -1.000000009e-8), + (1.12345678, 1.12345679)] + self.assertAllClose(eight_decimal_places_examples, rel_tol=1e-8) + 
self.assertAllNotClose(eight_decimal_places_examples, rel_tol=1e-9) + + def test_near_zero(self): + # values close to zero + near_zero_examples = [ + (1e-9, 0.0), + (-1e-9, 0.0), + (-1e-150, 0.0)] + # these should not be close to any rel_tol + self.assertAllNotClose(near_zero_examples, rel_tol=0.9) + # these should be close to abs_tol=1e-8 + self.assertAllClose(near_zero_examples, abs_tol=1e-8) + + def test_identical_infinite(self): + # these are close regardless of tolerance -- i.e. they are equal + self.assertIsClose(INF, INF) + self.assertIsClose(INF, INF, abs_tol=0.0) + self.assertIsClose(NINF, NINF) + self.assertIsClose(NINF, NINF, abs_tol=0.0) + + def test_inf_ninf_nan(self): + # these should never be close (following IEEE 754 rules for equality) + not_close_examples = [ + (NAN, NAN), + (NAN, 1e-100), + (1e-100, NAN), + (INF, NAN), + (NAN, INF), + (INF, NINF), + (INF, 1.0), + (1.0, INF), + (INF, 1e308), + (1e308, INF)] + # use largest reasonable tolerance + self.assertAllNotClose(not_close_examples, abs_tol=0.999999999999999) + + def test_zero_tolerance(self): + # test with zero tolerance + zero_tolerance_close_examples = [ + (1.0, 1.0), + (-3.4, -3.4), + (-1e-300, -1e-300)] + self.assertAllClose(zero_tolerance_close_examples, rel_tol=0.0) + + zero_tolerance_not_close_examples = [ + (1.0, 1.000000000000001), + (0.99999999999999, 1.0), + (1.0e200, .999999999999999e200)] + self.assertAllNotClose(zero_tolerance_not_close_examples, rel_tol=0.0) + + def test_assymetry(self): + # test the assymetry example from PEP 485 + self.assertAllClose([(9, 10), (10, 9)], rel_tol=0.1) + + def test_integers(self): + # test with integer values + integer_examples = [ + (100000001, 100000000), + (123456789, 123456788)] + + self.assertAllClose(integer_examples, rel_tol=1e-8) + self.assertAllNotClose(integer_examples, rel_tol=1e-9) + + def test_decimals(self): + # test with Decimal values + from decimal import Decimal + + decimal_examples = [ + (Decimal('1.00000001'), 
Decimal('1.0')), + (Decimal('1.00000001e-20'), Decimal('1.0e-20')), + (Decimal('1.00000001e-100'), Decimal('1.0e-100'))] + self.assertAllClose(decimal_examples, rel_tol=1e-8) + self.assertAllNotClose(decimal_examples, rel_tol=1e-9) + + def test_fractions(self): + # test with Fraction values + from fractions import Fraction + + # could use some more examples here! + fraction_examples = [(Fraction(1, 100000000) + 1, Fraction(1))] + self.assertAllClose(fraction_examples, rel_tol=1e-8) + self.assertAllNotClose(fraction_examples, rel_tol=1e-9) + + +@unittest.skipUnless( + (sys.version_info[0] == 2 and sys.maxunicode < 0x10FFFF), + "requires 'narrow' Python 2.7 build") +class NarrowUnicodeBuildTest(unittest.TestCase): + + def test_unichr(self): + from __builtin__ import unichr as narrow_unichr + + self.assertRaises( + ValueError, + narrow_unichr, 0xFFFF + 1) + + self.assertEqual(unichr(1114111), u'\U0010FFFF') + + self.assertRaises( + ValueError, + unichr, 0x10FFFF + 1) + + def test_byteord(self): + from __builtin__ import ord as narrow_ord + + self.assertRaises( + TypeError, + narrow_ord, u'\U00010000') + + self.assertEqual(byteord(u'\U00010000'), 0xFFFF + 1) + self.assertEqual(byteord(u'\U0010FFFF'), 1114111) + + +class TestRedirectStream: + + redirect_stream = None + orig_stream = None + + def test_no_redirect_in_init(self): + orig_stdout = getattr(sys, self.orig_stream) + self.redirect_stream(None) + self.assertIs(getattr(sys, self.orig_stream), orig_stdout) + + def test_redirect_to_string_io(self): + f = StringIO() + msg = "Consider an API like help(), which prints directly to stdout" + orig_stdout = getattr(sys, self.orig_stream) + with self.redirect_stream(f): + print(msg, file=getattr(sys, self.orig_stream)) + self.assertIs(getattr(sys, self.orig_stream), orig_stdout) + s = f.getvalue().strip() + self.assertEqual(s, msg) + + def test_enter_result_is_target(self): + f = StringIO() + with self.redirect_stream(f) as enter_result: + self.assertIs(enter_result, f) + + 
def test_cm_is_reusable(self): + f = StringIO() + write_to_f = self.redirect_stream(f) + orig_stdout = getattr(sys, self.orig_stream) + with write_to_f: + print("Hello", end=" ", file=getattr(sys, self.orig_stream)) + with write_to_f: + print("World!", file=getattr(sys, self.orig_stream)) + self.assertIs(getattr(sys, self.orig_stream), orig_stdout) + s = f.getvalue() + self.assertEqual(s, "Hello World!\n") + + def test_cm_is_reentrant(self): + f = StringIO() + write_to_f = self.redirect_stream(f) + orig_stdout = getattr(sys, self.orig_stream) + with write_to_f: + print("Hello", end=" ", file=getattr(sys, self.orig_stream)) + with write_to_f: + print("World!", file=getattr(sys, self.orig_stream)) + self.assertIs(getattr(sys, self.orig_stream), orig_stdout) + s = f.getvalue() + self.assertEqual(s, "Hello World!\n") + + +class TestRedirectStdout(TestRedirectStream, unittest.TestCase): + + redirect_stream = redirect_stdout + orig_stream = "stdout" + + +class TestRedirectStderr(TestRedirectStream, unittest.TestCase): + + redirect_stream = redirect_stderr + orig_stream = "stderr" + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/misc/testTools_test.py fonttools-3.21.2/Tests/misc/testTools_test.py --- fonttools-3.0/Tests/misc/testTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/testTools_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +import fontTools.misc.testTools as testTools +import unittest + + +class TestToolsTest(unittest.TestCase): + + def test_parseXML_str(self): + self.assertEqual(testTools.parseXML( + '' + '' + ' some ünıcòðe text' + ' ' + ' some more text' + '' + ''), [ + ("Foo", {"n": "1"}, []), + ("Foo", {"n": "2"}, [ + " some ünıcòðe text ", + ("Bar", {"color": "red"}, []), + " some more text", + ]), + 
("Foo", {"n": "3"}, []) + ]) + + def test_parseXML_bytes(self): + self.assertEqual(testTools.parseXML( + b'' + b'' + b' some \xc3\xbcn\xc4\xb1c\xc3\xb2\xc3\xb0e text' + b' ' + b' some more text' + b'' + b''), [ + ("Foo", {"n": "1"}, []), + ("Foo", {"n": "2"}, [ + " some ünıcòðe text ", + ("Bar", {"color": "red"}, []), + " some more text", + ]), + ("Foo", {"n": "3"}, []) + ]) + + def test_parseXML_str_list(self): + self.assertEqual(testTools.parseXML( + ['' + '']), [ + ("Foo", {"n": "1"}, []), + ("Foo", {"n": "2"}, []) + ]) + + def test_parseXML_bytes_list(self): + self.assertEqual(testTools.parseXML( + [b'' + b'']), [ + ("Foo", {"n": "1"}, []), + ("Foo", {"n": "2"}, []) + ]) + + def test_getXML(self): + def toXML(writer, ttFont): + writer.simpletag("simple") + writer.newline() + writer.begintag("tag", attr='value') + writer.newline() + writer.write("hello world") + writer.newline() + writer.endtag("tag") + writer.newline() # toXML always ends with a newline + + self.assertEqual(testTools.getXML(toXML), + ['', + '', + ' hello world', + '']) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/misc/textTools_test.py fonttools-3.21.2/Tests/misc/textTools_test.py --- fonttools-3.0/Tests/misc/textTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/textTools_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import pad + + +def test_pad(): + assert len(pad(b'abcd', 4)) == 4 + assert len(pad(b'abcde', 2)) == 6 + assert len(pad(b'abcde', 4)) == 8 + assert pad(b'abcdef', 4) == b'abcdef\x00\x00' + assert pad(b'abcdef', 1) == b'abcdef' diff -Nru fonttools-3.0/Tests/misc/timeTools_test.py fonttools-3.21.2/Tests/misc/timeTools_test.py --- fonttools-3.0/Tests/misc/timeTools_test.py 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/misc/timeTools_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,24 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.timeTools import asctime, timestampNow, epoch_diff +import os +import time +import pytest + + +def test_asctime(): + assert isinstance(asctime(), basestring) + assert asctime(time.gmtime(0)) == 'Thu Jan 1 00:00:00 1970' + + +def test_source_date_epoch(): + os.environ["SOURCE_DATE_EPOCH"] = "150687315" + assert timestampNow() + epoch_diff == 150687315 + + # Check that malformed value fail, any better way? + os.environ["SOURCE_DATE_EPOCH"] = "ABCDEFGHI" + with pytest.raises(ValueError): + timestampNow() + + del os.environ["SOURCE_DATE_EPOCH"] + assert timestampNow() + epoch_diff != 150687315 diff -Nru fonttools-3.0/Tests/misc/transform_test.py fonttools-3.21.2/Tests/misc/transform_test.py --- fonttools-3.0/Tests/misc/transform_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/transform_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,101 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.transform import Transform, Identity, Offset, Scale +import math +import pytest + + +class TransformTest(object): + + def test_examples(self): + t = Transform() + assert repr(t) == "" + assert t.scale(2) == Transform(2, 0, 0, 2, 0, 0) + assert t.scale(2.5, 5.5) == Transform(2.5, 0, 0, 5.5, 0, 0) + assert t.scale(2, 3).transformPoint((100, 100)) == (200, 300) + + def test__init__(self): + assert Transform(12) == Transform(12, 0, 0, 1, 0, 0) + assert Transform(dx=12) == Transform(1, 0, 0, 1, 12, 0) + assert Transform(yx=12) == Transform(1, 0, 12, 1, 0, 0) + + def test_transformPoints(self): + t = Transform(2, 0, 0, 3, 0, 0) + assert t.transformPoints( + [(0, 0), (0, 100), (100, 100), (100, 0)] + ) == [(0, 0), (0, 300), (200, 300), (200, 0)] + + def 
test_translate(self): + t = Transform() + assert t.translate(20, 30) == Transform(1, 0, 0, 1, 20, 30) + + def test_scale(self): + t = Transform() + assert t.scale(5) == Transform(5, 0, 0, 5, 0, 0) + assert t.scale(5, 6) == Transform(5, 0, 0, 6, 0, 0) + + def test_rotate(self): + t = Transform() + assert t.rotate(math.pi / 2) == Transform(0, 1, -1, 0, 0, 0) + t = Transform() + assert t.rotate(-math.pi / 2) == Transform(0, -1, 1, 0, 0, 0) + t = Transform() + assert tuple(t.rotate(math.radians(30))) == pytest.approx( + tuple(Transform(0.866025, 0.5, -0.5, 0.866025, 0, 0))) + + def test_skew(self): + t = Transform().skew(math.pi / 4) + assert tuple(t) == pytest.approx(tuple(Transform(1, 0, 1, 1, 0, 0))) + + def test_transform(self): + t = Transform(2, 0, 0, 3, 1, 6) + assert t.transform((4, 3, 2, 1, 5, 6)) == Transform(8, 9, 4, 3, 11, 24) + + def test_reverseTransform(self): + t = Transform(2, 0, 0, 3, 1, 6) + reverse_t = t.reverseTransform((4, 3, 2, 1, 5, 6)) + assert reverse_t == Transform(8, 6, 6, 3, 21, 15) + t = Transform(4, 3, 2, 1, 5, 6) + reverse_t = t.transform((2, 0, 0, 3, 1, 6)) + assert reverse_t == Transform(8, 6, 6, 3, 21, 15) + + def test_inverse(self): + t = Transform().translate(2, 3).scale(4, 5) + assert t.transformPoint((10, 20)) == (42, 103) + it = t.inverse() + assert it.transformPoint((42, 103)) == (10.0, 20.0) + assert Transform().inverse() == Transform() + + def test_toPS(self): + t = Transform().scale(2, 3).translate(4, 5) + assert t.toPS() == '[2 0 0 3 8 15]' + + def test__ne__(self): + assert Transform() != Transform(2, 0, 0, 2, 0, 0) + + def test__hash__(self): + t = Transform(12, 0, 0, 13, 0, 0) + d = {t: None} + assert t in d.keys() + + def test__bool__(self): + assert not bool(Transform()) + assert Transform(2, 0, 0, 2, 0, 0) + assert Transform(1, 0, 0, 1, 1, 0) + + def test__repr__(self): + assert repr(Transform(1, 2, 3, 4, 5, 6)) == '' + + def test_Identity(self): + assert isinstance(Identity, Transform) + assert Identity == 
Transform(1, 0, 0, 1, 0, 0) + + def test_Offset(self): + assert Offset() == Transform(1, 0, 0, 1, 0, 0) + assert Offset(1) == Transform(1, 0, 0, 1, 1, 0) + assert Offset(1, 2) == Transform(1, 0, 0, 1, 1, 2) + + def test_Scale(self): + assert Scale(1) == Transform(1, 0, 0, 1, 0, 0) + assert Scale(2) == Transform(2, 0, 0, 2, 0, 0) + assert Scale(1, 2) == Transform(1, 0, 0, 2, 0, 0) diff -Nru fonttools-3.0/Tests/misc/xmlReader_test.py fonttools-3.21.2/Tests/misc/xmlReader_test.py --- fonttools-3.0/Tests/misc/xmlReader_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/xmlReader_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,149 @@ +# -*- coding: utf-8 -*- + +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +import os +import unittest +from fontTools.ttLib import TTFont +from fontTools.misc.xmlReader import XMLReader, ProgressPrinter, BUFSIZE +import tempfile + + +class TestXMLReader(unittest.TestCase): + + def test_decode_utf8(self): + + class DebugXMLReader(XMLReader): + + def __init__(self, fileOrPath, ttFont, progress=None): + super(DebugXMLReader, self).__init__( + fileOrPath, ttFont, progress) + self.contents = [] + + def _endElementHandler(self, name): + if self.stackSize == 3: + name, attrs, content = self.root + self.contents.append(content) + super(DebugXMLReader, self)._endElementHandler(name) + + expected = 'fôôbär' + data = '''\ + + + + + %s + + + +''' % expected + + with BytesIO(data.encode('utf-8')) as tmp: + reader = DebugXMLReader(tmp, TTFont()) + reader.read() + content = strjoin(reader.contents[0]).strip() + self.assertEqual(expected, content) + + def test_normalise_newlines(self): + + class DebugXMLReader(XMLReader): + + def __init__(self, fileOrPath, ttFont, progress=None): + super(DebugXMLReader, self).__init__( + fileOrPath, ttFont, progress) + self.newlines = [] + + def _characterDataHandler(self, data): + self.newlines.extend([c for c in data 
if c in ('\r', '\n')]) + + # notice how when CR is escaped, it is not normalised by the XML parser + data = ( + '\r' # \r -> \n + ' \r\n' # \r\n -> \n + ' a line of text\n' # \n + ' escaped CR and unix newline \n' # \n -> \r\n + ' escaped CR and macintosh newline \r' # \r -> \r\n + ' escaped CR and windows newline \r\n' # \r\n -> \r\n + ' \n' # \n + '') + + with BytesIO(data.encode('utf-8')) as tmp: + reader = DebugXMLReader(tmp, TTFont()) + reader.read() + expected = ['\n'] * 3 + ['\r', '\n'] * 3 + ['\n'] + self.assertEqual(expected, reader.newlines) + + def test_progress(self): + + class DummyProgressPrinter(ProgressPrinter): + + def __init__(self, title, maxval=100): + self.label = title + self.maxval = maxval + self.pos = 0 + + def set(self, val, maxval=None): + if maxval is not None: + self.maxval = maxval + self.pos = val + + def increment(self, val=1): + self.pos += val + + def setLabel(self, text): + self.label = text + + data = ( + '\n' + ' \n' + ' %s\n' + ' \n' + '\n' + % ("z" * 2 * BUFSIZE) + ).encode('utf-8') + + dataSize = len(data) + progressBar = DummyProgressPrinter('test') + with BytesIO(data) as tmp: + reader = XMLReader(tmp, TTFont(), progress=progressBar) + self.assertEqual(progressBar.pos, 0) + reader.read() + self.assertEqual(progressBar.pos, dataSize // 100) + self.assertEqual(progressBar.maxval, dataSize // 100) + self.assertTrue('test' in progressBar.label) + with BytesIO(b"") as tmp: + reader = XMLReader(tmp, TTFont(), progress=progressBar) + reader.read() + # when data size is less than 100 bytes, 'maxval' is 1 + self.assertEqual(progressBar.maxval, 1) + + def test_close_file_path(self): + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.write(b'') + reader = XMLReader(tmp.name, TTFont()) + reader.read() + # when reading from path, the file is closed automatically at the end + self.assertTrue(reader.file.closed) + # this does nothing + reader.close() + self.assertTrue(reader.file.closed) + os.remove(tmp.name) + + def 
test_close_file_obj(self): + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.write(b'"hello"') + with open(tmp.name, "rb") as f: + reader = XMLReader(f, TTFont()) + reader.read() + # when reading from a file or file-like object, the latter is kept open + self.assertFalse(reader.file.closed) + # ... until the user explicitly closes it + reader.close() + self.assertTrue(reader.file.closed) + os.remove(tmp.name) + + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/misc/xmlWriter_test.py fonttools-3.21.2/Tests/misc/xmlWriter_test.py --- fonttools-3.0/Tests/misc/xmlWriter_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/misc/xmlWriter_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,128 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import os +import unittest +from fontTools.misc.xmlWriter import XMLWriter + +linesep = tobytes(os.linesep) +HEADER = b'' + linesep + +class TestXMLWriter(unittest.TestCase): + + def test_comment_escaped(self): + writer = XMLWriter(BytesIO()) + writer.comment("This&that are ") + self.assertEqual(HEADER + b"", writer.file.getvalue()) + + def test_comment_multiline(self): + writer = XMLWriter(BytesIO()) + writer.comment("Hello world\nHow are you?") + self.assertEqual(HEADER + b"", + writer.file.getvalue()) + + def test_encoding_default(self): + writer = XMLWriter(BytesIO()) + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_utf8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="utf8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_UTF_8(self): + # https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="UTF-8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_UTF8(self): + # 
https://github.com/behdad/fonttools/issues/246 + writer = XMLWriter(BytesIO(), encoding="UTF8") + self.assertEqual(b'' + linesep, + writer.file.getvalue()) + + def test_encoding_other(self): + self.assertRaises(Exception, XMLWriter, BytesIO(), + encoding="iso-8859-1") + + def test_write(self): + writer = XMLWriter(BytesIO()) + writer.write("foo&bar") + self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue()) + + def test_indent_dedent(self): + writer = XMLWriter(BytesIO()) + writer.write("foo") + writer.newline() + writer.indent() + writer.write("bar") + writer.newline() + writer.dedent() + writer.write("baz") + self.assertEqual(HEADER + bytesjoin(["foo", " bar", "baz"], linesep), + writer.file.getvalue()) + + def test_writecdata(self): + writer = XMLWriter(BytesIO()) + writer.writecdata("foo&bar") + self.assertEqual(HEADER + b"", writer.file.getvalue()) + + def test_simpletag(self): + writer = XMLWriter(BytesIO()) + writer.simpletag("tag", a="1", b="2") + self.assertEqual(HEADER + b'', writer.file.getvalue()) + + def test_begintag_endtag(self): + writer = XMLWriter(BytesIO()) + writer.begintag("tag", attr="value") + writer.write("content") + writer.endtag("tag") + self.assertEqual(HEADER + b'content', writer.file.getvalue()) + + def test_dumphex(self): + writer = XMLWriter(BytesIO()) + writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.") + self.assertEqual(HEADER + bytesjoin([ + "54797065 20697320 61206265 61757469", + "66756c20 67726f75 70206f66 206c6574", + "74657273 2c206e6f 74206120 67726f75", + "70206f66 20626561 75746966 756c206c", + "65747465 72732e ", ""], joiner=linesep), writer.file.getvalue()) + + def test_stringifyattrs(self): + writer = XMLWriter(BytesIO()) + expected = ' attr="0"' + self.assertEqual(expected, writer.stringifyattrs(attr=0)) + self.assertEqual(expected, writer.stringifyattrs(attr=b'0')) + self.assertEqual(expected, writer.stringifyattrs(attr='0')) + self.assertEqual(expected, 
writer.stringifyattrs(attr=u'0')) + + def test_carriage_return_escaped(self): + writer = XMLWriter(BytesIO()) + writer.write("two lines\r\nseparated by Windows line endings") + self.assertEqual( + HEADER + b'two lines \nseparated by Windows line endings', + writer.file.getvalue()) + + def test_newlinestr(self): + header = b'' + + for nls in (None, '\n', '\r\n', '\r', ''): + writer = XMLWriter(BytesIO(), newlinestr=nls) + writer.write("hello") + writer.newline() + writer.write("world") + writer.newline() + + linesep = tobytes(os.linesep) if nls is None else tobytes(nls) + + self.assertEqual( + header + linesep + b"hello" + linesep + b"world" + linesep, + writer.file.getvalue()) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/mtiLib/data/featurename-backward.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/featurename-backward.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/featurename-backward.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/featurename-backward.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/featurename-backward.txt fonttools-3.21.2/Tests/mtiLib/data/featurename-backward.txt --- fonttools-3.0/Tests/mtiLib/data/featurename-backward.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/featurename-backward.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ + +feature table begin +f0 akhn l1 +1 akh2 l1 +feature table end + +script table begin +telu default 0, 1 +tel2 default f0, 1 +script table end + +lookup l1 single +a b +lookup end diff -Nru fonttools-3.0/Tests/mtiLib/data/featurename-forward.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/featurename-forward.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/featurename-forward.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/mtiLib/data/featurename-forward.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/featurename-forward.txt fonttools-3.21.2/Tests/mtiLib/data/featurename-forward.txt --- fonttools-3.0/Tests/mtiLib/data/featurename-forward.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/featurename-forward.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ + +script table begin +telu default 0, 1 +tel2 default f0, 1 +script table end + +feature table begin +f0 akhn l1 +1 akh2 l1 +feature table end + +lookup l1 single +a b +lookup end diff -Nru fonttools-3.0/Tests/mtiLib/data/lookupnames-backward.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/lookupnames-backward.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/lookupnames-backward.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/lookupnames-backward.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/lookupnames-backward.txt fonttools-3.21.2/Tests/mtiLib/data/lookupnames-backward.txt --- fonttools-3.0/Tests/mtiLib/data/lookupnames-backward.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/lookupnames-backward.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,36 @@ + +lookup l1 single + +uvowelsignkannada uvowelsignaltkannada +uuvowelsignkannada uuvowelsignaltkannada + +lookup end + +lookup l0 chained + +backtrackclass definition begin +pakannada 1 +phakannada 1 +vakannada 1 +pevowelkannada 1 +phevowelkannada 1 +vevowelkannada 1 +class definition end + +class definition begin +uvowelsignkannada 1 +uuvowelsignkannada 1 +class definition end + +class-chain 1 1 1,l1 + +lookup 
end + +script table begin +telu default 0, 1 +script table end + +feature table begin +0 akhn l1 +1 akh2 l0 +feature table end diff -Nru fonttools-3.0/Tests/mtiLib/data/lookupnames-forward.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/lookupnames-forward.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/lookupnames-forward.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/lookupnames-forward.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/lookupnames-forward.txt fonttools-3.21.2/Tests/mtiLib/data/lookupnames-forward.txt --- fonttools-3.0/Tests/mtiLib/data/lookupnames-forward.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/lookupnames-forward.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,36 @@ + +lookup l0 chained + +backtrackclass definition begin +pakannada 1 +phakannada 1 +vakannada 1 +pevowelkannada 1 +phevowelkannada 1 +vevowelkannada 1 +class definition end + +class definition begin +uvowelsignkannada 1 +uuvowelsignkannada 1 +class definition end + +class-chain 1 1 1,l1 + +lookup end + +script table begin +telu default 0, 1 +script table end + +lookup l1 single + +uvowelsignkannada uvowelsignaltkannada +uuvowelsignkannada uuvowelsignaltkannada + +lookup end + +feature table begin +0 akhn l1 +1 akh2 l0 +feature table end diff -Nru fonttools-3.0/Tests/mtiLib/data/mixed-toplevels.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/mixed-toplevels.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/mixed-toplevels.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mixed-toplevels.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mixed-toplevels.txt fonttools-3.21.2/Tests/mtiLib/data/mixed-toplevels.txt --- fonttools-3.0/Tests/mtiLib/data/mixed-toplevels.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mixed-toplevels.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,36 @@ + +lookup 0 chained + +backtrackclass definition begin +pakannada 1 +phakannada 1 +vakannada 1 +pevowelkannada 1 +phevowelkannada 1 +vevowelkannada 1 +class definition end + +class definition begin +uvowelsignkannada 1 +uuvowelsignkannada 1 +class definition end + +class-chain 1 1 1,1 + +lookup end + +script table begin +telu default 0, 1 +script table end + +lookup 1 single + +uvowelsignkannada uvowelsignaltkannada +uuvowelsignkannada uuvowelsignaltkannada + +lookup end + +feature table begin +0 akhn 1 +1 akh2 0 +feature table end diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/chainedclass.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/mti/chainedclass.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/mti/chainedclass.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/chainedclass.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/chainedclass.txt fonttools-3.21.2/Tests/mtiLib/data/mti/chainedclass.txt --- fonttools-3.0/Tests/mtiLib/data/mti/chainedclass.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/chainedclass.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + lookup swashes-knda chained backtrackclass definition begin pakannada 1 phakannada 1 vakannada 1 pevowelkannada 1 phevowelkannada 1 vevowelkannada 1 class definition end class definition begin uvowelsignkannada 1 uuvowelsignkannada 1 class definition end class-chain 1 1 1,u-swash-knda lookup end lookup u-swash-knda single 
uvowelsignkannada uvowelsignaltkannada uuvowelsignkannada uuvowelsignaltkannada lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/chainedcoverage.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/mti/chainedcoverage.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/mti/chainedcoverage.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/chainedcoverage.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,57 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/chainedcoverage.txt fonttools-3.21.2/Tests/mtiLib/data/mti/chainedcoverage.txt --- fonttools-3.0/Tests/mtiLib/data/mti/chainedcoverage.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/chainedcoverage.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +lookup slashcontext chained RightToLeft no IgnoreBaseGlyphs no IgnoreLigatures no IgnoreMarks no backtrackcoverage definition begin zero one two three four five six seven eight nine coverage definition end inputcoverage definition begin slash coverage definition end lookaheadcoverage definition begin zero one two three four five six seven eight nine coverage definition end coverage 1, slashTofraction lookup end lookup slashTofraction single RightToLeft no IgnoreBaseGlyphs no IgnoreLigatures no IgnoreMarks no slash fraction lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/chained-glyph.ttx.GPOS fonttools-3.21.2/Tests/mtiLib/data/mti/chained-glyph.ttx.GPOS --- fonttools-3.0/Tests/mtiLib/data/mti/chained-glyph.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/chained-glyph.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/mtiLib/data/mti/chained-glyph.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/mti/chained-glyph.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/mti/chained-glyph.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/chained-glyph.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/chained-glyph.txt fonttools-3.21.2/Tests/mtiLib/data/mti/chained-glyph.txt --- fonttools-3.0/Tests/mtiLib/data/mti/chained-glyph.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/chained-glyph.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ +lookup raucontext-sinh chained + +markattachmenttype 2 + +glyph rakarsinh uvowelsignsinh 1,u2aelow-sinh +glyph rakarsinh uuvowelsignsinh 1,u2aelow-sinh + +lookup end + +lookup u2aelow-sinh single +lookup end diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/cmap.ttx fonttools-3.21.2/Tests/mtiLib/data/mti/cmap.ttx --- fonttools-3.0/Tests/mtiLib/data/mti/cmap.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/cmap.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/cmap.ttx.cmap fonttools-3.21.2/Tests/mtiLib/data/mti/cmap.ttx.cmap --- fonttools-3.0/Tests/mtiLib/data/mti/cmap.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/cmap.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/cmap.txt fonttools-3.21.2/Tests/mtiLib/data/mti/cmap.txt --- fonttools-3.0/Tests/mtiLib/data/mti/cmap.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/cmap.txt 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,52 @@ +Font Chef Table cmap + +cmap subtable 0 +platformID 0 +encodingID 3 +format 4 +language 0 +0x0000 null \#1 +0x000D CR \#2 +0x0020 space \#3 +0x0021 exclam \#4 +0x0022 quotedbl \#5 +0x0023 numbersign \#6 +0x0041 A +0x0042 B +0x0061 a +0x0062 b +end subtable + +cmap subtable 1 +platformID 1 +encodingID 0 +format 6 +language 0 +0x0000 null \#1 +0x000D CR \#2 +0x0020 space \#3 +0x0021 exclam \#4 +0x0022 quotedbl \#5 +0x0023 numbersign \#6 +0x0041 A +0x0042 B +0x0061 a +0x0062 b +end subtable + +cmap subtable 2 +platformID 3 +encodingID 1 +format 4 +language 0 +0x0000 null \#1 +0x000D CR \#2 +0x0020 space \#3 +0x0021 exclam \#4 +0x0022 quotedbl \#5 +0x0023 numbersign \#6 +0x0041 A +0x0042 B +0x0061 a +0x0062 b +end subtable diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/contextclass.txt fonttools-3.21.2/Tests/mtiLib/data/mti/contextclass.txt --- fonttools-3.0/Tests/mtiLib/data/mti/contextclass.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/contextclass.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + lookup i-ligatures-deva context class definition begin ivowelsigndeva 1 ivowelsign1deva 1 ivowelsign2deva 1 anusvaradeva 2 candrabindudeva 3 rephdeva 4 rephanusvaradeva 5 class definition end class 1,0,0,0,2 1,i-anusvara-deva 5,removemark-deva class 1,0,0,0,4 1,i-reph-deva 5,removemark-deva class 1,0,0,0,5 1,i-rephanusvara-deva 5,removemark-deva class 1,0,0,2 1,i-anusvara-deva 4,removemark-deva class 1,0,0,4 1,i-reph-deva 4,removemark-deva class 1,0,0,5 1,i-rephanusvara-deva 4,removemark-deva class 1,0,2 1,i-anusvara-deva 3,removemark-deva class 1,0,3 1,i-candrabindu-deva 3,removemark-deva class 1,0,4 1,i-reph-deva 3,removemark-deva class 1,0,5 1,i-rephanusvara-deva 3,removemark-deva lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/contextcoverage.txt fonttools-3.21.2/Tests/mtiLib/data/mti/contextcoverage.txt --- 
fonttools-3.0/Tests/mtiLib/data/mti/contextcoverage.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/contextcoverage.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +lookup slashcontext context RightToLeft no IgnoreBaseGlyphs no IgnoreLigatures no IgnoreMarks no coverage definition begin 0 zero one two three four five six seven eight nine coverage definition end coverage definition begin 1 slash coverage definition end coverage definition begin 2 zero one two three four five six seven eight nine coverage definition end coverage 35, 40 1, 8 2, slashTofraction lookup end lookup slashTofraction single RightToLeft no IgnoreBaseGlyphs no IgnoreLigatures no IgnoreMarks no slash fraction lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/context-glyph.txt fonttools-3.21.2/Tests/mtiLib/data/mti/context-glyph.txt --- fonttools-3.0/Tests/mtiLib/data/mti/context-glyph.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/context-glyph.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ + +lookup 7 context + +RightToLeft no +IgnoreBaseGlyphs no +IgnoreLigatures no +IgnoreMarks no + +glyph gabardeva, viramadeva, radeva 2, 8 +glyph jabardeva, viramadeva, radeva 2, 8 +glyph ddabardeva, viramadeva, radeva 2, 8 +glyph babardeva, viramadeva, radeva 2, 8 + +lookup end diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/featuretable.txt fonttools-3.21.2/Tests/mtiLib/data/mti/featuretable.txt --- fonttools-3.0/Tests/mtiLib/data/mti/featuretable.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/featuretable.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +script table begin telu default 0, 1, 2, 3, 4 script table end feature table begin 0 akhn akhand-telugu 1 blwf belowbase-telugu 2 abvs tripleligatures-telugu,vattucontext-telugu,markra-telugu,lowsubscript-telugu,above-subst-telugu,akhand-ra-telugu 3 psts postbase-telugu 4 haln halant-telugu 
feature table end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gdefattach.ttx.GDEF fonttools-3.21.2/Tests/mtiLib/data/mti/gdefattach.ttx.GDEF --- fonttools-3.0/Tests/mtiLib/data/mti/gdefattach.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gdefattach.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gdefattach.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gdefattach.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gdefattach.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gdefattach.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ + +attachment list begin + +A 5 +B 0 3 9 + +attachment list end diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gdefclasses.ttx.GDEF fonttools-3.21.2/Tests/mtiLib/data/mti/gdefclasses.ttx.GDEF --- fonttools-3.0/Tests/mtiLib/data/mti/gdefclasses.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gdefclasses.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gdefclasses.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gdefclasses.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gdefclasses.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gdefclasses.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + class definition begin A 1 C 1 fi 2 fl 2 breve 3 acute 3 class definition end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gdefligcaret.ttx.GDEF fonttools-3.21.2/Tests/mtiLib/data/mti/gdefligcaret.ttx.GDEF --- fonttools-3.0/Tests/mtiLib/data/mti/gdefligcaret.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gdefligcaret.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff 
-Nru fonttools-3.0/Tests/mtiLib/data/mti/gdefligcaret.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gdefligcaret.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gdefligcaret.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gdefligcaret.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ + +carets begin + +uniFB01 1 236 +ffi 2 210 450 + +carets end diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gdefmarkattach.ttx.GDEF fonttools-3.21.2/Tests/mtiLib/data/mti/gdefmarkattach.ttx.GDEF --- fonttools-3.0/Tests/mtiLib/data/mti/gdefmarkattach.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gdefmarkattach.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gdefmarkattach.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gdefmarkattach.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gdefmarkattach.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gdefmarkattach.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + mark attachment class definition begin breve 1 grave 1 commaacent 2 dotbelow 2 class definition end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gdefmarkfilter.ttx.GDEF fonttools-3.21.2/Tests/mtiLib/data/mti/gdefmarkfilter.ttx.GDEF --- fonttools-3.0/Tests/mtiLib/data/mti/gdefmarkfilter.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gdefmarkfilter.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gdefmarkfilter.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gdefmarkfilter.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gdefmarkfilter.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gdefmarkfilter.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + markfilter set definition begin breve 1 acute 1 
dotabove 1 dotbelow 2 commaaccent 2 cedilla 2 dotabove 3 dotbelow 3 set definition end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gposcursive.ttx.GPOS fonttools-3.21.2/Tests/mtiLib/data/mti/gposcursive.ttx.GPOS --- fonttools-3.0/Tests/mtiLib/data/mti/gposcursive.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gposcursive.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gposcursive.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gposcursive.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gposcursive.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gposcursive.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + lookup kernpairs cursive entry A 560, 1466 1 exit A 769, 1466 2 entry B 150, 1466 1 exit B 1186, 1091 6 lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gposkernset.ttx.GPOS fonttools-3.21.2/Tests/mtiLib/data/mti/gposkernset.ttx.GPOS --- fonttools-3.0/Tests/mtiLib/data/mti/gposkernset.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gposkernset.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,114 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gposkernset.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gposkernset.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gposkernset.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gposkernset.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ +lookup 0 kernset + +RightToLeft no +IgnoreBaseGlyphs no +IgnoreLigatures no +IgnoreMarks no + +left x 
advance Acircumflex V -10 +left x advance T acircumflex -18 +% Below are the class definitions. Above are the exceptions. +subtable end + +firstclass definition begin +A 1 +Aacute 1 +Agrave 1 +Acircumflex 1 +O 2 +Oacute 2 +Ograve 2 +Ocircumflex 2 +T 3 +class definition end + +secondclass definition begin +V 1 +a 2 +aacute 2 +agrave 2 +acircumflex 2 +class definition end + +left x advance 1 1 -50 +left x advance 2 1 -10 +left x advance 3 2 -35 + +lookup end diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gposmarktobase.ttx.GPOS fonttools-3.21.2/Tests/mtiLib/data/mti/gposmarktobase.ttx.GPOS --- fonttools-3.0/Tests/mtiLib/data/mti/gposmarktobase.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gposmarktobase.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,213 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gposmarktobase.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gposmarktobase.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gposmarktobase.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gposmarktobase.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +lookup topmarktobase-guru mark to base mark bindigurmukhi 0 -184, 1183 16 base ngagurmukhi 0 816, 1183 41 base tthagurmukhi 0 816, 1183 30 base nnagurmukhi 0 976, 1183 35 base nagurmukhi 0 816, 1183 32 base lagurmukhi 0 996, 1183 46 base lanuktagurmukhi 0 996, 1183 46 mark eematragurmukhi 0 -184, 1183 15 mark aimatragurmukhi 0 -184, 1183 28 mark oomatragurmukhi 0 -184, 1183 20 mark aumatragurmukhi 0 -184, 1183 38 base 
nganuktagurmukhi 0 816, 1183 41 base tthanuktagurmukhi 0 816, 1183 30 base nnanuktagurmukhi 0 976, 1183 35 base nanuktagurmukhi 0 816, 1183 32 mark eematrabindigurmukhi 0 -184, 1183 27 mark aimatrabindigurmukhi 0 -184, 1183 40 mark oomatrabindigurmukhi 0 -184, 1183 36 mark aumatrabindigurmukhi 0 -184, 1183 54 mark eematratippigurmukhi 0 -184, 1183 15 mark aimatratippigurmukhi 0 -184, 1183 28 mark oomatratippigurmukhi 0 -184, 1183 20 lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gpospairclass.ttx.GPOS fonttools-3.21.2/Tests/mtiLib/data/mti/gpospairclass.ttx.GPOS --- fonttools-3.0/Tests/mtiLib/data/mti/gpospairclass.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gpospairclass.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,91 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gpospairclass.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gpospairclass.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gpospairclass.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gpospairclass.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,27 @@ +lookup 0 pair + +firstclass definition begin +A 1 +Aacute 1 +Agrave 1 +Acircumflex 1 +O 2 +Oacute 2 +Ograve 2 +Ocircumflex 2 +T 3 +class definition end + +secondclass definition begin +V 1 +a 2 +aacute 2 +agrave 2 +acircumflex 2 +class definition end + +left x advance 1 1 -50 +left x advance 2 1 -10 +left x advance 3 2 -35 + +lookup end diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gpospairglyph.ttx.GPOS fonttools-3.21.2/Tests/mtiLib/data/mti/gpospairglyph.ttx.GPOS --- fonttools-3.0/Tests/mtiLib/data/mti/gpospairglyph.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gpospairglyph.ttx.GPOS 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,103 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gpospairglyph.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gpospairglyph.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gpospairglyph.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gpospairglyph.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + +lookup 0 pair + +left x advance A V -50 +left x advance Aacute V -50 +left x advance Agrave V -50 +left x advance Acircumflex V -50 +left x advance O V -10 +left x advance Oacute V -10 +left x advance Ograve V -10 +left x advance Ocircumflex V -10 +left x advance T a -35 +left x advance T aacute -35 +left x advance T agrave -35 +left x advance T acircumflex -35 + +lookup end diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gpossingle.ttx.GPOS fonttools-3.21.2/Tests/mtiLib/data/mti/gpossingle.ttx.GPOS --- fonttools-3.0/Tests/mtiLib/data/mti/gpossingle.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gpossingle.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gpossingle.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gpossingle.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gpossingle.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gpossingle.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + lookup supsToInferiors single RightToLeft no IgnoreBaseGlyphs no IgnoreLigatures no IgnoreMarks no y placement asuperior -560 y placement bsuperior -560 y placement csuperior -560 y placement dsuperior -560 y placement esuperior -560 y placement fsuperior -560 y placement gsuperior 
-560 y placement hsuperior -560 y placement isuperior -560 y placement jsuperior -560 y placement ksuperior -560 y placement lsuperior -560 y placement msuperior -560 y placement nsuperior -560 y placement osuperior -560 y placement psuperior -560 y placement qsuperior -560 y placement rsuperior -560 y placement ssuperior -560 y placement tsuperior -560 y placement usuperior -560 y placement vsuperior -560 y placement wsuperior -560 y placement xsuperior -560 y placement ysuperior -560 y placement zsuperior -560 y placement periodsuperior -560 y placement commasuperior -560 y placement dollarsuperior -560 y placement centsuperior -560 y placement aesuperior -560 y placement oesuperior -560 y placement egravesuperior -560 lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gsubalternate.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/mti/gsubalternate.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/mti/gsubalternate.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gsubalternate.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gsubalternate.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gsubalternate.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gsubalternate.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gsubalternate.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,25 @@ + +Comment: taken from Adobe garamond Pro, showing Unicode glyph references + + +lookup 27 alternate + +RightToLeft no +IgnoreBaseGlyphs no +IgnoreLigatures no +IgnoreMarks no + +U 30 U F730 U E13D U E13E U E13A U 2070 U 2080 U E13B U E139 U E13C +U 31 U F731 U E0F3 U E0F4 U E0F1 U B9 U 2081 U E0F2 
U E0F0 U E0F8 +U 32 U F732 U E133 U E134 U E131 U B2 U 2082 U E132 U E130 U E0F9 +U 33 U F733 U E12B U E12C U E129 U B3 U 2083 U E12A U E128 +U 34 U F734 U E0D4 U E0D5 U E0D2 U 2074 U 2084 U E0D3 U E0D1 +U 35 U F735 U E0CD U E0CE U E0CB U 2075 U 2085 U E0CC U E0CA +U 36 U F736 U E121 U E122 U E11F U 2076 U 2086 U E120 U E11E +U 37 U F737 U E11C U E11D U E11A U 2077 U 2087 U E11B U E119 +U 38 U F738 U E0C0 U E0C1 U E0BE U 2078 U 2088 U E0BF U E0BD +U 39 U F739 U E0EC U E0ED U E0EA U 2079 U 2089 U E0EB U E0E9 +U 2039 U E0DB U E0DC +U 203A U E0DD U E0DE + +lookup end diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gsubligature.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/mti/gsubligature.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/mti/gsubligature.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gsubligature.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gsubligature.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gsubligature.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gsubligature.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gsubligature.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +lookup latinLigatures ligature RightToLeft no IgnoreBaseGlyphs no IgnoreLigatures no IgnoreMarks no IJ I J ffi f f i ffl f f l fft f f t ffb f f b ffh f f h ffk f f k fi f i fl f l ff f f ft f t fb f b fh f h fk f k fj f j ij i j tt t t IJsmall Ismall Jsmall lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gsubmultiple.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/mti/gsubmultiple.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/mti/gsubmultiple.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gsubmultiple.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + 
diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gsubmultiple.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gsubmultiple.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gsubmultiple.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gsubmultiple.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +lookup replace-akhand-telugu multiple kassevoweltelugu kaivoweltelugu ssasubscripttelugu janyevoweltelugu jaivoweltelugu nyasubscripttelugu lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gsubreversechanined.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/mti/gsubreversechanined.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/mti/gsubreversechanined.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gsubreversechanined.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,81 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gsubreversechanined.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gsubreversechanined.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gsubreversechanined.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gsubreversechanined.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ +lookup arabicReverse reversechained RightToLeft yes IgnoreBaseGlyphs no IgnoreLigatures no IgnoreMarks yes backtrackcoverage definition begin bayi1 jeemi1 kafi1 ghafi1 laami1 kafm1 ghafm1 laamm1 coverage definition end rayf2 rayf1 reyf2 reyf1 zayf2 zayf1 yayf2 yayf1 % subtable backtrackcoverage definition begin bayi1 fayi1 kafi1 ghafi1 laami1 kafm1 ghafm1 laamm1 coverage definition end hamzayehf2 hamzayehf1 hamzayeharabf2 hamzayeharabf1 ayehf2 ayehf1 yehf2 yehf1 % subtable lookaheadcoverage definition begin ray rey zay yay coverage definition end dal dal1 del del1 zal zal1 lookup end \ No newline at end 
of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gsubsingle.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/mti/gsubsingle.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/mti/gsubsingle.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gsubsingle.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/gsubsingle.txt fonttools-3.21.2/Tests/mtiLib/data/mti/gsubsingle.txt --- fonttools-3.0/Tests/mtiLib/data/mti/gsubsingle.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/gsubsingle.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + lookup alt-fractions single RightToLeft no IgnoreBaseGlyphs no IgnoreLigatures no IgnoreMarks no onehalf onehalf.alt onequarter onequarter.alt threequarters threequarters.alt lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/mark-to-ligature.ttx.GPOS fonttools-3.21.2/Tests/mtiLib/data/mti/mark-to-ligature.ttx.GPOS --- fonttools-3.0/Tests/mtiLib/data/mti/mark-to-ligature.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/mark-to-ligature.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,826 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/mark-to-ligature.txt fonttools-3.21.2/Tests/mtiLib/data/mti/mark-to-ligature.txt --- fonttools-3.0/Tests/mtiLib/data/mti/mark-to-ligature.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/mark-to-ligature.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,93 @@ +lookup LigMk0 mark to ligature + +mark FathatanNS 0 281, 1388 0 +mark DammatanNS 0 354, 1409 0 +mark FathaNS 0 277, 1379 0 +mark DammaNS 0 394, 1444 0 +mark ShaddaNS 0 283, 1581 0 +mark SukunNS 0 220, 1474 1 +mark MaddaNS 0 397, 1472 1 +mark HamzaAboveNS 0 266, 1425 2 +mark UltapeshNS 0 454, 1128 1 +mark DammaRflxNS 0 454, 1128 1 +mark Fatha2dotsNS 0 272, 1097 0 +mark AlefSuperiorNS 0 141, 874 1 +mark ShaddaAlefNS 0 283, 1581 0 +mark WaslaNS 0 357, 1470 0 +mark OneDotAboveNS 0 215, 1001 3 +mark TwoDotsAboveNS 0 346, 1003 0 +mark ThreeDotsUpAboveNS 0 346, 1003 0 +mark ThreeDotsDownAboveNS 0 346, 687 0 +mark FourDotsAboveNS 0 347, 860 0 +mark TwoDotsVerticalAboveNS 0 357, 707 1 +mark OneDotAbove2NS 0 215, 1001 3 +mark 
SharetKafNS 0 382, 520 1 +mark ShaddaKasratanNS 0 315, 1164 55 +mark ShaddaKasraNS 0 426, 1340 55 +mark ShaddaFathatanNS 0 369, 1604 0 +mark ShaddaDammatanNS 0 283, 1581 0 +mark ShaddaDammaNS 0 283, 1581 0 +ligature LamAlefFin.short 1 2 0 1122, 1620 96 +ligature LamAlefFin.short 2 2 0 162, 1487 99 +ligature LamAlefFin.cup 1 2 0 1122, 1620 105 +ligature LamAlefFin.cup 2 2 0 162, 1487 106 +ligature LamAlefFin.cut 1 2 0 1122, 1620 110 +ligature LamAlefFin.cut 2 2 0 162, 1487 108 +ligature BehxIni_RehFin 1 2 0 618, 813 59 +ligature BehxIni_RehFin 2 2 0 282, 523 58 +ligature BehxIni_RehFin.b 1 2 0 708, 813 65 +ligature BehxIni_RehFin.b 2 2 0 282, 543 64 +ligature BehxIni_NoonGhunnaFin 1 2 0 1205, 871 78 +ligature BehxIni_NoonGhunnaFin 2 2 0 516, 565 74 +ligature BehxIni_MeemFin 1 2 0 785, 1255 90 +ligature BehxIni_MeemFin 2 2 0 269, 952 93 +ligature HahIni_YehBarreeFin 1 2 0 1017, 732 83 +ligature HahIni_YehBarreeFin 2 2 0 344, 743 86 +ligature AinMed_YehBarreeFin 1 2 0 774, 860 105 +ligature AinMed_YehBarreeFin 2 2 0 312, 618 108 +ligature TahIni_YehBarreeFin 1 2 0 1253, 1065 143 +ligature TahIni_YehBarreeFin 2 2 0 263, 419 142 +ligature BehxMed_NoonGhunnaFin 1 2 0 1205, 1061 78 +ligature BehxMed_NoonGhunnaFin 2 2 0 516, 755 74 +ligature KafMed_MeemFin 1 2 0 238, 1435 182 +ligature KafMed_MeemFin 2 2 0 84, 308 186 +ligature LamMed_MeemFin 1 2 0 555, 1627 154 +ligature LamMed_MeemFin 2 2 0 175, 472 155 +ligature LamMed_MeemFin.b 1 2 0 555, 1627 156 +ligature LamMed_MeemFin.b 2 2 0 175, 472 157 +ligature LamIni_MeemFin 1 2 0 386, 1808 70 +ligature LamIni_MeemFin 2 2 0 130, 701 150 +ligature AinIni.12m_MeemFin.02 1 2 0 720, 1281 160 +ligature AinIni.12m_MeemFin.02 2 2 0 75, 631 158 +ligature KafMed.12_YehxFin.01 1 2 0 807, 1457 106 +ligature KafMed.12_YehxFin.01 2 2 0 440, 418 176 +ligature LamMed_YehxFin 1 2 0 925, 1620 157 +ligature LamMed_YehxFin 2 2 0 490, 196 152 +ligature LamMed_YehxFin.cup 1 2 0 935, 1620 159 +ligature LamMed_YehxFin.cup 2 2 0 500, 196 155 
+ligature FehxMed_YehBarreeFin 1 2 0 397, 804 158 +ligature FehxMed_YehBarreeFin 2 2 0 6,-65 161 +ligature KafIni_YehBarreeFin 1 2 0 496, 1549 81 +ligature KafIni_YehBarreeFin 2 2 0 328, 339 171 +ligature KafMed_YehBarreeFin 1 2 0 465, 1407 106 +ligature KafMed_YehBarreeFin 2 2 0 328, 251 197 +ligature LamIni_YehBarreeFin 1 2 0 719, 1633 70 +ligature LamIni_YehBarreeFin 2 2 0 328, 339 160 +ligature AinIni_YehBarreeFin 1 2 0 766, 1036 82 +ligature AinIni_YehBarreeFin 2 2 0 194, 312 151 +ligature BehxMed_YehxFin 1 2 0 913,-285 117 +ligature BehxMed_YehxFin 2 2 0 1223,-305 112 +ligature BehxMed_MeemFin.py 1 2 0 777, 699 99 +ligature BehxMed_MeemFin.py 2 2 0 194, 481 102 +ligature BehxMed_RehFin 1 2 0 708, 1083 65 +ligature BehxMed_RehFin 2 2 0 282, 813 64 +ligature BehxMed_RehFin.cup 1 2 0 708, 1083 65 +ligature BehxMed_RehFin.cup 2 2 0 282, 813 64 +ligature BehxMed_NoonGhunnaFin.cup 1 2 0 1205, 1061 78 +ligature BehxMed_NoonGhunnaFin.cup 2 2 0 516, 755 74 +ligature LamAlefSep 1 2 0 1055, 1583 105 +ligature LamAlefSep 2 2 0 198, 1528 106 +ligature LamAlefFin 1 2 0 1122, 1620 98 +ligature LamAlefFin 2 2 0 162, 1487 99 + +lookup end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/README fonttools-3.21.2/Tests/mtiLib/data/mti/README --- fonttools-3.0/Tests/mtiLib/data/mti/README 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/README 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ +The *.txt data in this directory was imported from: + +https://github.com/Monotype/OpenType_Table_Source/tree/gh-pages/downloads + +at the following revision: + +8a4db481c63efe468092d7e9cd149d2e9786369a + +plus fixes for the following issues: + +https://github.com/Monotype/OpenType_Table_Source/issues/11 +https://github.com/Monotype/OpenType_Table_Source/issues/13 diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/scripttable.ttx.GPOS fonttools-3.21.2/Tests/mtiLib/data/mti/scripttable.ttx.GPOS --- 
fonttools-3.0/Tests/mtiLib/data/mti/scripttable.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/scripttable.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/scripttable.ttx.GSUB fonttools-3.21.2/Tests/mtiLib/data/mti/scripttable.ttx.GSUB --- fonttools-3.0/Tests/mtiLib/data/mti/scripttable.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/scripttable.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/mtiLib/data/mti/scripttable.txt fonttools-3.21.2/Tests/mtiLib/data/mti/scripttable.txt --- fonttools-3.0/Tests/mtiLib/data/mti/scripttable.txt 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/data/mti/scripttable.txt 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + script table begin cyrl default 3, 4, 1 grek default 3, 4, 2 latn default 3, 4, 0 latn DEU 3, 4, 0 latn ROM 3, 4, 0 latn TRK 3, 4, 0 latn VIT 3, 4, 5, 0 script table end \ No newline at end of file diff -Nru fonttools-3.0/Tests/mtiLib/mti_test.py fonttools-3.21.2/Tests/mtiLib/mti_test.py --- fonttools-3.0/Tests/mtiLib/mti_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/mtiLib/mti_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,236 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTFont +from fontTools import mtiLib +import difflib +import os +import sys +import unittest + + +class MtiTest(unittest.TestCase): + + GLYPH_ORDER = ['.notdef', + 'a', 'b', 'pakannada', 'phakannada', 'vakannada', 'pevowelkannada', + 'phevowelkannada', 'vevowelkannada', 'uvowelsignkannada', 'uuvowelsignkannada', + 'uvowelsignaltkannada', 'uuvowelsignaltkannada', 
'uuvowelsignsinh', + 'uvowelsignsinh', 'rakarsinh', 'zero', 'one', 'two', 'three', 'four', 'five', + 'six', 'seven', 'eight', 'nine', 'slash', 'fraction', 'A', 'B', 'C', 'fi', + 'fl', 'breve', 'acute', 'uniFB01', 'ffi', 'grave', 'commaacent', 'dotbelow', + 'dotabove', 'cedilla', 'commaaccent', 'Acircumflex', 'V', 'T', 'acircumflex', + 'Aacute', 'Agrave', 'O', 'Oacute', 'Ograve', 'Ocircumflex', 'aacute', 'agrave', + 'aimatrabindigurmukhi', 'aimatragurmukhi', 'aimatratippigurmukhi', + 'aumatrabindigurmukhi', 'aumatragurmukhi', 'bindigurmukhi', + 'eematrabindigurmukhi', 'eematragurmukhi', 'eematratippigurmukhi', + 'oomatrabindigurmukhi', 'oomatragurmukhi', 'oomatratippigurmukhi', + 'lagurmukhi', 'lanuktagurmukhi', 'nagurmukhi', 'nanuktagurmukhi', + 'ngagurmukhi', 'nganuktagurmukhi', 'nnagurmukhi', 'nnanuktagurmukhi', + 'tthagurmukhi', 'tthanuktagurmukhi', 'bsuperior', 'isuperior', 'vsuperior', + 'wsuperior', 'periodsuperior', 'osuperior', 'tsuperior', 'dollarsuperior', + 'fsuperior', 'gsuperior', 'zsuperior', 'dsuperior', 'psuperior', 'hsuperior', + 'oesuperior', 'aesuperior', 'centsuperior', 'esuperior', 'lsuperior', + 'qsuperior', 'csuperior', 'asuperior', 'commasuperior', 'xsuperior', + 'egravesuperior', 'usuperior', 'rsuperior', 'nsuperior', 'ssuperior', + 'msuperior', 'jsuperior', 'ysuperior', 'ksuperior', 'guilsinglright', + 'guilsinglleft', 'uniF737', 'uniE11C', 'uniE11D', 'uniE11A', 'uni2077', + 'uni2087', 'uniE11B', 'uniE119', 'uniE0DD', 'uniE0DE', 'uniF736', 'uniE121', + 'uniE122', 'uniE11F', 'uni2076', 'uni2086', 'uniE120', 'uniE11E', 'uniE0DB', + 'uniE0DC', 'uniF733', 'uniE12B', 'uniE12C', 'uniE129', 'uni00B3', 'uni2083', + 'uniE12A', 'uniE128', 'uniF732', 'uniE133', 'uniE134', 'uniE131', 'uni00B2', + 'uni2082', 'uniE132', 'uniE130', 'uniE0F9', 'uniF734', 'uniE0D4', 'uniE0D5', + 'uniE0D2', 'uni2074', 'uni2084', 'uniE0D3', 'uniE0D1', 'uniF730', 'uniE13D', + 'uniE13E', 'uniE13A', 'uni2070', 'uni2080', 'uniE13B', 'uniE139', 'uniE13C', + 'uniF739', 'uniE0EC', 
'uniE0ED', 'uniE0EA', 'uni2079', 'uni2089', 'uniE0EB', + 'uniE0E9', 'uniF735', 'uniE0CD', 'uniE0CE', 'uniE0CB', 'uni2075', 'uni2085', + 'uniE0CC', 'uniE0CA', 'uniF731', 'uniE0F3', 'uniE0F4', 'uniE0F1', 'uni00B9', + 'uni2081', 'uniE0F2', 'uniE0F0', 'uniE0F8', 'uniF738', 'uniE0C0', 'uniE0C1', + 'uniE0BE', 'uni2078', 'uni2088', 'uniE0BF', 'uniE0BD', 'I', 'Ismall', 't', 'i', + 'f', 'IJ', 'J', 'IJsmall', 'Jsmall', 'tt', 'ij', 'j', 'ffb', 'ffh', 'h', 'ffk', + 'k', 'ffl', 'l', 'fft', 'fb', 'ff', 'fh', 'fj', 'fk', 'ft', 'janyevoweltelugu', + 'kassevoweltelugu', 'jaivoweltelugu', 'nyasubscripttelugu', 'kaivoweltelugu', + 'ssasubscripttelugu', 'bayi1', 'jeemi1', 'kafi1', 'ghafi1', 'laami1', 'kafm1', + 'ghafm1', 'laamm1', 'rayf2', 'reyf2', 'yayf2', 'zayf2', 'fayi1', 'ayehf2', + 'hamzayeharabf2', 'hamzayehf2', 'yehf2', 'ray', 'rey', 'zay', 'yay', 'dal', + 'del', 'zal', 'rayf1', 'reyf1', 'yayf1', 'zayf1', 'ayehf1', 'hamzayeharabf1', + 'hamzayehf1', 'yehf1', 'dal1', 'del1', 'zal1', 'onehalf', 'onehalf.alt', + 'onequarter', 'onequarter.alt', 'threequarters', 'threequarters.alt', + 'AlefSuperiorNS', 'DammaNS', 'DammaRflxNS', 'DammatanNS', 'Fatha2dotsNS', + 'FathaNS', 'FathatanNS', 'FourDotsAboveNS', 'HamzaAboveNS', 'MaddaNS', + 'OneDotAbove2NS', 'OneDotAboveNS', 'ShaddaAlefNS', 'ShaddaDammaNS', + 'ShaddaDammatanNS', 'ShaddaFathatanNS', 'ShaddaKasraNS', 'ShaddaKasratanNS', + 'ShaddaNS', 'SharetKafNS', 'SukunNS', 'ThreeDotsDownAboveNS', + 'ThreeDotsUpAboveNS', 'TwoDotsAboveNS', 'TwoDotsVerticalAboveNS', 'UltapeshNS', + 'WaslaNS', 'AinIni.12m_MeemFin.02', 'AinIni_YehBarreeFin', + 'AinMed_YehBarreeFin', 'BehxIni_MeemFin', 'BehxIni_NoonGhunnaFin', + 'BehxIni_RehFin', 'BehxIni_RehFin.b', 'BehxMed_MeemFin.py', + 'BehxMed_NoonGhunnaFin', 'BehxMed_NoonGhunnaFin.cup', 'BehxMed_RehFin', + 'BehxMed_RehFin.cup', 'BehxMed_YehxFin', 'FehxMed_YehBarreeFin', + 'HahIni_YehBarreeFin', 'KafIni_YehBarreeFin', 'KafMed.12_YehxFin.01', + 'KafMed_MeemFin', 'KafMed_YehBarreeFin', 'LamAlefFin', 
'LamAlefFin.cup', + 'LamAlefFin.cut', 'LamAlefFin.short', 'LamAlefSep', 'LamIni_MeemFin', + 'LamIni_YehBarreeFin', 'LamMed_MeemFin', 'LamMed_MeemFin.b', 'LamMed_YehxFin', + 'LamMed_YehxFin.cup', 'TahIni_YehBarreeFin', 'null', 'CR', 'space', + 'exclam', 'quotedbl', 'numbersign', + ] + + # Feature files in data/*.txt; output gets compared to data/*.ttx. + TESTS = { + None: ( + 'mti/cmap', + ), + 'cmap': ( + 'mti/cmap', + ), + 'GSUB': ( + 'featurename-backward', + 'featurename-forward', + 'lookupnames-backward', + 'lookupnames-forward', + 'mixed-toplevels', + + 'mti/scripttable', + 'mti/chainedclass', + 'mti/chainedcoverage', + 'mti/chained-glyph', + 'mti/gsubalternate', + 'mti/gsubligature', + 'mti/gsubmultiple', + 'mti/gsubreversechanined', + 'mti/gsubsingle', + ), + 'GPOS': ( + 'mti/scripttable', + 'mti/chained-glyph', + 'mti/gposcursive', + 'mti/gposkernset', + 'mti/gposmarktobase', + 'mti/gpospairclass', + 'mti/gpospairglyph', + 'mti/gpossingle', + 'mti/mark-to-ligature', + ), + 'GDEF': ( + 'mti/gdefattach', + 'mti/gdefclasses', + 'mti/gdefligcaret', + 'mti/gdefmarkattach', + 'mti/gdefmarkfilter', + ), + } + # TODO: + # https://github.com/Monotype/OpenType_Table_Source/issues/12 + # + # 'mti/featuretable' + # 'mti/contextclass' + # 'mti/contextcoverage' + # 'mti/context-glyph' + + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def setUp(self): + pass + + def tearDown(self): + pass + + @staticmethod + def getpath(testfile): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", testfile) + + def expect_ttx(self, expected_ttx, actual_ttx, fromfile=None, tofile=None): + expected = [l+'\n' for l in expected_ttx.split('\n')] + actual = [l+'\n' for l in actual_ttx.split('\n')] + if actual != expected: + sys.stderr.write('\n') + for line in difflib.unified_diff( + expected, actual, fromfile=fromfile, tofile=tofile): + sys.stderr.write(line) + self.fail("TTX output is different from expected") + + @classmethod + def create_font(celf): + font = TTFont() + font.setGlyphOrder(celf.GLYPH_ORDER) + return font + + def check_mti_file(self, name, tableTag=None): + + xml_expected_path = self.getpath("%s.ttx" % name + ('.'+tableTag if tableTag is not None else '')) + with open(xml_expected_path, 'rt', encoding="utf-8") as xml_expected_file: + xml_expected = xml_expected_file.read() + + font = self.create_font() + + with open(self.getpath("%s.txt" % name), 'rt', encoding="utf-8") as f: + table = mtiLib.build(f, font, tableTag=tableTag) + + if tableTag is not None: + self.assertEqual(tableTag, table.tableTag) + tableTag = table.tableTag + + # Make sure it compiles. + blob = table.compile(font) + + # Make sure it decompiles. + decompiled = table.__class__() + decompiled.decompile(blob, font) + + # XML from built object. + writer = XMLWriter(StringIO(), newlinestr='\n') + writer.begintag(tableTag); writer.newline() + table.toXML(writer, font) + writer.endtag(tableTag); writer.newline() + xml_built = writer.file.getvalue() + + # XML from decompiled object. 
+ writer = XMLWriter(StringIO(), newlinestr='\n') + writer.begintag(tableTag); writer.newline() + decompiled.toXML(writer, font) + writer.endtag(tableTag); writer.newline() + xml_binary = writer.file.getvalue() + + self.expect_ttx(xml_binary, xml_built, fromfile='decompiled', tofile='built') + self.expect_ttx(xml_expected, xml_built, fromfile=xml_expected_path, tofile='built') + + from fontTools.misc import xmlReader + f = StringIO() + f.write(xml_expected) + f.seek(0) + font2 = TTFont() + font2.setGlyphOrder(font.getGlyphOrder()) + reader = xmlReader.XMLReader(f, font2) + reader.read(rootless=True) + + # XML from object read from XML. + writer = XMLWriter(StringIO(), newlinestr='\n') + writer.begintag(tableTag); writer.newline() + font2[tableTag].toXML(writer, font) + writer.endtag(tableTag); writer.newline() + xml_fromxml = writer.file.getvalue() + + self.expect_ttx(xml_expected, xml_fromxml, fromfile=xml_expected_path, tofile='fromxml') + +def generate_mti_file_test(name, tableTag=None): + return lambda self: self.check_mti_file(os.path.join(*name.split('/')), tableTag=tableTag) + + +for tableTag,tests in MtiTest.TESTS.items(): + for name in tests: + setattr(MtiTest, "test_MtiFile_%s%s" % (name, '_'+tableTag if tableTag else ''), + generate_mti_file_test(name, tableTag=tableTag)) + + +if __name__ == "__main__": + if len(sys.argv) > 1: + from fontTools.mtiLib import main + font = MtiTest.create_font() + sys.exit(main(sys.argv[1:], font)) + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/otlLib/builder_test.py fonttools-3.21.2/Tests/otlLib/builder_test.py --- fonttools-3.0/Tests/otlLib/builder_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/otlLib/builder_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1062 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.testTools import getXML +from fontTools.otlLib import builder +from fontTools.ttLib.tables 
import otTables +from itertools import chain +import unittest + + +class BuilderTest(unittest.TestCase): + GLYPHS = (".notdef space zero one two three four five six " + "A B C a b c grave acute cedilla f_f_i f_i c_t").split() + GLYPHMAP = {name: num for num, name in enumerate(GLYPHS)} + + ANCHOR1 = builder.buildAnchor(11, -11) + ANCHOR2 = builder.buildAnchor(22, -22) + ANCHOR3 = builder.buildAnchor(33, -33) + + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + + def test_buildAnchor_format1(self): + anchor = builder.buildAnchor(23, 42) + self.assertEqual(getXML(anchor.toXML), + ['', + ' ', + ' ', + '']) + + def test_buildAnchor_format2(self): + anchor = builder.buildAnchor(23, 42, point=17) + self.assertEqual(getXML(anchor.toXML), + ['', + ' ', + ' ', + ' ', + '']) + + def test_buildAnchor_format3(self): + anchor = builder.buildAnchor( + 23, 42, + deviceX=builder.buildDevice({1: 1, 0: 0}), + deviceY=builder.buildDevice({7: 7})) + self.assertEqual(getXML(anchor.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildAttachList(self): + attachList = builder.buildAttachList({ + "zero": [23, 7], "one": [1], + }, self.GLYPHMAP) + self.assertEqual(getXML(attachList.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildAttachList_empty(self): + self.assertIsNone(builder.buildAttachList({}, self.GLYPHMAP)) + + def test_buildAttachPoint(self): + attachPoint = builder.buildAttachPoint([7, 3]) + self.assertEqual(getXML(attachPoint.toXML), + ['', + ' ', + ' ', + ' ', + '']) + + def 
test_buildAttachPoint_empty(self): + self.assertIsNone(builder.buildAttachPoint([])) + + def test_buildAttachPoint_duplicate(self): + attachPoint = builder.buildAttachPoint([7, 3, 7]) + self.assertEqual(getXML(attachPoint.toXML), + ['', + ' ', + ' ', + ' ', + '']) + + + def test_buildBaseArray(self): + anchor = builder.buildAnchor + baseArray = builder.buildBaseArray({ + "a": {2: anchor(300, 80)}, + "c": {1: anchor(300, 80), 2: anchor(300, -20)} + }, numMarkClasses=4, glyphMap=self.GLYPHMAP) + self.assertEqual(getXML(baseArray.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildBaseRecord(self): + a = builder.buildAnchor + rec = builder.buildBaseRecord([a(500, -20), None, a(300, -15)]) + self.assertEqual(getXML(rec.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildCaretValueForCoord(self): + caret = builder.buildCaretValueForCoord(500) + self.assertEqual(getXML(caret.toXML), + ['', + ' ', + '']) + + def test_buildCaretValueForPoint(self): + caret = builder.buildCaretValueForPoint(23) + self.assertEqual(getXML(caret.toXML), + ['', + ' ', + '']) + + def test_buildComponentRecord(self): + a = builder.buildAnchor + rec = builder.buildComponentRecord([a(500, -20), None, a(300, -15)]) + self.assertEqual(getXML(rec.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildComponentRecord_empty(self): + self.assertIsNone(builder.buildComponentRecord([])) + + def test_buildComponentRecord_None(self): + self.assertIsNone(builder.buildComponentRecord(None)) + + def test_buildCoverage(self): + cov = builder.buildCoverage({"two", "four"}, {"two": 2, "four": 4}) + self.assertEqual(getXML(cov.toXML), + ['', + ' ', + ' ', + '']) + + def test_buildCursivePos(self): + pos = builder.buildCursivePosSubtable({ + "two": (self.ANCHOR1, self.ANCHOR2), + 
"four": (self.ANCHOR3, self.ANCHOR1) + }, self.GLYPHMAP) + self.assertEqual(getXML(pos.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildDevice_format1(self): + device = builder.buildDevice({1:1, 0:0}) + self.assertEqual(getXML(device.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildDevice_format2(self): + device = builder.buildDevice({2:2, 0:1, 1:0}) + self.assertEqual(getXML(device.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildDevice_format3(self): + device = builder.buildDevice({5:3, 1:77}) + self.assertEqual(getXML(device.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildLigatureArray(self): + anchor = builder.buildAnchor + ligatureArray = builder.buildLigatureArray({ + "f_i": [{2: anchor(300, -20)}, {}], + "c_t": [{}, {1: anchor(500, 350), 2: anchor(1300, -20)}] + }, numMarkClasses=4, glyphMap=self.GLYPHMAP) + self.assertEqual(getXML(ligatureArray.toXML), + ['', + ' ', + ' ', # f_i + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildLigatureAttach(self): + anchor = builder.buildAnchor + attach = builder.buildLigatureAttach([ + [anchor(500, -10), None], + [None, anchor(300, -20), None]]) + self.assertEqual(getXML(attach.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildLigatureAttach_emptyComponents(self): + attach = builder.buildLigatureAttach([[], None]) + self.assertEqual(getXML(attach.toXML), + ['', + ' ', + ' ', + ' ', + '']) + + def test_buildLigatureAttach_noComponents(self): + attach = 
builder.buildLigatureAttach([]) + self.assertEqual(getXML(attach.toXML), + ['', + ' ', + '']) + + def test_buildLigCaretList(self): + carets = builder.buildLigCaretList( + {"f_f_i": [300, 600]}, {"c_t": [42]}, self.GLYPHMAP) + self.assertEqual(getXML(carets.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildLigCaretList_bothCoordsAndPointsForSameGlyph(self): + carets = builder.buildLigCaretList( + {"f_f_i": [300]}, {"f_f_i": [7]}, self.GLYPHMAP) + self.assertEqual(getXML(carets.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildLigCaretList_empty(self): + self.assertIsNone(builder.buildLigCaretList({}, {}, self.GLYPHMAP)) + + def test_buildLigCaretList_None(self): + self.assertIsNone(builder.buildLigCaretList(None, None, self.GLYPHMAP)) + + def test_buildLigGlyph_coords(self): + lig = builder.buildLigGlyph([500, 800], None) + self.assertEqual(getXML(lig.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildLigGlyph_empty(self): + self.assertIsNone(builder.buildLigGlyph([], [])) + + def test_buildLigGlyph_None(self): + self.assertIsNone(builder.buildLigGlyph(None, None)) + + def test_buildLigGlyph_points(self): + lig = builder.buildLigGlyph(None, [2]) + self.assertEqual(getXML(lig.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildLookup(self): + s1 = builder.buildSingleSubstSubtable({"one": "two"}) + s2 = builder.buildSingleSubstSubtable({"three": "four"}) + lookup = builder.buildLookup([s1, s2], flags=7) + self.assertEqual(getXML(lookup.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildLookup_badFlags(self): + s = builder.buildSingleSubstSubtable({"one": "two"}) + self.assertRaisesRegex( + AssertionError, "if markFilterSet is None, " + "flags must not set 
LOOKUP_FLAG_USE_MARK_FILTERING_SET; " + "flags=0x0010", + builder.buildLookup, [s], + builder.LOOKUP_FLAG_USE_MARK_FILTERING_SET, None) + self.assertRaisesRegex( + AssertionError, "if markFilterSet is not None, " + "flags must set LOOKUP_FLAG_USE_MARK_FILTERING_SET; " + "flags=0x0004", + builder.buildLookup, [s], + builder.LOOKUP_FLAG_IGNORE_LIGATURES, 777) + + def test_buildLookup_conflictingSubtableTypes(self): + s1 = builder.buildSingleSubstSubtable({"one": "two"}) + s2 = builder.buildAlternateSubstSubtable({"one": ["two", "three"]}) + self.assertRaisesRegex( + AssertionError, "all subtables must have the same LookupType", + builder.buildLookup, [s1, s2]) + + def test_buildLookup_noSubtables(self): + self.assertIsNone(builder.buildLookup([])) + self.assertIsNone(builder.buildLookup(None)) + self.assertIsNone(builder.buildLookup([None])) + self.assertIsNone(builder.buildLookup([None, None])) + + def test_buildLookup_markFilterSet(self): + s = builder.buildSingleSubstSubtable({"one": "two"}) + flags = (builder.LOOKUP_FLAG_RIGHT_TO_LEFT | + builder.LOOKUP_FLAG_USE_MARK_FILTERING_SET) + lookup = builder.buildLookup([s], flags, markFilterSet=999) + self.assertEqual(getXML(lookup.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildMarkArray(self): + markArray = builder.buildMarkArray({ + "acute": (7, builder.buildAnchor(300, 800)), + "grave": (2, builder.buildAnchor(10, 80)) + }, self.GLYPHMAP) + self.assertLess(self.GLYPHMAP["grave"], self.GLYPHMAP["acute"]) + self.assertEqual(getXML(markArray.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildMarkBasePosSubtable(self): + anchor = builder.buildAnchor + marks = { + "acute": (0, anchor(300, 700)), + "cedilla": (1, anchor(300, -100)), + "grave": (0, anchor(300, 700)) + } + bases = { + # Make sure we can handle missing entries. 
+ "A": {}, # no entry for any markClass + "B": {0: anchor(500, 900)}, # only markClass 0 specified + "C": {1: anchor(500, -10)}, # only markClass 1 specified + + "a": {0: anchor(500, 400), 1: anchor(500, -20)}, + "b": {0: anchor(500, 800), 1: anchor(500, -20)} + } + table = builder.buildMarkBasePosSubtable(marks, bases, self.GLYPHMAP) + self.assertEqual(getXML(table.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', # grave + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', # acute + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', # cedilla + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', # A + ' ', + ' ', + ' ', + ' ', # B + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', # C + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', # a + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', # b + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildMarkGlyphSetsDef(self): + marksets = builder.buildMarkGlyphSetsDef( + [{"acute", "grave"}, {"cedilla", "grave"}], self.GLYPHMAP) + self.assertEqual(getXML(marksets.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildMarkGlyphSetsDef_empty(self): + self.assertIsNone(builder.buildMarkGlyphSetsDef([], self.GLYPHMAP)) + + def test_buildMarkGlyphSetsDef_None(self): + self.assertIsNone(builder.buildMarkGlyphSetsDef(None, self.GLYPHMAP)) + + def test_buildMarkLigPosSubtable(self): + anchor = builder.buildAnchor + marks = { + "acute": (0, anchor(300, 700)), + "cedilla": (1, anchor(300, -100)), + "grave": (0, anchor(300, 700)) + } + bases = { + "f_i": [{}, {0: anchor(200, 400)}], # nothing on f; only 1 on i + "c_t": [ + {0: anchor(500, 600), 1: anchor(500, -20)}, # c + {0: anchor(1300, 800), 1: anchor(1300, -20)} # t + ] + } + table = builder.buildMarkLigPosSubtable(marks, bases, self.GLYPHMAP) + self.assertEqual(getXML(table.toXML), + 
['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildMarkRecord(self): + rec = builder.buildMarkRecord(17, builder.buildAnchor(500, -20)) + self.assertEqual(getXML(rec.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildMark2Record(self): + a = builder.buildAnchor + rec = builder.buildMark2Record([a(500, -20), None, a(300, -15)]) + self.assertEqual(getXML(rec.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildPairPosClassesSubtable(self): + d20 = builder.buildValue({"XPlacement": -20}) + d50 = builder.buildValue({"XPlacement": -50}) + d0 = builder.buildValue({}) + d8020 = builder.buildValue({"XPlacement": -80, "YPlacement": -20}) + subtable = builder.buildPairPosClassesSubtable({ + (tuple("A",), tuple(["zero"])): (d0, d50), + (tuple("A",), tuple(["one", "two"])): (None, d20), + (tuple(["B", "C"]), tuple(["zero"])): (d8020, d50), + }, self.GLYPHMAP) + self.assertEqual(getXML(subtable.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildPairPosGlyphs(self): + d50 = builder.buildValue({"XPlacement": -50}) + d8020 = builder.buildValue({"XPlacement": -80, "YPlacement": -20}) + subtables = builder.buildPairPosGlyphs({ + ("A", "zero"): (None, d50), + ("A", "one"): 
(d8020, d50), + }, self.GLYPHMAP) + self.assertEqual(sum([getXML(t.toXML) for t in subtables], []), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildPairPosGlyphsSubtable(self): + d20 = builder.buildValue({"XPlacement": -20}) + d50 = builder.buildValue({"XPlacement": -50}) + d0 = builder.buildValue({}) + d8020 = builder.buildValue({"XPlacement": -80, "YPlacement": -20}) + subtable = builder.buildPairPosGlyphsSubtable({ + ("A", "zero"): (d0, d50), + ("A", "one"): (None, d20), + ("B", "five"): (d8020, d50), + }, self.GLYPHMAP) + self.assertEqual(getXML(subtable.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildSinglePos(self): + subtables = builder.buildSinglePos({ + "one": builder.buildValue({"XPlacement": 500}), + "two": builder.buildValue({"XPlacement": 500}), + "three": builder.buildValue({"XPlacement": 200}), + "four": builder.buildValue({"XPlacement": 400}), + "five": builder.buildValue({"XPlacement": 500}), + "six": builder.buildValue({"YPlacement": -6}), + }, self.GLYPHMAP) + self.assertEqual(sum([getXML(t.toXML) for t in subtables], []), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildSinglePos_ValueFormat0(self): + subtables = builder.buildSinglePos({ + "zero": builder.buildValue({}) + }, self.GLYPHMAP) + self.assertEqual(sum([getXML(t.toXML) for t in subtables], []), + ['', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildSinglePosSubtable_format1(self): + subtable = builder.buildSinglePosSubtable({ + "one": builder.buildValue({"XPlacement": 777}), + "two": 
builder.buildValue({"XPlacement": 777}), + }, self.GLYPHMAP) + self.assertEqual(getXML(subtable.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildSinglePosSubtable_format2(self): + subtable = builder.buildSinglePosSubtable({ + "one": builder.buildValue({"XPlacement": 777}), + "two": builder.buildValue({"YPlacement": -888}), + }, self.GLYPHMAP) + self.assertEqual(getXML(subtable.toXML), + ['', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '']) + + def test_buildValue(self): + value = builder.buildValue({"XPlacement": 7, "YPlacement": 23}) + func = lambda writer, font: value.toXML(writer, font, valueName="Val") + self.assertEqual(getXML(func), + ['']) + + def test_getLigatureKey(self): + components = lambda s: [tuple(word) for word in s.split()] + c = components("fi fl ff ffi fff") + c.sort(key=builder._getLigatureKey) + self.assertEqual(c, components("fff ffi ff fi fl")) + + def test_getSinglePosValueKey(self): + device = builder.buildDevice({10:1, 11:3}) + a1 = builder.buildValue({"XPlacement": 500, "XPlaDevice": device}) + a2 = builder.buildValue({"XPlacement": 500, "XPlaDevice": device}) + b = builder.buildValue({"XPlacement": 500}) + keyA1 = builder._getSinglePosValueKey(a1) + keyA2 = builder._getSinglePosValueKey(a1) + keyB = builder._getSinglePosValueKey(b) + self.assertEqual(keyA1, keyA2) + self.assertEqual(hash(keyA1), hash(keyA2)) + self.assertNotEqual(keyA1, keyB) + self.assertNotEqual(hash(keyA1), hash(keyB)) + + +class ClassDefBuilderTest(unittest.TestCase): + def test_build_usingClass0(self): + b = builder.ClassDefBuilder(useClass0=True) + b.add({"aa", "bb"}) + b.add({"a", "b"}) + b.add({"c"}) + b.add({"e", "f", "g", "h"}) + cdef = b.build() + self.assertIsInstance(cdef, otTables.ClassDef) + self.assertEqual(cdef.classDefs, { + "a": 2, + "b": 2, + "c": 3, + "aa": 1, + "bb": 1 + }) + + def test_build_notUsingClass0(self): + b = builder.ClassDefBuilder(useClass0=False) + b.add({"a", "b"}) + b.add({"c"}) + 
b.add({"e", "f", "g", "h"}) + cdef = b.build() + self.assertIsInstance(cdef, otTables.ClassDef) + self.assertEqual(cdef.classDefs, { + "a": 2, + "b": 2, + "c": 3, + "e": 1, + "f": 1, + "g": 1, + "h": 1 + }) + + def test_canAdd(self): + b = builder.ClassDefBuilder(useClass0=True) + b.add({"a", "b", "c", "d"}) + b.add({"e", "f"}) + self.assertTrue(b.canAdd({"a", "b", "c", "d"})) + self.assertTrue(b.canAdd({"e", "f"})) + self.assertTrue(b.canAdd({"g", "h", "i"})) + self.assertFalse(b.canAdd({"b", "c", "d"})) + self.assertFalse(b.canAdd({"a", "b", "c", "d", "e", "f"})) + self.assertFalse(b.canAdd({"d", "e", "f"})) + self.assertFalse(b.canAdd({"f"})) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/pens/areaPen_test.py fonttools-3.21.2/Tests/pens/areaPen_test.py --- fonttools-3.0/Tests/pens/areaPen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/pens/areaPen_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,180 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.areaPen import AreaPen +import unittest + +precision = 6 + +def draw1_(pen): + pen.moveTo( (254, 360) ) + pen.lineTo( (771, 367) ) + pen.curveTo( (800, 393), (808, 399), (819, 412) ) + pen.curveTo( (818, 388), (774, 138), (489, 145) ) + pen.curveTo( (188, 145), (200, 398), (200, 421) ) + pen.curveTo( (209, 409), (220, 394), (254, 360) ) + pen.closePath() + +def draw2_(pen): + pen.moveTo( (254, 360) ) + pen.curveTo( (220, 394), (209, 409), (200, 421) ) + pen.curveTo( (200, 398), (188, 145), (489, 145) ) + pen.curveTo( (774, 138), (818, 388), (819, 412) ) + pen.curveTo( (808, 399), (800, 393), (771, 367) ) + pen.closePath() + +def draw3_(pen): + pen.moveTo( (771, 367) ) + pen.curveTo( (800, 393), (808, 399), (819, 412) ) + pen.curveTo( (818, 388), (774, 138), (489, 145) ) + pen.curveTo( (188, 145), (200, 398), (200, 421) ) + pen.curveTo( (209, 409), (220, 394), 
(254, 360) ) + pen.closePath() + +def draw4_(pen): + pen.moveTo( (771, 367) ) + pen.lineTo( (254, 360) ) + pen.curveTo( (220, 394), (209, 409), (200, 421) ) + pen.curveTo( (200, 398), (188, 145), (489, 145) ) + pen.curveTo( (774, 138), (818, 388), (819, 412) ) + pen.curveTo( (808, 399), (800, 393), (771, 367) ) + pen.closePath() + +def draw5_(pen): + pen.moveTo( (254, 360) ) + pen.lineTo( (771, 367) ) + pen.qCurveTo( (793, 386), (802, 394) ) + pen.qCurveTo( (811, 402), (819, 412) ) + pen.qCurveTo( (819, 406), (814, 383.5) ) + pen.qCurveTo( (809, 361), (796, 330.5) ) + pen.qCurveTo( (783, 300), (760.5, 266.5) ) + pen.qCurveTo( (738, 233), (701, 205.5) ) + pen.qCurveTo( (664, 178), (612, 160.5) ) + pen.qCurveTo( (560, 143), (489, 145) ) + pen.qCurveTo( (414, 145), (363, 164) ) + pen.qCurveTo( (312, 183), (280, 211.5) ) + pen.qCurveTo( (248, 240), (231.5, 274.5) ) + pen.qCurveTo( (215, 309), (208, 339.5) ) + pen.qCurveTo( (201, 370), (200.5, 392.5) ) + pen.qCurveTo( (200, 415), (200, 421) ) + pen.qCurveTo( (207, 412), (217.5, 399) ) + pen.qCurveTo( (228, 386), (254, 360) ) + pen.closePath() + +def draw6_(pen): + pen.moveTo( (254, 360) ) + pen.qCurveTo( (228, 386), (217.5, 399) ) + pen.qCurveTo( (207, 412), (200, 421) ) + pen.qCurveTo( (200, 415), (200.5, 392.5) ) + pen.qCurveTo( (201, 370), (208, 339.5) ) + pen.qCurveTo( (215, 309), (231.5, 274.5) ) + pen.qCurveTo( (248, 240), (280, 211.5) ) + pen.qCurveTo( (312, 183), (363, 164) ) + pen.qCurveTo( (414, 145), (489, 145) ) + pen.qCurveTo( (560, 143), (612, 160.5) ) + pen.qCurveTo( (664, 178), (701, 205.5) ) + pen.qCurveTo( (738, 233), (760.5, 266.5) ) + pen.qCurveTo( (783, 300), (796, 330.5) ) + pen.qCurveTo( (809, 361), (814, 383.5) ) + pen.qCurveTo( (819, 406), (819, 412) ) + pen.qCurveTo( (811, 402), (802, 394) ) + pen.qCurveTo( (793, 386), (771, 367) ) + pen.closePath() + +def draw7_(pen): + pen.moveTo( (771, 367) ) + pen.qCurveTo( (793, 386), (802, 394) ) + pen.qCurveTo( (811, 402), (819, 412) ) + pen.qCurveTo( 
(819, 406), (814, 383.5) ) + pen.qCurveTo( (809, 361), (796, 330.5) ) + pen.qCurveTo( (783, 300), (760.5, 266.5) ) + pen.qCurveTo( (738, 233), (701, 205.5) ) + pen.qCurveTo( (664, 178), (612, 160.5) ) + pen.qCurveTo( (560, 143), (489, 145) ) + pen.qCurveTo( (414, 145), (363, 164) ) + pen.qCurveTo( (312, 183), (280, 211.5) ) + pen.qCurveTo( (248, 240), (231.5, 274.5) ) + pen.qCurveTo( (215, 309), (208, 339.5) ) + pen.qCurveTo( (201, 370), (200.5, 392.5) ) + pen.qCurveTo( (200, 415), (200, 421) ) + pen.qCurveTo( (207, 412), (217.5, 399) ) + pen.qCurveTo( (228, 386), (254, 360) ) + pen.closePath() + +def draw8_(pen): + pen.moveTo( (771, 367) ) + pen.lineTo( (254, 360) ) + pen.qCurveTo( (228, 386), (217.5, 399) ) + pen.qCurveTo( (207, 412), (200, 421) ) + pen.qCurveTo( (200, 415), (200.5, 392.5) ) + pen.qCurveTo( (201, 370), (208, 339.5) ) + pen.qCurveTo( (215, 309), (231.5, 274.5) ) + pen.qCurveTo( (248, 240), (280, 211.5) ) + pen.qCurveTo( (312, 183), (363, 164) ) + pen.qCurveTo( (414, 145), (489, 145) ) + pen.qCurveTo( (560, 143), (612, 160.5) ) + pen.qCurveTo( (664, 178), (701, 205.5) ) + pen.qCurveTo( (738, 233), (760.5, 266.5) ) + pen.qCurveTo( (783, 300), (796, 330.5) ) + pen.qCurveTo( (809, 361), (814, 383.5) ) + pen.qCurveTo( (819, 406), (819, 412) ) + pen.qCurveTo( (811, 402), (802, 394) ) + pen.qCurveTo( (793, 386), (771, 367) ) + pen.closePath() + + +class AreaPenTest(unittest.TestCase): + def test_PScontour_clockwise_line_first(self): + pen = AreaPen(None) + draw1_(pen) + self.assertEqual(-104561.35, round(pen.value, precision)) + + def test_PScontour_counterclockwise_line_last(self): + pen = AreaPen(None) + draw2_(pen) + self.assertEqual(104561.35, round(pen.value, precision)) + + def test_PScontour_clockwise_line_last(self): + pen = AreaPen(None) + draw3_(pen) + self.assertEqual(-104561.35, round(pen.value, precision)) + + def test_PScontour_counterclockwise_line_first(self): + pen = AreaPen(None) + draw4_(pen) + self.assertEqual(104561.35, 
round(pen.value, precision)) + + def test_TTcontour_clockwise_line_first(self): + pen = AreaPen(None) + draw5_(pen) + self.assertEqual(-104602.791667, round(pen.value, precision)) + + def test_TTcontour_counterclockwise_line_last(self): + pen = AreaPen(None) + draw6_(pen) + self.assertEqual(104602.791667, round(pen.value, precision)) + + def test_TTcontour_clockwise_line_last(self): + pen = AreaPen(None) + draw7_(pen) + self.assertEqual(-104602.791667, round(pen.value, precision)) + + def test_TTcontour_counterclockwise_line_first(self): + pen = AreaPen(None) + draw8_(pen) + self.assertEqual(104602.791667, round(pen.value, precision)) + + def test_openPaths(self): + pen = AreaPen() + pen.moveTo((0, 0)) + pen.endPath() + self.assertEqual(0, pen.value) + + pen.moveTo((0, 0)) + pen.lineTo((1, 0)) + with self.assertRaises(NotImplementedError): + pen.endPath() + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/pens/basePen_test.py fonttools-3.21.2/Tests/pens/basePen_test.py --- fonttools-3.0/Tests/pens/basePen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/pens/basePen_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,179 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.basePen import \ + BasePen, decomposeSuperBezierSegment, decomposeQuadraticSegment +from fontTools.misc.loggingTools import CapturingLogHandler +import unittest + + +class _TestPen(BasePen): + def __init__(self): + BasePen.__init__(self, glyphSet={}) + self._commands = [] + + def __repr__(self): + return " ".join(self._commands) + + def getCurrentPoint(self): + return self._getCurrentPoint() + + def _moveTo(self, pt): + self._commands.append("%s %s moveto" % (pt[0], pt[1])) + + def _lineTo(self, pt): + self._commands.append("%s %s lineto" % (pt[0], pt[1])) + + def _curveToOne(self, bcp1, bcp2, pt): + self._commands.append("%s %s %s %s %s %s curveto" 
% + (bcp1[0], bcp1[1], + bcp2[0], bcp2[1], + pt[0], pt[1])) + + def _closePath(self): + self._commands.append("closepath") + + def _endPath(self): + self._commands.append("endpath") + + +class _TestGlyph: + def draw(self, pen): + pen.moveTo((0.0, 0.0)) + pen.lineTo((0.0, 100.0)) + pen.curveTo((50.0, 75.0), (60.0, 50.0), (50.0, 25.0), (0.0, 0.0)) + pen.closePath() + + +class BasePenTest(unittest.TestCase): + def test_moveTo(self): + pen = _TestPen() + pen.moveTo((0.5, -4.3)) + self.assertEqual("0.5 -4.3 moveto", repr(pen)) + self.assertEqual((0.5, -4.3), pen.getCurrentPoint()) + + def test_lineTo(self): + pen = _TestPen() + pen.moveTo((4, 5)) + pen.lineTo((7, 8)) + self.assertEqual("4 5 moveto 7 8 lineto", repr(pen)) + self.assertEqual((7, 8), pen.getCurrentPoint()) + + def test_curveTo_zeroPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + self.assertRaises(AssertionError, pen.curveTo) + + def test_curveTo_onePoint(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((1.0, 1.1)) + self.assertEqual("0.0 0.0 moveto 1.0 1.1 lineto", repr(pen)) + self.assertEqual((1.0, 1.1), pen.getCurrentPoint()) + + def test_curveTo_twoPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((6.0, 3.0), (3.0, 6.0)) + self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", + repr(pen)) + self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) + + def test_curveTo_manyPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.curveTo((1.0, 1.1), (2.0, 2.1), (3.0, 3.1), (4.0, 4.1)) + self.assertEqual("0.0 0.0 moveto " + "1.0 1.1 1.5 1.6 2.0 2.1 curveto " + "2.5 2.6 3.0 3.1 4.0 4.1 curveto", repr(pen)) + self.assertEqual((4.0, 4.1), pen.getCurrentPoint()) + + def test_qCurveTo_zeroPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + self.assertRaises(AssertionError, pen.qCurveTo) + + def test_qCurveTo_onePoint(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((77.7, 99.9)) + self.assertEqual("0.0 0.0 moveto 77.7 99.9 
lineto", repr(pen)) + self.assertEqual((77.7, 99.9), pen.getCurrentPoint()) + + def test_qCurveTo_manyPoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((6.0, 3.0), (3.0, 6.0)) + self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", + repr(pen)) + self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) + + def test_qCurveTo_onlyOffCurvePoints(self): + pen = _TestPen() + pen.moveTo((0.0, 0.0)) + pen.qCurveTo((6.0, -6.0), (12.0, 12.0), (18.0, -18.0), None) + self.assertEqual("0.0 0.0 moveto " + "12.0 -12.0 moveto " + "8.0 -8.0 7.0 -3.0 9.0 3.0 curveto " + "11.0 9.0 13.0 7.0 15.0 -3.0 curveto " + "17.0 -13.0 16.0 -16.0 12.0 -12.0 curveto", repr(pen)) + self.assertEqual((12.0, -12.0), pen.getCurrentPoint()) + + def test_closePath(self): + pen = _TestPen() + pen.lineTo((3, 4)) + pen.closePath() + self.assertEqual("3 4 lineto closepath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + def test_endPath(self): + pen = _TestPen() + pen.lineTo((3, 4)) + pen.endPath() + self.assertEqual("3 4 lineto endpath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + def test_addComponent(self): + pen = _TestPen() + pen.glyphSet["oslash"] = _TestGlyph() + pen.addComponent("oslash", (2, 3, 0.5, 2, -10, 0)) + self.assertEqual("-10.0 0.0 moveto " + "40.0 200.0 lineto " + "127.5 300.0 131.25 290.0 125.0 265.0 curveto " + "118.75 240.0 102.5 200.0 -10.0 0.0 curveto " + "closepath", repr(pen)) + self.assertEqual(None, pen.getCurrentPoint()) + + def test_addComponent_skip_missing(self): + pen = _TestPen() + with CapturingLogHandler(pen.log, "WARNING") as captor: + pen.addComponent("nonexistent", (1, 0, 0, 1, 0, 0)) + captor.assertRegex("glyph '.*' is missing from glyphSet; skipped") + + +class DecomposeSegmentTest(unittest.TestCase): + def test_decomposeSuperBezierSegment(self): + decompose = decomposeSuperBezierSegment + self.assertRaises(AssertionError, decompose, []) + self.assertRaises(AssertionError, decompose, [(0, 0)]) + 
self.assertRaises(AssertionError, decompose, [(0, 0), (1, 1)]) + self.assertEqual([((0, 0), (1, 1), (2, 2))], + decompose([(0, 0), (1, 1), (2, 2)])) + self.assertEqual( + [((0, 0), (2, -2), (4, 0)), ((6, 2), (8, 8), (12, -12))], + decompose([(0, 0), (4, -4), (8, 8), (12, -12)])) + + def test_decomposeQuadraticSegment(self): + decompose = decomposeQuadraticSegment + self.assertRaises(AssertionError, decompose, []) + self.assertRaises(AssertionError, decompose, [(0, 0)]) + self.assertEqual([((0,0), (4, 8))], decompose([(0, 0), (4, 8)])) + self.assertEqual([((0,0), (2, 4)), ((4, 8), (9, -9))], + decompose([(0, 0), (4, 8), (9, -9)])) + self.assertEqual( + [((0, 0), (2.0, 4.0)), ((4, 8), (6.5, -0.5)), ((9, -9), (10, 10))], + decompose([(0, 0), (4, 8), (9, -9), (10, 10)])) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/pens/boundsPen_test.py fonttools-3.21.2/Tests/pens/boundsPen_test.py --- fonttools-3.0/Tests/pens/boundsPen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/pens/boundsPen_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,77 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen +import unittest + + +def draw_(pen): + pen.moveTo((0, 0)) + pen.lineTo((0, 100)) + pen.qCurveTo((50, 75), (60, 50), (50, 25), (0, 0)) + pen.curveTo((-50, 25), (-60, 50), (-50, 75), (0, 100)) + pen.closePath() + + +def bounds_(pen): + return " ".join(["%.0f" % c for c in pen.bounds]) + + +class BoundsPenTest(unittest.TestCase): + def test_draw(self): + pen = BoundsPen(None) + draw_(pen) + self.assertEqual("-55 0 58 100", bounds_(pen)) + + def test_empty(self): + pen = BoundsPen(None) + self.assertEqual(None, pen.bounds) + + def test_curve(self): + pen = BoundsPen(None) + pen.moveTo((0, 0)) + pen.curveTo((20, 10), (90, 40), (0, 0)) + self.assertEqual("0 0 45 20", bounds_(pen)) + + def 
test_quadraticCurve(self): + pen = BoundsPen(None) + pen.moveTo((0, 0)) + pen.qCurveTo((6, 6), (10, 0)) + self.assertEqual("0 0 10 3", bounds_(pen)) + + +class ControlBoundsPenTest(unittest.TestCase): + def test_draw(self): + pen = ControlBoundsPen(None) + draw_(pen) + self.assertEqual("-55 0 60 100", bounds_(pen)) + + def test_empty(self): + pen = ControlBoundsPen(None) + self.assertEqual(None, pen.bounds) + + def test_curve(self): + pen = ControlBoundsPen(None) + pen.moveTo((0, 0)) + pen.curveTo((20, 10), (90, 40), (0, 0)) + self.assertEqual("0 0 90 40", bounds_(pen)) + + def test_quadraticCurve(self): + pen = ControlBoundsPen(None) + pen.moveTo((0, 0)) + pen.qCurveTo((6, 6), (10, 0)) + self.assertEqual("0 0 10 6", bounds_(pen)) + + def test_singlePoint(self): + pen = ControlBoundsPen(None) + pen.moveTo((-5, 10)) + self.assertEqual("-5 10 -5 10", bounds_(pen)) + + def test_ignoreSinglePoint(self): + pen = ControlBoundsPen(None, ignoreSinglePoints=True) + pen.moveTo((0, 10)) + self.assertEqual(None, pen.bounds) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/pens/perimeterPen_test.py fonttools-3.21.2/Tests/pens/perimeterPen_test.py --- fonttools-3.0/Tests/pens/perimeterPen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/pens/perimeterPen_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,167 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.perimeterPen import PerimeterPen +import unittest + +def draw1_(pen): + pen.moveTo( (254, 360) ) + pen.lineTo( (771, 367) ) + pen.curveTo( (800, 393), (808, 399), (819, 412) ) + pen.curveTo( (818, 388), (774, 138), (489, 145) ) + pen.curveTo( (188, 145), (200, 398), (200, 421) ) + pen.curveTo( (209, 409), (220, 394), (254, 360) ) + pen.closePath() + +def draw2_(pen): + pen.moveTo( (254, 360) ) + pen.curveTo( (220, 394), (209, 409), (200, 421) ) + pen.curveTo( (200, 398), 
(188, 145), (489, 145) ) + pen.curveTo( (774, 138), (818, 388), (819, 412) ) + pen.curveTo( (808, 399), (800, 393), (771, 367) ) + pen.closePath() + +def draw3_(pen): + pen.moveTo( (771, 367) ) + pen.curveTo( (800, 393), (808, 399), (819, 412) ) + pen.curveTo( (818, 388), (774, 138), (489, 145) ) + pen.curveTo( (188, 145), (200, 398), (200, 421) ) + pen.curveTo( (209, 409), (220, 394), (254, 360) ) + pen.closePath() + +def draw4_(pen): + pen.moveTo( (771, 367) ) + pen.lineTo( (254, 360) ) + pen.curveTo( (220, 394), (209, 409), (200, 421) ) + pen.curveTo( (200, 398), (188, 145), (489, 145) ) + pen.curveTo( (774, 138), (818, 388), (819, 412) ) + pen.curveTo( (808, 399), (800, 393), (771, 367) ) + pen.closePath() + +def draw5_(pen): + pen.moveTo( (254, 360) ) + pen.lineTo( (771, 367) ) + pen.qCurveTo( (793, 386), (802, 394) ) + pen.qCurveTo( (811, 402), (819, 412) ) + pen.qCurveTo( (819, 406), (814, 383.5) ) + pen.qCurveTo( (809, 361), (796, 330.5) ) + pen.qCurveTo( (783, 300), (760.5, 266.5) ) + pen.qCurveTo( (738, 233), (701, 205.5) ) + pen.qCurveTo( (664, 178), (612, 160.5) ) + pen.qCurveTo( (560, 143), (489, 145) ) + pen.qCurveTo( (414, 145), (363, 164) ) + pen.qCurveTo( (312, 183), (280, 211.5) ) + pen.qCurveTo( (248, 240), (231.5, 274.5) ) + pen.qCurveTo( (215, 309), (208, 339.5) ) + pen.qCurveTo( (201, 370), (200.5, 392.5) ) + pen.qCurveTo( (200, 415), (200, 421) ) + pen.qCurveTo( (207, 412), (217.5, 399) ) + pen.qCurveTo( (228, 386), (254, 360) ) + pen.closePath() + +def draw6_(pen): + pen.moveTo( (254, 360) ) + pen.qCurveTo( (228, 386), (217.5, 399) ) + pen.qCurveTo( (207, 412), (200, 421) ) + pen.qCurveTo( (200, 415), (200.5, 392.5) ) + pen.qCurveTo( (201, 370), (208, 339.5) ) + pen.qCurveTo( (215, 309), (231.5, 274.5) ) + pen.qCurveTo( (248, 240), (280, 211.5) ) + pen.qCurveTo( (312, 183), (363, 164) ) + pen.qCurveTo( (414, 145), (489, 145) ) + pen.qCurveTo( (560, 143), (612, 160.5) ) + pen.qCurveTo( (664, 178), (701, 205.5) ) + pen.qCurveTo( (738, 233), 
(760.5, 266.5) ) + pen.qCurveTo( (783, 300), (796, 330.5) ) + pen.qCurveTo( (809, 361), (814, 383.5) ) + pen.qCurveTo( (819, 406), (819, 412) ) + pen.qCurveTo( (811, 402), (802, 394) ) + pen.qCurveTo( (793, 386), (771, 367) ) + pen.closePath() + +def draw7_(pen): + pen.moveTo( (771, 367) ) + pen.qCurveTo( (793, 386), (802, 394) ) + pen.qCurveTo( (811, 402), (819, 412) ) + pen.qCurveTo( (819, 406), (814, 383.5) ) + pen.qCurveTo( (809, 361), (796, 330.5) ) + pen.qCurveTo( (783, 300), (760.5, 266.5) ) + pen.qCurveTo( (738, 233), (701, 205.5) ) + pen.qCurveTo( (664, 178), (612, 160.5) ) + pen.qCurveTo( (560, 143), (489, 145) ) + pen.qCurveTo( (414, 145), (363, 164) ) + pen.qCurveTo( (312, 183), (280, 211.5) ) + pen.qCurveTo( (248, 240), (231.5, 274.5) ) + pen.qCurveTo( (215, 309), (208, 339.5) ) + pen.qCurveTo( (201, 370), (200.5, 392.5) ) + pen.qCurveTo( (200, 415), (200, 421) ) + pen.qCurveTo( (207, 412), (217.5, 399) ) + pen.qCurveTo( (228, 386), (254, 360) ) + pen.closePath() + +def draw8_(pen): + pen.moveTo( (771, 367) ) + pen.lineTo( (254, 360) ) + pen.qCurveTo( (228, 386), (217.5, 399) ) + pen.qCurveTo( (207, 412), (200, 421) ) + pen.qCurveTo( (200, 415), (200.5, 392.5) ) + pen.qCurveTo( (201, 370), (208, 339.5) ) + pen.qCurveTo( (215, 309), (231.5, 274.5) ) + pen.qCurveTo( (248, 240), (280, 211.5) ) + pen.qCurveTo( (312, 183), (363, 164) ) + pen.qCurveTo( (414, 145), (489, 145) ) + pen.qCurveTo( (560, 143), (612, 160.5) ) + pen.qCurveTo( (664, 178), (701, 205.5) ) + pen.qCurveTo( (738, 233), (760.5, 266.5) ) + pen.qCurveTo( (783, 300), (796, 330.5) ) + pen.qCurveTo( (809, 361), (814, 383.5) ) + pen.qCurveTo( (819, 406), (819, 412) ) + pen.qCurveTo( (811, 402), (802, 394) ) + pen.qCurveTo( (793, 386), (771, 367) ) + pen.closePath() + + +class PerimeterPenTest(unittest.TestCase): + def test_PScontour_clockwise_line_first(self): + pen = PerimeterPen(None) + draw1_(pen) + self.assertEqual(1589, round(pen.value)) + + def 
test_PScontour_counterclockwise_line_last(self): + pen = PerimeterPen(None) + draw2_(pen) + self.assertEqual(1589, round(pen.value)) + + def test_PScontour_clockwise_line_last(self): + pen = PerimeterPen(None) + draw3_(pen) + self.assertEqual(1589, round(pen.value)) + + def test_PScontour_counterclockwise_line_first(self): + pen = PerimeterPen(None) + draw4_(pen) + self.assertEqual(1589, round(pen.value)) + + def test_TTcontour_clockwise_line_first(self): + pen = PerimeterPen(None) + draw5_(pen) + self.assertEqual(1589, round(pen.value)) + + def test_TTcontour_counterclockwise_line_last(self): + pen = PerimeterPen(None) + draw6_(pen) + self.assertEqual(1589, round(pen.value)) + + def test_TTcontour_clockwise_line_last(self): + pen = PerimeterPen(None) + draw7_(pen) + self.assertEqual(1589, round(pen.value)) + + def test_TTcontour_counterclockwise_line_first(self): + pen = PerimeterPen(None) + draw8_(pen) + self.assertEqual(1589, round(pen.value)) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/pens/pointInsidePen_test.py fonttools-3.21.2/Tests/pens/pointInsidePen_test.py --- fonttools-3.0/Tests/pens/pointInsidePen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/pens/pointInsidePen_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,225 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.pointInsidePen import PointInsidePen +import unittest + + +class PointInsidePenTest(unittest.TestCase): + def test_line(self): + def draw_triangles(pen): + pen.moveTo((0,0)); pen.lineTo((10,5)); pen.lineTo((10,0)) + pen.moveTo((9,1)); pen.lineTo((4,1)); pen.lineTo((9,4)) + pen.closePath() + + self.assertEqual( + " *********" + " ** *" + " ** *" + " * *" + " *", + self.render(draw_triangles, even_odd=True)) + + self.assertEqual( + " *********" + " *******" + " *****" + " ***" + " *", + self.render(draw_triangles, even_odd=False)) + 
+ def test_curve(self): + def draw_curves(pen): + pen.moveTo((0,0)); pen.curveTo((9,1), (9,4), (0,5)) + pen.moveTo((10,5)); pen.curveTo((1,4), (1,1), (10,0)) + pen.closePath() + + self.assertEqual( + "*** ***" + "**** ****" + "*** ***" + "**** ****" + "*** ***", + self.render(draw_curves, even_odd=True)) + + self.assertEqual( + "*** ***" + "**********" + "**********" + "**********" + "*** ***", + self.render(draw_curves, even_odd=False)) + + def test_qCurve(self): + def draw_qCurves(pen): + pen.moveTo((0,0)); pen.qCurveTo((15,2), (0,5)) + pen.moveTo((10,5)); pen.qCurveTo((-5,3), (10,0)) + pen.closePath() + + self.assertEqual( + "*** **" + "**** ***" + "*** ***" + "*** ****" + "** ***", + self.render(draw_qCurves, even_odd=True)) + + self.assertEqual( + "*** **" + "**********" + "**********" + "**********" + "** ***", + self.render(draw_qCurves, even_odd=False)) + + @staticmethod + def render(draw_function, even_odd): + result = BytesIO() + for y in range(5): + for x in range(10): + pen = PointInsidePen(None, (x + 0.5, y + 0.5), even_odd) + draw_function(pen) + if pen.getResult(): + result.write(b"*") + else: + result.write(b" ") + return tounicode(result.getvalue()) + + + def test_contour_no_solutions(self): + def draw_contour(pen): + pen.moveTo( (969, 230) ) + pen.curveTo( (825, 348) , (715, 184) , (614, 202) ) + pen.lineTo( (614, 160) ) + pen.lineTo( (969, 160) ) + pen.closePath() + + piPen = PointInsidePen(None, (750, 295)) # this point is outside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 0) + self.assertEqual(piPen.getResult(), False) + + piPen = PointInsidePen(None, (835, 190)) # this point is inside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 1) + self.assertEqual(piPen.getResult(), True) + + def test_contour_square_closed(self): + def draw_contour(pen): + pen.moveTo( (100, 100) ) + pen.lineTo( (-100, 100) ) + pen.lineTo( (-100, -100) ) + pen.lineTo( (100, -100) ) + pen.closePath() + + piPen = PointInsidePen(None, (0, 0)) # 
this point is inside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 1) + self.assertEqual(piPen.getResult(), True) + + def test_contour_square_opened(self): + def draw_contour(pen): + pen.moveTo( (100, 100) ) + pen.lineTo( (-100, 100) ) + pen.lineTo( (-100, -100) ) + pen.lineTo( (100, -100) ) + # contour not explicitly closed + + piPen = PointInsidePen(None, (0, 0)) # this point is inside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 1) + self.assertEqual(piPen.getResult(), True) + + def test_contour_circle(self): + def draw_contour(pen): + pen.moveTo( (0, 100) ) + pen.curveTo( (-55, 100) , (-100, 55) , (-100, 0) ) + pen.curveTo( (-100, -55) , (-55, -100) , (0, -100) ) + pen.curveTo( (55, -100) , (100, -55) , (100, 0) ) + pen.curveTo( (100, 55) , (55, 100) , (0, 100) ) + + piPen = PointInsidePen(None, (50, 50)) # this point is inside + draw_contour(piPen) + self.assertEqual(piPen.getResult(), True) + + piPen = PointInsidePen(None, (50, -50)) # this point is inside + draw_contour(piPen) + self.assertEqual(piPen.getResult(), True) + + def test_contour_diamond(self): + def draw_contour(pen): + pen.moveTo( (0, 100) ) + pen.lineTo( (100, 0) ) + pen.lineTo( (0, -100) ) + pen.lineTo( (-100, 0) ) + pen.closePath() + + piPen = PointInsidePen(None, (-200, 0)) # this point is outside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 0) + + piPen = PointInsidePen(None, (-200, 100)) # this point is outside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 0) + + piPen = PointInsidePen(None, (-200, -100)) # this point is outside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 0) + + piPen = PointInsidePen(None, (-200, 50)) # this point is outside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 0) + + def test_contour_integers(self): + def draw_contour(pen): + pen.moveTo( (728, 697) ) + pen.lineTo( (504, 699) ) + pen.curveTo( (487, 719) , (508, 783) , (556, 783) ) + pen.lineTo( (718, 783) ) + 
pen.curveTo( (739, 783) , (749, 712) , (728, 697) ) + pen.closePath() + + piPen = PointInsidePen(None, (416, 783)) # this point is outside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 0) + + def test_contour_decimals(self): + def draw_contour(pen): + pen.moveTo( (727.546875, 697.0) ) + pen.lineTo( (504.375, 698.515625) ) + pen.curveTo( (487.328125, 719.359375), (507.84375, 783.140625), (555.796875, 783.140625) ) + pen.lineTo( (717.96875, 783.140625) ) + pen.curveTo( (738.890625, 783.140625), (748.796875, 711.5), (727.546875, 697.0) ) + pen.closePath() + + piPen = PointInsidePen(None, (416.625, 783.140625)) # this point is outside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 0) + + def test_contour2_integers(self): + def draw_contour(pen): + pen.moveTo( (51, 22) ) + pen.lineTo( (51, 74) ) + pen.lineTo( (83, 50) ) + pen.curveTo( (83, 49) , (82, 48) , (82, 47) ) + pen.closePath() + + piPen = PointInsidePen(None, (21, 50)) # this point is outside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 0) + + def test_contour2_decimals(self): + def draw_contour(pen): + pen.moveTo( (51.25, 21.859375) ) + pen.lineTo( (51.25, 73.828125) ) + pen.lineTo( (82.5, 50.0) ) + pen.curveTo( (82.5, 49.09375) , (82.265625, 48.265625) , (82.234375, 47.375) ) + pen.closePath() + + piPen = PointInsidePen(None, (21.25, 50.0)) # this point is outside + draw_contour(piPen) + self.assertEqual(piPen.getWinding(), 0) + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) + diff -Nru fonttools-3.0/Tests/pens/recordingPen_test.py fonttools-3.21.2/Tests/pens/recordingPen_test.py --- fonttools-3.0/Tests/pens/recordingPen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/pens/recordingPen_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.recordingPen import RecordingPen, DecomposingRecordingPen 
+import pytest + + +class _TestGlyph(object): + + def draw(self, pen): + pen.moveTo((0.0, 0.0)) + pen.lineTo((0.0, 100.0)) + pen.curveTo((50.0, 75.0), (60.0, 50.0), (50.0, 0.0)) + pen.closePath() + + +class RecordingPenTest(object): + + def test_addComponent(self): + pen = RecordingPen() + pen.addComponent("a", (2, 0, 0, 3, -10, 5)) + assert pen.value == [("addComponent", ("a", (2, 0, 0, 3, -10, 5)))] + + +class DecomposingRecordingPenTest(object): + + def test_addComponent_decomposed(self): + pen = DecomposingRecordingPen({"a": _TestGlyph()}) + pen.addComponent("a", (2, 0, 0, 3, -10, 5)) + assert pen.value == [ + ('moveTo', ((-10.0, 5.0),)), + ('lineTo', ((-10.0, 305.0),)), + ('curveTo', ((90.0, 230.0), (110.0, 155.0), (90.0, 5.0),)), + ('closePath', ())] + + def test_addComponent_missing_raises(self): + pen = DecomposingRecordingPen(dict()) + with pytest.raises(KeyError) as excinfo: + pen.addComponent("a", (1, 0, 0, 1, 0, 0)) + assert excinfo.value.args[0] == "a" diff -Nru fonttools-3.0/Tests/pens/reverseContourPen_test.py fonttools-3.21.2/Tests/pens/reverseContourPen_test.py --- fonttools-3.0/Tests/pens/reverseContourPen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/pens/reverseContourPen_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,319 @@ +from fontTools.pens.recordingPen import RecordingPen +from fontTools.pens.reverseContourPen import ReverseContourPen +import pytest + + +TEST_DATA = [ + ( + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((1, 1),)), + ('lineTo', ((2, 2),)), + ('lineTo', ((3, 3),)), # last not on move, line is implied + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((3, 3),)), + ('lineTo', ((2, 2),)), + ('lineTo', ((1, 1),)), + ('closePath', ()), + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((1, 1),)), + ('lineTo', ((2, 2),)), + ('lineTo', ((0, 0),)), # last on move, no implied line + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((2, 2),)), + ('lineTo', ((1, 1),)), 
+ ('closePath', ()), + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((0, 0),)), + ('lineTo', ((1, 1),)), + ('lineTo', ((2, 2),)), + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((2, 2),)), + ('lineTo', ((1, 1),)), + ('lineTo', ((0, 0),)), + ('lineTo', ((0, 0),)), + ('closePath', ()), + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((1, 1),)), + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((1, 1),)), + ('closePath', ()), + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('curveTo', ((1, 1), (2, 2), (3, 3))), + ('curveTo', ((4, 4), (5, 5), (0, 0))), + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('curveTo', ((5, 5), (4, 4), (3, 3))), + ('curveTo', ((2, 2), (1, 1), (0, 0))), + ('closePath', ()), + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('curveTo', ((1, 1), (2, 2), (3, 3))), + ('curveTo', ((4, 4), (5, 5), (6, 6))), + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((6, 6),)), # implied line + ('curveTo', ((5, 5), (4, 4), (3, 3))), + ('curveTo', ((2, 2), (1, 1), (0, 0))), + ('closePath', ()), + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((1, 1),)), # this line becomes implied + ('curveTo', ((2, 2), (3, 3), (4, 4))), + ('curveTo', ((5, 5), (6, 6), (7, 7))), + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((7, 7),)), + ('curveTo', ((6, 6), (5, 5), (4, 4))), + ('curveTo', ((3, 3), (2, 2), (1, 1))), + ('closePath', ()), + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('qCurveTo', ((1, 1), (2, 2))), + ('qCurveTo', ((3, 3), (0, 0))), + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('qCurveTo', ((3, 3), (2, 2))), + ('qCurveTo', ((1, 1), (0, 0))), + ('closePath', ()), + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('qCurveTo', ((1, 1), (2, 2))), + ('qCurveTo', ((3, 3), (4, 4))), + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((4, 4),)), + ('qCurveTo', ((3, 3), (2, 2))), + ('qCurveTo', ((1, 1), (0, 0))), + ('closePath', ()), + ] + ), + ( + [ + 
('moveTo', ((0, 0),)), + ('lineTo', ((1, 1),)), + ('qCurveTo', ((2, 2), (3, 3))), + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((3, 3),)), + ('qCurveTo', ((2, 2), (1, 1))), + ('closePath', ()), + ] + ), + ( + [ + ('addComponent', ('a', (1, 0, 0, 1, 0, 0))) + ], + [ + ('addComponent', ('a', (1, 0, 0, 1, 0, 0))) + ] + ), + ( + [], [] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('endPath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('endPath', ()), + ], + ), + ( + [ + ('moveTo', ((0, 0),)), + ('closePath', ()), + ], + [ + ('moveTo', ((0, 0),)), + ('endPath', ()), # single-point paths is always open + ], + ), + ( + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((1, 1),)), + ('endPath', ()) + ], + [ + ('moveTo', ((1, 1),)), + ('lineTo', ((0, 0),)), + ('endPath', ()) + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('curveTo', ((1, 1), (2, 2), (3, 3))), + ('endPath', ()) + ], + [ + ('moveTo', ((3, 3),)), + ('curveTo', ((2, 2), (1, 1), (0, 0))), + ('endPath', ()) + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('curveTo', ((1, 1), (2, 2), (3, 3))), + ('lineTo', ((4, 4),)), + ('endPath', ()) + ], + [ + ('moveTo', ((4, 4),)), + ('lineTo', ((3, 3),)), + ('curveTo', ((2, 2), (1, 1), (0, 0))), + ('endPath', ()) + ] + ), + ( + [ + ('moveTo', ((0, 0),)), + ('lineTo', ((1, 1),)), + ('curveTo', ((2, 2), (3, 3), (4, 4))), + ('endPath', ()) + ], + [ + ('moveTo', ((4, 4),)), + ('curveTo', ((3, 3), (2, 2), (1, 1))), + ('lineTo', ((0, 0),)), + ('endPath', ()) + ] + ), + ( + [ + ('qCurveTo', ((0, 0), (1, 1), (2, 2), None)), + ('closePath', ()) + ], + [ + ('qCurveTo', ((0, 0), (2, 2), (1, 1), None)), + ('closePath', ()) + ] + ), + ( + [ + ('qCurveTo', ((0, 0), (1, 1), (2, 2), None)), + ('endPath', ()) + ], + [ + ('qCurveTo', ((0, 0), (2, 2), (1, 1), None)), + ('closePath', ()) # this is always "closed" + ] + ), + # Test case from: + # https://github.com/googlei18n/cu2qu/issues/51#issue-179370514 + ( + [ + ('moveTo', ((848, 348),)), + ('lineTo', ((848, 348),)), # duplicate lineTo point 
after moveTo + ('qCurveTo', ((848, 526), (649, 704), (449, 704))), + ('qCurveTo', ((449, 704), (248, 704), (50, 526), (50, 348))), + ('lineTo', ((50, 348),)), + ('qCurveTo', ((50, 348), (50, 171), (248, -3), (449, -3))), + ('qCurveTo', ((449, -3), (649, -3), (848, 171), (848, 348))), + ('closePath', ()) + ], + [ + ('moveTo', ((848, 348),)), + ('qCurveTo', ((848, 171), (649, -3), (449, -3), (449, -3))), + ('qCurveTo', ((248, -3), (50, 171), (50, 348), (50, 348))), + ('lineTo', ((50, 348),)), + ('qCurveTo', ((50, 526), (248, 704), (449, 704), (449, 704))), + ('qCurveTo', ((649, 704), (848, 526), (848, 348))), + ('lineTo', ((848, 348),)), # the duplicate point is kept + ('closePath', ()) + ] + ) +] + + +@pytest.mark.parametrize("contour, expected", TEST_DATA) +def test_reverse_pen(contour, expected): + recpen = RecordingPen() + revpen = ReverseContourPen(recpen) + for operator, operands in contour: + getattr(revpen, operator)(*operands) + assert recpen.value == expected + + +@pytest.mark.parametrize("contour, expected", TEST_DATA) +def test_reverse_point_pen(contour, expected): + try: + from ufoLib.pointPen import ( + ReverseContourPointPen, PointToSegmentPen, SegmentToPointPen) + except ImportError: + pytest.skip("ufoLib not installed") + + recpen = RecordingPen() + pt2seg = PointToSegmentPen(recpen, outputImpliedClosingLine=True) + revpen = ReverseContourPointPen(pt2seg) + seg2pt = SegmentToPointPen(revpen) + for operator, operands in contour: + getattr(seg2pt, operator)(*operands) + + # for closed contours that have a lineTo following the moveTo, + # and whose points don't overlap, our current implementation diverges + # from the ReverseContourPointPen as wrapped by ufoLib's pen converters. + # In the latter case, an extra lineTo is added because of + # outputImpliedClosingLine=True. This is redundant but not incorrect, + # as the number of points is the same in both. 
+ if (contour and contour[-1][0] == "closePath" and + contour[1][0] == "lineTo" and contour[1][1] != contour[0][1]): + expected = expected[:-1] + [("lineTo", contour[0][1])] + expected[-1:] + + assert recpen.value == expected diff -Nru fonttools-3.0/Tests/pens/t2CharStringPen_test.py fonttools-3.21.2/Tests/pens/t2CharStringPen_test.py --- fonttools-3.0/Tests/pens/t2CharStringPen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/pens/t2CharStringPen_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,184 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.pens.t2CharStringPen import T2CharStringPen +import unittest + + +class T2CharStringPenTest(unittest.TestCase): + + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def assertAlmostEqualProgram(self, expected, actual): + self.assertEqual(len(expected), len(actual)) + for i1, i2 in zip(expected, actual): + if isinstance(i1, basestring): + self.assertIsInstance(i2, basestring) + self.assertEqual(i1, i2) + else: + self.assertAlmostEqual(i1, i2) + + def test_draw_h_v_lines(self): + pen = T2CharStringPen(100, {}) + pen.moveTo((0, 0)) + pen.lineTo((10, 0)) + pen.lineTo((10, 10)) + pen.lineTo((0, 10)) + pen.closePath() # no-op + pen.moveTo((10, 10)) + pen.lineTo((10, 20)) + pen.lineTo((0, 20)) + pen.lineTo((0, 10)) + pen.closePath() + charstring = pen.getCharString(None, None) + + self.assertEqual( + [100, + 0, 'hmoveto', + 10, 10, -10, 'hlineto', + 10, 'hmoveto', + 10, -10, -10, 'vlineto', + 'endchar'], + charstring.program) + + def test_draw_lines(self): + pen = T2CharStringPen(100, {}) + pen.moveTo((5, 5)) + pen.lineTo((25, 15)) + pen.lineTo((35, 35)) + pen.lineTo((15, 25)) 
+ pen.closePath() # no-op + charstring = pen.getCharString(None, None) + + self.assertEqual( + [100, + 5, 5, 'rmoveto', + 20, 10, 10, 20, -20, -10, 'rlineto', + 'endchar'], + charstring.program) + + def test_draw_h_v_curves(self): + pen = T2CharStringPen(100, {}) + pen.moveTo((0, 0)) + pen.curveTo((10, 0), (20, 10), (20, 20)) + pen.curveTo((20, 30), (10, 40), (0, 40)) + pen.endPath() # no-op + charstring = pen.getCharString(None, None) + + self.assertEqual( + [100, + 0, 'hmoveto', + 10, 10, 10, 10, 10, -10, 10, -10, 'hvcurveto', + 'endchar'], + charstring.program) + + def test_draw_curves(self): + pen = T2CharStringPen(100, {}) + pen.moveTo((95, 25)) + pen.curveTo((115, 44), (115, 76), (95, 95)) + pen.curveTo((76, 114), (44, 115), (25, 95)) + pen.endPath() # no-op + charstring = pen.getCharString(None, None) + + self.assertEqual( + [100, + 95, 25, 'rmoveto', + 20, 19, 0, 32, -20, 19, -19, 19, -32, 1, -19, -20, 'rrcurveto', + 'endchar'], + charstring.program) + + def test_draw_more_curves(self): + pen = T2CharStringPen(100, {}) + pen.moveTo((10, 10)) + pen.curveTo((20, 10), (50, 10), (60, 10)) + pen.curveTo((60, 20), (60, 50), (60, 60)) + pen.curveTo((50, 50), (40, 60), (30, 60)) + pen.curveTo((40, 50), (30, 40), (30, 30)) + pen.curveTo((30, 25), (25, 19), (20, 20)) + pen.curveTo((15, 20), (9, 25), (10, 30)) + pen.curveTo((7, 25), (6, 15), (10, 10)) + pen.endPath() # no-op + charstring = pen.getCharString(None, None) + + self.assertEqual( + [100, + 10, 10, 'rmoveto', + 10, 30, 0, 10, 'hhcurveto', + 10, 0, 30, 10, 'vvcurveto', + -10, -10, -10, 10, -10, 'hhcurveto', + 10, -10, -10, -10, -10, 'vvcurveto', + -5, -5, -6, -5, 1, 'vhcurveto', + -5, -6, 5, 5, 1, 'hvcurveto', + -3, -5, -1, -10, 4, -5, 'rrcurveto', + 'endchar'], + charstring.program) + + def test_default_width(self): + pen = T2CharStringPen(None, {}) + charstring = pen.getCharString(None, None) + self.assertEqual(['endchar'], charstring.program) + + def test_no_round(self): + pen = T2CharStringPen(100.1, {}, 
roundTolerance=0.0) + pen.moveTo((0, 0)) + pen.curveTo((10.1, 0.1), (19.9, 9.9), (20.49, 20.49)) + pen.curveTo((20.49, 30.49), (9.9, 39.9), (0.1, 40.1)) + pen.closePath() + charstring = pen.getCharString(None, None) + + self.assertAlmostEqualProgram( + [100, # we always round the advance width + 0, 'hmoveto', + 10.1, 0.1, 9.8, 9.8, 0.59, 10.59, 'rrcurveto', + 10, -10.59, 9.41, -9.8, 0.2, 'vhcurveto', + 'endchar'], + charstring.program) + + def test_round_all(self): + pen = T2CharStringPen(100.1, {}, roundTolerance=0.5) + pen.moveTo((0, 0)) + pen.curveTo((10.1, 0.1), (19.9, 9.9), (20.49, 20.49)) + pen.curveTo((20.49, 30.49), (9.9, 39.9), (0.1, 40.1)) + pen.closePath() + charstring = pen.getCharString(None, None) + + self.assertEqual( + [100, + 0, 'hmoveto', + 10, 10, 10, 10, 10, -10, 10, -10, 'hvcurveto', + 'endchar'], + charstring.program) + + def test_round_some(self): + pen = T2CharStringPen(100, {}, roundTolerance=0.2) + pen.moveTo((0, 0)) + # the following two are rounded as within the tolerance + pen.lineTo((10.1, 0.1)) + pen.lineTo((19.9, 9.9)) + # this one is not rounded as it exceeds the tolerance + pen.lineTo((20.49, 20.49)) + pen.closePath() + charstring = pen.getCharString(None, None) + + self.assertAlmostEqualProgram( + [100, + 0, 'hmoveto', + 10, 'hlineto', + 10, 10, 0.49, 10.49, 'rlineto', + 'endchar'], + charstring.program) + + def test_invalid_tolerance(self): + self.assertRaisesRegex( + ValueError, + "Rounding tolerance must be positive", + T2CharStringPen, None, {}, roundTolerance=-0.1) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/pens/ttGlyphPen_test.py fonttools-3.21.2/Tests/pens/ttGlyphPen_test.py --- fonttools-3.0/Tests/pens/ttGlyphPen_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/pens/ttGlyphPen_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,152 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * + 
+import os +import unittest + +from fontTools import ttLib +from fontTools.pens.ttGlyphPen import TTGlyphPen + + +class TTGlyphPenTest(unittest.TestCase): + + def runEndToEnd(self, filename): + font = ttLib.TTFont() + ttx_path = os.path.join( + os.path.abspath(os.path.dirname(os.path.realpath(__file__))), + '..', 'ttLib', 'data', filename) + font.importXML(ttx_path) + + glyphSet = font.getGlyphSet() + glyfTable = font['glyf'] + pen = TTGlyphPen(font.getGlyphSet()) + + for name in font.getGlyphOrder(): + oldGlyph = glyphSet[name] + oldGlyph.draw(pen) + oldGlyph = oldGlyph._glyph + newGlyph = pen.glyph() + + if hasattr(oldGlyph, 'program'): + newGlyph.program = oldGlyph.program + + self.assertEqual( + oldGlyph.compile(glyfTable), newGlyph.compile(glyfTable)) + + def test_e2e_linesAndSimpleComponents(self): + self.runEndToEnd('TestTTF-Regular.ttx') + + def test_e2e_curvesAndComponentTransforms(self): + self.runEndToEnd('TestTTFComplex-Regular.ttx') + + def test_moveTo_errorWithinContour(self): + pen = TTGlyphPen(None) + pen.moveTo((0, 0)) + with self.assertRaises(AssertionError): + pen.moveTo((1, 0)) + + def test_closePath_ignoresAnchors(self): + pen = TTGlyphPen(None) + pen.moveTo((0, 0)) + pen.closePath() + self.assertFalse(pen.points) + self.assertFalse(pen.types) + self.assertFalse(pen.endPts) + + def test_endPath_sameAsClosePath(self): + pen = TTGlyphPen(None) + + pen.moveTo((0, 0)) + pen.lineTo((0, 1)) + pen.lineTo((1, 0)) + pen.closePath() + closePathGlyph = pen.glyph() + + pen.moveTo((0, 0)) + pen.lineTo((0, 1)) + pen.lineTo((1, 0)) + pen.endPath() + endPathGlyph = pen.glyph() + + self.assertEqual(closePathGlyph, endPathGlyph) + + def test_glyph_errorOnUnendedContour(self): + pen = TTGlyphPen(None) + pen.moveTo((0, 0)) + with self.assertRaises(AssertionError): + pen.glyph() + + def test_glyph_decomposes(self): + componentName = 'a' + glyphSet = {} + pen = TTGlyphPen(glyphSet) + + pen.moveTo((0, 0)) + pen.lineTo((0, 1)) + pen.lineTo((1, 0)) + pen.closePath() + 
glyphSet[componentName] = _TestGlyph(pen.glyph()) + + pen.moveTo((0, 0)) + pen.lineTo((0, 1)) + pen.lineTo((1, 0)) + pen.closePath() + pen.addComponent(componentName, (1, 0, 0, 1, 2, 0)) + compositeGlyph = pen.glyph() + + pen.moveTo((0, 0)) + pen.lineTo((0, 1)) + pen.lineTo((1, 0)) + pen.closePath() + pen.moveTo((2, 0)) + pen.lineTo((2, 1)) + pen.lineTo((3, 0)) + pen.closePath() + plainGlyph = pen.glyph() + + self.assertEqual(plainGlyph, compositeGlyph) + + def test_remove_extra_move_points(self): + pen = TTGlyphPen(None) + pen.moveTo((0, 0)) + pen.lineTo((100, 0)) + pen.qCurveTo((100, 50), (50, 100), (0, 0)) + pen.closePath() + self.assertEqual(len(pen.points), 4) + self.assertEqual(pen.points[0], (0, 0)) + + def test_keep_move_point(self): + pen = TTGlyphPen(None) + pen.moveTo((0, 0)) + pen.lineTo((100, 0)) + pen.qCurveTo((100, 50), (50, 100), (30, 30)) + # when last and move pts are different, closePath() implies a lineTo + pen.closePath() + self.assertEqual(len(pen.points), 5) + self.assertEqual(pen.points[0], (0, 0)) + + def test_keep_duplicate_end_point(self): + pen = TTGlyphPen(None) + pen.moveTo((0, 0)) + pen.lineTo((100, 0)) + pen.qCurveTo((100, 50), (50, 100), (0, 0)) + pen.lineTo((0, 0)) # the duplicate point is not removed + pen.closePath() + self.assertEqual(len(pen.points), 5) + self.assertEqual(pen.points[0], (0, 0)) + + +class _TestGlyph(object): + def __init__(self, glyph): + self.coordinates = glyph.coordinates + + def draw(self, pen): + pen.moveTo(self.coordinates[0]) + for point in self.coordinates[1:]: + pen.lineTo(point) + pen.closePath() + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/subset/data/expect_ankr.ttx fonttools-3.21.2/Tests/subset/data/expect_ankr.ttx --- fonttools-3.0/Tests/subset/data/expect_ankr.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_ankr.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + 
+ + diff -Nru fonttools-3.0/Tests/subset/data/expect_bsln_0.ttx fonttools-3.21.2/Tests/subset/data/expect_bsln_0.ttx --- fonttools-3.0/Tests/subset/data/expect_bsln_0.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_bsln_0.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_bsln_1.ttx fonttools-3.21.2/Tests/subset/data/expect_bsln_1.ttx --- fonttools-3.0/Tests/subset/data/expect_bsln_1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_bsln_1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_bsln_2.ttx fonttools-3.21.2/Tests/subset/data/expect_bsln_2.ttx --- fonttools-3.0/Tests/subset/data/expect_bsln_2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_bsln_2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_bsln_3.ttx fonttools-3.21.2/Tests/subset/data/expect_bsln_3.ttx --- fonttools-3.0/Tests/subset/data/expect_bsln_3.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_bsln_3.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_desubroutinize_CFF.ttx fonttools-3.21.2/Tests/subset/data/expect_desubroutinize_CFF.ttx --- fonttools-3.0/Tests/subset/data/expect_desubroutinize_CFF.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_desubroutinize_CFF.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,238 @@ + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + -63 endchar + + + 220 -93 -21 114 -20 297 181 -59 59 292 -20 hstemhm + 9 118 -43 120 hintmask 11101100 + 535 hmoveto + 157 736 rlineto + 10 -24 -32 4 -23 hhcurveto + -117 -130 -135 -160 -101 hvcurveto + 2 -21 -17 1 -14 hhcurveto + -118 -86 -55 -68 -39 28 -19 34 31 25 15 24 14 -8 17 -5 hvcurveto + hintmask 11011010 + 13 34 42 14 62 4 rrcurveto + -87 -153 -60 -164 -90 vvcurveto + -104 80 -2 54 vhcurveto + -6 9 -8 15 32 vvcurveto + 104 55 190 75 163 vhcurveto + 44 -4 39 -9 51 -23 -77 -363 rcurveline + 86 407 rmoveto + -39 16 -43 11 -40 8 56 112 64 93 60 32 rrcurveto + endchar + + + 142 -92 -21 113 -20 386 52 333 -20 hstem + 8 120 vstem + 459 hmoveto + 157 736 rlineto + 12 -30 -26 3 -24 hhcurveto + -238 -290 -563 -189 -106 65 -2 69 -4 hvcurveto + -1 9 -13 -4 51 vvcurveto + 97 42 172 64 154 vhcurveto + 158 hlineto + -77 -366 rlineto + -59 418 rmoveto + 58 126 72 106 73 32 -56 -264 rcurveline + endchar + + + 187 -17 96 -79 -20 406 48 270 46 hstemhm + 6 93 362 139 -119 101 -101 119 hintmask 01111100 + 230 636 rmoveto + -136 -636 rlineto + 144 hlineto + 82 383 rlineto + 2 18 20 1 8 hhcurveto + 73 22 -57 -70 hvcurveto + hintmask 10111001 + -76 -26 -104 -73 -23 -19 10 26 -25 vhcurveto + -9 -23 -4 -19 -16 vvcurveto + -61 56 -13 43 167 52 192 96 75 -33 69 -85 17 vhcurveto + hintmask 10111010 + 65 37 35 63 59 vvcurveto + 82 -66 77 -147 -189 -174 -127 -138 -67 41 -25 66 vhcurveto + -1 9 -13 8 51 vvcurveto + 165 133 78 117 95 37 -51 -57 -75 -64 -87 -80 vhcurveto + -6 hlineto + 47 222 rlineto + endchar + + + 185 -28 92 -64 -20 413 41 270 46 hstemhm + 6 93 350 149 -119 105 -105 119 hintmask 01111100 + 230 636 rmoveto + -136 -636 rlineto + 144 hlineto + 6 30 rlineto + hintmask 10111001 + -41 39 41 -17 39 hhcurveto + 125 110 175 136 72 -32 62 -82 15 hvcurveto + hintmask 10111010 + 64 38 36 61 58 vvcurveto + 83 -74 78 -144 -183 -177 -126 -139 -67 41 -25 66 vhcurveto + -1 9 -13 8 51 vvcurveto + 152 116 91 138 101 25 -49 -53 -81 
-59 -87 -83 vhcurveto + -6 hlineto + 47 222 rlineto + -59 -592 rmoveto + -20 -21 8 21 -20 hvcurveto + 62 290 rlineto + 2 18 20 1 7 hhcurveto + hintmask 10111100 + 63 21 -49 -57 -96 -58 -120 -72 hvcurveto + endchar + + + -73 21 -21 750 -20 hstem + 6 93 vstem + 397 748 rmoveto + 1 -13 -13 1 -14 hhcurveto + -167 -184 -127 -133 -72 38 -25 69 hvcurveto + -1 9 -13 8 51 vvcurveto + 107 53 75 87 36 vhcurveto + -145 -679 rlineto + 144 hlineto + endchar + + + 215 -207 50 157 -20 770 -20 hstemhm + 6 93 13 84 -84 205 hintmask 11111000 + 397 748 rmoveto + 1 -13 -13 1 -14 hhcurveto + -167 -184 -127 -133 -72 38 -25 69 hvcurveto + -1 9 -13 8 51 vvcurveto + 107 53 75 87 36 vhcurveto + -145 -679 rlineto + 34 hlineto + -11 -20 -5 -23 -27 vvcurveto + -79 48 -58 113 155 66 109 138 29 vhcurveto + 150 710 -150 -33 -164 -751 rlineto + -100 -22 -30 -23 -40 hhcurveto + -44 -27 29 39 40 29 33 36 16 17 -7 -16 16 hvcurveto + hintmask 11110100 + 4 11 3 11 11 vvcurveto + 34 -26 24 -41 6 vhcurveto + endchar + + + 88 -207 50 144 81 682 -20 hstemhm + 17 84 -84 220 -50 93 hintmask 11110100 + 538 750 rmoveto + -167 -184 -127 -133 -72 38 -25 69 hvcurveto + -1 9 -13 8 51 vvcurveto + 107 54 76 87 36 vhcurveto + -157 -714 rlineto + -103 -23 -27 -20 -45 hhcurveto + -29 -39 18 52 37 24 37 46 20 15 -5 -21 25 hvcurveto + hintmask 11101000 + 4 15 2 14 11 vvcurveto + 64 -58 3 -40 -79 -43 -66 -68 -83 53 -58 95 164 67 94 153 32 vhcurveto + 150 710 rlineto + endchar + + + -131 21 -21 624 46 78 -20 hstem + 324 748 rmoveto + -72 -121 -78 -6 -55 hhcurveto + -12 -46 rlineto + 95 hlineto + -132 -624 rlineto + 144 hlineto + endchar + + + 66 -5 65 197 51 204 237 -54 54 hstemhm + 6 111 -12 110 117 155 -117 117 hintmask 11101001 + 205 257 rmoveto + 38 -8 -33 13 -37 hhcurveto + -80 -41 -60 -83 -154 141 -16 58 171 111 136 121 71 -38 65 -88 29 hvcurveto + 92 46 45 74 66 vvcurveto + 78 -63 68 -123 vhcurveto + hintmask 11100110 + -116 -91 -61 -91 -54 32 -31 40 24 27 11 23 25 hvcurveto + -28 8 -10 36 27 vvcurveto + hintmask 
11011001 + 47 31 31 48 51 25 -36 -46 -70 -58 -94 -113 -31 vhcurveto + hintmask 11101010 + 93 -33 40 -80 -76 vvcurveto + -87 -53 -82 -86 -37 -39 13 76 40 10 62 78 6 vhcurveto + endchar + + + 44 -11 125 -89 89 -89 107 380 237 -54 54 hstemhm + 66 110 142 119 -119 144 hintmask 00110101 + 111 132 rmoveto + -5 hlineto + 83 135 273 98 223 vvcurveto + 97 -53 64 -137 -151 -55 -79 -68 -58 31 -32 41 24 26 11 23 26 vhcurveto + -28 8 -10 37 23 vvcurveto + hintmask 01001110 + 50 14 31 67 29 32 -33 -49 vhcurveto + -266 -329 -98 -219 vvcurveto + -11 0 -11 2 -11 vhcurveto + 7 20 36 21 23 hhcurveto + hintmask 10010110 + 102 37 -36 109 hhcurveto + 99 20 52 98 14 0 14 -1 16 hvcurveto + -44 -47 -17 -25 -70 hhcurveto + hintmask 00110110 + -75 -57 18 -59 hhcurveto + endchar + + + 98 -9 84 623 52 hstem + 30 158 236 131 vstem + 377 750 rmoveto + -215 -132 -223 -273 -166 35 -97 172 205 113 299 199 168 -53 93 -125 hvcurveto + -189 -425 rmoveto + 225 17 105 148 60 hhcurveto + 47 7 -63 -82 -232 -68 -246 -114 -48 -11 77 74 37 3 35 2 27 hvcurveto + endchar + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_keep_colr.ttx fonttools-3.21.2/Tests/subset/data/expect_keep_colr.ttx --- fonttools-3.0/Tests/subset/data/expect_keep_colr.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_keep_colr.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,195 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_keep_gvar_notdef_outline.ttx fonttools-3.21.2/Tests/subset/data/expect_keep_gvar_notdef_outline.ttx --- fonttools-3.0/Tests/subset/data/expect_keep_gvar_notdef_outline.ttx 
1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_keep_gvar_notdef_outline.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,201 @@ + + + + + + + + + + + + + + + + + + + + + + + wght + 0x0 + 100.0 + 400.0 + 900.0 + 257 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestGVAR + + + Regular + + + Weight + + + Thin + + + Light + + + Regular + + + Bold + + + Black + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_keep_gvar.ttx fonttools-3.21.2/Tests/subset/data/expect_keep_gvar.ttx --- fonttools-3.0/Tests/subset/data/expect_keep_gvar.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_keep_gvar.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,154 @@ + + + + + + + + + + + + + + + + + + + + + + + + wght + 0x0 + 100.0 + 400.0 + 900.0 + 257 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestGVAR + + + Regular + + + Weight + + + Thin + + + Light + + + Regular + + + Bold + + + Black + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_keep_math.ttx fonttools-3.21.2/Tests/subset/data/expect_keep_math.ttx --- fonttools-3.0/Tests/subset/data/expect_keep_math.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_keep_math.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,634 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 19 vlineto + -52 6 -14 21 -28 65 rrcurveto + -246 563 -20 0 -206 -488 rlineto + -59 -140 -9 -21 -58 -6 rrcurveto + -19 199 19 vlineto + -48 -22 10 31 hvcurveto + 0 12 4 17 5 
13 rrcurveto + 46 114 262 0 41 -94 rlineto + 12 -28 7 -27 0 -15 0 -9 -6 -11 -8 -4 -12 -7 -7 -2 -36 0 rrcurveto + -19 vlineto + return + + + -231 0 115 275 rlineto + return + + + -125 167 -62 0 -124 -167 34 0 121 103 122 -103 rlineto + return + + + 25 vlineto + -41 0 -18 22 -51 121 rrcurveto + -222 522 -28 0 -221 -545 rlineto + -38 -94 -15 -18 -46 -8 rrcurveto + -25 202 25 vlineto + -59 4 -22 11 0 26 0 24 19 42 12 31 rrcurveto + 13 34 225 0 rlineto + 34 -79 12 -35 0 -22 0 -22 -13 -8 -33 -3 rrcurveto + -32 -3 0 -25 rlineto + return + + + -5 -16 19 0 rlineto + 59 19 -16 -26 hvcurveto + 0 -6 -1 -10 -1 -5 rrcurveto + -123 -495 rlineto + -8 -34 -19 -29 -76 0 rrcurveto + -20 0 -4 -16 324 0 rlineto + 207 82 94 99 hvcurveto + 0 85 -67 42 -60 11 rrcurveto + -3 4 rlineto + 20 5 29 8 25 13 48 24 43 45 0 78 rrcurveto + 136 -152 9 -84 vhcurveto + return + + + 13 2 15 1 15 0 rrcurveto + 54 65 -21 -77 -97 -56 -66 -112 hvcurveto + -15 0 -18 2 -20 4 rrcurveto + return + + + 15 4 20 0 16 0 rrcurveto + 78 64 -35 -73 -130 -96 -42 -94 hvcurveto + -26 0 -20 2 -25 7 rrcurveto + return + + + + + + -351 endchar + + + 121 0 20 196 41 397 20 hstem + 707 hmoveto + -107 callsubr + -5 257 rmoveto + -106 callsubr + endchar + + + -268 656 20 hstem + 48 86 vstem + 304 -161 rmoveto + -140 117 -30 113 0 186 0 193 31 93 139 119 rrcurveto + -9 16 rlineto + -160 -95 -87 -144 0 -185 0 -170 86 -169 158 -90 rrcurveto + endchar + + + -133 139 81 vstem + 382 -134 rmoveto + -90 110 -72 169 0 306 0 303 72 172 90 110 rrcurveto + 30 vlineto + -142 -134 -101 -214 0 -267 0 -272 101 -209 142 -134 rrcurveto + endchar + + + -12 139 95 vstem + 503 -243 rmoveto + -134 165 -135 265 0 456 0 458 135 294 134 138 rrcurveto + 33 vlineto + -213 -171 -151 -352 0 -400 0 -409 151 -313 213 -200 rrcurveto + endchar + + + 149 182 110 vstem + 667 -346 rmoveto + -178 220 -197 349 0 613 0 606 197 396 178 184 rrcurveto + 44 vlineto + -284 -228 -201 -464 0 -538 0 -541 201 -422 284 -267 rrcurveto + endchar + + + 207 124 130 vstem + 732 
-453 rmoveto + -224 232 -254 504 0 746 0 777 254 473 224 231 rrcurveto + 56 vlineto + -355 -286 -253 -578 0 -673 0 -675 253 -577 355 -286 rrcurveto + endchar + + + 121 0 20 177 39 hstem + 689 hmoveto + -104 callsubr + 17 236 rmoveto + -195 0 94 243 rlineto + endchar + + + 95 0 38 280 35 261 39 hstem + 198 653 rmoveto + -103 callsubr + -44 -42 rmoveto + -102 callsubr + -9 -45 rmoveto + -101 callsubr + endchar + + + -601 654 20 hstem + -75 507 rmoveto + -105 callsubr + endchar + + + -41 560 554 rmoveto + -256 213 -48 0 -256 -213 64 0 216 146 216 -146 rlineto + endchar + + + 378 979 564 rmoveto + -465 213 -48 0 -466 -213 99 0 391 145 390 -145 rlineto + endchar + + + 859 1460 564 rmoveto + -706 213 -48 0 -706 -213 153 0 577 145 577 -145 rlineto + endchar + + + 1285 1886 599 rmoveto + -943 197 -943 -197 5 -26 937 161 939 -161 rlineto + endchar + + + 1727 2328 603 rmoveto + -1164 213 -1164 -213 5 -31 1158 182 1160 -182 rlineto + endchar + + + -151 50 124 vstem + 400 1005 rmoveto + -261 -184 -89 -359 0 -303 rrcurveto + -159 124 239 vlineto + 0 254 54 285 172 197 rrcurveto + endchar + + + -151 50 124 vstem + 174 hmoveto + 1010 -124 -1010 vlineto + endchar + + + -151 50 124 vstem + 400 30 rmoveto + -172 197 -54 285 0 254 rrcurveto + 239 -124 -159 vlineto + 0 -303 89 -359 261 -184 rrcurveto + endchar + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_lcar_0.ttx fonttools-3.21.2/Tests/subset/data/expect_lcar_0.ttx --- fonttools-3.0/Tests/subset/data/expect_lcar_0.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_lcar_0.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_lcar_1.ttx fonttools-3.21.2/Tests/subset/data/expect_lcar_1.ttx --- fonttools-3.0/Tests/subset/data/expect_lcar_1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_lcar_1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_no_hinting_CFF.ttx fonttools-3.21.2/Tests/subset/data/expect_no_hinting_CFF.ttx --- fonttools-3.0/Tests/subset/data/expect_no_hinting_CFF.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_no_hinting_CFF.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,212 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 397 748 rmoveto + 1 -13 -13 1 -14 hhcurveto + -106 callsubr + 53 75 87 36 vhcurveto + -145 -679 rlineto + return + + + -167 -184 -127 -133 -72 38 -25 69 hvcurveto + -1 9 -13 8 51 vvcurveto + 107 return + + + 230 636 rmoveto + -136 -636 rlineto + 144 hlineto + return + + + -67 41 -25 66 vhcurveto + -1 9 -13 8 51 vvcurveto + return + + + + + + -63 endchar + + + 220 535 hmoveto + 157 736 rlineto + 10 -24 -32 4 -23 hhcurveto + -117 -130 -135 -160 -101 hvcurveto + 2 -21 -17 1 -14 hhcurveto + -118 -86 -55 -68 -39 28 -19 34 31 25 15 24 14 -8 17 -5 hvcurveto + 13 34 42 14 62 4 rrcurveto + -87 -153 -60 -164 -90 vvcurveto + -104 80 -2 54 vhcurveto + -6 9 -8 15 32 vvcurveto + 104 55 190 75 163 vhcurveto + 44 -4 39 -9 51 -23 -77 -363 rcurveline + 86 407 rmoveto + -39 16 -43 11 -40 8 56 112 64 93 60 
32 rrcurveto + endchar + + + 142 459 hmoveto + 157 736 rlineto + 12 -30 -26 3 -24 hhcurveto + -238 -290 -563 -189 -106 65 -2 69 -4 hvcurveto + -1 9 -13 -4 51 vvcurveto + 97 42 172 64 154 vhcurveto + 158 hlineto + -77 -366 rlineto + -59 418 rmoveto + 58 126 72 106 73 32 -56 -264 rcurveline + endchar + + + 187 -105 callsubr + 82 383 rlineto + 2 18 20 1 8 hhcurveto + 73 22 -57 -70 hvcurveto + -76 -26 -104 -73 -23 -19 10 26 -25 vhcurveto + -9 -23 -4 -19 -16 vvcurveto + -61 56 -13 43 167 52 192 96 75 -33 69 -85 17 vhcurveto + 65 37 35 63 59 vvcurveto + 82 -66 77 -147 -189 -174 -127 -138 -104 callsubr + 165 133 78 117 95 37 -51 -57 -75 -64 -87 -80 vhcurveto + -6 hlineto + 47 222 rlineto + endchar + + + 185 -105 callsubr + 6 30 rlineto + -41 39 41 -17 39 hhcurveto + 125 110 175 136 72 -32 62 -82 15 hvcurveto + 64 38 36 61 58 vvcurveto + 83 -74 78 -144 -183 -177 -126 -139 -104 callsubr + 152 116 91 138 101 25 -49 -53 -81 -59 -87 -83 vhcurveto + -6 hlineto + 47 222 rlineto + -59 -592 rmoveto + -20 -21 8 21 -20 hvcurveto + 62 290 rlineto + 2 18 20 1 7 hhcurveto + 63 21 -49 -57 -96 -58 -120 -72 hvcurveto + endchar + + + -73 -107 callsubr + 144 hlineto + endchar + + + 215 -107 callsubr + 34 hlineto + -11 -20 -5 -23 -27 vvcurveto + -79 48 -58 113 155 66 109 138 29 vhcurveto + 150 710 -150 -33 -164 -751 rlineto + -100 -22 -30 -23 -40 hhcurveto + -44 -27 29 39 40 29 33 36 16 17 -7 -16 16 hvcurveto + 4 11 3 11 11 vvcurveto + 34 -26 24 -41 6 vhcurveto + endchar + + + 88 538 750 rmoveto + -106 callsubr + 54 76 87 36 vhcurveto + -157 -714 rlineto + -103 -23 -27 -20 -45 hhcurveto + -29 -39 18 52 37 24 37 46 20 15 -5 -21 25 hvcurveto + 4 15 2 14 11 vvcurveto + 64 -58 3 -40 -79 -43 -66 -68 -83 53 -58 95 164 67 94 153 32 vhcurveto + 150 710 rlineto + endchar + + + -131 324 748 rmoveto + -72 -121 -78 -6 -55 hhcurveto + -12 -46 rlineto + 95 hlineto + -132 -624 rlineto + 144 hlineto + endchar + + + 66 205 257 rmoveto + 38 -8 -33 13 -37 hhcurveto + -80 -41 -60 -83 -154 141 -16 58 171 111 136 
121 71 -38 65 -88 29 hvcurveto + 92 46 45 74 66 vvcurveto + 78 -63 68 -123 vhcurveto + -116 -91 -61 -91 -54 32 -31 40 24 27 11 23 25 hvcurveto + -28 8 -10 36 27 vvcurveto + 47 31 31 48 51 25 -36 -46 -70 -58 -94 -113 -31 vhcurveto + 93 -33 40 -80 -76 vvcurveto + -87 -53 -82 -86 -37 -39 13 76 40 10 62 78 6 vhcurveto + endchar + + + 44 111 132 rmoveto + -5 hlineto + 83 135 273 98 223 vvcurveto + 97 -53 64 -137 -151 -55 -79 -68 -58 31 -32 41 24 26 11 23 26 vhcurveto + -28 8 -10 37 23 vvcurveto + 50 14 31 67 29 32 -33 -49 vhcurveto + -266 -329 -98 -219 vvcurveto + -11 0 -11 2 -11 vhcurveto + 7 20 36 21 23 hhcurveto + 102 37 -36 109 hhcurveto + 99 20 52 98 14 0 14 -1 16 hvcurveto + -44 -47 -17 -25 -70 hhcurveto + -75 -57 18 -59 hhcurveto + endchar + + + 98 377 750 rmoveto + -215 -132 -223 -273 -166 35 -97 172 205 113 299 199 168 -53 93 -125 hvcurveto + -189 -425 rmoveto + 225 17 105 148 60 hhcurveto + 47 7 -63 -82 -232 -68 -246 -114 -48 -11 77 74 37 3 35 2 27 hvcurveto + endchar + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_no_hinting_desubroutinize_CFF.ttx fonttools-3.21.2/Tests/subset/data/expect_no_hinting_desubroutinize_CFF.ttx --- fonttools-3.0/Tests/subset/data/expect_no_hinting_desubroutinize_CFF.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_no_hinting_desubroutinize_CFF.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,202 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -63 endchar + + + 220 535 hmoveto + 157 736 rlineto + 10 -24 -32 4 -23 hhcurveto + -117 -130 -135 -160 -101 hvcurveto + 2 -21 -17 1 -14 hhcurveto + -118 -86 -55 -68 -39 28 -19 34 31 25 15 24 14 -8 17 -5 hvcurveto + 13 34 42 14 62 4 rrcurveto + -87 -153 -60 -164 -90 vvcurveto + -104 80 -2 54 vhcurveto + -6 9 -8 15 32 vvcurveto + 104 55 190 75 163 vhcurveto + 44 -4 39 -9 51 -23 -77 -363 rcurveline + 86 407 rmoveto + -39 16 -43 11 -40 8 56 112 64 93 60 32 rrcurveto + endchar + + + 142 459 hmoveto + 157 736 
rlineto + 12 -30 -26 3 -24 hhcurveto + -238 -290 -563 -189 -106 65 -2 69 -4 hvcurveto + -1 9 -13 -4 51 vvcurveto + 97 42 172 64 154 vhcurveto + 158 hlineto + -77 -366 rlineto + -59 418 rmoveto + 58 126 72 106 73 32 -56 -264 rcurveline + endchar + + + 187 230 636 rmoveto + -136 -636 rlineto + 144 hlineto + 82 383 rlineto + 2 18 20 1 8 hhcurveto + 73 22 -57 -70 hvcurveto + -76 -26 -104 -73 -23 -19 10 26 -25 vhcurveto + -9 -23 -4 -19 -16 vvcurveto + -61 56 -13 43 167 52 192 96 75 -33 69 -85 17 vhcurveto + 65 37 35 63 59 vvcurveto + 82 -66 77 -147 -189 -174 -127 -138 -67 41 -25 66 vhcurveto + -1 9 -13 8 51 vvcurveto + 165 133 78 117 95 37 -51 -57 -75 -64 -87 -80 vhcurveto + -6 hlineto + 47 222 rlineto + endchar + + + 185 230 636 rmoveto + -136 -636 rlineto + 144 hlineto + 6 30 rlineto + -41 39 41 -17 39 hhcurveto + 125 110 175 136 72 -32 62 -82 15 hvcurveto + 64 38 36 61 58 vvcurveto + 83 -74 78 -144 -183 -177 -126 -139 -67 41 -25 66 vhcurveto + -1 9 -13 8 51 vvcurveto + 152 116 91 138 101 25 -49 -53 -81 -59 -87 -83 vhcurveto + -6 hlineto + 47 222 rlineto + -59 -592 rmoveto + -20 -21 8 21 -20 hvcurveto + 62 290 rlineto + 2 18 20 1 7 hhcurveto + 63 21 -49 -57 -96 -58 -120 -72 hvcurveto + endchar + + + -73 397 748 rmoveto + 1 -13 -13 1 -14 hhcurveto + -167 -184 -127 -133 -72 38 -25 69 hvcurveto + -1 9 -13 8 51 vvcurveto + 107 53 75 87 36 vhcurveto + -145 -679 rlineto + 144 hlineto + endchar + + + 215 397 748 rmoveto + 1 -13 -13 1 -14 hhcurveto + -167 -184 -127 -133 -72 38 -25 69 hvcurveto + -1 9 -13 8 51 vvcurveto + 107 53 75 87 36 vhcurveto + -145 -679 rlineto + 34 hlineto + -11 -20 -5 -23 -27 vvcurveto + -79 48 -58 113 155 66 109 138 29 vhcurveto + 150 710 -150 -33 -164 -751 rlineto + -100 -22 -30 -23 -40 hhcurveto + -44 -27 29 39 40 29 33 36 16 17 -7 -16 16 hvcurveto + 4 11 3 11 11 vvcurveto + 34 -26 24 -41 6 vhcurveto + endchar + + + 88 538 750 rmoveto + -167 -184 -127 -133 -72 38 -25 69 hvcurveto + -1 9 -13 8 51 vvcurveto + 107 54 76 87 36 vhcurveto + -157 -714 
rlineto + -103 -23 -27 -20 -45 hhcurveto + -29 -39 18 52 37 24 37 46 20 15 -5 -21 25 hvcurveto + 4 15 2 14 11 vvcurveto + 64 -58 3 -40 -79 -43 -66 -68 -83 53 -58 95 164 67 94 153 32 vhcurveto + 150 710 rlineto + endchar + + + -131 324 748 rmoveto + -72 -121 -78 -6 -55 hhcurveto + -12 -46 rlineto + 95 hlineto + -132 -624 rlineto + 144 hlineto + endchar + + + 66 205 257 rmoveto + 38 -8 -33 13 -37 hhcurveto + -80 -41 -60 -83 -154 141 -16 58 171 111 136 121 71 -38 65 -88 29 hvcurveto + 92 46 45 74 66 vvcurveto + 78 -63 68 -123 vhcurveto + -116 -91 -61 -91 -54 32 -31 40 24 27 11 23 25 hvcurveto + -28 8 -10 36 27 vvcurveto + 47 31 31 48 51 25 -36 -46 -70 -58 -94 -113 -31 vhcurveto + 93 -33 40 -80 -76 vvcurveto + -87 -53 -82 -86 -37 -39 13 76 40 10 62 78 6 vhcurveto + endchar + + + 44 111 132 rmoveto + -5 hlineto + 83 135 273 98 223 vvcurveto + 97 -53 64 -137 -151 -55 -79 -68 -58 31 -32 41 24 26 11 23 26 vhcurveto + -28 8 -10 37 23 vvcurveto + 50 14 31 67 29 32 -33 -49 vhcurveto + -266 -329 -98 -219 vvcurveto + -11 0 -11 2 -11 vhcurveto + 7 20 36 21 23 hhcurveto + 102 37 -36 109 hhcurveto + 99 20 52 98 14 0 14 -1 16 hvcurveto + -44 -47 -17 -25 -70 hhcurveto + -75 -57 18 -59 hhcurveto + endchar + + + 98 377 750 rmoveto + -215 -132 -223 -273 -166 35 -97 172 205 113 299 199 168 -53 93 -125 hvcurveto + -189 -425 rmoveto + 225 17 105 148 60 hhcurveto + 47 7 -63 -82 -232 -68 -246 -114 -48 -11 77 74 37 3 35 2 27 hvcurveto + endchar + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_no_hinting_TTF.ttx fonttools-3.21.2/Tests/subset/data/expect_no_hinting_TTF.ttx --- fonttools-3.0/Tests/subset/data/expect_no_hinting_TTF.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_no_hinting_TTF.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,88 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/subset/data/expect_no_notdef_outline_cid.ttx fonttools-3.21.2/Tests/subset/data/expect_no_notdef_outline_cid.ttx --- fonttools-3.0/Tests/subset/data/expect_no_notdef_outline_cid.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_no_notdef_outline_cid.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + endchar + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_no_notdef_outline_otf.ttx fonttools-3.21.2/Tests/subset/data/expect_no_notdef_outline_otf.ttx --- fonttools-3.0/Tests/subset/data/expect_no_notdef_outline_otf.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_no_notdef_outline_otf.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 196 endchar + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_no_notdef_outline_ttf.ttx fonttools-3.21.2/Tests/subset/data/expect_no_notdef_outline_ttf.ttx --- fonttools-3.0/Tests/subset/data/expect_no_notdef_outline_ttf.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_no_notdef_outline_ttf.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_notdef_width_cid.ttx fonttools-3.21.2/Tests/subset/data/expect_notdef_width_cid.ttx --- fonttools-3.0/Tests/subset/data/expect_notdef_width_cid.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_notdef_width_cid.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + endchar + + + -407 endchar + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_opbd_0.ttx 
fonttools-3.21.2/Tests/subset/data/expect_opbd_0.ttx --- fonttools-3.0/Tests/subset/data/expect_opbd_0.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_opbd_0.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_opbd_1.ttx fonttools-3.21.2/Tests/subset/data/expect_opbd_1.ttx --- fonttools-3.0/Tests/subset/data/expect_opbd_1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_opbd_1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_prop_0.ttx fonttools-3.21.2/Tests/subset/data/expect_prop_0.ttx --- fonttools-3.0/Tests/subset/data/expect_prop_0.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_prop_0.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/expect_prop_1.ttx fonttools-3.21.2/Tests/subset/data/expect_prop_1.ttx --- fonttools-3.0/Tests/subset/data/expect_prop_1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/expect_prop_1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/google_color.ttx fonttools-3.21.2/Tests/subset/data/google_color.ttx --- fonttools-3.0/Tests/subset/data/google_color.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/google_color.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,507 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Meh + + + ZOMG + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + DEAD + + + + + + + + + + + + F00D + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/Lobster.subset.ttx fonttools-3.21.2/Tests/subset/data/Lobster.subset.ttx --- fonttools-3.0/Tests/subset/data/Lobster.subset.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/Lobster.subset.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,661 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright (c) 2010 by Pablo Impallari. www.impallari.com. All rights reserved. + + + Lobster 1.4 + + + Regular + + + PabloImpallari.www.impallari.com: Lobster 1.4: 2010 + + + Lobster1.4 + + + Version 1.4 + + + Lobster1.4 + + + Lobster 1.4 is a trademark of Pablo Impallari. www.impallari.com. + + + Pablo Impallari. www.impallari.com + + + Pablo Impallari + + + Copyright (c) 2010 by Pablo Impallari. All rights reserved. + + + www.impallari.com + + + www.impallari.com + + + Copyright (c) 2010, Pablo Impallari (www.impallari.com|impallari@gmail.com), +with Reserved Font Name Lobster. +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is available with a FAQ at: http://scripts.sil.org/OFL + + + http://scripts.sil.org/OFL + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 397 748 rmoveto + 1 -13 -13 1 -14 hhcurveto + -106 callsubr + 53 75 87 36 vhcurveto + -145 -679 rlineto + return + + + -167 -184 -127 -133 -72 38 -25 69 hvcurveto + -1 9 -13 8 51 vvcurveto + 107 return + + + 119 hintmask 01111100 + 230 636 rmoveto + -136 -636 rlineto + 144 hlineto + return + + + -67 41 -25 66 vhcurveto + -1 9 -13 8 51 vvcurveto + return + + + hintmask 00110101 + return + + + hintmask 10111010 + return + + + hintmask 11100110 + return + + + + + + -63 endchar + + + 220 -93 -21 114 -20 297 181 -59 59 292 -20 hstemhm + 9 118 -43 120 hintmask 11101100 + 535 hmoveto + 157 736 rlineto + 10 -24 -32 4 -23 hhcurveto + -117 -130 -135 -160 -101 hvcurveto + 2 -21 -17 1 -14 hhcurveto + -118 -86 -55 -68 -39 28 -19 34 31 25 15 24 14 -8 17 -5 hvcurveto + hintmask 11011010 + 13 34 42 14 62 4 rrcurveto + -87 -153 -60 -164 -90 vvcurveto + -104 80 -2 54 vhcurveto + -6 9 -8 15 32 vvcurveto + 104 55 190 75 163 vhcurveto + 44 -4 39 -9 51 -23 -77 -363 rcurveline + 86 407 rmoveto + -39 16 -43 11 -40 8 56 112 64 93 60 32 rrcurveto + endchar + + + 142 -92 -21 113 -20 386 52 333 -20 hstem + 8 120 vstem + 459 hmoveto + 157 736 rlineto + 12 -30 -26 3 -24 hhcurveto + -238 -290 -563 -189 -106 65 -2 69 -4 hvcurveto + -1 9 -13 -4 51 vvcurveto + 97 42 172 64 154 vhcurveto + 158 hlineto + -77 -366 rlineto + -59 418 rmoveto + 58 126 72 106 73 32 -56 -264 rcurveline + endchar + + + 187 -17 96 -79 -20 406 48 270 46 hstemhm + 6 93 362 139 -119 101 -101 -105 callsubr + 82 383 rlineto + 2 18 20 1 8 hhcurveto + 73 22 -57 -70 hvcurveto + hintmask 10111001 + -76 -26 -104 -73 -23 -19 10 26 -25 vhcurveto + -9 -23 -4 -19 -16 vvcurveto + -61 56 -13 43 167 52 192 96 75 -33 69 -85 17 vhcurveto + -102 callsubr + 65 37 35 63 59 vvcurveto + 82 -66 77 -147 
-189 -174 -127 -138 -104 callsubr + 165 133 78 117 95 37 -51 -57 -75 -64 -87 -80 vhcurveto + -6 hlineto + 47 222 rlineto + endchar + + + 185 -28 92 -64 -20 413 41 270 46 hstemhm + 6 93 350 149 -119 105 -105 -105 callsubr + 6 30 rlineto + hintmask 10111001 + -41 39 41 -17 39 hhcurveto + 125 110 175 136 72 -32 62 -82 15 hvcurveto + hintmask 10111010 + 64 38 36 61 58 vvcurveto + 83 -74 78 -144 -183 -177 -126 -139 -104 callsubr + 152 116 91 138 101 25 -49 -53 -81 -59 -87 -83 vhcurveto + -6 hlineto + 47 222 rlineto + -59 -592 rmoveto + -20 -21 8 21 -20 hvcurveto + 62 290 rlineto + 2 18 20 1 7 hhcurveto + hintmask 10111100 + 63 21 -49 -57 -96 -58 -120 -72 hvcurveto + endchar + + + -73 21 -21 750 -20 hstem + 6 93 vstem + -107 callsubr + 144 hlineto + endchar + + + 215 -207 50 157 -20 770 -20 hstemhm + 6 93 13 84 -84 205 hintmask 11111000 + -107 callsubr + 34 hlineto + -11 -20 -5 -23 -27 vvcurveto + -79 48 -58 113 155 66 109 138 29 vhcurveto + 150 710 -150 -33 -164 -751 rlineto + -100 -22 -30 -23 -40 hhcurveto + -44 -27 29 39 40 29 33 36 16 17 -7 -16 16 hvcurveto + hintmask 11110100 + 4 11 3 11 11 vvcurveto + 34 -26 24 -41 6 vhcurveto + endchar + + + 88 -207 50 144 81 682 -20 hstemhm + 17 84 -84 220 -50 93 hintmask 11110100 + 538 750 rmoveto + -106 callsubr + 54 76 87 36 vhcurveto + -157 -714 rlineto + -103 -23 -27 -20 -45 hhcurveto + -29 -39 18 52 37 24 37 46 20 15 -5 -21 25 hvcurveto + hintmask 11101000 + 4 15 2 14 11 vvcurveto + 64 -58 3 -40 -79 -43 -66 -68 -83 53 -58 95 164 67 94 153 32 vhcurveto + 150 710 rlineto + endchar + + + -131 21 -21 624 46 78 -20 hstem + 324 748 rmoveto + -72 -121 -78 -6 -55 hhcurveto + -12 -46 rlineto + 95 hlineto + -132 -624 rlineto + 144 hlineto + endchar + + + 66 -5 65 197 51 204 237 -54 54 hstemhm + 6 111 -12 110 117 155 -117 117 hintmask 11101001 + 205 257 rmoveto + 38 -8 -33 13 -37 hhcurveto + -80 -41 -60 -83 -154 141 -16 58 171 111 136 121 71 -38 65 -88 29 hvcurveto + 92 46 45 74 66 vvcurveto + 78 -63 68 -123 vhcurveto + -101 callsubr 
+ -116 -91 -61 -91 -54 32 -31 40 24 27 11 23 25 hvcurveto + -28 8 -10 36 27 vvcurveto + hintmask 11011001 + 47 31 31 48 51 25 -36 -46 -70 -58 -94 -113 -31 vhcurveto + hintmask 11101010 + 93 -33 40 -80 -76 vvcurveto + -87 -53 -82 -86 -37 -39 13 76 40 10 62 78 6 vhcurveto + endchar + + + 44 -11 125 -89 89 -89 107 380 237 -54 54 hstemhm + 66 110 142 119 -119 144 -103 callsubr + 111 132 rmoveto + -5 hlineto + 83 135 273 98 223 vvcurveto + 97 -53 64 -137 -151 -55 -79 -68 -58 31 -32 41 24 26 11 23 26 vhcurveto + -28 8 -10 37 23 vvcurveto + hintmask 01001110 + 50 14 31 67 29 32 -33 -49 vhcurveto + -266 -329 -98 -219 vvcurveto + -11 0 -11 2 -11 vhcurveto + 7 20 36 21 23 hhcurveto + hintmask 10010110 + 102 37 -36 109 hhcurveto + 99 20 52 98 14 0 14 -1 16 hvcurveto + -44 -47 -17 -25 -70 hhcurveto + hintmask 00110110 + -75 -57 18 -59 hhcurveto + endchar + + + 98 -9 84 623 52 hstem + 30 158 236 131 vstem + 377 750 rmoveto + -215 -132 -223 -273 -166 35 -97 172 205 113 299 199 168 -53 93 -125 hvcurveto + -189 -425 rmoveto + 225 17 105 148 60 hhcurveto + 47 7 -63 -82 -232 -68 -246 -114 -48 -11 77 74 37 3 35 2 27 hvcurveto + endchar + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/NotdefWidthCID-Regular.ttx fonttools-3.21.2/Tests/subset/data/NotdefWidthCID-Regular.ttx --- fonttools-3.0/Tests/subset/data/NotdefWidthCID-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/NotdefWidthCID-Regular.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,267 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Notdef Width CID + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -120 50 900 50 hstem + 100 50 700 50 vstem + 100 -120 rmoveto + 800 1000 -800 hlineto + 400 -459 rmoveto + -318 409 rlineto + 636 hlineto + -286 -450 rmoveto + 318 409 rlineto + -818 vlineto + -668 -41 rmoveto + 318 409 318 -409 rlineto + -668 859 rmoveto + 318 -409 -318 -409 rlineto + endchar + + + -407 endchar + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestANKR.ttx fonttools-3.21.2/Tests/subset/data/TestANKR.ttx --- fonttools-3.0/Tests/subset/data/TestANKR.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestANKR.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,325 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestANKR + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestBSLN-0.ttx fonttools-3.21.2/Tests/subset/data/TestBSLN-0.ttx --- fonttools-3.0/Tests/subset/data/TestBSLN-0.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestBSLN-0.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,342 @@ + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestBSLN-0 + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestBSLN-1.ttx fonttools-3.21.2/Tests/subset/data/TestBSLN-1.ttx --- fonttools-3.0/Tests/subset/data/TestBSLN-1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestBSLN-1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,348 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestBSLN-1 + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestBSLN-2.ttx fonttools-3.21.2/Tests/subset/data/TestBSLN-2.ttx --- fonttools-3.0/Tests/subset/data/TestBSLN-2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestBSLN-2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,342 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestBSLN-2 + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestBSLN-3.ttx fonttools-3.21.2/Tests/subset/data/TestBSLN-3.ttx --- fonttools-3.0/Tests/subset/data/TestBSLN-3.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestBSLN-3.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,363 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestBSLN-3 + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestCID-Regular.ttx fonttools-3.21.2/Tests/subset/data/TestCID-Regular.ttx --- fonttools-3.0/Tests/subset/data/TestCID-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestCID-Regular.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,389 @@ + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test CID + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 129 216 -105 callsubr + -82 hlineto + 2 -98 rlineto + return + + + -106 callgsubr + 67 return + + + rmoveto + 56 hlineto + 11 433 2 98 rlineto + return + + + -33 -28 -26 -40 vhcurveto + hintmask 01100000 + -38 28 -26 33 vhcurveto + endchar + + + -13 130 -109 -21 return + + + 493 277 hstem + 91 -105 callgsubr + return + + + + + + + + -120 50 900 50 hstem + 100 50 700 50 vstem + 100 -120 rmoveto + 800 1000 -800 hlineto + 400 -459 rmoveto + -318 409 rlineto + 636 hlineto + -286 -450 rmoveto + 318 409 rlineto + -818 vlineto + -668 -41 rmoveto + 318 409 318 -409 rlineto + -668 859 rmoveto + 318 -409 -318 -409 rlineto + endchar + + + -407 endchar + + + -316 -103 callsubr + -106 callsubr + hintmask 01010000 + -107 callsubr + hintmask 01100000 + 39 -662 rmoveto + 33 29 26 38 hvcurveto + hintmask 10100000 + 40 -29 26 -33 -104 callsubr + + + -173 -102 callsubr + -104 callgsubr + 86 vstem + 108 493 rmoveto + 52 hlineto + 15 180 3 97 rlineto + -88 hlineto + 2 -97 rlineto + 203 -103 callgsubr + 51 hlineto + 17 180 2 97 rlineto + -88 hlineto + 2 -97 rlineto + endchar + + + + + + + + 123 -95.5 return + + + hstemhm + 96 -107 callgsubr + return + + + 85.5 return + + + 101.5 return + + + -180 rmoveto + return + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestCLR-Regular.ttx fonttools-3.21.2/Tests/subset/data/TestCLR-Regular.ttx --- fonttools-3.0/Tests/subset/data/TestCLR-Regular.ttx 1970-01-01 00:00:00.000000000 
+0000 +++ fonttools-3.21.2/Tests/subset/data/TestCLR-Regular.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,763 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSH[ ] /* 2 values pushed */ + 1 0 + MDAP[1] /* MoveDirectAbsPt */ + ALIGNRP[ ] /* AlignRelativePt */ + PUSH[ ] /* 3 values pushed */ + 7 4 0 + MIRP[01101] /* MoveIndirectRelPt */ + SHP[0] /* ShiftPointByLastPoint */ + PUSH[ ] /* 2 values pushed */ + 6 5 + MDRP[11100] /* MoveDirectRelPt */ + ALIGNRP[ ] /* AlignRelativePt */ + PUSH[ ] /* 3 values pushed */ + 3 2 0 + MIRP[01101] /* MoveIndirectRelPt */ + SHP[0] /* ShiftPointByLastPoint */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSH[ ] /* 2 values pushed */ + 3 0 + MDAP[1] /* MoveDirectAbsPt */ + ALIGNRP[ ] /* AlignRelativePt */ + PUSH[ ] /* 3 values pushed */ + 5 4 0 + MIRP[01101] /* MoveIndirectRelPt */ + SHP[0] /* ShiftPointByLastPoint */ + PUSH[ ] /* 3 values pushed */ + 7 6 1 + MIRP[11100] /* MoveIndirectRelPt */ + ALIGNRP[ ] /* AlignRelativePt */ + PUSH[ ] /* 3 values pushed */ + 1 2 0 + MIRP[01101] /* MoveIndirectRelPt */ + SHP[0] /* ShiftPointByLastPoint */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Created by Khaled Hosny with Sorts Mill Tools 2.1.0_alpha1 <http://bitbucket.org/sortsmill/sortsmill-tools> + + + TestCLR + + + Regular + + + FontForge : TestCLR : 1-12-2015 + + + TestCLR + + + Version 001.000 + + + TestCLR-Regular + + + Created by Khaled Hosny with Sorts Mill Tools 2.1.0_alpha1 <http://bitbucket.org/sortsmill/sortsmill-tools> + + + TestCLR + + + Regular + + + FontForge : TestCLR : 1-12-2015 + + + TestCLR + + + Version 001.000 + + + TestCLR-Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestGVAR.ttx fonttools-3.21.2/Tests/subset/data/TestGVAR.ttx --- fonttools-3.0/Tests/subset/data/TestGVAR.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestGVAR.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,655 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestGVAR + + + Regular + + + 1.000;UKWN;TestGVAR-Regular + + + TestGVAR + + + Version 1.000 + + + TestGVAR-Regular + + + Sascha Brawer + + + Weight + + + Thin + + + Light + + + Regular + + + Bold + + + Black + + + TestGVAR + + + Regular + + + 1.000;UKWN;TestGVAR-Regular + + + TestGVAR-Regular 
+ + + Version 1.000 + + + TestGVAR-Regular + + + Sascha Brawer + + + Weight + + + Thin + + + Light + + + Regular + + + Bold + + + Black + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + wght + 100.0 + 400.0 + 900.0 + 257 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestLCAR-0.ttx fonttools-3.21.2/Tests/subset/data/TestLCAR-0.ttx --- fonttools-3.0/Tests/subset/data/TestLCAR-0.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestLCAR-0.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,320 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestLCAR + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestLCAR-1.ttx fonttools-3.21.2/Tests/subset/data/TestLCAR-1.ttx --- fonttools-3.0/Tests/subset/data/TestLCAR-1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestLCAR-1.ttx 2018-01-08 12:40:40.000000000 
+0000 @@ -0,0 +1,320 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestLCAR + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestMATH-Regular.ttx fonttools-3.21.2/Tests/subset/data/TestMATH-Regular.ttx --- fonttools-3.0/Tests/subset/data/TestMATH-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestMATH-Regular.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7590 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + XITS Math + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 14 0 45 381 rlineto + 1 11 1 15 0 12 rrcurveto + 49 -19 32 -34 -38 -15 -33 -43 vhcurveto + 0 -11 1 -12 2 -20 rrcurveto + return + + + 30 -24 25 -29 -30 -23 -24 -30 -30 22 -22 30 31 23 22 29 vhcurveto + return + + + -106 -65 -101 -69 -32 -24 29 39 vhcurveto + 0 68 34 88 44 47 12 14 21 9 16 0 rrcurveto + 38 25 -35 -52 hvcurveto + return + + + -44 hlineto + -92 -89 -13 -8 -62 0 -37 0 -24 10 -35 29 -27 22 -12 6 -21 0 rrcurveto + -102 -91 -100 -113 hvcurveto + return + + + -78 51 -59 67 vhcurveto + 33 0 26 11 27 26 48 45 32 75 0 70 0 16 -1 11 -3 17 32 -11 18 -3 26 0 53 0 31 14 37 43 rrcurveto 
+ -394 -658 47 0 rlineto + return + + + -115 -61 -101 -71 -34 -23 28 42 vhcurveto + 0 55 17 59 31 49 20 33 11 10 41 20 17 -18 13 -9 14 -7 21 -10 4 -7 0 -29 rrcurveto + return + + + 49 28 19 6 44 7 39 6 13 11 0 26 0 23 -15 15 -22 0 -22 0 -31 -41 -24 -24 -19 -18 -11 -8 -28 -16 rrcurveto + 24 vlineto + 0 24 5 27 12 32 7 20 4 15 0 12 rrcurveto + 20 -17 16 -20 -20 -16 -15 -19 vhcurveto + 0 -13 4 -18 7 -21 10 -33 5 -31 0 -27 rrcurveto + -13 vlineto + -38 21 -19 16 -32 36 -22 24 -11 8 -16 0 -20 0 -13 -16 0 -21 0 -25 18 -11 49 -10 34 -7 25 -7 38 -23 -41 -25 -16 -6 -46 -9 rrcurveto + -45 -8 -18 -11 0 -26 0 -21 14 -15 23 0 16 0 10 6 17 19 29 33 2 2 10 10 10 9 7 5 35 22 rrcurveto + -9 vlineto + 0 -43 -4 -24 -12 -32 -7 -19 -5 -16 0 -12 rrcurveto + -19 17 -17 19 20 20 19 20 vhcurveto + 0 7 -4 12 -5 14 -13 35 -7 36 0 39 rrcurveto + 0 9 8 -5 rlineto + 20 -13 22 -12 27 -33 25 -31 16 -13 19 0 21 0 15 17 0 22 0 26 -16 10 -44 7 -37 6 -32 10 -27 18 rrcurveto + return + + + 66 -261 261 -66 -261 -261 -66 261 -261 66 261 vlineto + return + + + 30 -26 27 -29 -31 -25 -25 -31 -31 24 -24 31 30 26 25 29 vhcurveto + return + + + 30 -25 26 -30 -32 -24 -24 -32 -30 26 -25 30 30 25 25 30 vhcurveto + return + + + 30 -25 26 -30 -32 -24 -24 -32 -30 26 -25 30 vhcurveto + 30 25 25 30 hvcurveto + return + + + 70 33 42 58 0 63 rrcurveto + 54 -35 35 -45 vhcurveto + return + + + 9 0 13 4 6 0 8 0 6 -7 0 -7 0 -33 -28 -37 -55 -36 rrcurveto + return + + + 0 63 -432 215 432 215 0 65 -565 -275 0 -10 rlineto + return + + + 66 -589 -66 vlineto + return + + + 0 10 -565 275 0 -65 432 -215 -432 -215 0 -63 rlineto + return + + + 11 68 18 46 36 43 86 102 19 34 0 61 rrcurveto + 97 -85 61 -97 -96 -68 -60 -83 -41 20 -25 28 25 16 18 25 vhcurveto + 0 33 -38 4 0 37 rrcurveto + 32 46 30 48 61 48 -49 -69 vhcurveto + 0 -60 -25 -63 -26 -64 -35 -87 -8 -50 -1 -40 rrcurveto + return + + + 28 -24 25 -28 -31 -23 -23 -30 -28 20 -25 34 30 22 24 29 vhcurveto + return + + + 19 vlineto + -52 6 -14 21 -28 65 rrcurveto + -246 563 -20 0 -206 
-488 rlineto + -59 -140 -9 -21 -58 -6 rrcurveto + -19 199 19 vlineto + -48 -22 10 31 hvcurveto + 0 12 4 17 5 13 rrcurveto + 46 114 262 0 41 -94 rlineto + 12 -28 7 -27 0 -15 0 -9 -6 -11 -8 -4 -12 -7 -7 -2 -36 0 rrcurveto + -19 vlineto + return + + + -231 0 115 275 rlineto + return + + + -75 -71 -55 -30 -90 0 -69 0 -65 25 -44 47 -47 51 -25 78 0 110 0 173 89 122 151 0 65 0 52 -29 44 -47 25 -27 15 -29 12 -55 rrcurveto + 23 0 -9 227 -20 0 rlineto + -6 -21 -17 -12 -19 0 -17 0 -31 12 -21 6 -40 11 -40 4 -39 0 -93 0 -86 -33 -64 -71 -57 -64 -33 -83 0 -100 0 -99 35 -91 61 -60 rrcurveto + 58 -57 87 -32 91 0 118 0 97 44 58 83 rrcurveto + return + + + -19 vlineto + 76 -5 12 -9 0 -79 rrcurveto + -435 vlineto + 0 -78 -10 -13 -78 -5 rrcurveto + -19 281 vlineto + 101 0 89 24 62 40 89 57 47 94 0 119 0 103 -34 78 -65 56 -68 59 -103 32 -129 0 rrcurveto + return + + + 33 15 9 38 vhcurveto + 100 0 63 -17 59 -52 63 -55 32 -69 0 -104 0 -113 -34 -79 -70 -47 -60 -40 -55 -12 -103 0 rrcurveto + -39 -9 12 35 hvcurveto + return + + + -28 hlineto + -45 -106 -43 -26 -143 0 rrcurveto + -36 hlineto + -74 -27 5 42 hvcurveto + 243 151 vlineto + 86 0 15 -13 12 -84 rrcurveto + 23 234 -23 hlineto + -12 -79 -15 -17 -86 0 rrcurveto + -151 220 hlineto + 32 7 4 28 vhcurveto + 131 hlineto + 116 0 23 -15 15 -90 rrcurveto + 25 0 -4 143 -530 0 0 -19 rlineto + 75 -4 12 -15 0 -74 rrcurveto + -436 vlineto + 0 -77 -10 -13 -77 -5 rrcurveto + -19 539 vlineto + return + + + -256 -18 hlineto + 79 -6 10 -8 0 -78 rrcurveto + -157 vlineto + -33 -56 -28 -77 -157 -106 112 186 vhcurveto + 0 99 24 91 53 53 43 44 55 25 69 0 56 0 52 -19 38 -36 30 -29 16 -26 25 -61 rrcurveto + 23 0 -8 211 -22 0 rlineto + -6 -19 -18 -14 -21 0 -10 0 -15 3 -19 7 -45 15 -34 8 -54 0 -97 0 -82 -31 -60 -55 -67 -61 -40 -92 0 -110 0 -98 27 -77 56 -59 rrcurveto + 66 -69 102 -38 102 0 103 0 96 25 55 45 rrcurveto + 200 vlineto + 0 65 12 10 58 5 rrcurveto + return + + + 19 vlineto + -75 6 -14 15 0 71 rrcurveto + 437 vlineto + 0 77 15 12 74 6 rrcurveto + 19 
-280 -19 vlineto + 76 -6 13 -10 0 -79 rrcurveto + -189 -303 189 vlineto + 0 78 16 11 73 6 rrcurveto + 19 -280 -19 vlineto + 77 -6 12 -11 0 -78 rrcurveto + -426 vlineto + 0 -86 -11 -12 -78 -5 rrcurveto + -19 279 vlineto + -1 19 rlineto + -74 4 -13 17 0 73 rrcurveto + 202 303 -191 vlineto + 0 -84 -10 -16 -78 -5 rrcurveto + -19 vlineto + return + + + 19 vlineto + -82 3 -16 15 0 75 rrcurveto + 439 vlineto + 0 76 14 12 84 4 rrcurveto + 19 -297 -19 vlineto + 85 -5 12 -8 0 -79 rrcurveto + -439 vlineto + 0 -78 -13 -12 -84 -3 rrcurveto + -19 vlineto + return + + + -287 -19 hlineto + 81 -7 12 -5 0 -81 rrcurveto + -456 vlineto + -50 -6 -20 -31 vhcurveto + -20 0 -1 19 -10 25 -11 28 -15 14 -23 0 rrcurveto + -27 -22 -26 -26 -47 41 -25 60 106 61 62 137 hvcurveto + 364 vlineto + 0 77 11 11 81 6 rrcurveto + return + + + 19 vlineto + -46 0 -27 18 -83 91 rrcurveto + -234 256 186 178 rlineto + 69 66 19 10 68 5 rrcurveto + 19 -259 -19 vlineto + 25 -1 rlineto + 33 -1 8 -10 0 -20 0 -25 -30 -29 -48 -45 rrcurveto + -178 -164 0 200 rlineto + 0 77 7 12 83 6 rrcurveto + 19 -282 -19 vlineto + 79 -5 11 -15 0 -74 rrcurveto + -424 vlineto + 0 -87 -12 -15 -79 -4 rrcurveto + -19 282 19 vlineto + -73 6 -16 9 0 77 rrcurveto + 0 185 26 21 100 -103 rlineto + 79 -82 58 -61 0 -27 0 -14 -13 -9 -29 -1 rrcurveto + -27 -1 0 -19 rlineto + return + + + -26 hlineto + -16 -35 -13 -22 -13 -17 -34 -43 -54 -18 -81 0 rrcurveto + -70 hlineto + -73 -17 7 38 hvcurveto + 464 vlineto + 0 77 12 13 82 5 rrcurveto + 19 -283 -19 vlineto + 78 -6 9 -14 0 -77 rrcurveto + -431 vlineto + 0 -76 -13 -16 -74 -4 rrcurveto + -19 538 vlineto + return + + + 19 vlineto + -73 5 -15 17 0 71 rrcurveto + 438 vlineto + 0 75 15 17 72 1 rrcurveto + 19 -199 vlineto + -221 -502 -231 502 -198 0 0 -19 rlineto + 82 -5 13 -13 0 -76 rrcurveto + -398 vlineto + 0 -112 -14 -14 -83 -6 rrcurveto + -19 234 19 vlineto + -77 4 -16 18 0 110 rrcurveto + 0 398 252 -549 14 0 255 572 0 -449 rlineto + 0 -83 -11 -16 -79 -5 rrcurveto + -19 vlineto + return + + + -237 
-19 hlineto + 44 -5 18 -2 17 -19 15 -17 4 -30 0 -51 rrcurveto + 0 -340 -386 483 -170 0 0 -19 rlineto + 49 0 15 -12 33 -41 rrcurveto + -440 vlineto + 0 -107 -15 -15 -82 -9 rrcurveto + -19 234 19 vlineto + -77 11 -16 17 0 103 rrcurveto + 0 388 rlineto + return + + + 441 -549 18 0 0 518 rlineto + 0 79 10 22 19 17 13 11 15 3 38 4 rrcurveto + return + + + 198 -131 147 -196 -195 -132 -139 -203 -208 133 -140 194 192 135 139 206 vhcurveto + return + + + 0 -120 -28 -94 -55 -53 -35 -33 -44 -15 -50 0 -46 0 -44 14 -34 31 -61 56 -29 90 0 119 0 95 26 97 43 49 39 45 50 22 54 0 rrcurveto + 47 0 40 -17 36 -30 57 -48 34 -94 0 -114 rrcurveto + return + + + -19 vlineto + 74 -6 10 -12 0 -74 rrcurveto + -429 vlineto + 0 -86 -6 -11 -78 -6 rrcurveto + -19 280 19 vlineto + -81 4 -13 13 0 76 rrcurveto + 179 vlineto + 27 -2 16 -1 26 0 107 0 49 9 58 52 37 33 20 45 0 53 0 50 -17 41 -33 28 -45 38 -72 25 -100 0 rrcurveto + return + + + 27 6 9 29 136 60 -40 -110 -90 -60 -57 -110 vhcurveto + -20 0 -18 1 -23 2 rrcurveto + return + + + 19 vlineto + -44 4 -21 6 -29 37 rrcurveto + -200 253 rlineto + 125 19 56 58 0 87 0 44 -16 49 -33 27 -43 35 -71 24 -95 0 rrcurveto + -272 -19 hlineto + 77 -6 8 -12 0 -74 rrcurveto + -427 vlineto + 0 -88 -10 -11 -75 -6 rrcurveto + -19 276 19 vlineto + -77 5 -12 12 0 76 rrcurveto + 0 194 56 2 238 -308 rlineto + return + + + 35 17 7 34 124 59 -44 -90 vhcurveto + 0 -50 -21 -44 -36 -20 -46 -25 -35 -7 -96 -2 rrcurveto + return + + + -22 213 -21 0 rlineto + return + + + -4 -22 -11 -12 -17 0 -10 0 -16 4 -18 8 rrcurveto + return + + + -38 16 -26 6 -41 0 -99 0 -75 -66 0 -109 0 -62 32 -62 90 -48 64 -34 69 -44 35 -38 19 -21 10 -23 0 -35 0 -66 -49 -46 -70 0 rrcurveto + -95 0 -65 56 -46 121 rrcurveto + -22 0 29 -212 22 0 rlineto + return + + + 21 13 12 15 vhcurveto + 11 0 17 -3 19 -8 rrcurveto + return + + + 37 -16 39 -7 42 0 116 0 88 75 0 112 0 76 -49 58 -106 62 -105 61 -74 42 0 68 0 55 39 40 62 0 60 0 50 -33 35 -45 rrcurveto + 18 -24 11 -26 12 -44 rrcurveto + return + + + -7 170 
-562 0 -7 -170 24 0 rlineto + 20 110 27 18 106 0 rrcurveto + 60 -497 hlineto + 0 -88 -11 -11 -83 -5 rrcurveto + -19 292 19 vlineto + -82 4 -14 12 0 76 rrcurveto + 509 59 vlineto + 106 0 26 -18 22 -110 rrcurveto + return + + + -232 -19 hlineto + 81 -5 13 -22 0 -103 rrcurveto + -263 vlineto + 0 -81 -4 -40 -33 -40 -32 -37 -56 -22 -64 0 -57 0 -42 17 -29 28 -40 39 -4 45 0 78 rrcurveto + 312 vlineto + 0 75 11 12 80 7 rrcurveto + 19 -283 -19 vlineto + 79 -6 11 -11 0 -67 rrcurveto + -310 vlineto + 0 -106 26 -70 55 -41 41 -31 56 -15 73 0 92 0 68 25 42 48 45 51 9 54 0 97 rrcurveto + 255 vlineto + 0 95 10 24 84 8 rrcurveto + return + + + -197 -19 hlineto + 51 -1 19 -13 0 -27 0 -21 -6 -25 -11 -31 rrcurveto + -126 -339 -143 364 rlineto + -9 24 -8 23 0 11 0 21 27 13 50 1 rrcurveto + 19 -265 -19 vlineto + 57 -1 12 -7 33 -88 rrcurveto + 28 -75 -104 -283 -144 376 rlineto + -7 19 -4 16 0 12 0 21 13 9 51 1 rrcurveto + 19 -244 -19 vlineto + 51 -1 20 -24 33 -95 59 -168 72 -186 61 -180 rrcurveto + 15 0 154 423 rlineto + 52 -133 57 -148 51 -142 rrcurveto + 15 hlineto + 85 261 10 29 109 294 19 52 11 8 53 10 rrcurveto + return + + + 19 vlineto + -53 6 -19 12 -39 56 rrcurveto + -192 274 143 177 rlineto + 63 78 25 17 64 4 rrcurveto + 19 -237 -19 vlineto + 54 -2 14 -7 0 -23 0 -15 -10 -15 -30 -37 rrcurveto + -112 -140 -44 64 rlineto + -53 78 -30 38 0 29 0 23 18 5 28 1 rrcurveto + 29 1 0 19 -301 0 0 -19 rlineto + 66 -7 21 -12 96 -141 rrcurveto + 106 -155 -156 -195 rlineto + -77 -97 -16 -12 -52 -5 rrcurveto + -19 232 19 vlineto + -58 4 -18 9 0 19 0 15 16 28 37 46 rrcurveto + 119 149 95 -141 rlineto + 34 -51 17 -31 0 -16 0 -18 -18 -9 -32 -2 rrcurveto + -26 -2 0 -19 rlineto + return + + + -218 -19 hlineto + 52 -1 15 -8 1 -22 0 -11 -4 -16 -10 -15 rrcurveto + -145 -222 -146 221 rlineto + -11 16 -7 18 0 13 0 16 15 9 32 1 rrcurveto + 24 1 0 19 -279 0 0 -19 rlineto + 48 -2 22 -20 92 -135 rrcurveto + 131 -192 0 -171 rlineto + 0 -89 -12 -9 -89 -6 rrcurveto + -19 306 19 vlineto + -84 4 -19 10 0 78 
rrcurveto + 0 192 148 228 rlineto + 56 86 23 21 59 5 rrcurveto + return + + + -23 hlineto + -18 -59 -9 -28 -33 -23 -25 -18 -36 -10 -56 0 rrcurveto + -252 0 432 609 0 15 -525 0 -21 -171 26 0 rlineto + 10 55 6 17 25 26 24 24 39 11 71 0 rrcurveto + 214 0 -437 -609 0 -15 563 0 rlineto + return + + + 50 -500 -50 vlineto + return + + + -145 148 rlineto + -17 18 -11 5 -15 0 -22 0 -14 -11 0 -21 0 -16 11 -15 19 -12 rrcurveto + 154 -96 rlineto + return + + + 27 -23 23 -27 -27 -22 -22 -27 -28 21 -22 27 28 23 22 27 vhcurveto + return + + + 54 -311 -54 vlineto + return + + + -35 0 -41 -99 7 -5 rlineto + 8 3 8 1 12 0 rrcurveto + 38 15 -10 -26 -27 -21 -17 -34 hvcurveto + -21 0 -17 3 -27 9 rrcurveto + -14 -31 rlineto + 29 -12 24 -4 31 0 rrcurveto + 77 48 32 52 44 -32 25 -54 hvcurveto + -10 0 -6 -1 -10 -2 rrcurveto + return + + + -125 167 -62 0 -124 -167 34 0 121 103 122 -103 rlineto + return + + + -17 -37 -13 -13 -23 0 -15 0 -19 5 -19 10 rrcurveto + -24 12 rlineto + return + + + -24 12 -24 6 -22 0 rrcurveto + return + + + -50 0 -36 -36 -15 -65 rrcurveto + 29 hlineto + return + + + 11 31 17 15 23 0 12 0 14 -4 15 -7 rrcurveto + 23 -11 rlineto + return + + + 41 -20 15 -4 25 0 55 0 29 30 21 76 rrcurveto + return + + + 54 -45 46 -53 -56 -45 -44 -55 -56 43 -44 55 56 45 44 55 vhcurveto + return + + + -37 -29 -29 -38 -35 -29 29 37 35 30 30 34 37 30 -29 -36 vhcurveto + return + + + -29 -71 -28 -26 -54 0 -63 0 -36 34 -13 63 rrcurveto + -29 hlineto + -97 45 -60 94 vhcurveto + 82 0 48 51 12 106 rrcurveto + return + + + 28 -19 22 -31 -30 -19 -23 -27 -27 22 -22 27 27 23 20 29 vhcurveto + return + + + -35 0 -121 -103 -120 103 -35 0 123 -167 64 0 rlineto + return + + + 40 0 154 96 rlineto + 21 13 8 14 0 18 0 19 -14 11 -20 0 -13 0 -10 -3 -20 -20 rrcurveto + return + + + -30 -10 -82 -66 0 -71 rrcurveto + -83 58 -13 23 25 33 15 43 40 -36 10 -26 vhcurveto + -11 0 -7 -4 -6 0 -7 0 -7 5 0 9 0 19 16 45 66 42 rrcurveto + return + + + 30 10 82 66 0 71 rrcurveto + 83 -58 13 -23 -25 -33 -15 -43 -40 36 -10 26 
vhcurveto + 11 0 7 4 6 0 7 0 7 -5 0 -9 0 -19 -16 -45 -66 -42 rrcurveto + return + + + -66 42 -16 45 0 19 0 9 7 5 7 0 6 0 7 -4 11 0 rrcurveto + 26 36 10 40 43 -33 15 -25 -23 -58 -13 -83 hvcurveto + 0 -71 82 -66 30 -10 rrcurveto + return + + + -37 30 vlineto + 34 27 -26 -34 -34 -27 -27 -34 hvcurveto + -30 -37 30 hlineto + 54 44 44 54 54 -44 43 -54 hvcurveto + return + + + 37 -30 vlineto + -34 -27 27 34 34 27 26 34 hvcurveto + 30 37 -30 hlineto + -54 -44 -43 -54 -54 44 -44 54 hvcurveto + return + + + 40 -95 147 -40 -147 -95 -40 vlineto + return + + + 40 -230 -40 95 -147 40 147 vlineto + return + + + -29 -27 -21 -9 -21 0 -22 0 -16 11 0 28 0 23 13 22 31 25 rrcurveto + -46 hlineto + -38 -27 -16 -28 0 -36 0 -47 31 -27 43 0 46 0 29 22 36 54 rrcurveto + return + + + 54 -45 45 -53 -56 -45 -44 -55 -56 43 -44 55 56 45 45 55 vhcurveto + return + + + -17 -37 -13 -14 -23 0 -20 0 -14 6 -43 22 rrcurveto + return + + + 11 31 17 15 23 0 18 0 8 -4 38 -18 rrcurveto + return + + + 40 0 154 96 rlineto + 19 12 11 15 0 16 0 21 -14 11 -22 0 -15 0 -11 -5 -17 -18 rrcurveto + return + + + -146 148 rlineto + -20 20 -10 3 -13 0 -20 0 -14 -11 0 -19 0 -18 8 -14 21 -13 rrcurveto + 154 -96 rlineto + return + + + -12 106 -48 51 -82 0 rrcurveto + -94 -45 -60 -97 hvcurveto + 29 hlineto + 13 63 36 34 63 0 54 0 28 -26 29 -71 rrcurveto + return + + + 27 -23 22 -27 -27 -22 -22 -27 -28 21 -22 27 28 23 22 28 vhcurveto + return + + + -133 106 -18 -8 rlineto + 29 -29 21 -18 0 -9 rrcurveto + -13 -21 -2 -28 vhcurveto + -178 hlineto + -33 -16 3 12 hvcurveto + 0 7 17 22 32 27 rrcurveto + -18 8 -132 -106 133 -106 18 8 rlineto + -29 29 -21 18 0 9 rrcurveto + 10 16 5 34 vhcurveto + 177 hlineto + 33 16 -3 -12 hvcurveto + 0 -7 -17 -22 -32 -27 rrcurveto + 18 -8 rlineto + return + + + 14 -10 16 -15 vhcurveto + -4 0 -5 -2 -6 -3 -20 -12 -17 -35 -51 -28 0 49 21 44 0 22 rrcurveto + 14 -14 13 -12 -14 -13 -13 -14 vhcurveto + 0 -22 20 -44 0 -50 -39 21 -30 42 -21 13 -6 3 -5 2 -4 0 -15 0 -10 -16 0 -14 0 -9 5 -7 8 -5 20 -12 54 -6 
36 -22 rrcurveto + -36 -21 -52 -6 -19 -11 -10 -5 -4 -13 0 -9 0 -11 9 -11 17 0 5 0 7 2 6 4 16 9 32 43 36 20 0 -48 -20 -43 0 -22 rrcurveto + -12 13 -15 14 12 14 15 12 vhcurveto + 0 22 -21 43 0 48 54 -30 17 -48 28 0 18 0 9 13 0 13 0 8 -4 10 -11 5 -17 11 -44 2 -45 25 51 29 39 0 16 10 rrcurveto + 12 6 5 7 0 9 rrcurveto + return + + + 54 -286 vlineto + -33 -16 3 12 hvcurveto + 0 7 17 22 32 27 rrcurveto + -18 8 -132 -106 133 -106 18 8 rlineto + -29 29 -21 18 0 9 rrcurveto + 13 21 2 28 vhcurveto + return + + + 25 vlineto + -41 0 -18 22 -51 121 rrcurveto + -222 522 -28 0 -221 -545 rlineto + -38 -94 -15 -18 -46 -8 rrcurveto + -25 202 25 vlineto + -59 4 -22 11 0 26 0 24 19 42 12 31 rrcurveto + 13 34 225 0 rlineto + 34 -79 12 -35 0 -22 0 -22 -13 -8 -33 -3 rrcurveto + -32 -3 0 -25 rlineto + return + + + 117 24 42 51 0 76 rrcurveto + 103 -91 57 -161 vhcurveto + -317 -25 hlineto + 68 -5 20 -17 0 -59 rrcurveto + -467 vlineto + 0 -56 -15 -14 -73 -8 rrcurveto + -25 329 vlineto + 178 96 81 107 hvcurveto + 0 95 -83 67 -110 14 rrcurveto + return + + + 39 17 15 40 68 31 -49 -88 -98 -31 -38 -125 vhcurveto + return + + + 26 hlineto + 106 51 -53 -102 -101 -42 -55 -78 -44 -19 18 48 hvcurveto + return + + + -15 -36 -10 -10 -19 0 -10 0 -13 4 -23 10 rrcurveto + return + + + -28 hlineto + -30 -65 -18 -30 -34 -28 -47 -39 -57 -15 -78 0 rrcurveto + -65 -18 13 43 hvcurveto + 242 vlineto + 109 0 43 -36 10 -119 rrcurveto + 26 338 -26 hlineto + -12 -116 -44 -32 -106 -1 rrcurveto + 225 vlineto + 38 13 16 52 vhcurveto + 163 0 51 -33 23 -134 rrcurveto + 25 201 -577 -25 hlineto + 69 -4 19 -16 0 -51 rrcurveto + -479 vlineto + 0 -52 -14 -16 -74 -8 rrcurveto + -25 585 vlineto + return + + + 25 vlineto + -72 9 -16 11 0 53 rrcurveto + 479 vlineto + 0 54 20 15 68 5 rrcurveto + 25 -339 -25 vlineto + 70 -5 19 -13 0 -56 rrcurveto + -204 -241 204 vlineto + 0 56 19 13 70 5 rrcurveto + 25 -336 -25 vlineto + 67 -6 18 -13 0 -55 rrcurveto + -479 vlineto + 0 -53 -15 -11 -70 -9 rrcurveto + -25 336 25 vlineto + -73 8 -16 
12 0 53 rrcurveto + 228 241 -228 vlineto + 0 -51 -16 -14 -73 -8 rrcurveto + -25 vlineto + return + + + 25 vlineto + -76 5 -19 11 0 65 rrcurveto + 470 vlineto + 0 56 21 16 74 3 rrcurveto + 25 -350 -25 vlineto + 72 -5 21 -14 0 -56 rrcurveto + -470 vlineto + 0 -62 -18 -12 -75 -7 rrcurveto + -25 vlineto + return + + + 25 vlineto + -25 1 -12 5 -12 15 rrcurveto + -305 378 rlineto + 186 198 37 17 99 12 rrcurveto + 25 -289 -25 vlineto + 35 -3 rlineto + 36 -3 12 -8 0 -20 0 -16 -9 -11 -31 -31 rrcurveto + -212 -215 0 223 rlineto + 0 61 14 18 74 5 rrcurveto + 25 -337 -25 vlineto + 69 -4 18 -12 0 -62 rrcurveto + -472 vlineto + 0 -57 -13 -12 -74 -7 rrcurveto + -25 336 25 vlineto + -72 9 -15 12 0 49 rrcurveto + 0 196 27 25 181 -225 rlineto + 19 -24 6 -10 0 -11 0 -13 -12 -6 -65 -2 rrcurveto + -25 vlineto + return + + + 25 vlineto + -80 7 -8 21 0 73 rrcurveto + 428 vlineto + 0 75 15 17 73 5 rrcurveto + 25 -252 vlineto + -201 -472 -199 472 -253 0 0 -25 rlineto + 73 -6 16 -14 0 -54 rrcurveto + -469 vlineto + 0 -65 -13 -10 -78 -8 rrcurveto + -25 234 25 vlineto + -82 6 -19 22 0 76 rrcurveto + 0 465 252 -594 27 0 252 605 0 -496 rlineto + 0 -59 -14 -18 -75 -7 rrcurveto + -25 vlineto + return + + + -214 -25 hlineto + 74 -7 18 -19 0 -80 rrcurveto + 0 -299 -349 430 -211 0 0 -25 rlineto + 20 0 23 -14 42 -58 rrcurveto + -470 vlineto + 0 -61 -15 -14 -73 -9 rrcurveto + -25 226 25 vlineto + -77 9 -17 20 0 77 rrcurveto + 0 396 rlineto + return + + + 447 -545 28 0 0 586 rlineto + 0 63 13 10 65 10 rrcurveto + return + + + 207 -148 149 -201 -208 -151 -144 -213 -207 146 -146 207 207 148 146 208 vhcurveto + return + + + -206 -61 -108 -115 -114 -64 105 211 210 63 118 110 117 64 -117 -213 vhcurveto + return + + + -25 vlineto + 70 -6 14 -14 0 -78 rrcurveto + -427 vlineto + 0 -51 -6 -28 -33 -13 -11 -5 -15 -2 -19 -2 rrcurveto + -25 333 25 vlineto + -79 10 -8 9 0 67 rrcurveto + 192 vlineto + 132 1 34 3 43 16 83 31 46 61 0 80 rrcurveto + 122 -102 59 -167 vhcurveto + return + + + 23 17 12 33 83 33 -46 -111 
vhcurveto + 0 -111 -41 -34 -125 -1 rrcurveto + return + + + -2 201 -600 0 -3 -201 29 0 rlineto + 17 119 52 46 124 4 rrcurveto + -527 vlineto + 0 -74 -11 -11 -86 -7 rrcurveto + -25 357 25 vlineto + -87 6 -11 10 0 76 rrcurveto + 527 vlineto + 123 -4 52 -46 17 -119 rrcurveto + return + + + 25 vlineto + -34 1 -16 12 -47 73 rrcurveto + -182 283 80 111 rlineto + 87 121 26 18 75 7 rrcurveto + 25 -250 -25 vlineto + 21 -2 rlineto + 38 -4 13 -7 0 -24 0 -22 -11 -19 -47 -65 -20 -27 -18 -25 -18 -25 rrcurveto + -97 150 rlineto + -13 20 -2 5 0 12 0 21 12 8 36 2 rrcurveto + 31 2 0 25 -346 0 0 -25 rlineto + 36 -3 13 -10 31 -45 rrcurveto + 198 -291 -175 -221 rlineto + -30 -37 -24 -13 -50 -6 rrcurveto + -25 250 25 vlineto + -61 6 -20 10 0 26 0 21 19 31 75 101 rrcurveto + 38 51 101 -162 rlineto + 12 -19 9 -23 0 -12 0 -17 -17 -9 -35 -2 rrcurveto + -28 -2 0 -25 rlineto + return + + + 24 vlineto + -35 5 -13 13 0 43 rrcurveto + 376 -193 -24 vlineto + 44 -9 10 -10 0 -45 rrcurveto + -285 vlineto + 0 -46 -7 -7 -46 -11 rrcurveto + -24 vlineto + return + + + -202 -24 hlineto + 51 -4 12 -12 0 -43 rrcurveto + -474 vlineto + -53 -14 -23 -34 -19 -13 7 10 vhcurveto + 0 5 3 7 6 10 10 15 4 12 0 10 rrcurveto + 30 -26 24 -33 -37 -25 -25 -36 -59 58 -41 82 vhcurveto + 69 0 53 27 28 49 19 32 8 38 0 59 rrcurveto + return + + + 142 -96 102 -126 -132 -97 -99 -146 -142 100 -100 125 129 97 102 141 vhcurveto + return + + + -149 -20 -52 -58 -58 -21 56 145 171 18 53 61 59 19 -53 -171 vhcurveto + return + + + 16 -16 vlineto + -42 0 -25 21 -7 49 rrcurveto + -78 581 -18 0 -356 -577 rlineto + -36 -58 -37 -12 -35 -4 rrcurveto + -16 218 16 -29 vlineto + -26 0 -16 11 -1 18 0 7 3 11 7 8 rrcurveto + 84 140 198 0 13 -113 rlineto + 1 -7 1 -8 0 -6 rrcurveto + -40 -16 -21 -56 vhcurveto + -16 -16 hlineto + return + + + -170 0 139 227 6 0 rlineto + return + + + -5 -16 19 0 rlineto + 59 19 -16 -26 hvcurveto + 0 -6 -1 -10 -1 -5 rrcurveto + -123 -495 rlineto + -8 -34 -19 -29 -76 0 rrcurveto + -20 0 -4 -16 324 0 rlineto + 207 82 94 
99 hvcurveto + 0 85 -67 42 -60 11 rrcurveto + -3 4 rlineto + 20 5 29 8 25 13 48 24 43 45 0 78 rrcurveto + 136 -152 9 -84 vhcurveto + return + + + 13 2 15 1 15 0 rrcurveto + 54 65 -21 -77 -97 -56 -66 -112 hvcurveto + -15 0 -18 2 -20 4 rrcurveto + return + + + 15 4 20 0 16 0 rrcurveto + 78 64 -35 -73 -130 -96 -42 -94 hvcurveto + -26 0 -20 2 -25 7 rrcurveto + return + + + -534 0 -4 -16 19 0 rlineto + 59 16 -11 -24 hvcurveto + 0 -7 -1 -11 -4 -18 rrcurveto + -120 -488 rlineto + -8 -34 -23 -28 -73 0 rrcurveto + -19 0 -4 -16 571 0 66 198 -16 0 rlineto + -43 -113 -56 -47 -158 0 rrcurveto + -67 hlineto + -47 -16 11 16 hvcurveto + 0 3 1 3 1 4 rrcurveto + 66 258 90 0 rlineto + 31 41 -4 -47 hvcurveto + 0 -17 -5 -30 -1 -13 rrcurveto + 17 0 58 242 -16 0 rlineto + -22 -82 -44 -13 -60 0 rrcurveto + -80 0 60 248 160 0 rlineto + 51 0 55 -13 3 -74 0 -11 -1 -11 -1 -13 rrcurveto + 16 hlineto + return + + + -330 0 -4 -16 20 0 rlineto + 57 19 -15 -24 hvcurveto + 0 -7 -2 -7 -2 -10 rrcurveto + -53 -217 -278 0 52 217 rlineto + 8 35 18 28 79 0 rrcurveto + 18 0 4 16 -330 0 -4 -16 20 0 rlineto + 62 16 -18 -21 hvcurveto + 0 -7 -2 -11 -2 -6 rrcurveto + -123 -496 rlineto + -8 -34 -23 -28 -75 0 rrcurveto + -18 0 -4 -16 330 0 4 16 -20 0 rlineto + -59 -18 14 24 hvcurveto + 0 8 2 10 2 6 rrcurveto + 61 242 280 0 -61 -242 rlineto + -9 -37 -24 -25 -72 0 rrcurveto + -18 0 -4 -16 329 0 4 16 -19 0 rlineto + -59 -19 13 21 hvcurveto + 0 10 2 9 2 9 rrcurveto + 123 496 rlineto + 9 35 21 28 76 0 rrcurveto + 18 hlineto + return + + + -328 0 -4 -16 18 0 rlineto + 60 17 -16 -22 hvcurveto + 0 -7 -2 -9 -2 -8 rrcurveto + -123 -497 rlineto + -8 -34 -21 -28 -76 0 rrcurveto + -19 0 -4 -16 328 0 4 16 -17 0 rlineto + -59 -19 12 21 hvcurveto + 0 10 1 11 2 8 rrcurveto + 123 497 rlineto + 9 36 21 26 76 0 rrcurveto + 19 hlineto + return + + + -216 0 -4 -16 rlineto + 42 0 8 -7 1 -14 0 -13 -8 -8 -10 -8 rrcurveto + -282 -221 -5 0 53 209 rlineto + 9 35 13 27 75 0 rrcurveto + 11 0 4 16 -312 0 -4 -16 18 0 rlineto + 60 16 -16 -22 
hvcurveto + 0 -21 -3 -1 -7 -27 rrcurveto + -117 -472 rlineto + -7 -27 -17 -35 -60 0 rrcurveto + -18 0 -4 -16 304 0 4 16 -17 0 rlineto + -59 -19 16 24 hvcurveto + 0 7 1 7 2 8 rrcurveto + 62 243 4 0 156 -237 rlineto + 8 -13 8 -6 0 -16 rrcurveto + -28 -19 -5 -28 vhcurveto + -11 0 -4 -16 287 0 4 16 rlineto + -65 0 -33 36 -29 43 rrcurveto + -187 278 271 211 rlineto + 42 33 27 20 51 0 rrcurveto + return + + + -215 0 -333 -493 -4 0 -89 493 -213 0 -4 -16 19 0 rlineto + 62 15 -15 -20 hvcurveto + 0 -9 -1 -12 -4 -17 rrcurveto + -119 -475 rlineto + -10 -40 -19 -33 -79 0 rrcurveto + -19 0 -4 -16 267 0 4 16 -20 0 rlineto + -59 -17 12 21 hvcurveto + 0 12 2 15 3 13 rrcurveto + 109 428 3 0 94 -517 25 0 345 517 6 0 -112 -439 rlineto + -8 -32 -18 -30 -79 0 rrcurveto + -17 0 -4 -16 330 0 4 16 -19 0 rlineto + -58 -19 15 19 hvcurveto + 0 9 0 12 2 7 rrcurveto + 127 498 rlineto + 9 36 16 25 80 0 rrcurveto + 17 hlineto + return + + + -266 0 -4 -16 20 0 rlineto + 59 17 -17 -20 hvcurveto + 0 -10 -2 -7 -2 -9 rrcurveto + -97 -386 -5 0 -236 465 -200 0 -4 -16 19 0 rlineto + 30 0 43 -7 19 -43 rrcurveto + -126 -509 rlineto + -8 -34 -23 -28 -75 0 rrcurveto + -18 0 -4 -16 266 0 4 16 -19 0 rlineto + -59 -17 15 20 hvcurveto + 0 11 1 8 2 8 rrcurveto + 108 448 6 0 274 -526 24 0 144 574 rlineto + 9 37 23 26 75 0 rrcurveto + 18 hlineto + return + + + 126 -82 123 -167 -255 -158 -244 -203 -137 104 -96 149 253 156 233 198 vhcurveto + return + + + -141 -73 -275 -212 -78 -62 53 97 142 52 313 232 101 40 -82 -107 vhcurveto + return + + + -4 -16 19 0 rlineto + 61 17 -16 -21 hvcurveto + 0 -8 -2 -9 -2 -8 rrcurveto + -125 -497 rlineto + -9 -34 -20 -28 -77 0 rrcurveto + -17 0 -4 -16 341 0 4 16 -32 0 rlineto + -59 -18 15 24 hvcurveto + 0 8 1 7 2 8 rrcurveto + 56 223 rlineto + 4 -1 3 -1 5 0 24 -4 22 -1 28 0 rrcurveto + 204 81 101 98 74 -62 86 -170 hvcurveto + return + + + 17 2 15 1 17 0 rrcurveto + 95 30 -53 -60 -102 -80 -70 -102 hvcurveto + -20 0 -24 4 -16 4 rrcurveto + return + + + -599 0 -46 -179 16 0 rlineto + 18 
81 70 61 83 0 rrcurveto + 91 0 -133 -538 rlineto + -8 -34 -20 -28 -78 0 rrcurveto + -18 0 -4 -16 329 0 4 16 -21 0 rlineto + -60 -16 16 21 hvcurveto + 0 9 3 7 2 9 rrcurveto + 133 538 98 0 rlineto + 41 58 -5 -66 hvcurveto + 0 -27 -3 -27 -2 -17 rrcurveto + 16 hlineto + return + + + -222 0 -4 -16 9 0 rlineto + 32 0 12 -8 1 -16 0 -11 -6 -12 -9 -11 rrcurveto + -164 -184 -60 179 rlineto + -4 11 -2 9 0 8 rrcurveto + 31 22 4 30 vhcurveto + 19 0 4 16 -283 0 -4 -16 9 0 rlineto + 74 0 17 -52 11 -31 rrcurveto + 85 -231 -208 -230 rlineto + -37 -40 -49 -30 -54 -7 rrcurveto + -4 -16 249 0 4 16 -22 0 rlineto + -38 -18 13 19 hvcurveto + 0 9 3 9 9 10 rrcurveto + 181 201 69 -194 rlineto + 3 -10 1 -8 0 -8 rrcurveto + -37 -29 -4 -38 vhcurveto + -11 0 -4 -16 281 0 4 16 -14 0 rlineto + -50 0 -17 34 -12 34 rrcurveto + -96 266 187 213 rlineto + 37 42 29 32 69 0 rrcurveto + 4 hlineto + return + + + -563 0 -48 -158 17 0 rlineto + 38 115 89 6 96 0 rrcurveto + 202 0 -573 -616 581 0 46 178 -16 0 rlineto + -35 -129 -88 -11 -94 0 rrcurveto + -218 hlineto + return + + + 25 -22 25 -26 -29 -19 -25 -25 -26 19 -20 29 26 22 20 26 vhcurveto + return + + + 108 -67 46 -80 -148 -103 -161 -128 -73 34 -90 101 153 110 159 139 vhcurveto + return + + + -76 -71 -223 -106 -56 -3 74 33 136 87 149 82 53 14 -55 -38 vhcurveto + return + + + -8 -61 -40 -32 -53 0 -22 0 -37 10 -50 22 -93 40 -64 21 -33 0 rrcurveto + -62 -73 -48 -104 hvcurveto + 26 hlineto + 10 60 41 31 50 0 21 0 36 -10 50 -21 108 -44 10 -16 69 0 rrcurveto + 70 69 56 96 hvcurveto + return + + + -80 0 -676 -1845 80 0 rlineto + return + + + 50 -1500 -50 vlineto + return + + + -15 -61 -62 -32 -99 0 -42 0 -70 10 -94 22 -174 40 -120 21 -62 0 -116 0 -79 -40 -20 -112 rrcurveto + 26 hlineto + 19 60 61 31 94 0 39 0 68 -11 94 -20 203 -44 19 -16 129 0 131 0 77 46 19 106 rrcurveto + return + + + 50 -2000 -50 vlineto + return + + + -21 -67 -85 -35 -142 0 -60 0 -142 12 -134 23 -251 43 -183 24 -90 0 -167 0 -127 -45 -32 -121 rrcurveto + 26 hlineto + 28 66 115 33 135 0 56 
0 109 -11 134 -22 292 -48 70 -18 185 0 188 0 96 49 26 117 rrcurveto + return + + + 50 -2500 -50 vlineto + return + + + -27 -66 -164 -31 -153 0 -64 0 -225 11 -143 22 -268 41 -280 23 -97 0 -179 0 -222 -44 -35 -119 rrcurveto + 29 hlineto + 33 65 216 32 144 0 60 0 170 -12 144 -22 311 -47 179 -16 199 0 200 0 172 48 29 115 rrcurveto + return + + + 50 -3000 -50 vlineto + return + + + -27 -66 -279 -31 -153 0 -64 0 -337 11 -143 22 -268 41 -380 23 -97 0 -179 0 -337 -44 -35 -119 rrcurveto + 29 hlineto + 33 65 331 32 144 0 60 0 270 -12 144 -22 311 -47 291 -16 199 0 200 0 287 48 29 115 rrcurveto + return + + + + + + -351 endchar + + + 121 0 20 196 41 397 20 hstem + 707 hmoveto + -89 callsubr + -5 257 rmoveto + -88 callsubr + endchar + + + 66 0 37 588 37 hstem + 422 349 rmoveto + 52 13 20 8 24 21 25 22 16 41 0 41 rrcurveto + 106 -89 61 -173 vhcurveto + -280 -19 hlineto + 81 -5 15 -9 0 -79 rrcurveto + -437 vlineto + 0 -76 -17 -16 -79 -2 rrcurveto + -19 337 vlineto + 145 94 67 111 hvcurveto + 0 44 -17 42 -32 29 -29 26 -27 18 -66 11 rrcurveto + -207 18 rmoveto + 227 vlineto + 23 7 9 17 vhcurveto + 41 hlineto + 119 58 -51 -84 -83 -49 -41 -101 hvcurveto + -92 -40 rmoveto + 118 -4 38 4 60 -40 33 -22 14 -37 0 -48 0 -43 -13 -34 -25 -22 -44 -39 -35 -4 -84 0 rrcurveto + -46 -16 11 34 hvcurveto + endchar + + + 66 -14 44 606 40 hstem + 614 131 rmoveto + -87 callsubr + endchar + + + 121 0 37 588 37 hstem + 16 662 rmoveto + -86 callsubr + -80 -79 rmoveto + -85 callsubr + endchar + + + 10 0 37 290 41 256 38 hstem + 597 169 rmoveto + -84 callsubr + endchar + + + -45 0 20 307 41 256 38 hstem + 546 519 rmoveto + -5 143 -529 0 0 -19 rlineto + 77 -6 10 -13 0 -73 rrcurveto + -428 vlineto + 0 -87 -12 -12 -76 -5 rrcurveto + -19 281 19 vlineto + -77 4 -14 13 0 76 rrcurveto + 215 142 vlineto + 86 0 19 -15 8 -82 rrcurveto + 23 233 -23 hlineto + -9 -78 -20 -17 -84 0 rrcurveto + -142 217 hlineto + 31 5 8 29 vhcurveto + 132 hlineto + 121 0 18 -16 15 -89 rrcurveto + endchar + + + 121 -14 40 610 40 hstem + 
709 354 rmoveto + -83 callsubr + endchar + + + 121 0 20 295 44 283 20 hstem + 703 hmoveto + -82 callsubr + endchar + + + -268 0 20 622 20 hstem + 315 hmoveto + -81 callsubr + endchar + + + -228 -14 38 618 20 hstem + 354 662 rmoveto + -80 callsubr + endchar + + + 121 0 20 622 20 hstem + 723 hmoveto + -79 callsubr + endchar + + + 10 0 39 603 20 hstem + 598 174 rmoveto + -78 callsubr + endchar + + + 288 0 20 622 20 hstem + 109 44 vstem + 864 hmoveto + -77 callsubr + endchar + + + 121 -11 20 -9 20 622 20 hstemhm + 109 44 415 44 hintmask 01111000 + 707 662 rmoveto + -76 callsubr + hintmask 10111000 + -75 callsubr + endchar + + + 121 -14 36 618 36 hstem + 688 331 rmoveto + -74 callsubr + -114 6 rmoveto + -73 callsubr + endchar + + + -44 0 20 268 40 297 37 hstem + 16 662 rmoveto + -72 callsubr + -73 -73 rmoveto + -71 callsubr + endchar + + + 121 640 36 hstem + 701 -177 rmoveto + 18 vlineto + -114 7 -88 41 -73 104 68 13 42 15 44 43 73 70 35 80 0 119 rrcurveto + 207 -136 136 -191 -187 -140 -132 -212 vhcurveto + 0 -90 28 -78 48 -64 38 -51 39 -23 78 -27 rrcurveto + 41 -52 rlineto + 68 -86 123 -38 156 0 rrcurveto + -79 515 rmoveto + 0 -100 -24 -98 -40 -46 -37 -43 -50 -29 -62 0 -57 0 -65 30 -33 48 -38 53 -20 86 0 96 0 95 26 92 40 47 37 45 53 26 54 0 rrcurveto + 47 0 42 -15 36 -30 57 -48 34 -94 0 -115 rrcurveto + endchar + + + 66 0 20 605 37 hstem + 660 hmoveto + -70 callsubr + -294 583 rmoveto + -69 callsubr + endchar + + + -45 -14 36 -22 20 615 41 -34 20 hstemhm + 71 86 hintmask 00101000 + 469 463 rmoveto + -68 callsubr + hintmask 00011000 + -67 callsubr + hintmask 10101000 + -66 callsubr + hintmask 01001000 + -65 callsubr + hintmask 10101000 + -64 callsubr + endchar + + + 10 0 20 600 42 hstem + 593 492 rmoveto + -63 callsubr + endchar + + + 121 -14 44 612 20 hstem + 567 44 vstem + 705 662 rmoveto + -62 callsubr + endchar + + + 121 -11 20 633 20 hstem + 697 662 rmoveto + -204 -19 hlineto + 40 -4 32 2 0 -39 0 -17 -7 -25 -12 -30 rrcurveto + -147 -369 -152 342 rlineto + -30 67 -9 
24 0 16 0 20 13 9 32 2 rrcurveto + 28 2 0 19 -265 0 0 -19 rlineto + 50 -1 15 -12 43 -97 rrcurveto + 244 -544 15 0 222 563 rlineto + 29 74 23 16 40 1 rrcurveto + endchar + + + 343 -11 20 633 20 hstem + 932 662 rmoveto + -61 callsubr + endchar + + + 121 0 20 622 20 hstem + 704 hmoveto + -60 callsubr + endchar + + + 121 0 20 622 20 hstem + 703 662 rmoveto + -59 callsubr + endchar + + + 11 0 38 586 38 hstem + 598 176 rmoveto + -58 callsubr + endchar + + + 177 -13 53 -53 71 586 32 hstemhm + 42 92 68 79 157 53 hintmask 01111100 + 735 111 rmoveto + -32 -40 -21 -13 -39 0 -61 0 -44 31 -47 61 55 74 25 49 46 73 25 40 19 19 50 0 rrcurveto + 21 -216 -21 vlineto + 50 -5 15 -12 0 -30 0 -46 -27 -48 -65 -86 -56 63 -35 55 -41 88 117 50 38 46 0 72 rrcurveto + 79 -58 45 -79 -85 -67 -60 -90 vhcurveto + 0 -45 7 -36 28 -68 rrcurveto + -38 -22 rlineto + -102 -59 -55 -78 0 -81 rrcurveto + hintmask 10011100 + -100 64 -50 107 vhcurveto + 81 0 60 25 75 66 rrcurveto + hintmask 01111100 + 63 -65 49 -26 58 0 67 0 55 40 29 73 rrcurveto + -312 456 rmoveto + 0 -59 -31 -42 -86 -39 -33 61 -7 33 0 51 rrcurveto + 50 32 33 47 46 32 -39 -49 vhcurveto + hintmask 10011100 + -34 -452 rmoveto + -60 -47 -43 -17 -43 0 -71 0 -53 49 0 85 0 69 31 44 87 56 rrcurveto + 54 -113 40 -58 58 -68 rrcurveto + endchar + + + -101 656 20 hstem + 268 471 rmoveto + -101 callsubr + endchar + + + 320 -14 39 118 33 -32 43 277 44 137 31 hstemhm + 116 84 121 71 377 40 hintmask 11011111 + 688 73 rmoveto + -72 -35 -62 -13 -67 0 -171 0 -116 123 0 174 0 74 23 78 39 56 51 73 74 42 95 0 rrcurveto + 157 130 -123 -149 -101 -60 -96 -66 -29 -13 14 33 hvcurveto + 0 7 1 5 1 5 rrcurveto + 65 254 -69 0 -9 -38 -2 0 rlineto + -13 35 -24 17 -36 0 -52 0 -41 -24 -35 -46 -39 -51 -27 -66 0 -65 rrcurveto + hintmask 00100111 + -69 39 -43 49 vhcurveto + 47 0 47 27 31 45 rrcurveto + 2 hlineto + hintmask 11001111 + 5 -44 40 -29 44 0 rrcurveto + 99 85 111 129 165 -143 128 -185 hvcurveto + -86 0 -76 -23 -62 -46 -84 -63 -57 -103 0 -114 0 -197 158 -144 208 0 70 
0 55 13 93 44 rrcurveto + hintmask 00110111 + -128 361 rmoveto + 0 -45 -18 -65 -26 -49 -20 -36 -29 -22 -27 0 -37 0 -23 33 0 54 0 44 14 43 22 33 31 46 38 24 29 0 rrcurveto + 29 17 -21 -39 hvcurveto + endchar + + + -323 -11 111 248 111 hstem + 81 111 vstem + 192 403 rmoveto + -98 callsubr + -359 vmoveto + -98 callsubr + endchar + + + -351 -6 20 -16 20 hstemhm + 156 39 hintmask 10100000 + 83 -141 rmoveto + 69 33 43 63 0 60 rrcurveto + 52 -35 35 -45 -36 -24 -23 -34 -34 21 -17 37 vhcurveto + hintmask 01100000 + 11 0 10 4 8 0 8 0 6 -6 0 -7 0 -33 -28 -37 -55 -37 rrcurveto + endchar + + + -101 0 20 hstem + 52 74 104 34 114 80 vstem + 264 637 rmoveto + 80 -10 46 -44 20 -83 rrcurveto + 15 111 hlineto + -31 31 -49 17 -81 5 rrcurveto + 63 -34 -63 vlineto + -93 -6 -85 -45 0 -100 0 -84 40 -41 138 -78 rrcurveto + -282 vlineto + -88 0 -62 53 -21 100 rrcurveto + -15 -130 hlineto + 55 -37 44 -14 87 0 rrcurveto + -87 34 87 vlineto + 115 16 79 38 0 116 0 37 -7 32 -14 21 -27 41 -25 25 -121 65 rrcurveto + -34 16 rmoveto + -84 57 -20 24 0 44 0 51 33 40 71 14 rrcurveto + 34 -344 rmoveto + 91 -54 23 -31 0 -61 0 -71 -33 -31 -81 -17 rrcurveto + endchar + + + -101 -14 28 634 28 hstemhm + 56 76 -70 74 219 69 -55 76 hintmask 11000100 + 445 155 rmoveto + 0 80 -32 54 -123 82 rrcurveto + hintmask 11011000 + 99 53 35 34 0 75 rrcurveto + 78 -69 65 -99 -111 -83 -60 -96 vhcurveto + 0 -65 21 -38 103 -85 rrcurveto + hintmask 11100100 + -107 -75 -23 -39 0 -69 rrcurveto + -94 79 -69 113 vhcurveto + 120 77 66 103 hvcurveto + hintmask 11011000 + -90 378 rmoveto + 0 -62 -26 -42 -68 -40 -88 52 -37 46 0 62 rrcurveto + 62 43 37 67 vhcurveto + 68 41 -46 -69 hvcurveto + hintmask 11100100 + -84 -261 rmoveto + 68 -46 30 -40 0 -62 rrcurveto + -65 -45 -45 -65 -76 -51 52 92 vhcurveto + 0 65 21 41 59 48 rrcurveto + endchar + + + 120 66 134 66 hstem + 637 320 rmoveto + -93 callsubr + 589 -200 rmoveto + -93 callsubr + endchar + + + -268 -9 106 559 20 hstem + 130 106 vstem + 175 176 rmoveto + -107 callsubr + 103 -515 
rmoveto + -106 callsubr + endchar + + + -101 -14 37 560 79 hstem + 356 70 vstem + 438 681 rmoveto + -9 7 rlineto + -16 -21 -9 -5 -23 0 rrcurveto + -207 0 -109 -237 rlineto + 0 -1 -3 -2 -6 3 -2 9 hvcurveto + 96 0 70 -32 47 -38 45 -36 22 -50 0 -64 0 -86 -65 -83 -70 0 -20 0 -23 9 -28 23 -32 26 -19 5 -23 0 rrcurveto + -28 -17 -13 -25 -38 52 -24 75 hvcurveto + 68 0 55 15 47 34 68 50 30 62 0 96 0 53 -9 38 -26 36 -57 79 -50 22 -143 27 rrcurveto + 40 85 194 0 rlineto + 16 0 8 6 3 7 rrcurveto + endchar + + + -101 0 20 147 64 425 20 hstem + 292 78 vstem + 473 167 rmoveto + 64 -103 445 -44 vlineto + -314 -445 0 -64 280 0 0 -167 78 0 0 167 rlineto + -78 64 rmoveto + -240 0 240 343 rlineto + endchar + + + 621 249 rmoveto + -92 callsubr + endchar + + + -268 194 63 hstem + 285 194 rmoveto + 63 -246 -63 vlineto + endchar + + + 621 -24 rmoveto + -94 callsubr + endchar + + + -101 237 43 368 28 hstem + 30 92 vstem + 59 -22 rmoveto + 130 18 65 24 85 77 80 73 40 109 0 115 0 84 -24 72 -40 50 -39 48 -54 28 -64 0 rrcurveto + -119 -89 -101 -135 -122 72 -81 108 hvcurveto + 59 0 48 15 43 42 -40 -164 -112 -105 -152 -27 rrcurveto + 306 357 rmoveto + -53 -73 -22 -44 -75 -48 75 119 vhcurveto + 0 54 15 59 20 27 17 22 26 12 30 0 rrcurveto + 87 45 -86 -168 hvcurveto + endchar + + + -101 0 20 196 55 134 55 182 20 hstem + 495 405 rmoveto + 55 -96 vlineto + 30 202 -58 0 -30 -202 -133 0 31 202 -58 0 -31 -202 -117 0 0 -55 109 0 -21 -134 -115 0 0 -55 106 0 -33 -216 rlineto + 58 0 33 216 134 0 -31 -216 58 0 31 216 108 0 0 55 -99 0 20 134 rlineto + -58 hmoveto + -20 -134 -134 0 21 134 rlineto + endchar + + + -101 0 20 636 20 hstem + 213 86 vstem + 394 hmoveto + 15 vlineto + -75 -20 18 43 hvcurveto + 0 597 -9 3 -179 -91 0 -14 27 10 rlineto + 18 7 17 5 10 0 rrcurveto + 21 9 -15 -34 hvcurveto + -449 vlineto + 0 -55 -21 -21 -74 -4 rrcurveto + -15 vlineto + endchar + + + -268 656 20 hstem + 48 86 vstem + 304 -161 rmoveto + -140 117 -30 113 0 186 0 193 31 93 139 119 rrcurveto + -9 16 rlineto + -160 -95 -87 -144 
0 -185 0 -170 86 -169 158 -90 rrcurveto + endchar + + + -133 139 81 vstem + 382 -134 rmoveto + -90 110 -72 169 0 306 0 303 72 172 90 110 rrcurveto + 30 vlineto + -142 -134 -101 -214 0 -267 0 -272 101 -209 142 -134 rrcurveto + endchar + + + -12 139 95 vstem + 503 -243 rmoveto + -134 165 -135 265 0 456 0 458 135 294 134 138 rrcurveto + 33 vlineto + -213 -171 -151 -352 0 -400 0 -409 151 -313 213 -200 rrcurveto + endchar + + + 149 182 110 vstem + 667 -346 rmoveto + -178 220 -197 349 0 613 0 606 197 396 178 184 rrcurveto + 44 vlineto + -284 -228 -201 -464 0 -538 0 -541 201 -422 284 -267 rrcurveto + endchar + + + 207 124 130 vstem + 732 -453 rmoveto + -224 232 -254 504 0 746 0 777 254 473 224 231 rrcurveto + 56 vlineto + -355 -286 -253 -578 0 -673 0 -675 253 -577 355 -286 rrcurveto + endchar + + + -268 656 20 hstem + 199 86 vstem + 29 660 rmoveto + 145 -114 25 -115 0 -187 0 -194 -28 -94 -142 -117 rrcurveto + 9 -16 rlineto + 159 97 88 142 0 185 0 170 -91 167 -153 92 rrcurveto + endchar + + + -133 248 81 vstem + 86 1036 rmoveto + 90 -110 72 -169 0 -306 0 -303 -72 -172 -90 -110 rrcurveto + -30 vlineto + 142 134 101 214 0 267 0 272 -101 209 -142 134 rrcurveto + endchar + + + 7 383 95 vstem + 114 1530 rmoveto + 134 -165 135 -262 0 -460 0 -454 -135 -297 -134 -138 rrcurveto + -33 vlineto + 213 171 151 351 0 400 0 409 -151 314 -213 200 rrcurveto + endchar + + + 149 458 110 vstem + 83 2018 rmoveto + 178 -217 197 -351 0 -614 0 -606 -197 -396 -178 -184 rrcurveto + -44 vlineto + 284 228 201 468 0 534 0 545 -201 418 -284 267 rrcurveto + endchar + + + 207 554 130 vstem + 76 2510 rmoveto + 224 -232 254 -482 0 -766 0 -757 -254 -495 -224 -231 rrcurveto + -56 vlineto + 355 286 253 585 0 667 0 681 -253 570 -355 286 rrcurveto + endchar + + + 146 -19 27 294 28 -4 27 230 26 47 20 hstemhm + 61 71 189 24 56 72 190 23 hintmask 1101111110000000 + 686 213 rmoveto + 74 -35 43 -61 -96 -93 -104 -107 -78 53 -60 68 vhcurveto + 86 78 111 121 hvcurveto + -23 2 rmoveto + -105 callsubr + -42 491 rmoveto + 
-104 callsubr + hintmask 0011011110000000 + -103 callsubr + 130 587 rmoveto + -102 callsubr + endchar + + + -351 -11 111 hstem + 70 111 vstem + 181 43 rmoveto + -99 callsubr + endchar + + + 220 66 hstem + 309 66 vstem + 636 220 rmoveto + -100 callsubr + endchar + + + -157 -8 106 349 20 179 30 hstem + 68 51 65 106 32 92 vstem + 244 164 rmoveto + -91 callsubr + 63 -119 rmoveto + -90 callsubr + endchar + + + -193 656 20 hstem + 299 431 rmoveto + 15 98 17 76 0 30 rrcurveto + 23 -18 18 -24 -25 -18 -18 -25 vhcurveto + 0 -17 14 -87 18 -98 rrcurveto + -148 hmoveto + 20 96 12 78 0 30 rrcurveto + 24 -18 17 -24 -25 -18 -17 -26 vhcurveto + 0 -17 14 -87 18 -98 rrcurveto + endchar + + + -421 656 20 hstem + 101 431 rmoveto + 20 100 12 75 0 30 rrcurveto + 23 -15 17 -27 -24 -19 -17 -25 vhcurveto + 0 -28 14 -77 18 -98 rrcurveto + endchar + + + -323 -6 20 -16 20 330 111 hstemhm + 80 111 -11 39 hintmask 10110000 + 191 403 rmoveto + -97 callsubr + hintmask 10101000 + -84 -544 rmoveto + -96 callsubr + hintmask 10110000 + -33 -26 -23 -33 -33 21 -19 37 hvcurveto + hintmask 01001000 + -95 callsubr + endchar + + + -101 -8 20 576 74 hstem + 449 646 rmoveto + 16 -369 vlineto + -60 -147 18 -9 rlineto + 42 68 17 14 58 0 rrcurveto + 215 0 -198 -596 65 0 rlineto + endchar + + + -101 -14 28 368 46 hstem + 34 93 251 90 vstem + 446 684 rmoveto + -138 -15 -79 -24 -86 -90 -71 -73 -38 -95 0 -108 0 -70 19 -71 28 -51 35 -64 63 -37 79 0 66 0 56 27 37 46 33 40 18 56 0 64 rrcurveto + 129 -72 80 -117 vhcurveto + -44 0 -34 -7 -49 -38 27 151 112 108 157 26 rrcurveto + -70 -480 rmoveto + -102 -37 -72 -72 -94 -48 100 152 92 59 24 57 93 42 -66 -128 vhcurveto + endchar + + + -323 -14 20 650 20 hstem + 287 676 rmoveto + -67 0 -229 -690 68 0 rlineto + endchar + + + -22 552 1066 rmoveto + -73 0 -454 -1230 73 0 rlineto + endchar + + + 205 781 1566 rmoveto + 22 callsubr + endchar + + + 500 1071 2066 rmoveto + -103 0 -938 -2460 103 0 rlineto + endchar + + + 708 1293 2566 rmoveto + -104 0 -1173 -3075 104 0 rlineto + 
endchar + + + -101 -14 36 594 60 hstemhm + 318 79 -38 72 hintmask 11100000 + 61 510 rmoveto + 41 74 46 32 62 0 66 0 42 -34 0 -68 0 -61 -32 -45 -49 -28 -20 -12 -26 -11 -38 -13 rrcurveto + -14 vlineto + 57 0 23 -3 22 -7 rrcurveto + hintmask 11010000 + 69 -20 35 -49 0 -76 0 -85 -56 -68 -74 0 -28 0 -21 5 -37 26 -28 20 -16 6 -17 0 rrcurveto + -23 -18 -15 -21 -36 39 -21 73 hvcurveto + 89 0 95 29 48 64 29 38 17 49 0 53 0 52 -16 46 -28 31 -21 22 -18 12 -44 19 rrcurveto + hintmask 11100000 + 67 40 26 50 0 48 0 82 -63 55 -93 0 -104 0 -63 -67 -29 -95 rrcurveto + endchar + + + -101 0 76 526 74 hstem + 337 86 vstem + 474 137 rmoveto + -14 6 rlineto + -33 -56 -21 -11 -42 0 rrcurveto + -234 0 165 176 rlineto + 89 94 39 75 0 79 0 99 -72 77 -113 0 -123 0 -64 -82 -21 -117 rrcurveto + 21 -5 rlineto + 40 98 35 32 72 0 85 0 54 -50 0 -91 0 -85 -36 -76 -94 -99 rrcurveto + -178 -189 0 -12 391 0 rlineto + endchar + + + 121 0 20 177 39 hstem + 689 hmoveto + -22 callsubr + 17 236 rmoveto + -195 0 94 243 rlineto + endchar + + + 66 0 32 614 30 hstem + 426 365 rmoveto + -21 callsubr + -162 228 rmoveto + -20 callsubr + -30 vmoveto + -19 callsubr + endchar + + + 121 -19 48 613 20 -4 33 hstemhm + hintmask 10100000 + 657 152 rmoveto + -44 -46 -28 -25 -32 -18 -40 -22 -42 -12 -44 0 -59 0 -59 30 -29 46 -42 66 -12 73 0 101 0 200 79 113 105 0 67 0 54 -35 46 -55 rrcurveto + 24 -28 24 -33 19 -54 rrcurveto + 25 235 -27 hlineto + hintmask 01000000 + -18 callsubr + hintmask 10100000 + -58 24 -50 11 -48 0 rrcurveto + -204 -143 -156 -212 -195 135 -147 215 hvcurveto + 115 0 80 31 93 115 rrcurveto + endchar + + + 121 0 35 607 34 hstem + 14 676 rmoveto + -25 vlineto + 64 -6 19 -15 0 -46 rrcurveto + -486 vlineto + 0 -48 -25 -23 -58 -2 rrcurveto + -25 309 vlineto + 101 0 90 28 61 53 72 63 43 98 0 103 rrcurveto + 206 -136 125 -228 vhcurveto + -67 -78 rmoveto + 28 14 16 39 vhcurveto + 75 0 50 -35 38 -66 29 -51 12 -74 0 -86 0 -88 -19 -90 -31 -45 -34 -48 -49 -24 -66 0 rrcurveto + -45 -13 20 45 hvcurveto + endchar + + + 
66 0 31 611 34 hstem + 641 208 rmoveto + -17 callsubr + endchar + + + 10 0 20 622 34 hstem + 583 474 rmoveto + 202 -567 -25 vlineto + 69 -4 19 -14 0 -58 rrcurveto + -472 vlineto + 0 -56 -14 -14 -74 -8 rrcurveto + -25 360 25 vlineto + -92 4 -18 15 0 57 rrcurveto + 228 vlineto + 102 -2 41 -35 10 -118 rrcurveto + 25 338 -25 hlineto + -14 -113 -39 -36 -100 0 rrcurveto + 228 vlineto + 37 19 14 51 vhcurveto + 88 0 44 -12 31 -27 35 -30 11 -23 14 -76 rrcurveto + endchar + + + 177 -19 33 628 20 -4 33 hstemhm + hintmask 10100000 + 755 287 rmoveto + -344 -25 hlineto + 86 -5 16 -17 0 -52 rrcurveto + -89 vlineto + -51 -25 -34 -71 vhcurveto + -77 0 -43 29 -35 58 -33 55 -15 79 0 103 0 207 68 113 116 0 47 0 48 -18 43 -38 43 -38 18 -35 35 -76 rrcurveto + 25 235 -27 hlineto + hintmask 01000000 + -18 callsubr + hintmask 10100000 + -58 24 -45 11 -52 0 rrcurveto + -200 -148 -161 -199 -214 154 -136 201 hvcurveto + 104 0 108 25 65 38 rrcurveto + 118 vlineto + 0 63 11 29 75 8 rrcurveto + endchar + + + 177 0 20 306 47 283 20 hstem + 759 hmoveto + -16 callsubr + endchar + + + -212 0 20 636 20 hstem + 370 hmoveto + -15 callsubr + endchar + + + -101 -96 33 719 20 hstem + 478 676 rmoveto + -351 -25 hlineto + 81 -3 20 -13 0 -58 rrcurveto + -550 vlineto + -62 -18 -28 -46 -28 -17 12 20 vhcurveto + 0 26 27 10 0 30 rrcurveto + 40 -34 36 -38 -37 -34 -37 -37 vhcurveto + 0 -42 30 -43 37 -25 25 -17 43 -6 40 0 rrcurveto + 139 73 71 151 hvcurveto + 430 vlineto + 0 72 13 18 75 5 rrcurveto + endchar + + + 177 0 20 636 20 hstem + 769 hmoveto + -14 callsubr + endchar + + + 66 0 31 625 20 hstem + 638 227 rmoveto + -29 hlineto + -27 -62 -26 -48 -34 -31 -41 -37 -46 -18 -83 0 rrcurveto + -65 -20 15 43 hvcurveto + 464 vlineto + 0 82 13 13 87 4 rrcurveto + 24 -348 -25 vlineto + 68 -4 18 -14 0 -57 rrcurveto + -478 vlineto + 0 -53 -13 -11 -73 -9 rrcurveto + -25 577 vlineto + endchar + + + 343 0 20 636 20 hstem + 105 42 vstem + 921 hmoveto + -13 callsubr + endchar + + + 121 -18 20 -2 20 636 20 hstemhm + 104 44 431 44 
hintmask 01111000 + 701 676 rmoveto + -12 callsubr + hintmask 10111000 + -11 callsubr + endchar + + + 177 -19 33 644 33 hstem + 743 335 rmoveto + -10 callsubr + -177 -7 rmoveto + -9 callsubr + endchar + + + 10 0 20 621 35 hstem + 16 676 rmoveto + -8 callsubr + -69 -70 rmoveto + -7 callsubr + endchar + + + 177 -176 47 787 33 hstem + 730 -117 rmoveto + -33 -10 -17 -2 -20 0 -47 0 -42 18 -34 34 -19 19 -10 15 -18 37 158 48 95 114 0 171 rrcurveto + 220 -148 144 -206 -205 -149 -144 -217 vhcurveto + 0 -155 85 -131 153 -46 21 -44 12 -19 23 -23 56 -56 84 -32 88 0 67 0 47 10 65 25 rrcurveto + -170 476 rmoveto + -205 -61 -116 -116 -116 -61 114 207 209 64 114 112 114 64 -111 -212 vhcurveto + endchar + + + 121 0 20 622 34 hstem + 716 hmoveto + 25 vlineto + -18 0 -17 9 -9 13 rrcurveto + -199 282 rlineto + 58 16 26 16 28 25 29 27 16 46 0 45 rrcurveto + 115 -101 57 -195 vhcurveto + -308 -25 hlineto + 74 -5 14 -17 0 -78 rrcurveto + -431 vlineto + 0 -79 -10 -7 -78 -9 rrcurveto + -25 338 25 vlineto + -78 10 -10 9 0 73 rrcurveto + 196 28 vlineto + 207 -313 rlineto + -235 597 rmoveto + 33 14 12 39 93 37 -42 -106 vhcurveto + 0 -122 -52 -26 -131 -1 rrcurveto + endchar + + + -45 -19 33 646 31 hstem + 484 474 rmoveto + 218 -30 vlineto + -7 -27 -7 -6 -18 0 -9 0 -12 2 -21 7 -46 17 -32 6 -39 0 -136 0 -83 -74 0 -127 0 -101 58 -55 113 -54 43 -21 45 -20 34 -24 rrcurveto + 34 -24 23 -28 0 -36 0 -69 -49 -44 -77 0 -58 0 -48 24 -38 48 -29 37 -14 35 -17 71 rrcurveto + -29 -248 29 hlineto + 6 26 8 8 16 0 8 0 11 -3 22 -7 49 -17 37 -7 44 0 148 0 100 87 0 126 0 76 -45 65 -64 35 -51 28 -48 23 -49 26 rrcurveto + -80 42 -23 23 0 49 0 57 41 42 69 0 45 0 42 -19 36 -37 34 -35 16 -30 20 -65 rrcurveto + endchar + + + 66 0 20 636 20 hstem + 636 475 rmoveto + -6 callsubr + endchar + + + 121 -19 49 626 20 hstem + 579 44 vstem + 701 676 rmoveto + -219 -25 hlineto + 74 -6 23 -20 0 -73 rrcurveto + -300 vlineto + -148 -59 -74 -117 -97 -49 63 139 vhcurveto + 318 vlineto + 0 82 17 13 78 6 rrcurveto + 25 -336 -25 vlineto + 
70 -7 12 -8 0 -81 rrcurveto + -324 vlineto + 0 -97 23 -54 57 -45 47 -37 64 -17 74 0 71 0 70 20 43 35 49 39 27 77 0 100 rrcurveto + 314 vlineto + 0 59 14 16 64 10 rrcurveto + endchar + + + 121 -18 20 654 20 hstem + 701 676 rmoveto + -213 -25 hlineto + 71 -5 15 -7 0 -32 0 -16 -3 -11 -17 -44 rrcurveto + -127 -329 -138 334 rlineto + -20 48 -3 9 0 15 0 23 15 11 38 2 rrcurveto + 33 2 0 25 -336 0 0 -25 rlineto + 50 -7 10 -7 24 -56 rrcurveto + 256 -599 27 0 228 587 rlineto + 24 62 14 13 52 7 rrcurveto + endchar + + + 399 -15 20 651 20 hstem + 981 676 rmoveto + -182 -25 hlineto + 54 -3 15 -10 0 -31 0 -13 -2 -15 -5 -14 rrcurveto + -112 -343 -108 336 rlineto + -11 34 -3 12 0 10 0 24 15 9 44 3 rrcurveto + 13 1 0 25 -312 0 0 -25 rlineto + 41 -2 22 -8 9 -25 rrcurveto + 35 -96 -118 -308 -120 364 rlineto + -6 17 -1 7 0 9 0 29 12 9 52 4 rrcurveto + 25 -294 -25 vlineto + 42 -6 10 -9 17 -49 rrcurveto + 212 -602 28 0 186 477 171 -477 27 0 200 602 rlineto + 13 40 23 21 33 3 rrcurveto + endchar + + + 121 0 20 636 20 hstem + 699 hmoveto + -5 callsubr + endchar + + + 121 0 20 636 20 hstem + 699 676 rmoveto + -220 -25 hlineto + 68 -5 16 -8 0 -29 0 -20 -15 -39 -26 -43 rrcurveto + -109 -178 -120 232 rlineto + -12 23 -12 27 0 9 0 23 14 3 40 3 rrcurveto + 27 2 0 25 -335 0 0 -25 rlineto + 34 -2 30 -23 19 -34 rrcurveto + 180 -328 0 -136 rlineto + 0 -75 -10 -21 -83 -7 rrcurveto + -25 347 25 vlineto + -79 7 -13 13 0 78 rrcurveto + 0 178 191 311 rlineto + 14 23 20 11 34 5 rrcurveto + endchar + + + 66 0 35 621 20 hstem + 634 242 rmoveto + -26 hlineto + -29 -85 -13 -42 -56 -40 -41 -29 -66 -11 -95 0 rrcurveto + -83 0 379 625 0 16 -523 0 -22 -207 28 0 rlineto + 45 143 41 24 154 2 rrcurveto + 83 1 -382 -623 0 -16 579 0 rlineto + endchar + + + -101 -14 20 435 32 hstem + 473 64 rmoveto + -10 -10 rlineto + -3 -3 -3 -1 -5 0 rrcurveto + -14 -7 11 17 hvcurveto + 261 vlineto + 86 -74 48 -122 -113 -78 -46 -80 -42 24 -26 41 40 28 26 34 vhcurveto + 0 14 -6 12 -13 16 -9 10 -2 7 0 6 rrcurveto + 24 29 13 36 59 22 
-29 -64 vhcurveto + -68 vlineto + -116 -33 -47 -18 -38 -25 -45 -30 -22 -38 0 -44 0 -74 46 -32 64 0 58 0 46 19 55 50 11 -51 22 -18 49 0 43 0 31 16 38 41 rrcurveto + -195 54 rmoveto + -22 -31 -24 -9 -24 0 -30 0 -22 23 0 44 0 58 42 41 80 22 rrcurveto + endchar + + + -45 -14 32 401 54 183 20 hstem + 211 676 rmoveto + -194 -24 hlineto + 46 -9 9 -9 0 -40 rrcurveto + -607 12 vlineto + 79 56 rlineto + 46 -42 36 -15 50 0 rrcurveto + 134 92 103 150 139 -77 95 -111 hvcurveto + -49 0 -35 -16 -38 -40 rrcurveto + -57 vmoveto + 18 43 19 16 33 0 rrcurveto + 62 31 -66 -132 -139 -30 -64 -64 -42 -27 31 48 hvcurveto + endchar + + + -157 -14 67 389 31 hstem + 412 109 rmoveto + -37 -42 -29 -14 -41 0 rrcurveto + -87 -52 84 136 109 35 60 49 42 0 -23 -47 -48 22 -25 34 42 26 25 39 64 -67 46 -87 -133 -104 -102 -149 -143 89 -93 125 hvcurveto + 80 0 56 30 55 75 rrcurveto + endchar + + + -45 -14 56 -42 20 397 56 183 20 hstemhm + hintmask 01110000 + 534 20 rmoveto + 23 vlineto + -46 3 -13 13 0 42 rrcurveto + 575 -215 -24 vlineto + 67 -5 9 -7 0 -46 rrcurveto + -183 vlineto + -43 46 -30 16 -46 0 rrcurveto + -110 -82 -107 -145 hvcurveto + hintmask 10000000 + -137 76 -98 105 vhcurveto + 53 0 33 16 47 50 rrcurveto + -65 vlineto + 46 13 25 4 62 8 rrcurveto + hintmask 10100000 + -136 100 rmoveto + 0 -5 -9 -15 -10 -13 -20 -25 -23 -12 -23 0 rrcurveto + -53 -25 60 127 129 27 59 58 hvcurveto + 33 0 30 -24 15 -38 rrcurveto + endchar + + + -157 -14 72 187 37 160 31 hstem + 403 126 rmoveto + -41 -49 -31 -19 -43 0 -98 0 -15 92 -6 95 rrcurveto + 252 hlineto + -4 74 -18 63 -37 41 -29 32 -41 18 -58 0 rrcurveto + -125 -84 -100 -149 -137 83 -101 121 hvcurveto + 85 0 53 31 60 95 rrcurveto + -129 170 rmoveto + -134 hlineto + 120 24 40 46 vhcurveto + 29 0 12 -17 14 -30 9 -19 0 -21 0 -52 rrcurveto + endchar + + + -268 0 20 397 44 199 31 hstem + 14 461 rmoveto + -44 57 -330 vlineto + 0 -45 -11 -13 -46 -5 rrcurveto + -24 278 24 vlineto + -70 2 -12 16 0 65 rrcurveto + 310 86 44 -86 120 vlineto + 59 14 20 36 18 11 -7 -12 
vhcurveto + 0 -14 -22 -8 0 -35 rrcurveto + -31 26 -26 34 37 25 27 37 59 -58 41 -85 vhcurveto + -64 0 -44 -18 -27 -33 -33 -40 -7 -60 0 -79 rrcurveto + endchar + + + -101 -206 32 571 53 -8 30 hstemhm + 28 88 288 79 hintmask 01011000 + 254 68 rmoveto + -80 -21 10 27 30 30 19 39 hvcurveto + 91 0 3 0 33 14 61 25 31 43 0 63 0 40 -12 33 -28 25 rrcurveto + 80 53 -129 hlineto + hintmask 10111000 + -44 16 -28 6 -40 0 -119 0 -84 -62 0 -99 0 -76 53 -55 71 -20 -80 -23 -39 -35 0 -59 0 -37 18 -22 64 -23 -63 -9 -33 -25 0 -41 rrcurveto + -60 68 -32 126 167 94 60 94 74 -69 46 -108 vhcurveto + 244 vmoveto + -94 -18 -36 -48 -48 -18 36 93 98 18 33 47 48 19 -37 -93 vhcurveto + 9 -364 rmoveto + 65 24 -22 -34 -38 -55 -28 -99 -88 -46 23 44 hvcurveto + 0 21 7 11 28 23 rrcurveto + endchar + + + -45 0 20 386 67 183 20 hstem + 534 hmoveto + 24 vlineto + -35 -14 17 46 hvcurveto + 250 vlineto + 79 -48 57 -82 vhcurveto + -53 0 -45 -19 -49 -58 rrcurveto + 280 -193 -24 vlineto + 46 -9 8 -4 0 -48 rrcurveto + -502 vlineto + 0 -56 -8 0 -45 -9 rrcurveto + -24 240 24 vlineto + -37 5 -11 15 0 41 rrcurveto + 263 vlineto + 0 4 7 10 10 10 22 22 24 12 24 0 rrcurveto + 39 12 -26 -71 hvcurveto + -224 vlineto + 0 -41 -12 -16 -34 -4 rrcurveto + -24 vlineto + endchar + + + -323 0 20 516 155 hstem + 60 155 vstem + 215 613 rmoveto + 43 -34 35 -42 -45 -34 -34 -44 -44 33 -33 44 44 34 33 44 vhcurveto + 41 -613 rmoveto + -4 callsubr + endchar + + + -268 -203 31 708 155 hstem + 108 155 vstem + 263 613 rmoveto + 42 -35 36 -42 -44 -34 -34 -44 -44 32 -33 44 45 34 33 44 vhcurveto + -3 -152 rmoveto + -3 callsubr + endchar + + + -45 0 20 636 20 hstem + 543 hmoveto + 24 vlineto + -11 0 -5 3 -9 12 rrcurveto + -194 268 rlineto + 100 105 26 18 63 8 rrcurveto + 23 -214 -23 vlineto + 24 -4 rlineto + 26 -4 9 -6 0 -13 0 -9 -10 -18 -11 -11 rrcurveto + -128 -128 0 431 -187 0 0 -24 rlineto + 34 -3 14 -17 0 -38 rrcurveto + -510 vlineto + 0 -39 -15 -18 -33 -3 rrcurveto + -24 239 24 vlineto + -47 7 -5 6 0 47 rrcurveto + 0 114 23 24 95 -134 
rlineto + 18 -25 6 -12 0 -8 0 -12 -14 -6 -28 -1 rrcurveto + -24 vlineto + endchar + + + -323 0 20 636 20 hstem + 256 hmoveto + 24 vlineto + -33 1 -17 16 0 46 rrcurveto + 589 -191 -24 vlineto + 35 -3 17 -25 0 -37 rrcurveto + -500 vlineto + 0 -44 -17 -16 -34 -3 rrcurveto + -24 vlineto + endchar + + + 232 0 20 386 67 hstem + 814 hmoveto + 24 vlineto + -39 2 -11 14 0 43 rrcurveto + 248 vlineto + 89 -53 53 -82 vhcurveto + -57 0 -38 -23 -52 -64 -30 63 -35 24 -63 0 -64 0 -42 -26 -38 -61 rrcurveto + -3 75 -192 -24 hlineto + 44 -6 12 -11 0 -47 rrcurveto + -287 vlineto + 0 -41 -11 -13 -44 -8 rrcurveto + -24 239 24 vlineto + -35 5 -10 16 0 38 rrcurveto + 261 vlineto + 24 61 38 23 39 15 -23 -60 vhcurveto + -240 vlineto + 0 -41 -10 -15 -37 -3 rrcurveto + -24 232 24 vlineto + -35 4 -11 17 0 38 rrcurveto + 265 vlineto + 0 5 14 18 13 10 21 18 19 7 18 0 rrcurveto + 38 15 -33 -62 hvcurveto + -228 vlineto + 0 -42 -10 -14 -38 -3 rrcurveto + -24 vlineto + endchar + + + -45 0 20 386 67 hstem + 539 hmoveto + 24 vlineto + -39 3 -10 12 0 45 rrcurveto + 247 vlineto + 87 -53 55 -82 vhcurveto + -60 0 -48 -27 -34 -60 rrcurveto + -1 75 -191 -24 hlineto + 44 -7 9 -11 0 -43 rrcurveto + -289 vlineto + 0 -42 -8 -12 -45 -9 rrcurveto + -24 240 24 vlineto + -37 5 -11 18 0 37 rrcurveto + 264 vlineto + 0 4 7 10 10 10 22 22 24 12 24 0 rrcurveto + 34 17 -29 -56 hvcurveto + -237 vlineto + 0 -40 -12 -16 -34 -4 rrcurveto + -24 vlineto + endchar + + + -101 -14 31 425 31 hstem + 476 229 rmoveto + -2 callsubr + -147 -11 rmoveto + -1 callsubr + endchar + + + -45 -13 55 374 57 hstem + 212 461 rmoveto + -192 -24 hlineto + 44 -7 11 -14 0 -44 rrcurveto + -490 vlineto + 0 -42 -8 -13 -48 -8 rrcurveto + -24 272 24 vlineto + -61 3 -16 32 0 67 rrcurveto + 126 vlineto + 48 -47 25 -13 45 0 rrcurveto + 120 72 107 155 147 -70 77 -111 hvcurveto + -59 0 -38 -21 -34 -60 rrcurveto + 2 -46 rmoveto + 0 6 7 14 11 13 20 24 25 13 23 0 rrcurveto + 51 25 -59 -123 -138 -30 -54 -55 hvcurveto + -33 0 -29 27 -15 38 rrcurveto + endchar + + 
+ -45 -14 54 401 32 hstem + 536 -205 rmoveto + 24 vlineto + -46 9 -9 9 0 40 rrcurveto + 595 -16 vlineto + -77 -55 rlineto + -51 43 -31 13 -50 0 rrcurveto + -135 -87 -114 -140 -145 82 -88 103 hvcurveto + 48 0 40 11 35 45 rrcurveto + -132 vlineto + 0 -66 -8 -16 -65 -9 rrcurveto + -24 vlineto + 73 303 rmoveto + -38 -35 -20 -34 -54 -39 60 136 142 39 63 56 40 27 -32 -55 vhcurveto + endchar + + + -157 0 20 433 20 hstem + 218 461 rmoveto + -190 -24 hlineto + 43 -6 12 -11 0 -47 rrcurveto + -289 vlineto + 0 -41 -10 -11 -44 -8 rrcurveto + -24 265 24 vlineto + -61 4 -11 14 0 64 rrcurveto + 177 vlineto + 59 26 45 35 vhcurveto + 8 0 9 -5 11 -18 17 -28 17 -10 26 0 rrcurveto + 37 26 28 40 46 -34 33 -47 hvcurveto + -50 0 -38 -26 -47 -68 rrcurveto + endchar + + + -212 -14 34 411 20 -11 33 hstemhm + hintmask 01000000 + 340 326 rmoveto + 145 -22 vlineto + -6 -15 -6 -5 -13 0 -6 0 -9 2 -16 5 rrcurveto + hintmask 10100000 + -32 11 -23 4 -23 0 -91 0 -66 -62 0 -84 0 -66 41 -46 101 -43 69 -30 27 -25 0 -32 0 -39 -30 -26 -45 0 -70 0 -46 45 -21 87 rrcurveto + -28 -165 25 hlineto + 11 21 6 7 9 0 5 0 8 -2 10 -4 29 -12 51 -11 28 0 91 0 63 62 0 90 0 71 -38 43 -100 42 -68 28 -28 26 0 34 rrcurveto + 33 28 25 38 vhcurveto + 27 0 26 -11 22 -21 21 -20 11 -19 15 -43 rrcurveto + endchar + + + -268 -12 71 358 44 hstem + 307 112 rmoveto + -20 -38 -16 -15 -21 0 rrcurveto + -28 -11 20 40 hvcurveto + 298 95 44 -95 169 -25 vlineto + -58 -84 -37 -53 -72 -49 rrcurveto + -27 53 -322 vlineto + -67 43 -40 69 vhcurveto + 67 0 40 31 41 82 rrcurveto + endchar + + + -45 -14 65 -51 20 421 20 hstemhm + hintmask 01100000 + 538 20 rmoveto + 23 vlineto + -44 2 -13 16 0 44 rrcurveto + 356 -200 -24 vlineto + 50 -4 11 -14 0 -45 rrcurveto + -276 vlineto + hintmask 10100000 + -31 -34 -22 -13 -29 0 rrcurveto + -36 -20 20 56 hvcurveto + 334 -188 -24 vlineto + 41 -8 8 -11 0 -46 rrcurveto + -241 vlineto + -93 50 -52 83 vhcurveto + 55 0 38 17 52 49 rrcurveto + -65 vlineto + 43 15 24 4 65 7 rrcurveto + endchar + + + -101 -14 20 435 
20 hstem + 485 461 rmoveto + -151 -24 hlineto + 43 -2 12 -7 0 -24 0 -12 -3 -14 -8 -19 rrcurveto + -72 -182 -79 203 rlineto + -8 20 -1 4 0 7 0 15 10 6 25 3 rrcurveto + 18 2 0 24 -250 0 0 -24 rlineto + 22 -3 6 -3 6 -8 9 -12 37 -80 20 -49 rrcurveto + 120 -296 26 0 160 396 rlineto + 19 46 8 6 31 3 rrcurveto + endchar + + + 121 -14 20 435 20 hstem + 707 461 rmoveto + -135 -24 hlineto + 37 -4 11 -8 0 -23 0 -13 -15 -37 -29 -78 rrcurveto + -35 -94 rlineto + -10 42 -4 15 -20 70 -20 68 -7 28 0 10 0 16 10 5 38 3 rrcurveto + 24 -234 -24 vlineto + 39 -4 1 -1 19 -66 2 -7 2 -6 2 -6 rrcurveto + -68 -171 -45 118 rlineto + -27 71 -12 31 0 13 0 17 10 7 28 4 rrcurveto + 24 -222 -24 vlineto + 26 -5 6 -10 25 -62 rrcurveto + 148 -374 24 0 125 310 102 -310 23 0 155 401 rlineto + 13 34 11 11 26 5 rrcurveto + endchar + + + -101 0 20 421 20 hstem + 484 hmoveto + 24 vlineto + -16 5 -7 4 -7 11 rrcurveto + -148 228 101 126 rlineto + 19 23 20 11 31 5 rrcurveto + 24 -168 -24 vlineto + 20 -2 rlineto + 23 -2 8 -5 0 -15 0 -15 -11 -14 -27 -35 rrcurveto + -36 -46 rlineto + -4 6 -5 6 -4 5 -33 43 -25 42 0 13 0 12 14 6 33 1 rrcurveto + 24 -250 -24 vlineto + 26 -4 6 -5 20 -30 rrcurveto + 128 -197 rlineto + -15 -18 8 9 -15 -19 -15 -19 -14 -18 -14 -19 -56 -75 -20 -16 -37 -2 rrcurveto + -24 169 24 vlineto + -36 2 -14 7 0 15 0 16 25 40 39 47 6 7 5 7 5 6 13 -21 14 -21 15 -21 24 -35 9 -17 0 -12 0 -12 -13 -6 -32 -2 rrcurveto + -24 vlineto + endchar + + + -101 -205 57 589 20 hstem + 482 461 rmoveto + -152 -24 hlineto + 43 -2 11 -7 0 -24 0 -12 -2 -11 -9 -26 rrcurveto + -68 -188 -72 186 rlineto + -11 28 -9 26 0 4 0 15 11 8 25 2 rrcurveto + 16 1 0 24 -249 0 0 -24 rlineto + 22 -3 6 -3 6 -8 9 -12 38 -84 20 -50 rrcurveto + 119 -290 -18 -53 rlineto + -17 -50 -25 -32 -24 0 -9 0 -8 8 0 9 0 0 0 3 1 3 1 5 1 5 0 4 rrcurveto + 29 -26 23 -34 -38 -24 -29 -38 -47 40 -32 57 vhcurveto + 34 0 29 11 21 22 21 23 21 39 35 94 rrcurveto + 148 398 rlineto + 16 43 13 8 31 4 rrcurveto + endchar + + + -157 0 20 421 20 hstem + 420 160 
rmoveto + -28 hlineto + -9 -32 -8 -16 -14 -21 -33 -46 -33 -13 -81 0 rrcurveto + -29 0 231 403 0 26 -371 0 -7 -142 26 0 rlineto + 25 95 24 15 142 0 rrcurveto + -234 -404 0 -25 383 0 rlineto + endchar + + + 116 0 20 191 37 399 20 hstem + 685 hmoveto + 0 callsubr + 66 248 rmoveto + 1 callsubr + endchar + + + 95 0 38 280 35 261 39 hstem + 198 653 rmoveto + 2 callsubr + -44 -42 rmoveto + 3 callsubr + -9 -45 rmoveto + 4 callsubr + endchar + + + 70 -12 48 586 37 hstem + 711 659 rmoveto + -18 hlineto + -15 -25 -16 -2 -11 0 -52 0 -52 27 -54 0 rrcurveto + -242 -201 -163 -230 -147 95 -131 193 hvcurveto + 39 0 76 6 78 55 29 21 33 30 27 41 rrcurveto + -20 13 rlineto + -5 -7 -5 -6 -7 -7 -45 -49 -73 -49 -98 0 rrcurveto + -135 -60 102 112 171 118 201 201 121 31 -79 -65 hvcurveto + 0 -25 0 -9 -1 -8 rrcurveto + 16 hlineto + endchar + + + 189 0 20 595 38 hstem + 194 653 rmoveto + -5 -17 25 0 rlineto + 59 18 -15 -22 hvcurveto + 0 -7 -2 -9 -2 -8 rrcurveto + -122 -497 rlineto + -8 -34 -23 -28 -74 0 rrcurveto + -18 0 -4 -16 306 0 rlineto + 258 163 167 211 172 -101 103 -188 hvcurveto + -71 -41 rmoveto + 16 2 23 1 20 0 rrcurveto + 147 33 -137 -115 -141 -79 -184 -192 -35 -60 2 41 hvcurveto + 0 3 0 5 5 19 rrcurveto + endchar + + + 113 0 38 295 36 248 36 hstem + 734 653 rmoveto + 5 callsubr + endchar + + + 17 0 20 312 36 248 37 hstem + 723 653 rmoveto + -520 0 -4 -15 19 0 rlineto + 60 16 -18 -21 hvcurveto + 0 -7 -2 -9 -2 -8 rrcurveto + -124 -497 rlineto + -9 -34 -21 -28 -76 0 rrcurveto + -18 0 -4 -16 342 0 4 16 -32 0 rlineto + -60 -19 16 25 hvcurveto + 0 7 3 8 1 6 rrcurveto + 62 254 97 0 rlineto + 29 41 -3 -47 hvcurveto + 0 -18 -2 -29 -3 -14 rrcurveto + 16 0 60 242 -16 0 rlineto + -21 -85 -46 -10 -62 0 rrcurveto + -83 0 62 248 147 0 rlineto + 47 0 60 -9 3 -78 0 -14 0 -14 -1 -8 rrcurveto + 16 hlineto + endchar + + + 133 -12 37 606 37 hstem + 734 329 rmoveto + -313 0 -4 -16 19 0 rlineto + 61 14 -15 -22 hvcurveto + 0 -7 -1 -10 -2 -8 rrcurveto + -57 -201 rlineto + -27 -17 -29 -8 -41 0 rrcurveto + 
-147 -34 97 82 179 89 248 229 113 47 -81 -73 hvcurveto + 0 -11 -1 -11 -1 -10 rrcurveto + 16 0 45 223 -16 0 rlineto + -3 -16 -18 -21 -22 0 -22 0 -19 15 -22 6 -22 7 -32 9 -49 0 rrcurveto + -253 -182 -216 -210 -179 135 -75 157 hvcurveto + 63 0 75 0 79 47 rrcurveto + 62 216 rlineto + 10 36 18 26 85 0 rrcurveto + endchar + + + 272 0 20 300 37 hstem + 923 653 rmoveto + 6 callsubr + endchar + + + -121 0 20 hstem + 530 653 rmoveto + 7 callsubr + endchar + + + -61 -12 37 hstem + 620 653 rmoveto + -329 0 -4 -16 20 0 rlineto + 57 18 -15 -24 hvcurveto + 0 -8 -80 -325 -37 -144 -7 -27 -18 -69 -49 0 -11 0 -13 6 0 15 0 27 27 0 0 32 rrcurveto + 31 -33 20 -31 -50 -20 -34 -37 -60 57 -37 69 vhcurveto + 25 0 45 3 39 25 40 27 33 43 19 75 rrcurveto + 103 413 rlineto + 9 36 25 27 74 0 rrcurveto + 18 hlineto + endchar + + + 161 0 20 hstem + 802 653 rmoveto + 8 callsubr + endchar + + + 107 0 38 hstem + 668 190 rmoveto + -16 hlineto + -5 -14 -5 -12 -7 -11 -46 -93 -51 -22 -102 0 rrcurveto + -122 hlineto + -12 -28 2 22 hvcurveto + 0 2 1 14 4 15 rrcurveto + 122 481 rlineto + 9 37 21 26 75 0 rrcurveto + 18 0 4 16 -327 0 -4 -16 18 0 rlineto + 62 14 -18 -19 hvcurveto + 0 -9 -3 -20 -4 -16 rrcurveto + -118 -477 rlineto + -8 -34 -24 -28 -75 0 rrcurveto + -17 0 -4 -16 569 0 rlineto + endchar + + + 404 0 20 hstem + 1055 653 rmoveto + 9 callsubr + endchar + + + 250 0 20 hstem + 901 653 rmoveto + 10 callsubr + endchar + + + 131 -11 38 605 37 hstem + 712 420 rmoveto + 11 callsubr + -116 23 rmoveto + 12 callsubr + endchar + + + -7 0 20 274 37 285 37 hstem + 201 653 rmoveto + 13 callsubr + -61 -40 rmoveto + 14 callsubr + endchar + + + 180 -152 64 718 37 hstem + 83 -115 rmoveto + 11 -12 rlineto + 27 13 23 5 8 0 30 0 54 -16 50 -11 37 -8 52 -8 53 0 118 0 70 46 59 73 rrcurveto + -13 11 rlineto + -50 -53 -47 -13 -39 0 -87 0 -93 34 -65 0 -13 0 -14 -1 -16 -3 rrcurveto + -4 5 64 43 rlineto + 245 188 226 204 155 -93 92 -146 -254 -188 -241 -204 hvcurveto + 0 -132 83 -81 114 -17 rrcurveto + 365 475 rmoveto + -157 -94 
-283 -217 -80 -51 52 97 138 90 316 222 65 65 -31 -132 vhcurveto + endchar + + + 139 0 20 293 36 267 37 hstem + 725 16 rmoveto + -57 5 -26 42 -19 39 rrcurveto + -115 229 rlineto + 107 21 71 67 0 83 rrcurveto + 85 -56 66 -160 vhcurveto + -271 0 -4 -16 20 0 rlineto + 60 17 -16 -21 hvcurveto + 0 -8 -3 -9 -2 -8 rrcurveto + -124 -497 rlineto + -9 -34 -20 -28 -76 0 rrcurveto + -16 0 -4 -16 325 0 4 16 -19 0 rlineto + -58 -18 15 24 hvcurveto + 0 8 1 7 2 8 rrcurveto + 62 239 rlineto + 14 -2 10 -2 14 0 8 0 11 1 10 1 rrcurveto + 150 -315 169 0 rlineto + -313 613 rmoveto + 11 2 9 1 14 0 rrcurveto + 63 62 -17 -108 -84 -58 -58 -133 hvcurveto + -9 0 -10 1 -13 3 rrcurveto + endchar + + + 49 -10 38 603 37 hstem + 680 668 rmoveto + -16 hlineto + -3 -15 -14 -22 -20 0 -43 0 -57 37 -83 0 -158 0 -65 -94 0 -81 0 -49 45 -54 28 -28 36 -35 48 -36 30 -31 28 -28 18 -33 0 -35 rrcurveto + -65 -29 -71 -127 vhcurveto + -131 0 -37 88 -3 79 0 10 -1 11 0 10 rrcurveto + -16 0 -60 -236 16 0 rlineto + 16 27 14 11 25 0 76 0 31 -38 94 0 157 0 87 92 0 99 0 87 -59 58 -59 49 -56 48 -61 42 0 69 rrcurveto + 71 78 26 43 112 48 -76 -84 vhcurveto + 0 -8 0 -8 -1 -8 rrcurveto + 16 hlineto + endchar + + + -51 0 20 596 37 hstem + 670 653 rmoveto + 15 callsubr + endchar + + + 104 -13 47 hstem + 775 653 rmoveto + -263 0 -4 -16 19 0 rlineto + 58 0 16 -14 1 -23 0 -11 -2 -8 -2 -7 rrcurveto + -86 -351 rlineto + -26 -108 -58 -81 -111 0 -97 0 -47 58 0 54 0 17 3 36 4 15 rrcurveto + 90 360 rlineto + 9 36 18 27 79 0 rrcurveto + 17 0 4 16 -328 0 -4 -16 18 0 rlineto + 54 21 -11 -20 hvcurveto + 0 -2 0 -3 -1 -2 rrcurveto + -76 -330 rlineto + -2 -9 -12 -78 0 -8 0 -91 62 -96 160 0 190 0 56 125 26 106 rrcurveto + 87 356 rlineto + 9 36 23 27 75 0 rrcurveto + 16 hlineto + endchar + + + -26 -16 20 hstem + 760 653 rmoveto + -241 0 -4 -16 20 0 rlineto + 38 14 -7 -15 hvcurveto + 0 -21 -19 -29 -19 -24 rrcurveto + -280 -366 -4 1 0 391 rlineto + 33 5 37 65 vhcurveto + 19 0 4 16 -294 0 -4 -16 rlineto + 61 38 -24 -58 hvcurveto + -571 24 vlineto 
+ 441 576 rlineto + 26 34 41 39 65 4 rrcurveto + endchar + + + 315 -16 20 hstem + 1101 653 rmoveto + -242 0 -4 -16 17 0 rlineto + 41 13 -9 -16 hvcurveto + 0 -19 -23 -31 -17 -22 rrcurveto + -278 -370 -6 0 0 395 rlineto + 35 9 37 62 vhcurveto + 18 0 4 16 -285 0 -4 -16 8 0 rlineto + 63 19 -24 -31 hvcurveto + 0 -61 -226 -332 -5 0 0 377 rlineto + 71 37 0 39 vhcurveto + 7 0 4 16 -288 0 -4 -16 10 0 rlineto + 56 33 -29 -55 hvcurveto + -569 29 vlineto + 297 442 11 0 0 -442 29 0 438 576 rlineto + 25 33 43 41 66 3 rrcurveto + endchar + + + 189 0 20 hstem + 810 653 rmoveto + 16 callsubr + endchar + + + -66 0 20 hstem + 695 653 rmoveto + -237 0 -4 -16 rlineto + 58 21 -9 -19 hvcurveto + 0 -11 -4 -8 -12 -15 rrcurveto + -174 -212 -4 0 -75 207 rlineto + -2 7 -5 11 0 9 rrcurveto + 25 8 15 50 vhcurveto + 19 0 4 16 -299 0 -4 -16 rlineto + 77 0 29 -31 17 -45 rrcurveto + 89 -237 -61 -246 rlineto + -8 -34 -20 -28 -77 0 rrcurveto + -17 0 -4 -16 328 0 4 16 -20 0 rlineto + -58 -18 15 21 hvcurveto + 0 11 1 6 2 9 rrcurveto + 59 234 209 256 rlineto + 31 38 38 27 55 4 rrcurveto + endchar + + + 171 0 38 578 37 hstem + 802 653 rmoveto + 17 callsubr + endchar + + + -99 -10 61 361 29 hstem + 40 87 vstem + 472 428 rmoveto + -80 0 -9 -33 -1 0 rlineto + -2 22 -21 24 -43 0 rrcurveto + -144 -132 -200 -132 -64 26 -55 63 hvcurveto + 39 0 62 13 72 92 rrcurveto + 4 hlineto + -5 -16 -6 -21 0 -13 0 -32 9 -23 33 0 48 0 54 60 31 47 rrcurveto + -12 12 rlineto + -39 -52 -15 -2 -10 0 rrcurveto + -8 -6 6 11 9 1 2 0 hvcurveto + -14 258 rmoveto + 0 -34 -7 -40 -8 -26 -28 -95 -70 -95 -69 0 -27 0 -31 13 0 53 0 35 10 44 18 45 34 84 60 87 68 0 38 0 12 -30 0 -41 rrcurveto + endchar + + + -131 -11 29 368 55 207 20 hstem + 367 83 vstem + 214 382 rmoveto + 73 286 -158 -24 3 -16 rlineto + 8 3 12 1 10 0 13 0 15 -8 1 -13 0 -3 -1 -4 -1 -4 rrcurveto + -144 -563 rlineto + 45 -30 41 -18 53 0 rrcurveto + 157 109 180 121 82 -41 69 -70 hvcurveto + -49 0 -39 -31 -33 -32 rrcurveto + -39 -126 rmoveto + 11 45 34 89 72 0 rrcurveto + 33 38 
-22 -70 -102 -66 -174 -122 -23 -32 12 7 hvcurveto + endchar + + + -186 -11 55 368 29 hstem + 40 87 210 63 vstem + 363 111 rmoveto + -35 -34 -46 -33 -61 0 rrcurveto + -45 -49 22 87 80 57 179 119 24 10 -9 -12 hvcurveto + 0 -22 -29 -12 0 -24 rrcurveto + -17 12 -21 26 38 16 32 38 49 -47 27 -50 -142 -121 -157 -140 -77 40 -78 97 vhcurveto + 85 0 62 49 51 60 rrcurveto + endchar + + + -69 -12 61 362 30 207 20 hstem + 40 87 vstem + 527 668 rmoveto + -156 -24 4 -15 rlineto + 7 1 11 2 9 0 26 0 7 -7 0 -13 0 -4 -1 -4 -1 -4 rrcurveto + -53 -209 -2 0 rlineto + -1 24 -23 26 -57 0 rrcurveto + -154 -103 -223 -103 -53 15 -74 73 hvcurveto + 37 0 60 12 74 93 rrcurveto + 5 hlineto + -8 -30 -2 -14 0 -20 0 -18 8 -23 32 0 47 0 53 60 30 45 rrcurveto + -11 12 rlineto + -14 -21 -28 -35 -20 0 -10 0 -5 3 0 6 0 3 1 6 1 3 rrcurveto + -15 270 rmoveto + 0 -31 -5 -39 -11 -35 -28 -92 -66 -94 -70 0 rrcurveto + -28 -28 17 49 96 81 200 99 41 15 -24 -47 hvcurveto + endchar + + + -156 -11 59 363 30 hstem + 331 79 vstem + 363 112 rmoveto + -36 -46 -48 -18 -42 0 rrcurveto + -61 -52 43 64 hvcurveto + 28 vlineto + 119 6 167 49 0 127 0 62 -60 14 -42 0 -127 0 -110 -125 -25 -111 -5 -23 -1 -14 0 -23 0 -72 39 -84 98 0 62 0 68 24 70 86 rrcurveto + -253 112 rmoveto + 29 85 60 115 74 0 rrcurveto + 25 19 -18 -25 -88 -94 -69 -104 hvcurveto + endchar + + + -46 -187 29 557 38 202 29 hstem + 40 64 444 67 vstem + 248 437 rmoveto + -9 -38 77 0 -87 -342 rlineto + -15 -57 -30 -158 -65 0 -9 0 -6 3 0 6 0 12 16 1 0 19 rrcurveto + 15 -10 18 -27 -29 -14 -25 -24 -30 24 -24 48 vhcurveto + 88 0 54 95 33 83 15 37 8 38 7 27 rrcurveto + 79 306 76 0 12 38 -80 0 19 70 rlineto + 21 78 39 54 50 0 12 0 3 -3 0 -5 0 -8 -9 -15 0 -14 rrcurveto + -14 7 -11 28 20 21 12 28 35 -29 24 -50 vhcurveto + -76 0 -43 -43 -31 -47 -43 -64 5 -77 -63 0 rrcurveto + endchar + + + -109 -187 28 527 45 -2 30 hstemhm + 20 63 21 80 -71 73 150 79 -66 70 hintmask 01001000 + 492 413 rmoveto + -126 hlineto + hintmask 10101000 + -15 15 -31 13 -42 0 -106 0 -68 -79 0 -79 0 
-47 27 -47 67 -15 rrcurveto + -4 vlineto + hintmask 10110101 + -48 -37 -49 -38 hvcurveto + 0 -17 7 -19 17 -11 -62 -20 -55 -30 0 -58 rrcurveto + -58 50 -57 134 149 66 80 71 vhcurveto + 0 125 -233 -40 0 63 0 25 29 25 59 8 rrcurveto + hintmask 01001010 + 88 11 53 62 0 75 0 22 -4 13 -13 15 rrcurveto + 81 hlineto + -143 -19 rmoveto + -56 -25 -102 -71 -44 -12 35 46 vhcurveto + hintmask 10101010 + 68 44 71 54 44 10 -25 -37 vhcurveto + hintmask 10110001 + 13 -418 rmoveto + -44 -36 -46 -91 -95 -44 50 47 vhcurveto + 0 27 22 26 26 19 8 6 10 5 8 5 rrcurveto + 60 -22 132 5 0 -78 rrcurveto + endchar + + + -290 -11 20 511 96 hstem + 50 84 27 96 vstem + 257 566 rmoveto + 18 callsubr + -30 -125 rmoveto + -158 -22 3 -16 rlineto + 7 2 12 2 9 0 23 0 8 -5 0 -17 0 -3 -1 -7 -1 -2 -79 -307 0 -19 0 -12 0 -27 12 -19 28 0 49 0 50 60 35 45 rrcurveto + -13 10 rlineto + -36 -39 -6 -12 -22 0 -7 0 -6 3 0 7 0 4 0 4 1 6 rrcurveto + endchar + + + -212 -187 31 676 96 hstem + -16 66 226 96 vstem + 372 566 rmoveto + 18 callsubr + -21 -125 rmoveto + -158 -28 3 -17 rlineto + 7 3 12 2 8 0 rrcurveto + 22 10 -10 -12 hvcurveto + -92 -363 rlineto + -24 -96 -33 -76 -46 0 -5 0 -5 6 0 4 0 16 16 3 0 15 rrcurveto + 23 -16 15 -19 -35 -12 -30 -22 -28 20 -33 53 vhcurveto + 108 0 56 115 22 88 rrcurveto + endchar + + + -59 -11 66 -55 20 628 20 hstemhm + hintmask 01100000 + 527 428 rmoveto + -194 0 -4 -12 12 0 rlineto + 10 16 -7 -12 hvcurveto + 0 -13 -10 -13 -11 -10 rrcurveto + -167 -149 113 456 -158 -25 3 -14 rlineto + 8 2 10 0 9 0 22 0 10 -11 0 -11 0 -7 -1 -7 -2 -6 rrcurveto + -148 -589 76 0 45 166 56 49 63 -147 rlineto + hintmask 10000000 + 11 -26 21 -53 46 0 68 0 47 107 12 21 rrcurveto + -15 11 rlineto + -22 -30 -22 -43 -30 0 -26 0 -12 24 -15 35 rrcurveto + -64 152 106 92 rlineto + 38 33 28 18 67 7 rrcurveto + endchar + + + -283 -10 63 595 20 hstem + 278 668 rmoveto + -157 -23 3 -15 rlineto + 8 1 12 2 7 0 23 0 8 -10 0 -12 0 -3 0 -8 -3 -13 rrcurveto + -128 -510 rlineto + -3 -10 -3 -14 0 -12 0 -25 9 -26 30 0 58 0 43 
66 34 40 rrcurveto + -16 12 rlineto + -9 -16 -30 -39 -23 0 rrcurveto + -8 -4 6 7 7 1 4 0 hvcurveto + endchar + + + 109 -8 20 -12 20 357 64 hstemhm + 344 85 149 82 hintmask 01111000 + 667 107 rmoveto + -19 -37 -26 -16 -17 0 -9 0 -6 4 0 8 0 3 1 6 1 3 rrcurveto + 61 231 rlineto + 6 24 1 19 0 18 0 45 -18 26 -36 0 -20 0 -19 -7 -22 -15 -49 -33 -52 -62 -29 -52 rrcurveto + -5 0 10 36 rlineto + 6 20 3 15 0 17 0 44 -18 37 -37 0 -38 0 -60 -26 -94 -143 rrcurveto + -6 0 44 169 -157 -24 2 -14 rlineto + 8 1 11 1 8 0 rrcurveto + 27 6 -13 -18 hvcurveto + -95 -374 78 0 35 135 rlineto + 16 61 110 181 52 0 18 0 5 -18 0 -20 0 -15 -3 -16 -2 -9 rrcurveto + -77 -299 79 0 34 135 rlineto + 15 61 110 181 54 0 18 0 6 -18 0 -20 0 -17 -2 -9 -4 -14 rrcurveto + -56 -221 rlineto + -5 -20 -1 -10 0 -7 rrcurveto + hintmask 10011000 + -39 17 -10 22 vhcurveto + 47 0 52 50 32 54 rrcurveto + endchar + + + -104 -8 62 -54 20 359 62 hstemhm + 364 84 hintmask 10110000 + 467 96 rmoveto + -13 11 rlineto + -14 -17 -24 -36 -24 0 -11 0 -3 8 -1 6 0 5 0 3 1 2 rrcurveto + 59 231 rlineto + 5 20 6 25 0 25 0 33 -12 29 -44 0 -60 0 -60 -66 -40 -50 -2 -3 -49 -70 -5 0 rrcurveto + -3 0 51 189 -159 -23 2 -15 rlineto + 7 1 11 2 10 0 rrcurveto + 27 3 -14 -18 hvcurveto + hintmask 01110000 + -95 -374 77 0 33 124 rlineto + 6 21 26 47 36 54 31 47 63 86 38 0 18 0 6 -19 0 -19 0 -14 -5 -20 -2 -8 rrcurveto + -57 -221 rlineto + -3 -11 -4 -16 0 -13 rrcurveto + hintmask 10010000 + -23 9 -23 32 vhcurveto + 52 0 48 64 28 34 rrcurveto + endchar + + + -143 -11 30 392 30 hstem + 40 81 236 81 vstem + 438 287 rmoveto + 19 callsubr + -81 31 rmoveto + 20 callsubr + endchar + + + -112 -12 29 368 56 hstem + 390 84 vstem + 253 370 rmoveto + -3 2 18 69 -158 -24 2 -16 rlineto + 8 2 12 2 8 0 rrcurveto + 30 1 -14 -19 hvcurveto + -117 -464 rlineto + -12 -46 -7 -30 -62 -3 rrcurveto + -3 -12 234 0 3 12 -16 0 rlineto + -50 -11 21 22 hvcurveto + 0 11 2 11 3 13 rrcurveto + 24 100 rlineto + 19 -15 18 -4 24 0 rrcurveto + 144 110 169 129 101 -33 54 -61 hvcurveto 
+ -35 0 -39 -21 -39 -37 rrcurveto + -53 -145 rmoveto + 20 86 52 61 53 0 rrcurveto + 23 28 -20 -63 -112 -75 -173 -101 hvcurveto + -12 0 -20 5 -14 16 rrcurveto + endchar + + + -143 -12 61 363 29 hstem + 40 85 vstem + 463 428 rmoveto + -81 0 -7 -39 -3 0 rlineto + -2 26 -22 26 -48 0 -28 0 -32 -13 -31 -21 -89 -58 -80 -123 0 -123 0 -57 25 -58 64 0 36 0 60 10 67 93 rrcurveto + 6 0 -44 -184 rlineto + -9 -37 -16 -41 -68 0 rrcurveto + -19 0 -3 -12 250 0 3 12 -14 0 rlineto + -30 -19 18 30 hvcurveto + 0 9 0 10 3 10 rrcurveto + 26 432 rmoveto + 0 -113 -60 -114 -63 -45 -18 -13 -23 -4 -17 0 -43 0 -9 36 0 27 0 88 59 133 64 52 20 16 20 11 21 0 36 0 13 -31 0 -43 rrcurveto + endchar + + + -193 0 20 hstem + 175 267 rmoveto + 46 174 -157 -23 2 -15 rlineto + 6 1 14 2 7 0 22 0 11 -6 0 -13 0 -5 -1 -7 -3 -11 rrcurveto + -92 -364 77 0 21 76 rlineto + 5 17 87 256 61 0 18 0 -10 -38 39 0 rrcurveto + 41 24 44 38 27 -15 21 -29 hvcurveto + -55 0 -48 -65 -34 -54 -11 -18 -12 -20 -9 -18 rrcurveto + endchar + + + -161 -11 20 -19 25 400 26 hstemhm + 138 70 60 74 hintmask 01111000 + 390 441 rmoveto + -16 hlineto + -8 -10 -11 -10 -11 0 -29 0 -20 20 -43 0 -55 0 -59 -28 0 -79 0 -98 130 -50 0 -82 rrcurveto + -39 -14 -50 -59 -63 -41 67 63 vhcurveto + -16 0 rlineto + hintmask 10011000 + -25 -156 17 0 rlineto + 12 17 9 4 9 0 rrcurveto + hintmask 01111000 + 12 0 64 -20 34 0 87 0 48 63 0 62 0 95 -134 92 0 55 rrcurveto + 41 26 17 25 56 36 -56 -64 vhcurveto + 15 hlineto + endchar + + + -288 -9 66 343 38 hstem + 283 438 rmoveto + -70 0 31 129 -13 0 rlineto + -41 -87 -61 -37 -75 -14 rrcurveto + -5 -29 77 0 -81 -320 rlineto + -3 -11 -2 -13 0 -13 0 -27 8 -25 31 0 59 0 50 76 24 31 rrcurveto + -13 11 rlineto + -37 -50 -23 -2 0 0 -13 0 -3 6 0 6 0 3 1 5 1 3 rrcurveto + 78 320 70 0 rlineto + endchar + + + -127 -9 66 hstem + 30 81 vstem + 444 428 rmoveto + -77 0 -21 -75 rlineto + -20 -73 -46 -89 -43 -57 -35 -46 -40 -31 -25 0 -18 0 -8 13 0 21 0 8 1 7 2 9 rrcurveto + 84 326 -157 -26 3 -15 rlineto + 7 2 12 1 7 0 22 0 11 -11 0 
-12 0 -4 -2 -6 -4 -15 rrcurveto + -59 -231 rlineto + -5 -19 -3 -19 0 -21 0 -38 14 -36 47 0 23 0 26 11 23 17 40 30 90 121 7 5 rrcurveto + 1 0 -26 -95 rlineto + -3 -12 -1 -16 0 -9 0 -33 11 -19 29 0 58 0 51 74 23 33 rrcurveto + -13 11 -5 -6 rlineto + -17 -20 -19 -26 -21 0 -10 0 -4 6 0 8 0 3 0 2 1 4 rrcurveto + endchar + + + -95 -9 39 408 20 hstem + 72 82 285 40 vstem + 247 454 rmoveto + -4 4 rlineto + -50 -22 -54 -7 -52 -13 rrcurveto + -15 vlineto + 13 44 0 -20 hvcurveto + 0 -8 -3 -9 -2 -8 rrcurveto + -58 -210 rlineto + -6 -20 -3 -21 0 -21 0 -73 52 -20 64 0 74 0 77 44 43 60 58 80 39 101 0 99 rrcurveto + 32 -5 51 -42 -29 -25 -8 -34 vhcurveto + 0 -44 61 9 0 -74 0 -25 -7 -25 -8 -24 -32 -90 -70 -113 -108 0 -40 0 -20 28 0 38 0 11 1 11 3 10 rrcurveto + endchar + + + 174 -9 39 400 20 -12 20 -18 20 hstemhm + 72 84 205 78 269 40 hintmask 11001110 + 531 450 rmoveto + -84 hlineto + -31 -110 -26 -78 -53 -81 -24 -37 -71 -113 -58 -1 -22 0 -6 21 0 26 0 28 8 32 3 13 rrcurveto + 81 307 rlineto + hintmask 10011110 + -4 3 rlineto + -50 -21 -52 -14 -53 -9 rrcurveto + -15 vlineto + 16 38 2 -24 hvcurveto + 0 -10 -2 -10 -3 -9 rrcurveto + -47 -177 rlineto + -8 -31 -11 -31 0 -33 0 -53 22 -34 56 0 112 0 82 136 40 89 -11 -40 -12 -43 0 -42 rrcurveto + -72 42 -28 69 171 105 238 146 vhcurveto + hintmask 10101110 + 32 -5 51 -42 vhcurveto + -24 0 -32 -6 2 -36 2 -43 59 8 0 -74 0 -25 -7 -25 -8 -24 -31 -88 -56 -115 -108 0 -40 0 -19 22 0 39 0 24 6 23 6 23 rrcurveto + endchar + + + -51 -9 64 320 66 hstem + 305 288 rmoveto + -20 55 rlineto + -12 33 -19 41 -44 24 rrcurveto + -132 -33 3 -18 rlineto + 10 4 15 2 13 0 55 0 26 -41 17 -48 rrcurveto + 33 -92 -65 -101 rlineto + -32 -49 -26 -10 -12 0 -25 0 4 20 -26 0 rrcurveto + -25 -13 -21 -16 -22 16 -25 36 hvcurveto + 56 0 29 43 32 49 rrcurveto + 63 98 40 -113 rlineto + 13 -36 19 -41 41 0 58 0 45 96 12 15 rrcurveto + -14 10 rlineto + -21 -43 -24 -14 -16 0 -45 0 -23 135 -30 62 rrcurveto + 55 85 rlineto + 14 22 11 16 30 0 17 0 5 -9 22 0 rrcurveto + 26 13 23 19 21 
-17 12 -32 hvcurveto + -50 0 -35 -44 -26 -40 rrcurveto + endchar + + + -105 -183 65 hstem + 444 52 vstem + 270 307 rmoveto + -9 74 -7 26 -23 33 rrcurveto + -150 -25 2 -16 rlineto + 9 1 16 3 11 0 56 0 9 -60 5 -45 rrcurveto + 38 -314 rlineto + -26 -40 -57 -62 -17 0 -16 0 2 31 -43 0 rrcurveto + -13 -27 -11 -30 -24 19 -31 37 hvcurveto + 68 0 65 98 73 92 42 53 162 240 0 78 rrcurveto + 39 -25 23 -32 -29 -18 -18 -22 vhcurveto + 0 -38 52 -10 0 -21 0 -18 -8 -20 -20 -37 -22 -41 -35 -60 -61 -83 rrcurveto + endchar + + + -102 -14 22 -8 20 -6 80 276 80 hstemhm + 382 66 hintmask 01011000 + 467 450 rmoveto + -314 0 -36 -137 18 -4 rlineto + 23 55 24 6 57 0 rrcurveto + 131 hlineto + -107 -125 -112 -122 -109 -123 rrcurveto + 11 -10 rlineto + hintmask 00101000 + 37 16 45 8 40 0 rrcurveto + hintmask 01001000 + 33 0 34 -5 32 -9 rrcurveto + hintmask 10111000 + 21 -5 25 -9 22 0 rrcurveto + 46 60 26 54 24 -15 18 -25 -19 -19 -19 -19 hvcurveto + 0 -22 12 -3 0 -13 0 -16 -12 -8 -15 0 -45 0 -3 86 -92 0 -23 0 -25 -7 -19 -14 rrcurveto + -3 1 rlineto + 107 120 107 120 108 120 rrcurveto + endchar + + + -601 658 20 hstem + -147 507 rmoveto + -56 callsubr + endchar + + + -601 658 20 hstem + -371 507 rmoveto + -29 callsubr + endchar + + + -601 654 20 hstem + -75 507 rmoveto + -52 callsubr + endchar + + + -41 560 554 rmoveto + -256 213 -48 0 -256 -213 64 0 216 146 216 -146 rlineto + endchar + + + 378 979 564 rmoveto + -465 213 -48 0 -466 -213 99 0 391 145 390 -145 rlineto + endchar + + + 859 1460 564 rmoveto + -706 213 -48 0 -706 -213 153 0 577 145 577 -145 rlineto + endchar + + + 1285 1886 599 rmoveto + -943 197 -943 -197 5 -26 937 161 939 -161 rlineto + endchar + + + 1727 2328 603 rmoveto + -1164 213 -1164 -213 5 -31 1158 182 1160 -182 rlineto + endchar + + + -601 532 55 -9 55 hstemhm + hintmask 10000000 + -94 638 rmoveto + -31 callsubr + hintmask 01000000 + -50 callsubr + hintmask 10000000 + -49 callsubr + hintmask 01000000 + -30 callsubr + hintmask 10000000 + -47 callsubr + endchar + + + -41 598 
59 32 61 hstem + 535 750 rmoveto + 21 callsubr + endchar + + + 378 608 59 32 61 hstem + 953 760 rmoveto + 24 callsubr + endchar + + + 859 608 64 35 67 hstem + 1434 774 rmoveto + 26 callsubr + endchar + + + 1285 608 66 31 66 hstem + 1857 771 rmoveto + 28 callsubr + endchar + + + 1727 617 66 31 66 hstem + 2299 780 rmoveto + 30 callsubr + endchar + + + -601 547 54 hstem + -74 547 rmoveto + -54 callsubr + endchar + + + -601 770 50 hstem + 20 770 rmoveto + -57 callsubr + endchar + + + 399 770 50 hstem + 1000 770 rmoveto + 50 -1000 -50 vlineto + endchar + + + 899 770 50 hstem + 1500 770 rmoveto + 23 callsubr + endchar + + + 1399 770 50 hstem + 2000 770 rmoveto + 25 callsubr + endchar + + + 1899 770 50 hstem + 2500 770 rmoveto + 27 callsubr + endchar + + + 2399 770 50 hstem + 3000 770 rmoveto + 29 callsubr + endchar + + + -601 507 60 77 20 hstem + -121 664 rmoveto + -44 callsubr + endchar + + + -601 523 99 hstem + -280 99 vstem + -181 572 rmoveto + -43 callsubr + endchar + + + -601 523 99 hstem + -379 99 100 99 vstem + -81 572 rmoveto + -55 callsubr + -199 hmoveto + -55 callsubr + endchar + + + -601 644 20 64 23 hstem + -173 55 vstem + -261 581 rmoveto + -89 33 vlineto + 2 35 8 0 rlineto + 54 46 50 66 53 -21 55 -83 hvcurveto + -23 0 -23 -7 -15 -11 -16 -11 -8 -19 0 -11 0 -14 11 -14 17 0 37 0 -19 64 43 0 rrcurveto + 31 14 -40 -37 -40 -32 -30 -44 hvcurveto + endchar + + + -601 512 34 131 34 hstem + -329 34 131 34 vstem + -130 611 rmoveto + -46 callsubr + -34 1 rmoveto + -45 callsubr + endchar + + + -601 658 20 hstem + -245 507 rmoveto + -41 callsubr + -302 -148 rmoveto + -41 callsubr + endchar + + + -601 654 20 hstem + -74 674 rmoveto + -42 callsubr + endchar + + + -41 560 767 rmoveto + -64 0 -216 -146 -216 146 -64 0 256 -213 48 0 rlineto + endchar + + + 378 979 777 rmoveto + -99 0 -390 -145 -391 145 -99 0 466 -213 48 0 rlineto + endchar + + + 859 1460 777 rmoveto + -153 0 -577 -145 -577 145 -153 0 706 -213 48 0 rlineto + endchar + + + 1285 1886 770 rmoveto + -5 26 -939 -161 
-937 161 -5 -26 943 -197 rlineto + endchar + + + 1727 2328 785 rmoveto + -5 31 -1160 -182 -1158 182 -5 -31 1164 -213 rlineto + endchar + + + -601 -250 55 vstem + -195 500 rmoveto + 200 -55 -200 vlineto + endchar + + + -601 -326 54 84 55 vstem + -133 500 rmoveto + 200 -55 -200 vlineto + -84 hmoveto + 200 -54 -200 vlineto + endchar + + + -601 658 20 hstem + -22 507 rmoveto + -28 callsubr + -116 hmoveto + -28 callsubr + endchar + + + -601 507 60 101 99 hstem + -280 99 vstem + -181 717 rmoveto + -43 callsubr + 60 -53 rmoveto + -44 callsubr + endchar + + + -601 604 60 hstem + -92 507 rmoveto + -27 callsubr + endchar + + + -601 -299 39 vstem + -187 745 rmoveto + -40 callsubr + endchar + + + -601 -199 39 vstem + -272 502 rmoveto + -39 callsubr + endchar + + + -601 -299 39 vstem + -178 521 rmoveto + -38 callsubr + endchar + + + -601 15 39 vstem + -58 502 rmoveto + -39 callsubr + endchar + + + -601 -127 -224 rmoveto + -56 callsubr + endchar + + + -601 -371 -224 rmoveto + -29 callsubr + endchar + + + -601 -188 40 hstem + -250 40 vstem + -210 -283 rmoveto + 230 -40 -95 -147 -40 147 -95 vlineto + endchar + + + -601 -188 40 hstem + -267 40 vstem + -80 -188 rmoveto + 40 -147 95 -40 -230 40 95 vlineto + endchar + + + -601 680 55 hstem + -135 55 vstem + -80 531 rmoveto + 204 -300 -55 245 -149 vlineto + endchar + + + -601 454 20 hstem + -36 345 rmoveto + 14 2 23 4 17 10 24 14 9 21 0 22 rrcurveto + 35 -25 21 -24 -27 -19 -22 -19 vhcurveto + 0 -32 35 -10 0 -9 0 -3 -8 -8 -19 -4 rrcurveto + endchar + + + -601 -360 37 vstem + -232 -266 rmoveto + -36 callsubr + endchar + + + -601 -240 40 hstem + -250 40 vstem + -115 -240 rmoveto + -35 callsubr + endchar + + + -601 -93 40 hstem + -250 40 vstem + -115 -93 rmoveto + -34 callsubr + endchar + + + -601 -168 44 hstem + -249 38 vstem + -134 -168 rmoveto + 44 -77 71 -38 -71 -77 -44 77 -82 38 82 vlineto + endchar + + + -601 -168 44 hstem + -134 -168 rmoveto + 44 -192 -44 vlineto + endchar + + + -601 -287 19 hstem + -54 55 vstem + 1 75 rmoveto + -55 
-252 hlineto + -44 -12 -47 -44 vhcurveto + -22 0 -18 15 -14 16 -14 16 -8 16 -21 0 rrcurveto + -16 -12 -12 -15 -32 58 -23 70 61 47 62 77 hvcurveto + endchar + + + -601 -287 19 hstem + -54 55 vstem + 1 75 rmoveto + -55 -223 hlineto + -77 47 -62 61 70 58 23 32 15 -12 12 -16 vhcurveto + -21 0 -8 -16 -14 -16 -14 -16 -18 -15 -22 0 rrcurveto + -44 -12 47 44 hvcurveto + endchar + + + -601 -217 99 hstem + -280 99 vstem + -181 -168 rmoveto + -43 callsubr + endchar + + + -601 -218 99 hstem + -379 99 100 99 vstem + -81 -168 rmoveto + -26 callsubr + -199 hmoveto + -26 callsubr + endchar + + + -601 -268 34 131 34 hstem + -329 34 131 34 vstem + -130 -168 rmoveto + -32 callsubr + -34 hmoveto + -45 callsubr + endchar + + + -601 -199 39 vstem + -272 -353 rmoveto + -39 callsubr + endchar + + + -601 -215 35 180 20 hstem + -200 75 vstem + -212 hmoveto + -53 callsubr + endchar + + + -601 -165 56 109 20 hstem + -322 56 vstem + -157 -73 rmoveto + -33 callsubr + endchar + + + -601 -250 40 vstem + -210 -234 rmoveto + 132 -40 -132 vlineto + endchar + + + -601 -153 55 hstem + -385 55 202 55 vstem + -73 -235 rmoveto + 137 -312 -137 55 82 202 -82 vlineto + endchar + + + -601 -227 40 hstem + -380 40 90 40 95 40 vstem + -75 -110 rmoveto + -40 -35 hlineto + -28 -15 -14 -34 -26 -20 14 28 vhcurveto + 35 -40 -36 vlineto + -28 -11 -13 -32 -27 -20 13 28 vhcurveto + 36 -40 -35 vlineto + -50 35 -32 54 vhcurveto + 32 0 21 16 8 18 7 -18 30 -16 31 0 rrcurveto + 54 33 32 50 hvcurveto + endchar + + + -601 -74 -73 rmoveto + -42 callsubr + endchar + + + -601 -74 -240 rmoveto + -52 callsubr + endchar + + + -601 -225 60 hstem + -118 -68 rmoveto + -44 callsubr + endchar + + + -601 -119 60 hstem + -89 -216 rmoveto + -27 callsubr + endchar + + + -601 -219 55 -9 55 hstemhm + hintmask 10000000 + -94 -113 rmoveto + -31 callsubr + hintmask 01000000 + -50 callsubr + hintmask 10000000 + -49 callsubr + hintmask 01000000 + -30 callsubr + hintmask 10000000 + -47 callsubr + endchar + + + -41 -269 59 32 61 hstem + 535 -117 
rmoveto + 21 callsubr + endchar + + + 378 -269 59 32 61 hstem + 953 -117 rmoveto + 24 callsubr + endchar + + + 859 -283 64 35 67 hstem + 1434 -117 rmoveto + 26 callsubr + endchar + + + 1285 -280 66 31 66 hstem + 1857 -117 rmoveto + 28 callsubr + endchar + + + 1727 -280 66 31 66 hstem + 2299 -117 rmoveto + 30 callsubr + endchar + + + -601 -195 54 hstem + -74 -195 rmoveto + -54 callsubr + endchar + + + -601 -191 50 hstem + 20 -191 rmoveto + -57 callsubr + endchar + + + 399 -177 50 hstem + 1000 -177 rmoveto + 50 -1000 -50 vlineto + endchar + + + 899 -177 50 hstem + 1500 -177 rmoveto + 23 callsubr + endchar + + + 1399 -177 50 hstem + 2000 -177 rmoveto + 25 callsubr + endchar + + + 1899 -177 50 hstem + 2500 -177 rmoveto + 27 callsubr + endchar + + + 2399 -177 50 hstem + 3000 -177 rmoveto + 29 callsubr + endchar + + + -601 -300 50 59 50 hstem + 20 -191 rmoveto + -57 callsubr + 500 -109 rmoveto + -57 callsubr + endchar + + + -601 214 55 -9 55 hstemhm + hintmask 10000000 + -100 320 rmoveto + -31 callsubr + hintmask 01000000 + -50 callsubr + hintmask 10000000 + -49 callsubr + hintmask 01000000 + -30 callsubr + hintmask 10000000 + -47 callsubr + endchar + + + -601 230 44 hstem + -78 230 rmoveto + 44 -306 -44 vlineto + endchar + + + -601 230 44 hstem + 20 230 rmoveto + 44 -500 -44 vlineto + endchar + + + -601 -41 580 rmoveto + -54 0 -285 -654 54 0 rlineto + endchar + + + -601 642 20 hstem + 31 662 rmoveto + -54 0 -357 -818 54 0 rlineto + endchar + + + -601 -554 -181 rmoveto + 54 0 375 861 -54 0 rlineto + endchar + + + -601 -103 729 rmoveto + -54 0 -418 -958 54 0 rlineto + endchar + + + -601 -82 778 rmoveto + -54 0 -461 -1058 54 0 rlineto + endchar + + + -601 -141 830 rmoveto + -54 0 -323 -1157 54 0 rlineto + endchar + + + -601 -554 -429 rmoveto + 54 0 369 1360 -54 0 rlineto + endchar + + + -601 -210 1565 rmoveto + -54 0 -497 -1846 54 0 rlineto + endchar + + + -601 -189 37 vstem + -280 -71 rmoveto + -37 callsubr + endchar + + + -601 -190 55 hstem + -385 55 202 55 vstem + -73 
-190 rmoveto + 137 -55 -82 -202 82 -55 -137 vlineto + endchar + + + -601 -227 40 94 40 hstem + -313 40 86 40 vstem + -147 -227 rmoveto + 174 -166 -174 vlineto + 126 40 rmoveto + -86 94 86 hlineto + endchar + + + -601 -120 55 hstem + -79 -91 rmoveto + -28 19 -13 7 -24 0 -29 0 -29 -13 -28 -40 -24 40 -29 13 -32 0 -19 0 -16 -6 -30 -20 rrcurveto + -51 vlineto + 32 20 19 2 8 0 46 0 16 -32 20 -37 rrcurveto + 20 hlineto + 19 41 20 28 32 0 12 0 21 0 36 -22 rrcurveto + endchar + + + -601 -135 688 rmoveto + -27 27 -68 -67 -67 67 -29 -27 68 -68 -64 -65 26 -28 66 65 67 -67 26 30 -65 65 rlineto + endchar + + + -601 202 55 -9 55 hintmask 01000000 + -177 829 rmoveto + hintmask 10000000 + -65 -15 -36 -36 0 -50 0 -22 6 -24 12 -24 rrcurveto + hintmask 01000000 + 22 -43 6 -14 0 -20 0 -23 -14 -13 -37 -17 rrcurveto + -29 vlineto + 76 21 30 29 0 55 0 25 -4 15 -20 41 rrcurveto + hintmask 10000000 + -18 38 -4 8 0 18 rrcurveto + hintmask 01000000 + 0 23 15 17 31 11 rrcurveto + endchar + + + -601 770 50 58 50 hstem + 20 878 rmoveto + -57 callsubr + 500 -108 rmoveto + -57 callsubr + endchar + + + -601 627 54 hstem + -350 55 173 54 vstem + -68 538 rmoveto + 143 -282 -143 55 89 173 -89 vlineto + endchar + + + -600 -292 54 44 54 hstem + 323 -194 rmoveto + 54 -312 -54 vlineto + 312 -98 rmoveto + 54 -312 -54 vlineto + endchar + + + -601 532 56 -10 55 38 56 -10 55 hstemhm + hintmask 00100000 + -85 777 rmoveto + -51 callsubr + hintmask 00010000 + -50 callsubr + hintmask 00100000 + -49 callsubr + hintmask 00010000 + -48 callsubr + hintmask 10100000 + -47 callsubr + -29 -139 rmoveto + -51 callsubr + hintmask 01000000 + -50 callsubr + hintmask 10000000 + -49 callsubr + hintmask 01000000 + -48 callsubr + hintmask 10000000 + -47 callsubr + endchar + + + -123 -173 54 hstem + 478 -146 rmoveto + -25 callsubr + endchar + + + -601 -87 -157 rmoveto + -24 callsubr + endchar + + + -601 -233 60 hstem + 266 -76 rmoveto + -18 -84 -218 -13 -69 0 -69 0 -218 13 -18 84 rrcurveto + -29 hlineto + -140 233 -17 101 101 233 
17 140 vhcurveto + endchar + + + -601 517 55 6 55 hstem + 336 618 rmoveto + -15 -50 -84 4 -40 0 -79 0 -106 21 -77 19 -51 12 -55 9 -52 0 -73 0 -84 -18 -15 -83 rrcurveto + 29 hlineto + 15 50 84 -4 40 0 51 0 54 -10 50 -12 85 -21 92 -18 88 0 73 0 84 18 15 83 rrcurveto + endchar + + + -601 604 60 hstem + 295 507 rmoveto + 140 -233 17 -101 -101 -233 -17 -140 vhcurveto + 29 hlineto + 18 84 218 13 69 0 69 0 218 -13 18 -84 rrcurveto + endchar + + + -601 -195 55 hstem + 355 -160 rmoveto + -45 19 -36 40 -31 36 rrcurveto + -20 -19 27 -33 rlineto + 4 -5 3 -3 0 -4 rrcurveto + -5 -8 -6 -4 vhcurveto + -640 -55 639 hlineto + 9 3 -5 -4 hvcurveto + 0 -4 -2 -5 -4 -5 rrcurveto + -27 -33 19 -19 rlineto + 32 36 36 40 45 19 rrcurveto + endchar + + + -165 -173 54 hstem + 436 -173 rmoveto + -23 callsubr + endchar + + + -301 -173 54 hstem + 300 -173 rmoveto + 54 -300 -54 vlineto + endchar + + + -165 -173 54 hstem + 436 -146 rmoveto + -132 106 -18 -8 rlineto + 32 -27 17 -22 0 -7 rrcurveto + -12 -16 -3 -33 vhcurveto + -286 -54 286 hlineto + 28 21 -2 -13 hvcurveto + 0 -9 -21 -18 -29 -29 rrcurveto + 18 -8 rlineto + endchar + + + -151 50 124 vstem + 400 1005 rmoveto + -261 -184 -89 -359 0 -303 rrcurveto + -159 124 239 vlineto + 0 254 54 285 172 197 rrcurveto + endchar + + + -151 50 124 vstem + 174 hmoveto + 1010 -124 -1010 vlineto + endchar + + + -151 50 124 vstem + 400 30 rmoveto + -172 197 -54 285 0 254 rrcurveto + 239 -124 -159 vlineto + 0 -303 89 -359 261 -184 rrcurveto + endchar + + + -151 276 124 vstem + 400 hmoveto + 159 vlineto + 0 303 -89 359 -261 184 rrcurveto + -30 vlineto + 172 -197 54 -285 0 -254 rrcurveto + -239 vlineto + endchar + + + -151 276 124 vstem + 400 hmoveto + 1010 -124 -1010 vlineto + endchar + + + -151 276 124 vstem + 400 1005 rmoveto + -124 -239 hlineto + 0 -254 -54 -285 -172 -197 rrcurveto + -30 vlineto + 261 184 89 359 0 303 rrcurveto + endchar + + + -101 -14 26 638 26 hstem + 476 330 rmoveto + 205 -91 141 -131 -161 -69 -163 -177 -164 55 -186 171 163 63 172 172 
vhcurveto + -96 -5 rmoveto + -198 -45 -115 -85 -86 -44 114 203 203 45 118 83 88 44 -117 -208 vhcurveto + endchar + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestOPBD-0.ttx fonttools-3.21.2/Tests/subset/data/TestOPBD-0.ttx --- fonttools-3.0/Tests/subset/data/TestOPBD-0.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestOPBD-0.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,299 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestOPBD + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestOPBD-1.ttx fonttools-3.21.2/Tests/subset/data/TestOPBD-1.ttx --- fonttools-3.0/Tests/subset/data/TestOPBD-1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestOPBD-1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,299 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestOPBD + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestOTF-Regular.ttx fonttools-3.21.2/Tests/subset/data/TestOTF-Regular.ttx --- fonttools-3.0/Tests/subset/data/TestOTF-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestOTF-Regular.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,262 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test OTF + + + Regular + + + 1.000;UKWN;TestOTF-Regular + + + Test OTF + + + Version 1.000;PS 1.0;hotconv 1.0.88;makeotf.lib2.5.647800 + + + TestOTF-Regular + + + Test OTF + + + Regular + + + 1.000;UKWN;TestOTF-Regular + + + Test OTF + + + Version 1.000;PS 1.0;hotconv 1.0.88;makeotf.lib2.5.647800 + + + TestOTF-Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 132 304 rmoveto + 233 263 -233 hlineto + endchar + + + + + + 196 10 hmoveto + 476 660 -476 hlineto + 108 -602 rmoveto + 74 132 54 103 rlineto + 4 hlineto + 52 -103 73 -132 rlineto + -129 329 rmoveto + -50 94 -66 119 rlineto + 235 hlineto + -66 -119 -49 -94 rlineto + -175 -277 rmoveto + 462 vlineto + 127 -232 rlineto + 217 -230 rmoveto + -126 230 126 232 rlineto + endchar + + + -107 callsubr + + + 100 304 263 hstem + 132 233 vstem + -107 callsubr + + + endchar + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/subset/data/TestPROP.ttx fonttools-3.21.2/Tests/subset/data/TestPROP.ttx --- fonttools-3.0/Tests/subset/data/TestPROP.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestPROP.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,322 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + TestPROP + + + Regular + + + TestPROP + + + TestPROP-Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestTTF-Regular_non_BMP_char.ttx fonttools-3.21.2/Tests/subset/data/TestTTF-Regular_non_BMP_char.ttx --- fonttools-3.0/Tests/subset/data/TestTTF-Regular_non_BMP_char.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestTTF-Regular_non_BMP_char.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,722 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + PUSHB[ ] /* 1 value pushed */ + 9 + LT[ ] /* LessThan */ + IF[ ] /* If */ + PUSHB[ ] /* 2 values pushed */ + 1 1 + INSTCTRL[ ] /* SetInstrExecControl */ + EIF[ ] /* EndIf */ + PUSHW[ ] /* 1 value pushed */ + 511 + SCANCTRL[ ] /* 
ScanConversionControl */ + PUSHB[ ] /* 1 value pushed */ + 68 + SCVTCI[ ] /* SetCVTCutIn */ + PUSHB[ ] /* 2 values pushed */ + 9 3 + SDS[ ] /* SetDeltaShiftInGState */ + SDB[ ] /* SetDeltaBaseInGState */ + ENDF[ ] /* EndFunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 1 + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[01] /* Round */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSHB[ ] /* 1 value pushed */ + 1 + ADD[ ] /* Add */ + ENDF[ ] /* EndFunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 2 + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 1 + LOOPCALL[ ] /* LoopAndCallFunction */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 3 + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + GC[0] /* GetCoordOnPVector */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + MD[0] /* MeasureDistance */ + ABS[ ] /* Absolute */ + ROLL[ ] /* RollTopThreeStack */ + DUP[ ] /* DuplicateTopStack */ + GC[0] /* GetCoordOnPVector */ + DUP[ ] /* DuplicateTopStack */ + ROUND[00] /* Round */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + DUP[ ] /* DuplicateTopStack */ + ROUND[00] /* Round */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + NEG[ ] /* Negate */ + ROLL[ ] /* RollTopThreeStack */ + EIF[ ] /* EndIf */ + MDAP[1] /* MoveDirectAbsPt */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 0 + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + 
ROUND[01] /* Round */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 0 + EQ[ ] /* Equal */ + IF[ ] /* If */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 64 + EIF[ ] /* EndIf */ + ELSE[ ] /* Else */ + ROUND[01] /* Round */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 0 + EQ[ ] /* Equal */ + IF[ ] /* If */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 64 + NEG[ ] /* Negate */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + MSIRP[0] /* MoveStackIndirRelPt */ + ENDF[ ] /* EndFunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 4 + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + GC[0] /* GetCoordOnPVector */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + ROLL[ ] /* RollTopThreeStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + GC[0] /* GetCoordOnPVector */ + DUP[ ] /* DuplicateTopStack */ + ROUND[10] /* Round */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + DUP[ ] /* DuplicateTopStack */ + ROUND[10] /* Round */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + ROLL[ ] /* RollTopThreeStack */ + EIF[ ] /* EndIf */ + MDAP[1] /* MoveDirectAbsPt */ + MIRP[11101] /* MoveIndirectRelPt */ + ENDF[ ] /* EndFunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 5 + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + LT[ ] /* LessThan */ + IF[ ] /* If */ + LTEQ[ ] /* LessThenOrEqual */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 128 + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 64 + WCVTP[ ] /* 
WriteCVTInPixels */ + EIF[ ] /* EndIf */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 192 + LT[ ] /* LessThan */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 192 + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 6 + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[01] /* Round */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSHB[ ] /* 1 value pushed */ + 1 + ADD[ ] /* Add */ + DUP[ ] /* DuplicateTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + RDTG[ ] /* RoundDownToGrid */ + ROUND[01] /* Round */ + RTG[ ] /* RoundToGrid */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSHB[ ] /* 1 value pushed */ + 1 + ADD[ ] /* Add */ + ENDF[ ] /* EndFunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 7 + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 6 + LOOPCALL[ ] /* LoopAndCallFunction */ + ENDF[ ] /* EndFunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 8 + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 64 + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 0 + EIF[ ] /* EndIf */ + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 128 + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + ELSE[ ] /* Else */ + ROLL[ 
] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 192 + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + ELSE[ ] /* Else */ + ROLL[ ] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + PUSHW[ ] /* 1 value pushed */ + 256 + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + ELSE[ ] /* Else */ + ROLL[ ] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + PUSHW[ ] /* 1 value pushed */ + 320 + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + ELSE[ ] /* Else */ + ROLL[ ] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHW[ ] /* 1 value pushed */ + 384 + LT[ ] /* LessThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + PUSHW[ ] /* 1 value pushed */ + 384 + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + SWAP[ ] 
/* SwapTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + WCVTP[ ] /* WriteCVTInPixels */ + ENDF[ ] /* EndFunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 9 + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + RCVT[ ] /* ReadCVT */ + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + CALL[ ] /* CallFunction */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSHB[ ] /* 3 values pushed */ + 1 1 2 + CALL[ ] /* CallFunction */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 3 values pushed */ + 2 1 2 + CALL[ ] /* CallFunction */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHW[ ] /* 2 values pushed */ + 2 275 + PUSHB[ ] /* 6 values pushed */ + 225 175 125 75 0 8 + CALL[ ] /* CallFunction */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSHW[ ] /* 2 values pushed */ + 1 275 + PUSHB[ ] /* 6 values pushed */ + 225 175 125 75 0 8 + CALL[ ] /* CallFunction */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSHB[ ] /* 3 values pushed */ + 3 2 7 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 0 + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + RDTG[ ] /* RoundDownToGrid */ + ROUND[01] /* Round */ + RTG[ ] /* RoundToGrid */ + WCVTP[ ] /* WriteCVTInPixels */ + MPPEM[ ] /* MeasurePixelPerEm */ + PUSHB[ ] /* 1 value pushed */ + 96 + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 1 + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 0 + EIF[ ] /* EndIf */ + PUSHB[ ] /* 1 value pushed */ + 1 + INSTCTRL[ ] /* SetInstrExecControl */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + PUSHB[ ] /* 3 values pushed 
*/ + 1 2 3 + CALL[ ] /* CallFunction */ + IUP[0] /* InterpolateUntPts */ + IUP[1] /* InterpolateUntPts */ + + + + + + + + + + + + TestTTF + + + Regular + + + 1.000;UKWN;TestTTF-Regular + + + TestTTF + + + Version 1.000;PS 1.000;hotconv 1.0.88;makeotf.lib2.5.647800 DEVELOPMENT + + + TestTTF-Regular + + + TestTTF + + + Regular + + + 1.000;UKWN;TestTTF-Regular + + + TestTTF + + + Version 1.000;PS 1.000;hotconv 1.0.88;makeotf.lib2.5.647800 DEVELOPMENT + + + TestTTF-Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/data/TestTTF-Regular.ttx fonttools-3.21.2/Tests/subset/data/TestTTF-Regular.ttx --- fonttools-3.0/Tests/subset/data/TestTTF-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/data/TestTTF-Regular.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,705 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSH[ ] /* 1 value pushed */ + 0 + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + PUSH[ ] /* 1 value pushed */ + 9 + LT[ ] /* LessThan */ + IF[ ] /* If */ + PUSH[ ] /* 2 values pushed */ + 1 1 + INSTCTRL[ ] /* SetInstrExecControl */ + EIF[ ] /* EndIf */ + PUSH[ ] /* 1 value pushed */ + 511 + SCANCTRL[ ] /* ScanConversionControl */ + PUSH[ ] /* 1 value pushed */ + 68 + SCVTCI[ ] /* SetCVTCutIn */ + PUSH[ ] /* 2 values pushed */ + 9 3 + SDS[ ] /* SetDeltaShiftInGState */ + SDB[ ] /* SetDeltaBaseInGState */ + ENDF[ ] /* EndFunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 1 + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[01] /* Round */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSH[ ] /* 1 value pushed */ + 1 + ADD[ ] /* Add */ + 
ENDF[ ] /* EndFunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 2 + FDEF[ ] /* FunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 1 + LOOPCALL[ ] /* LoopAndCallFunction */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 3 + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + GC[0] /* GetCoordOnPVector */ + PUSH[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + MD[0] /* MeasureDistance */ + ABS[ ] /* Absolute */ + ROLL[ ] /* RollTopThreeStack */ + DUP[ ] /* DuplicateTopStack */ + GC[0] /* GetCoordOnPVector */ + DUP[ ] /* DuplicateTopStack */ + ROUND[00] /* Round */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + PUSH[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + DUP[ ] /* DuplicateTopStack */ + ROUND[00] /* Round */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + NEG[ ] /* Negate */ + ROLL[ ] /* RollTopThreeStack */ + EIF[ ] /* EndIf */ + MDAP[1] /* MoveDirectAbsPt */ + DUP[ ] /* DuplicateTopStack */ + PUSH[ ] /* 1 value pushed */ + 0 + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + ROUND[01] /* Round */ + DUP[ ] /* DuplicateTopStack */ + PUSH[ ] /* 1 value pushed */ + 0 + EQ[ ] /* Equal */ + IF[ ] /* If */ + POP[ ] /* PopTopStack */ + PUSH[ ] /* 1 value pushed */ + 64 + EIF[ ] /* EndIf */ + ELSE[ ] /* Else */ + ROUND[01] /* Round */ + DUP[ ] /* DuplicateTopStack */ + PUSH[ ] /* 1 value pushed */ + 0 + EQ[ ] /* Equal */ + IF[ ] /* If */ + POP[ ] /* PopTopStack */ + PUSH[ ] /* 1 value pushed */ + 64 + NEG[ ] /* Negate */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + MSIRP[0] /* MoveStackIndirRelPt 
*/ + ENDF[ ] /* EndFunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 4 + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + GC[0] /* GetCoordOnPVector */ + PUSH[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + ROLL[ ] /* RollTopThreeStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + GC[0] /* GetCoordOnPVector */ + DUP[ ] /* DuplicateTopStack */ + ROUND[10] /* Round */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + PUSH[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + DUP[ ] /* DuplicateTopStack */ + ROUND[10] /* Round */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + ROLL[ ] /* RollTopThreeStack */ + EIF[ ] /* EndIf */ + MDAP[1] /* MoveDirectAbsPt */ + MIRP[11101] /* MoveIndirectRelPt */ + ENDF[ ] /* EndFunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 5 + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + DUP[ ] /* DuplicateTopStack */ + PUSH[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + LT[ ] /* LessThan */ + IF[ ] /* If */ + LTEQ[ ] /* LessThenOrEqual */ + IF[ ] /* If */ + PUSH[ ] /* 1 value pushed */ + 128 + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + PUSH[ ] /* 1 value pushed */ + 64 + WCVTP[ ] /* WriteCVTInPixels */ + EIF[ ] /* EndIf */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + PUSH[ ] /* 1 value pushed */ + 192 + LT[ ] /* LessThan */ + IF[ ] /* If */ + PUSH[ ] /* 1 value pushed */ + 192 + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 6 + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* 
DuplicateTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[01] /* Round */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSH[ ] /* 1 value pushed */ + 1 + ADD[ ] /* Add */ + DUP[ ] /* DuplicateTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + RDTG[ ] /* RoundDownToGrid */ + ROUND[01] /* Round */ + RTG[ ] /* RoundToGrid */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSH[ ] /* 1 value pushed */ + 1 + ADD[ ] /* Add */ + ENDF[ ] /* EndFunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 7 + FDEF[ ] /* FunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 6 + LOOPCALL[ ] /* LoopAndCallFunction */ + ENDF[ ] /* EndFunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 8 + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + DUP[ ] /* DuplicateTopStack */ + PUSH[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + PUSH[ ] /* 1 value pushed */ + 64 + ELSE[ ] /* Else */ + PUSH[ ] /* 1 value pushed */ + 0 + EIF[ ] /* EndIf */ + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + DUP[ ] /* DuplicateTopStack */ + PUSH[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + PUSH[ ] /* 1 value pushed */ + 128 + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + ELSE[ ] /* Else */ + ROLL[ ] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + PUSH[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + PUSH[ ] /* 1 value pushed */ + 192 + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + ELSE[ ] /* Else */ + ROLL[ ] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + PUSH[ ] /* 1 
value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + PUSH[ ] /* 1 value pushed */ + 256 + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + ELSE[ ] /* Else */ + ROLL[ ] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + PUSH[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + PUSH[ ] /* 1 value pushed */ + 320 + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + ELSE[ ] /* Else */ + ROLL[ ] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + PUSH[ ] /* 1 value pushed */ + 3 + MINDEX[ ] /* MoveXToTopStack */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + PUSH[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + PUSH[ ] /* 1 value pushed */ + 384 + LT[ ] /* LessThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + PUSH[ ] /* 1 value pushed */ + 384 + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + PUSH[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + WCVTP[ ] /* WriteCVTInPixels */ + ENDF[ ] /* EndFunctionDefinition */ + PUSH[ ] /* 1 value pushed */ + 9 + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + RCVT[ ] /* ReadCVT */ + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition 
*/ + + + + + + PUSH[ ] /* 1 value pushed */ + 0 + CALL[ ] /* CallFunction */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSH[ ] /* 3 values pushed */ + 1 1 2 + CALL[ ] /* CallFunction */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSH[ ] /* 3 values pushed */ + 2 1 2 + CALL[ ] /* CallFunction */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSH[ ] /* 8 values pushed */ + 2 275 225 175 125 75 0 8 + CALL[ ] /* CallFunction */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSH[ ] /* 8 values pushed */ + 1 275 225 175 125 75 0 8 + CALL[ ] /* CallFunction */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSH[ ] /* 3 values pushed */ + 3 2 7 + CALL[ ] /* CallFunction */ + PUSH[ ] /* 1 value pushed */ + 0 + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + RDTG[ ] /* RoundDownToGrid */ + ROUND[01] /* Round */ + RTG[ ] /* RoundToGrid */ + WCVTP[ ] /* WriteCVTInPixels */ + MPPEM[ ] /* MeasurePixelPerEm */ + PUSH[ ] /* 1 value pushed */ + 96 + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + PUSH[ ] /* 1 value pushed */ + 1 + ELSE[ ] /* Else */ + PUSH[ ] /* 1 value pushed */ + 0 + EIF[ ] /* EndIf */ + PUSH[ ] /* 1 value pushed */ + 1 + INSTCTRL[ ] /* SetInstrExecControl */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + PUSH[ ] /* 3 values pushed */ + 1 2 3 + CALL[ ] /* CallFunction */ + IUP[0] /* InterpolateUntPts */ + IUP[1] /* InterpolateUntPts */ + + + + + + + + + + TestTTF + + + Regular + + + 1.000;UKWN;TestTTF-Regular + + + TestTTF + + + Version 1.000;PS 1.000;hotconv 1.0.88;makeotf.lib2.5.647800 DEVELOPMENT + + + TestTTF-Regular + + + TestTTF + + + Regular + + + 1.000;UKWN;TestTTF-Regular + + + TestTTF + + + Version 1.000;PS 1.000;hotconv 1.0.88;makeotf.lib2.5.647800 DEVELOPMENT + + + TestTTF-Regular + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/subset/subset_test.py fonttools-3.21.2/Tests/subset/subset_test.py --- 
fonttools-3.0/Tests/subset/subset_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/subset/subset_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,482 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import subset +from fontTools.ttLib import TTFont, newTable +from fontTools.misc.loggingTools import CapturingLogHandler +import difflib +import logging +import os +import shutil +import sys +import tempfile +import unittest + + +class SubsetTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + @staticmethod + def getpath(testfile): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", testfile) + + def temp_path(self, suffix): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + self.num_tempfiles += 1 + return os.path.join(self.tempdir, + "tmp%d%s" % (self.num_tempfiles, suffix)) + + def read_ttx(self, path): + lines = [] + with open(path, "r", encoding="utf-8") as ttx: + for line in ttx.readlines(): + # Elide ttFont attributes because ttLibVersion may change, + # and use os-native line separators so we can run difflib. 
+ if line.startswith("" + os.linesep) + else: + lines.append(line.rstrip() + os.linesep) + return lines + + def expect_ttx(self, font, expected_ttx, tables): + path = self.temp_path(suffix=".ttx") + font.saveXML(path, tables=tables) + actual = self.read_ttx(path) + expected = self.read_ttx(expected_ttx) + if actual != expected: + for line in difflib.unified_diff( + expected, actual, fromfile=expected_ttx, tofile=path): + sys.stdout.write(line) + self.fail("TTX output is different from expected") + + def compile_font(self, path, suffix): + savepath = self.temp_path(suffix=suffix) + font = TTFont(recalcBBoxes=False, recalcTimestamp=False) + font.importXML(path) + font.save(savepath, reorderTables=None) + return font, savepath + +# ----- +# Tests +# ----- + + def test_no_notdef_outline_otf(self): + _, fontpath = self.compile_font(self.getpath("TestOTF-Regular.ttx"), ".otf") + subsetpath = self.temp_path(".otf") + subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_otf.ttx"), ["CFF "]) + + def test_no_notdef_outline_cid(self): + _, fontpath = self.compile_font(self.getpath("TestCID-Regular.ttx"), ".otf") + subsetpath = self.temp_path(".otf") + subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_cid.ttx"), ["CFF "]) + + def test_no_notdef_outline_ttf(self): + _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--no-notdef-outline", "--gids=0", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_no_notdef_outline_ttf.ttx"), ["glyf", "hmtx"]) + + def test_subset_ankr(self): + _, fontpath = self.compile_font(self.getpath("TestANKR.ttx"), 
".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_ankr.ttx"), ["ankr"]) + + def test_subset_ankr_remove(self): + _, fontpath = self.compile_font(self.getpath("TestANKR.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--glyphs=two", "--output-file=%s" % subsetpath]) + self.assertNotIn("ankr", TTFont(subsetpath)) + + def test_subset_bsln_format_0(self): + _, fontpath = self.compile_font(self.getpath("TestBSLN-0.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_bsln_0.ttx"), ["bsln"]) + + def test_subset_bsln_format_0_from_format_1(self): + # TestBSLN-1 defines the ideographic baseline to be the font's default, + # and specifies that glyphs {.notdef, zero, one, two} use the roman + # baseline instead of the default ideographic baseline. As we request + # a subsetted font with {zero, one} and the implicit .notdef, all + # glyphs in the resulting font use the Roman baseline. In this case, + # we expect a format 0 'bsln' table because it is the most compact. + _, fontpath = self.compile_font(self.getpath("TestBSLN-1.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+0030-0031", + "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_bsln_0.ttx"), ["bsln"]) + + def test_subset_bsln_format_1(self): + # TestBSLN-1 defines the ideographic baseline to be the font's default, + # and specifies that glyphs {.notdef, zero, one, two} use the roman + # baseline instead of the default ideographic baseline. We request + # a subset where the majority of glyphs use the roman baseline, + # but one single glyph (uni2EA2) is ideographic. 
In the resulting + # subsetted font, we expect a format 1 'bsln' table whose default + # is Roman, but with an override that uses the ideographic baseline + # for uni2EA2. + _, fontpath = self.compile_font(self.getpath("TestBSLN-1.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+0030-0031,U+2EA2", + "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_bsln_1.ttx"), ["bsln"]) + + def test_subset_bsln_format_2(self): + # The 'bsln' table in TestBSLN-2 refers to control points in glyph 'P' + # for defining its baselines. Therefore, the subsetted font should + # include this glyph even though it is not requested explicitly. + _, fontpath = self.compile_font(self.getpath("TestBSLN-2.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_bsln_2.ttx"), ["bsln"]) + + def test_subset_bsln_format_2_from_format_3(self): + # TestBSLN-3 defines the ideographic baseline to be the font's default, + # and specifies that glyphs {.notdef, zero, one, two, P} use the roman + # baseline instead of the default ideographic baseline. As we request + # a subsetted font with zero and the implicit .notdef and P for + # baseline measurement, all glyphs in the resulting font use the Roman + # baseline. In this case, we expect a format 2 'bsln' table because it + # is the most compact encoding. 
+ _, fontpath = self.compile_font(self.getpath("TestBSLN-3.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+0030", + "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_bsln_2.ttx"), ["bsln"]) + + def test_subset_bsln_format_3(self): + # TestBSLN-3 defines the ideographic baseline to be the font's default, + # and specifies that glyphs {.notdef, zero, one, two} use the roman + # baseline instead of the default ideographic baseline. We request + # a subset where the majority of glyphs use the roman baseline, + # but one single glyph (uni2EA2) is ideographic. In the resulting + # subsetted font, we expect a format 1 'bsln' table whose default + # is Roman, but with an override that uses the ideographic baseline + # for uni2EA2. + _, fontpath = self.compile_font(self.getpath("TestBSLN-3.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+0030-0031,U+2EA2", + "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_bsln_3.ttx"), ["bsln"]) + + def test_subset_clr(self): + _, fontpath = self.compile_font(self.getpath("TestCLR-Regular.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--glyphs=smileface", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_keep_colr.ttx"), ["GlyphOrder", "hmtx", "glyf", "COLR", "CPAL"]) + + def test_subset_gvar(self): + _, fontpath = self.compile_font(self.getpath("TestGVAR.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+002B,U+2212", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_keep_gvar.ttx"), ["GlyphOrder", "avar", "fvar", "gvar", "name"]) + + def test_subset_gvar_notdef_outline(self): + _, fontpath = 
self.compile_font(self.getpath("TestGVAR.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+0030", "--notdef_outline", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_keep_gvar_notdef_outline.ttx"), ["GlyphOrder", "avar", "fvar", "gvar", "name"]) + + def test_subset_lcar_remove(self): + _, fontpath = self.compile_font(self.getpath("TestLCAR-0.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.assertNotIn("lcar", subsetfont) + + def test_subset_lcar_format_0(self): + _, fontpath = self.compile_font(self.getpath("TestLCAR-0.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+FB01", + "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_lcar_0.ttx"), ["lcar"]) + + def test_subset_lcar_format_1(self): + _, fontpath = self.compile_font(self.getpath("TestLCAR-1.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+FB01", + "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_lcar_1.ttx"), ["lcar"]) + + def test_subset_math(self): + _, fontpath = self.compile_font(self.getpath("TestMATH-Regular.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+0041,U+0028,U+0302,U+1D400,U+1D435", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_keep_math.ttx"), ["GlyphOrder", "CFF ", "MATH", "hmtx"]) + + def test_subset_opbd_remove(self): + # In the test font, only the glyphs 'A' and 'zero' have an entry in + # the Optical Bounds table. When subsetting, we do not request any + # of those glyphs. 
Therefore, the produced subsetted font should + # not contain an 'opbd' table. + _, fontpath = self.compile_font(self.getpath("TestOPBD-0.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--glyphs=one", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.assertNotIn("opbd", subsetfont) + + def test_subset_opbd_format_0(self): + _, fontpath = self.compile_font(self.getpath("TestOPBD-0.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--glyphs=A", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_opbd_0.ttx"), ["opbd"]) + + def test_subset_opbd_format_1(self): + _, fontpath = self.compile_font(self.getpath("TestOPBD-1.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--glyphs=A", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_opbd_1.ttx"), ["opbd"]) + + def test_subset_prop_remove_default_zero(self): + # If all glyphs have an AAT glyph property with value 0, + # the "prop" table should be removed from the subsetted font. + _, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+0041", + "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.assertNotIn("prop", subsetfont) + + def test_subset_prop_0(self): + # If all glyphs share the same AAT glyph properties, the "prop" table + # in the subsetted font should use format 0. + # + # Unless the shared value is zero, in which case the subsetted font + # should have no "prop" table at all. But that case has already been + # tested above in test_subset_prop_remove_default_zero(). 
+ _, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+0030-0032", "--no-notdef-glyph", + "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_prop_0.ttx"), ["prop"]) + + def test_subset_prop_1(self): + # If not all glyphs share the same AAT glyph properties, the subsetted + # font should contain a "prop" table in format 1. To save space, the + # DefaultProperties should be set to the most frequent value. + _, fontpath = self.compile_font(self.getpath("TestPROP.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=U+0030-0032", "--notdef-outline", + "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_prop_1.ttx"), ["prop"]) + + def test_options(self): + # https://github.com/behdad/fonttools/issues/413 + opt1 = subset.Options() + self.assertTrue('Xyz-' not in opt1.layout_features) + opt2 = subset.Options() + opt2.layout_features.append('Xyz-') + self.assertTrue('Xyz-' in opt2.layout_features) + self.assertTrue('Xyz-' not in opt1.layout_features) + + def test_google_color(self): + _, fontpath = self.compile_font(self.getpath("google_color.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--gids=0,1", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.assertTrue("CBDT" in subsetfont) + self.assertTrue("CBLC" in subsetfont) + self.assertTrue("x" in subsetfont['CBDT'].strikeData[0]) + self.assertFalse("y" in subsetfont['CBDT'].strikeData[0]) + + def test_google_color_all(self): + _, fontpath = self.compile_font(self.getpath("google_color.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--unicodes=*", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.assertTrue("x" in subsetfont['CBDT'].strikeData[0]) + 
self.assertTrue("y" in subsetfont['CBDT'].strikeData[0]) + + def test_timing_publishes_parts(self): + _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf") + + options = subset.Options() + options.timing = True + subsetter = subset.Subsetter(options) + subsetter.populate(text='ABC') + font = TTFont(fontpath) + with CapturingLogHandler('fontTools.subset.timer', logging.DEBUG) as captor: + captor.logger.propagate = False + subsetter.subset(font) + logs = captor.records + captor.logger.propagate = True + + self.assertTrue(len(logs) > 5) + self.assertEqual(len(logs), len([l for l in logs if 'msg' in l.args and 'time' in l.args])) + # Look for a few things we know should happen + self.assertTrue(filter(lambda l: l.args['msg'] == "load 'cmap'", logs)) + self.assertTrue(filter(lambda l: l.args['msg'] == "subset 'cmap'", logs)) + self.assertTrue(filter(lambda l: l.args['msg'] == "subset 'glyf'", logs)) + + def test_passthrough_tables(self): + _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf") + font = TTFont(fontpath) + unknown_tag = 'ZZZZ' + unknown_table = newTable(unknown_tag) + unknown_table.data = b'\0'*10 + font[unknown_tag] = unknown_table + font.save(fontpath) + + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + + # tables we can't subset are dropped by default + self.assertFalse(unknown_tag in subsetfont) + + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--passthrough-tables", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + + # unknown tables are kept if --passthrough-tables option is passed + self.assertTrue(unknown_tag in subsetfont) + + def test_non_BMP_text_arg_input(self): + _, fontpath = self.compile_font( + self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + text = tostr(u"A\U0001F6D2", encoding='utf-8') + + subset.main([fontpath, "--text=%s" % 
text, "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + + self.assertEqual(subsetfont['maxp'].numGlyphs, 3) + self.assertEqual(subsetfont.getGlyphOrder(), ['.notdef', 'A', 'u1F6D2']) + + def test_non_BMP_text_file_input(self): + _, fontpath = self.compile_font( + self.getpath("TestTTF-Regular_non_BMP_char.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + text = tobytes(u"A\U0001F6D2", encoding='utf-8') + with tempfile.NamedTemporaryFile(delete=False) as tmp: + tmp.write(text) + + try: + subset.main([fontpath, "--text-file=%s" % tmp.name, + "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + finally: + os.remove(tmp.name) + + self.assertEqual(subsetfont['maxp'].numGlyphs, 3) + self.assertEqual(subsetfont.getGlyphOrder(), ['.notdef', 'A', 'u1F6D2']) + + def test_no_hinting_CFF(self): + ttxpath = self.getpath("Lobster.subset.ttx") + _, fontpath = self.compile_font(ttxpath, ".otf") + subsetpath = self.temp_path(".otf") + subset.main([fontpath, "--no-hinting", "--notdef-outline", + "--output-file=%s" % subsetpath, "*"]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath( + "expect_no_hinting_CFF.ttx"), ["CFF "]) + + def test_desubroutinize_CFF(self): + ttxpath = self.getpath("Lobster.subset.ttx") + _, fontpath = self.compile_font(ttxpath, ".otf") + subsetpath = self.temp_path(".otf") + subset.main([fontpath, "--desubroutinize", "--notdef-outline", + "--output-file=%s" % subsetpath, "*"]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath( + "expect_desubroutinize_CFF.ttx"), ["CFF "]) + + def test_no_hinting_desubroutinize_CFF(self): + ttxpath = self.getpath("Lobster.subset.ttx") + _, fontpath = self.compile_font(ttxpath, ".otf") + subsetpath = self.temp_path(".otf") + subset.main([fontpath, "--no-hinting", "--desubroutinize", "--notdef-outline", + "--output-file=%s" % subsetpath, "*"]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath( + 
"expect_no_hinting_desubroutinize_CFF.ttx"), ["CFF "]) + + def test_no_hinting_TTF(self): + _, fontpath = self.compile_font(self.getpath("TestTTF-Regular.ttx"), ".ttf") + subsetpath = self.temp_path(".ttf") + subset.main([fontpath, "--no-hinting", "--notdef-outline", + "--output-file=%s" % subsetpath, "*"]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath( + "expect_no_hinting_TTF.ttx"), ["glyf", "maxp"]) + for tag in subset.Options().hinting_tables: + self.assertTrue(tag not in subsetfont) + + def test_notdef_width_cid(self): + # https://github.com/fonttools/fonttools/pull/845 + _, fontpath = self.compile_font(self.getpath("NotdefWidthCID-Regular.ttx"), ".otf") + subsetpath = self.temp_path(".otf") + subset.main([fontpath, "--no-notdef-outline", "--gids=0,1", "--output-file=%s" % subsetpath]) + subsetfont = TTFont(subsetpath) + self.expect_ttx(subsetfont, self.getpath("expect_notdef_width_cid.ttx"), ["CFF "]) + + def test_recalc_timestamp_ttf(self): + ttxpath = self.getpath("TestTTF-Regular.ttx") + font = TTFont() + font.importXML(ttxpath) + modified = font['head'].modified + _, fontpath = self.compile_font(ttxpath, ".ttf") + subsetpath = self.temp_path(".ttf") + + # by default, the subsetter does not recalculate the modified timestamp + subset.main([fontpath, "--output-file=%s" % subsetpath, "*"]) + self.assertEqual(modified, TTFont(subsetpath)['head'].modified) + + subset.main([fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"]) + self.assertLess(modified, TTFont(subsetpath)['head'].modified) + + def test_recalc_timestamp_otf(self): + ttxpath = self.getpath("TestOTF-Regular.ttx") + font = TTFont() + font.importXML(ttxpath) + modified = font['head'].modified + _, fontpath = self.compile_font(ttxpath, ".otf") + subsetpath = self.temp_path(".otf") + + # by default, the subsetter does not recalculate the modified timestamp + subset.main([fontpath, "--output-file=%s" % subsetpath, "*"]) + self.assertEqual(modified, 
TTFont(subsetpath)['head'].modified) + + subset.main([fontpath, "--recalc-timestamp", "--output-file=%s" % subsetpath, "*"]) + self.assertLess(modified, TTFont(subsetpath)['head'].modified) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/svgLib/path/parser_test.py fonttools-3.21.2/Tests/svgLib/path/parser_test.py --- fonttools-3.0/Tests/svgLib/path/parser_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/svgLib/path/parser_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,297 @@ +from __future__ import print_function, absolute_import, division + +from fontTools.misc.py23 import * +from fontTools.pens.recordingPen import RecordingPen +from fontTools.svgLib import parse_path + +import pytest + + +@pytest.mark.parametrize( + "pathdef, expected", + [ + + # Examples from the SVG spec + + ( + "M 100 100 L 300 100 L 200 300 z", + [ + ("moveTo", ((100.0, 100.0),)), + ("lineTo", ((300.0, 100.0),)), + ("lineTo", ((200.0, 300.0),)), + ("lineTo", ((100.0, 100.0),)), + ("closePath", ()), + ] + ), + # for Z command behavior when there is multiple subpaths + ( + "M 0 0 L 50 20 M 100 100 L 300 100 L 200 300 z", + [ + ("moveTo", ((0.0, 0.0),)), + ("lineTo", ((50.0, 20.0),)), + ("endPath", ()), + ("moveTo", ((100.0, 100.0),)), + ("lineTo", ((300.0, 100.0),)), + ("lineTo", ((200.0, 300.0),)), + ("lineTo", ((100.0, 100.0),)), + ("closePath", ()), + ] + ), + ( + "M100,200 C100,100 250,100 250,200 S400,300 400,200", + [ + ("moveTo", ((100.0, 200.0),)), + ("curveTo", ((100.0, 100.0), + (250.0, 100.0), + (250.0, 200.0))), + ("curveTo", ((250.0, 300.0), + (400.0, 300.0), + (400.0, 200.0))), + ("endPath", ()), + ] + ), + ( + "M100,200 C100,100 400,100 400,200", + [ + ("moveTo", ((100.0, 200.0),)), + ("curveTo", ((100.0, 100.0), + (400.0, 100.0), + (400.0, 200.0))), + ("endPath", ()), + ] + ), + ( + "M100,500 C25,400 475,400 400,500", + [ + ("moveTo", ((100.0, 500.0),)), + ("curveTo", ((25.0, 400.0), + (475.0, 400.0), + 
(400.0, 500.0))), + ("endPath", ()), + ] + ), + ( + "M100,800 C175,700 325,700 400,800", + [ + ("moveTo", ((100.0, 800.0),)), + ("curveTo", ((175.0, 700.0), + (325.0, 700.0), + (400.0, 800.0))), + ("endPath", ()), + ] + ), + ( + "M600,200 C675,100 975,100 900,200", + [ + ("moveTo", ((600.0, 200.0),)), + ("curveTo", ((675.0, 100.0), + (975.0, 100.0), + (900.0, 200.0))), + ("endPath", ()), + ] + ), + ( + "M600,500 C600,350 900,650 900,500", + [ + ("moveTo", ((600.0, 500.0),)), + ("curveTo", ((600.0, 350.0), + (900.0, 650.0), + (900.0, 500.0))), + ("endPath", ()), + ] + ), + ( + "M600,800 C625,700 725,700 750,800 S875,900 900,800", + [ + ("moveTo", ((600.0, 800.0),)), + ("curveTo", ((625.0, 700.0), + (725.0, 700.0), + (750.0, 800.0))), + ("curveTo", ((775.0, 900.0), + (875.0, 900.0), + (900.0, 800.0))), + ("endPath", ()), + ] + ), + ( + "M200,300 Q400,50 600,300 T1000,300", + [ + ("moveTo", ((200.0, 300.0),)), + ("qCurveTo", ((400.0, 50.0), + (600.0, 300.0))), + ("qCurveTo", ((800.0, 550.0), + (1000.0, 300.0))), + ("endPath", ()), + ] + ), + # End examples from SVG spec + + # Relative moveto + ( + "M 0 0 L 50 20 m 50 80 L 300 100 L 200 300 z", + [ + ("moveTo", ((0.0, 0.0),)), + ("lineTo", ((50.0, 20.0),)), + ("endPath", ()), + ("moveTo", ((100.0, 100.0),)), + ("lineTo", ((300.0, 100.0),)), + ("lineTo", ((200.0, 300.0),)), + ("lineTo", ((100.0, 100.0),)), + ("closePath", ()), + ] + ), + # Initial smooth and relative curveTo + ( + "M100,200 s 150,-100 150,0", + [ + ("moveTo", ((100.0, 200.0),)), + ("curveTo", ((100.0, 200.0), + (250.0, 100.0), + (250.0, 200.0))), + ("endPath", ()), + ] + ), + # Initial smooth and relative qCurveTo + ( + "M100,200 t 150,0", + [ + ("moveTo", ((100.0, 200.0),)), + ("qCurveTo", ((100.0, 200.0), + (250.0, 200.0))), + ("endPath", ()), + ] + ), + # relative l command + ( + "M 100 100 L 300 100 l -100 200 z", + [ + ("moveTo", ((100.0, 100.0),)), + ("lineTo", ((300.0, 100.0),)), + ("lineTo", ((200.0, 300.0),)), + ("lineTo", ((100.0, 100.0),)), + 
("closePath", ()), + ] + ), + # relative q command + ( + "M200,300 q200,-250 400,0", + [ + ("moveTo", ((200.0, 300.0),)), + ("qCurveTo", ((400.0, 50.0), + (600.0, 300.0))), + ("endPath", ()), + ] + ), + # absolute H command + ( + "M 100 100 H 300 L 200 300 z", + [ + ("moveTo", ((100.0, 100.0),)), + ("lineTo", ((300.0, 100.0),)), + ("lineTo", ((200.0, 300.0),)), + ("lineTo", ((100.0, 100.0),)), + ("closePath", ()), + ] + ), + # relative h command + ( + "M 100 100 h 200 L 200 300 z", + [ + ("moveTo", ((100.0, 100.0),)), + ("lineTo", ((300.0, 100.0),)), + ("lineTo", ((200.0, 300.0),)), + ("lineTo", ((100.0, 100.0),)), + ("closePath", ()), + ] + ), + # absolute V command + ( + "M 100 100 V 300 L 200 300 z", + [ + ("moveTo", ((100.0, 100.0),)), + ("lineTo", ((100.0, 300.0),)), + ("lineTo", ((200.0, 300.0),)), + ("lineTo", ((100.0, 100.0),)), + ("closePath", ()), + ] + ), + # relative v command + ( + "M 100 100 v 200 L 200 300 z", + [ + ("moveTo", ((100.0, 100.0),)), + ("lineTo", ((100.0, 300.0),)), + ("lineTo", ((200.0, 300.0),)), + ("lineTo", ((100.0, 100.0),)), + ("closePath", ()), + ] + ), + ] +) +def test_parse_path(pathdef, expected): + pen = RecordingPen() + parse_path(pathdef, pen) + + assert pen.value == expected + + +@pytest.mark.parametrize( + "pathdef1, pathdef2", + [ + # don't need spaces between numbers and commands + ( + "M 100 100 L 200 200", + "M100 100L200 200", + ), + # repeated implicit command + ( + "M 100 200 L 200 100 L -100 -200", + "M 100 200 L 200 100 -100 -200" + ), + # don't need spaces before a minus-sign + ( + "M100,200c10-5,20-10,30-20", + "M 100 200 c 10 -5 20 -10 30 -20" + ), + # closed paths have an implicit lineTo if they don't + # end on the same point as the initial moveTo + ( + "M 100 100 L 300 100 L 200 300 z", + "M 100 100 L 300 100 L 200 300 L 100 100 z" + ) + ] +) +def test_equivalent_paths(pathdef1, pathdef2): + pen1 = RecordingPen() + parse_path(pathdef1, pen1) + + pen2 = RecordingPen() + parse_path(pathdef2, pen2) + + assert 
pen1.value == pen2.value + + +def test_exponents(): + # It can be e or E, the plus is optional, and a minimum of +/-3.4e38 must be supported. + pen = RecordingPen() + parse_path("M-3.4e38 3.4E+38L-3.4E-38,3.4e-38", pen) + expected = [ + ("moveTo", ((-3.4e+38, 3.4e+38),)), + ("lineTo", ((-3.4e-38, 3.4e-38),)), + ("endPath", ()), + ] + + assert pen.value == expected + + +def test_invalid_implicit_command(): + with pytest.raises(ValueError) as exc_info: + parse_path("M 100 100 L 200 200 Z 100 200", RecordingPen()) + assert exc_info.match("Unallowed implicit command") + + +def test_arc_not_implemented(): + pathdef = "M300,200 h-150 a150,150 0 1,0 150,-150 z" + with pytest.raises(NotImplementedError) as exc_info: + parse_path(pathdef, RecordingPen()) + assert exc_info.match("arcs are not supported") diff -Nru fonttools-3.0/Tests/svgLib/path/path_test.py fonttools-3.21.2/Tests/svgLib/path/path_test.py --- fonttools-3.0/Tests/svgLib/path/path_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/svgLib/path/path_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,80 @@ +from __future__ import print_function, absolute_import, division + +from fontTools.misc.py23 import * +from fontTools.pens.recordingPen import RecordingPen +from fontTools.svgLib import SVGPath + +import os +from tempfile import NamedTemporaryFile + + +SVG_DATA = """\ + + + + + + +""" + +EXPECTED_PEN_COMMANDS = [ + ("moveTo", ((100.0, 100.0),)), + ("lineTo", ((300.0, 100.0),)), + ("lineTo", ((200.0, 300.0),)), + ("lineTo", ((100.0, 100.0),)), + ("closePath", ()), + ("moveTo", ((100.0, 200.0),)), + ("curveTo", ((100.0, 100.0), + (250.0, 100.0), + (250.0, 200.0))), + ("curveTo", ((250.0, 300.0), + (400.0, 300.0), + (400.0, 200.0))), + ("endPath", ()) +] + + +class SVGPathTest(object): + + def test_from_svg_file(self): + pen = RecordingPen() + with NamedTemporaryFile(delete=False) as tmp: + tmp.write(tobytes(SVG_DATA)) + try: + svg = SVGPath(tmp.name) + svg.draw(pen) + finally: + 
os.remove(tmp.name) + + assert pen.value == EXPECTED_PEN_COMMANDS + + def test_fromstring(self): + pen = RecordingPen() + svg = SVGPath.fromstring(SVG_DATA) + svg.draw(pen) + + assert pen.value == EXPECTED_PEN_COMMANDS + + def test_transform(self): + pen = RecordingPen() + svg = SVGPath.fromstring(SVG_DATA, + transform=(1.0, 0, 0, -1.0, 0, 1000)) + svg.draw(pen) + + assert pen.value == [ + ("moveTo", ((100.0, 900.0),)), + ("lineTo", ((300.0, 900.0),)), + ("lineTo", ((200.0, 700.0),)), + ("lineTo", ((100.0, 900.0),)), + ("closePath", ()), + ("moveTo", ((100.0, 800.0),)), + ("curveTo", ((100.0, 900.0), + (250.0, 900.0), + (250.0, 800.0))), + ("curveTo", ((250.0, 700.0), + (400.0, 700.0), + (400.0, 800.0))), + ("endPath", ()) + ] Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/t1Lib/data/TestT1-Regular.lwfn and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/t1Lib/data/TestT1-Regular.lwfn differ diff -Nru fonttools-3.0/Tests/t1Lib/data/TestT1-Regular.pfa fonttools-3.21.2/Tests/t1Lib/data/TestT1-Regular.pfa --- fonttools-3.0/Tests/t1Lib/data/TestT1-Regular.pfa 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/t1Lib/data/TestT1-Regular.pfa 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,60 @@ +%!FontType1-1.1: TestT1-Regular 1.0 +%%BeginResource: font TestT1-Regular +12 dict dup begin +/FontType 1 def +/FontName /TestT1-Regular def +/FontInfo 14 dict dup begin +/version (1.0) def +/Notice (Test T1 is not a trademark of FontTools.) def +/Copyright (Copyright c 2015 by FontTools. No rights reserved.) 
def +/FullName (Test T1) def +/FamilyName (Test T1) def +/Weight (Regular) def +/ItalicAngle 0.000000 def +/isFixedPitch false def +/UnderlinePosition -75.000000 def +/UnderlineThickness 50.000000 def +/FSType 0 def +end def +/PaintType 0 def +/FontMatrix [0.001 0 0 0.001 0 0] def +/Encoding 256 array +0 1 255 {1 index exch /.notdef put} for +def +/FontBBox {50.000000 0.000000 668.000000 750.000000} def +end +currentfile eexec bab431ea06bb0a1031e1aa11919e714ac1ac5197cb08b39a4d7e746fca0af12d89ac0ebd1bc11ab1 +b3887b922efcec739534242d2fd22e7c30e3edce24b93798627e1ac3387816a8c4b84d76047dada8 +28b2ad27c5603046fecbc2a97adc5c37a68912324d2d435f2ee0ccc38df10ba1271a1c9af8897a6d +6e425cd7d18fd6bd64c2adadb74365bc101a850841669886291e158cbfa7f204b3fe0ba49ffe0c80 +4f6795d32eb770c5fcd38a3879c06a4bb87b2d3ab100d8c2b5f89e9be99248575575025c66381446 +e4d9183674880aef57fb2032a1e00431133b16f6d758de7c3d0c48a0fada1d40034742a69fb3a6f9 +450d2251e659158a04697cbfa70907346d27d37ef683284385c44a1b5089bd29b4629b6483122dc8 +cbce7327bdc33dd30e6fcdb346c0ddaf433a5ac740423aa35639b2386673832f5ae8cc380e9703ba +d3369533bfa85af9f56a090c9d97f5fc26ed102c07b647137e83632be51a65a532bd26430b59a31c +3cb037ded351c1d4e944733feb30a3e6f81c1a7b74ac4e0eadbe705412d47991c246e8820876bbc6 +1f6a3e264ae6b2ad4b864b0d7abee289308bea26eb15d00d2b9103861386e0a5f1802ba06f916810 +62110d2b1c3641806f78eea365614f440b580185e84bac6f87bee36108d95174c786600cf0e9dc4a +5545d1a84cfe8392115c0b7027c17fd460481d21f684af32204085690946327bfded992852645149 +8d44150d2495bd2efe0db6a450c6e28d0a52ca234e252129d5095596b0d8de096682d2eb00bc8320 +f257fd653b05a22eab7a193ccc315a6ee274a03ff1fdf443b310157a02656ca4b06c581dca8ced72 +c6ddcab26eb856ad1093452c587438b7f8408c1311e19254955914612c09828fd4d4fc2b8b0406ea +2ee38348a8bdab88a77b8033366b2e469834c01b7bd73207b7c67756937c7a9232947fde2e0ea327 +7b7d610e601b91389ccbcdd813c87db5333c0c723e48d3ef69285f246327978ce68ae9081076a227 +1a962a2a10e2b1147ec40b0f6553a00c8b329118569d16fe04a4fa195caf1b04c52c9a562b72e0cd 
+e411d747af796b9d2fb086ed927efb0e5fc9f50aa18aaf4949cba0de0805210620a19eec4319dfef +a74d9d13d16f8ad793323a231347e6b40022a1100c1e064b8679c1da63a26dfb217a6037096ad796 +320da5a9d0526eed51d7d64d3223e285c1a8c70780c59ecc9dd9bc90a0f84ffa038834918cebe247 +f6e8fa4ca0654019196388f2df008e63bc32c8e5e686dbb69193b7749638c22b389fb1f090fbb007 +fdb8a6ee4e4b29e123fe1652fe72239bd2c8 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +0000000000000000000000000000000000000000000000000000000000000000 +cleartomark +%%EndResource +%%EOF Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/t1Lib/data/TestT1-Regular.pfb and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/t1Lib/data/TestT1-Regular.pfb differ diff -Nru fonttools-3.0/Tests/t1Lib/t1Lib_test.py fonttools-3.21.2/Tests/t1Lib/t1Lib_test.py --- fonttools-3.0/Tests/t1Lib/t1Lib_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/t1Lib/t1Lib_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,101 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +import unittest +import os +from fontTools import t1Lib +from fontTools.pens.basePen import NullPen +import random + + +CWD = os.path.abspath(os.path.dirname(__file__)) +DATADIR = os.path.join(CWD, 'data') +# I used `tx` to convert PFA to LWFN (stored in the data fork) +LWFN = os.path.join(DATADIR, 'TestT1-Regular.lwfn') +PFA = os.path.join(DATADIR, 'TestT1-Regular.pfa') +PFB = os.path.join(DATADIR, 'TestT1-Regular.pfb') + + +class FindEncryptedChunksTest(unittest.TestCase): + + def 
test_findEncryptedChunks(self): + with open(PFA, "rb") as f: + data = f.read() + chunks = t1Lib.findEncryptedChunks(data) + self.assertEqual(len(chunks), 3) + self.assertFalse(chunks[0][0]) + # the second chunk is encrypted + self.assertTrue(chunks[1][0]) + self.assertFalse(chunks[2][0]) + + +class DecryptType1Test(unittest.TestCase): + + def test_decryptType1(self): + with open(PFA, "rb") as f: + data = f.read() + decrypted = t1Lib.decryptType1(data) + self.assertNotEqual(decrypted, data) + + +class ReadWriteTest(unittest.TestCase): + + def test_read_pfa_write_pfb(self): + font = t1Lib.T1Font(PFA) + data = self.write(font, 'PFB') + self.assertEqual(font.getData(), data) + + def test_read_pfb_write_pfa(self): + font = t1Lib.T1Font(PFB) + # 'OTHER' == 'PFA' + data = self.write(font, 'OTHER', dohex=True) + self.assertEqual(font.getData(), data) + + @staticmethod + def write(font, outtype, dohex=False): + temp = os.path.join(DATADIR, 'temp.' + outtype.lower()) + try: + font.saveAs(temp, outtype, dohex=dohex) + newfont = t1Lib.T1Font(temp) + data = newfont.getData() + finally: + if os.path.exists(temp): + os.remove(temp) + return data + + +class T1FontTest(unittest.TestCase): + + def test_parse_lwfn(self): + # the extended attrs are lost on git so we can't auto-detect 'LWFN' + font = t1Lib.T1Font() + font.data = t1Lib.readLWFN(LWFN) + font.parse() + self.assertEqual(font['FontName'], 'TestT1-Regular') + self.assertTrue('Subrs' in font['Private']) + + def test_parse_pfa(self): + font = t1Lib.T1Font(PFA) + font.parse() + self.assertEqual(font['FontName'], 'TestT1-Regular') + self.assertTrue('Subrs' in font['Private']) + + def test_parse_pfb(self): + font = t1Lib.T1Font(PFB) + font.parse() + self.assertEqual(font['FontName'], 'TestT1-Regular') + self.assertTrue('Subrs' in font['Private']) + + def test_getGlyphSet(self): + font = t1Lib.T1Font(PFA) + glyphs = font.getGlyphSet() + i = random.randrange(len(glyphs)) + aglyph = list(glyphs.values())[i] + 
self.assertTrue(hasattr(aglyph, 'draw')) + self.assertFalse(hasattr(aglyph, 'width')) + aglyph.draw(NullPen()) + self.assertTrue(hasattr(aglyph, 'width')) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/data/TestOTF-Regular.otx fonttools-3.21.2/Tests/ttLib/data/TestOTF-Regular.otx --- fonttools-3.0/Tests/ttLib/data/TestOTF-Regular.otx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/data/TestOTF-Regular.otx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,519 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright (c) 2015 by FontTools. No rights reserved. + + + Test OTF + + + Regular + + + FontTools: Test OTF: 2015 + + + Test OTF + + + Version 1.000 + + + TestOTF-Regular + + + Test OTF is not a trademark of FontTools. + + + FontTools + + + FontTools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + + + Test TTF + + + Copyright (c) 2015 by FontTools. No rights reserved. + + + Test OTF + + + Regular + + + FontTools: Test OTF: 2015 + + + Test OTF + + + Version 1.000 + + + TestOTF-Regular + + + Test OTF is not a trademark of FontTools. 
+ + + FontTools + + + FontTools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 131 122 -131 hlineto + return + + + + + + 500 450 hmoveto + 750 -400 -750 vlineto + 50 50 rmoveto + 650 300 -650 vlineto + endchar + + + 0 endchar + + + 250 endchar + + + 723 55 hmoveto + -107 callsubr + 241 -122 rmoveto + -107 callsubr + 241 -122 rmoveto + -107 callsubr + endchar + + + 241 55 hmoveto + -107 callsubr + endchar + + + 250 endchar + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/data/TestTTFComplex-Regular.ttx fonttools-3.21.2/Tests/ttLib/data/TestTTFComplex-Regular.ttx --- fonttools-3.0/Tests/ttLib/data/TestTTFComplex-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/data/TestTTFComplex-Regular.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,114 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/data/TestTTF-Regular.ttx fonttools-3.21.2/Tests/ttLib/data/TestTTF-Regular.ttx --- fonttools-3.0/Tests/ttLib/data/TestTTF-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/data/TestTTF-Regular.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,553 
@@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + + + + + + + + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + + + + + + + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + + + + + + + + + + Copyright (c) 2015 by FontTools. No rights reserved. + + + Test TTF + + + Regular + + + FontTools: Test TTF: 2015 + + + Test TTF + + + Version 1.000 + + + TestTTF-Regular + + + Test TTF is not a trademark of FontTools. + + + FontTools + + + FontTools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + + + Test TTF + + + Copyright (c) 2015 by FontTools. No rights reserved. + + + Test TTF + + + Regular + + + FontTools: Test TTF: 2015 + + + Test TTF + + + Version 1.000 + + + TestTTF-Regular + + + Test TTF is not a trademark of FontTools. 
+ + + FontTools + + + FontTools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/data/test_woff2_metadata.xml fonttools-3.21.2/Tests/ttLib/data/test_woff2_metadata.xml --- fonttools-3.0/Tests/ttLib/data/test_woff2_metadata.xml 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/data/test_woff2_metadata.xml 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,103 @@ + + + + + + + + + + + Description without language. + + + Description with "en" language. + + + Description with "fr" language. + + + + + License without language. + + + License with "en" language. + + + License with "fr" language. + + + + + Copyright without language. + + + Copyright with "en" language. + + + Copyright with "fr" language. + + + + + Trademark without language. + + + Trademark with "en" language. + + + Trademark with "fr" language. 
+ + + + + Extension 1 - Name Without Language + Extension 1 - Name With "en" Language + Extension 1 - Name With "fr" Language + + Extension 1 - Item 1 - Name Without Language + Extension 1 - Item 1 - Name With "en" Language + Extension 1 - Item 1 - Name With "fr" Language + Extension 1 - Item 1 - Value Without Language + Extension 1 - Item 1 - Value With "en" Language + Extension 1 - Item 1 - Value With "fr" Language + + + Extension 1 - Item 2 - Name Without Language + Extension 1 - Item 2 - Name With "en" Language + Extension 1 - Item 2 - Name With "fr" Language + Extension 1 - Item 2 - Value Without Language + Extension 1 - Item 2 - Value With "en" Language + Extension 1 - Item 2 - Value With "fr" Language + + + + Extension 2 - Name Without Language + Extension 2 - Name With "en" Language + Extension 2 - Name With "fr" Language + + Extension 2 - Item 1 - Name Without Language + Extension 2 - Item 1 - Name With "en" Language + Extension 2 - Item 1 - Name With "fr" Language + Extension 2 - Item 1 - Value Without Language + Extension 2 - Item 1 - Value With "en" Language + Extension 2 - Item 1 - Value With "fr" Language + + + Extension 2 - Item 2 - Name Without Language + Extension 2 - Item 2 - Name With "en" Language + Extension 2 - Item 2 - Name With "fr" Language + Extension 2 - Item 2 - Value Without Language + Extension 2 - Item 2 - Value With "en" Language + Extension 2 - Item 2 - Value With "fr" Language + + + Extension 2 - Item 3 - Name Without Language + Extension 2 - Item 3 - Name With "en" Language + Extension 2 - Item 3 - Name With "fr" Language + Extension 2 - Item 3 - Value Without Language + Extension 2 - Item 3 - Value With "en" Language + + + diff -Nru fonttools-3.0/Tests/ttLib/sfnt_test.py fonttools-3.21.2/Tests/ttLib/sfnt_test.py --- fonttools-3.0/Tests/ttLib/sfnt_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/sfnt_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ +from __future__ import print_function, 
division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib.sfnt import calcChecksum + + +def test_calcChecksum(): + assert calcChecksum(b"abcd") == 1633837924 + assert calcChecksum(b"abcdxyz") == 3655064932 diff -Nru fonttools-3.0/Tests/ttLib/tables/_a_n_k_r_test.py fonttools-3.21.2/Tests/ttLib/tables/_a_n_k_r_test.py --- fonttools-3.0/Tests/ttLib/tables/_a_n_k_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_a_n_k_r_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,167 @@ +# coding: utf-8 +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import newTable +import unittest + + +# This is the anchor points table of the first font file in +# “/Library/Fonts/Devanagari Sangam MN.ttc” on macOS 10.12.6. +# For testing, we’ve changed the GlyphIDs to smaller values. +# Also, in the AATLookup, we’ve changed GlyphDataOffset value +# for the end-of-table marker from 0xFFFF to 0 since that is +# what our encoder emits. (The value for end-of-table markers +# does not actually matter). 
+ANKR_FORMAT_0_DATA = deHexStr( + '0000 0000 ' # 0: Format=0, Flags=0 + '0000 000C ' # 4: LookupTableOffset=12 + '0000 0024 ' # 8: GlyphDataTableOffset=36 + '0006 0004 0002 ' # 12: LookupFormat=6, UnitSize=4, NUnits=2 + '0008 0001 0000 ' # 18: SearchRange=8, EntrySelector=1, RangeShift=0 + '0001 0000 ' # 24: Glyph=A, Offset=0 (+GlyphDataTableOffset=36) + '0003 0008 ' # 28: Glyph=C, Offset=8 (+GlyphDataTableOffset=44) + 'FFFF 0000 ' # 32: Glyph=, Offset= + '0000 0001 ' # 36: GlyphData[A].NumPoints=1 + '0235 045E ' # 40: GlyphData[A].Points[0].X=565, .Y=1118 + '0000 0001 ' # 44: GlyphData[C].NumPoints=1 + 'FED2 045E ' # 48: GlyphData[C].Points[0].X=-302, .Y=1118 +) # 52: +assert len(ANKR_FORMAT_0_DATA) == 52 + + +ANKR_FORMAT_0_XML = [ + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +# Same data as ANKR_FORMAT_0_DATA, but with chunks of unused data +# whose presence should not stop us from decompiling the table. +ANKR_FORMAT_0_STRAY_DATA = deHexStr( + '0000 0000 ' # 0: Format=0, Flags=0 + '0000 0018 ' # 4: LookupTableOffset=24 + '0000 0034 ' # 8: GlyphDataTableOffset=52 + 'DEAD BEEF CAFE ' # 12: + 'DEAD BEEF CAFE ' # 18: + '0006 0004 0002 ' # 24: LookupFormat=6, UnitSize=4, NUnits=2 + '0008 0001 0000 ' # 30: SearchRange=8, EntrySelector=1, RangeShift=0 + '0001 0000 ' # 36: Glyph=A, Offset=0 (+GlyphDataTableOffset=52) + '0003 0008 ' # 40: Glyph=C, Offset=8 (+GlyphDataTableOffset=60) + 'FFFF 0000 ' # 44: Glyph=, Offset= + 'BEEF F00D ' # 48: + '0000 0001 ' # 52: GlyphData[A].NumPoints=1 + '0235 045E ' # 56: GlyphData[A].Points[0].X=565, .Y=1118 + '0000 0001 ' # 60: GlyphData[C].NumPoints=1 + 'FED2 045E ' # 64: GlyphData[C].Points[0].X=-302, .Y=1118 +) # 68: +assert len(ANKR_FORMAT_0_STRAY_DATA) == 68 + + +# Constructed test case where glyphs A and D share the same anchor data. 
+ANKR_FORMAT_0_SHARING_DATA = deHexStr( + '0000 0000 ' # 0: Format=0, Flags=0 + '0000 000C ' # 4: LookupTableOffset=12 + '0000 0028 ' # 8: GlyphDataTableOffset=40 + '0006 0004 0003 ' # 12: LookupFormat=6, UnitSize=4, NUnits=3 + '0008 0001 0004 ' # 18: SearchRange=8, EntrySelector=1, RangeShift=4 + '0001 0000 ' # 24: Glyph=A, Offset=0 (+GlyphDataTableOffset=36) + '0003 0008 ' # 28: Glyph=C, Offset=8 (+GlyphDataTableOffset=44) + '0004 0000 ' # 32: Glyph=D, Offset=0 (+GlyphDataTableOffset=36) + 'FFFF 0000 ' # 36: Glyph=, Offset= + '0000 0001 ' # 40: GlyphData[A].NumPoints=1 + '0235 045E ' # 44: GlyphData[A].Points[0].X=565, .Y=1118 + '0000 0002 ' # 48: GlyphData[C].NumPoints=2 + '000B 000C ' # 52: GlyphData[C].Points[0].X=11, .Y=12 + '001B 001C ' # 56: GlyphData[C].Points[1].X=27, .Y=28 +) # 60: +assert len(ANKR_FORMAT_0_SHARING_DATA) == 60 + + +ANKR_FORMAT_0_SHARING_XML = [ + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +class ANKRTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + cls.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D']) + + def decompileToXML(self, data, xml): + table = newTable('ankr') + table.decompile(data, self.font) + self.assertEqual(getXML(table.toXML), xml) + + def compileFromXML(self, xml, data): + table = newTable('ankr') + for name, attrs, content in parseXML(xml): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), hexStr(data)) + + def roundtrip(self, data, xml): + self.decompileToXML(data, xml) + self.compileFromXML(xml, data) + + def testFormat0(self): + self.roundtrip(ANKR_FORMAT_0_DATA, ANKR_FORMAT_0_XML) + + def testFormat0_stray(self): + self.decompileToXML(ANKR_FORMAT_0_STRAY_DATA, ANKR_FORMAT_0_XML) + + def testFormat0_sharing(self): + self.roundtrip(ANKR_FORMAT_0_SHARING_DATA, 
ANKR_FORMAT_0_SHARING_XML) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_a_v_a_r_test.py fonttools-3.21.2/Tests/ttLib/tables/_a_v_a_r_test.py --- fonttools-3.0/Tests/ttLib/tables/_a_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_a_v_a_r_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,85 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import parseXML +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._a_v_a_r import table__a_v_a_r +from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis +import collections +import logging +import unittest + + +TEST_DATA = deHexStr( + "00 01 00 00 00 00 00 02 " + "00 04 C0 00 C0 00 00 00 00 00 13 33 33 33 40 00 40 00 " + "00 03 C0 00 C0 00 00 00 00 00 40 00 40 00") + + +class AxisVariationTableTest(unittest.TestCase): + def test_compile(self): + avar = table__a_v_a_r() + avar.segments["wdth"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} + avar.segments["wght"] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} + self.assertEqual(TEST_DATA, avar.compile(self.makeFont(["wdth", "wght"]))) + + def test_decompile(self): + avar = table__a_v_a_r() + avar.decompile(TEST_DATA, self.makeFont(["wdth", "wght"])) + self.assertEqual({ + "wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}, + "wght": {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} + }, avar.segments) + + def test_decompile_unsupportedVersion(self): + avar = table__a_v_a_r() + font = self.makeFont(["wdth", "wght"]) + self.assertRaises(TTLibError, avar.decompile, deHexStr("02 01 03 06 00 00 00 00"), font) + + def test_toXML(self): + avar = table__a_v_a_r() + avar.segments["opsz"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} + writer = XMLWriter(BytesIO()) + 
avar.toXML(writer, self.makeFont(["opsz"])) + self.assertEqual([ + '', + '', + '', + '', + '', + '' + ], self.xml_lines(writer)) + + def test_fromXML(self): + avar = table__a_v_a_r() + for name, attrs, content in parseXML( + '' + ' ' + ' ' + ' ' + ' ' + ''): + avar.fromXML(name, attrs, content, ttFont=None) + self.assertEqual({"wdth": {-1: -1, 0: 0, 0.7: 0.2, 1.0: 1.0}}, + avar.segments) + + @staticmethod + def makeFont(axisTags): + """['opsz', 'wdth'] --> ttFont""" + fvar = table__f_v_a_r() + for tag in axisTags: + axis = Axis() + axis.axisTag = tag + fvar.axes.append(axis) + return {"fvar": fvar} + + @staticmethod + def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in content.splitlines()][1:] + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_b_s_l_n_test.py fonttools-3.21.2/Tests/ttLib/tables/_b_s_l_n_test.py --- fonttools-3.0/Tests/ttLib/tables/_b_s_l_n_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_b_s_l_n_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,311 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import newTable +import unittest + + +# Apple's spec of the baseline table gives no example for 'bsln' format 0, +# but the Apple Chancery font contains the following data. 
+BSLN_FORMAT_0_DATA = deHexStr( + '0001 0000 0000 ' # 0: Version=1.0, Format=0 + '0000 ' # 6: DefaultBaseline=0 (Roman baseline) + '0000 01D1 0000 0541 ' # 8: Delta[0..3]=0, 465, 0, 1345 + '01FB 0000 0000 0000 ' # 16: Delta[4..7]=507, 0, 0, 0 + '0000 0000 0000 0000 ' # 24: Delta[8..11]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 32: Delta[12..15]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 40: Delta[16..19]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 48: Delta[20..23]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 56: Delta[24..27]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 64: Delta[28..31]=0, 0, 0, 0 +) # 72: +assert len(BSLN_FORMAT_0_DATA) == 72 + + +BSLN_FORMAT_0_XML = [ + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +# Example: Format 1 Baseline Table +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html +# The example in the AAT specification uses the value 270 for Seg[0].LastGlyph, +# whereas we use the value 10 for testng to shorten the XML dump. 
+BSLN_FORMAT_1_DATA = deHexStr( + '0001 0000 0001 ' # 0: Version=1.0, Format=1 + '0001 ' # 6: DefaultBaseline=1 (Ideographic baseline) + '0000 0357 0000 05F0 ' # 8: Delta[0..3]=0, 855, 0, 1520 + '0000 0000 0000 0000 ' # 16: Delta[4..7]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 24: Delta[8..11]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 32: Delta[12..15]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 40: Delta[16..19]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 48: Delta[20..23]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 56: Delta[24..27]=0, 0, 0, 0 + '0000 0000 0000 0000 ' # 64: Delta[28..31]=0, 0, 0, 0 + '0002 0006 0001 ' # 72: LookupFormat=2, UnitSize=6, NUnits=1 + '0006 0000 0000 ' # 78: SearchRange=6, EntrySelector=0, RangeShift=0 + '000A 0002 0000 ' # 84: Seg[0].LastGlyph=10 FirstGl=2 Value=0/Roman + 'FFFF FFFF 0000 ' # 90: Seg[1]= +) # 96: +assert len(BSLN_FORMAT_1_DATA) == 96 + + +BSLN_FORMAT_1_XML = [ + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +BSLN_FORMAT_2_DATA = deHexStr( + '0001 0000 0002 ' # 0: Version=1.0, Format=2 + '0004 ' # 6: DefaultBaseline=4 (Math) + '0016 ' # 8: StandardGlyph=22 + '0050 0051 FFFF 0052 ' # 10: ControlPoint[0..3]=80, 81, , 82 + 'FFFF FFFF FFFF FFFF ' # 18: ControlPoint[4..7]= + 'FFFF FFFF FFFF FFFF ' # 26: ControlPoint[8..11]= + 'FFFF FFFF FFFF FFFF ' # 34: ControlPoint[12..15]= + 'FFFF FFFF FFFF FFFF ' # 42: ControlPoint[16..19]= + 'FFFF FFFF FFFF FFFF ' # 50: ControlPoint[20..23]= + 'FFFF FFFF FFFF FFFF ' # 58: ControlPoint[24..27]= + 'FFFF FFFF FFFF FFFF ' # 66: ControlPoint[28..31]= +) # 74: +assert len(BSLN_FORMAT_2_DATA) == 74 + + +BSLN_FORMAT_2_XML = [ + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', 
+ ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +# Example: Format 3 Baseline Table +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html +# The example in the AAT specification uses the value 270 for Seg[0].LastGlyph, +# whereas we use the value 10 for testng to shorten the XML dump. +BSLN_FORMAT_3_DATA = deHexStr( + '0001 0000 0003 ' # 0: Version=1.0, Format=3 + '0001 ' # 6: DefaultBaseline=1 (Ideographic) + '0016 ' # 8: StandardGlyph=22 + '0050 0051 FFFF 0052 ' # 10: ControlPoint[0..3]=80, 81, , 82 + 'FFFF FFFF FFFF FFFF ' # 18: ControlPoint[4..7]= + 'FFFF FFFF FFFF FFFF ' # 26: ControlPoint[8..11]= + 'FFFF FFFF FFFF FFFF ' # 34: ControlPoint[12..15]= + 'FFFF FFFF FFFF FFFF ' # 42: ControlPoint[16..19]= + 'FFFF FFFF FFFF FFFF ' # 50: ControlPoint[20..23]= + 'FFFF FFFF FFFF FFFF ' # 58: ControlPoint[24..27]= + 'FFFF FFFF FFFF FFFF ' # 66: ControlPoint[28..31]= + '0002 0006 0001 ' # 74: LookupFormat=2, UnitSize=6, NUnits=1 + '0006 0000 0000 ' # 80: SearchRange=6, EntrySelector=0, RangeShift=0 + '000A 0002 0000 ' # 86: Seg[0].LastGlyph=10 FirstGl=2 Value=0/Roman + 'FFFF FFFF 0000 ' # 92: Seg[1]= +) # 98: +assert len(BSLN_FORMAT_3_DATA) == 98 + + +BSLN_FORMAT_3_XML = [ + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +class BSLNTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + cls.font = FakeFont( + ['.notdef'] + [g for g in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']) + + def decompileToXML(self, data, xml): + table = newTable('bsln') + table.decompile(data, self.font) + self.assertEqual(getXML(table.toXML), xml) + + def 
compileFromXML(self, xml, data): + table = newTable('bsln') + for name, attrs, content in parseXML(xml): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), hexStr(data)) + + def testFormat0(self): + self.decompileToXML(BSLN_FORMAT_0_DATA, BSLN_FORMAT_0_XML) + self.compileFromXML(BSLN_FORMAT_0_XML, BSLN_FORMAT_0_DATA) + + def testFormat1(self): + self.decompileToXML(BSLN_FORMAT_1_DATA, BSLN_FORMAT_1_XML) + self.compileFromXML(BSLN_FORMAT_1_XML, BSLN_FORMAT_1_DATA) + + def testFormat2(self): + self.decompileToXML(BSLN_FORMAT_2_DATA, BSLN_FORMAT_2_XML) + self.compileFromXML(BSLN_FORMAT_2_XML, BSLN_FORMAT_2_DATA) + + def testFormat3(self): + self.decompileToXML(BSLN_FORMAT_3_DATA, BSLN_FORMAT_3_XML) + self.compileFromXML(BSLN_FORMAT_3_XML, BSLN_FORMAT_3_DATA) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/C_F_F__2_test.py fonttools-3.21.2/Tests/ttLib/tables/C_F_F__2_test.py --- fonttools-3.0/Tests/ttLib/tables/C_F_F__2_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/C_F_F__2_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,60 @@ +"""cff2Lib_test.py -- unit test for Adobe CFF fonts.""" + +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont, newTable +import re +import os +import unittest + + +CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +DATA_DIR = os.path.join(CURR_DIR, 'data') + +CFF_TTX = os.path.join(DATA_DIR, "C_F_F__2.ttx") +CFF_BIN = os.path.join(DATA_DIR, "C_F_F__2.bin") + + +def strip_VariableItems(string): + # ttlib changes with the fontTools version + string = re.sub(' ttLibVersion=".*"', '', string) + # head table checksum and mod date changes with each save. 
+ string = re.sub('', '', string) + string = re.sub('', '', string) + return string + +class CFFTableTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + with open(CFF_BIN, 'rb') as f: + font = TTFont(file=CFF_BIN) + cffTable = font['CFF2'] + cls.cff2Data = cffTable.compile(font) + with open(CFF_TTX, 'r') as f: + cff2XML = f.read() + cff2XML = strip_VariableItems(cff2XML) + cls.cff2XML = cff2XML.splitlines() + + def test_toXML(self): + font = TTFont(file=CFF_BIN) + cffTable = font['CFF2'] + cffData = cffTable.compile(font) + out = UnicodeIO() + font.saveXML(out) + cff2XML = out.getvalue() + cff2XML = strip_VariableItems(cff2XML) + cff2XML = cff2XML.splitlines() + self.assertEqual(cff2XML, self.cff2XML) + + def test_fromXML(self): + font = TTFont(sfntVersion='OTTO') + font.importXML(CFF_TTX) + cffTable = font['CFF2'] + cff2Data = cffTable.compile(font) + self.assertEqual(cff2Data, self.cff2Data) + + +if __name__ == "__main__": + unittest.main() diff -Nru fonttools-3.0/Tests/ttLib/tables/C_F_F_test.py fonttools-3.21.2/Tests/ttLib/tables/C_F_F_test.py --- fonttools-3.0/Tests/ttLib/tables/C_F_F_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/C_F_F_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,51 @@ +"""cffLib_test.py -- unit test for Adobe CFF fonts.""" + +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont, newTable +import re +import os +import unittest + + +CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +DATA_DIR = os.path.join(CURR_DIR, 'data') + +CFF_TTX = os.path.join(DATA_DIR, "C_F_F_.ttx") +CFF_BIN = os.path.join(DATA_DIR, "C_F_F_.bin") + + +def strip_ttLibVersion(string): + return re.sub(' ttLibVersion=".*"', '', string) + + +class CFFTableTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + with open(CFF_BIN, 'rb') as f: + cls.cffData = 
f.read() + with open(CFF_TTX, 'r') as f: + cls.cffXML = strip_ttLibVersion(f.read()).splitlines() + + def test_toXML(self): + font = TTFont(sfntVersion='OTTO') + cffTable = font['CFF '] = newTable('CFF ') + cffTable.decompile(self.cffData, font) + out = UnicodeIO() + font.saveXML(out) + cffXML = strip_ttLibVersion(out.getvalue()).splitlines() + self.assertEqual(cffXML, self.cffXML) + + def test_fromXML(self): + font = TTFont(sfntVersion='OTTO') + font.importXML(CFF_TTX) + cffTable = font['CFF '] + cffData = cffTable.compile(font) + self.assertEqual(cffData, self.cffData) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_c_i_d_g_test.py fonttools-3.21.2/Tests/ttLib/tables/_c_i_d_g_test.py --- fonttools-3.0/Tests/ttLib/tables/_c_i_d_g_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_c_i_d_g_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,70 @@ +# coding: utf-8 +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import newTable +import unittest + + +# On macOS X 10.12.6, the first font in /System/Library/Fonts/PingFang.ttc +# has a ‘cidg’ table with a similar structure as this test data, just larger. 
+CIDG_DATA = deHexStr( + "0000 0000 " # 0: Format=0, Flags=0 + "0000 0098 " # 4: StructLength=152 + "0000 " # 8: Registry=0 + "41 64 6F 62 65 " # 10: RegistryName="Adobe" + + ("00" * 59) + # 15: + "0002 " # 74: Order=2 + "43 4E 53 31 " # 76: Order="CNS1" + + ("00" * 60) + # 80: + "0000 " # 140: SupplementVersion=0 + "0004 " # 142: Count + "0000 " # 144: GlyphID[0]=.notdef + "FFFF " # 146: CIDs[1]= + "0003 " # 148: CIDs[2]=C + "0001 " # 150: CIDs[3]=A +) # 152: +assert len(CIDG_DATA) == 152, len(CIDG_DATA) + + +CIDG_XML = [ + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +class GCIDTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.maxDiff = None + cls.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D']) + + def testDecompileToXML(self): + table = newTable('cidg') + table.decompile(CIDG_DATA, self.font) + self.assertEqual(getXML(table.toXML, self.font), CIDG_XML) + + def testCompileFromXML(self): + table = newTable('cidg') + for name, attrs, content in parseXML(CIDG_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(CIDG_DATA)) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_c_m_a_p_test.py fonttools-3.21.2/Tests/ttLib/tables/_c_m_a_p_test.py --- fonttools-3.0/Tests/ttLib/tables/_c_m_a_p_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_c_m_a_p_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,88 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools import ttLib +import unittest +from fontTools.ttLib.tables._c_m_a_p import CmapSubtable, table__c_m_a_p + +class CmapSubtableTest(unittest.TestCase): + + def makeSubtable(self, cmapFormat, platformID, platEncID, langID): + subtable = CmapSubtable.newSubtable(cmapFormat) + subtable.platformID, 
subtable.platEncID, subtable.language = (platformID, platEncID, langID) + return subtable + + def test_toUnicode_utf16be(self): + subtable = self.makeSubtable(4, 0, 2, 7) + self.assertEqual("utf_16_be", subtable.getEncoding()) + self.assertEqual(True, subtable.isUnicode()) + + def test_toUnicode_macroman(self): + subtable = self.makeSubtable(4, 1, 0, 7) # MacRoman + self.assertEqual("mac_roman", subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_toUnicode_macromanian(self): + subtable = self.makeSubtable(4, 1, 0, 37) # Mac Romanian + self.assertNotEqual(None, subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_extended_mac_encodings(self): + subtable = self.makeSubtable(4, 1, 1, 0) # Mac Japanese + self.assertNotEqual(None, subtable.getEncoding()) + self.assertEqual(False, subtable.isUnicode()) + + def test_extended_unknown(self): + subtable = self.makeSubtable(4, 10, 11, 12) + self.assertEqual(subtable.getEncoding(), None) + self.assertEqual(subtable.getEncoding("ascii"), "ascii") + self.assertEqual(subtable.getEncoding(default="xyz"), "xyz") + + def test_decompile_4(self): + subtable = CmapSubtable.newSubtable(4) + font = ttLib.TTFont() + font.setGlyphOrder([]) + subtable.decompile(b'\0' * 3 + b'\x10' + b'\0' * 12, font) + + def test_decompile_12(self): + subtable = CmapSubtable.newSubtable(12) + font = ttLib.TTFont() + font.setGlyphOrder([]) + subtable.decompile(b'\0' * 7 + b'\x10' + b'\0' * 8, font) + + def test_buildReversed(self): + c4 = self.makeSubtable(4, 3, 1, 0) + c4.cmap = {0x0041:'A', 0x0391:'A'} + c12 = self.makeSubtable(12, 3, 10, 0) + c12.cmap = {0x10314: 'u10314'} + cmap = table__c_m_a_p() + cmap.tables = [c4, c12] + self.assertEqual(cmap.buildReversed(), {'A':{0x0041, 0x0391}, 'u10314':{0x10314}}) + + def test_getBestCmap(self): + c4 = self.makeSubtable(4, 3, 1, 0) + c4.cmap = {0x0041:'A', 0x0391:'A'} + c12 = self.makeSubtable(12, 3, 10, 0) + c12.cmap = {0x10314: 'u10314'} + cmap = 
table__c_m_a_p() + cmap.tables = [c4, c12] + self.assertEqual(cmap.getBestCmap(), {0x10314: 'u10314'}) + self.assertEqual(cmap.getBestCmap(cmapPreferences=[(3, 1)]), {0x0041:'A', 0x0391:'A'}) + self.assertEqual(cmap.getBestCmap(cmapPreferences=[(0, 4)]), None) + + def test_font_getBestCmap(self): + c4 = self.makeSubtable(4, 3, 1, 0) + c4.cmap = {0x0041:'A', 0x0391:'A'} + c12 = self.makeSubtable(12, 3, 10, 0) + c12.cmap = {0x10314: 'u10314'} + cmap = table__c_m_a_p() + cmap.tables = [c4, c12] + font = ttLib.TTFont() + font["cmap"] = cmap + self.assertEqual(font.getBestCmap(), {0x10314: 'u10314'}) + self.assertEqual(font.getBestCmap(cmapPreferences=[(3, 1)]), {0x0041:'A', 0x0391:'A'}) + self.assertEqual(font.getBestCmap(cmapPreferences=[(0, 4)]), None) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/C_P_A_L_test.py fonttools-3.21.2/Tests/ttLib/tables/C_P_A_L_test.py --- fonttools-3.0/Tests/ttLib/tables/C_P_A_L_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/C_P_A_L_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,227 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.testTools import getXML, parseXML +from fontTools.misc.textTools import deHexStr +from fontTools.ttLib import getTableModule, newTable +import unittest + + +CPAL_DATA_V0 = deHexStr( + '0000 0002 ' # version=0, numPaletteEntries=2 + '0002 0004 ' # numPalettes=2, numColorRecords=4 + '00000010 ' # offsetToFirstColorRecord=16 + '0000 0002 ' # colorRecordIndex=[0, 2] + '000000FF FFCC66FF ' # colorRecord #0, #1 (blue/green/red/alpha) + '000000FF 000080FF') # colorRecord #2, #3 + + +CPAL_DATA_V0_SHARING_COLORS = deHexStr( + '0000 0003 ' # version=0, numPaletteEntries=3 + '0004 0006 ' # numPalettes=4, numColorRecords=6 + '00000014 ' # offsetToFirstColorRecord=20 + '0000 0000 0003 0000 ' # colorRecordIndex=[0, 0, 3, 0] + '443322FF 
77889911 55555555 ' # colorRecord #0, #1, #2 (BGRA) + '443322FF 77889911 FFFFFFFF') # colorRecord #3, #4, #5 + + +CPAL_DATA_V1_NOLABELS_NOTYPES = deHexStr( + '0001 0003 ' # version=1, numPaletteEntries=3 + '0002 0006 ' # numPalettes=2, numColorRecords=6 + '0000001C ' # offsetToFirstColorRecord=28 + '0000 0003 ' # colorRecordIndex=[0, 3] + '00000000 ' # offsetToPaletteTypeArray=0 + '00000000 ' # offsetToPaletteLabelArray=0 + '00000000 ' # offsetToPaletteEntryLabelArray=0 + 'CAFECAFE 00112233 44556677 ' # colorRecord #0, #1, #2 (BGRA) + '31415927 42424242 00331337') # colorRecord #3, #4, #5 + + +CPAL_DATA_V1 = deHexStr( + '0001 0003 ' # version=1, numPaletteEntries=3 + '0002 0006 ' # numPalettes=2, numColorRecords=6 + '0000001C ' # offsetToFirstColorRecord=28 + '0000 0003 ' # colorRecordIndex=[0, 3] + '00000034 ' # offsetToPaletteTypeArray=52 + '0000003C ' # offsetToPaletteLabelArray=60 + '00000040 ' # offsetToPaletteEntryLabelArray=64 + 'CAFECAFE 00112233 44556677 ' # colorRecord #0, #1, #2 (BGRA) + '31415927 42424242 00331337 ' # colorRecord #3, #4, #5 + '00000001 00000002 ' # paletteType=[1, 2] + '0102 0103 ' # paletteLabel=[258, 259] + '0201 0202 0203') # paletteEntryLabel=[513, 514, 515] + + +class FakeNameTable(object): + def __init__(self, names): + self.names = names + + def getDebugName(self, nameID): + return self.names.get(nameID) + + +class CPALTest(unittest.TestCase): + def test_decompile_v0(self): + cpal = newTable('CPAL') + cpal.decompile(CPAL_DATA_V0, ttFont=None) + self.assertEqual(cpal.version, 0) + self.assertEqual(cpal.numPaletteEntries, 2) + self.assertEqual(repr(cpal.palettes), + '[[#000000FF, #66CCFFFF], [#000000FF, #800000FF]]') + self.assertEqual(cpal.paletteLabels, [0, 0]) + self.assertEqual(cpal.paletteTypes, [0, 0]) + self.assertEqual(cpal.paletteEntryLabels, [0, 0]) + + def test_decompile_v0_sharingColors(self): + cpal = newTable('CPAL') + cpal.decompile(CPAL_DATA_V0_SHARING_COLORS, ttFont=None) + self.assertEqual(cpal.version, 0) + 
self.assertEqual(cpal.numPaletteEntries, 3) + self.assertEqual([repr(p) for p in cpal.palettes], [ + '[#223344FF, #99887711, #55555555]', + '[#223344FF, #99887711, #55555555]', + '[#223344FF, #99887711, #FFFFFFFF]', + '[#223344FF, #99887711, #55555555]']) + self.assertEqual(cpal.paletteLabels, [0, 0, 0, 0]) + self.assertEqual(cpal.paletteTypes, [0, 0, 0, 0]) + self.assertEqual(cpal.paletteEntryLabels, [0, 0, 0]) + + def test_decompile_v1_noLabelsNoTypes(self): + cpal = newTable('CPAL') + cpal.decompile(CPAL_DATA_V1_NOLABELS_NOTYPES, ttFont=None) + self.assertEqual(cpal.version, 1) + self.assertEqual(cpal.numPaletteEntries, 3) + self.assertEqual([repr(p) for p in cpal.palettes], [ + '[#CAFECAFE, #22110033, #66554477]', # RGBA + '[#59413127, #42424242, #13330037]']) + self.assertEqual(cpal.paletteLabels, [0, 0]) + self.assertEqual(cpal.paletteTypes, [0, 0]) + self.assertEqual(cpal.paletteEntryLabels, [0, 0, 0]) + + def test_decompile_v1(self): + cpal = newTable('CPAL') + cpal.decompile(CPAL_DATA_V1, ttFont=None) + self.assertEqual(cpal.version, 1) + self.assertEqual(cpal.numPaletteEntries, 3) + self.assertEqual([repr(p) for p in cpal.palettes], [ + '[#CAFECAFE, #22110033, #66554477]', # RGBA + '[#59413127, #42424242, #13330037]']) + self.assertEqual(cpal.paletteTypes, [1, 2]) + self.assertEqual(cpal.paletteLabels, [258, 259]) + self.assertEqual(cpal.paletteEntryLabels, [513, 514, 515]) + + def test_compile_v0(self): + cpal = newTable('CPAL') + cpal.decompile(CPAL_DATA_V0, ttFont=None) + self.assertEqual(cpal.compile(ttFont=None), CPAL_DATA_V0) + + def test_compile_v0_sharingColors(self): + cpal = newTable('CPAL') + cpal.version = 0 + Color = getTableModule('CPAL').Color + palette1 = [Color(red=0x22, green=0x33, blue=0x44, alpha=0xff), + Color(red=0x99, green=0x88, blue=0x77, alpha=0x11), + Color(red=0x55, green=0x55, blue=0x55, alpha=0x55)] + palette2 = [Color(red=0x22, green=0x33, blue=0x44, alpha=0xff), + Color(red=0x99, green=0x88, blue=0x77, alpha=0x11), + 
Color(red=0xFF, green=0xFF, blue=0xFF, alpha=0xFF)] + cpal.numPaletteEntries = len(palette1) + cpal.palettes = [palette1, palette1, palette2, palette1] + self.assertEqual(cpal.compile(ttFont=None), + CPAL_DATA_V0_SHARING_COLORS) + + def test_compile_v1(self): + cpal = newTable('CPAL') + cpal.decompile(CPAL_DATA_V1, ttFont=None) + self.assertEqual(cpal.compile(ttFont=None), CPAL_DATA_V1) + + def test_compile_v1_noLabelsNoTypes(self): + cpal = newTable('CPAL') + cpal.decompile(CPAL_DATA_V1_NOLABELS_NOTYPES, ttFont=None) + self.assertEqual(cpal.compile(ttFont=None), + CPAL_DATA_V1_NOLABELS_NOTYPES) + + def test_toXML_v0(self): + cpal = newTable('CPAL') + cpal.decompile(CPAL_DATA_V0, ttFont=None) + self.assertEqual(getXML(cpal.toXML), + ['', + '', + '', + ' ', + ' ', + '', + '', + ' ', + ' ', + '']) + + def test_toXML_v1(self): + name = FakeNameTable({258: "Spring theme", 259: "Winter theme", + 513: "darks", 515: "lights"}) + cpal = newTable('CPAL') + ttFont = {"name": name, "CPAL": cpal} + cpal.decompile(CPAL_DATA_V1, ttFont) + self.assertEqual(getXML(cpal.toXML, ttFont), + ['', + '', + '', + ' ', + ' ', + ' ', + ' ', + '', + '', + ' ', + ' ', + ' ', + ' ', + '', + '', + ' ']) + + def test_fromXML_v0(self): + cpal = newTable('CPAL') + for name, attrs, content in parseXML( + '' + '' + '' + ' ' + ' ' + ''): + cpal.fromXML(name, attrs, content, ttFont=None) + self.assertEqual(cpal.version, 0) + self.assertEqual(cpal.numPaletteEntries, 2) + self.assertEqual(repr(cpal.palettes), '[[#12345678, #FEDCBA98]]') + self.assertEqual(cpal.paletteLabels, [0]) + self.assertEqual(cpal.paletteTypes, [0]) + self.assertEqual(cpal.paletteEntryLabels, [0, 0]) + + def test_fromXML_v1(self): + cpal = newTable('CPAL') + for name, attrs, content in parseXML( + '' + '' + '' + ' ' + ' ' + ' ' + '' + '' + ' '): + cpal.fromXML(name, attrs, content, ttFont=None) + self.assertEqual(cpal.version, 1) + self.assertEqual(cpal.numPaletteEntries, 3) + self.assertEqual(repr(cpal.palettes), + '[[#12345678, 
#FEDCBA98, #CAFECAFE]]') + self.assertEqual(cpal.paletteLabels, [259]) + self.assertEqual(cpal.paletteTypes, [2]) + self.assertEqual(cpal.paletteEntryLabels, [0, 262, 0]) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_c_v_a_r_test.py fonttools-3.21.2/Tests/ttLib/tables/_c_v_a_r_test.py --- fonttools-3.0/Tests/ttLib/tables/_c_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_c_v_a_r_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,111 @@ +from __future__ import \ + print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import TTLibError, getTableModule, newTable +from fontTools.ttLib.tables.TupleVariation import TupleVariation + +import unittest + + +CVAR_DATA = deHexStr( + "0001 0000 " # 0: majorVersion=1 minorVersion=0 + "8002 0018 " # 4: tupleVariationCount=2|TUPLES_SHARE_POINT_NUMBERS offsetToData=24 + "0004 " # 8: tvHeader[0].variationDataSize=4 + "8000 " # 10: tvHeader[0].tupleIndex=EMBEDDED_PEAK + "4000 0000 " # 12: tvHeader[0].peakTuple=[1.0, 0.0] + "0004 " # 16: tvHeader[1].variationDataSize=4 + "8000 " # 18: tvHeader[1].tupleIndex=EMBEDDED_PEAK + "C000 3333 " # 20: tvHeader[1].peakTuple=[-1.0, 0.8] + "03 02 02 01 01" # 24: shared_pointCount=03, run_count=2 cvt=[2, 3, 4] + "02 03 01 04 " # 25: deltas=[3, 1, 4] + "02 09 07 08") # 29: deltas=[9, 7, 8] + +CVAR_PRIVATE_POINT_DATA = deHexStr( + "0001 0000 " # 0: majorVersion=1 minorVersion=0 + "0002 0018 " # 4: tupleVariationCount=2 offsetToData=24 + "0009 " # 8: tvHeader[0].variationDataSize=9 + "A000 " # 10: tvHeader[0].tupleIndex=EMBEDDED_PEAK|PRIVATE_POINT_NUMBERS + "4000 0000 " # 12: tvHeader[0].peakTuple=[1.0, 0.0] + "0009 " # 16: tvHeader[1].variationDataSize=9 + "A000 " # 18: 
tvHeader[1].tupleIndex=EMBEDDED_PEAK|PRIVATE_POINT_NUMBERS + "C000 3333 " # 20: tvHeader[1].peakTuple=[-1.0, 0.8] + "03 02 02 01 01 02 03 01 04 " # 24: pointCount=3 run_count=2 cvt=2 1 1 run_count=2 deltas=[3, 1, 4] + "03 02 02 01 01 02 09 07 08 ") # 33: pointCount=3 run_count=2 cvt=2 1 1 run_count=2 deltas=[9, 7, 8] + +CVAR_XML = [ + '', + '', + ' ', + ' ', + ' ', + ' ', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + +CVAR_VARIATIONS = [ + TupleVariation({"wght": (0.0, 1.0, 1.0)}, [None, None, 3, 1, 4]), + TupleVariation({"wght": (-1, -1.0, 0.0), "wdth": (0.0, 0.8, 0.8)}, + [None, None, 9, 7, 8]), +] + + +class CVARTableTest(unittest.TestCase): + def makeFont(self): + cvt, cvar, fvar = newTable("cvt "), newTable("cvar"), newTable("fvar") + font = {"cvt ": cvt, "cvar": cvar, "fvar": fvar} + cvt.values = [0, 0, 0, 1000, -2000] + Axis = getTableModule("fvar").Axis + fvar.axes = [Axis(), Axis()] + fvar.axes[0].axisTag, fvar.axes[1].axisTag = "wght", "wdth" + return font, cvar + + def test_compile(self): + font, cvar = self.makeFont() + cvar.variations = CVAR_VARIATIONS + self.assertEqual(hexStr(cvar.compile(font)), hexStr(CVAR_PRIVATE_POINT_DATA)) + + def test_compile_shared_points(self): + font, cvar = self.makeFont() + cvar.variations = CVAR_VARIATIONS + self.assertEqual(hexStr(cvar.compile(font, useSharedPoints=True)), hexStr(CVAR_DATA)) + + def test_decompile(self): + font, cvar = self.makeFont() + cvar.decompile(CVAR_PRIVATE_POINT_DATA, font) + self.assertEqual(cvar.majorVersion, 1) + self.assertEqual(cvar.minorVersion, 0) + self.assertEqual(cvar.variations, CVAR_VARIATIONS) + + def test_decompile_shared_points(self): + font, cvar = self.makeFont() + cvar.decompile(CVAR_DATA, font) + self.assertEqual(cvar.majorVersion, 1) + self.assertEqual(cvar.minorVersion, 0) + self.assertEqual(cvar.variations, CVAR_VARIATIONS) + + def test_fromXML(self): + font, cvar = self.makeFont() + for name, attrs, content in parseXML(CVAR_XML): + cvar.fromXML(name, attrs, 
content, ttFont=font) + self.assertEqual(cvar.majorVersion, 1) + self.assertEqual(cvar.minorVersion, 0) + self.assertEqual(cvar.variations, CVAR_VARIATIONS) + + def test_toXML(self): + font, cvar = self.makeFont() + cvar.variations = CVAR_VARIATIONS + self.assertEqual(getXML(cvar.toXML, font), CVAR_XML) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/base.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.CFF fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.CFF --- fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.CFF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.CFF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,735 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 145 665 rmoveto + -74 -43 -28 -166 -6 -75 -10 -124 21 -220 148 -44 rrcurveto + -11 37 40 7 39 hhcurveto + 6 8 3 4 4 hvcurveto + 69 60 39 31 2 103 1 44 5 43 -4 43 -7 87 -50 217 -88 45 -24 13 -29 1 -28 7 -12 -1 -8 -4 -6 -7 -16 -2 -16 -3 -13 -8 rrcurveto + 122 -50 rmoveto + 97 -69 31 -246 -15 -107 -13 -95 -42 -80 -111 33 -52 16 -30 55 -16 46 -32 98 -1 279 95 68 13 9 18 -2 15 4 1 1 2 1 2 1 14 -2 13 -2 11 -8 rrcurveto + 233 -615 rmoveto + return + + + 175 661 rmoveto + 1 -215 6 -215 1 -215 4 -42 54 3 2 41 -1 216 -6 214 -1 215 -11 35 -42 0 -7 -37 rrcurveto + 325 -661 rmoveto + return + + + 143 536 rmoveto + 59 22 61 39 64 3 78 5 3 -97 -32 -48 -76 -117 -268 -55 -9 -168 -2 -31 11 -30 5 -31 5 -10 5 -5 10 -5 50 -15 58 8 50 1 65 1 66 3 65 1 37 7 0 42 -35 11 rrcurveto + -106 -2 -108 -7 -107 4 -2 18 -8 18 2 17 16 141 259 55 69 117 72 122 -67 142 -156 -40 -52 -14 -48 -26 -51 -19 -40 -14 16 -51 41 8 rrcurveto + 357 -536 rmoveto + return + + + 92 580 rmoveto + 13 6 13 7 14 4 54 16 184 1 9 -81 1 -13 -3 -13 -3 
-14 -9 -45 -124 -14 -42 -8 rrcurveto + -2 -2 1 -1 hhcurveto + -2 vlineto + -30 -15 5 -40 35 -4 60 -5 62 -4 47 -43 83 -75 -108 -134 -82 -20 -75 -17 -101 91 -42 -14 -22 -8 -7 -18 10 -21 2 -2 2 -2 1 -2 10 -10 11 -3 10 2 rrcurveto + 2 2 -1 1 hhcurveto + 16 -7 15 -7 15 -7 33 -14 33 -14 35 -7 103 -18 81 94 48 78 51 83 -64 98 -77 36 -4 1 -3 2 -4 2 17 7 16 9 15 12 77 61 -32 107 -79 40 -91 47 -115 -9 -91 -40 rrcurveto + -27 -24 18 -37 36 7 rrcurveto + 408 -580 rmoveto + return + + + 336 627 rmoveto + -73 -94 -78 -92 -70 -97 -32 -45 -39 -39 -2 -56 2 -16 5 -7 14 -7 76 -39 130 16 102 10 -2 -44 -2 -44 -1 -43 4 -42 54 3 2 41 1 45 2 45 2 45 rrcurveto + 6 6 0 1 5 hvcurveto + 41 8 -6 54 -42 -3 rrcurveto + -2 -3 -1 -2 hhcurveto + 4 135 -3 133 -49 127 -2 3 -3 2 -2 2 -6 6 -8 4 -9 -1 rrcurveto + -6 -6 -3 -4 -4 hvcurveto + -2 -1 -1 -1 -1 -1 rrcurveto + -230 -408 rmoveto + 9 14 6 14 9 13 16 24 37 51 17 22 48 64 50 62 50 62 29 -105 1 -110 -4 -109 -87 -9 -131 -13 -50 20 rrcurveto + 394 -219 rmoveto + return + + + 41 642 rmoveto + 1 -2 1 -1 -1 vvcurveto + -7 2 -7 5 -5 vhcurveto + 15 -69 -71 -105 61 -45 71 -50 214 60 48 -116 9 -20 3 -24 -3 -22 -13 -128 -51 -35 -120 -6 -38 -1 -62 -5 -26 34 -29 22 -33 -28 16 -33 39 -51 75 0 59 2 83 5 76 21 49 69 rrcurveto + 25 36 0 48 11 42 19 72 -43 43 -42 45 -62 68 -159 -25 -76 26 -20 43 44 56 -6 66 101 14 102 -5 103 -1 37 7 0 42 -35 11 -109 1 -110 5 -108 -17 rrcurveto + -1 1 0 0 1 vvcurveto + -25 33 -45 -26 18 -38 rrcurveto + 407 -673 rmoveto + return + + + 399 660 rmoveto + -36 2 -37 10 -35 -8 -152 -32 -56 -137 -37 -134 -35 -130 55 -175 141 -42 156 -46 135 253 -64 123 -39 78 -32 -3 -81 14 -26 5 -36 -14 -24 -10 -36 -15 -28 -18 -26 -26 19 101 63 130 114 18 rrcurveto + 32 5 31 -8 32 -1 37 7 0 42 -35 11 rrcurveto + -263 -360 rmoveto + 52 57 149 71 42 -110 33 -84 -77 -193 -113 33 -98 30 -29 103 4 92 9 -7 14 -1 14 9 rrcurveto + 401 -299 rmoveto + return + + + 99 610 rmoveto + 63 14 62 -15 64 -2 rrcurveto + 22 23 1 2 22 hvcurveto + -24 -33 -19 -38 -22 -38 -85 
-149 -77 -149 -19 -173 4 -37 43 -4 12 34 19 165 74 145 83 142 34 57 25 61 56 36 21 24 -14 30 -32 -2 rrcurveto + -6 -47 -49 -8 -48 hhcurveto + -71 2 -67 15 -70 -17 -40 -14 16 -51 41 8 rrcurveto + 418 -667 rmoveto + return + + + 289 676 rmoveto + -88 12 -105 -100 -7 -86 -1 -23 -10 -26 9 -22 9 -21 8 -23 13 -20 6 -8 8 -7 9 -5 -42 -15 -31 -26 -21 -57 -31 -83 41 -138 89 -34 25 -9 24 -16 27 1 90 2 -6 -5 70 46 rrcurveto + 60 39 -5 113 -8 58 -2 24 -13 22 -9 22 -8 20 -18 15 -15 16 -7 7 -9 4 -9 3 3 5 3 5 3 6 43 84 -21 87 -3 90 -6 20 -17 8 -14 -3 -10 9 -11 8 -13 1 rrcurveto + -12 -364 rmoveto + 2 -2 2 -1 3 -1 12 -4 13 -1 9 -8 26 -18 13 -38 6 -28 24 -103 -43 -94 -120 16 -104 15 -73 140 80 83 31 33 22 -2 42 7 19 -4 19 3 17 7 rrcurveto + 32 196 rmoveto + 2 -48 -9 -48 -33 -37 -30 -34 -85 64 -8 41 -11 56 73 136 70 -23 8 -3 8 -6 7 -6 2 -31 4 -31 2 -30 rrcurveto + 191 -508 rmoveto + return + + + 379 635 rmoveto + -50 16 -48 25 -52 6 -169 23 -32 -255 81 -95 66 -76 -16 4 97 -2 rrcurveto + 6 9 3 4 4 hvcurveto + 21 21 19 16 16 17 8 -65 4 -65 -6 -62 -4 -33 -9 -54 -40 -14 -66 -23 -78 47 -54 20 -40 13 -19 -50 37 -19 46 -17 45 -17 45 -16 31 -11 34 12 32 2 104 6 0 190 -4 62 rrcurveto + -1 36 -5 36 -5 36 -2 23 -4 24 -3 23 13 51 -17 20 19 51 5 16 -4 13 -9 8 15 11 0 23 -20 16 rrcurveto + -72 -84 rmoveto + 2 -34 4 -35 5 -35 -3 -19 -4 -16 -6 -7 -19 -22 -22 -20 -21 -21 -14 1 -14 0 -15 1 -53 58 -34 59 18 84 5 21 15 17 10 18 21 7 21 16 22 -3 41 -5 38 -19 40 -14 rrcurveto + -1 -2 -2 -1 -1 -2 -14 3 -15 -9 -4 -21 rrcurveto + 193 -551 rmoveto + return + + + f75af910 158c838c 828d8387 5d8a7d7a + 4d5ffb37 3afb2878 fb3e8f66 b68797ad + 92c79ac5 9dc3c287 bf99c18f 9d559f55 + a4569e66 bd9e7eb3 0838f74a 65f7516b + f7570892 8c938c93 1e8da478 977a887a + 8d797d8e 7208acfb 50159847 9b489e49 + 61866381 618ca4d2 a8d2a1d4 08f7a1fc + 54150b + + + c0f8f115 78538277 884f8830 a6318e30 + 8e468891 7e480888 8c878c89 1e867b95 + 78a389c9 91f72b8d c3c1a0a0 a49d9aa4 + c7f22be8 2baea298 a39ba6a1 cfc272f6 + 57be799e 71937497 
50aa4068 55790871 + 82897396 7d898989 898a8808 b4fba915 + dd8daf97 d367d665 9f323c5c 47625089 + 428593b6 8e9f89c0 89b584b6 84b708f7 + 28f7bd15 ce79b23d 5852564f 3b7f3f7f + 088a8b8a 8a1b8c84 07898b8a 8c8a1e73 + 7b9168aa 86d696bb 96bda779 9179907a + 8d618f61 85608a86 b98ab995 b990a194 + 9e92a008 928a9188 901ebe9d a79ac37d + 08fb36fb 85158f78 90798f78 088c0688 + 9f889e89 9f08f833 fc17150b + + + f83bf8f9 155ea564 b85791fb 5ba649fc + 1bb1fb10 bafb2b70 a9f70734 08879092 + 89911bda 90d09eb3 cc9ba696 a1a29fa4 + a771ac69 7f7e8080 82807d7d 78745176 + 85698168 83688276 9b6e967d a143f70b + 9df7b1f4 ec089e9d aa8ba393 b175b075 + b075b179 a5b86aa4 08d4fcf9 150b + + + cef8ec15 93948c94 1b6efb1f 9efb1d9d + fb200889 0791578a 998e4408 8807838d + 848c848e 89997f97 778a0888 888b8a88 + 1f808787 88848008 8a8b8a8a 891e887c + 8e829680 df3ff75a cbc2e0e8 f724a2f7 + 0e4ef734 81a6729e 79a14dd6 fb1b7c34 + 81088a85 858b851b 62898855 b58708f7 + 9a6b15f7 052b64fb 6f38266a 624c6e54 + 82088e07 84f7455a f743b4f7 42089007 + c991c88a bf5f08f7 3ffccc15 0b + + + bff8fa15 9f36903a 87338957 88678757 + 08857bfb 4392751e 9669b487 a98a08f3 + ecb091f2 1fb49385 c1618820 85215cfb + 019a94c4 8ac48ec4 8daa8dab 8daabd8d + be90bd8c b0928bb5 6896598a 5a865889 + 8ed988d9 7bd908e8 a0f7088b e7799e77 + ab958dab 8a8e8b8e 8a8e869a 85927b8f + 21a3fb2e 88216d08 7a85857f 801a8a88 + 8b888c88 08f854fc fa150b + + + cef90215 90068e6c 7969876d 876b8c7f + 8a61082d 0783808c 7d93828d fb0190fb + 018cfb01 8f61c18e 8db48af5 85f689f6 + d79ad97b d99caa9d 7fb46789 437b4298 + 44818ab6 8cb68db6 088db59e b48cb2f7 + 0287f584 f702a3aa 9d7fb467 89fb0174 + fb0095fb 028f7ba1 7286817b 67858b5a + b38708f8 45fd0215 0b + + + 500 0 rmoveto + return + + + 0b + + + + + + -91 callsubr + -91 callsubr + endchar + + + -107 callsubr + -106 callsubr + endchar + + + -106 callsubr + -107 callsubr + endchar + + + -106 callsubr + -106 callsubr + endchar + + + -106 callsubr + -105 callsubr + endchar + + + -106 callsubr + -104 callsubr + endchar + + + -106 callsubr 
+ -103 callsubr + endchar + + + -106 callsubr + -102 callsubr + endchar + + + -106 callsubr + -101 callsubr + endchar + + + -106 callsubr + -100 callsubr + endchar + + + -106 callsubr + -99 callsubr + endchar + + + -106 callsubr + -98 callsubr + endchar + + + -107 callsubr + -105 callsubr + endchar + + + -105 callsubr + -107 callsubr + endchar + + + -105 callsubr + -106 callsubr + endchar + + + -105 callsubr + -105 callsubr + endchar + + + -105 callsubr + -104 callsubr + endchar + + + -105 callsubr + -103 callsubr + endchar + + + -105 callsubr + -102 callsubr + endchar + + + -105 callsubr + -101 callsubr + endchar + + + -105 callsubr + -100 callsubr + endchar + + + -105 callsubr + -99 callsubr + endchar + + + -105 callsubr + -98 callsubr + endchar + + + -107 callsubr + -104 callsubr + endchar + + + -104 callsubr + -107 callsubr + endchar + + + -104 callsubr + -106 callsubr + endchar + + + -104 callsubr + -105 callsubr + endchar + + + -104 callsubr + -104 callsubr + endchar + + + -104 callsubr + -103 callsubr + endchar + + + -104 callsubr + -102 callsubr + endchar + + + -104 callsubr + -101 callsubr + endchar + + + -104 callsubr + -100 callsubr + endchar + + + -104 callsubr + -99 callsubr + endchar + + + -104 callsubr + -98 callsubr + endchar + + + -107 callsubr + -103 callsubr + endchar + + + -103 callsubr + -107 callsubr + endchar + + + -103 callsubr + -106 callsubr + endchar + + + -103 callsubr + -105 callsubr + endchar + + + -103 callsubr + -104 callsubr + endchar + + + -103 callsubr + -103 callsubr + endchar + + + -103 callsubr + -102 callsubr + endchar + + + -103 callsubr + -101 callsubr + endchar + + + -103 callsubr + -100 callsubr + endchar + + + -103 callsubr + -99 callsubr + endchar + + + -103 callsubr + -98 callsubr + endchar + + + -107 callsubr + -102 callsubr + endchar + + + -102 callsubr + -107 callsubr + endchar + + + -102 callsubr + -106 callsubr + endchar + + + -102 callsubr + -105 callsubr + endchar + + + -102 callsubr + -104 callsubr + endchar + + 
+ -102 callsubr + -103 callsubr + endchar + + + -102 callsubr + -102 callsubr + endchar + + + -102 callsubr + -101 callsubr + endchar + + + -102 callsubr + -100 callsubr + endchar + + + -102 callsubr + -99 callsubr + endchar + + + -102 callsubr + -98 callsubr + endchar + + + -107 callsubr + -101 callsubr + endchar + + + -101 callsubr + -107 callsubr + endchar + + + -101 callsubr + -106 callsubr + endchar + + + -101 callsubr + -105 callsubr + endchar + + + -101 callsubr + -104 callsubr + endchar + + + -101 callsubr + -103 callsubr + endchar + + + -101 callsubr + -102 callsubr + endchar + + + -101 callsubr + -101 callsubr + endchar + + + -101 callsubr + -100 callsubr + endchar + + + -101 callsubr + -99 callsubr + endchar + + + -101 callsubr + -98 callsubr + endchar + + + -107 callsubr + -100 callsubr + endchar + + + -100 callsubr + -107 callsubr + endchar + + + -100 callsubr + -106 callsubr + endchar + + + -100 callsubr + -105 callsubr + endchar + + + -100 callsubr + -104 callsubr + endchar + + + -100 callsubr + -103 callsubr + endchar + + + -100 callsubr + -102 callsubr + endchar + + + -100 callsubr + -101 callsubr + endchar + + + -100 callsubr + -100 callsubr + endchar + + + -100 callsubr + -99 callsubr + endchar + + + -100 callsubr + -98 callsubr + endchar + + + -107 callsubr + -99 callsubr + endchar + + + -99 callsubr + -107 callsubr + endchar + + + -99 callsubr + -106 callsubr + endchar + + + -99 callsubr + -105 callsubr + endchar + + + -99 callsubr + -104 callsubr + endchar + + + -99 callsubr + -103 callsubr + endchar + + + -99 callsubr + -102 callsubr + endchar + + + -99 callsubr + -101 callsubr + endchar + + + -99 callsubr + -100 callsubr + endchar + + + -99 callsubr + -99 callsubr + endchar + + + -99 callsubr + -98 callsubr + endchar + + + -107 callsubr + -98 callsubr + endchar + + + -98 callsubr + -107 callsubr + endchar + + + -98 callsubr + -106 callsubr + endchar + + + -98 callsubr + -105 callsubr + endchar + + + -98 callsubr + -104 callsubr + endchar + + 
+ -98 callsubr + -103 callsubr + endchar + + + -98 callsubr + -102 callsubr + endchar + + + -98 callsubr + -101 callsubr + endchar + + + -98 callsubr + -100 callsubr + endchar + + + -98 callsubr + -99 callsubr + endchar + + + -98 callsubr + -98 callsubr + endchar + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,210 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.head fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.head --- fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.head 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.head 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,25 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.hhea fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.hhea --- fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.hhea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.hhea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.hmtx fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.hmtx --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.hmtx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.hmtx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,107 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.maxp fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.maxp --- fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.maxp 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.maxp 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.name fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.name --- fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.name 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.name 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,25 @@ + + + + + + base + + + Regular + + + base + + + base + + + Version1.0 + + + base + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.OS_2 fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.OS_2 --- fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.OS_2 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.OS_2 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,57 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.post fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.post --- fonttools-3.0/Tests/ttLib/tables/data/aots/base.ttx.post 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/base.ttx.post 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 
+1,16 @@ + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,483 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font2.ttx.GSUB --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,494 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font3.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font3.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font3.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font3.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,510 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font4.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font4.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/classdef1_font4.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef1_font4.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,469 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,483 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font2.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,494 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font3.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font3.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font3.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font3.ttx.GSUB 
2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,510 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font4.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font4.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/classdef2_font4.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/classdef2_font4.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,469 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap0_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap0_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap0_font1.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap0_font1.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap0_font1.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap0_font1.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,13 @@ + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap10_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap10_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap10_font1.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap10_font1.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap10_font1.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap10_font1.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ + + + + + + + 000a0000 0000001a 00000000 00109423 + 00000003 001a001b 0020 + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap10_font2.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap10_font2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap10_font2.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap10_font2.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap10_font2.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap10_font2.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ + + + + + + + 000a0000 00000014 00000000 00000000 + 00000000 + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap12_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap12_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap12_font1.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap12_font1.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap12_font1.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap12_font1.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap14_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap14_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap14_font1.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap14_font1.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap14_font1.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap14_font1.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap2_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap2_font1.otf differ diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/cmap2_font1.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap2_font1.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap2_font1.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap2_font1.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font1.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font1.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font1.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font1.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font2.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font2.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font2.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font2.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font3.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font3.ttx.cmap --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font3.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font3.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font4.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font4.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap4_font4.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap4_font4.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1011 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap6_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap6_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap6_font1.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap6_font1.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap6_font1.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap6_font1.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,13 @@ + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap6_font2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap6_font2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap6_font2.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap6_font2.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap6_font2.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap6_font2.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + 
+ + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap8_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap8_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap8_font1.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap8_font1.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap8_font1.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap8_font1.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,529 @@ + + + + + + + 00080000 00002064 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 
00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 
00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 
00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 
00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 
00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 
00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 
00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 
00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 
00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 
00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000000 + 00000000 00000000 00000000 00000007 + 00000034 00000034 00000011 00000035 + 00000035 00000038 00000036 00000036 + 0000000c 00008432 00008434 00000014 + 00009232 00009234 00000017 00109423 + 00109424 0000001a 00109425 00109425 + 00000020 + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_composition_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_composition_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_composition_font1.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_composition_font1.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_composition_font1.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_composition_font1.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font1.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font1.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font1.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font1.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font2.otf differ diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font2.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font2.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font2.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font2.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font3.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font3.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font3.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font3.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font4.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font4.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font4.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font4.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font5.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font5.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font5.ttx.cmap fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font5.ttx.cmap --- fonttools-3.0/Tests/ttLib/tables/data/aots/cmap_subtableselection_font5.ttx.cmap 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/cmap_subtableselection_font5.ttx.cmap 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_lookupflag_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_lookupflag_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_lookupflag_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_lookupflag_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f1.ttx.GPOS 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f3.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f3.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f3.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f3.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f4.otf differ diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f4.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f4.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_1_simple_f4.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_1_simple_f4.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_2_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_2_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_2_font1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_2_font1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_2_font1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_2_font1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_2_font2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_2_font2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_2_font2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_2_font2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_2_font2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_2_font2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_2_font2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_2_font2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos1_2_font2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos1_2_font2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_font6.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_font6.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_font6.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_font6.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_font6.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_font6.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_font7.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_font7.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_font7.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_font7.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_font7.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_font7.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f1.ttx.GDEF --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_lookupflag_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files 
/tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_next_glyph_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_simple_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_simple_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_1_simple_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_1_simple_f1.ttx.GPOS 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files 
/tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font3.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font3.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font3.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font3.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font3.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font3.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font3.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font3.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font4.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font4.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font4.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font4.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font5.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font5.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font5.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font5.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos2_2_font5.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos2_2_font5.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,67 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font2.ttx.GPOS 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,67 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font3.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font3.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font3.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font3.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font3.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font3.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos3_font3.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos3_font3.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_lookupflag_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_multiple_anchors_1.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_multiple_anchors_1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_multiple_anchors_1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_multiple_anchors_1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_multiple_anchors_1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_multiple_anchors_1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_multiple_anchors_1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_multiple_anchors_1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_multiple_anchors_1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_multiple_anchors_1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,107 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_simple_1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_simple_1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_simple_1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_simple_1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_simple_1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_simple_1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_simple_1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_simple_1.ttx.GPOS --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos4_simple_1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos4_simple_1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos5_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos5_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos5_font1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos6_font1.otf 
and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos6_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos6_font1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos6_font1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos6_font1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos6_font1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos6_font1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos6_font1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos6_font1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos6_font1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos7_1_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos7_1_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos7_1_font1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos7_1_font1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos7_1_font1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos7_1_font1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos9_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos9_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos9_font1.ttx.GPOS 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos9_font1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos9_font1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos9_font1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos9_font2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos9_font2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos9_font2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos9_font2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos9_font2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos9_font2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f1.ttx.GPOS --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,124 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,128 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f3.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f3.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f3.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f3.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f3.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f3.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f3.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f3.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f3.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,127 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f4.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f4.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f4.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f4.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f4.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f4.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f4.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_boundary_f4.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,127 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_lookupflag_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_lookupflag_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_lookupflag_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_lookupflag_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,131 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,142 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f2.ttx.GDEF 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_multiple_subrules_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,142 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_next_glyph_f1.ttx.GPOS 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_next_glyph_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_next_glyph_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_next_glyph_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,146 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,132 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_simple_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,131 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_successive_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_successive_f1.ttx.GDEF 
2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_successive_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_successive_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining1_successive_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining1_successive_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,134 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,183 @@ + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,187 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f3.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f3.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f3.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f3.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f3.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f3.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f3.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f3.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f3.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,186 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f4.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f4.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f4.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f4.ttx.GDEF 
2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f4.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f4.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f4.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_boundary_f4.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,186 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_lookupflag_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_lookupflag_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_lookupflag_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_lookupflag_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,190 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,201 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_multiple_subrules_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,201 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_next_glyph_f1.otf differ diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_next_glyph_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_next_glyph_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_next_glyph_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_next_glyph_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,203 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,191 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_simple_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,190 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_successive_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_successive_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_successive_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining2_successive_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining2_successive_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,193 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f1.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,124 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,128 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f3.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f3.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f3.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f3.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f3.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f3.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f3.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f3.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f4.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f4.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f4.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f4.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f4.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f4.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f4.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_boundary_f4.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_lookupflag_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_lookupflag_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_lookupflag_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_lookupflag_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,137 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_next_glyph_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_next_glyph_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_next_glyph_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_next_glyph_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,128 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,132 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f2.otf differ diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_simple_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,137 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_successive_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_successive_f1.ttx.GPOS 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_successive_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_chaining3_successive_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_chaining3_successive_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,138 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,120 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files 
/tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_boundary_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,123 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_expansion_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_expansion_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_expansion_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_expansion_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_expansion_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_expansion_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ 
-0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_expansion_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_expansion_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_expansion_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_expansion_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,133 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_lookupflag_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f1.ttx.GDEF --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,134 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f2.ttx.GPOS 
--- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_multiple_subrules_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,134 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_next_glyph_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_next_glyph_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_next_glyph_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_next_glyph_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,124 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files 
/tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_simple_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_simple_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_simple_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_simple_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_simple_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,133 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_simple_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_simple_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_simple_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_simple_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_simple_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_simple_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_simple_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_simple_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_simple_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_simple_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_successive_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_successive_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_successive_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context1_successive_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context1_successive_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,130 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,124 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_boundary_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,127 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_classes_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_classes_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_classes_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_classes_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_classes_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_classes_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_classes_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_classes_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_classes_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_classes_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,140 @@ + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_classes_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_classes_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_classes_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_classes_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_classes_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_classes_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_classes_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_classes_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_classes_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_classes_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,155 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_expansion_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_expansion_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_expansion_f1.ttx.GDEF 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_expansion_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_expansion_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_expansion_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_expansion_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_expansion_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_expansion_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_expansion_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,133 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f1.ttx.GPOS --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,141 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_lookupflag_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,133 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files 
/tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,142 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f2.ttx.GDEF --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_multiple_subrules_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,142 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_next_glyph_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_next_glyph_f1.ttx.GPOS --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_next_glyph_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_next_glyph_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,128 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_simple_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_simple_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_simple_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_simple_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_simple_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,141 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_simple_f2.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_simple_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_simple_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_simple_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_simple_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_simple_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_simple_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_simple_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_simple_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_simple_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,129 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_successive_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_successive_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_successive_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context2_successive_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context2_successive_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,140 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,116 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_boundary_f2.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,117 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,131 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f2.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f2.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f2.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_lookupflag_f2.ttx.GPOS 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,123 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_next_glyph_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_next_glyph_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_next_glyph_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_next_glyph_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,120 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_simple_f1.ttx.GDEF 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_simple_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_simple_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_simple_f1.ttx.GPOS --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_simple_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_simple_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,131 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_successive_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_successive_f1.ttx.GPOS fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_successive_f1.ttx.GPOS --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gpos_context3_successive_f1.ttx.GPOS 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gpos_context3_successive_f1.ttx.GPOS 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,130 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_modulo_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_modulo_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_modulo_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_modulo_f1.ttx.GSUB --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_modulo_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_modulo_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,66 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_1_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_1_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_2_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_2_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_2_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_2_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_2_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_2_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_2_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_2_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_2_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_2_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_2_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_2_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_2_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_2_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub1_2_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub1_2_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_multiple_sequences_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_multiple_sequences_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_multiple_sequences_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_multiple_sequences_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_multiple_sequences_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_multiple_sequences_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub2_1_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub2_1_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_multiple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_multiple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_multiple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_multiple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_multiple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_multiple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub3_1_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub3_1_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files 
/tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligatures_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligsets_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligsets_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligsets_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligsets_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligsets_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_multiple_ligsets_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub4_1_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub4_1_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub7_font1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub7_font1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub7_font1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub7_font1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub7_font1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub7_font1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub7_font2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub7_font2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub7_font2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub7_font2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub7_font2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub7_font2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f1.ttx.GDEF 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,103 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,107 @@ + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f3.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f3.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f3.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f3.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f3.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f3.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f3.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f3.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,106 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f4.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f4.ttx.GDEF --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f4.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f4.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f4.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f4.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f4.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_boundary_f4.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,106 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f2.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_multiple_subrules_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 
+1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_next_glyph_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_next_glyph_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_next_glyph_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_next_glyph_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,125 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,111 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_simple_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_successive_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_successive_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_successive_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining1_successive_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining1_successive_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,113 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,162 @@ + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,166 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f3.otf differ diff 
-Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f3.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f3.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f3.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f3.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f3.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f3.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f3.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f3.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,165 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f4.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f4.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f4.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f4.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f4.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f4.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f4.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_boundary_f4.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,165 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,169 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,180 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f2.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_multiple_subrules_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,180 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_next_glyph_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_next_glyph_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_next_glyph_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_next_glyph_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,182 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,170 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_simple_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,169 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files 
/tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_successive_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_successive_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_successive_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining2_successive_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining2_successive_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,172 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 
+++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,103 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,107 @@ + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f3.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f3.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f3.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f3.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f3.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f3.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f3.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f3.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f3.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f3.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f4.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f4.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f4.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f4.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f4.ttx.GDEF 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f4.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f4.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f4.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f4.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_boundary_f4.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ 
-0,0 +1,116 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_next_glyph_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_next_glyph_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_next_glyph_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_next_glyph_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,107 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f1.ttx.GDEF --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,111 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_simple_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,116 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_successive_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_successive_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_successive_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_chaining3_successive_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_chaining3_successive_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,117 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f1.otf differ diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,99 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f2.ttx.GSUB --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_boundary_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,102 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_expansion_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_expansion_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_expansion_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_expansion_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_expansion_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_expansion_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_expansion_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_expansion_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_expansion_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_expansion_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f1.otf and 
/tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,112 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_lookupflag_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,113 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_multiple_subrules_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,113 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_next_glyph_f1.ttx.GDEF 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_next_glyph_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_next_glyph_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_next_glyph_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_next_glyph_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,103 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_simple_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,112 @@ + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_simple_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_simple_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_simple_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_simple_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_simple_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_simple_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_simple_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_simple_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_simple_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_simple_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_successive_f1.ttx.GDEF 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_successive_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_successive_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context1_successive_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context1_successive_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,109 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,103 @@ 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_boundary_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,106 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_classes_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_classes_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_classes_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_classes_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_classes_f1.ttx.GDEF 
1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_classes_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_classes_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_classes_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_classes_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_classes_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,119 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_classes_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_classes_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_classes_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_classes_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_classes_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_classes_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_classes_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_classes_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_classes_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_classes_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 
+1,134 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_expansion_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_expansion_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_expansion_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_expansion_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_expansion_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_expansion_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_expansion_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_expansion_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_expansion_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_expansion_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,112 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f1.ttx.GDEF 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,120 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f2.ttx.GSUB --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_lookupflag_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,112 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files 
/tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_multiple_subrules_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,121 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_next_glyph_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_next_glyph_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_next_glyph_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_next_glyph_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,107 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_simple_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,120 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_simple_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_simple_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_simple_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_simple_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_simple_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_simple_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_simple_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_simple_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_simple_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_simple_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,108 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_successive_f1.ttx.GDEF 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_successive_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_successive_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context2_successive_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context2_successive_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,119 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 
+0000 @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_boundary_f2.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,96 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f1.ttx.GDEF 
1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f2.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f2.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f2.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f2.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f2.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f2.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f2.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f2.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f2.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_lookupflag_f2.ttx.GSUB 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,102 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_next_glyph_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_next_glyph_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_next_glyph_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_next_glyph_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_next_glyph_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_next_glyph_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_next_glyph_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_next_glyph_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_next_glyph_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_next_glyph_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,99 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_simple_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_simple_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_simple_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_simple_f1.ttx.GDEF --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_simple_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_simple_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_simple_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_simple_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_simple_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_simple_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,110 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_successive_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_successive_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_successive_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_successive_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_successive_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_successive_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_successive_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_successive_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/gsub_context3_successive_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/ttLib/tables/data/aots/gsub_context3_successive_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,109 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_attach_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_attach_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_attach_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_attach_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_attach_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_attach_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_attach_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_attach_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_attach_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_attach_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_base_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_base_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_base_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_base_f1.ttx.GDEF --- 
fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_base_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_base_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_base_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_base_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_base_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_base_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_combination_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_combination_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_combination_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_combination_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_combination_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_combination_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_combination_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_combination_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_combination_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_combination_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_ligatures_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_ligatures_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_ligatures_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_ligatures_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_ligatures_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_ligatures_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_ligatures_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_ligatures_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_ligatures_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_ligatures_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_marks_f1.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_marks_f1.otf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_marks_f1.ttx.GDEF fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_marks_f1.ttx.GDEF --- fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_marks_f1.ttx.GDEF 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_marks_f1.ttx.GDEF 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_marks_f1.ttx.GSUB fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_marks_f1.ttx.GSUB --- fonttools-3.0/Tests/ttLib/tables/data/aots/lookupflag_ignore_marks_f1.ttx.GSUB 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/lookupflag_ignore_marks_f1.ttx.GSUB 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/aots/README fonttools-3.21.2/Tests/ttLib/tables/data/aots/README --- fonttools-3.0/Tests/ttLib/tables/data/aots/README 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/aots/README 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ +The *.otf data in this directory was built from: + +https://github.com/adobe-type-tools/aots + +at the following revision: + +1c41fd20d2b020177625541a228c4c7c934879ef + +Fonts were built by running "make" and copying tests/*.otf over. +Original .xml files were not copied to save space. 
Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/C_F_F__2.bin and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/C_F_F__2.bin differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/C_F_F__2.ttx fonttools-3.21.2/Tests/ttLib/tables/data/C_F_F__2.ttx --- fonttools-3.0/Tests/ttLib/tables/data/C_F_F__2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/C_F_F__2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 80 0 0 10 -6 -10 1 blend + 0 rmoveto + 80 -20 -55 -40 25 40 1 blend + 0 rlineto + 400 652 20 55 20 -13 -20 18 25 0 0 0 2 blend + rlineto + -80 20 55 40 -25 -40 1 blend + 0 rlineto + -400 -652 -20 -55 -20 13 20 -18 -25 0 0 0 2 blend + rlineto + 480 0 0 -20 12 20 1 blend + 0 rmoveto + -400 652 -20 -55 -20 13 20 18 25 0 0 0 2 blend + rlineto + -80 20 55 40 -25 -40 1 blend + 0 rlineto + 400 -652 20 55 20 -13 -20 -18 -25 0 0 0 2 blend + rlineto + 80 -20 -55 -40 25 40 1 blend + 0 rlineto + -410 60 -10 -45 10 -6 -10 -10 -38 0 0 0 2 blend + rmoveto + 0 532 38 101 0 0 0 1 blend + rlineto + 340 20 90 0 0 0 1 blend + 0 rlineto + 0 -532 -38 -101 0 0 0 1 blend + rlineto + -340 -20 -90 0 0 0 1 blend + 0 rlineto + -70 -60 10 45 0 0 0 10 38 0 0 0 2 blend + rmoveto + 480 0 rlineto + 0 652 18 25 0 0 0 1 blend + rlineto + -480 0 rlineto + 0 -652 -18 -25 0 0 0 1 blend + rlineto + + + 260 39 -12 -15 0 0 0 -4 -32 -20 13 20 2 blend + rmoveto + -65 26 0 0 0 0 1 blend + 0 -28 11 -49 24 -17 -11 0 0 0 -6 4 0 0 0 3 3 0 0 0 -6 26 0 0 0 4 blend + rrcurveto + 78 -55 -25 -42 0 0 0 19 7 5 -4 -5 2 blend + rlineto + -8 85 -9 -20 0 0 0 -9 15 15 -9 -15 2 blend + rlineto + -5 52 -22 20 -43 -7 1 1 -1 -1 1 -36 0 0 0 0 10 -1 1 1 -7 -16 0 0 0 19 32 0 0 0 5 blend + 0 rrcurveto + -26 4 12 0 0 0 1 blend + 0 
-27 -14 -14 -38 13 19 0 0 0 3 7 0 0 0 5 13 0 0 0 18 24 0 0 0 4 blend + rrcurveto + 0 -90 71 -50 139 4 24 0 0 0 3 5 0 0 0 10 -10 0 0 0 -9 -1 0 0 0 -32 -32 0 0 0 5 blend + 0 rrcurveto + 163 -27 -72 0 0 0 1 blend + 0 99 84 -17 -9 0 0 0 -8 -31 2 -1 -2 2 blend + 0 108 -1 3 0 0 0 1 blend + rrcurveto + 0 107 -56 54 -138 56 -25 -37 0 0 0 15 30 0 0 0 11 12 -2 1 2 3 4 0 0 0 -9 1 0 0 0 5 blend + rrcurveto + -32 13 -6 13 0 0 0 0 -5 0 0 0 2 blend + rlineto + -63 25 -30 18 -8 -30 0 0 0 -2 14 0 0 0 -10 -12 0 0 0 17 31 0 0 0 4 blend + 0 48 16 20 8 -5 -8 1 blend + rrcurveto + 0 63 43 25 61 12 28 -6 4 6 14 17 -2 1 2 12 23 18 -12 -18 13 27 2 -1 -2 4 blend + 0 rrcurveto + 42 -12 14 0 0 0 1 blend + 0 27 -4 52 -24 9 8 0 0 0 -1 -10 0 0 0 -10 -8 0 0 0 7 -26 0 0 0 4 blend + rrcurveto + -85 47 33 47 -3 2 3 -11 0 5 -3 -5 2 blend + rlineto + 10 -67 7 18 3 -2 -3 -9 -33 -25 16 25 2 blend + rlineto + 11 -75 37 -14 39 1 -5 -1 1 1 23 60 1 -1 -1 -12 -27 1 -1 -1 0 9 -1 1 1 -17 -28 -1 1 1 5 blend + 0 rrcurveto + 26 -7 -12 1 -1 -1 1 blend + 0 29 15 5 41 -12 -21 -2 1 2 -5 -8 1 -1 -1 3 -4 2 -1 -2 -20 -27 -1 1 1 4 blend + rrcurveto + 0 84 -84 52 -121 -6 -24 0 0 0 2 4 0 0 0 4 17 8 -5 -8 8 -4 0 0 0 20 37 -8 5 8 5 blend + 0 rrcurveto + -158 43 66 0 0 0 1 blend + 0 -85 -80 2 3 0 0 0 0 29 0 0 0 2 blend + 0 -103 1 -5 0 0 0 1 blend + rrcurveto + 0 -105 64 -55 117 -49 5 25 0 0 0 -2 -19 0 0 0 1 2 0 0 0 -12 -25 0 0 0 12 7 0 0 0 5 blend + rrcurveto + 31 -13 6 6 0 0 0 0 -4 0 0 0 2 blend + rlineto + 72 -30 28 -19 13 42 0 0 0 0 -22 0 0 0 8 -2 0 0 0 -11 -27 0 0 0 4 blend + 0 -63 0 -2 -5 3 5 1 blend + rrcurveto + 0 -49 -39 -35 -66 -25 -43 -2 1 2 -14 -26 -2 1 2 -7 -19 -13 9 13 -16 -24 2 -1 -2 4 blend + 0 rrcurveto + 65 275 -34 -47 -10 6 10 12 52 20 -13 -20 2 blend + rmoveto + 0 417 11 11 0 0 0 1 blend + rlineto + -71 31 49 20 -12 -20 1 blend + 0 rlineto + 0 -417 -11 -11 0 0 0 1 blend + rlineto + 71 -31 -49 -20 12 20 1 blend + 0 rlineto + -79 -429 38 57 20 -12 -20 -8 -20 0 0 0 2 blend + rmoveto + 71 -31 -49 -20 12 20 1 
blend + 0 rlineto + 0 429 8 20 0 0 0 1 blend + rlineto + -71 31 49 20 -12 -20 1 blend + 0 rlineto + 0 -429 -8 -20 0 0 0 1 blend + rlineto + + + 260 39 -12 -15 0 0 0 -4 -32 -20 13 20 2 blend + rmoveto + -65 26 0 0 0 0 1 blend + 0 -28 11 -49 24 -17 -11 0 0 0 -6 4 0 0 0 3 3 0 0 0 -6 26 0 0 0 4 blend + rrcurveto + 78 -55 -25 -42 0 0 0 19 7 5 -4 -5 2 blend + rlineto + -8 85 -9 -20 0 0 0 -9 15 15 -9 -15 2 blend + rlineto + -5 52 -22 20 -43 -7 1 1 -1 -1 1 -36 0 0 0 0 10 -1 1 1 -7 -16 0 0 0 19 32 0 0 0 5 blend + 0 rrcurveto + -26 4 12 0 0 0 1 blend + 0 -27 -14 -14 -38 13 19 0 0 0 3 7 0 0 0 5 13 0 0 0 18 24 0 0 0 4 blend + rrcurveto + 0 -90 71 -50 139 4 24 0 0 0 3 5 0 0 0 10 -10 0 0 0 -9 -1 0 0 0 -32 -32 0 0 0 5 blend + 0 rrcurveto + 163 -27 -72 0 0 0 1 blend + 0 99 84 -17 -9 0 0 0 -8 -31 2 -1 -2 2 blend + 0 108 -1 3 0 0 0 1 blend + rrcurveto + 0 107 -59 47 -135 63 -25 -37 0 0 0 18 33 0 0 0 18 19 -2 1 2 0 1 0 0 0 -16 -6 0 0 0 5 blend + rrcurveto + -32 15 -6 13 0 0 0 -2 -7 0 0 0 2 blend + rlineto + -55 26 -26 21 -16 -38 4 -3 -4 -3 13 -2 1 2 -14 -16 -2 2 2 14 28 4 -2 -4 4 blend + 0 45 19 23 8 -5 -8 1 blend + rrcurveto + 0 60 38 25 53 15 31 -6 3 6 19 22 -3 2 3 12 23 16 -10 -16 21 35 2 -2 -2 4 blend + 0 rrcurveto + 43 -13 13 -1 1 1 1 blend + 0 27 -4 52 -24 9 8 0 0 0 -1 -10 0 0 0 -10 -8 0 0 0 7 -26 0 0 0 4 blend + rrcurveto + -85 47 33 47 -3 2 3 -11 0 5 -3 -5 2 blend + rlineto + 10 -67 7 18 3 -2 -3 -9 -33 -25 16 25 2 blend + rlineto + 11 -75 37 -14 39 1 -5 -1 1 1 23 60 1 -1 -1 -12 -27 1 -1 -1 0 9 -1 1 1 -17 -28 -1 1 1 5 blend + 0 rrcurveto + 26 -7 -12 1 -1 -1 1 blend + 0 29 15 5 41 -12 -21 -2 1 2 -5 -8 1 -1 -1 3 -4 2 -1 -2 -20 -27 -1 1 1 4 blend + rrcurveto + 0 84 -84 52 -121 -6 -24 0 0 0 2 4 0 0 0 4 17 8 -5 -8 8 -4 0 0 0 20 37 -8 5 8 5 blend + 0 rrcurveto + -155 40 63 0 0 0 1 blend + 0 -84 -80 1 2 0 0 0 0 29 0 0 0 2 blend + 0 -103 1 -5 0 0 0 1 blend + rrcurveto + 0 -104 65 -49 112 -54 4 24 0 0 0 -3 -20 0 0 0 -5 -4 0 0 0 -7 -20 0 0 0 17 12 0 0 0 5 blend + rrcurveto + 31 -15 6 6 
0 0 0 2 -2 0 0 0 2 blend + rlineto + 66 -32 28 -22 19 48 0 0 0 2 -20 0 0 0 8 -2 -4 3 4 -8 -24 -10 6 10 4 blend + 0 -55 -8 -10 -4 3 4 1 blend + rrcurveto + 0 -49 -41 -38 -58 -25 -43 -3 2 3 -12 -24 1 -1 -1 -4 -16 -3 2 3 -24 -32 3 -2 -3 4 blend + 0 rrcurveto + 65 573 -34 -47 -10 6 10 27 77 32 -21 -32 2 blend + rmoveto + 0 119 -4 -14 -12 8 12 1 blend + rlineto + -71 31 49 20 -12 -20 1 blend + 0 rlineto + 0 -119 4 14 12 -8 -12 1 blend + rlineto + 71 -31 -49 -20 12 20 1 blend + 0 rlineto + -69 -727 28 47 10 -6 -10 -23 -45 -12 8 12 2 blend + rmoveto + 71 -31 -49 -20 13 20 1 blend + 0 rlineto + 0 129 -2 -18 -10 6 10 1 blend + rlineto + -71 31 49 20 -13 -20 1 blend + 0 rlineto + 0 -129 2 18 10 -6 -10 1 blend + rlineto + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/C_F_F_.bin and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/C_F_F_.bin differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/C_F_F_.ttx fonttools-3.21.2/Tests/ttLib/tables/data/C_F_F_.ttx --- fonttools-3.0/Tests/ttLib/tables/data/C_F_F_.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/C_F_F_.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,282 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + -97 0 50 600 50 hstem + 0 50 400 50 vstem + 0 0 rmoveto + 500 0 rlineto + 0 700 rlineto + -500 0 rlineto + 0 -700 rlineto + 250 395 rmoveto + -170 255 rlineto + 340 0 rlineto + -170 -255 rlineto + 30 -45 rmoveto + 170 255 rlineto + 0 -510 rlineto + -170 255 rlineto + -200 -300 rmoveto + 170 255 rlineto + 170 -255 rlineto + -340 0 rlineto + -30 555 rmoveto + 170 -255 rlineto + -170 -255 rlineto + 0 510 rlineto + endchar + + + 56 523 26 rmoveto + -120 -6 rlineto + 0 -20 rlineto + 248 0 rlineto + 0 20 
rlineto + -114 6 rlineto + -14 0 rlineto + -424 0 rmoveto + -87 -6 rlineto + 0 -20 rlineto + 198 0 rlineto + 0 20 rlineto + -97 6 rlineto + -14 0 rlineto + 369 221 rmoveto + -8 20 rlineto + -278 0 rlineto + -9 -20 rlineto + 295 0 rlineto + -161 430 rmoveto + -222 -677 rlineto + 27 0 rlineto + 211 660 rlineto + -17 -10 rlineto + 216 -650 rlineto + 34 0 rlineto + -229 677 rlineto + -20 0 rlineto + endchar + + + -3 167 310 rmoveto + 0 -104 0 -104 -2 -102 rrcurveto + 34 0 rlineto + -2 102 0 104 0 144 rrcurveto + 0 7 rlineto + 0 114 0 104 2 102 rrcurveto + -34 0 rlineto + 2 -102 0 -104 0 -104 rrcurveto + 0 -57 rlineto + 8 340 rmoveto + 7 0 rlineto + 0 27 rlineto + -124 0 rlineto + 0 -20 rlineto + 117 -7 rlineto + 0 -623 rmoveto + -117 -7 rlineto + 0 -20 rlineto + 124 0 rlineto + 0 27 rlineto + -7 0 rlineto + 7 316 rmoveto + 101 0 rlineto + 162 0 69 -60 0 -102 rrcurveto + 0 -102 -75 -57 -125 0 rrcurveto + -132 0 rlineto + 0 -22 rlineto + 131 0 rlineto + 156 0 75 77 0 102 rrcurveto + 0 100 -68 76 -162 2 rrcurveto + -10 -8 rlineto + 141 11 64 75 0 84 rrcurveto + 0 95 -66 63 -146 0 rrcurveto + -115 0 rlineto + 0 -22 rlineto + 104 0 rlineto + 145 0 50 -57 0 -76 rrcurveto + 0 -95 -75 -64 -136 0 rrcurveto + -88 0 rlineto + 0 -20 rlineto + endchar + + + 47 386 7 rmoveto + -167 0 -123 128 0 203 rrcurveto + 0 199 116 133 180 0 rrcurveto + 73 0 40 -17 56 -37 rrcurveto + -21 29 rlineto + 18 -145 rlineto + 24 0 rlineto + -4 139 rlineto + -60 35 -49 18 -80 0 rrcurveto + -190 0 -135 -144 0 -210 rrcurveto + 0 -209 129 -144 195 0 rrcurveto + 72 0 57 12 67 41 rrcurveto + 4 139 rlineto + -24 0 rlineto + -18 -139 rlineto + 17 20 rlineto + -55 -37 -55 -14 -67 0 rrcurveto + endchar + + + 245 5 rmoveto + -65 0 -39 15 -46 50 rrcurveto + 36 -48 rlineto + -28 100 rlineto + -6 15 -10 5 -11 0 rrcurveto + -14 0 -8 -7 -1 -14 rrcurveto + 24 -85 61 -51 107 0 rrcurveto + 91 0 90 54 0 112 rrcurveto + 0 70 -26 66 -134 57 rrcurveto + -19 8 rlineto + -93 39 -42 49 0 68 rrcurveto + 0 91 60 48 88 0 rrcurveto 
+ 56 0 35 -14 44 -50 rrcurveto + -38 47 rlineto + 28 -100 rlineto + 6 -15 10 -5 11 0 rrcurveto + 14 0 8 7 1 14 rrcurveto + -24 88 -67 48 -84 0 rrcurveto + -92 0 -82 -51 0 -108 rrcurveto + 0 -80 45 -53 92 -42 rrcurveto + 37 -17 rlineto + 114 -52 26 -46 0 -65 rrcurveto + 0 -93 -65 -55 -90 0 rrcurveto + 18 318 rmoveto + 0 439 rlineto + -22 0 rlineto + 0 -425 rlineto + 22 -14 rlineto + -20 -438 rmoveto + 22 0 rlineto + 0 438 rlineto + -22 14 rlineto + 0 -452 rlineto + endchar + + + 3 245 5 rmoveto + -65 0 -39 15 -46 50 rrcurveto + 36 -48 rlineto + -28 100 rlineto + -6 15 -10 5 -11 0 rrcurveto + -14 0 -8 -7 -1 -14 rrcurveto + 24 -85 61 -51 107 0 rrcurveto + 91 0 90 54 0 112 rrcurveto + 0 70 -26 66 -134 57 rrcurveto + -19 8 rlineto + -93 39 -42 49 0 68 rrcurveto + 0 91 60 48 88 0 rrcurveto + 56 0 35 -14 44 -50 rrcurveto + -38 47 rlineto + 28 -100 rlineto + 6 -15 10 -5 11 0 rrcurveto + 14 0 8 7 1 14 rrcurveto + -24 88 -67 48 -84 0 rrcurveto + -92 0 -82 -51 0 -108 rrcurveto + 0 -80 45 -53 92 -42 rrcurveto + 37 -17 rlineto + 114 -52 26 -46 0 -65 rrcurveto + 0 -93 -65 -55 -90 0 rrcurveto + 17 651 rmoveto + 1 106 rlineto + -22 0 rlineto + 1 -107 rlineto + 20 1 rlineto + -15 -784 rmoveto + 22 0 rlineto + -3 121 rlineto + -20 2 rlineto + 1 -123 rlineto + endchar + + + 91 618 rmoveto + 0 -20 rlineto + 155 35 rlineto + 0 -421 rlineto + 0 -70 -1 -71 -2 -72 rrcurveto + 34 0 rlineto + -2 72 -1 71 0 70 rrcurveto + 0 297 rlineto + 4 146 rlineto + -14 12 rlineto + -173 -49 rlineto + 176 -593 rmoveto + -14 0 rlineto + -170 -6 rlineto + 0 -20 rlineto + 344 0 rlineto + 0 20 rlineto + -160 6 rlineto + endchar + + + endchar + + + endchar + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttf differ diff -Nru fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Feat 
fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Feat --- fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Feat 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Feat 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Glat fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Glat --- fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Glat 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Glat 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Glat.setup fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Glat.setup --- fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Glat.setup 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Glat.setup 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Silf fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Silf --- fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Silf 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Silf 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,101 @@ + + + + + + + + + + + + + + + c + + + + + + + + + + + .notdef=0 space=0 a=1 b=2 c=0 + + + + + + + + + PUT_GLYPH_8BIT_OBS(0) + ASSOC(0, 1) + NEXT + DELETE + NEXT + RET_ZERO + + + + + PUT_GLYPH_8BIT_OBS(0) + NEXT + RET_ZERO + + + + + 0 1 + 1 1 2 + 0 3 0 + 0 4 0 + 0 0 5 + 0 0 5 + + + + + 
+ + a=0 c=1 + + + + + + + + COPY_NEXT + PUT_COPY(0) + PUSH_BYTE(-1) + ATTR_SET_SLOT(2) + PUSH_BYTE(0) + ATTR_SET(17) + PUSH_GLYPH_ATTR_OBS(6, 0) + ATTR_SET(8) + PUSH_GLYPH_ATTR_OBS(7, 0) + ATTR_SET(9) + PUSH_ATT_TO_GATTR_OBS(6, 0) + ATTR_SET(3) + PUSH_ATT_TO_GATTR_OBS(7, 0) + ATTR_SET(4) + NEXT + RET_ZERO + + + + + 0 + 1 0 + 0 2 + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Silf.setup fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Silf.setup --- fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Silf.setup 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Silf.setup 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Sill fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Sill --- fonttools-3.0/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Sill 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/graphite/graphite_tests.ttx.Sill 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/_h_h_e_a_recalc_empty.ttx fonttools-3.21.2/Tests/ttLib/tables/data/_h_h_e_a_recalc_empty.ttx --- fonttools-3.0/Tests/ttLib/tables/data/_h_h_e_a_recalc_empty.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/_h_h_e_a_recalc_empty.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 300 endchar + + + 400 endchar + + + 500 endchar + + + 600 endchar + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/_h_h_e_a_recalc_OTF.ttx fonttools-3.21.2/Tests/ttLib/tables/data/_h_h_e_a_recalc_OTF.ttx --- 
fonttools-3.0/Tests/ttLib/tables/data/_h_h_e_a_recalc_OTF.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/_h_h_e_a_recalc_OTF.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,72 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 300 + 0 0 rmoveto + 1 1 rlineto + endchar + + + 400 + -55.2 -55.2 rmoveto + 110.4 110.4 rlineto + endchar + + + 500 + 100 0 rmoveto + 300 0 rlineto + endchar + + + 600 + endchar + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/_h_h_e_a_recalc_TTF.ttx fonttools-3.21.2/Tests/ttLib/tables/data/_h_h_e_a_recalc_TTF.ttx --- fonttools-3.0/Tests/ttLib/tables/data/_h_h_e_a_recalc_TTF.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/_h_h_e_a_recalc_TTF.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/ttProgram.ttx fonttools-3.21.2/Tests/ttLib/tables/data/ttProgram.ttx --- fonttools-3.0/Tests/ttLib/tables/data/ttProgram.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/ttProgram.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1627 @@ + + + NPUSHB[ ] /* 59 values pushed */ + 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 + 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 + 8 7 6 5 4 3 2 1 0 + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 24 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + RCVT[ ] /* ReadCVT */ + ROUND[10] /* Round */ + PUSHB[ ] /* 1 value pushed */ + 25 + RS[ ] /* ReadStore */ + ADD[ ] /* Add */ + PUSHB[ ] /* 1 value pushed */ + 70 + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + SWAP[ ] /* SwapTopStack */ + SRP0[ ] /* SetRefPoint0 */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ 
] /* 1 value pushed */ + 70 + FLIPOFF[ ] /* SetAutoFlipOff */ + MIRP[10000] /* MoveIndirectRelPt */ + FLIPON[ ] /* SetAutoFlipOn */ + MDAP[1] /* MoveDirectAbsPt */ + PUSHB[ ] /* 1 value pushed */ + 0 + SRP2[ ] /* SetRefPoint2 */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + SWAP[ ] /* SwapTopStack */ + SRP1[ ] /* SetRefPoint1 */ + SHP[1] /* ShiftPointByLastPoint */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 24 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 5 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 0 + SZP0[ ] /* SetZonePointer0 */ + MPPEM[ ] /* MeasurePixelPerEm */ + PUSHB[ ] /* 1 value pushed */ + 20 + LT[ ] /* LessThan */ + IF[ ] /* If */ + PUSHB[ ] /* 2 values pushed */ + 0 64 + SHPIX[ ] /* ShiftZoneByPixel */ + EIF[ ] /* EndIf */ + PUSHB[ ] /* 1 value pushed */ + 6 + CALL[ ] /* CallFunction */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + SWAP[ ] /* SwapTopStack */ + SRP1[ ] /* SetRefPoint1 */ + SHP[1] /* ShiftPointByLastPoint */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 24 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + FLIPOFF[ ] /* SetAutoFlipOff */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + SRP0[ ] /* SetRefPoint0 */ + MIRP[10010] /* MoveIndirectRelPt */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 0 + SRP2[ ] /* SetRefPoint2 */ + FLIPON[ ] /* SetAutoFlipOn */ + ELSE[ ] /* Else */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + MD[1] /* MeasureDistance */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + PUSHB[ ] /* 1 value pushed */ + 40 + GT[ ] 
/* GreaterThan */ + IF[ ] /* If */ + POP[ ] /* PopTopStack */ + SWAP[ ] /* SwapTopStack */ + SRP0[ ] /* SetRefPoint0 */ + MDRP[10110] /* MoveDirectRelPt */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + SRP0[ ] /* SetRefPoint0 */ + MIRP[10010] /* MoveIndirectRelPt */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 0 + SRP2[ ] /* SetRefPoint2 */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 26 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + MD[0] /* MeasureDistance */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + MD[1] /* MeasureDistance */ + SUB[ ] /* Subtract */ + DUP[ ] /* DuplicateTopStack */ + ABS[ ] /* Absolute */ + PUSHB[ ] /* 1 value pushed */ + 16 + LT[ ] /* LessThan */ + IF[ ] /* If */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + MD[0] /* MeasureDistance */ + PUSHB[ ] /* 1 value pushed */ + 0 + LT[ ] /* LessThan */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 0 + LT[ ] /* LessThan */ + IF[ ] /* If */ + PUSHW[ ] /* 1 value pushed */ + -30 + SHPIX[ ] /* ShiftZoneByPixel */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 16 + SHPIX[ ] /* ShiftZoneByPixel */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 0 + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 30 + SHPIX[ ] /* 
ShiftZoneByPixel */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + PUSHW[ ] /* 1 value pushed */ + -16 + SHPIX[ ] /* ShiftZoneByPixel */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 24 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 5 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 0 + SZP0[ ] /* SetZonePointer0 */ + MPPEM[ ] /* MeasurePixelPerEm */ + PUSHB[ ] /* 1 value pushed */ + 20 + LT[ ] /* LessThan */ + IF[ ] /* If */ + PUSHW[ ] /* 2 values pushed */ + 0 -64 + SHPIX[ ] /* ShiftZoneByPixel */ + EIF[ ] /* EndIf */ + PUSHB[ ] /* 1 value pushed */ + 6 + CALL[ ] /* CallFunction */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + SWAP[ ] /* SwapTopStack */ + SRP1[ ] /* SetRefPoint1 */ + SHP[1] /* ShiftPointByLastPoint */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + FLIPOFF[ ] /* SetAutoFlipOff */ + SVTCA[1] /* SetFPVectorToAxis */ + ROLL[ ] /* RollTopThreeStack */ + SRP0[ ] /* SetRefPoint0 */ + PUSHB[ ] /* 2 values pushed */ + 70 25 + RS[ ] /* ReadStore */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSHB[ ] /* 1 value pushed */ + 0 + SZP1[ ] /* SetZonePointer1 */ + PUSHB[ ] /* 2 values pushed */ + 0 70 + MIRP[00010] /* MoveIndirectRelPt */ + PUSHB[ ] /* 1 value pushed */ + 0 + SZP2[ ] /* SetZonePointer2 */ + PUSHW[ ] /* 2 values pushed */ + 0 -16 + SHPIX[ ] /* ShiftZoneByPixel */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 0 + ALIGNRP[ ] /* AlignRelativePt */ + PUSHB[ ] /* 1 value pushed */ + 40 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + SRP0[ ] /* SetRefPoint0 */ + PUSHB[ ] /* 1 value pushed */ + 0 + ALIGNRP[ ] /* AlignRelativePt */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* 
FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + RTG[ ] /* RoundToGrid */ + PUSHB[ ] /* 1 value pushed */ + 0 + MDAP[1] /* MoveDirectAbsPt */ + PUSHB[ ] /* 1 value pushed */ + 1 + SZP1[ ] /* SetZonePointer1 */ + MIRP[10010] /* MoveIndirectRelPt */ + PUSHB[ ] /* 1 value pushed */ + 1 + SZP0[ ] /* SetZonePointer0 */ + PUSHB[ ] /* 1 value pushed */ + 1 + SZP2[ ] /* SetZonePointer2 */ + FLIPON[ ] /* SetAutoFlipOn */ + PUSHB[ ] /* 1 value pushed */ + 0 + SRP2[ ] /* SetRefPoint2 */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 24 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 5 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 0 + SZP0[ ] /* SetZonePointer0 */ + PUSHW[ ] /* 2 values pushed */ + 0 -32 + SHPIX[ ] /* ShiftZoneByPixel */ + PUSHB[ ] /* 1 value pushed */ + 6 + CALL[ ] /* CallFunction */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + SWAP[ ] /* SwapTopStack */ + SRP1[ ] /* SetRefPoint1 */ + SHP[1] /* ShiftPointByLastPoint */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 24 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + RCVT[ ] /* ReadCVT */ + ABS[ ] /* Absolute */ + ROUND[10] /* Round */ + SWAP[ ] /* SwapTopStack */ + RCVT[ ] /* ReadCVT */ + ABS[ ] /* Absolute */ + ROUND[01] /* Round */ + PUSHB[ ] /* 1 value pushed */ + 25 + RS[ ] /* ReadStore */ + ABS[ ] /* Absolute */ + ADD[ ] /* Add */ + ADD[ ] /* Add */ + PUSHB[ ] /* 1 value pushed */ + 70 + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + SWAP[ ] /* SwapTopStack */ + SRP0[ ] /* SetRefPoint0 */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 70 + MIRP[10000] /* MoveIndirectRelPt */ + MDAP[1] /* MoveDirectAbsPt */ + PUSHB[ ] /* 1 value pushed */ + 0 + SRP2[ ] /* SetRefPoint2 */ + ELSE[ ] /* Else */ + POP[ ] /* 
PopTopStack */ + POP[ ] /* PopTopStack */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + SRP1[ ] /* SetRefPoint1 */ + SHP[1] /* ShiftPointByLastPoint */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 2 values pushed */ + 11 10 + RS[ ] /* ReadStore */ + SWAP[ ] /* SwapTopStack */ + RS[ ] /* ReadStore */ + NEG[ ] /* Negate */ + SPVFS[ ] /* SetPVectorFromStack */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSHB[ ] /* 2 values pushed */ + 10 11 + RS[ ] /* ReadStore */ + SWAP[ ] /* SwapTopStack */ + RS[ ] /* ReadStore */ + SFVFS[ ] /* SetFVectorFromStack */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 70 + SWAP[ ] /* SwapTopStack */ + WCVTF[ ] /* WriteCVTInFUnits */ + PUSHB[ ] /* 2 values pushed */ + 1 70 + MIAP[0] /* MoveIndirectAbsPt */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 70 + SWAP[ ] /* SwapTopStack */ + WCVTF[ ] /* WriteCVTInFUnits */ + PUSHB[ ] /* 2 values pushed */ + 2 70 + RCVT[ ] /* ReadCVT */ + MSIRP[0] /* MoveStackIndirRelPt */ + PUSHB[ ] /* 2 values pushed */ + 2 0 + SFVTL[0] /* SetFVectorToLine */ + GFV[ ] /* GetFVector */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 18 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[10] /* Round */ + PUSHB[ ] /* 1 value pushed */ + 64 + MAX[ ] /* Maximum */ + ADD[ ] /* Add */ + 
PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 19 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[10] /* Round */ + PUSHW[ ] /* 1 value pushed */ + -64 + MIN[ ] /* Minimum */ + ADD[ ] /* Add */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 0 + PUSHB[ ] /* 1 value pushed */ + 18 + CALL[ ] /* CallFunction */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 0 + PUSHB[ ] /* 1 value pushed */ + 19 + CALL[ ] /* CallFunction */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 6 + RS[ ] /* ReadStore */ + PUSHB[ ] /* 1 value pushed */ + 7 + RS[ ] /* ReadStore */ + NEG[ ] /* Negate */ + SPVFS[ ] /* SetPVectorFromStack */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + ROUND[01] /* Round */ + PUSHB[ ] /* 1 value pushed */ + 64 + SUB[ ] /* Subtract */ + PUSHB[ ] /* 1 value pushed */ + 0 + MAX[ ] /* Maximum */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 2 values pushed */ + 44 192 + ROLL[ ] /* 
RollTopThreeStack */ + MIN[ ] /* Minimum */ + PUSHW[ ] /* 1 value pushed */ + 4096 + DIV[ ] /* Divide */ + ADD[ ] /* Add */ + CALL[ ] /* CallFunction */ + GPV[ ] /* GetPVector */ + ABS[ ] /* Absolute */ + SWAP[ ] /* SwapTopStack */ + ABS[ ] /* Absolute */ + SUB[ ] /* Subtract */ + NOT[ ] /* LogicalNot */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 3 + SUB[ ] /* Subtract */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 2 values pushed */ + 0 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[00] /* Round */ + EQ[ ] /* Equal */ + PUSHB[ ] /* 1 value pushed */ + 28 + MPPEM[ ] /* MeasurePixelPerEm */ + LT[ ] /* LessThan */ + AND[ ] /* LogicalAnd */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ADD[ ] /* Add */ + ROUND[00] /* Round */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[00] /* Round */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[00] /* Round */ + ADD[ ] /* Add */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] 
/* CopyXToTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[00] /* Round */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ABS[ ] /* Absolute */ + ROUND[00] /* Round */ + NEG[ ] /* Negate */ + ADD[ ] /* Add */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 1 + ADD[ ] /* Add */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 9 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + SDPVTL[1] /* SetDualPVectorToLine */ + POP[ ] /* PopTopStack */ + MDRP[00000] /* MoveDirectRelPt */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 18 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + SDPVTL[1] /* SetDualPVectorToLine */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 17 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 71 + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSHB[ ] /* 1 value pushed */ + 71 + ROFF[ ] /* RoundOff */ + MIRP[00100] /* MoveIndirectRelPt */ + ELSE[ ] /* Else */ + SPVTCA[1] /* SetPVectorToAxis */ + ROLL[ ] /* RollTopThreeStack */ + RCVT[ ] /* ReadCVT */ + RTG[ ] /* RoundToGrid */ + ROUND[01] /* Round */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 71 + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + SDPVTL[1] /* SetDualPVectorToLine */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 160 + LTEQ[ ] /* LessThenOrEqual */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 17 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 71 + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + PUSHB[ ] /* 1 value 
pushed */ + 71 + ROFF[ ] /* RoundOff */ + MIRP[00100] /* MoveIndirectRelPt */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 71 + ROFF[ ] /* RoundOff */ + MIRP[00100] /* MoveIndirectRelPt */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + RTG[ ] /* RoundToGrid */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + RCVT[ ] /* ReadCVT */ + SWAP[ ] /* SwapTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[00] /* Round */ + ADD[ ] /* Add */ + WCVTP[ ] /* WriteCVTInPixels */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + RCVT[ ] /* ReadCVT */ + ROUND[10] /* Round */ + WS[ ] /* WriteStore */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + RTG[ ] /* RoundToGrid */ + MDAP[1] /* MoveDirectAbsPt */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 24 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + MD[0] /* MeasureDistance */ + ABS[ ] /* Absolute */ + SWAP[ ] /* SwapTopStack */ + RCVT[ ] /* ReadCVT */ + ABS[ ] /* Absolute */ + ROUND[01] /* Round */ + PUSHB[ ] /* 1 value pushed */ + 64 + MAX[ ] /* Maximum */ + SUB[ ] /* Subtract */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 128 + DIV[ ] /* Divide */ + ROUND[10] /* Round */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + SUB[ ] /* Subtract */ + MIN[ ] /* Minimum */ + PUSHB[ ] /* 1 value pushed */ + 25 + RS[ ] /* ReadStore */ + ADD[ ] /* Add */ + PUSHB[ ] /* 1 value pushed */ + 70 + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + POP[ ] /* PopTopStack */ 
+ ROLL[ ] /* RollTopThreeStack */ + SRP0[ ] /* SetRefPoint0 */ + PUSHB[ ] /* 1 value pushed */ + 70 + MIRP[10110] /* MoveIndirectRelPt */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 24 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ABS[ ] /* Absolute */ + ADD[ ] /* Add */ + ROUND[10] /* Round */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[10] /* Round */ + SUB[ ] /* Subtract */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[10] /* Round */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + MAX[ ] /* Maximum */ + NEG[ ] /* Negate */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + MIN[ ] /* Minimum */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[10] /* Round */ + WCVTP[ ] /* WriteCVTInPixels */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[10] /* Round */ + WCVTP[ ] /* WriteCVTInPixels */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 0 + NEQ[ ] /* NotEqual 
*/ + PUSHB[ ] /* 1 value pushed */ + 24 + RS[ ] /* ReadStore */ + AND[ ] /* LogicalAnd */ + IF[ ] /* If */ + RCVT[ ] /* ReadCVT */ + ROUND[00] /* Round */ + SWAP[ ] /* SwapTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[01] /* Round */ + PUSHB[ ] /* 1 value pushed */ + 64 + MAX[ ] /* Maximum */ + SUB[ ] /* Subtract */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 128 + DIV[ ] /* Divide */ + ROUND[10] /* Round */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + SWAP[ ] /* SwapTopStack */ + SUB[ ] /* Subtract */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + MAX[ ] /* Maximum */ + NEG[ ] /* Negate */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + MIN[ ] /* Minimum */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 25 + CALL[ ] /* CallFunction */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + RCVT[ ] /* ReadCVT */ + ABS[ ] /* Absolute */ + SWAP[ ] /* SwapTopStack */ + RCVT[ ] /* ReadCVT */ + ABS[ ] /* Absolute */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + WS[ ] /* WriteStore */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 25 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 24 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* 
CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 27 + CALL[ ] /* CallFunction */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RS[ ] /* ReadStore */ + PUSHB[ ] /* 1 value pushed */ + 64 + EQ[ ] /* Equal */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RS[ ] /* ReadStore */ + PUSHB[ ] /* 1 value pushed */ + 0 + EQ[ ] /* Equal */ + AND[ ] /* LogicalAnd */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 64 + SUB[ ] /* Subtract */ + WCVTP[ ] /* WriteCVTInPixels */ + EIF[ ] /* EndIf */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RS[ ] /* ReadStore */ + PUSHB[ ] /* 1 value pushed */ + 0 + EQ[ ] /* Equal */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RS[ ] /* ReadStore */ + PUSHB[ ] /* 1 value pushed */ + 64 + EQ[ ] /* Equal */ + AND[ ] /* LogicalAnd */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 64 + ADD[ ] /* Add */ + WCVTP[ ] /* WriteCVTInPixels */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + MPPEM[ ] /* MeasurePixelPerEm */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + RS[ ] /* ReadStore */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + SWAP[ ] /* SwapTopStack */ + SUB[ ] /* Subtract */ + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + 
POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + MPPEM[ ] /* MeasurePixelPerEm */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + RS[ ] /* ReadStore */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + SWAP[ ] /* SwapTopStack */ + ADD[ ] /* Add */ + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + RCVT[ ] /* ReadCVT */ + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + SWAP[ ] /* SwapTopStack */ + MD[0] /* MeasureDistance */ + PUSHB[ ] /* 1 value pushed */ + 64 + ADD[ ] /* Add */ + PUSHB[ ] /* 1 value pushed */ + 32 + MUL[ ] /* Multiply */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 0 + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + SWAP[ ] /* SwapTopStack */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + SHPIX[ ] /* ShiftZoneByPixel */ + SWAP[ ] /* SwapTopStack */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + NEG[ ] /* Negate */ + SHPIX[ ] /* ShiftZoneByPixel */ + SVTCA[0] /* SetFPVectorToAxis */ + ROLL[ ] /* RollTopThreeStack */ + MUL[ ] /* Multiply */ + SHPIX[ ] /* ShiftZoneByPixel */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + SVTCA[1] /* SetFPVectorToAxis */ + ENDF[ ] /* EndFunctionDefinition */ + 
FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + LT[ ] /* LessThan */ + IF[ ] /* If */ + RCVT[ ] /* ReadCVT */ + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 5 + CINDEX[ ] /* CopyXToTopStack */ + SRP0[ ] /* SetRefPoint0 */ + SWAP[ ] /* SwapTopStack */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + MIRP[10101] /* MoveIndirectRelPt */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 1 + ADD[ ] /* Add */ + SWAP[ ] /* SwapTopStack */ + MIRP[01101] /* MoveIndirectRelPt */ + MIRP[01100] /* MoveIndirectRelPt */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 5 + CINDEX[ ] /* CopyXToTopStack */ + SRP0[ ] /* SetRefPoint0 */ + SWAP[ ] /* SwapTopStack */ + DUP[ ] /* DuplicateTopStack */ + ROLL[ ] /* RollTopThreeStack */ + MIRP[10101] /* MoveIndirectRelPt */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 1 + SUB[ ] /* Subtract */ + SWAP[ ] /* SwapTopStack */ + MIRP[01101] /* MoveIndirectRelPt */ + MIRP[01100] /* MoveIndirectRelPt */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 6 + CINDEX[ ] /* CopyXToTopStack */ + SRP0[ ] /* SetRefPoint0 */ + MIRP[10101] /* MoveIndirectRelPt */ + SVTCA[0] /* SetFPVectorToAxis */ + MIRP[01101] /* MoveIndirectRelPt */ + MIRP[01100] /* MoveIndirectRelPt */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + GC[0] /* GetCoordOnPVector */ + SWAP[ ] /* SwapTopStack */ + GC[0] /* GetCoordOnPVector */ + ADD[ ] /* Add */ + ROLL[ ] /* RollTopThreeStack */ + ROLL[ ] /* RollTopThreeStack */ + GC[0] /* GetCoordOnPVector */ + SWAP[ ] /* 
SwapTopStack */ + DUP[ ] /* DuplicateTopStack */ + GC[0] /* GetCoordOnPVector */ + ROLL[ ] /* RollTopThreeStack */ + ADD[ ] /* Add */ + ROLL[ ] /* RollTopThreeStack */ + SUB[ ] /* Subtract */ + PUSHW[ ] /* 1 value pushed */ + -128 + DIV[ ] /* Divide */ + SWAP[ ] /* SwapTopStack */ + DUP[ ] /* DuplicateTopStack */ + SRP0[ ] /* SetRefPoint0 */ + SWAP[ ] /* SwapTopStack */ + ROLL[ ] /* RollTopThreeStack */ + PUSHB[ ] /* 2 values pushed */ + 75 75 + ROLL[ ] /* RollTopThreeStack */ + WCVTF[ ] /* WriteCVTInFUnits */ + RCVT[ ] /* ReadCVT */ + ADD[ ] /* Add */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 0 + LT[ ] /* LessThan */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 1 + SUB[ ] /* Subtract */ + PUSHW[ ] /* 1 value pushed */ + -70 + MAX[ ] /* Maximum */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 70 + MIN[ ] /* Minimum */ + EIF[ ] /* EndIf */ + PUSHB[ ] /* 1 value pushed */ + 16 + ADD[ ] /* Add */ + ROUND[00] /* Round */ + SVTCA[1] /* SetFPVectorToAxis */ + MSIRP[0] /* MoveStackIndirRelPt */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + DUP[ ] /* DuplicateTopStack */ + ROUND[00] /* Round */ + SUB[ ] /* Subtract */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + MIAP[1] /* MoveIndirectAbsPt */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + SHPIX[ ] /* ShiftZoneByPixel */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + SRP1[ ] /* SetRefPoint1 */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + 
PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + LT[ ] /* LessThan */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + GC[0] /* GetCoordOnPVector */ + DUP[ ] /* DuplicateTopStack */ + ROUND[00] /* Round */ + SUB[ ] /* Subtract */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + MIAP[1] /* MoveIndirectAbsPt */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + SHPIX[ ] /* ShiftZoneByPixel */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + SRP1[ ] /* SetRefPoint1 */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[0] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 7 + RS[ ] /* ReadStore */ + PUSHB[ ] /* 1 value pushed */ + 6 + RS[ ] /* ReadStore */ + SFVFS[ ] /* SetFVectorFromStack */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + ROLL[ ] /* RollTopThreeStack */ + SRP0[ ] /* SetRefPoint0 */ + MIRP[01100] /* MoveIndirectRelPt */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 12 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + DUP[ ] /* DuplicateTopStack */ + GC[0] /* GetCoordOnPVector */ + PUSHB[ ] /* 1 value pushed */ + 0 + GT[ ] /* GreaterThan */ + IF[ ] /* If */ + PUSHW[ ] /* 1 value pushed */ + -16 + SHPIX[ ] /* ShiftZoneByPixel */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 16 + SHPIX[ ] /* ShiftZoneByPixel */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 0 + NEQ[ ] /* NotEqual */ + IF[ ] /* If */ + PUSHW[ ] /* 1 value pushed */ + 4096 + MUL[ ] /* Multiply */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] 
/* ReadCVT */ + ABS[ ] /* Absolute */ + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ABS[ ] /* Absolute */ + SUB[ ] /* Subtract */ + PUSHB[ ] /* 1 value pushed */ + 0 + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 2 + ELSE[ ] /* Else */ + PUSHB[ ] /* 1 value pushed */ + 64 + SUB[ ] /* Subtract */ + PUSHB[ ] /* 1 value pushed */ + 3 + EIF[ ] /* EndIf */ + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + ROUND[01] /* Round */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + RCVT[ ] /* ReadCVT */ + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + RCVT[ ] /* ReadCVT */ + SUB[ ] /* Subtract */ + ABS[ ] /* Absolute */ + PUSHB[ ] /* 1 value pushed */ + 40 + LTEQ[ ] /* LessThenOrEqual */ + IF[ ] /* If */ + RCVT[ ] /* ReadCVT */ + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + GPV[ ] /* GetPVector */ + ABS[ ] /* Absolute */ + SWAP[ ] /* SwapTopStack */ + ABS[ ] /* Absolute */ + MAX[ ] /* Maximum */ + PUSHW[ ] /* 1 value pushed */ + 16384 + DIV[ ] /* Divide */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 128 + LTEQ[ ] /* LessThenOrEqual */ + IF[ ] /* If */ + GPV[ ] /* GetPVector */ + ABS[ ] /* Absolute */ + SWAP[ ] /* SwapTopStack */ + ABS[ ] /* Absolute */ + MAX[ ] /* Maximum */ + PUSHW[ ] /* 1 value pushed */ + 8192 + DIV[ ] /* Divide */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 
3 values pushed */ + 0 64 47 + CALL[ ] /* CallFunction */ + EIF[ ] /* EndIf */ + PUSHB[ ] /* 1 value pushed */ + 2 + ADD[ ] /* Add */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + POP[ ] /* PopTopStack */ + PUSHB[ ] /* 1 value pushed */ + 192 + LTEQ[ ] /* LessThenOrEqual */ + IF[ ] /* If */ + GPV[ ] /* GetPVector */ + ABS[ ] /* Absolute */ + SWAP[ ] /* SwapTopStack */ + ABS[ ] /* Absolute */ + MAX[ ] /* Maximum */ + PUSHW[ ] /* 1 value pushed */ + 5461 + DIV[ ] /* Divide */ + ELSE[ ] /* Else */ + PUSHB[ ] /* 3 values pushed */ + 0 128 47 + CALL[ ] /* CallFunction */ + EIF[ ] /* EndIf */ + PUSHB[ ] /* 1 value pushed */ + 2 + ADD[ ] /* Add */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + GPV[ ] /* GetPVector */ + ABS[ ] /* Absolute */ + SWAP[ ] /* SwapTopStack */ + ABS[ ] /* Absolute */ + MAX[ ] /* Maximum */ + PUSHW[ ] /* 1 value pushed */ + 16384 + DIV[ ] /* Divide */ + ADD[ ] /* Add */ + SWAP[ ] /* SwapTopStack */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + GTEQ[ ] /* GreaterThanOrEqual */ + IF[ ] /* If */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 4 + CINDEX[ ] /* CopyXToTopStack */ + MD[0] /* MeasureDistance */ + ABS[ ] /* Absolute */ + SWAP[ ] /* SwapTopStack */ + RCVT[ ] /* ReadCVT */ + ABS[ ] /* Absolute */ + ROUND[01] /* Round */ + PUSHB[ ] /* 1 value pushed */ + 64 + MAX[ ] /* Maximum */ + SUB[ ] /* Subtract */ + DUP[ ] /* DuplicateTopStack */ + PUSHB[ ] /* 1 value pushed */ + 128 + DIV[ ] /* Divide */ + ROUND[10] /* Round */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + SUB[ ] /* Subtract */ + MIN[ ] /* Minimum */ + PUSHB[ ] /* 1 value pushed */ + 70 + SWAP[ ] /* SwapTopStack */ + WCVTP[ ] /* WriteCVTInPixels */ + POP[ ] /* PopTopStack */ + ROLL[ ] /* 
RollTopThreeStack */ + SRP0[ ] /* SetRefPoint0 */ + PUSHB[ ] /* 1 value pushed */ + 70 + MIRP[10110] /* MoveIndirectRelPt */ + POP[ ] /* PopTopStack */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + ROLL[ ] /* RollTopThreeStack */ + SRP1[ ] /* SetRefPoint1 */ + SWAP[ ] /* SwapTopStack */ + SRP2[ ] /* SetRefPoint2 */ + DUP[ ] /* DuplicateTopStack */ + IP[ ] /* InterpolatePts */ + MDAP[1] /* MoveDirectAbsPt */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + MD[0] /* MeasureDistance */ + ABS[ ] /* Absolute */ + PUSHB[ ] /* 1 value pushed */ + 192 + EQ[ ] /* Equal */ + IF[ ] /* If */ + PUSHW[ ] /* 1 value pushed */ + -8 + SHPIX[ ] /* ShiftZoneByPixel */ + PUSHB[ ] /* 1 value pushed */ + 8 + SHPIX[ ] /* ShiftZoneByPixel */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 19 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + SPVTCA[1] /* SetPVectorToAxis */ + ELSE[ ] /* Else */ + SPVTCA[0] /* SetPVectorToAxis */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 19 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + SPVTCA[0] /* SetPVectorToAxis */ + ELSE[ ] /* Else */ + SPVTCA[1] /* SetPVectorToAxis */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 10 + CALL[ ] /* CallFunction */ + SWAP[ ] /* SwapTopStack */ + SRP0[ ] /* SetRefPoint0 */ + DUP[ ] /* DuplicateTopStack */ + ALIGNRP[ ] /* AlignRelativePt */ + PUSHB[ ] /* 1 value pushed */ + 23 + CALL[ ] /* CallFunction */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* 
CopyXToTopStack */ + PUSHW[ ] /* 1 value pushed */ + -16 + SHPIX[ ] /* ShiftZoneByPixel */ + PUSHB[ ] /* 1 value pushed */ + 40 + CALL[ ] /* CallFunction */ + ROLL[ ] /* RollTopThreeStack */ + SRP0[ ] /* SetRefPoint0 */ + SWAP[ ] /* SwapTopStack */ + DUP[ ] /* DuplicateTopStack */ + MDRP[10000] /* MoveDirectRelPt */ + SWAP[ ] /* SwapTopStack */ + PUSHB[ ] /* 1 value pushed */ + 16 + CALL[ ] /* CallFunction */ + PUSHB[ ] /* 1 value pushed */ + 5 + RS[ ] /* ReadStore */ + IF[ ] /* If */ + MDRP[00000] /* MoveDirectRelPt */ + ELSE[ ] /* Else */ + ALIGNRP[ ] /* AlignRelativePt */ + EIF[ ] /* EndIf */ + DUP[ ] /* DuplicateTopStack */ + SRP0[ ] /* SetRefPoint0 */ + SRP1[ ] /* SetRefPoint1 */ + PUSHB[ ] /* 1 value pushed */ + 0 + SRP2[ ] /* SetRefPoint2 */ + SVTCA[1] /* SetFPVectorToAxis */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + GTEQ[ ] /* GreaterThanOrEqual */ + SWAP[ ] /* SwapTopStack */ + MPPEM[ ] /* MeasurePixelPerEm */ + LTEQ[ ] /* LessThenOrEqual */ + AND[ ] /* LogicalAnd */ + IF[ ] /* If */ + SHPIX[ ] /* ShiftZoneByPixel */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 2 + CINDEX[ ] /* CopyXToTopStack */ + SRP0[ ] /* SetRefPoint0 */ + MDRP[10000] /* MoveDirectRelPt */ + SWAP[ ] /* SwapTopStack */ + MDRP[01001] /* MoveDirectRelPt */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 1 + SZP0[ ] /* SetZonePointer0 */ + PUSHB[ ] /* 1 value pushed */ + 0 + SZP1[ ] /* SetZonePointer1 */ + SRP0[ ] /* SetRefPoint0 */ + PUSHB[ ] /* 1 value pushed */ + 1 + ALIGNRP[ ] /* AlignRelativePt */ + PUSHB[ ] /* 1 value pushed */ + 1 + SZPS[ ] /* SetZonePointerS */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + SVTCA[1] /* SetFPVectorToAxis */ + PUSHB[ ] /* 1 value pushed */ + 0 + 
SZP0[ ] /* SetZonePointer0 */ + PUSHB[ ] /* 1 value pushed */ + 1 + PUSHB[ ] /* 1 value pushed */ + 3 + CINDEX[ ] /* CopyXToTopStack */ + MD[0] /* MeasureDistance */ + PUSHB[ ] /* 1 value pushed */ + 3 + SLOOP[ ] /* SetLoopVariable */ + SHPIX[ ] /* ShiftZoneByPixel */ + PUSHB[ ] /* 1 value pushed */ + 1 + SZP0[ ] /* SetZonePointer0 */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + MPPEM[ ] /* MeasurePixelPerEm */ + GTEQ[ ] /* GreaterThanOrEqual */ + SWAP[ ] /* SwapTopStack */ + MPPEM[ ] /* MeasurePixelPerEm */ + LTEQ[ ] /* LessThenOrEqual */ + AND[ ] /* LogicalAnd */ + IF[ ] /* If */ + DUP[ ] /* DuplicateTopStack */ + RCVT[ ] /* ReadCVT */ + ROLL[ ] /* RollTopThreeStack */ + ADD[ ] /* Add */ + WCVTP[ ] /* WriteCVTInPixels */ + ELSE[ ] /* Else */ + POP[ ] /* PopTopStack */ + POP[ ] /* PopTopStack */ + EIF[ ] /* EndIf */ + ENDF[ ] /* EndFunctionDefinition */ + FDEF[ ] /* FunctionDefinition */ + DUP[ ] /* DuplicateTopStack */ + IP[ ] /* InterpolatePts */ + MDAP[1] /* MoveDirectAbsPt */ + ENDF[ ] /* EndFunctionDefinition */ + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/_v_h_e_a_recalc_empty.ttx fonttools-3.21.2/Tests/ttLib/tables/data/_v_h_e_a_recalc_empty.ttx --- fonttools-3.0/Tests/ttLib/tables/data/_v_h_e_a_recalc_empty.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/_v_h_e_a_recalc_empty.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 300 endchar + + + 400 endchar + + + 500 endchar + + + 600 endchar + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/_v_h_e_a_recalc_OTF.ttx fonttools-3.21.2/Tests/ttLib/tables/data/_v_h_e_a_recalc_OTF.ttx --- fonttools-3.0/Tests/ttLib/tables/data/_v_h_e_a_recalc_OTF.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/_v_h_e_a_recalc_OTF.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,72 @@ + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + 300 + 0 0 rmoveto + 1 1 rlineto + endchar + + + 400 + -55.2 -55.2 rmoveto + 110.4 110.4 rlineto + endchar + + + 500 + 100 0 rmoveto + 300 0 rlineto + endchar + + + 600 + endchar + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/data/_v_h_e_a_recalc_TTF.ttx fonttools-3.21.2/Tests/ttLib/tables/data/_v_h_e_a_recalc_TTF.ttx --- fonttools-3.0/Tests/ttLib/tables/data/_v_h_e_a_recalc_TTF.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/data/_v_h_e_a_recalc_TTF.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/ttLib/tables/_f_p_g_m_test.py fonttools-3.21.2/Tests/ttLib/tables/_f_p_g_m_test.py --- fonttools-3.0/Tests/ttLib/tables/_f_p_g_m_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_f_p_g_m_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.ttLib.tables._f_p_g_m import table__f_p_g_m +from fontTools.ttLib.tables import ttProgram + + +def test__bool__(): + fpgm = table__f_p_g_m() + assert not bool(fpgm) + + p = ttProgram.Program() + fpgm.program = p + assert not bool(fpgm) + + bc = bytearray([0]) + p.fromBytecode(bc) + assert bool(fpgm) + + p.bytecode.pop() + assert not bool(fpgm) diff -Nru fonttools-3.0/Tests/ttLib/tables/_f_v_a_r_test.py fonttools-3.21.2/Tests/ttLib/tables/_f_v_a_r_test.py --- fonttools-3.0/Tests/ttLib/tables/_f_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_f_v_a_r_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,264 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools 
import parseXML +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis, NamedInstance +from fontTools.ttLib.tables._n_a_m_e import table__n_a_m_e, NameRecord +import unittest + + + +FVAR_DATA = deHexStr( + "00 01 00 00 00 10 00 02 00 02 00 14 00 02 00 0C " + "77 67 68 74 00 64 00 00 01 90 00 00 03 84 00 00 00 00 01 01 " + "77 64 74 68 00 32 00 00 00 64 00 00 00 c8 00 00 00 00 01 02 " + "01 03 00 00 01 2c 00 00 00 64 00 00 " + "01 04 00 00 01 2c 00 00 00 4b 00 00") + +FVAR_AXIS_DATA = deHexStr( + "6F 70 73 7a ff ff 80 00 00 01 4c cd 00 01 80 00 00 00 01 59") + +FVAR_INSTANCE_DATA_WITHOUT_PSNAME = deHexStr( + "01 59 00 00 00 00 b3 33 00 00 80 00") + +FVAR_INSTANCE_DATA_WITH_PSNAME = ( + FVAR_INSTANCE_DATA_WITHOUT_PSNAME + deHexStr("02 34")) + + +def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in content.splitlines()][1:] + + +def AddName(font, name): + nameTable = font.get("name") + if nameTable is None: + nameTable = font["name"] = table__n_a_m_e() + nameTable.names = [] + namerec = NameRecord() + namerec.nameID = 1 + max([n.nameID for n in nameTable.names] + [256]) + namerec.string = name.encode('mac_roman') + namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0) + nameTable.names.append(namerec) + return namerec + + +def MakeFont(): + axes = [("wght", "Weight", 100, 400, 900), ("wdth", "Width", 50, 100, 200)] + instances = [("Light", 300, 100), ("Light Condensed", 300, 75)] + fvarTable = table__f_v_a_r() + font = {"fvar": fvarTable} + for tag, name, minValue, defaultValue, maxValue in axes: + axis = Axis() + axis.axisTag = tag + axis.defaultValue = defaultValue + axis.minValue, axis.maxValue = minValue, maxValue + axis.axisNameID = AddName(font, name).nameID + fvarTable.axes.append(axis) + for name, weight, width in instances: + inst = NamedInstance() 
+ inst.subfamilyNameID = AddName(font, name).nameID + inst.coordinates = {"wght": weight, "wdth": width} + fvarTable.instances.append(inst) + return font + + +class FontVariationTableTest(unittest.TestCase): + def test_compile(self): + font = MakeFont() + h = font["fvar"].compile(font) + self.assertEqual(FVAR_DATA, font["fvar"].compile(font)) + + def test_decompile(self): + fvar = table__f_v_a_r() + fvar.decompile(FVAR_DATA, ttFont={"fvar": fvar}) + self.assertEqual(["wght", "wdth"], [a.axisTag for a in fvar.axes]) + self.assertEqual([259, 260], [i.subfamilyNameID for i in fvar.instances]) + + def test_toXML(self): + font = MakeFont() + writer = XMLWriter(BytesIO()) + font["fvar"].toXML(writer, font) + xml = writer.file.getvalue().decode("utf-8") + self.assertEqual(2, xml.count("")) + self.assertTrue("wght" in xml) + self.assertTrue("wdth" in xml) + self.assertEqual(2, xml.count("" in xml) + self.assertTrue("" in xml) + + def test_fromXML(self): + fvar = table__f_v_a_r() + for name, attrs, content in parseXML( + '' + ' opsz' + '' + '' + ' slnt' + ' 0x123' + '' + '' + ''): + fvar.fromXML(name, attrs, content, ttFont=None) + self.assertEqual(["opsz", "slnt"], [a.axisTag for a in fvar.axes]) + self.assertEqual([0, 0x123], [a.flags for a in fvar.axes]) + self.assertEqual([765, 234], [i.subfamilyNameID for i in fvar.instances]) + + +class AxisTest(unittest.TestCase): + def test_compile(self): + axis = Axis() + axis.axisTag, axis.axisNameID = ('opsz', 345) + axis.minValue, axis.defaultValue, axis.maxValue = (-0.5, 1.3, 1.5) + self.assertEqual(FVAR_AXIS_DATA, axis.compile()) + + def test_decompile(self): + axis = Axis() + axis.decompile(FVAR_AXIS_DATA) + self.assertEqual("opsz", axis.axisTag) + self.assertEqual(345, axis.axisNameID) + self.assertEqual(-0.5, axis.minValue) + self.assertEqual(1.3, axis.defaultValue) + self.assertEqual(1.5, axis.maxValue) + + def test_toXML(self): + font = MakeFont() + axis = Axis() + axis.decompile(FVAR_AXIS_DATA) + AddName(font, "Optical 
Size").nameID = 256 + axis.axisNameID = 256 + axis.flags = 0xABC + writer = XMLWriter(BytesIO()) + axis.toXML(writer, font) + self.assertEqual([ + '', + '', + '', + 'opsz', + '0xABC', + '-0.5', + '1.3', + '1.5', + '256', + '' + ], xml_lines(writer)) + + def test_fromXML(self): + axis = Axis() + for name, attrs, content in parseXML( + '' + ' wght' + ' 0x123ABC' + ' 100' + ' 400' + ' 900' + ' 256' + ''): + axis.fromXML(name, attrs, content, ttFont=None) + self.assertEqual("wght", axis.axisTag) + self.assertEqual(0x123ABC, axis.flags) + self.assertEqual(100, axis.minValue) + self.assertEqual(400, axis.defaultValue) + self.assertEqual(900, axis.maxValue) + self.assertEqual(256, axis.axisNameID) + + +class NamedInstanceTest(unittest.TestCase): + def test_compile_withPostScriptName(self): + inst = NamedInstance() + inst.subfamilyNameID = 345 + inst.postscriptNameID = 564 + inst.coordinates = {"wght": 0.7, "wdth": 0.5} + self.assertEqual(FVAR_INSTANCE_DATA_WITH_PSNAME, + inst.compile(["wght", "wdth"], True)) + + def test_compile_withoutPostScriptName(self): + inst = NamedInstance() + inst.subfamilyNameID = 345 + inst.postscriptNameID = 564 + inst.coordinates = {"wght": 0.7, "wdth": 0.5} + self.assertEqual(FVAR_INSTANCE_DATA_WITHOUT_PSNAME, + inst.compile(["wght", "wdth"], False)) + + def test_decompile_withPostScriptName(self): + inst = NamedInstance() + inst.decompile(FVAR_INSTANCE_DATA_WITH_PSNAME, ["wght", "wdth"]) + self.assertEqual(564, inst.postscriptNameID) + self.assertEqual(345, inst.subfamilyNameID) + self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) + + def test_decompile_withoutPostScriptName(self): + inst = NamedInstance() + inst.decompile(FVAR_INSTANCE_DATA_WITHOUT_PSNAME, ["wght", "wdth"]) + self.assertEqual(0xFFFF, inst.postscriptNameID) + self.assertEqual(345, inst.subfamilyNameID) + self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) + + def test_toXML_withPostScriptName(self): + font = MakeFont() + inst = NamedInstance() + 
inst.flags = 0xE9 + inst.subfamilyNameID = AddName(font, "Light Condensed").nameID + inst.postscriptNameID = AddName(font, "Test-LightCondensed").nameID + inst.coordinates = {"wght": 0.7, "wdth": 0.5} + writer = XMLWriter(BytesIO()) + inst.toXML(writer, font) + self.assertEqual([ + '', + '', + '', + '' % ( + inst.postscriptNameID, inst.subfamilyNameID), + '', + '', + '' + ], xml_lines(writer)) + + def test_toXML_withoutPostScriptName(self): + font = MakeFont() + inst = NamedInstance() + inst.flags = 0xABC + inst.subfamilyNameID = AddName(font, "Light Condensed").nameID + inst.coordinates = {"wght": 0.7, "wdth": 0.5} + writer = XMLWriter(BytesIO()) + inst.toXML(writer, font) + self.assertEqual([ + '', + '', + '' % + inst.subfamilyNameID, + '', + '', + '' + ], xml_lines(writer)) + + def test_fromXML_withPostScriptName(self): + inst = NamedInstance() + for name, attrs, content in parseXML( + '' + ' ' + ' ' + ''): + inst.fromXML(name, attrs, content, ttFont=MakeFont()) + self.assertEqual(257, inst.postscriptNameID) + self.assertEqual(345, inst.subfamilyNameID) + self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) + + def test_fromXML_withoutPostScriptName(self): + inst = NamedInstance() + for name, attrs, content in parseXML( + '' + ' ' + ' ' + ''): + inst.fromXML(name, attrs, content, ttFont=MakeFont()) + self.assertEqual(0x123ABC, inst.flags) + self.assertEqual(345, inst.subfamilyNameID) + self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_g_c_i_d_test.py fonttools-3.21.2/Tests/ttLib/tables/_g_c_i_d_test.py --- fonttools-3.0/Tests/ttLib/tables/_g_c_i_d_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_g_c_i_d_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,70 @@ +# coding: utf-8 +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 
import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import newTable +import unittest + + +# On macOS X 10.12.3, the font /Library/Fonts/AppleGothic.ttf has a ‘gcid’ +# table with a similar structure as this test data, just more CIDs. +GCID_DATA = deHexStr( + "0000 0000 " # 0: Format=0, Flags=0 + "0000 0098 " # 4: Size=152 + "0000 " # 8: Registry=0 + "41 64 6F 62 65 " # 10: RegistryName="Adobe" + + ("00" * 59) + # 15: + "0003 " # 74: Order=3 + "4B 6F 72 65 61 31 " # 76: Order="Korea1" + + ("00" * 58) + # 82: + "0001 " # 140: SupplementVersion + "0004 " # 142: Count + "1234 " # 144: CIDs[0/.notdef]=4660 + "FFFF " # 146: CIDs[1/A]=None + "0007 " # 148: CIDs[2/B]=7 + "DEF0 " # 150: CIDs[3/C]=57072 +) # 152: +assert len(GCID_DATA) == 152, len(GCID_DATA) + + +GCID_XML = [ + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +class GCIDTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.maxDiff = None + cls.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D']) + + def testDecompileToXML(self): + table = newTable('gcid') + table.decompile(GCID_DATA, self.font) + self.assertEqual(getXML(table.toXML, self.font), GCID_XML) + + def testCompileFromXML(self): + table = newTable('gcid') + for name, attrs, content in parseXML(GCID_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(GCID_DATA)) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_g_l_y_f_test.py fonttools-3.21.2/Tests/ttLib/tables/_g_l_y_f_test.py --- fonttools-3.0/Tests/ttLib/tables/_g_l_y_f_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_g_l_y_f_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,152 @@ +from __future__ import print_function, division, absolute_import +from 
fontTools.misc.py23 import * +from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates +import sys +import pytest + + +class GlyphCoordinatesTest(object): + + def test_translate(self): + g = GlyphCoordinates([(1,2)]) + g.translate((.5,0)) + assert g == GlyphCoordinates([(1.5,2.0)]) + + def test_scale(self): + g = GlyphCoordinates([(1,2)]) + g.scale((.5,0)) + assert g == GlyphCoordinates([(0.5,0.0)]) + + def test_transform(self): + g = GlyphCoordinates([(1,2)]) + g.transform(((.5,0),(.2,.5))) + assert g[0] == GlyphCoordinates([(0.9,1.0)])[0] + + def test__eq__(self): + g = GlyphCoordinates([(1,2)]) + g2 = GlyphCoordinates([(1.0,2)]) + g3 = GlyphCoordinates([(1.5,2)]) + assert g == g2 + assert not g == g3 + assert not g2 == g3 + assert not g == object() + + def test__ne__(self): + g = GlyphCoordinates([(1,2)]) + g2 = GlyphCoordinates([(1.0,2)]) + g3 = GlyphCoordinates([(1.5,2)]) + assert not (g != g2) + assert g != g3 + assert g2 != g3 + assert g != object() + + def test__pos__(self): + g = GlyphCoordinates([(1,2)]) + g2 = +g + assert g == g2 + + def test__neg__(self): + g = GlyphCoordinates([(1,2)]) + g2 = -g + assert g2 == GlyphCoordinates([(-1, -2)]) + + @pytest.mark.skipif(sys.version_info[0] < 3, + reason="__round___ requires Python 3") + def test__round__(self): + g = GlyphCoordinates([(-1.5,2)]) + g2 = round(g) + assert g2 == GlyphCoordinates([(-2,2)]) + + def test__add__(self): + g1 = GlyphCoordinates([(1,2)]) + g2 = GlyphCoordinates([(3,4)]) + g3 = GlyphCoordinates([(4,6)]) + assert g1 + g2 == g3 + assert g1 + (1, 1) == GlyphCoordinates([(2,3)]) + with pytest.raises(TypeError) as excinfo: + assert g1 + object() + assert 'unsupported operand' in str(excinfo.value) + + def test__sub__(self): + g1 = GlyphCoordinates([(1,2)]) + g2 = GlyphCoordinates([(3,4)]) + g3 = GlyphCoordinates([(-2,-2)]) + assert g1 - g2 == g3 + assert g1 - (1, 1) == GlyphCoordinates([(0,1)]) + with pytest.raises(TypeError) as excinfo: + assert g1 - object() + assert 'unsupported 
operand' in str(excinfo.value) + + def test__rsub__(self): + g = GlyphCoordinates([(1,2)]) + # other + (-self) + assert (1, 1) - g == GlyphCoordinates([(0,-1)]) + + def test__mul__(self): + g = GlyphCoordinates([(1,2)]) + assert g * 3 == GlyphCoordinates([(3,6)]) + assert g * (3,2) == GlyphCoordinates([(3,4)]) + assert g * (1,1) == g + with pytest.raises(TypeError) as excinfo: + assert g * object() + assert 'unsupported operand' in str(excinfo.value) + + def test__truediv__(self): + g = GlyphCoordinates([(1,2)]) + assert g / 2 == GlyphCoordinates([(.5,1)]) + assert g / (1, 2) == GlyphCoordinates([(1,1)]) + assert g / (1, 1) == g + with pytest.raises(TypeError) as excinfo: + assert g / object() + assert 'unsupported operand' in str(excinfo.value) + + def test__iadd__(self): + g = GlyphCoordinates([(1,2)]) + g += (.5,0) + assert g == GlyphCoordinates([(1.5, 2.0)]) + g2 = GlyphCoordinates([(3,4)]) + g += g2 + assert g == GlyphCoordinates([(4.5, 6.0)]) + + def test__isub__(self): + g = GlyphCoordinates([(1,2)]) + g -= (.5, 0) + assert g == GlyphCoordinates([(0.5, 2.0)]) + g2 = GlyphCoordinates([(3,4)]) + g -= g2 + assert g == GlyphCoordinates([(-2.5, -2.0)]) + + def __test__imul__(self): + g = GlyphCoordinates([(1,2)]) + g *= (2,.5) + g *= 2 + assert g == GlyphCoordinates([(4.0, 2.0)]) + g = GlyphCoordinates([(1,2)]) + g *= 2 + assert g == GlyphCoordinates([(2, 4)]) + + def test__itruediv__(self): + g = GlyphCoordinates([(1,3)]) + g /= (.5,1.5) + g /= 2 + assert g == GlyphCoordinates([(1.0, 1.0)]) + + def test__bool__(self): + g = GlyphCoordinates([]) + assert bool(g) == False + g = GlyphCoordinates([(0,0), (0.,0)]) + assert bool(g) == True + g = GlyphCoordinates([(0,0), (1,0)]) + assert bool(g) == True + g = GlyphCoordinates([(0,.5), (0,0)]) + assert bool(g) == True + + def test_double_precision_float(self): + # https://github.com/fonttools/fonttools/issues/963 + afloat = 242.50000000000003 + g = GlyphCoordinates([(afloat, 0)]) + g.toInt() + # this would return 242 if 
the internal array.array typecode is 'f', + # since the Python float is truncated to a C float. + # when using typecode 'd' it should return the correct value 243 + assert g[0][0] == round(afloat) diff -Nru fonttools-3.0/Tests/ttLib/tables/_g_v_a_r_test.py fonttools-3.21.2/Tests/ttLib/tables/_g_v_a_r_test.py --- fonttools-3.0/Tests/ttLib/tables/_g_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_g_v_a_r_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,211 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import TTLibError, getTableClass, getTableModule, newTable +import unittest +from fontTools.ttLib.tables.TupleVariation import TupleVariation + + +gvarClass = getTableClass("gvar") + + +GVAR_DATA = deHexStr( + "0001 0000 " # 0: majorVersion=1 minorVersion=0 + "0002 0000 " # 4: axisCount=2 sharedTupleCount=0 + "0000001C " # 8: offsetToSharedTuples=28 + "0003 0000 " # 12: glyphCount=3 flags=0 + "0000001C " # 16: offsetToGlyphVariationData=28 + "0000 0000 000C 002F " # 20: offsets=[0,0,12,47], times 2: [0,0,24,94], + # # +offsetToGlyphVariationData: [28,28,52,122] + # + # 28: Glyph variation data for glyph #0, ".notdef" + # ------------------------------------------------ + # (no variation data for this glyph) + # + # 28: Glyph variation data for glyph #1, "space" + # ---------------------------------------------- + "8001 000C " # 28: tupleVariationCount=1|TUPLES_SHARE_POINT_NUMBERS, offsetToData=12(+28=40) + "000A " # 32: tvHeader[0].variationDataSize=10 + "8000 " # 34: tvHeader[0].tupleIndex=EMBEDDED_PEAK + "0000 2CCD " # 36: tvHeader[0].peakTuple={wght:0.0, wdth:0.7} + "00 " # 40: all points + "03 01 02 03 04 " # 41: deltaX=[1, 2, 3, 4] + "03 0b 16 21 2C " # 46: deltaY=[11, 22, 33, 44] + "00 " # 51: padding 
+ # + # 52: Glyph variation data for glyph #2, "I" + # ------------------------------------------ + "8002 001c " # 52: tupleVariationCount=2|TUPLES_SHARE_POINT_NUMBERS, offsetToData=28(+52=80) + "0012 " # 56: tvHeader[0].variationDataSize=18 + "C000 " # 58: tvHeader[0].tupleIndex=EMBEDDED_PEAK|INTERMEDIATE_REGION + "2000 0000 " # 60: tvHeader[0].peakTuple={wght:0.5, wdth:0.0} + "0000 0000 " # 64: tvHeader[0].intermediateStart={wght:0.0, wdth:0.0} + "4000 0000 " # 68: tvHeader[0].intermediateEnd={wght:1.0, wdth:0.0} + "0016 " # 72: tvHeader[1].variationDataSize=22 + "A000 " # 74: tvHeader[1].tupleIndex=EMBEDDED_PEAK|PRIVATE_POINTS + "C000 3333 " # 76: tvHeader[1].peakTuple={wght:-1.0, wdth:0.8} + "00 " # 80: all points + "07 03 01 04 01 " # 81: deltaX.len=7, deltaX=[3, 1, 4, 1, + "05 09 02 06 " # 86: 5, 9, 2, 6] + "07 03 01 04 01 " # 90: deltaY.len=7, deltaY=[3, 1, 4, 1, + "05 09 02 06 " # 95: 5, 9, 2, 6] + "06 " # 99: 6 points + "05 00 01 03 01 " # 100: runLen=5(+1=6); delta-encoded run=[0, 1, 4, 5, + "01 01 " # 105: 6, 7] + "05 f8 07 fc 03 fe 01 " # 107: deltaX.len=5, deltaX=[-8,7,-4,3,-2,1] + "05 a8 4d 2c 21 ea 0b " # 114: deltaY.len=5, deltaY=[-88,77,44,33,-22,11] + "00" # 121: padding +) # 122: +assert len(GVAR_DATA) == 122 + + +GVAR_VARIATIONS = { + ".notdef": [ + ], + "space": [ + TupleVariation( + {"wdth": (0.0, 0.7, 0.7)}, + [(1, 11), (2, 22), (3, 33), (4, 44)]), + ], + "I": [ + TupleVariation( + {"wght": (0.0, 0.5, 1.0)}, + [(3,3), (1,1), (4,4), (1,1), (5,5), (9,9), (2,2), (6,6)]), + TupleVariation( + {"wght": (-1.0, -1.0, 0.0), "wdth": (0.0, 0.8, 0.8)}, + [(-8,-88), (7,77), None, None, (-4,44), (3,33), (-2,-22), (1,11)]), + ], +} + + +GVAR_XML = [ + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +GVAR_DATA_EMPTY_VARIATIONS = deHexStr( + "0001 0000 " # 0: majorVersion=1 
minorVersion=0 + "0002 0000 " # 4: axisCount=2 sharedTupleCount=0 + "0000001c " # 8: offsetToSharedTuples=28 + "0003 0000 " # 12: glyphCount=3 flags=0 + "0000001c " # 16: offsetToGlyphVariationData=28 + "0000 0000 0000 0000" # 20: offsets=[0, 0, 0, 0] +) # 28: + + +def hexencode(s): + h = hexStr(s).upper() + return ' '.join([h[i:i+2] for i in range(0, len(h), 2)]) + + +class GVARTableTest(unittest.TestCase): + def makeFont(self, variations): + glyphs=[".notdef", "space", "I"] + Axis = getTableModule("fvar").Axis + Glyph = getTableModule("glyf").Glyph + glyf, fvar, gvar = newTable("glyf"), newTable("fvar"), newTable("gvar") + font = FakeFont(glyphs) + font.tables = {"glyf": glyf, "gvar": gvar, "fvar": fvar} + glyf.glyphs = {glyph: Glyph() for glyph in glyphs} + glyf.glyphs["I"].coordinates = [(10, 10), (10, 20), (20, 20), (20, 10)] + fvar.axes = [Axis(), Axis()] + fvar.axes[0].axisTag, fvar.axes[1].axisTag = "wght", "wdth" + gvar.variations = variations + return font, gvar + + def test_compile(self): + font, gvar = self.makeFont(GVAR_VARIATIONS) + self.assertEqual(hexStr(gvar.compile(font)), hexStr(GVAR_DATA)) + + def test_compile_noVariations(self): + font, gvar = self.makeFont({}) + self.assertEqual(hexStr(gvar.compile(font)), + hexStr(GVAR_DATA_EMPTY_VARIATIONS)) + + def test_compile_emptyVariations(self): + font, gvar = self.makeFont({".notdef": [], "space": [], "I": []}) + self.assertEqual(hexStr(gvar.compile(font)), + hexStr(GVAR_DATA_EMPTY_VARIATIONS)) + + def test_decompile(self): + font, gvar = self.makeFont({}) + gvar.decompile(GVAR_DATA, font) + self.assertEqual(gvar.variations, GVAR_VARIATIONS) + + def test_decompile_noVariations(self): + font, gvar = self.makeFont({}) + gvar.decompile(GVAR_DATA_EMPTY_VARIATIONS, font) + self.assertEqual(gvar.variations, + {".notdef": [], "space": [], "I": []}) + + def test_fromXML(self): + font, gvar = self.makeFont({}) + for name, attrs, content in parseXML(GVAR_XML): + gvar.fromXML(name, attrs, content, ttFont=font) + 
self.assertEqual(gvar.variations, + {g:v for g,v in GVAR_VARIATIONS.items() if v}) + + def test_toXML(self): + font, gvar = self.makeFont(GVAR_VARIATIONS) + self.assertEqual(getXML(gvar.toXML, font), GVAR_XML) + + def test_compileOffsets_shortFormat(self): + self.assertEqual((deHexStr("00 00 00 02 FF C0"), 0), + gvarClass.compileOffsets_([0, 4, 0x1ff80])) + + def test_compileOffsets_longFormat(self): + self.assertEqual((deHexStr("00 00 00 00 00 00 00 04 CA FE BE EF"), 1), + gvarClass.compileOffsets_([0, 4, 0xCAFEBEEF])) + + def test_decompileOffsets_shortFormat(self): + decompileOffsets = gvarClass.decompileOffsets_ + data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") + self.assertEqual( + [2*0x0011, 2*0x2233, 2*0x4455, 2*0x6677, 2*0x8899, 2*0xaabb], + list(decompileOffsets(data, tableFormat=0, glyphCount=5))) + + def test_decompileOffsets_longFormat(self): + decompileOffsets = gvarClass.decompileOffsets_ + data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") + self.assertEqual( + [0x00112233, 0x44556677, 0x8899aabb], + list(decompileOffsets(data, tableFormat=1, glyphCount=2))) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_h_h_e_a_test.py fonttools-3.21.2/Tests/ttLib/tables/_h_h_e_a_test.py --- fonttools-3.0/Tests/ttLib/tables/_h_h_e_a_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_h_h_e_a_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,198 @@ +from __future__ import absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.loggingTools import CapturingLogHandler +from fontTools.misc.testTools import parseXML, getXML +from fontTools.misc.textTools import deHexStr +from fontTools.ttLib import TTFont, newTable +from fontTools.misc.fixedTools import log +import os +import unittest + + +CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +DATA_DIR = os.path.join(CURR_DIR, 'data') + +HHEA_DATA = 
deHexStr( + '0001 0000 ' # 1.0 version + '02EE ' # 750 ascent + 'FF06 ' # -250 descent + '00C8 ' # 200 lineGap + '03E8 ' # 1000 advanceWidthMax + 'FFE7 ' # -25 minLeftSideBearing + 'FFEC ' # -20 minRightSideBearing + '03D1 ' # 977 xMaxExtent + '0000 ' # 0 caretSlopeRise + '0001 ' # 1 caretSlopeRun + '0010 ' # 16 caretOffset + '0000 ' # 0 reserved0 + '0000 ' # 0 reserved1 + '0000 ' # 0 reserved2 + '0000 ' # 0 reserved3 + '0000 ' # 0 metricDataFormat + '002A ' # 42 numberOfHMetrics +) + +HHEA_AS_DICT = { + 'tableTag': 'hhea', + 'tableVersion': 0x00010000, + 'ascent': 750, + 'descent': -250, + 'lineGap': 200, + 'advanceWidthMax': 1000, + 'minLeftSideBearing': -25, + 'minRightSideBearing': -20, + 'xMaxExtent': 977, + 'caretSlopeRise': 0, + 'caretSlopeRun': 1, + 'caretOffset': 16, + 'reserved0': 0, + 'reserved1': 0, + 'reserved2': 0, + 'reserved3': 0, + 'metricDataFormat': 0, + 'numberOfHMetrics': 42, +} + +HHEA_XML = [ + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', +] + +HHEA_XML_VERSION_AS_FLOAT = [ + '', +] + HHEA_XML[1:] + + +class HheaCompileOrToXMLTest(unittest.TestCase): + + def setUp(self): + hhea = newTable('hhea') + hhea.tableVersion = 0x00010000 + hhea.ascent = 750 + hhea.descent = -250 + hhea.lineGap = 200 + hhea.advanceWidthMax = 1000 + hhea.minLeftSideBearing = -25 + hhea.minRightSideBearing = -20 + hhea.xMaxExtent = 977 + hhea.caretSlopeRise = 0 + hhea.caretSlopeRun = 1 + hhea.caretOffset = 16 + hhea.metricDataFormat = 0 + hhea.numberOfHMetrics = 42 + hhea.reserved0 = hhea.reserved1 = hhea.reserved2 = hhea.reserved3 = 0 + self.font = TTFont(sfntVersion='OTTO') + self.font['hhea'] = hhea + + def test_compile(self): + hhea = self.font['hhea'] + hhea.tableVersion = 0x00010000 + self.assertEqual(HHEA_DATA, hhea.compile(self.font)) + + def test_compile_version_10_as_float(self): + hhea = self.font['hhea'] + hhea.tableVersion = 1.0 + with CapturingLogHandler(log, "WARNING") as captor: + 
self.assertEqual(HHEA_DATA, hhea.compile(self.font)) + self.assertTrue( + len([r for r in captor.records + if "Table version value is a float" in r.msg]) == 1) + + def test_toXML(self): + hhea = self.font['hhea'] + self.font['hhea'].tableVersion = 0x00010000 + self.assertEqual(getXML(hhea.toXML), HHEA_XML) + + def test_toXML_version_as_float(self): + hhea = self.font['hhea'] + hhea.tableVersion = 1.0 + with CapturingLogHandler(log, "WARNING") as captor: + self.assertEqual(getXML(hhea.toXML), HHEA_XML) + self.assertTrue( + len([r for r in captor.records + if "Table version value is a float" in r.msg]) == 1) + + +class HheaDecompileOrFromXMLTest(unittest.TestCase): + + def setUp(self): + hhea = newTable('hhea') + self.font = TTFont(sfntVersion='OTTO') + self.font['hhea'] = hhea + + def test_decompile(self): + hhea = self.font['hhea'] + hhea.decompile(HHEA_DATA, self.font) + for key in hhea.__dict__: + self.assertEqual(getattr(hhea, key), HHEA_AS_DICT[key]) + + def test_fromXML(self): + hhea = self.font['hhea'] + for name, attrs, content in parseXML(HHEA_XML): + hhea.fromXML(name, attrs, content, self.font) + for key in hhea.__dict__: + self.assertEqual(getattr(hhea, key), HHEA_AS_DICT[key]) + + def test_fromXML_version_as_float(self): + hhea = self.font['hhea'] + with CapturingLogHandler(log, "WARNING") as captor: + for name, attrs, content in parseXML(HHEA_XML_VERSION_AS_FLOAT): + hhea.fromXML(name, attrs, content, self.font) + self.assertTrue( + len([r for r in captor.records + if "Table version value is a float" in r.msg]) == 1) + for key in hhea.__dict__: + self.assertEqual(getattr(hhea, key), HHEA_AS_DICT[key]) + + +class HheaRecalcTest(unittest.TestCase): + + def test_recalc_TTF(self): + font = TTFont() + font.importXML(os.path.join(DATA_DIR, '_h_h_e_a_recalc_TTF.ttx')) + hhea = font['hhea'] + hhea.recalc(font) + self.assertEqual(hhea.advanceWidthMax, 600) + self.assertEqual(hhea.minLeftSideBearing, -56) + self.assertEqual(hhea.minRightSideBearing, 100) + 
self.assertEqual(hhea.xMaxExtent, 400) + + def test_recalc_OTF(self): + font = TTFont() + font.importXML(os.path.join(DATA_DIR, '_h_h_e_a_recalc_OTF.ttx')) + hhea = font['hhea'] + hhea.recalc(font) + self.assertEqual(hhea.advanceWidthMax, 600) + self.assertEqual(hhea.minLeftSideBearing, -56) + self.assertEqual(hhea.minRightSideBearing, 100) + self.assertEqual(hhea.xMaxExtent, 400) + + def test_recalc_empty(self): + font = TTFont() + font.importXML(os.path.join(DATA_DIR, '_h_h_e_a_recalc_empty.ttx')) + hhea = font['hhea'] + hhea.recalc(font) + self.assertEqual(hhea.advanceWidthMax, 600) + self.assertEqual(hhea.minLeftSideBearing, 0) + self.assertEqual(hhea.minRightSideBearing, 0) + self.assertEqual(hhea.xMaxExtent, 0) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_h_m_t_x_test.py fonttools-3.21.2/Tests/ttLib/tables/_h_m_t_x_test.py --- fonttools-3.0/Tests/ttLib/tables/_h_m_t_x_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_h_m_t_x_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,219 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import parseXML, getXML +from fontTools.misc.textTools import deHexStr +from fontTools.ttLib import TTFont, newTable, TTLibError +from fontTools.misc.loggingTools import CapturingLogHandler +from fontTools.ttLib.tables._h_m_t_x import table__h_m_t_x, log +import struct +import unittest + + +class HmtxTableTest(unittest.TestCase): + + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + @classmethod + def setUpClass(cls): + cls.tableClass = table__h_m_t_x + cls.tag = "hmtx" + + def makeFont(self, numGlyphs, numberOfMetrics): + font = TTFont() + maxp = font['maxp'] = newTable('maxp') + maxp.numGlyphs = numGlyphs + # from A to ... + font.glyphOrder = [chr(i) for i in range(65, 65+numGlyphs)] + headerTag = self.tableClass.headerTag + font[headerTag] = newTable(headerTag) + numberOfMetricsName = self.tableClass.numberOfMetricsName + setattr(font[headerTag], numberOfMetricsName, numberOfMetrics) + return font + + def test_decompile(self): + font = self.makeFont(numGlyphs=3, numberOfMetrics=3) + data = deHexStr("02A2 FFF5 0278 004F 02C6 0036") + + mtxTable = newTable(self.tag) + mtxTable.decompile(data, font) + + self.assertEqual(mtxTable['A'], (674, -11)) + self.assertEqual(mtxTable['B'], (632, 79)) + self.assertEqual(mtxTable['C'], (710, 54)) + + def test_decompile_additional_SB(self): + font = self.makeFont(numGlyphs=4, numberOfMetrics=2) + metrics = deHexStr("02A2 FFF5 0278 004F") + extraSideBearings = deHexStr("0036 FFFC") + data = metrics + extraSideBearings + + mtxTable = newTable(self.tag) + mtxTable.decompile(data, font) + + self.assertEqual(mtxTable['A'], (674, -11)) + self.assertEqual(mtxTable['B'], (632, 79)) + # all following have same width as the previous + self.assertEqual(mtxTable['C'], (632, 54)) + self.assertEqual(mtxTable['D'], (632, -4)) + + def test_decompile_not_enough_data(self): + font = self.makeFont(numGlyphs=1, numberOfMetrics=1) + mtxTable = newTable(self.tag) + msg = "not enough '%s' table data" % self.tag + + with self.assertRaisesRegex(TTLibError, msg): + mtxTable.decompile(b"\0\0\0", font) + + def test_decompile_too_much_data(self): + font = self.makeFont(numGlyphs=1, numberOfMetrics=1) + mtxTable = newTable(self.tag) + msg = "too much '%s' table data" % self.tag + + with CapturingLogHandler(log, "WARNING") as captor: + 
mtxTable.decompile(b"\0\0\0\0\0", font) + + self.assertTrue( + len([r for r in captor.records if msg == r.msg]) == 1) + + def test_decompile_num_metrics_greater_than_glyphs(self): + font = self.makeFont(numGlyphs=1, numberOfMetrics=2) + mtxTable = newTable(self.tag) + msg = "The %s.%s exceeds the maxp.numGlyphs" % ( + self.tableClass.headerTag, self.tableClass.numberOfMetricsName) + + with CapturingLogHandler(log, "WARNING") as captor: + mtxTable.decompile(b"\0\0\0\0", font) + + self.assertTrue( + len([r for r in captor.records if msg == r.msg]) == 1) + + def test_decompile_possibly_negative_advance(self): + font = self.makeFont(numGlyphs=1, numberOfMetrics=1) + # we warn if advance is > 0x7FFF as it might be interpreted as signed + # by some authoring tools + data = deHexStr("8000 0000") + mtxTable = newTable(self.tag) + + with CapturingLogHandler(log, "WARNING") as captor: + mtxTable.decompile(data, font) + + self.assertTrue( + len([r for r in captor.records + if "has a huge advance" in r.msg]) == 1) + + def test_compile(self): + # we set the wrong 'numberOfMetrics' to check it gets adjusted + font = self.makeFont(numGlyphs=3, numberOfMetrics=4) + mtxTable = font[self.tag] = newTable(self.tag) + mtxTable.metrics = { + 'A': (674, -11), + 'B': (632, 79), + 'C': (710, 54), + } + + data = mtxTable.compile(font) + + self.assertEqual(data, deHexStr("02A2 FFF5 0278 004F 02C6 0036")) + + headerTable = font[self.tableClass.headerTag] + self.assertEqual( + getattr(headerTable, self.tableClass.numberOfMetricsName), 3) + + def test_compile_additional_SB(self): + font = self.makeFont(numGlyphs=4, numberOfMetrics=1) + mtxTable = font[self.tag] = newTable(self.tag) + mtxTable.metrics = { + 'A': (632, -11), + 'B': (632, 79), + 'C': (632, 54), + 'D': (632, -4), + } + + data = mtxTable.compile(font) + + self.assertEqual(data, deHexStr("0278 FFF5 004F 0036 FFFC")) + + def test_compile_negative_advance(self): + font = self.makeFont(numGlyphs=1, numberOfMetrics=1) + mtxTable = 
font[self.tag] = newTable(self.tag) + mtxTable.metrics = {'A': [-1, 0]} + + with CapturingLogHandler(log, "ERROR") as captor: + with self.assertRaisesRegex(TTLibError, "negative advance"): + mtxTable.compile(font) + + self.assertTrue( + len([r for r in captor.records + if "Glyph 'A' has negative advance" in r.msg]) == 1) + + def test_compile_struct_out_of_range(self): + font = self.makeFont(numGlyphs=1, numberOfMetrics=1) + mtxTable = font[self.tag] = newTable(self.tag) + mtxTable.metrics = {'A': (0xFFFF+1, -0x8001)} + + with self.assertRaises(struct.error): + mtxTable.compile(font) + + def test_compile_round_float_values(self): + font = self.makeFont(numGlyphs=3, numberOfMetrics=2) + mtxTable = font[self.tag] = newTable(self.tag) + mtxTable.metrics = { + 'A': (0.5, 0.5), # round -> (0, 0) + 'B': (0.1, 0.9), # round -> (0, 1) + 'C': (0.1, 0.1), # round -> (0, 0) + } + + data = mtxTable.compile(font) + + self.assertEqual(data, deHexStr("0000 0000 0000 0001 0000")) + + def test_toXML(self): + font = self.makeFont(numGlyphs=2, numberOfMetrics=2) + mtxTable = font[self.tag] = newTable(self.tag) + mtxTable.metrics = {'B': (632, 79), 'A': (674, -11)} + + self.assertEqual( + getXML(mtxTable.toXML), + ('\n' + '' % ( + (self.tableClass.advanceName, + self.tableClass.sideBearingName) * 2)).split('\n')) + + def test_fromXML(self): + mtxTable = newTable(self.tag) + + for name, attrs, content in parseXML( + '' + '' % ( + (self.tableClass.advanceName, + self.tableClass.sideBearingName) * 2)): + mtxTable.fromXML(name, attrs, content, ttFont=None) + + self.assertEqual( + mtxTable.metrics, {'A': (674, -11), 'B': (632, 79)}) + + def test_delitem(self): + mtxTable = newTable(self.tag) + mtxTable.metrics = {'A': (0, 0)} + + del mtxTable['A'] + + self.assertTrue('A' not in mtxTable.metrics) + + def test_setitem(self): + mtxTable = newTable(self.tag) + mtxTable.metrics = {'A': (674, -11), 'B': (632, 79)} + mtxTable['B'] = [0, 0] # list is converted to tuple + + 
self.assertEqual(mtxTable.metrics, {'A': (674, -11), 'B': (0, 0)}) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_k_e_r_n_test.py fonttools-3.21.2/Tests/ttLib/tables/_k_e_r_n_test.py --- fonttools-3.0/Tests/ttLib/tables/_k_e_r_n_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_k_e_r_n_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,370 @@ +from __future__ import print_function, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import newTable +from fontTools.ttLib.tables._k_e_r_n import ( + KernTable_format_0, KernTable_format_unkown) +from fontTools.misc.textTools import deHexStr +from fontTools.misc.testTools import FakeFont, getXML, parseXML +import pytest + + +KERN_VER_0_FMT_0_DATA = deHexStr( + '0000 ' # 0: version=0 + '0001 ' # 2: nTables=1 + '0000 ' # 4: version=0 (bogus field, unused) + '0020 ' # 6: length=32 + '00 ' # 8: format=0 + '01 ' # 9: coverage=1 + '0003 ' # 10: nPairs=3 + '000C ' # 12: searchRange=12 + '0001 ' # 14: entrySelector=1 + '0006 ' # 16: rangeShift=6 + '0004 000C FFD8 ' # 18: l=4, r=12, v=-40 + '0004 001C 0028 ' # 24: l=4, r=28, v=40 + '0005 0028 FFCE ' # 30: l=5, r=40, v=-50 +) +assert len(KERN_VER_0_FMT_0_DATA) == 36 + +KERN_VER_0_FMT_0_XML = [ + '', + '', + ' ', + ' ', + ' ', + '', +] + +KERN_VER_1_FMT_0_DATA = deHexStr( + '0001 0000 ' # 0: version=1 + '0000 0001 ' # 4: nTables=1 + '0000 0022 ' # 8: length=34 + '00 ' # 12: coverage=0 + '00 ' # 13: format=0 + '0000 ' # 14: tupleIndex=0 + '0003 ' # 16: nPairs=3 + '000C ' # 18: searchRange=12 + '0001 ' # 20: entrySelector=1 + '0006 ' # 22: rangeShift=6 + '0004 000C FFD8 ' # 24: l=4, r=12, v=-40 + '0004 001C 0028 ' # 30: l=4, r=28, v=40 + '0005 0028 FFCE ' # 36: l=5, r=40, v=-50 +) +assert len(KERN_VER_1_FMT_0_DATA) == 42 + +KERN_VER_1_FMT_0_XML = [ + '', + '', + ' ', + ' ', + ' ', + '', +] + +KERN_VER_0_FMT_UNKNOWN_DATA = deHexStr( + '0000 ' # 0: 
version=0 + '0002 ' # 2: nTables=2 + '0000 ' # 4: version=0 + '000A ' # 6: length=10 + '04 ' # 8: format=4 (format 4 doesn't exist) + '01 ' # 9: coverage=1 + '1234 5678 ' # 10: garbage... + '0000 ' # 14: version=0 + '000A ' # 16: length=10 + '05 ' # 18: format=5 (format 5 doesn't exist) + '01 ' # 19: coverage=1 + '9ABC DEF0 ' # 20: garbage... +) +assert len(KERN_VER_0_FMT_UNKNOWN_DATA) == 24 + +KERN_VER_0_FMT_UNKNOWN_XML = [ + '', + '', + " ", + ' 0000000A 04011234', + ' 5678 ', + '', + '', + "", + ' 0000000A 05019ABC', + ' DEF0 ', + '', +] + +KERN_VER_1_FMT_UNKNOWN_DATA = deHexStr( + '0001 0000 ' # 0: version=1 + '0000 0002 ' # 4: nTables=2 + '0000 000C ' # 8: length=12 + '00 ' # 12: coverage=0 + '04 ' # 13: format=4 (format 4 doesn't exist) + '0000 ' # 14: tupleIndex=0 + '1234 5678' # 16: garbage... + '0000 000C ' # 20: length=12 + '00 ' # 24: coverage=0 + '05 ' # 25: format=5 (format 5 doesn't exist) + '0000 ' # 26: tupleIndex=0 + '9ABC DEF0 ' # 28: garbage... +) +assert len(KERN_VER_1_FMT_UNKNOWN_DATA) == 32 + +KERN_VER_1_FMT_UNKNOWN_XML = [ + '', + '', + " ", + ' 0000000C 00040000', + ' 12345678 ', + '', + '', + " ", + ' 0000000C 00050000', + ' 9ABCDEF0 ', + '', +] + + +@pytest.fixture +def font(): + return FakeFont(list("ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz")) + + +class KernTableTest(object): + + @pytest.mark.parametrize( + "data, version", + [ + (KERN_VER_0_FMT_0_DATA, 0), + (KERN_VER_1_FMT_0_DATA, 1.0), + ], + ids=["version_0", "version_1"] + ) + def test_decompile_single_format_0(self, data, font, version): + kern = newTable("kern") + kern.decompile(data, font) + + assert kern.version == version + assert len(kern.kernTables) == 1 + + st = kern.kernTables[0] + assert st.apple is (version == 1.0) + assert st.format == 0 + # horizontal kerning in OT kern is coverage 0x01, while in + # AAT kern it's the default (0) + assert st.coverage == (0 if st.apple else 1) + assert st.tupleIndex == (0 if st.apple else None) + assert len(st.kernTable) 
== 3 + assert st.kernTable == { + ('E', 'M'): -40, + ('E', 'c'): 40, + ('F', 'o'): -50 + } + + @pytest.mark.parametrize( + "version, expected", + [ + (0, KERN_VER_0_FMT_0_DATA), + (1.0, KERN_VER_1_FMT_0_DATA), + ], + ids=["version_0", "version_1"] + ) + def test_compile_single_format_0(self, font, version, expected): + kern = newTable("kern") + kern.version = version + apple = version == 1.0 + st = KernTable_format_0(apple) + kern.kernTables = [st] + st.coverage = (0 if apple else 1) + st.tupleIndex = 0 if apple else None + st.kernTable = { + ('E', 'M'): -40, + ('E', 'c'): 40, + ('F', 'o'): -50 + } + data = kern.compile(font) + assert data == expected + + @pytest.mark.parametrize( + "xml, version", + [ + (KERN_VER_0_FMT_0_XML, 0), + (KERN_VER_1_FMT_0_XML, 1.0), + ], + ids=["version_0", "version_1"] + ) + def test_fromXML_single_format_0(self, xml, font, version): + kern = newTable("kern") + for name, attrs, content in parseXML(xml): + kern.fromXML(name, attrs, content, ttFont=font) + + assert kern.version == version + assert len(kern.kernTables) == 1 + + st = kern.kernTables[0] + assert st.apple is (version == 1.0) + assert st.format == 0 + assert st.coverage == (0 if st.apple else 1) + assert st.tupleIndex == (0 if st.apple else None) + assert len(st.kernTable) == 3 + assert st.kernTable == { + ('E', 'M'): -40, + ('E', 'c'): 40, + ('F', 'o'): -50 + } + + @pytest.mark.parametrize( + "version, expected", + [ + (0, KERN_VER_0_FMT_0_XML), + (1.0, KERN_VER_1_FMT_0_XML), + ], + ids=["version_0", "version_1"] + ) + def test_toXML_single_format_0(self, font, version, expected): + kern = newTable("kern") + kern.version = version + apple = version == 1.0 + st = KernTable_format_0(apple) + kern.kernTables = [st] + st.coverage = 0 if apple else 1 + st.tupleIndex = 0 if apple else None + st.kernTable = { + ('E', 'M'): -40, + ('E', 'c'): 40, + ('F', 'o'): -50 + } + xml = getXML(kern.toXML, font) + assert xml == expected + + @pytest.mark.parametrize( + "data, version, 
header_length, st_length", + [ + (KERN_VER_0_FMT_UNKNOWN_DATA, 0, 4, 10), + (KERN_VER_1_FMT_UNKNOWN_DATA, 1.0, 8, 12), + ], + ids=["version_0", "version_1"] + ) + def test_decompile_format_unknown( + self, data, font, version, header_length, st_length): + kern = newTable("kern") + kern.decompile(data, font) + + assert kern.version == version + assert len(kern.kernTables) == 2 + + st_data = data[header_length:] + st0 = kern.kernTables[0] + assert st0.format == 4 + assert st0.data == st_data[:st_length] + st_data = st_data[st_length:] + + st1 = kern.kernTables[1] + assert st1.format == 5 + assert st1.data == st_data[:st_length] + + @pytest.mark.parametrize( + "version, st_length, expected", + [ + (0, 10, KERN_VER_0_FMT_UNKNOWN_DATA), + (1.0, 12, KERN_VER_1_FMT_UNKNOWN_DATA), + ], + ids=["version_0", "version_1"] + ) + def test_compile_format_unknown(self, version, st_length, expected): + kern = newTable("kern") + kern.version = version + kern.kernTables = [] + + for unknown_fmt, kern_data in zip((4, 5), ("1234 5678", "9ABC DEF0")): + if version > 0: + coverage = 0 + header_fmt = deHexStr( + "%08X %02X %02X %04X" % ( + st_length, coverage, unknown_fmt, 0)) + else: + coverage = 1 + header_fmt = deHexStr( + "%04X %04X %02X %02X" % ( + 0, st_length, unknown_fmt, coverage)) + st = KernTable_format_unkown(unknown_fmt) + st.data = header_fmt + deHexStr(kern_data) + kern.kernTables.append(st) + + data = kern.compile(font) + assert data == expected + + @pytest.mark.parametrize( + "xml, version, st_length", + [ + (KERN_VER_0_FMT_UNKNOWN_XML, 0, 10), + (KERN_VER_1_FMT_UNKNOWN_XML, 1.0, 12), + ], + ids=["version_0", "version_1"] + ) + def test_fromXML_format_unknown(self, xml, font, version, st_length): + kern = newTable("kern") + for name, attrs, content in parseXML(xml): + kern.fromXML(name, attrs, content, ttFont=font) + + assert kern.version == version + assert len(kern.kernTables) == 2 + + st0 = kern.kernTables[0] + assert st0.format == 4 + assert len(st0.data) == st_length 
+ + st1 = kern.kernTables[1] + assert st1.format == 5 + assert len(st1.data) == st_length + + @pytest.mark.parametrize( + "version", [0, 1.0], ids=["version_0", "version_1"]) + def test_toXML_format_unknown(self, font, version): + kern = newTable("kern") + kern.version = version + st = KernTable_format_unkown(4) + st.data = b"ABCD" + kern.kernTables = [st] + + xml = getXML(kern.toXML, font) + + assert xml == [ + '' % version, + '', + ' ', + ' 41424344 ', + '', + ] + + def test_getkern(self): + table = newTable("kern") + table.version = 0 + table.kernTables = [] + + assert table.getkern(0) is None + + st0 = KernTable_format_0() + table.kernTables.append(st0) + + assert table.getkern(0) is st0 + assert table.getkern(4) is None + + st1 = KernTable_format_unkown(4) + table.kernTables.append(st1) + + +class KernTable_format_0_Test(object): + + def test_decompileBadGlyphId(self, font): + subtable = KernTable_format_0() + subtable.decompile( + b'\x00' + b'\x00' + b'\x00' + b'\x1a' + b'\x00' + b'\x00' + + b'\x00' + b'\x02' + b'\x00' * 6 + + b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01' + + b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02', + font) + assert subtable[("B", "D")] == 1 + assert subtable[("B", "glyph65535")] == 2 + + +if __name__ == "__main__": + import sys + sys.exit(pytest.main(sys.argv)) diff -Nru fonttools-3.0/Tests/ttLib/tables/_l_c_a_r_test.py fonttools-3.21.2/Tests/ttLib/tables/_l_c_a_r_test.py --- fonttools-3.0/Tests/ttLib/tables/_l_c_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_l_c_a_r_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,109 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import newTable +import unittest + + +# Example: Format 0 Ligature Caret Table +# 
https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6lcar.html +LCAR_FORMAT_0_DATA = deHexStr( + '0001 0000 0000 ' # 0: Version=1.0, Format=0 + '0006 0004 0002 ' # 6: LookupFormat=6, UnitSize=4, NUnits=2 + '0008 0001 0000 ' # 12: SearchRange=8, EntrySelector=1, RangeShift=0 + '0001 001E ' # 18: Glyph=1 (f_r), OffsetOfLigCaretEntry=30 + '0003 0022 ' # 22: Glyph=3 (f_f_l), OffsetOfLigCaretEntry=34 + 'FFFF 0000 ' # 26: Glyph=, OffsetOfLigCaretEntry=0 + '0001 00DC ' # 30: DivisionPointCount=1, DivisionPoint=[220] + '0002 00EF 01D8 ' # 34: DivisionPointCount=2, DivisionPoint=[239, 475] +) # 40: +assert(len(LCAR_FORMAT_0_DATA) == 40) + + +LCAR_FORMAT_0_XML = [ + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +# Example: Format 1 Ligature Caret Table +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6lcar.html +LCAR_FORMAT_1_DATA = deHexStr( + '0001 0000 0001 ' # 0: Version=1.0, Format=1 + '0006 0004 0002 ' # 6: LookupFormat=6, UnitSize=4, NUnits=2 + '0008 0001 0000 ' # 12: SearchRange=8, EntrySelector=1, RangeShift=0 + '0001 001E ' # 18: Glyph=1 (f_r), OffsetOfLigCaretEntry=30 + '0003 0022 ' # 22: Glyph=3 (f_f_l), OffsetOfLigCaretEntry=34 + 'FFFF 0000 ' # 26: Glyph=, OffsetOfLigCaretEntry=0 + '0001 0032 ' # 30: DivisionPointCount=1, DivisionPoint=[50] + '0002 0037 004B ' # 34: DivisionPointCount=2, DivisionPoint=[55, 75] +) # 40: +assert(len(LCAR_FORMAT_1_DATA) == 40) + + +LCAR_FORMAT_1_XML = [ + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +class LCARTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + cls.font = FakeFont(['.notdef', 'f_r', 'X', 'f_f_l']) + + def test_decompile_toXML_format0(self): + table = newTable('lcar') + table.decompile(LCAR_FORMAT_0_DATA, self.font) + self.assertEqual(getXML(table.toXML), LCAR_FORMAT_0_XML) + + def test_compile_fromXML_format0(self): + table = 
newTable('lcar') + for name, attrs, content in parseXML(LCAR_FORMAT_0_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(LCAR_FORMAT_0_DATA)) + + def test_decompile_toXML_format1(self): + table = newTable('lcar') + table.decompile(LCAR_FORMAT_1_DATA, self.font) + self.assertEqual(getXML(table.toXML), LCAR_FORMAT_1_XML) + + def test_compile_fromXML_format1(self): + table = newTable('lcar') + for name, attrs, content in parseXML(LCAR_FORMAT_1_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(LCAR_FORMAT_1_DATA)) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_l_t_a_g_test.py fonttools-3.21.2/Tests/ttLib/tables/_l_t_a_g_test.py --- fonttools-3.0/Tests/ttLib/tables/_l_t_a_g_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_l_t_a_g_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,63 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.testTools import parseXML +from fontTools.misc.xmlWriter import XMLWriter +import os +import struct +import unittest +from fontTools.ttLib import newTable + + +class Test_l_t_a_g(unittest.TestCase): + + DATA_ = struct.pack(b">LLLHHHHHH", 1, 0, 3, 24 + 0, 2, 24 + 2, 7, 24 + 2, 2) + b"enzh-Hant" + TAGS_ = ["en", "zh-Hant", "zh"] + + def test_addTag(self): + table = newTable("ltag") + self.assertEqual(table.addTag("de-CH"), 0) + self.assertEqual(table.addTag("gsw-LI"), 1) + self.assertEqual(table.addTag("de-CH"), 0) + self.assertEqual(table.tags, ["de-CH", "gsw-LI"]) + + def test_decompile_compile(self): + table = newTable("ltag") + table.decompile(self.DATA_, ttFont=None) + self.assertEqual(1, table.version) + self.assertEqual(0, table.flags) + self.assertEqual(self.TAGS_, table.tags) + compiled = 
table.compile(ttFont=None) + self.assertEqual(self.DATA_, compiled) + self.assertIsInstance(compiled, bytes) + + def test_fromXML(self): + table = newTable("ltag") + for name, attrs, content in parseXML( + '' + '' + '' + ''): + table.fromXML(name, attrs, content, ttFont=None) + self.assertEqual(1, table.version) + self.assertEqual(777, table.flags) + self.assertEqual(["sr-Latn", "fa"], table.tags) + + def test_toXML(self): + writer = XMLWriter(BytesIO()) + table = newTable("ltag") + table.decompile(self.DATA_, ttFont=None) + table.toXML(writer, ttFont=None) + expected = os.linesep.join([ + '', + '', + '', + '', + '', + '' + ]) + os.linesep + self.assertEqual(expected.encode("utf_8"), writer.file.getvalue()) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_m_e_t_a_test.py fonttools-3.21.2/Tests/ttLib/tables/_m_e_t_a_test.py --- fonttools-3.0/Tests/ttLib/tables/_m_e_t_a_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_m_e_t_a_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,95 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import parseXML +from fontTools.misc.textTools import deHexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib import TTLibError +from fontTools.ttLib.tables._m_e_t_a import table__m_e_t_a +import unittest + + +# From a real font on MacOS X, but substituted 'bild' tag by 'TEST', +# and shortened the payload. +META_DATA = deHexStr( + "00 00 00 01 00 00 00 00 00 00 00 1C 00 00 00 01 " + "54 45 53 54 00 00 00 1C 00 00 00 04 CA FE BE EF") + +# The 'dlng' and 'slng' tag with text data containing "augmented" BCP 47 +# comma-separated or comma-space-separated tags. These should be UTF-8 encoded +# text. 
+META_DATA_TEXT = deHexStr( + "00 00 00 01 00 00 00 00 00 00 00 28 00 00 00 02 " + "64 6C 6E 67 00 00 00 28 00 00 00 0E 73 6C 6E 67 " + "00 00 00 36 00 00 00 0E 4C 61 74 6E 2C 47 72 65 " + "6B 2C 43 79 72 6C 4C 61 74 6E 2C 47 72 65 6B 2C " + "43 79 72 6C") + +class MetaTableTest(unittest.TestCase): + def test_decompile(self): + table = table__m_e_t_a() + table.decompile(META_DATA, ttFont={"meta": table}) + self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) + + def test_compile(self): + table = table__m_e_t_a() + table.data["TEST"] = b"\xCA\xFE\xBE\xEF" + self.assertEqual(META_DATA, table.compile(ttFont={"meta": table})) + + def test_decompile_text(self): + table = table__m_e_t_a() + table.decompile(META_DATA_TEXT, ttFont={"meta": table}) + self.assertEqual({"dlng": u"Latn,Grek,Cyrl", + "slng": u"Latn,Grek,Cyrl"}, table.data) + + def test_compile_text(self): + table = table__m_e_t_a() + table.data["dlng"] = u"Latn,Grek,Cyrl" + table.data["slng"] = u"Latn,Grek,Cyrl" + self.assertEqual(META_DATA_TEXT, table.compile(ttFont={"meta": table})) + + def test_toXML(self): + table = table__m_e_t_a() + table.data["TEST"] = b"\xCA\xFE\xBE\xEF" + writer = XMLWriter(BytesIO()) + table.toXML(writer, {"meta": table}) + xml = writer.file.getvalue().decode("utf-8") + self.assertEqual([ + '', + 'cafebeef', + '' + ], [line.strip() for line in xml.splitlines()][1:]) + + def test_fromXML(self): + table = table__m_e_t_a() + for name, attrs, content in parseXML( + '' + ' cafebeef' + ''): + table.fromXML(name, attrs, content, ttFont=None) + self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) + + def test_toXML_text(self): + table = table__m_e_t_a() + table.data["dlng"] = u"Latn,Grek,Cyrl" + writer = XMLWriter(BytesIO()) + table.toXML(writer, {"meta": table}) + xml = writer.file.getvalue().decode("utf-8") + self.assertEqual([ + '', + 'Latn,Grek,Cyrl', + '' + ], [line.strip() for line in xml.splitlines()][1:]) + + def test_fromXML_text(self): + table = table__m_e_t_a() + for 
name, attrs, content in parseXML( + '' + ' Latn,Grek,Cyrl' + ''): + table.fromXML(name, attrs, content, ttFont=None) + self.assertEqual({"dlng": u"Latn,Grek,Cyrl"}, table.data) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_m_o_r_t_test.py fonttools-3.21.2/Tests/ttLib/tables/_m_o_r_t_test.py --- fonttools-3.0/Tests/ttLib/tables/_m_o_r_t_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_m_o_r_t_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,115 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import newTable +import unittest + + +# Glyph Metamorphosis Table Examples +# Example 1: Non-contextual Glyph Substitution +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6mort.html +# The example given by Apple's 'mort' specification is suboptimally +# encoded: it uses AAT lookup format 6 even though format 8 would be +# more compact. Because our encoder always uses the most compact +# encoding, this breaks our round-trip testing. Therefore, we changed +# the example to use GlyphID 13 instead of 12 for the 'parenright' +# character; the non-contiguous glyph range for the AAT lookup makes +# format 6 to be most compact. 
+MORT_NONCONTEXTUAL_DATA = deHexStr( + '0001 0000 ' # 0: Version=1.0 + '0000 0001 ' # 4: MorphChainCount=1 + '0000 0001 ' # 8: DefaultFlags=1 + '0000 0050 ' # 12: StructLength=80 + '0003 0001 ' # 16: MorphFeatureCount=3, MorphSubtableCount=1 + '0004 0000 ' # 20: Feature[0].FeatureType=4/VertSubst, .FeatureSetting=on + '0000 0001 ' # 24: Feature[0].EnableFlags=0x00000001 + 'FFFF FFFF ' # 28: Feature[0].DisableFlags=0xFFFFFFFF + '0004 0001 ' # 32: Feature[1].FeatureType=4/VertSubst, .FeatureSetting=off + '0000 0000 ' # 36: Feature[1].EnableFlags=0x00000000 + 'FFFF FFFE ' # 40: Feature[1].DisableFlags=0xFFFFFFFE + '0000 0001 ' # 44: Feature[2].FeatureType=0/GlyphEffects, .FeatSetting=off + '0000 0000 ' # 48: Feature[2].EnableFlags=0 (required for last feature) + '0000 0000 ' # 52: Feature[2].EnableFlags=0 (required for last feature) + '0020 ' # 56: Subtable[0].StructLength=32 + '80 ' # 58: Subtable[0].CoverageFlags=0x80 + '04 ' # 59: Subtable[0].MorphType=4/NoncontextualMorph + '0000 0001 ' # 60: Subtable[0].SubFeatureFlags=0x1 + '0006 0004 ' # 64: LookupFormat=6, UnitSize=4 + '0002 0008 ' # 68: NUnits=2, SearchRange=8 + '0001 0000 ' # 72: EntrySelector=1, RangeShift=0 + '000B 0087 ' # 76: Glyph=11 (parenleft); Value=135 (parenleft.vertical) + '000D 0088 ' # 80: Glyph=13 (parenright); Value=136 (parenright.vertical) + 'FFFF 0000 ' # 84: Glyph=; Value=0 +) # 88: +assert len(MORT_NONCONTEXTUAL_DATA) == 88 + + +MORT_NONCONTEXTUAL_XML = [ + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +class MORTNoncontextualGlyphSubstitutionTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + glyphs = ['.notdef'] + ['g.%d' % i for i in range (1, 140)] + glyphs[11], glyphs[13] = 'parenleft', 'parenright' + glyphs[135], glyphs[136] = 
'parenleft.vertical', 'parenright.vertical' + cls.font = FakeFont(glyphs) + + def test_decompile_toXML(self): + table = newTable('mort') + table.decompile(MORT_NONCONTEXTUAL_DATA, self.font) + self.assertEqual(getXML(table.toXML), MORT_NONCONTEXTUAL_XML) + + def test_compile_fromXML(self): + table = newTable('mort') + for name, attrs, content in parseXML(MORT_NONCONTEXTUAL_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(MORT_NONCONTEXTUAL_DATA)) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_m_o_r_x_test.py fonttools-3.21.2/Tests/ttLib/tables/_m_o_r_x_test.py --- fonttools-3.0/Tests/ttLib/tables/_m_o_r_x_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_m_o_r_x_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,903 @@ +# coding: utf-8 +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import newTable +import unittest + + +# A simple 'morx' table with non-contextual glyph substitution. +# Unfortunately, the Apple spec for 'morx' does not contain a complete example. 
+# The test case has therefore been adapted from the example 'mort' table in +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6mort.html +MORX_NONCONTEXTUAL_DATA = deHexStr( + '0002 0000 ' # 0: Version=2, Reserved=0 + '0000 0001 ' # 4: MorphChainCount=1 + '0000 0001 ' # 8: DefaultFlags=1 + '0000 0058 ' # 12: StructLength=88 + '0000 0003 ' # 16: MorphFeatureCount=3 + '0000 0001 ' # 20: MorphSubtableCount=1 + '0004 0000 ' # 24: Feature[0].FeatureType=4/VertSubst, .FeatureSetting=on + '0000 0001 ' # 28: Feature[0].EnableFlags=0x00000001 + 'FFFF FFFF ' # 32: Feature[0].DisableFlags=0xFFFFFFFF + '0004 0001 ' # 36: Feature[1].FeatureType=4/VertSubst, .FeatureSetting=off + '0000 0000 ' # 40: Feature[1].EnableFlags=0x00000000 + 'FFFF FFFE ' # 44: Feature[1].DisableFlags=0xFFFFFFFE + '0000 0001 ' # 48: Feature[2].FeatureType=0/GlyphEffects, .FeatSetting=off + '0000 0000 ' # 52: Feature[2].EnableFlags=0 (required for last feature) + '0000 0000 ' # 56: Feature[2].EnableFlags=0 (required for last feature) + '0000 0024 ' # 60: Subtable[0].StructLength=36 + '80 ' # 64: Subtable[0].CoverageFlags=0x80 + '00 00 ' # 65: Subtable[0].Reserved=0 + '04 ' # 67: Subtable[0].MorphType=4/NoncontextualMorph + '0000 0001 ' # 68: Subtable[0].SubFeatureFlags=0x1 + '0006 0004 ' # 72: LookupFormat=6, UnitSize=4 + '0002 0008 ' # 76: NUnits=2, SearchRange=8 + '0001 0000 ' # 80: EntrySelector=1, RangeShift=0 + '000B 0087 ' # 84: Glyph=11 (parenleft); Value=135 (parenleft.vertical) + '000D 0088 ' # 88: Glyph=13 (parenright); Value=136 (parenright.vertical) + 'FFFF 0000 ' # 92: Glyph=; Value=0 +) # 96: +assert len(MORX_NONCONTEXTUAL_DATA) == 96 + + +MORX_NONCONTEXTUAL_XML = [ + '', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +MORX_REARRANGEMENT_DATA = 
deHexStr( + '0002 0000 ' # 0: Version=2, Reserved=0 + '0000 0001 ' # 4: MorphChainCount=1 + '0000 0001 ' # 8: DefaultFlags=1 + '0000 0078 ' # 12: StructLength=120 (+8=128) + '0000 0000 ' # 16: MorphFeatureCount=0 + '0000 0001 ' # 20: MorphSubtableCount=1 + '0000 0068 ' # 24: Subtable[0].StructLength=104 (+24=128) + '80 ' # 28: Subtable[0].CoverageFlags=0x80 + '00 00 ' # 29: Subtable[0].Reserved=0 + '00 ' # 31: Subtable[0].MorphType=0/RearrangementMorph + '0000 0001 ' # 32: Subtable[0].SubFeatureFlags=0x1 + '0000 0006 ' # 36: STXHeader.ClassCount=6 + '0000 0010 ' # 40: STXHeader.ClassTableOffset=16 (+36=52) + '0000 0028 ' # 44: STXHeader.StateArrayOffset=40 (+36=76) + '0000 004C ' # 48: STXHeader.EntryTableOffset=76 (+36=112) + '0006 0004 ' # 52: ClassTable.LookupFormat=6, .UnitSize=4 + '0002 0008 ' # 56: .NUnits=2, .SearchRange=8 + '0001 0000 ' # 60: .EntrySelector=1, .RangeShift=0 + '0001 0005 ' # 64: Glyph=A; Class=5 + '0003 0004 ' # 68: Glyph=C; Class=4 + 'FFFF 0000 ' # 72: Glyph=; Value=0 + '0000 0001 0002 0003 0002 0001 ' # 76: State[0][0..5] + '0003 0003 0003 0003 0003 0003 ' # 88: State[1][0..5] + '0001 0003 0003 0003 0002 0002 ' # 100: State[2][0..5] + '0002 FFFF ' # 112: Entries[0].NewState=2, .Flags=0xFFFF + '0001 A00D ' # 116: Entries[1].NewState=1, .Flags=0xA00D + '0000 8006 ' # 120: Entries[2].NewState=0, .Flags=0x8006 + '0002 0000 ' # 124: Entries[3].NewState=2, .Flags=0x0000 +) # 128: +assert len(MORX_REARRANGEMENT_DATA) == 128, len(MORX_REARRANGEMENT_DATA) + + +MORX_REARRANGEMENT_XML = [ + '', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' 
', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +# Taken from “Example 1: A contextal substituation table” in +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html +# as retrieved on 2017-09-05. +# +# Compared to the example table in Apple’s specification, we’ve +# made the following changes: +# +# * at offsets 0..35, we’ve prepended 36 bytes of boilerplate +# to make the data a structurally valid ‘morx’ table; +# +# * at offset 36 (offset 0 in Apple’s document), we’ve changed +# the number of glyph classes from 5 to 6 because the encoded +# finite-state machine has transitions for six different glyph +# classes (0..5); +# +# * at offset 52 (offset 16 in Apple’s document), we’ve replaced +# the presumably leftover ‘XXX’ mark by an actual data offset; +# +# * at offset 72 (offset 36 in Apple’s document), we’ve changed +# the input GlyphID from 51 to 52. With the original value of 51, +# the glyph class lookup table can be encoded with equally many +# bytes in either format 2 or 6; after changing the GlyphID to 52, +# the most compact encoding is lookup format 6, as used in Apple’s +# example; +# +# * at offset 90 (offset 54 in Apple’s document), we’ve changed +# the value for the lookup end-of-table marker from 1 to 0. +# Fonttools always uses zero for this value, whereas Apple’s +# spec examples are inconsistently using one of {0, 1, 0xFFFF} +# for this filler value; +# +# * at offset 172 (offset 136 in Apple’s document), we’ve again changed +# the input GlyphID from 51 to 52, for the same reason as above. +# +# TODO: Ask Apple to fix “Example 1” in the ‘morx’ specification. 
+MORX_CONTEXTUAL_DATA = deHexStr( + '0002 0000 ' # 0: Version=2, Reserved=0 + '0000 0001 ' # 4: MorphChainCount=1 + '0000 0001 ' # 8: DefaultFlags=1 + '0000 00B4 ' # 12: StructLength=180 (+8=188) + '0000 0000 ' # 16: MorphFeatureCount=0 + '0000 0001 ' # 20: MorphSubtableCount=1 + '0000 00A4 ' # 24: Subtable[0].StructLength=164 (+24=188) + '80 ' # 28: Subtable[0].CoverageFlags=0x80 + '00 00 ' # 29: Subtable[0].Reserved=0 + '01 ' # 31: Subtable[0].MorphType=1/ContextualMorph + '0000 0001 ' # 32: Subtable[0].SubFeatureFlags=0x1 + '0000 0006 ' # 36: STXHeader.ClassCount=6 + '0000 0014 ' # 40: STXHeader.ClassTableOffset=20 (+36=56) + '0000 0038 ' # 44: STXHeader.StateArrayOffset=56 (+36=92) + '0000 005C ' # 48: STXHeader.EntryTableOffset=92 (+36=128) + '0000 0074 ' # 52: STXHeader.PerGlyphTableOffset=116 (+36=152) + + # Glyph class table. + '0006 0004 ' # 56: ClassTable.LookupFormat=6, .UnitSize=4 + '0005 0010 ' # 60: .NUnits=5, .SearchRange=16 + '0002 0004 ' # 64: .EntrySelector=2, .RangeShift=4 + '0032 0004 ' # 68: Glyph=50; Class=4 + '0034 0004 ' # 72: Glyph=52; Class=4 + '0050 0005 ' # 76: Glyph=80; Class=5 + '00C9 0004 ' # 80: Glyph=201; Class=4 + '00CA 0004 ' # 84: Glyph=202; Class=4 + 'FFFF 0000 ' # 88: Glyph=; Value= + + # State array. + '0000 0000 0000 0000 0000 0001 ' # 92: State[0][0..5] + '0000 0000 0000 0000 0000 0001 ' # 104: State[1][0..5] + '0000 0000 0000 0000 0002 0001 ' # 116: State[2][0..5] + + # Entry table. + '0000 0000 ' # 128: Entries[0].NewState=0, .Flags=0 + 'FFFF FFFF ' # 132: Entries[0].MarkSubst=None, .CurSubst=None + '0002 0000 ' # 136: Entries[1].NewState=2, .Flags=0 + 'FFFF FFFF ' # 140: Entries[1].MarkSubst=None, .CurSubst=None + '0000 0000 ' # 144: Entries[2].NewState=0, .Flags=0 + 'FFFF 0000 ' # 148: Entries[2].MarkSubst=None, .CurSubst=PerGlyph #0 + # 152: + + # Per-glyph lookup tables. + '0000 0004 ' # 152: Offset from this point to per-glyph lookup #0. + + # Per-glyph lookup #0. 
+ '0006 0004 ' # 156: ClassTable.LookupFormat=6, .UnitSize=4 + '0004 0010 ' # 160: .NUnits=4, .SearchRange=16 + '0002 0000 ' # 164: .EntrySelector=2, .RangeShift=0 + '0032 0258 ' # 168: Glyph=50; ReplacementGlyph=600 + '0034 0259 ' # 172: Glyph=52; ReplacementGlyph=601 + '00C9 025A ' # 176: Glyph=201; ReplacementGlyph=602 + '00CA 0384 ' # 180: Glyph=202; ReplacementGlyph=900 + 'FFFF 0000 ' # 184: Glyph=; Value= + +) # 188: +assert len(MORX_CONTEXTUAL_DATA) == 188, len(MORX_CONTEXTUAL_DATA) + + +MORX_CONTEXTUAL_XML = [ + '', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +# Taken from “Example 2: A ligature table” in +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html +# as retrieved on 2017-09-11. +# +# Compared to the example table in Apple’s specification, we’ve +# made the following changes: +# +# * at offsets 0..35, we’ve prepended 36 bytes of boilerplate +# to make the data a structurally valid ‘morx’ table; +# +# * at offsets 88..91 (offsets 52..55 in Apple’s document), we’ve +# changed the range of the third segment from 23..24 to 26..28. 
+# The hexdump values in Apple’s specification are completely wrong; +# the values from the comments would work, but they can be encoded +# more compactly than in the specification example. For round-trip +# testing, we omit the ‘f’ glyph, which makes AAT lookup format 2 +# the most compact encoding; +# +# * at offsets 92..93 (offsets 56..57 in Apple’s document), we’ve +# changed the glyph class of the third segment from 5 to 6, which +# matches the values from the comments to the spec (but not the +# Apple’s hexdump). +# +# TODO: Ask Apple to fix “Example 2” in the ‘morx’ specification. +MORX_LIGATURE_DATA = deHexStr( + '0002 0000 ' # 0: Version=2, Reserved=0 + '0000 0001 ' # 4: MorphChainCount=1 + '0000 0001 ' # 8: DefaultFlags=1 + '0000 00DA ' # 12: StructLength=218 (+8=226) + '0000 0000 ' # 16: MorphFeatureCount=0 + '0000 0001 ' # 20: MorphSubtableCount=1 + '0000 00CA ' # 24: Subtable[0].StructLength=202 (+24=226) + '80 ' # 28: Subtable[0].CoverageFlags=0x80 + '00 00 ' # 29: Subtable[0].Reserved=0 + '02 ' # 31: Subtable[0].MorphType=2/LigatureMorph + '0000 0001 ' # 32: Subtable[0].SubFeatureFlags=0x1 + + # State table header. + '0000 0007 ' # 36: STXHeader.ClassCount=7 + '0000 001C ' # 40: STXHeader.ClassTableOffset=28 (+36=64) + '0000 0040 ' # 44: STXHeader.StateArrayOffset=64 (+36=100) + '0000 0078 ' # 48: STXHeader.EntryTableOffset=120 (+36=156) + '0000 0090 ' # 52: STXHeader.LigActionsOffset=144 (+36=180) + '0000 009C ' # 56: STXHeader.LigComponentsOffset=156 (+36=192) + '0000 00AE ' # 60: STXHeader.LigListOffset=174 (+36=210) + + # Glyph class table. + '0002 0006 ' # 64: ClassTable.LookupFormat=2, .UnitSize=6 + '0003 000C ' # 68: .NUnits=3, .SearchRange=12 + '0001 0006 ' # 72: .EntrySelector=1, .RangeShift=6 + '0016 0014 0004 ' # 76: GlyphID 20..22 [a..c] -> GlyphClass 4 + '0018 0017 0005 ' # 82: GlyphID 23..24 [d..e] -> GlyphClass 5 + '001C 001A 0006 ' # 88: GlyphID 26..28 [g..i] -> GlyphClass 6 + 'FFFF FFFF 0000 ' # 94: + + # State array. 
+ '0000 0000 0000 0000 0001 0000 0000 ' # 100: State[0][0..6] + '0000 0000 0000 0000 0001 0000 0000 ' # 114: State[1][0..6] + '0000 0000 0000 0000 0001 0002 0000 ' # 128: State[2][0..6] + '0000 0000 0000 0000 0001 0002 0003 ' # 142: State[3][0..6] + + # Entry table. + '0000 0000 ' # 156: Entries[0].NewState=0, .Flags=0 + '0000 ' # 160: Entries[0].ActionIndex= because no 0x2000 flag + '0002 8000 ' # 162: Entries[1].NewState=2, .Flags=0x8000 (SetComponent) + '0000 ' # 166: Entries[1].ActionIndex= because no 0x2000 flag + '0003 8000 ' # 168: Entries[2].NewState=3, .Flags=0x8000 (SetComponent) + '0000 ' # 172: Entries[2].ActionIndex= because no 0x2000 flag + '0000 A000 ' # 174: Entries[3].NewState=0, .Flags=0xA000 (SetComponent,Act) + '0000 ' # 178: Entries[3].ActionIndex=0 (start at Action[0]) + + # Ligature actions table. + '3FFF FFE7 ' # 180: Action[0].Flags=0, .GlyphIndexDelta=-25 + '3FFF FFED ' # 184: Action[1].Flags=0, .GlyphIndexDelta=-19 + 'BFFF FFF2 ' # 188: Action[2].Flags=, .GlyphIndexDelta=-14 + + # Ligature component table. + '0000 0001 ' # 192: LigComponent[0]=0, LigComponent[1]=1 + '0002 0003 ' # 196: LigComponent[2]=2, LigComponent[3]=3 + '0000 0004 ' # 200: LigComponent[4]=0, LigComponent[5]=4 + '0000 0008 ' # 204: LigComponent[6]=0, LigComponent[7]=8 + '0010 ' # 208: LigComponent[8]=16 + + # Ligature list. 
+ '03E8 03E9 ' # 210: LigList[0]=1000, LigList[1]=1001 + '03EA 03EB ' # 214: LigList[2]=1002, LigList[3]=1003 + '03EC 03ED ' # 218: LigList[4]=1004, LigList[3]=1005 + '03EE 03EF ' # 222: LigList[5]=1006, LigList[6]=1007 +) # 226: +assert len(MORX_LIGATURE_DATA) == 226, len(MORX_LIGATURE_DATA) + + +MORX_LIGATURE_XML = [ + '', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +class MORXNoncontextualGlyphSubstitutionTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + glyphs = ['.notdef'] + ['g.%d' % i for i in range (1, 140)] + glyphs[11], glyphs[13] = 'parenleft', 'parenright' + glyphs[135], glyphs[136] = 'parenleft.vertical', 'parenright.vertical' + cls.font = FakeFont(glyphs) + + def test_decompile_toXML(self): + table = newTable('morx') + table.decompile(MORX_NONCONTEXTUAL_DATA, self.font) + self.assertEqual(getXML(table.toXML), MORX_NONCONTEXTUAL_XML) + + def test_compile_fromXML(self): + table = newTable('morx') + for name, attrs, 
content in parseXML(MORX_NONCONTEXTUAL_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(MORX_NONCONTEXTUAL_DATA)) + + +class MORXRearrangementTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + cls.font = FakeFont(['.nodef', 'A', 'B', 'C']) + + def test_decompile_toXML(self): + table = newTable('morx') + table.decompile(MORX_REARRANGEMENT_DATA, self.font) + self.assertEqual(getXML(table.toXML), MORX_REARRANGEMENT_XML) + + def test_compile_fromXML(self): + table = newTable('morx') + for name, attrs, content in parseXML(MORX_REARRANGEMENT_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(MORX_REARRANGEMENT_DATA)) + + +class MORXContextualSubstitutionTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + g = ['.notdef'] + ['g.%d' % i for i in range (1, 910)] + g[80] = 'C' + g[50], g[52], g[201], g[202] = 'A', 'B', 'X', 'Y' + g[600], g[601], g[602], g[900] = ( + 'A.swash', 'B.swash', 'X.swash', 'Y.swash') + cls.font = FakeFont(g) + + def test_decompile_toXML(self): + table = newTable('morx') + table.decompile(MORX_CONTEXTUAL_DATA, self.font) + self.assertEqual(getXML(table.toXML), MORX_CONTEXTUAL_XML) + + def test_compile_fromXML(self): + table = newTable('morx') + for name, attrs, content in parseXML(MORX_CONTEXTUAL_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(MORX_CONTEXTUAL_DATA)) + + +class MORXLigatureSubstitutionTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + g = ['.notdef'] + ['g.%d' % i for i in range (1, 1515)] + g[20:29] = 'a b c d e f g h i'.split() + g[1000:1008] = 'adf adg adh adi aef aeg aeh aei'.split() + g[1008:1016] = 'bdf bdg bdh bdi bef beg beh bei'.split() + g[1500:1507] = 'cdf cdg cdh cdi cef ceg ceh'.split() + g[1511] = 
'cei' + cls.font = FakeFont(g) + + def test_decompile_toXML(self): + table = newTable('morx') + table.decompile(MORX_LIGATURE_DATA, self.font) + self.assertEqual(getXML(table.toXML), MORX_LIGATURE_XML) + + def test_compile_fromXML(self): + table = newTable('morx') + for name, attrs, content in parseXML(MORX_LIGATURE_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(MORX_LIGATURE_DATA)) + + +class MORXCoverageFlagsTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + cls.font = FakeFont(['.notdef', 'A', 'B', 'C']) + + def checkFlags(self, flags, textDirection, processingOrder, + checkCompile=True): + data = bytesjoin([ + MORX_REARRANGEMENT_DATA[:28], + bytechr(flags << 4), + MORX_REARRANGEMENT_DATA[29:]]) + xml = [] + for line in MORX_REARRANGEMENT_XML: + if line.startswith(' ', xml) + table2 = newTable('morx') + for name, attrs, content in parseXML(xml): + table2.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table2.compile(self.font)[28:31]), "8abcde") + + +class UnsupportedMorxLookupTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_unsupportedLookupType(self): + data = bytesjoin([ + MORX_NONCONTEXTUAL_DATA[:67], + bytechr(66), + MORX_NONCONTEXTUAL_DATA[69:]]) + with self.assertRaisesRegex(AssertionError, + r"unsupported 'morx' lookup type 66"): + morx = newTable('morx') + morx.decompile(data, FakeFont(['.notdef'])) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/M_V_A_R_test.py fonttools-3.21.2/Tests/ttLib/tables/M_V_A_R_test.py --- fonttools-3.0/Tests/ttLib/tables/M_V_A_R_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/M_V_A_R_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,139 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib.tables._f_v_a_r import Axis +from fontTools.ttLib import newTable, TTFont +import unittest + + +MVAR_DATA = deHexStr( + '0001 0000 ' # 0: version=1.0 + '0000 0008 ' # 4: reserved=0, valueRecordSize=8 + '0007 ' # 8: valueRecordCount=7 + '0044 ' # 10: offsetToItemVariationStore=68 + '6861 7363 ' # 12: ValueRecord.valueTag="hasc" + '0000 ' # 16: ValueRecord.deltaSetOuterIndex + '0003 ' # 18: ValueRecord.deltaSetInnerIndex + '6863 6C61 ' # 20: ValueRecord.valueTag="hcla" + '0000 ' # 24: ValueRecord.deltaSetOuterIndex + '0003 ' # 26: ValueRecord.deltaSetInnerIndex + '6863 6C64 ' # 28: ValueRecord.valueTag="hcld" + '0000 ' # 32: ValueRecord.deltaSetOuterIndex + '0003 ' # 34: ValueRecord.deltaSetInnerIndex + '6864 7363 ' # 36: ValueRecord.valueTag="hdsc" + '0000 ' # 40: ValueRecord.deltaSetOuterIndex + '0000 ' # 42: ValueRecord.deltaSetInnerIndex + '686C 6770 ' # 44: ValueRecord.valueTag="hlgp" + '0000 ' # 48: ValueRecord.deltaSetOuterIndex + '0002 ' # 50: 
ValueRecord.deltaSetInnerIndex + '7362 796F ' # 52: ValueRecord.valueTag="sbyo" + '0000 ' # 56: ValueRecord.deltaSetOuterIndex + '0001 ' # 58: ValueRecord.deltaSetInnerIndex + '7370 796F ' # 60: ValueRecord.valueTag="spyo" + '0000 ' # 64: ValueRecord.deltaSetOuterIndex + '0002 ' # 66: ValueRecord.deltaSetInnerIndex + '0001 ' # 68: VarStore.format=1 + '0000 000C ' # 70: VarStore.offsetToVariationRegionList=12 + '0001 ' # 74: VarStore.itemVariationDataCount=1 + '0000 0016 ' # 76: VarStore.itemVariationDataOffsets[0]=22 + '0001 ' # 80: VarRegionList.axisCount=1 + '0001 ' # 82: VarRegionList.regionCount=1 + '0000 ' # 84: variationRegions[0].regionAxes[0].startCoord=0.0 + '4000 ' # 86: variationRegions[0].regionAxes[0].peakCoord=1.0 + '4000 ' # 88: variationRegions[0].regionAxes[0].endCoord=1.0 + '0004 ' # 90: VarData.ItemCount=4 + '0001 ' # 92: VarData.NumShorts=1 + '0001 ' # 94: VarData.VarRegionCount=1 + '0000 ' # 96: VarData.VarRegionIndex[0]=0 + 'FF38 ' # 98: VarData.deltaSets[0]=-200 + 'FFCE ' # 100: VarData.deltaSets[0]=-50 + '0064 ' # 102: VarData.deltaSets[0]=100 + '00C8 ' # 104: VarData.deltaSets[0]=200 +) + +MVAR_XML = [ + '', + '', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', + ' ', + ' ', + '', + '', + ' ', + ' ', + '', + '', + ' ', + ' ', + '', + '', + ' ', + ' ', + '', + '', + ' ', + ' ', + '', + '', + ' ', + ' ', + '', + '', + ' ', + ' ', + '', +] + + +class MVARTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + + def test_decompile_toXML(self): + mvar = newTable('MVAR') + font = TTFont() + mvar.decompile(MVAR_DATA, font) + self.assertEqual(getXML(mvar.toXML), MVAR_XML) + + def test_compile_fromXML(self): + mvar = newTable('MVAR') + font = TTFont() + for name, attrs, content in parseXML(MVAR_XML): + mvar.fromXML(name, attrs, content, font=font) + data = MVAR_DATA + 
self.assertEqual(hexStr(mvar.compile(font)), hexStr(data)) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_n_a_m_e_test.py fonttools-3.21.2/Tests/ttLib/tables/_n_a_m_e_test.py --- fonttools-3.0/Tests/ttLib/tables/_n_a_m_e_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_n_a_m_e_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,303 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc import sstruct +from fontTools.misc.loggingTools import CapturingLogHandler +from fontTools.misc.testTools import FakeFont +from fontTools.misc.xmlWriter import XMLWriter +import struct +import unittest +from fontTools.ttLib import newTable +from fontTools.ttLib.tables._n_a_m_e import ( + table__n_a_m_e, NameRecord, nameRecordFormat, nameRecordSize, makeName, log) + + +def names(nameTable): + result = [(n.nameID, n.platformID, n.platEncID, n.langID, n.string) + for n in nameTable.names] + result.sort() + return result + + +class NameTableTest(unittest.TestCase): + + def test_getDebugName(self): + table = table__n_a_m_e() + table.names = [ + makeName("Bold", 258, 1, 0, 0), # Mac, MacRoman, English + makeName("Gras", 258, 1, 0, 1), # Mac, MacRoman, French + makeName("Fett", 258, 1, 0, 2), # Mac, MacRoman, German + makeName("Sem Fracções", 292, 1, 0, 8) # Mac, MacRoman, Portuguese + ] + self.assertEqual("Bold", table.getDebugName(258)) + self.assertEqual("Sem Fracções", table.getDebugName(292)) + self.assertEqual(None, table.getDebugName(999)) + + def test_setName(self): + table = table__n_a_m_e() + table.setName("Regular", 2, 1, 0, 0) + table.setName("Version 1.000", 5, 3, 1, 0x409) + table.setName("寬鬆", 276, 1, 2, 0x13) + self.assertEqual("Regular", table.getName(2, 1, 0, 0).toUnicode()) + self.assertEqual("Version 1.000", table.getName(5, 3, 1, 0x409).toUnicode()) + 
self.assertEqual("寬鬆", table.getName(276, 1, 2, 0x13).toUnicode()) + self.assertTrue(len(table.names) == 3) + table.setName("緊縮", 276, 1, 2, 0x13) + self.assertEqual("緊縮", table.getName(276, 1, 2, 0x13).toUnicode()) + self.assertTrue(len(table.names) == 3) + # passing bytes issues a warning + with CapturingLogHandler(log, "WARNING") as captor: + table.setName(b"abc", 0, 1, 0, 0) + self.assertTrue( + len([r for r in captor.records if "string is bytes" in r.msg]) == 1) + # anything other than unicode or bytes raises an error + with self.assertRaises(TypeError): + table.setName(1.000, 5, 1, 0, 0) + + def test_addName(self): + table = table__n_a_m_e() + nameIDs = [] + for string in ("Width", "Weight", "Custom"): + nameIDs.append(table.addName(string)) + + self.assertEqual(nameIDs[0], 256) + self.assertEqual(nameIDs[1], 257) + self.assertEqual(nameIDs[2], 258) + self.assertEqual(len(table.names), 6) + self.assertEqual(table.names[0].string, "Width") + self.assertEqual(table.names[1].string, "Width") + self.assertEqual(table.names[2].string, "Weight") + self.assertEqual(table.names[3].string, "Weight") + self.assertEqual(table.names[4].string, "Custom") + self.assertEqual(table.names[5].string, "Custom") + + with self.assertRaises(ValueError): + table.addName('Invalid nameID', minNameID=32767) + with self.assertRaises(TypeError): + table.addName(b"abc") # must be unicode string + + def test_addMultilingualName(self): + # Microsoft Windows has language codes for “English” (en) + # and for “Standard German as used in Switzerland” (de-CH). + # In this case, we expect that the implementation just + # encodes the name for the Windows platform; Apple platforms + # have been able to decode Windows names since the early days + # of OSX (~2001). However, Windows has no language code for + # “Swiss German as used in Liechtenstein” (gsw-LI), so we + # expect that the implementation populates the 'ltag' table + # to represent that particular, rather exotic BCP47 code. 
+ font = FakeFont(glyphs=[".notdef", "A"]) + nameTable = font.tables['name'] = newTable("name") + with CapturingLogHandler(log, "WARNING") as captor: + widthID = nameTable.addMultilingualName({ + "en": "Width", + "de-CH": "Breite", + "gsw-LI": "Bräiti", + }, ttFont=font) + self.assertEqual(widthID, 256) + xHeightID = nameTable.addMultilingualName({ + "en": "X-Height", + "gsw-LI": "X-Hööchi" + }, ttFont=font) + self.assertEqual(xHeightID, 257) + captor.assertRegex("cannot add Windows name in language gsw-LI") + self.assertEqual(names(nameTable), [ + (256, 0, 4, 0, "Bräiti"), + (256, 3, 1, 0x0409, "Width"), + (256, 3, 1, 0x0807, "Breite"), + (257, 0, 4, 0, "X-Hööchi"), + (257, 3, 1, 0x0409, "X-Height"), + ]) + self.assertEqual(set(font.tables.keys()), {"ltag", "name"}) + self.assertEqual(font["ltag"].tags, ["gsw-LI"]) + + def test_addMultilingualName_legacyMacEncoding(self): + # Windows has no language code for Latin; MacOS has a code; + # and we actually can convert the name to the legacy MacRoman + # encoding. In this case, we expect that the name gets encoded + # as Macintosh name (platformID 1) with the corresponding Mac + # language code (133); the 'ltag' table should not be used. + font = FakeFont(glyphs=[".notdef", "A"]) + nameTable = font.tables['name'] = newTable("name") + with CapturingLogHandler(log, "WARNING") as captor: + nameTable.addMultilingualName({"la": "SPQR"}, + ttFont=font) + captor.assertRegex("cannot add Windows name in language la") + self.assertEqual(names(nameTable), [(256, 1, 0, 131, "SPQR")]) + self.assertNotIn("ltag", font.tables.keys()) + + def test_addMultilingualName_legacyMacEncodingButUnencodableName(self): + # Windows has no language code for Latin; MacOS has a code; + # but we cannot encode the name into this encoding because + # it contains characters that are not representable. + # In this case, we expect that the name gets encoded as + # Unicode name (platformID 0) with the language tag being + # added to the 'ltag' table. 
+ font = FakeFont(glyphs=[".notdef", "A"]) + nameTable = font.tables['name'] = newTable("name") + with CapturingLogHandler(log, "WARNING") as captor: + nameTable.addMultilingualName({"la": "ⱾƤℚⱤ"}, + ttFont=font) + captor.assertRegex("cannot add Windows name in language la") + self.assertEqual(names(nameTable), [(256, 0, 4, 0, "ⱾƤℚⱤ")]) + self.assertIn("ltag", font.tables) + self.assertEqual(font["ltag"].tags, ["la"]) + + def test_addMultilingualName_legacyMacEncodingButNoCodec(self): + # Windows has no language code for “Azeri written in the + # Arabic script” (az-Arab); MacOS would have a code (50); + # but we cannot encode the name into the legacy encoding + # because we have no codec for MacArabic in fonttools. + # In this case, we expect that the name gets encoded as + # Unicode name (platformID 0) with the language tag being + # added to the 'ltag' table. + font = FakeFont(glyphs=[".notdef", "A"]) + nameTable = font.tables['name'] = newTable("name") + with CapturingLogHandler(log, "WARNING") as captor: + nameTable.addMultilingualName({"az-Arab": "آذربايجان ديلی"}, + ttFont=font) + captor.assertRegex("cannot add Windows name in language az-Arab") + self.assertEqual(names(nameTable), [(256, 0, 4, 0, "آذربايجان ديلی")]) + self.assertIn("ltag", font.tables) + self.assertEqual(font["ltag"].tags, ["az-Arab"]) + + def test_addMultilingualName_noTTFont(self): + # If the ttFont argument is not passed, the implementation + # should add whatever names it can, but it should not crash + # just because it cannot build an ltag table. 
+ nameTable = newTable("name") + with CapturingLogHandler(log, "WARNING") as captor: + nameTable.addMultilingualName({"en": "A", "la": "ⱾƤℚⱤ"}) + captor.assertRegex("cannot store language la into 'ltag' table") + + def test_decompile_badOffset(self): + # https://github.com/behdad/fonttools/issues/525 + table = table__n_a_m_e() + badRecord = { + "platformID": 1, + "platEncID": 3, + "langID": 7, + "nameID": 1, + "length": 3, + "offset": 8765 # out of range + } + data = bytesjoin([ + struct.pack(">HHH", 1, 1, 6 + nameRecordSize), + sstruct.pack(nameRecordFormat, badRecord)]) + table.decompile(data, ttFont=None) + self.assertEqual(table.names, []) + + +class NameRecordTest(unittest.TestCase): + + def test_toUnicode_utf16be(self): + name = makeName("Foo Bold", 111, 0, 2, 7) + self.assertEqual("utf_16_be", name.getEncoding()) + self.assertEqual("Foo Bold", name.toUnicode()) + + def test_toUnicode_macroman(self): + name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman + self.assertEqual("mac_roman", name.getEncoding()) + self.assertEqual("Foo Italic", name.toUnicode()) + + def test_toUnicode_macromanian(self): + name = makeName(b"Foo Italic\xfb", 222, 1, 0, 37) # Mac Romanian + self.assertEqual("mac_romanian", name.getEncoding()) + self.assertEqual("Foo Italic"+unichr(0x02DA), name.toUnicode()) + + def test_toUnicode_UnicodeDecodeError(self): + name = makeName(b"\1", 111, 0, 2, 7) + self.assertEqual("utf_16_be", name.getEncoding()) + self.assertRaises(UnicodeDecodeError, name.toUnicode) + + def toXML(self, name): + writer = XMLWriter(BytesIO()) + name.toXML(writer, ttFont=None) + xml = writer.file.getvalue().decode("utf_8").strip() + return xml.split(writer.newlinestr.decode("utf_8"))[1:] + + def test_toXML_utf16be(self): + name = makeName("Foo Bold", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Foo Bold', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_odd_length1(self): + name = makeName(b"\0F\0o\0o\0", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Foo', + '' + 
], self.toXML(name)) + + def test_toXML_utf16be_odd_length2(self): + name = makeName(b"\0Fooz", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Fooz', + '' + ], self.toXML(name)) + + def test_toXML_utf16be_double_encoded(self): + name = makeName(b"\0\0\0F\0\0\0o", 111, 0, 2, 7) + self.assertEqual([ + '', + ' Fo', + '' + ], self.toXML(name)) + + def test_toXML_macroman(self): + name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman + self.assertEqual([ + '', + ' Foo Italic', + '' + ], self.toXML(name)) + + def test_toXML_macroman_actual_utf16be(self): + name = makeName("\0F\0o\0o", 222, 1, 0, 7) + self.assertEqual([ + '', + ' Foo', + '' + ], self.toXML(name)) + + def test_toXML_unknownPlatEncID_nonASCII(self): + name = makeName(b"B\x8arli", 333, 1, 9876, 7) # Unknown Mac encodingID + self.assertEqual([ + '', + ' BŠrli', + '' + ], self.toXML(name)) + + def test_toXML_unknownPlatEncID_ASCII(self): + name = makeName(b"Barli", 333, 1, 9876, 7) # Unknown Mac encodingID + self.assertEqual([ + '', + ' Barli', + '' + ], self.toXML(name)) + + def test_encoding_macroman_misc(self): + name = makeName('', 123, 1, 0, 17) # Mac Turkish + self.assertEqual(name.getEncoding(), "mac_turkish") + name.langID = 37 + self.assertEqual(name.getEncoding(), "mac_romanian") + name.langID = 45 # Other + self.assertEqual(name.getEncoding(), "mac_roman") + + def test_extended_mac_encodings(self): + name = makeName(b'\xfe', 123, 1, 1, 0) # Mac Japanese + self.assertEqual(name.toUnicode(), unichr(0x2122)) + + def test_extended_unknown(self): + name = makeName(b'\xfe', 123, 10, 11, 12) + self.assertEqual(name.getEncoding(), "ascii") + self.assertEqual(name.getEncoding(None), None) + self.assertEqual(name.getEncoding(default=None), None) + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_o_p_b_d_test.py fonttools-3.21.2/Tests/ttLib/tables/_o_p_b_d_test.py --- fonttools-3.0/Tests/ttLib/tables/_o_p_b_d_test.py 1970-01-01 00:00:00.000000000 
+0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_o_p_b_d_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,183 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import newTable +import unittest + + +# Example: Format 0 Optical Bounds Table +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html +OPBD_FORMAT_0_DATA = deHexStr( + '0001 0000 0000 ' # 0: Version=1.0, Format=0 + '0006 0004 0002 ' # 6: LookupFormat=6, UnitSize=4, NUnits=2 + '0008 0001 0000 ' # 12: SearchRange=8, EntrySelector=1, RangeShift=0 + '000A 001E ' # 18: Glyph=10(=C), OffsetOfOpticalBoundsDeltas=30 + '002B 0026 ' # 22: Glyph=43(=A), OffsetOfOpticalBoundsDeltas=38 + 'FFFF 0000 ' # 26: Glyph=, OffsetOfOpticalBoundsDeltas=0 + 'FFCE 0005 0037 FFFB ' # 30: Bounds[C].Left=-50 .Top=5 .Right=55 .Bottom=-5 + 'FFF6 000F 0000 0000 ' # 38: Bounds[A].Left=-10 .Top=15 .Right=0 .Bottom=0 +) # 46: +assert(len(OPBD_FORMAT_0_DATA) == 46) + + +OPBD_FORMAT_0_XML = [ + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +# Example: Format 1 Optical Bounds Table +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html +OPBD_FORMAT_1_DATA = deHexStr( + '0001 0000 0001 ' # 0: Version=1.0, Format=1 + '0006 0004 0002 ' # 6: LookupFormat=6, UnitSize=4, NUnits=2 + '0008 0001 0000 ' # 12: SearchRange=8, EntrySelector=1, RangeShift=0 + '000A 001E ' # 18: Glyph=10(=C), OffsetOfOpticalBoundsPoints=30 + '002B 0026 ' # 22: Glyph=43(=A), OffsetOfOpticalBoundsPoints=38 + 'FFFF 0000 ' # 26: Glyph=, OffsetOfOpticalBoundsPoints=0 + '0024 0025 0026 0027 ' # 30: Bounds[C].Left=36 .Top=37 .Right=38 .Bottom=39 + '0020 0029 FFFF FFFF ' # 38: Bounds[A].Left=32 .Top=41 .Right=-1 .Bottom=-1 +) # 46: 
+assert(len(OPBD_FORMAT_1_DATA) == 46) + + +OPBD_FORMAT_1_XML = [ + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +# This is the content of the Optical Bounds table in AppleChancery.ttf, +# font version 8.0d1e1 of 2013-02-06. An early version of fontTools +# was crashing when trying to decompile this table. +# https://github.com/fonttools/fonttools/issues/1031 +OPBD_APPLE_CHANCERY_DATA = deHexStr( + '0001 0000 0000 ' # 0: Version=1.0, Format=0 + '0004 0006 0011 ' # 6: LookupFormat=4, UnitSize=6, NUnits=17 + '0060 0004 0006 ' # 12: SearchRange=96, EntrySelector=4, RangeShift=6 + '017d 017d 0072 ' # 18: Seg[0].LastGlyph=381, FirstGlyph=381, Off=114(+6) + '0183 0180 0074 ' # 24: Seg[1].LastGlyph=387, FirstGlyph=384, Off=116(+6) + '0186 0185 007c ' # 30: Seg[2].LastGlyph=390, FirstGlyph=389, Off=124(+6) + '018f 018b 0080 ' # 36: Seg[3].LastGlyph=399, FirstGlyph=395, Off=128(+6) + '01a0 0196 008a ' # 42: Seg[4].LastGlyph=416, FirstGlyph=406, Off=138(+6) + '01a5 01a3 00a0 ' # 48: Seg[5].LastGlyph=421, FirstGlyph=419, Off=160(+6) + '01aa 01aa 00a6 ' # 54: Seg[6].LastGlyph=426, FirstGlyph=426, Off=166(+6) + '01ac 01ac 00a8 ' # 60: Seg[7].LastGlyph=428, FirstGlyph=428, Off=168(+6) + '01fb 01f1 00aa ' # 66: Seg[8].LastGlyph=507, FirstGlyph=497, Off=170(+6) + '0214 0209 00c0 ' # 72: Seg[9].LastGlyph=532, FirstGlyph=521, Off=192(+6) + '021d 0216 00d8 ' # 78: Seg[10].LastGlyph=541, FirstGlyph=534, Off=216(+6) + '0222 0220 00e8 ' # 84: Seg[11].LastGlyph=546, FirstGlyph=544, Off=232(+6) + '0227 0225 00ee ' # 90: Seg[12].LastGlyph=551, FirstGlyph=549, Off=238(+6) + '0229 0229 00f4 ' # 96: Seg[13].LastGlyph=553, FirstGlyph=553, Off=244(+6) + '023b 023b 00f6 ' # 102: Seg[14].LastGlyph=571, FirstGlyph=571, Off=246(+6) + '023e 023e 00f8 ' # 108: Seg[15].LastGlyph=574, FirstGlyph=574, Off=248(+6) + 'ffff ffff 00fa ' # 114: Seg[16]= + '0100 0108 0110 0118 0120 0128 0130 0138 0140 0148 0150 0158 ' + '0160 0168 
0170 0178 0180 0188 0190 0198 01a0 01a8 01b0 01b8 ' + '01c0 01c8 01d0 01d8 01e0 01e8 01f0 01f8 0200 0208 0210 0218 ' + '0220 0228 0230 0238 0240 0248 0250 0258 0260 0268 0270 0278 ' + '0280 0288 0290 0298 02a0 02a8 02b0 02b8 02c0 02c8 02d0 02d8 ' + '02e0 02e8 02f0 02f8 0300 0308 0310 0318 fd98 0000 0000 0000 ' + 'fdbc 0000 0000 0000 fdbc 0000 0000 0000 fdbf 0000 0000 0000 ' + 'fdbc 0000 0000 0000 fd98 0000 0000 0000 fda9 0000 0000 0000 ' + 'fd98 0000 0000 0000 fd98 0000 0000 0000 fd98 0000 0000 0000 ' + '0000 0000 0205 0000 0000 0000 0205 0000 0000 0000 02a4 0000 ' + '0000 0000 027e 0000 0000 0000 02f4 0000 0000 0000 02a4 0000 ' + '0000 0000 0365 0000 0000 0000 0291 0000 0000 0000 0291 0000 ' + '0000 0000 026a 0000 0000 0000 02b8 0000 0000 0000 02cb 0000 ' + '0000 0000 02a4 0000 0000 0000 01a9 0000 0000 0000 0244 0000 ' + '0000 0000 02a4 0000 0000 0000 02cb 0000 0000 0000 0244 0000 ' + '0000 0000 0307 0000 0000 0000 0307 0000 0000 0000 037f 0000 ' + '0000 0000 0307 0000 0000 0000 0307 0000 0000 0000 0307 0000 ' + '0000 0000 0307 0000 0000 0000 0307 0000 0000 0000 03e3 0000 ' + '0000 0000 030c 0000 0000 0000 0307 0000 fe30 0000 0000 0000 ' + 'fe7e 0000 0000 0000 fe91 0000 0000 0000 fe6a 0000 0000 0000 ' + 'fe6a 0000 0000 0000 fecb 0000 0000 0000 fe6a 0000 0000 0000 ' + 'fe7e 0000 0000 0000 fea4 0000 0000 0000 fe7e 0000 0000 0000 ' + 'fe44 0000 0000 0000 fea4 0000 0000 0000 feb8 0000 0000 0000 ' + 'fe7e 0000 0000 0000 fe5e 0000 0000 0000 fe37 0000 0000 0000 ' + 'fe37 0000 0000 0000 fcbd 0000 0000 0000 fd84 0000 0000 0000 ' + 'fd98 0000 0000 0000 fd82 0000 0000 0000 fcbd 0000 0000 0000 ' + 'fd84 0000 0000 0000 fcbd 0000 0000 0000 fcbd 0000 0000 0000 ' + 'fe72 0000 0000 0000 ff9d 0000 0000 0000 0000 0000 032f 0000 ' + '0000 0000 03ba 0000 ' +) +assert len(OPBD_APPLE_CHANCERY_DATA) == 800 + + +class OPBDTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + glyphs = ['.notdef'] + ['X.alt%d' for g in range(1, 50)] + glyphs[10] = 'C' + 
glyphs[43] = 'A' + cls.font = FakeFont(glyphs) + + def test_decompile_toXML_format0(self): + table = newTable('opbd') + table.decompile(OPBD_FORMAT_0_DATA, self.font) + self.assertEqual(getXML(table.toXML), OPBD_FORMAT_0_XML) + + def test_compile_fromXML_format0(self): + table = newTable('opbd') + for name, attrs, content in parseXML(OPBD_FORMAT_0_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(OPBD_FORMAT_0_DATA)) + + def test_decompile_toXML_format1(self): + table = newTable('opbd') + table.decompile(OPBD_FORMAT_1_DATA, self.font) + self.assertEqual(getXML(table.toXML), OPBD_FORMAT_1_XML) + + def test_compile_fromXML_format1(self): + table = newTable('opbd') + for name, attrs, content in parseXML(OPBD_FORMAT_1_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(OPBD_FORMAT_1_DATA)) + + def test_decompile_AppleChancery(self): + # Make sure we do not crash when decompiling the 'opbd' table of + # AppleChancery.ttf. 
https://github.com/fonttools/fonttools/issues/1031 + table = newTable('opbd') + table.decompile(OPBD_APPLE_CHANCERY_DATA, self.font) + self.assertIn('', getXML(table.toXML)) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/O_S_2f_2_test.py fonttools-3.21.2/Tests/ttLib/tables/O_S_2f_2_test.py --- fonttools-3.0/Tests/ttLib/tables/O_S_2f_2_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/O_S_2f_2_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,62 @@ +from __future__ import print_function, division, absolute_import +from fontTools.ttLib import TTFont, newTable, getTableModule +from fontTools.ttLib.tables.O_S_2f_2 import * +import unittest + + +class OS2TableTest(unittest.TestCase): + + def test_getUnicodeRanges(self): + table = table_O_S_2f_2() + table.ulUnicodeRange1 = 0xFFFFFFFF + table.ulUnicodeRange2 = 0xFFFFFFFF + table.ulUnicodeRange3 = 0xFFFFFFFF + table.ulUnicodeRange4 = 0xFFFFFFFF + bits = table.getUnicodeRanges() + for i in range(127): + self.assertIn(i, bits) + + def test_setUnicodeRanges(self): + table = table_O_S_2f_2() + table.ulUnicodeRange1 = 0 + table.ulUnicodeRange2 = 0 + table.ulUnicodeRange3 = 0 + table.ulUnicodeRange4 = 0 + bits = set(range(123)) + table.setUnicodeRanges(bits) + self.assertEqual(table.getUnicodeRanges(), bits) + with self.assertRaises(ValueError): + table.setUnicodeRanges([-1, 127, 255]) + + def test_recalcUnicodeRanges(self): + font = TTFont() + font['OS/2'] = os2 = newTable('OS/2') + font['cmap'] = cmap = newTable('cmap') + st = getTableModule('cmap').CmapSubtable.newSubtable(4) + st.platformID, st.platEncID, st.language = 3, 1, 0 + st.cmap = {0x0041:'A', 0x03B1: 'alpha', 0x0410: 'Acyr'} + cmap.tables = [] + cmap.tables.append(st) + os2.setUnicodeRanges({0, 1, 9}) + # 'pruneOnly' will clear any bits for which there's no intersection: + # bit 1 ('Latin 1 Supplement'), in this case. 
However, it won't set + # bit 7 ('Greek and Coptic') despite the "alpha" character is present. + self.assertEqual(os2.recalcUnicodeRanges(font, pruneOnly=True), {0, 9}) + # try again with pruneOnly=False: bit 7 is now set. + self.assertEqual(os2.recalcUnicodeRanges(font), {0, 7, 9}) + # add a non-BMP char from 'Mahjong Tiles' block (bit 122) + st.cmap[0x1F000] = 'eastwindtile' + # the bit 122 and the special bit 57 ('Non Plane 0') are also enabled + self.assertEqual(os2.recalcUnicodeRanges(font), {0, 7, 9, 57, 122}) + + def test_intersectUnicodeRanges(self): + self.assertEqual(intersectUnicodeRanges([0x0410]), {9}) + self.assertEqual(intersectUnicodeRanges([0x0410, 0x1F000]), {9, 57, 122}) + self.assertEqual( + intersectUnicodeRanges([0x0410, 0x1F000], inverse=True), + (set(range(123)) - {9, 57, 122})) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/otBase_test.py fonttools-3.21.2/Tests/ttLib/tables/otBase_test.py --- fonttools-3.0/Tests/ttLib/tables/otBase_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/otBase_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,96 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.textTools import deHexStr +from fontTools.ttLib.tables.otBase import OTTableReader, OTTableWriter +import unittest + + +class OTTableReaderTest(unittest.TestCase): + def test_readShort(self): + reader = OTTableReader(deHexStr("CA FE")) + self.assertEqual(reader.readShort(), -13570) + self.assertEqual(reader.pos, 2) + + def test_readLong(self): + reader = OTTableReader(deHexStr("CA FE BE EF")) + self.assertEqual(reader.readLong(), -889274641) + self.assertEqual(reader.pos, 4) + + def test_readUInt8(self): + reader = OTTableReader(deHexStr("C3")) + self.assertEqual(reader.readUInt8(), 0xC3) + self.assertEqual(reader.pos, 1) + + def test_readUShort(self): + reader = 
OTTableReader(deHexStr("CA FE")) + self.assertEqual(reader.readUShort(), 0xCAFE) + self.assertEqual(reader.pos, 2) + + def test_readUShortArray(self): + reader = OTTableReader(deHexStr("DE AD BE EF CA FE")) + self.assertEqual(list(reader.readUShortArray(3)), + [0xDEAD, 0xBEEF, 0xCAFE]) + self.assertEqual(reader.pos, 6) + + def test_readUInt24(self): + reader = OTTableReader(deHexStr("C3 13 37")) + self.assertEqual(reader.readUInt24(), 0xC31337) + self.assertEqual(reader.pos, 3) + + def test_readULong(self): + reader = OTTableReader(deHexStr("CA FE BE EF")) + self.assertEqual(reader.readULong(), 0xCAFEBEEF) + self.assertEqual(reader.pos, 4) + + def test_readTag(self): + reader = OTTableReader(deHexStr("46 6F 6F 64")) + self.assertEqual(reader.readTag(), "Food") + self.assertEqual(reader.pos, 4) + + def test_readData(self): + reader = OTTableReader(deHexStr("48 65 6C 6C 6F")) + self.assertEqual(reader.readData(5), b"Hello") + self.assertEqual(reader.pos, 5) + + def test_getSubReader(self): + reader = OTTableReader(deHexStr("CAFE F00D")) + sub = reader.getSubReader(2) + self.assertEqual(sub.readUShort(), 0xF00D) + self.assertEqual(reader.readUShort(), 0xCAFE) + + +class OTTableWriterTest(unittest.TestCase): + def test_writeShort(self): + writer = OTTableWriter() + writer.writeShort(-12345) + self.assertEqual(writer.getData(), deHexStr("CF C7")) + + def test_writeLong(self): + writer = OTTableWriter() + writer.writeLong(-12345678) + self.assertEqual(writer.getData(), deHexStr("FF 43 9E B2")) + + def test_writeUInt8(self): + writer = OTTableWriter() + writer.writeUInt8(0xBE) + self.assertEqual(writer.getData(), deHexStr("BE")) + + def test_writeUShort(self): + writer = OTTableWriter() + writer.writeUShort(0xBEEF) + self.assertEqual(writer.getData(), deHexStr("BE EF")) + + def test_writeUInt24(self): + writer = OTTableWriter() + writer.writeUInt24(0xBEEF77) + self.assertEqual(writer.getData(), deHexStr("BE EF 77")) + + def test_writeULong(self): + writer = 
OTTableWriter() + writer.writeULong(0xBEEFCAFE) + self.assertEqual(writer.getData(), deHexStr("BE EF CA FE")) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/otConverters_test.py fonttools-3.21.2/Tests/ttLib/tables/otConverters_test.py --- fonttools-3.0/Tests/ttLib/tables/otConverters_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/otConverters_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,424 @@ +# coding: utf-8 +from __future__ import print_function, division, absolute_import, \ + unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.loggingTools import CapturingLogHandler +from fontTools.misc.testTools import FakeFont, makeXMLWriter +from fontTools.misc.textTools import deHexStr +import fontTools.ttLib.tables.otConverters as otConverters +from fontTools.ttLib import newTable +from fontTools.ttLib.tables.otBase import OTTableReader, OTTableWriter +import unittest + + +class Char64Test(unittest.TestCase): + font = FakeFont([]) + converter = otConverters.Char64("char64", 0, None, None) + + def test_read(self): + reader = OTTableReader(b"Hello\0junk after zero byte" + 100 * b"\0") + self.assertEqual(self.converter.read(reader, self.font, {}), "Hello") + self.assertEqual(reader.pos, 64) + + def test_read_replace_not_ascii(self): + reader = OTTableReader(b"Hello \xE4 world" + 100 * b"\0") + with CapturingLogHandler(otConverters.log, "WARNING") as captor: + data = self.converter.read(reader, self.font, {}) + self.assertEqual(data, "Hello � world") + self.assertEqual(reader.pos, 64) + self.assertIn('replaced non-ASCII characters in "Hello � world"', + [r.msg for r in captor.records]) + + def test_write(self): + writer = OTTableWriter() + self.converter.write(writer, self.font, {}, "Hello world") + self.assertEqual(writer.getData(), b"Hello world" + 53 * b"\0") + + def test_write_replace_not_ascii(self): + writer = OTTableWriter() + with 
CapturingLogHandler(otConverters.log, "WARNING") as captor: + self.converter.write(writer, self.font, {}, "Hello ☃") + self.assertEqual(writer.getData(), b"Hello ?" + 57 * b"\0") + self.assertIn('replacing non-ASCII characters in "Hello ☃"', + [r.msg for r in captor.records]) + + def test_write_truncated(self): + writer = OTTableWriter() + with CapturingLogHandler(otConverters.log, "WARNING") as captor: + self.converter.write(writer, self.font, {}, "A" * 80) + self.assertEqual(writer.getData(), b"A" * 64) + self.assertIn('truncating overlong "' + "A" * 80 + '" to 64 bytes', + [r.msg for r in captor.records]) + + def test_xmlRead(self): + value = self.converter.xmlRead({"value": "Foo"}, [], self.font) + self.assertEqual(value, "Foo") + + def test_xmlWrite(self): + writer = makeXMLWriter() + self.converter.xmlWrite(writer, self.font, "Hello world", "Element", + [("attr", "v")]) + xml = writer.file.getvalue().decode("utf-8").rstrip() + self.assertEqual(xml, '') + + +class GlyphIDTest(unittest.TestCase): + font = FakeFont(".notdef A B C".split()) + converter = otConverters.GlyphID('GlyphID', 0, None, None) + + def test_readArray(self): + reader = OTTableReader(deHexStr("0002 0001 DEAD 0002")) + self.assertEqual(self.converter.readArray(reader, self.font, {}, 4), + ["B", "A", "glyph57005", "B"]) + self.assertEqual(reader.pos, 8) + + def test_read(self): + reader = OTTableReader(deHexStr("0003")) + self.assertEqual(self.converter.read(reader, self.font, {}), "C") + self.assertEqual(reader.pos, 2) + + def test_write(self): + writer = OTTableWriter() + self.converter.write(writer, self.font, {}, "B") + self.assertEqual(writer.getData(), deHexStr("0002")) + + +class LongTest(unittest.TestCase): + font = FakeFont([]) + converter = otConverters.Long('Long', 0, None, None) + + def test_read(self): + reader = OTTableReader(deHexStr("FF0000EE")) + self.assertEqual(self.converter.read(reader, self.font, {}), -16776978) + self.assertEqual(reader.pos, 4) + + def test_write(self): + 
writer = OTTableWriter() + self.converter.write(writer, self.font, {}, -16777213) + self.assertEqual(writer.getData(), deHexStr("FF000003")) + + def test_xmlRead(self): + value = self.converter.xmlRead({"value": "314159"}, [], self.font) + self.assertEqual(value, 314159) + + def test_xmlWrite(self): + writer = makeXMLWriter() + self.converter.xmlWrite(writer, self.font, 291, "Foo", [("attr", "v")]) + xml = writer.file.getvalue().decode("utf-8").rstrip() + self.assertEqual(xml, '') + + +class NameIDTest(unittest.TestCase): + converter = otConverters.NameID('NameID', 0, None, None) + + def makeFont(self): + nameTable = newTable('name') + nameTable.setName(u"Demibold Condensed", 0x123, 3, 0, 0x409) + return {"name": nameTable} + + def test_read(self): + font = self.makeFont() + reader = OTTableReader(deHexStr("0123")) + self.assertEqual(self.converter.read(reader, font, {}), 0x123) + + def test_write(self): + writer = OTTableWriter() + self.converter.write(writer, self.makeFont(), {}, 0x123) + self.assertEqual(writer.getData(), deHexStr("0123")) + + def test_xmlWrite(self): + writer = makeXMLWriter() + self.converter.xmlWrite(writer, self.makeFont(), 291, + "FooNameID", [("attr", "val")]) + xml = writer.file.getvalue().decode("utf-8").rstrip() + self.assertEqual( + xml, + ' ') + + def test_xmlWrite_missingID(self): + writer = makeXMLWriter() + with CapturingLogHandler(otConverters.log, "WARNING") as captor: + self.converter.xmlWrite(writer, self.makeFont(), 666, + "Entity", [("attrib", "val")]) + self.assertIn("name id 666 missing from name table", + [r.msg for r in captor.records]) + xml = writer.file.getvalue().decode("utf-8").rstrip() + self.assertEqual( + xml, + ' ') + + +class UInt8Test(unittest.TestCase): + font = FakeFont([]) + converter = otConverters.UInt8("UInt8", 0, None, None) + + def test_read(self): + reader = OTTableReader(deHexStr("FE")) + self.assertEqual(self.converter.read(reader, self.font, {}), 254) + self.assertEqual(reader.pos, 1) + + def 
test_write(self): + writer = OTTableWriter() + self.converter.write(writer, self.font, {}, 253) + self.assertEqual(writer.getData(), deHexStr("FD")) + + def test_xmlRead(self): + value = self.converter.xmlRead({"value": "254"}, [], self.font) + self.assertEqual(value, 254) + + def test_xmlWrite(self): + writer = makeXMLWriter() + self.converter.xmlWrite(writer, self.font, 251, "Foo", [("attr", "v")]) + xml = writer.file.getvalue().decode("utf-8").rstrip() + self.assertEqual(xml, '') + + +class AATLookupTest(unittest.TestCase): + font = FakeFont(".notdef A B C D E F G H A.alt B.alt".split()) + converter = otConverters.AATLookup("AATLookup", 0, None, + tableClass=otConverters.GlyphID) + + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_readFormat0(self): + reader = OTTableReader(deHexStr("0000 0000 0001 0002 0000 7D00 0001")) + self.assertEqual(self.converter.read(reader, self.font, None), { + ".notdef": ".notdef", + "A": "A", + "B": "B", + "C": ".notdef", + "D": "glyph32000", + "E": "A" + }) + + def test_readFormat2(self): + reader = OTTableReader(deHexStr( + "0002 0006 0002 000C 0001 0006 " + "0002 0001 0003 " # glyph A..B: map to C + "0007 0005 0008 " # glyph E..G: map to H + "FFFF FFFF FFFF")) # end of search table + self.assertEqual(self.converter.read(reader, self.font, None), { + "A": "C", + "B": "C", + "E": "H", + "F": "H", + "G": "H", + }) + + def test_readFormat4(self): + reader = OTTableReader(deHexStr( + "0004 0006 0003 000C 0001 0006 " + "0002 0001 001E " # glyph 1..2: mapping at offset 0x1E + "0005 0004 001E " # glyph 4..5: mapping at offset 0x1E + "FFFF FFFF FFFF " # end of search table + "0007 0008")) # offset 0x18: glyphs [7, 8] = [G, H] + self.assertEqual(self.converter.read(reader, 
self.font, None), { + "A": "G", + "B": "H", + "D": "G", + "E": "H", + }) + + def test_readFormat6(self): + reader = OTTableReader(deHexStr( + "0006 0004 0002 0008 0001 0004 " + "0003 0001 " # C --> A + "0005 0002 " # E --> B + "FFFF FFFF")) # end of search table + self.assertEqual(self.converter.read(reader, self.font, None), { + "C": "A", + "E": "B", + }) + + def test_readFormat8(self): + reader = OTTableReader(deHexStr( + "0008 " + "0003 0003 " # first: C, count: 3 + "0007 0001 0002")) # [G, A, B] + self.assertEqual(self.converter.read(reader, self.font, None), { + "C": "G", + "D": "A", + "E": "B", + }) + + def test_readUnknownFormat(self): + reader = OTTableReader(deHexStr("0009")) + self.assertRaisesRegex( + AssertionError, + "unsupported lookup format: 9", + self.converter.read, reader, self.font, None) + + def test_writeFormat0(self): + writer = OTTableWriter() + font = FakeFont(".notdef A B C".split()) + self.converter.write(writer, font, {}, { + ".notdef": ".notdef", + "A": "C", + "B": "C", + "C": "A" + }) + self.assertEqual(writer.getData(), deHexStr("0000 0000 0003 0003 0001")) + + def test_writeFormat2(self): + writer = OTTableWriter() + font = FakeFont(".notdef A B C D E F G H".split()) + self.converter.write(writer, font, {}, { + "B": "C", + "C": "C", + "D": "C", + "E": "C", + "G": "A", + "H": "A", + }) + self.assertEqual(writer.getData(), deHexStr( + "0002 " # format=2 + "0006 " # binSrchHeader.unitSize=6 + "0002 " # binSrchHeader.nUnits=2 + "000C " # binSrchHeader.searchRange=12 + "0001 " # binSrchHeader.entrySelector=1 + "0000 " # binSrchHeader.rangeShift=0 + "0005 0002 0003 " # segments[0].lastGlyph=E, firstGlyph=B, value=C + "0008 0007 0001 " # segments[1].lastGlyph=H, firstGlyph=G, value=A + "FFFF FFFF 0000 " # segments[2]= + )) + + def test_writeFormat6(self): + writer = OTTableWriter() + font = FakeFont(".notdef A B C D E".split()) + self.converter.write(writer, font, {}, { + "A": "C", + "C": "B", + "D": "D", + "E": "E", + }) + 
self.assertEqual(writer.getData(), deHexStr( + "0006 " # format=6 + "0004 " # binSrchHeader.unitSize=4 + "0004 " # binSrchHeader.nUnits=4 + "0010 " # binSrchHeader.searchRange=16 + "0002 " # binSrchHeader.entrySelector=2 + "0000 " # binSrchHeader.rangeShift=0 + "0001 0003 " # entries[0].glyph=A, .value=C + "0003 0002 " # entries[1].glyph=C, .value=B + "0004 0004 " # entries[2].glyph=D, .value=D + "0005 0005 " # entries[3].glyph=E, .value=E + "FFFF 0000 " # entries[4]= + )) + + def test_writeFormat8(self): + writer = OTTableWriter() + font = FakeFont(".notdef A B C D E F G H".split()) + self.converter.write(writer, font, {}, { + "B": "B", + "C": "A", + "D": "B", + "E": "C", + "F": "B", + "G": "A", + }) + self.assertEqual(writer.getData(), deHexStr( + "0008 " # format=8 + "0002 " # firstGlyph=B + "0006 " # glyphCount=6 + "0002 0001 0002 0003 0002 0001" # valueArray=[B, A, B, C, B, A] + )) + + def test_xmlRead(self): + value = self.converter.xmlRead({}, [ + ("Lookup", {"glyph": "A", "value": "A.alt"}, []), + ("Lookup", {"glyph": "B", "value": "B.alt"}, []), + ], self.font) + self.assertEqual(value, {"A": "A.alt", "B": "B.alt"}) + + def test_xmlWrite(self): + writer = makeXMLWriter() + self.converter.xmlWrite(writer, self.font, + value={"A": "A.alt", "B": "B.alt"}, + name="Foo", attrs=[("attr", "val")]) + xml = writer.file.getvalue().decode("utf-8").splitlines() + self.assertEqual(xml, [ + '', + ' ', + ' ', + '', + ]) + + +class LazyListTest(unittest.TestCase): + + def test_slice(self): + ll = otConverters._LazyList([10, 11, 12, 13]) + sl = ll[:] + + self.assertIsNot(sl, ll) + self.assertIsInstance(sl, list) + self.assertEqual([10, 11, 12, 13], sl) + + self.assertEqual([11, 12], ll[1:3]) + + def test_getitem(self): + count = 2 + reader = OTTableReader(b"\x00\xFE\xFF\x00\x00\x00", offset=1) + converter = otConverters.UInt8("UInt8", 0, None, None) + recordSize = converter.staticSize + l = otConverters._LazyList() + l.reader = reader + l.pos = l.reader.pos + l.font = None 
+ l.conv = converter + l.recordSize = recordSize + l.extend(otConverters._MissingItem([i]) for i in range(count)) + reader.advance(count * recordSize) + + self.assertEqual(l[0], 254) + self.assertEqual(l[1], 255) + + def test_add_both_LazyList(self): + ll1 = otConverters._LazyList([1]) + ll2 = otConverters._LazyList([2]) + + l3 = ll1 + ll2 + + self.assertIsInstance(l3, list) + self.assertEqual([1, 2], l3) + + def test_add_LazyList_and_list(self): + ll1 = otConverters._LazyList([1]) + l2 = [2] + + l3 = ll1 + l2 + + self.assertIsInstance(l3, list) + self.assertEqual([1, 2], l3) + + def test_add_not_implemented(self): + with self.assertRaises(TypeError): + otConverters._LazyList() + 0 + with self.assertRaises(TypeError): + otConverters._LazyList() + tuple() + + def test_radd_list_and_LazyList(self): + l1 = [1] + ll2 = otConverters._LazyList([2]) + + l3 = l1 + ll2 + + self.assertIsInstance(l3, list) + self.assertEqual([1, 2], l3) + + def test_radd_not_implemented(self): + with self.assertRaises(TypeError): + 0 + otConverters._LazyList() + with self.assertRaises(TypeError): + tuple() + otConverters._LazyList() + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/otTables_test.py fonttools-3.21.2/Tests/ttLib/tables/otTables_test.py --- fonttools-3.0/Tests/ttLib/tables/otTables_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/otTables_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,490 @@ +# coding: utf-8 +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import getXML, parseXML, FakeFont +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib.tables.otBase import OTTableReader, OTTableWriter +import fontTools.ttLib.tables.otTables as otTables +import unittest + + +def makeCoverage(glyphs): + coverage = 
otTables.Coverage() + coverage.glyphs = glyphs + return coverage + + +class SingleSubstTest(unittest.TestCase): + def setUp(self): + self.glyphs = ".notdef A B C D E a b c d e".split() + self.font = FakeFont(self.glyphs) + + def test_postRead_format1(self): + table = otTables.SingleSubst() + table.Format = 1 + rawTable = { + "Coverage": makeCoverage(["A", "B", "C"]), + "DeltaGlyphID": 5 + } + table.postRead(rawTable, self.font) + self.assertEqual(table.mapping, {"A": "a", "B": "b", "C": "c"}) + + def test_postRead_format2(self): + table = otTables.SingleSubst() + table.Format = 2 + rawTable = { + "Coverage": makeCoverage(["A", "B", "C"]), + "GlyphCount": 3, + "Substitute": ["c", "b", "a"] + } + table.postRead(rawTable, self.font) + self.assertEqual(table.mapping, {"A": "c", "B": "b", "C": "a"}) + + def test_postRead_formatUnknown(self): + table = otTables.SingleSubst() + table.Format = 987 + rawTable = {"Coverage": makeCoverage(["A", "B", "C"])} + self.assertRaises(AssertionError, table.postRead, rawTable, self.font) + + def test_preWrite_format1(self): + table = otTables.SingleSubst() + table.mapping = {"A": "a", "B": "b", "C": "c"} + rawTable = table.preWrite(self.font) + self.assertEqual(table.Format, 1) + self.assertEqual(rawTable["Coverage"].glyphs, ["A", "B", "C"]) + self.assertEqual(rawTable["DeltaGlyphID"], 5) + + def test_preWrite_format2(self): + table = otTables.SingleSubst() + table.mapping = {"A": "c", "B": "b", "C": "a"} + rawTable = table.preWrite(self.font) + self.assertEqual(table.Format, 2) + self.assertEqual(rawTable["Coverage"].glyphs, ["A", "B", "C"]) + self.assertEqual(rawTable["Substitute"], ["c", "b", "a"]) + + def test_preWrite_emptyMapping(self): + table = otTables.SingleSubst() + table.mapping = {} + rawTable = table.preWrite(self.font) + self.assertEqual(table.Format, 2) + self.assertEqual(rawTable["Coverage"].glyphs, []) + self.assertEqual(rawTable["Substitute"], []) + + def test_toXML2(self): + writer = XMLWriter(StringIO()) + table = 
otTables.SingleSubst() + table.mapping = {"A": "a", "B": "b", "C": "c"} + table.toXML2(writer, self.font) + self.assertEqual(writer.file.getvalue().splitlines()[1:], [ + '', + '', + '', + ]) + + def test_fromXML(self): + table = otTables.SingleSubst() + for name, attrs, content in parseXML( + '' + '' + ''): + table.fromXML(name, attrs, content, self.font) + self.assertEqual(table.mapping, {"A": "a", "B": "b", "C": "c"}) + + +class MultipleSubstTest(unittest.TestCase): + def setUp(self): + self.glyphs = ".notdef c f i t c_t f_f_i".split() + self.font = FakeFont(self.glyphs) + + def test_postRead_format1(self): + makeSequence = otTables.MultipleSubst.makeSequence_ + table = otTables.MultipleSubst() + table.Format = 1 + rawTable = { + "Coverage": makeCoverage(["c_t", "f_f_i"]), + "Sequence": [ + makeSequence(["c", "t"]), + makeSequence(["f", "f", "i"]) + ] + } + table.postRead(rawTable, self.font) + self.assertEqual(table.mapping, { + "c_t": ["c", "t"], + "f_f_i": ["f", "f", "i"] + }) + + def test_postRead_formatUnknown(self): + table = otTables.MultipleSubst() + table.Format = 987 + self.assertRaises(AssertionError, table.postRead, {}, self.font) + + def test_preWrite_format1(self): + table = otTables.MultipleSubst() + table.mapping = {"c_t": ["c", "t"], "f_f_i": ["f", "f", "i"]} + rawTable = table.preWrite(self.font) + self.assertEqual(table.Format, 1) + self.assertEqual(rawTable["Coverage"].glyphs, ["c_t", "f_f_i"]) + + def test_toXML2(self): + writer = XMLWriter(StringIO()) + table = otTables.MultipleSubst() + table.mapping = {"c_t": ["c", "t"], "f_f_i": ["f", "f", "i"]} + table.toXML2(writer, self.font) + self.assertEqual(writer.file.getvalue().splitlines()[1:], [ + '', + '', + ]) + + def test_fromXML(self): + table = otTables.MultipleSubst() + for name, attrs, content in parseXML( + '' + ''): + table.fromXML(name, attrs, content, self.font) + self.assertEqual(table.mapping, + {'c_t': ['c', 't'], 'f_f_i': ['f', 'f', 'i']}) + + def test_fromXML_oldFormat(self): + 
table = otTables.MultipleSubst() + for name, attrs, content in parseXML( + '' + ' ' + ' ' + '' + '' + ' ' + ' ' + '' + '' + ' ' + ' ' + ' ' + ''): + table.fromXML(name, attrs, content, self.font) + self.assertEqual(table.mapping, + {'c_t': ['c', 't'], 'f_f_i': ['f', 'f', 'i']}) + + def test_fromXML_oldFormat_bug385(self): + # https://github.com/behdad/fonttools/issues/385 + table = otTables.MultipleSubst() + table.Format = 1 + for name, attrs, content in parseXML( + '' + ' ' + ' ' + '' + '' + ' ' + ' ' + ' ' + '' + '' + ' ' + ''): + table.fromXML(name, attrs, content, self.font) + self.assertEqual(table.mapping, + {'o': ['o', 'l', 'o'], 'l': ['o']}) + + +class LigatureSubstTest(unittest.TestCase): + def setUp(self): + self.glyphs = ".notdef c f i t c_t f_f f_i f_f_i".split() + self.font = FakeFont(self.glyphs) + + def makeLigature(self, s): + """'ffi' --> Ligature(LigGlyph='f_f_i', Component=['f', 'f', 'i'])""" + lig = otTables.Ligature() + lig.Component = list(s) + lig.LigGlyph = "_".join(lig.Component) + return lig + + def makeLigatures(self, s): + """'ffi fi' --> [otTables.Ligature, otTables.Ligature]""" + return [self.makeLigature(lig) for lig in s.split()] + + def test_postRead_format1(self): + table = otTables.LigatureSubst() + table.Format = 1 + ligs_c = otTables.LigatureSet() + ligs_c.Ligature = self.makeLigatures("ct") + ligs_f = otTables.LigatureSet() + ligs_f.Ligature = self.makeLigatures("ffi ff fi") + rawTable = { + "Coverage": makeCoverage(["c", "f"]), + "LigatureSet": [ligs_c, ligs_f] + } + table.postRead(rawTable, self.font) + self.assertEqual(set(table.ligatures.keys()), {"c", "f"}) + self.assertEqual(len(table.ligatures["c"]), 1) + self.assertEqual(table.ligatures["c"][0].LigGlyph, "c_t") + self.assertEqual(table.ligatures["c"][0].Component, ["c", "t"]) + self.assertEqual(len(table.ligatures["f"]), 3) + self.assertEqual(table.ligatures["f"][0].LigGlyph, "f_f_i") + self.assertEqual(table.ligatures["f"][0].Component, ["f", "f", "i"]) + 
self.assertEqual(table.ligatures["f"][1].LigGlyph, "f_f") + self.assertEqual(table.ligatures["f"][1].Component, ["f", "f"]) + self.assertEqual(table.ligatures["f"][2].LigGlyph, "f_i") + self.assertEqual(table.ligatures["f"][2].Component, ["f", "i"]) + + def test_postRead_formatUnknown(self): + table = otTables.LigatureSubst() + table.Format = 987 + rawTable = {"Coverage": makeCoverage(["f"])} + self.assertRaises(AssertionError, table.postRead, rawTable, self.font) + + def test_preWrite_format1(self): + table = otTables.LigatureSubst() + table.ligatures = { + "c": self.makeLigatures("ct"), + "f": self.makeLigatures("ffi ff fi") + } + rawTable = table.preWrite(self.font) + self.assertEqual(table.Format, 1) + self.assertEqual(rawTable["Coverage"].glyphs, ["c", "f"]) + [c, f] = rawTable["LigatureSet"] + self.assertIsInstance(c, otTables.LigatureSet) + self.assertIsInstance(f, otTables.LigatureSet) + [ct] = c.Ligature + self.assertIsInstance(ct, otTables.Ligature) + self.assertEqual(ct.LigGlyph, "c_t") + self.assertEqual(ct.Component, ["c", "t"]) + [ffi, ff, fi] = f.Ligature + self.assertIsInstance(ffi, otTables.Ligature) + self.assertEqual(ffi.LigGlyph, "f_f_i") + self.assertEqual(ffi.Component, ["f", "f", "i"]) + self.assertIsInstance(ff, otTables.Ligature) + self.assertEqual(ff.LigGlyph, "f_f") + self.assertEqual(ff.Component, ["f", "f"]) + self.assertIsInstance(fi, otTables.Ligature) + self.assertEqual(fi.LigGlyph, "f_i") + self.assertEqual(fi.Component, ["f", "i"]) + + def test_toXML2(self): + writer = XMLWriter(StringIO()) + table = otTables.LigatureSubst() + table.ligatures = { + "c": self.makeLigatures("ct"), + "f": self.makeLigatures("ffi ff fi") + } + table.toXML2(writer, self.font) + self.assertEqual(writer.file.getvalue().splitlines()[1:], [ + '', + ' ', + '', + '', + ' ', + ' ', + ' ', + '' + ]) + + def test_fromXML(self): + table = otTables.LigatureSubst() + for name, attrs, content in parseXML( + '' + ' ' + ' ' + ''): + table.fromXML(name, attrs, content, 
self.font) + self.assertEqual(set(table.ligatures.keys()), {"f"}) + [ffi, ff] = table.ligatures["f"] + self.assertEqual(ffi.LigGlyph, "f_f_i") + self.assertEqual(ffi.Component, ["f", "f", "i"]) + self.assertEqual(ff.LigGlyph, "f_f") + self.assertEqual(ff.Component, ["f", "f"]) + + +class AlternateSubstTest(unittest.TestCase): + def setUp(self): + self.glyphs = ".notdef G G.alt1 G.alt2 Z Z.fina".split() + self.font = FakeFont(self.glyphs) + + def makeAlternateSet(self, s): + result = otTables.AlternateSet() + result.Alternate = s.split() + return result + + def test_postRead_format1(self): + table = otTables.AlternateSubst() + table.Format = 1 + rawTable = { + "Coverage": makeCoverage(["G", "Z"]), + "AlternateSet": [ + self.makeAlternateSet("G.alt2 G.alt1"), + self.makeAlternateSet("Z.fina") + ] + } + table.postRead(rawTable, self.font) + self.assertEqual(table.alternates, { + "G": ["G.alt2", "G.alt1"], + "Z": ["Z.fina"] + }) + + def test_postRead_formatUnknown(self): + table = otTables.AlternateSubst() + table.Format = 987 + self.assertRaises(AssertionError, table.postRead, {}, self.font) + + def test_preWrite_format1(self): + table = otTables.AlternateSubst() + table.alternates = {"G": ["G.alt2", "G.alt1"], "Z": ["Z.fina"]} + rawTable = table.preWrite(self.font) + self.assertEqual(table.Format, 1) + self.assertEqual(rawTable["Coverage"].glyphs, ["G", "Z"]) + [g, z] = rawTable["AlternateSet"] + self.assertIsInstance(g, otTables.AlternateSet) + self.assertEqual(g.Alternate, ["G.alt2", "G.alt1"]) + self.assertIsInstance(z, otTables.AlternateSet) + self.assertEqual(z.Alternate, ["Z.fina"]) + + def test_toXML2(self): + writer = XMLWriter(StringIO()) + table = otTables.AlternateSubst() + table.alternates = {"G": ["G.alt2", "G.alt1"], "Z": ["Z.fina"]} + table.toXML2(writer, self.font) + self.assertEqual(writer.file.getvalue().splitlines()[1:], [ + '', + ' ', + ' ', + '', + '', + ' ', + '' + ]) + + def test_fromXML(self): + table = otTables.AlternateSubst() + for name, 
attrs, content in parseXML( + '' + ' ' + ' ' + '' + '' + ' ' + ''): + table.fromXML(name, attrs, content, self.font) + self.assertEqual(table.alternates, { + "G": ["G.alt2", "G.alt1"], + "Z": ["Z.fina"] + }) + + +class RearrangementMorphActionTest(unittest.TestCase): + def setUp(self): + self.font = FakeFont(['.notdef', 'A', 'B', 'C']) + + def testCompile(self): + r = otTables.RearrangementMorphAction() + r.NewState = 0x1234 + r.MarkFirst = r.DontAdvance = r.MarkLast = True + r.ReservedFlags, r.Verb = 0x1FF0, 0xD + writer = OTTableWriter() + r.compile(writer, self.font, actionIndex=None) + self.assertEqual(hexStr(writer.getAllData()), "1234fffd") + + def testDecompileToXML(self): + r = otTables.RearrangementMorphAction() + r.decompile(OTTableReader(deHexStr("1234fffd")), + self.font, actionReader=None) + toXML = lambda w, f: r.toXML(w, f, {"Test": "Foo"}, "Transition") + self.assertEqual(getXML(toXML, self.font), [ + '', + ' ', # 0x1234 = 4660 + ' ', + ' ', + ' ', + '', + ]) + + +class ContextualMorphActionTest(unittest.TestCase): + def setUp(self): + self.font = FakeFont(['.notdef', 'A', 'B', 'C']) + + def testCompile(self): + a = otTables.ContextualMorphAction() + a.NewState = 0x1234 + a.SetMark, a.DontAdvance, a.ReservedFlags = True, True, 0x3117 + a.MarkIndex, a.CurrentIndex = 0xDEAD, 0xBEEF + writer = OTTableWriter() + a.compile(writer, self.font, actionIndex=None) + self.assertEqual(hexStr(writer.getAllData()), "1234f117deadbeef") + + def testDecompileToXML(self): + a = otTables.ContextualMorphAction() + a.decompile(OTTableReader(deHexStr("1234f117deadbeef")), + self.font, actionReader=None) + toXML = lambda w, f: a.toXML(w, f, {"Test": "Foo"}, "Transition") + self.assertEqual(getXML(toXML, self.font), [ + '', + ' ', # 0x1234 = 4660 + ' ', + ' ', + ' ', # 0xDEAD = 57005 + ' ', # 0xBEEF = 48879 + '', + ]) + + +class LigatureMorphActionTest(unittest.TestCase): + def setUp(self): + self.font = FakeFont(['.notdef', 'A', 'B', 'C']) + + def 
testDecompileToXML(self): + a = otTables.LigatureMorphAction() + actionReader = OTTableReader(deHexStr("DEADBEEF 7FFFFFFE 80000003")) + a.decompile(OTTableReader(deHexStr("1234FAB30001")), + self.font, actionReader) + toXML = lambda w, f: a.toXML(w, f, {"Test": "Foo"}, "Transition") + self.assertEqual(getXML(toXML, self.font), [ + '', + ' ', # 0x1234 = 4660 + ' ', + ' ', + ' ', + ' ', + '', + ]) + + +class InsertionMorphActionTest(unittest.TestCase): + MORPH_ACTION_XML = [ + '', + ' ', # 0x1234 = 4660 + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '' + ] + + def setUp(self): + self.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D']) + self.maxDiff = None + + def testDecompileToXML(self): + a = otTables.InsertionMorphAction() + actionReader = OTTableReader( + deHexStr("DEAD BEEF 0002 0001 0004 0002 0003 DEAD BEEF")) + a.decompile(OTTableReader(deHexStr("1234 FC43 0005 0002")), + self.font, actionReader) + toXML = lambda w, f: a.toXML(w, f, {"Test": "Foo"}, "Transition") + self.assertEqual(getXML(toXML, self.font), self.MORPH_ACTION_XML) + + def testCompileFromXML(self): + a = otTables.InsertionMorphAction() + for name, attrs, content in parseXML(self.MORPH_ACTION_XML): + a.fromXML(name, attrs, content, self.font) + writer = OTTableWriter() + a.compile(writer, self.font, + actionIndex={('B', 'C'): 9, ('B', 'A', 'D'): 7}) + self.assertEqual(hexStr(writer.getAllData()), "1234fc4300090007") + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_p_r_o_p_test.py fonttools-3.21.2/Tests/ttLib/tables/_p_r_o_p_test.py --- fonttools-3.0/Tests/ttLib/tables/_p_r_o_p_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_p_r_o_p_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,84 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from 
fontTools.misc.textTools import deHexStr, hexStr +from fontTools.ttLib import newTable +import unittest + + +PROP_FORMAT_0_DATA = deHexStr( + '0001 0000 0000 ' # 0: Version=1.0, Format=0 + '0005 ' # 6: DefaultProperties=European number terminator +) # 8: +assert(len(PROP_FORMAT_0_DATA) == 8) + + +PROP_FORMAT_0_XML = [ + '', + '', + ' ', + '', +] + + +PROP_FORMAT_1_DATA = deHexStr( + '0003 0000 0001 ' # 0: Version=3.0, Format=1 + '0000 ' # 6: DefaultProperties=left-to-right; non-whitespace + '0008 0003 0004 ' # 8: LookupFormat=8, FirstGlyph=3, GlyphCount=4 + '000B ' # 14: Properties[C]=other neutral + '000A ' # 16: Properties[D]=whitespace + '600B ' # 18: Properties[E]=other neutral; hanging punct + '0005 ' # 20: Properties[F]=European number terminator +) # 22: +assert(len(PROP_FORMAT_1_DATA) == 22) + + +PROP_FORMAT_1_XML = [ + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +class PROPTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + cls.font = FakeFont(['.notdef', 'A', 'B', 'C', 'D', 'E', 'F', 'G']) + + def test_decompile_toXML_format0(self): + table = newTable('prop') + table.decompile(PROP_FORMAT_0_DATA, self.font) + self.assertEqual(getXML(table.toXML), PROP_FORMAT_0_XML) + + def test_compile_fromXML_format0(self): + table = newTable('prop') + for name, attrs, content in parseXML(PROP_FORMAT_0_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(PROP_FORMAT_0_DATA)) + + def test_decompile_toXML_format1(self): + table = newTable('prop') + table.decompile(PROP_FORMAT_1_DATA, self.font) + self.assertEqual(getXML(table.toXML), PROP_FORMAT_1_XML) + + def test_compile_fromXML_format1(self): + table = newTable('prop') + for name, attrs, content in parseXML(PROP_FORMAT_1_XML): + table.fromXML(name, attrs, content, font=self.font) + self.assertEqual(hexStr(table.compile(self.font)), + hexStr(PROP_FORMAT_1_DATA)) + + +if __name__ == 
'__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/S_T_A_T_test.py fonttools-3.21.2/Tests/ttLib/tables/S_T_A_T_test.py --- fonttools-3.0/Tests/ttLib/tables/S_T_A_T_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/S_T_A_T_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,264 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.testTools import FakeFont, getXML, parseXML +from fontTools.misc.textTools import deHexStr +from fontTools.ttLib import newTable +import unittest + + +STAT_DATA = deHexStr( + '0001 0000 ' # 0: Version=1.0 + '0008 0002 ' # 4: DesignAxisSize=8, DesignAxisCount=2 + '0000 0012 ' # 8: OffsetToDesignAxes=18 + '0003 0000 0022 ' # 12: AxisValueCount=3, OffsetToAxisValueOffsets=34 + '7767 6874 ' # 18: DesignAxis[0].AxisTag='wght' + '012D 0002 ' # 22: DesignAxis[0].NameID=301, .AxisOrdering=2 + '5445 5354 ' # 26: DesignAxis[1].AxisTag='TEST' + '012E 0001 ' # 30: DesignAxis[1].NameID=302, .AxisOrdering=1 + '0006 0012 0026 ' # 34: AxisValueOffsets = [6, 18, 38] (+34) + '0001 0000 0000 ' # 40: AxisValue[0].Format=1, .AxisIndex=0, .Flags=0 + '0191 0190 0000 ' # 46: AxisValue[0].ValueNameID=401, .Value=400.0 + '0002 0001 0000 ' # 52: AxisValue[1].Format=2, .AxisIndex=1, .Flags=0 + '0192 ' # 58: AxisValue[1].ValueNameID=402 + '0002 0000 ' # 60: AxisValue[1].NominalValue=2.0 + '0001 0000 ' # 64: AxisValue[1].RangeMinValue=1.0 + '0003 0000 ' # 68: AxisValue[1].RangeMaxValue=3.0 + '0003 0000 0000 ' # 72: AxisValue[2].Format=3, .AxisIndex=0, .Flags=0 + '0002 ' # 78: AxisValue[2].ValueNameID=2 'Regular' + '0190 0000 02BC 0000 ' # 80: AxisValue[2].Value=400.0, .LinkedValue=700.0 +) # 88: +assert(len(STAT_DATA) == 88) + + +STAT_XML = [ + '', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' 
', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +# Contains junk data for making sure we get our offset decoding right. +STAT_DATA_WITH_AXIS_JUNK = deHexStr( + '0001 0000 ' # 0: Version=1.0 + '000A 0002 ' # 4: DesignAxisSize=10, DesignAxisCount=2 + '0000 0012 ' # 8: OffsetToDesignAxes=18 + '0000 0000 0000 ' # 12: AxisValueCount=3, OffsetToAxisValueOffsets=34 + '7767 6874 ' # 18: DesignAxis[0].AxisTag='wght' + '012D 0002 ' # 22: DesignAxis[0].NameID=301, .AxisOrdering=2 + 'DEAD ' # 26: + '5445 5354 ' # 28: DesignAxis[1].AxisTag='TEST' + '012E 0001 ' # 32: DesignAxis[1].NameID=302, .AxisOrdering=1 + 'BEEF ' # 36: +) # 38: + +assert(len(STAT_DATA_WITH_AXIS_JUNK) == 38) + + +STAT_XML_WITH_AXIS_JUNK = [ + '', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', # 0xDE + ' ', # 0xAD + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', # 0xBE + ' ', # 0xEF + ' ', + '', + '', +] + + +STAT_DATA_AXIS_VALUE_FORMAT3 = deHexStr( + '0001 0000 ' # 0: Version=1.0 + '0008 0001 ' # 4: DesignAxisSize=8, DesignAxisCount=1 + '0000 0012 ' # 8: OffsetToDesignAxes=18 + '0001 ' # 12: AxisValueCount=1 + '0000 001A ' # 14: OffsetToAxisValueOffsets=26 + '7767 6874 ' # 18: DesignAxis[0].AxisTag='wght' + '0102 ' # 22: DesignAxis[0].AxisNameID=258 'Weight' + '0000 ' # 24: DesignAxis[0].AxisOrdering=0 + '0002 ' # 26: AxisValueOffsets=[2] (+26) + '0003 ' # 28: AxisValue[0].Format=3 + '0000 0002 ' # 30: AxisValue[0].AxisIndex=0, .Flags=0x2 + '0002 ' # 34: AxisValue[0].ValueNameID=2 'Regular' + '0190 0000 ' # 36: AxisValue[0].Value=400.0 + '02BC 0000 ' # 40: AxisValue[0].LinkedValue=700.0 +) # 44: +assert(len(STAT_DATA_AXIS_VALUE_FORMAT3) == 44) + + +STAT_XML_AXIS_VALUE_FORMAT3 = [ + '', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +STAT_DATA_VERSION_1_1 = deHexStr( + '0001 0001 ' # 0: Version=1.1 + '0008 0001 ' # 4: DesignAxisSize=8, DesignAxisCount=1 + '0000 0014 ' # 8: 
OffsetToDesignAxes=20 + '0001 ' # 12: AxisValueCount=1 + '0000 001C ' # 14: OffsetToAxisValueOffsets=28 + '0101 ' # 18: ElidedFallbackNameID: 257 + '7767 6874 ' # 20: DesignAxis[0].AxisTag='wght' + '0102 ' # 24: DesignAxis[0].AxisNameID=258 'Weight' + '0000 ' # 26: DesignAxis[0].AxisOrdering=0 + '0002 ' # 28: AxisValueOffsets=[2] (+28) + '0003 ' # 30: AxisValue[0].Format=3 + '0000 0002 ' # 32: AxisValue[0].AxisIndex=0, .Flags=0x2 + '0002 ' # 36: AxisValue[0].ValueNameID=2 'Regular' + '0190 0000 ' # 38: AxisValue[0].Value=400.0 + '02BC 0000 ' # 42: AxisValue[0].LinkedValue=700.0 +) # 46: +assert(len(STAT_DATA_VERSION_1_1) == 46) + + +STAT_XML_VERSION_1_1 = [ + '', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', +] + + +class STATTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.maxDiff = None + + def test_decompile_toXML(self): + table = newTable('STAT') + table.decompile(STAT_DATA, font=FakeFont(['.notdef'])) + self.assertEqual(getXML(table.toXML), STAT_XML) + + def test_decompile_toXML_withAxisJunk(self): + table = newTable('STAT') + table.decompile(STAT_DATA_WITH_AXIS_JUNK, font=FakeFont(['.notdef'])) + self.assertEqual(getXML(table.toXML), STAT_XML_WITH_AXIS_JUNK) + + def test_decompile_toXML_format3(self): + table = newTable('STAT') + table.decompile(STAT_DATA_AXIS_VALUE_FORMAT3, + font=FakeFont(['.notdef'])) + self.assertEqual(getXML(table.toXML), STAT_XML_AXIS_VALUE_FORMAT3) + + def test_decompile_toXML_version_1_1(self): + table = newTable('STAT') + table.decompile(STAT_DATA_VERSION_1_1, + font=FakeFont(['.notdef'])) + self.assertEqual(getXML(table.toXML), STAT_XML_VERSION_1_1) + + def test_compile_fromXML(self): + table = newTable('STAT') + font = FakeFont(['.notdef']) + for name, attrs, content in parseXML(STAT_XML): + table.fromXML(name, attrs, content, font=font) + self.assertEqual(table.compile(font), STAT_DATA) + + def 
test_compile_fromXML_withAxisJunk(self): + table = newTable('STAT') + font = FakeFont(['.notdef']) + for name, attrs, content in parseXML(STAT_XML_WITH_AXIS_JUNK): + table.fromXML(name, attrs, content, font=font) + self.assertEqual(table.compile(font), STAT_DATA_WITH_AXIS_JUNK) + + def test_compile_fromXML_format3(self): + table = newTable('STAT') + font = FakeFont(['.notdef']) + for name, attrs, content in parseXML(STAT_XML_AXIS_VALUE_FORMAT3): + table.fromXML(name, attrs, content, font=font) + self.assertEqual(table.compile(font), STAT_DATA_AXIS_VALUE_FORMAT3) + + def test_compile_fromXML_version_1_1(self): + table = newTable('STAT') + font = FakeFont(['.notdef']) + for name, attrs, content in parseXML(STAT_XML_VERSION_1_1): + table.fromXML(name, attrs, content, font=font) + self.assertEqual(table.compile(font), STAT_DATA_VERSION_1_1) + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/tables_test.py fonttools-3.21.2/Tests/ttLib/tables/tables_test.py --- fonttools-3.0/Tests/ttLib/tables/tables_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/tables_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,329 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont, tagToXML +import os +import sys +import re +import contextlib +import pytest + +try: + import unicodedata2 +except ImportError: + if sys.version_info[:2] < (3, 6): + unicodedata2 = None + else: + # on 3.6 the built-in unicodedata is the same as unicodedata2 backport + import unicodedata + unicodedata2 = unicodedata + + +# Font files in data/*.{o,t}tf; output gets compared to data/*.ttx.* +TESTS = { + "aots/base.otf": ('CFF ', 'cmap', 'head', + 'hhea', 'hmtx', 'maxp', + 'name', 'OS/2', 'post'), + "aots/classdef1_font1.otf": ('GSUB',), + "aots/classdef1_font2.otf": ('GSUB',), + 
"aots/classdef1_font3.otf": ('GSUB',), + "aots/classdef1_font4.otf": ('GSUB',), + "aots/classdef2_font1.otf": ('GSUB',), + "aots/classdef2_font2.otf": ('GSUB',), + "aots/classdef2_font3.otf": ('GSUB',), + "aots/classdef2_font4.otf": ('GSUB',), + "aots/cmap0_font1.otf": ('cmap',), + "aots/cmap10_font1.otf": ('cmap',), + "aots/cmap10_font2.otf": ('cmap',), + "aots/cmap12_font1.otf": ('cmap',), + "aots/cmap14_font1.otf": ('cmap',), + "aots/cmap2_font1.otf": ('cmap',), + "aots/cmap4_font1.otf": ('cmap',), + "aots/cmap4_font2.otf": ('cmap',), + "aots/cmap4_font3.otf": ('cmap',), + "aots/cmap4_font4.otf": ('cmap',), + "aots/cmap6_font1.otf": ('cmap',), + "aots/cmap6_font2.otf": ('cmap',), + "aots/cmap8_font1.otf": ('cmap',), + "aots/cmap_composition_font1.otf": ('cmap',), + "aots/cmap_subtableselection_font1.otf": ('cmap',), + "aots/cmap_subtableselection_font2.otf": ('cmap',), + "aots/cmap_subtableselection_font3.otf": ('cmap',), + "aots/cmap_subtableselection_font4.otf": ('cmap',), + "aots/cmap_subtableselection_font5.otf": ('cmap',), + "aots/gpos1_1_lookupflag_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos1_1_simple_f1.otf": ('GPOS',), + "aots/gpos1_1_simple_f2.otf": ('GPOS',), + "aots/gpos1_1_simple_f3.otf": ('GPOS',), + "aots/gpos1_1_simple_f4.otf": ('GPOS',), + "aots/gpos1_2_font1.otf": ('GPOS',), + "aots/gpos1_2_font2.otf": ('GDEF', 'GPOS'), + "aots/gpos2_1_font6.otf": ('GPOS',), + "aots/gpos2_1_font7.otf": ('GPOS',), + "aots/gpos2_1_lookupflag_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos2_1_lookupflag_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos2_1_next_glyph_f1.otf": ('GPOS',), + "aots/gpos2_1_next_glyph_f2.otf": ('GPOS',), + "aots/gpos2_1_simple_f1.otf": ('GPOS',), + "aots/gpos2_2_font1.otf": ('GPOS',), + "aots/gpos2_2_font2.otf": ('GDEF', 'GPOS'), + "aots/gpos2_2_font3.otf": ('GDEF', 'GPOS'), + "aots/gpos2_2_font4.otf": ('GPOS',), + "aots/gpos2_2_font5.otf": ('GPOS',), + "aots/gpos3_font1.otf": ('GPOS',), + "aots/gpos3_font2.otf": ('GDEF', 'GPOS'), + "aots/gpos3_font3.otf": 
('GDEF', 'GPOS'), + "aots/gpos4_lookupflag_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos4_lookupflag_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos4_multiple_anchors_1.otf": ('GDEF', 'GPOS'), + "aots/gpos4_simple_1.otf": ('GDEF', 'GPOS'), + "aots/gpos5_font1.otf": ('GDEF', 'GPOS', 'GSUB'), + "aots/gpos6_font1.otf": ('GDEF', 'GPOS'), + "aots/gpos7_1_font1.otf": ('GPOS',), + "aots/gpos9_font1.otf": ('GPOS',), + "aots/gpos9_font2.otf": ('GPOS',), + "aots/gpos_chaining1_boundary_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining1_boundary_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining1_boundary_f3.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining1_boundary_f4.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining1_lookupflag_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining1_multiple_subrules_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining1_multiple_subrules_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining1_next_glyph_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining1_simple_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining1_simple_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining1_successive_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_boundary_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_boundary_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_boundary_f3.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_boundary_f4.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_lookupflag_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_multiple_subrules_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_multiple_subrules_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_next_glyph_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_simple_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_simple_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining2_successive_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining3_boundary_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining3_boundary_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining3_boundary_f3.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining3_boundary_f4.otf": ('GDEF', 
'GPOS'), + "aots/gpos_chaining3_lookupflag_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining3_next_glyph_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining3_simple_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining3_simple_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_chaining3_successive_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_boundary_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_boundary_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_expansion_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_lookupflag_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_lookupflag_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_multiple_subrules_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_multiple_subrules_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_next_glyph_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_simple_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_simple_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_context1_successive_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_boundary_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_boundary_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_classes_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_classes_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_expansion_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_lookupflag_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_lookupflag_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_multiple_subrules_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_multiple_subrules_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_next_glyph_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_simple_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_simple_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_context2_successive_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context3_boundary_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context3_boundary_f2.otf": ('GDEF', 'GPOS'), + "aots/gpos_context3_lookupflag_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context3_lookupflag_f2.otf": ('GDEF', 'GPOS'), + 
"aots/gpos_context3_next_glyph_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context3_simple_f1.otf": ('GDEF', 'GPOS'), + "aots/gpos_context3_successive_f1.otf": ('GDEF', 'GPOS'), + "aots/gsub1_1_lookupflag_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub1_1_modulo_f1.otf": ('GSUB',), + "aots/gsub1_1_simple_f1.otf": ('GSUB',), + "aots/gsub1_2_lookupflag_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub1_2_simple_f1.otf": ('GSUB',), + "aots/gsub2_1_lookupflag_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub2_1_multiple_sequences_f1.otf": ('GSUB',), + "aots/gsub2_1_simple_f1.otf": ('GSUB',), + "aots/gsub3_1_lookupflag_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub3_1_multiple_f1.otf": ('GSUB',), + "aots/gsub3_1_simple_f1.otf": ('GSUB',), + "aots/gsub4_1_lookupflag_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub4_1_multiple_ligatures_f1.otf": ('GSUB',), + "aots/gsub4_1_multiple_ligatures_f2.otf": ('GSUB',), + "aots/gsub4_1_multiple_ligsets_f1.otf": ('GSUB',), + "aots/gsub4_1_simple_f1.otf": ('GSUB',), + "aots/gsub7_font1.otf": ('GSUB',), + "aots/gsub7_font2.otf": ('GSUB',), + "aots/gsub_chaining1_boundary_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining1_boundary_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining1_boundary_f3.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining1_boundary_f4.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining1_lookupflag_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining1_multiple_subrules_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining1_multiple_subrules_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining1_next_glyph_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining1_simple_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining1_simple_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining1_successive_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining2_boundary_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining2_boundary_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining2_boundary_f3.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining2_boundary_f4.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining2_lookupflag_f1.otf": ('GDEF', 
'GSUB'), + "aots/gsub_chaining2_multiple_subrules_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining2_multiple_subrules_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining2_next_glyph_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining2_simple_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining2_simple_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining2_successive_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining3_boundary_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining3_boundary_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining3_boundary_f3.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining3_boundary_f4.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining3_lookupflag_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining3_next_glyph_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining3_simple_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining3_simple_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_chaining3_successive_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_boundary_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_boundary_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_expansion_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_lookupflag_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_lookupflag_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_multiple_subrules_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_multiple_subrules_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_next_glyph_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_simple_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_simple_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_context1_successive_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_boundary_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_boundary_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_classes_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_classes_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_expansion_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_lookupflag_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_lookupflag_f2.otf": ('GDEF', 'GSUB'), + 
"aots/gsub_context2_multiple_subrules_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_multiple_subrules_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_next_glyph_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_simple_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_simple_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_context2_successive_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context3_boundary_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context3_boundary_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_context3_lookupflag_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context3_lookupflag_f2.otf": ('GDEF', 'GSUB'), + "aots/gsub_context3_next_glyph_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context3_simple_f1.otf": ('GDEF', 'GSUB'), + "aots/gsub_context3_successive_f1.otf": ('GDEF', 'GSUB'), + "aots/lookupflag_ignore_attach_f1.otf": ('GDEF', 'GSUB'), + "aots/lookupflag_ignore_base_f1.otf": ('GDEF', 'GSUB'), + "aots/lookupflag_ignore_combination_f1.otf": ('GDEF', 'GSUB'), + "aots/lookupflag_ignore_ligatures_f1.otf": ('GDEF', 'GSUB'), + "aots/lookupflag_ignore_marks_f1.otf": ('GDEF', 'GSUB'), + "graphite/graphite_tests.ttf": ('Silf', 'Glat', 'Feat', 'Sill'), +} + + +TEST_REQUIREMENTS = { + "aots/cmap4_font4.otf": ("unicodedata2",), +} + + +ttLibVersion_RE = re.compile(r' ttLibVersion=".*"') + + +def getpath(testfile): + path = os.path.dirname(__file__) + return os.path.join(path, "data", testfile) + + +def read_expected_ttx(testfile, tableTag): + name = os.path.splitext(testfile)[0] + xml_expected_path = getpath("%s.ttx.%s" % (name, tagToXML(tableTag))) + with open(xml_expected_path, 'r', encoding="utf-8") as xml_file: + xml_expected = ttLibVersion_RE.sub('', xml_file.read()) + return xml_expected + + +def dump_ttx(font, tableTag): + f = UnicodeIO() + font.saveXML(f, newlinestr='\n', tables=[tableTag]) + return ttLibVersion_RE.sub('', f.getvalue()) + + +def load_ttx(ttx): + f = UnicodeIO() + f.write(ttx) + f.seek(0) + font = TTFont() + font.importXML(f) + return font + + 
+@contextlib.contextmanager +def open_font(testfile): + font = TTFont(getpath(testfile)) + try: + yield font + finally: + font.close() + + +def _skip_if_requirement_missing(testfile): + if testfile in TEST_REQUIREMENTS: + for req in TEST_REQUIREMENTS[testfile]: + if globals()[req] is None: + pytest.skip('%s not installed' % req) + + +def test_xml_from_binary(testfile, tableTag): + """Check XML from decompiled object.""" + _skip_if_requirement_missing(testfile) + + xml_expected = read_expected_ttx(testfile, tableTag) + + with open_font(testfile) as font: + xml_from_binary = dump_ttx(font, tableTag) + + assert xml_expected == xml_from_binary + + +def test_xml_from_xml(testfile, tableTag): + """Check XML from object read from XML.""" + _skip_if_requirement_missing(testfile) + + xml_expected = read_expected_ttx(testfile, tableTag) + + font = load_ttx(xml_expected) + name = os.path.splitext(testfile)[0] + setupfile = getpath("%s.ttx.%s.setup" % (name, tagToXML(tableTag))) + if os.path.exists(setupfile): +# import pdb; pdb.set_trace() + font.importXML(setupfile) + xml_from_xml = dump_ttx(font, tableTag) + + assert xml_expected == xml_from_xml + + +def pytest_generate_tests(metafunc): + # http://doc.pytest.org/en/latest/parametrize.html#basic-pytest-generate-tests-example + fixturenames = metafunc.fixturenames + argnames = ("testfile", "tableTag") + if all(fn in fixturenames for fn in argnames): + argvalues = [(testfile, tableTag) + for testfile, tableTags in sorted(TESTS.items()) + for tableTag in tableTags] + metafunc.parametrize(argnames, argvalues) + + +if __name__ == '__main__': + sys.exit(pytest.main(sys.argv)) diff -Nru fonttools-3.0/Tests/ttLib/tables/_t_r_a_k_test.py fonttools-3.21.2/Tests/ttLib/tables/_t_r_a_k_test.py --- fonttools-3.0/Tests/ttLib/tables/_t_r_a_k_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_t_r_a_k_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,341 @@ +from __future__ import print_function, 
division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.testTools import parseXML, getXML +from fontTools.misc.textTools import deHexStr +from fontTools.ttLib import TTFont, TTLibError +from fontTools.ttLib.tables._t_r_a_k import * +from fontTools.ttLib.tables._n_a_m_e import table__n_a_m_e, NameRecord +import unittest + + +# /Library/Fonts/Osaka.ttf from OSX has trak table with both horiz and vertData +OSAKA_TRAK_TABLE_DATA = deHexStr( + '00 01 00 00 00 00 00 0c 00 40 00 00 00 03 00 02 00 00 00 2c ff ff ' + '00 00 01 06 00 34 00 00 00 00 01 07 00 38 00 01 00 00 01 08 00 3c ' + '00 0c 00 00 00 18 00 00 ff f4 ff f4 00 00 00 00 00 0c 00 0c 00 03 ' + '00 02 00 00 00 60 ff ff 00 00 01 09 00 68 00 00 00 00 01 0a 00 6c ' + '00 01 00 00 01 0b 00 70 00 0c 00 00 00 18 00 00 ff f4 ff f4 00 00 ' + '00 00 00 0c 00 0c') + +# decompiled horizData and vertData entries from Osaka.ttf +OSAKA_HORIZ_TRACK_ENTRIES = { + -1.0: TrackTableEntry({24.0: -12, 12.0: -12}, nameIndex=262), + 0.0: TrackTableEntry({24.0: 0, 12.0: 0}, nameIndex=263), + 1.0: TrackTableEntry({24.0: 12, 12.0: 12}, nameIndex=264) + } + +OSAKA_VERT_TRACK_ENTRIES = { + -1.0: TrackTableEntry({24.0: -12, 12.0: -12}, nameIndex=265), + 0.0: TrackTableEntry({24.0: 0, 12.0: 0}, nameIndex=266), + 1.0: TrackTableEntry({24.0: 12, 12.0: 12}, nameIndex=267) + } + +OSAKA_TRAK_TABLE_XML = [ + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + +# made-up table containing only vertData (no horizData) +OSAKA_VERT_ONLY_TRAK_TABLE_DATA = deHexStr( + '00 01 00 00 00 00 00 00 00 0c 00 00 00 03 00 02 00 00 00 2c ff ff ' + '00 00 01 09 00 34 00 00 00 00 01 0a 00 38 00 01 00 00 01 0b 00 3c ' + '00 0c 00 00 00 18 00 00 ff f4 ff f4 00 00 00 00 00 0c 00 0c') + +OSAKA_VERT_ONLY_TRAK_TABLE_XML = 
[ + '', + '', + '', + ' ', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', +] + + +# also /Library/Fonts/Skia.ttf contains a trak table with horizData +SKIA_TRAK_TABLE_DATA = deHexStr( + '00 01 00 00 00 00 00 0c 00 00 00 00 00 03 00 05 00 00 00 2c ff ff ' + '00 00 01 13 00 40 00 00 00 00 01 2f 00 4a 00 01 00 00 01 14 00 54 ' + '00 09 00 00 00 0a 00 00 00 0c 00 00 00 12 00 00 00 13 00 00 ff f6 ' + 'ff e2 ff c4 ff c1 ff c1 00 0f 00 00 ff fb ff e7 ff e7 00 8c 00 82 ' + '00 7d 00 73 00 73') + +SKIA_TRACK_ENTRIES = { + -1.0: TrackTableEntry( + {9.0: -10, 10.0: -30, 19.0: -63, 12.0: -60, 18.0: -63}, nameIndex=275), + 0.0: TrackTableEntry( + {9.0: 15, 10.0: 0, 19.0: -25, 12.0: -5, 18.0: -25}, nameIndex=303), + 1.0: TrackTableEntry( + {9.0: 140, 10.0: 130, 19.0: 115, 12.0: 125, 18.0: 115}, nameIndex=276) + } + +SKIA_TRAK_TABLE_XML = [ + '', + '', + '', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + ' ', + '', + '', + ' ', + '', +] + + +class TrackingTableTest(unittest.TestCase): + + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def setUp(self): + table = table__t_r_a_k() + table.version = 1.0 + table.format = 0 + self.font = {'trak': table} + + def test_compile_horiz(self): + table = self.font['trak'] + table.horizData = TrackData(SKIA_TRACK_ENTRIES) + trakData = table.compile(self.font) + self.assertEqual(trakData, SKIA_TRAK_TABLE_DATA) + + def test_compile_vert(self): + table = self.font['trak'] + table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES) + trakData = table.compile(self.font) + self.assertEqual(trakData, OSAKA_VERT_ONLY_TRAK_TABLE_DATA) + + def test_compile_horiz_and_vert(self): + table = self.font['trak'] + table.horizData = TrackData(OSAKA_HORIZ_TRACK_ENTRIES) + table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES) + trakData = table.compile(self.font) + self.assertEqual(trakData, OSAKA_TRAK_TABLE_DATA) + + def test_compile_longword_aligned(self): + table = self.font['trak'] + # without padding, this 'horizData' would end up 46 byte long + table.horizData = TrackData({ + 0.0: TrackTableEntry(nameIndex=256, values={12.0: 0, 24.0: 0, 36.0: 0}) + }) + table.vertData = TrackData({ + 0.0: TrackTableEntry(nameIndex=257, values={12.0: 0, 24.0: 0, 36.0: 0}) + }) + trakData = table.compile(self.font) + self.assertTrue(table.vertOffset % 4 == 0) + + def test_compile_sizes_mismatch(self): + table = self.font['trak'] + table.horizData = TrackData({ + -1.0: TrackTableEntry(nameIndex=256, values={9.0: -10, 10.0: -30}), + 0.0: TrackTableEntry(nameIndex=257, values={8.0: 20, 12.0: 0}) + }) + with self.assertRaisesRegex(TTLibError, 'entries must specify the same sizes'): + table.compile(self.font) + + def test_decompile_horiz(self): + table = self.font['trak'] + table.decompile(SKIA_TRAK_TABLE_DATA, self.font) + self.assertEqual(table.horizData, SKIA_TRACK_ENTRIES) + self.assertEqual(table.vertData, TrackData()) + + def test_decompile_vert(self): + table = self.font['trak'] + 
table.decompile(OSAKA_VERT_ONLY_TRAK_TABLE_DATA, self.font) + self.assertEqual(table.horizData, TrackData()) + self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES) + + def test_decompile_horiz_and_vert(self): + table = self.font['trak'] + table.decompile(OSAKA_TRAK_TABLE_DATA, self.font) + self.assertEqual(table.horizData, OSAKA_HORIZ_TRACK_ENTRIES) + self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES) + + def test_roundtrip_decompile_compile(self): + for trakData in ( + OSAKA_TRAK_TABLE_DATA, + OSAKA_VERT_ONLY_TRAK_TABLE_DATA, + SKIA_TRAK_TABLE_DATA): + table = table__t_r_a_k() + table.decompile(trakData, ttFont=None) + newTrakData = table.compile(ttFont=None) + self.assertEqual(trakData, newTrakData) + + def test_fromXML_horiz(self): + table = self.font['trak'] + for name, attrs, content in parseXML(SKIA_TRAK_TABLE_XML): + table.fromXML(name, attrs, content, self.font) + self.assertEqual(table.version, 1.0) + self.assertEqual(table.format, 0) + self.assertEqual(table.horizData, SKIA_TRACK_ENTRIES) + self.assertEqual(table.vertData, TrackData()) + + def test_fromXML_horiz_and_vert(self): + table = self.font['trak'] + for name, attrs, content in parseXML(OSAKA_TRAK_TABLE_XML): + table.fromXML(name, attrs, content, self.font) + self.assertEqual(table.version, 1.0) + self.assertEqual(table.format, 0) + self.assertEqual(table.horizData, OSAKA_HORIZ_TRACK_ENTRIES) + self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES) + + def test_fromXML_vert(self): + table = self.font['trak'] + for name, attrs, content in parseXML(OSAKA_VERT_ONLY_TRAK_TABLE_XML): + table.fromXML(name, attrs, content, self.font) + self.assertEqual(table.version, 1.0) + self.assertEqual(table.format, 0) + self.assertEqual(table.horizData, TrackData()) + self.assertEqual(table.vertData, OSAKA_VERT_TRACK_ENTRIES) + + def test_toXML_horiz(self): + table = self.font['trak'] + table.horizData = TrackData(SKIA_TRACK_ENTRIES) + add_name(self.font, 'Tight', nameID=275) + add_name(self.font, 
'Normal', nameID=303) + add_name(self.font, 'Loose', nameID=276) + self.assertEqual( + SKIA_TRAK_TABLE_XML, + getXML(table.toXML, self.font)) + + def test_toXML_horiz_and_vert(self): + table = self.font['trak'] + table.horizData = TrackData(OSAKA_HORIZ_TRACK_ENTRIES) + table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES) + add_name(self.font, 'Tight', nameID=262) + add_name(self.font, 'Normal', nameID=263) + add_name(self.font, 'Loose', nameID=264) + add_name(self.font, 'Tight', nameID=265) + add_name(self.font, 'Normal', nameID=266) + add_name(self.font, 'Loose', nameID=267) + self.assertEqual( + OSAKA_TRAK_TABLE_XML, + getXML(table.toXML, self.font)) + + def test_toXML_vert(self): + table = self.font['trak'] + table.vertData = TrackData(OSAKA_VERT_TRACK_ENTRIES) + add_name(self.font, 'Tight', nameID=265) + add_name(self.font, 'Normal', nameID=266) + add_name(self.font, 'Loose', nameID=267) + self.assertEqual( + OSAKA_VERT_ONLY_TRAK_TABLE_XML, + getXML(table.toXML, self.font)) + + def test_roundtrip_fromXML_toXML(self): + font = {} + add_name(font, 'Tight', nameID=275) + add_name(font, 'Normal', nameID=303) + add_name(font, 'Loose', nameID=276) + add_name(font, 'Tight', nameID=262) + add_name(font, 'Normal', nameID=263) + add_name(font, 'Loose', nameID=264) + add_name(font, 'Tight', nameID=265) + add_name(font, 'Normal', nameID=266) + add_name(font, 'Loose', nameID=267) + for input_xml in ( + SKIA_TRAK_TABLE_XML, + OSAKA_TRAK_TABLE_XML, + OSAKA_VERT_ONLY_TRAK_TABLE_XML): + table = table__t_r_a_k() + font['trak'] = table + for name, attrs, content in parseXML(input_xml): + table.fromXML(name, attrs, content, font) + output_xml = getXML(table.toXML, font) + self.assertEqual(input_xml, output_xml) + + +def add_name(font, string, nameID): + nameTable = font.get("name") + if nameTable is None: + nameTable = font["name"] = table__n_a_m_e() + nameTable.names = [] + namerec = NameRecord() + namerec.nameID = nameID + namerec.string = string.encode('mac_roman') + 
namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0) + nameTable.names.append(namerec) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/T_S_I__0_test.py fonttools-3.21.2/Tests/ttLib/tables/T_S_I__0_test.py --- fonttools-3.0/Tests/ttLib/tables/T_S_I__0_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/T_S_I__0_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,106 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import SimpleNamespace +from fontTools.misc.textTools import deHexStr +from fontTools.misc.testTools import getXML +from fontTools.ttLib.tables.T_S_I__0 import table_T_S_I__0 +import pytest + + +# (gid, length, offset) for glyph programs +TSI0_INDICES = [ + (0, 1, 0), + (1, 5, 1), + (2, 0, 1), + (3, 0, 1), + (4, 8, 6)] + +# (type, length, offset) for 'extra' programs +TSI0_EXTRA_INDICES = [ + (0xFFFA, 2, 14), # ppgm + (0xFFFB, 4, 16), # cvt + (0xFFFC, 6, 20), # reserved + (0xFFFD, 10, 26)] # fpgm + +# compiled TSI0 table from data above +TSI0_DATA = deHexStr( + "0000 0001 00000000" + "0001 0005 00000001" + "0002 0000 00000001" + "0003 0000 00000001" + "0004 0008 00000006" + "FFFE 0000 ABFC1F34" # 'magic' separates glyph from extra programs + "FFFA 0002 0000000E" + "FFFB 0004 00000010" + "FFFC 0006 00000014" + "FFFD 000A 0000001A") + +# empty font has no glyph programs but 4 extra programs are always present +EMPTY_TSI0_EXTRA_INDICES = [ + (0xFFFA, 0, 0), + (0xFFFB, 0, 0), + (0xFFFC, 0, 0), + (0xFFFD, 0, 0)] + +EMPTY_TSI0_DATA = deHexStr( + "FFFE 0000 ABFC1F34" + "FFFA 0000 00000000" + "FFFB 0000 00000000" + "FFFC 0000 00000000" + "FFFD 0000 00000000") + + +@pytest.fixture +def table(): + return table_T_S_I__0() + + +@pytest.mark.parametrize( + "numGlyphs, data, expected_indices, expected_extra_indices", + [ + (5, TSI0_DATA, TSI0_INDICES, TSI0_EXTRA_INDICES), + (0, EMPTY_TSI0_DATA, [], 
EMPTY_TSI0_EXTRA_INDICES) + ], + ids=["simple", "empty"] +) +def test_decompile(table, numGlyphs, data, expected_indices, + expected_extra_indices): + font = {'maxp': SimpleNamespace(numGlyphs=numGlyphs)} + + table.decompile(data, font) + + assert len(table.indices) == numGlyphs + assert table.indices == expected_indices + assert len(table.extra_indices) == 4 + assert table.extra_indices == expected_extra_indices + + +@pytest.mark.parametrize( + "numGlyphs, indices, extra_indices, expected_data", + [ + (5, TSI0_INDICES, TSI0_EXTRA_INDICES, TSI0_DATA), + (0, [], EMPTY_TSI0_EXTRA_INDICES, EMPTY_TSI0_DATA) + ], + ids=["simple", "empty"] +) +def test_compile(table, numGlyphs, indices, extra_indices, expected_data): + assert table.compile(ttFont=None) == b"" + + table.set(indices, extra_indices) + data = table.compile(ttFont=None) + assert data == expected_data + + +def test_set(table): + table.set(TSI0_INDICES, TSI0_EXTRA_INDICES) + assert table.indices == TSI0_INDICES + assert table.extra_indices == TSI0_EXTRA_INDICES + + +def test_toXML(table): + assert getXML(table.toXML, ttFont=None) == [ + ''] + + +if __name__ == "__main__": + import sys + sys.exit(pytest.main(sys.argv)) diff -Nru fonttools-3.0/Tests/ttLib/tables/T_S_I__1_test.py fonttools-3.21.2/Tests/ttLib/tables/T_S_I__1_test.py --- fonttools-3.0/Tests/ttLib/tables/T_S_I__1_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/T_S_I__1_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,184 @@ +from __future__ import ( + print_function, division, absolute_import, unicode_literals +) +from fontTools.misc.py23 import unichr, tobytes +from fontTools.misc.loggingTools import CapturingLogHandler +from fontTools.ttLib import TTFont, TTLibError +from fontTools.ttLib.tables.T_S_I__0 import table_T_S_I__0 +from fontTools.ttLib.tables.T_S_I__1 import table_T_S_I__1 +import pytest + + +TSI1_DATA = b"""abcdefghijklmnopqrstuvxywz0123456789""" +TSI1_UTF8_DATA = 
b"""abcd\xc3\xa9ghijklmnopqrstuvxywz0123456789""" + + +@pytest.fixture +def indextable(): + table = table_T_S_I__0() + table.set( + [(0, 1, 0), # gid 0, length=1, offset=0, text='a' + (1, 5, 1), # gid 1, length=5, offset=1, text='bcdef' + (2, 0, 1), # gid 2, length=0, offset=1, text='' + (3, 0, 1), # gid 3, length=0, offset=1, text='' + (4, 8, 6)], # gid 4, length=8, offset=6, text='ghijklmn' + [(0xFFFA, 2, 14), # 'ppgm', length=2, offset=14, text='op' + (0xFFFB, 4, 16), # 'cvt', length=4, offset=16, text='qrst' + (0xFFFC, 6, 20), # 'reserved', length=6, offset=20, text='uvxywz' + (0xFFFD, 10, 26)] # 'fpgm', length=10, offset=26, text='0123456789' + ) + return table + + +@pytest.fixture +def font(indextable): + font = TTFont() + # ['a', 'b', 'c', ...] + ch = 0x61 + n = len(indextable.indices) + font.glyphOrder = [unichr(i) for i in range(ch, ch+n)] + font['TSI0'] = indextable + return font + + +@pytest.fixture +def empty_font(): + font = TTFont() + font.glyphOrder = [] + indextable = table_T_S_I__0() + indextable.set([], [(0xFFFA, 0, 0), + (0xFFFB, 0, 0), + (0xFFFC, 0, 0), + (0xFFFD, 0, 0)]) + font['TSI0'] = indextable + return font + + +def test_decompile(font): + table = table_T_S_I__1() + table.decompile(TSI1_DATA, font) + + assert table.glyphPrograms == { + 'a': 'a', + 'b': 'bcdef', + # 'c': '', # zero-length entries are skipped + # 'd': '', + 'e': 'ghijklmn'} + assert table.extraPrograms == { + 'ppgm': 'op', + 'cvt': 'qrst', + 'reserved': 'uvxywz', + 'fpgm': '0123456789'} + + +def test_decompile_utf8(font): + table = table_T_S_I__1() + table.decompile(TSI1_UTF8_DATA, font) + + assert table.glyphPrograms == { + 'a': 'a', + 'b': 'bcd\u00e9', + # 'c': '', # zero-length entries are skipped + # 'd': '', + 'e': 'ghijklmn'} + assert table.extraPrograms == { + 'ppgm': 'op', + 'cvt': 'qrst', + 'reserved': 'uvxywz', + 'fpgm': '0123456789'} + + +def test_decompile_empty(empty_font): + table = table_T_S_I__1() + table.decompile(b"", empty_font) + + assert 
table.glyphPrograms == {} + assert table.extraPrograms == {} + + +def test_decompile_invalid_length(empty_font): + empty_font.glyphOrder = ['a'] + empty_font['TSI0'].indices = [(0, 0x8000+1, 0)] + + table = table_T_S_I__1() + with pytest.raises(TTLibError) as excinfo: + table.decompile(b'', empty_font) + assert excinfo.match("textLength .* must not be > 32768") + + +def test_decompile_offset_past_end(empty_font): + empty_font.glyphOrder = ['foo', 'bar'] + content = 'baz' + data = tobytes(content) + empty_font['TSI0'].indices = [(0, len(data), 0), (1, 1, len(data)+1)] + + table = table_T_S_I__1() + with CapturingLogHandler(table.log, "WARNING") as captor: + table.decompile(data, empty_font) + + # the 'bar' program is skipped because its offset > len(data) + assert table.glyphPrograms == {'foo': 'baz'} + assert any("textOffset > totalLength" in r.msg for r in captor.records) + + +def test_decompile_magic_length_last_extra(empty_font): + indextable = empty_font['TSI0'] + indextable.extra_indices[-1] = (0xFFFD, 0x8000, 0) + content = "0" * (0x8000 + 1) + data = tobytes(content) + + table = table_T_S_I__1() + table.decompile(data, empty_font) + + assert table.extraPrograms['fpgm'] == content + + +def test_decompile_magic_length_last_glyph(empty_font): + empty_font.glyphOrder = ['foo', 'bar'] + indextable = empty_font['TSI0'] + indextable.indices = [ + (0, 3, 0), + (1, 0x8000, 3)] # the actual length of 'bar' program is + indextable.extra_indices = [ # the difference between the first extra's + (0xFFFA, 0, 0x8004), # offset and 'bar' offset: 0x8004 - 3 + (0xFFFB, 0, 0x8004), + (0xFFFC, 0, 0x8004), + (0xFFFD, 0, 0x8004)] + foo_content = "0" * 3 + bar_content = "1" * (0x8000 + 1) + data = tobytes(foo_content + bar_content) + + table = table_T_S_I__1() + table.decompile(data, empty_font) + + assert table.glyphPrograms['foo'] == foo_content + assert table.glyphPrograms['bar'] == bar_content + + +def test_decompile_magic_length_non_last(empty_font): + indextable = 
empty_font['TSI0'] + indextable.extra_indices = [ + (0xFFFA, 3, 0), + (0xFFFB, 0x8000, 3), # the actual length of 'cvt' program is: + (0xFFFC, 0, 0x8004), # nextTextOffset - textOffset: 0x8004 - 3 + (0xFFFD, 0, 0x8004)] + ppgm_content = "0" * 3 + cvt_content = "1" * (0x8000 + 1) + data = tobytes(ppgm_content + cvt_content) + + table = table_T_S_I__1() + table.decompile(data, empty_font) + + assert table.extraPrograms['ppgm'] == ppgm_content + assert table.extraPrograms['cvt'] == cvt_content + + table = table_T_S_I__1() + with CapturingLogHandler(table.log, "WARNING") as captor: + table.decompile(data[:-1], empty_font) # last entry is truncated + captor.assertRegex("nextTextOffset > totalLength") + assert table.extraPrograms['cvt'] == cvt_content[:-1] + + +if __name__ == "__main__": + import sys + sys.exit(pytest.main(sys.argv)) diff -Nru fonttools-3.0/Tests/ttLib/tables/ttProgram_test.py fonttools-3.21.2/Tests/ttLib/tables/ttProgram_test.py --- fonttools-3.0/Tests/ttLib/tables/ttProgram_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/ttProgram_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,119 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib.tables.ttProgram import Program +from fontTools.misc.textTools import deHexStr +import array +import os +import re +import unittest + +CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +DATA_DIR = os.path.join(CURR_DIR, 'data') + +TTPROGRAM_TTX = os.path.join(DATA_DIR, "ttProgram.ttx") +#TTPROGRAM_BIN = os.path.join(DATA_DIR, "ttProgram.bin") + +BYTECODE = deHexStr( + '403b3a393837363534333231302f2e2d2c2b2a292827262524232221201f1e1d1c1b1a' + '191817161514131211100f0e0d0c0b0a090807060504030201002c01b0184358456ab0' + '194360b0462344231020b0464ef04d2fb000121b21231133592d2c01b0184358b0052b' + 
'b000134bb0145058b100403859b0062b1b21231133592d2c01b01843584eb0032510f2' + '21b000124d1b2045b00425b00425234a6164b0285258212310d61bb0032510f221b000' + '1259592d2cb01a435821211bb00225b0022549b00325b003254a612064b01050582121' + '211bb00325b0032549b0005058b0005058b8ffe238211bb0103821591bb0005258b01e' + '38211bb8fff03821595959592d2c01b0184358b0052bb000134bb0145058b90000ffc0' + '3859b0062b1b21231133592d2c4e018a10b146194344b00014b10046e2b00015b90000' + 'fff03800b0003cb0282bb0022510b0003c2d2c0118b0002fb00114f2b00113b001154d' + 'b000122d2c01b0184358b0052bb00013b90000ffe038b0062b1b21231133592d2c01b0' + '18435845646a23456469b01943646060b0462344231020b046f02fb000121b2121208a' + '208a525811331b212159592d2c01b10b0a432343650a2d2c00b10a0b4323430b2d2c00' + 'b0462370b101463e01b0462370b10246453ab10200080d2d2cb0122bb0022545b00225' + '456ab0408b60b0022523442121212d2cb0132bb0022545b00225456ab8ffc08c60b002' + '2523442121212d2cb000b0122b2121212d2cb000b0132b2121212d2c01b00643b00743' + '650a2d2c2069b04061b0008b20b12cc08a8cb8100062602b0c642364615c58b0036159' + '2d2cb1000325456854b01c4b505a58b0032545b0032545606820b004252344b0042523' + '441bb00325204568208a2344b00325456860b003252344592d2cb00325204568208a23' + '44b003254564686560b00425b0016023442d2cb00943588721c01bb01243588745b011' + '2bb0472344b0477ae41b038a45186920b04723448a8a8720b0a05158b0112bb0472344' + 'b0477ae41b21b0477ae4595959182d2c208a4523456860442d2c456a422d2c01182f2d' + '2c01b0184358b00425b00425496423456469b0408b6120b080626ab00225b00225618c' + 'b0194360b0462344218a10b046f6211b21212121592d2c01b0184358b0022545b00225' + '4564606ab00325456a6120b00425456a208a8b65b0042523448cb00325234421211b20' + '456a4420456a44592d2c012045b00055b018435a584568234569b0408b6120b080626a' + '208a236120b003258b65b0042523448cb00325234421211b2121b0192b592d2c018a8a' + '45642345646164422d2cb00425b00425b0192bb0184358b00425b00425b00325b01b2b' + '01b0022543b04054b0022543b000545a58b003252045b040614459b0022543b00054b0' + 
'022543b040545a58b004252045b04060445959212121212d2c014b525843b002254523' + '61441b2121592d2c014b525843b00225452360441b2121592d2c4b525845441b212159' + '2d2c0120b003252349b04060b0206320b000525823b002253823b002256538008a6338' + '1b212121212159012d2c4b505845441b2121592d2c01b005251023208af500b0016023' + 'edec2d2c01b005251023208af500b0016123edec2d2c01b0062510f500edec2d2c4623' + '46608a8a462320468a608a61b8ff8062232010238ab14b4b8a70456020b0005058b001' + '61b8ffba8b1bb0468c59b0106068013a2d2c2045b00325465258b0022546206861b003' + '25b003253f2321381b2111592d2c2045b00325465058b0022546206861b00325b00325' + '3f2321381b2111592d2c00b00743b006430b2d2c8a10ec2d2cb00c4358211b2046b000' + '5258b8fff0381bb0103859592d2c20b0005558b8100063b003254564b00325456461b0' + '005358b0021bb04061b00359254569535845441b2121591b21b0022545b00225456164' + 'b028515845441b212159592d2c21210c6423648bb84000622d2c21b08051580c642364' + '8bb82000621bb200402f2b59b002602d2c21b0c051580c6423648bb81555621bb20080' + '2f2b59b002602d2c0c6423648bb84000626023212d2c4b5358b00425b0042549642345' + '6469b0408b6120b080626ab00225b00225618cb0462344218a10b046f6211b218a1123' + '1220392f592d2cb00225b002254964b0c05458b8fff838b008381b2121592d2cb01343' + '58031b02592d2cb0134358021b03592d2cb00a2b2310203cb0172b2d2cb00225b8fff0' + '38b0282b8a102320d023b0102bb0054358c01b3c59201011b00012012d2c4b53234b51' + '5a58381b2121592d2c01b0022510d023c901b00113b0001410b0013cb001162d2c01b0' + '0013b001b0032549b0031738b001132d2c4b53234b515a5820458a60441b2121592d2c' + '20392f2d') + + +class TestFont(object): + disassembleInstructions = True + + +class ProgramTest(unittest.TestCase): + + def test__bool__(self): + p = Program() + assert not bool(p) + + bc = array.array("B", [0]) + p.fromBytecode(bc) + assert bool(p) + + assert p.bytecode.pop() == 0 + assert not bool(p) + + p = Program() + asm = ['SVTCA[0]'] + p.fromAssembly(asm) + assert bool(p) + + assert p.assembly.pop() == 'SVTCA[0]' + assert not bool(p) + + def test_roundtrip(self): + p = Program() + 
p.fromBytecode(BYTECODE) + asm = p.getAssembly(preserve=True) + p.fromAssembly(asm) + assert BYTECODE == p.getBytecode() + + def test_xml_indentation(self): + with open(TTPROGRAM_TTX, 'r', encoding='utf-8') as f: + ttProgramXML = f.read() + p = Program() + p.fromBytecode(BYTECODE) + ttfont = TestFont() + buf = UnicodeIO() + writer = XMLWriter(buf, newlinestr='\n') + try: + p.toXML(writer, ttfont) + finally: + output_string = buf.getvalue() + assert output_string == ttProgramXML + + +if __name__ == '__main__': + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/TupleVariation_test.py fonttools-3.21.2/Tests/ttLib/tables/TupleVariation_test.py --- fonttools-3.0/Tests/ttLib/tables/TupleVariation_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/TupleVariation_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,687 @@ +from __future__ import \ + print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.loggingTools import CapturingLogHandler +from fontTools.misc.testTools import parseXML +from fontTools.misc.textTools import deHexStr, hexStr +from fontTools.misc.xmlWriter import XMLWriter +from fontTools.ttLib.tables.TupleVariation import \ + log, TupleVariation, compileSharedTuples, decompileSharedTuples, \ + compileTupleVariationStore, decompileTupleVariationStore, inferRegion_ +import random +import unittest + + +def hexencode(s): + h = hexStr(s).upper() + return ' '.join([h[i:i+2] for i in range(0, len(h), 2)]) + + +AXES = { + "wdth": (0.3, 0.4, 0.5), + "wght": (0.0, 1.0, 1.0), + "opsz": (-0.7, -0.7, 0.0) +} + + +# Shared tuples in the 'gvar' table of the Skia font, as printed +# in Apple's TrueType specification. 
+# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html +SKIA_GVAR_SHARED_TUPLES_DATA = deHexStr( + "40 00 00 00 C0 00 00 00 00 00 40 00 00 00 C0 00 " + "C0 00 C0 00 40 00 C0 00 40 00 40 00 C0 00 40 00") + +SKIA_GVAR_SHARED_TUPLES = [ + {"wght": 1.0, "wdth": 0.0}, + {"wght": -1.0, "wdth": 0.0}, + {"wght": 0.0, "wdth": 1.0}, + {"wght": 0.0, "wdth": -1.0}, + {"wght": -1.0, "wdth": -1.0}, + {"wght": 1.0, "wdth": -1.0}, + {"wght": 1.0, "wdth": 1.0}, + {"wght": -1.0, "wdth": 1.0} +] + + +# Tuple Variation Store of uppercase I in the Skia font, as printed in Apple's +# TrueType spec. The actual Skia font uses a different table for uppercase I +# than what is printed in Apple's spec, but we still want to make sure that +# we can parse the data as it appears in the specification. +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html +SKIA_GVAR_I_DATA = deHexStr( + "00 08 00 24 00 33 20 00 00 15 20 01 00 1B 20 02 " + "00 24 20 03 00 15 20 04 00 26 20 07 00 0D 20 06 " + "00 1A 20 05 00 40 01 01 01 81 80 43 FF 7E FF 7E " + "FF 7E FF 7E 00 81 45 01 01 01 03 01 04 01 04 01 " + "04 01 02 80 40 00 82 81 81 04 3A 5A 3E 43 20 81 " + "04 0E 40 15 45 7C 83 00 0D 9E F3 F2 F0 F0 F0 F0 " + "F3 9E A0 A1 A1 A1 9F 80 00 91 81 91 00 0D 0A 0A " + "09 0A 0A 0A 0A 0A 0A 0A 0A 0A 0A 0B 80 00 15 81 " + "81 00 C4 89 00 C4 83 00 0D 80 99 98 96 96 96 96 " + "99 80 82 83 83 83 81 80 40 FF 18 81 81 04 E6 F9 " + "10 21 02 81 04 E8 E5 EB 4D DA 83 00 0D CE D3 D4 " + "D3 D3 D3 D5 D2 CE CC CD CD CD CD 80 00 A1 81 91 " + "00 0D 07 03 04 02 02 02 03 03 07 07 08 08 08 07 " + "80 00 09 81 81 00 28 40 00 A4 02 24 24 66 81 04 " + "08 FA FA FA 28 83 00 82 02 FF FF FF 83 02 01 01 " + "01 84 91 00 80 06 07 08 08 08 08 0A 07 80 03 FE " + "FF FF FF 81 00 08 81 82 02 EE EE EE 8B 6D 00") + + +class TupleVariationTest(unittest.TestCase): + def test_equal(self): + var1 = TupleVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + var2 = 
TupleVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + self.assertEqual(var1, var2) + + def test_equal_differentAxes(self): + var1 = TupleVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + var2 = TupleVariation({"wght":(0.7, 0.8, 0.9)}, [(0,0), (9,8), (7,6)]) + self.assertNotEqual(var1, var2) + + def test_equal_differentCoordinates(self): + var1 = TupleVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) + var2 = TupleVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8)]) + self.assertNotEqual(var1, var2) + + def test_hasImpact_someDeltasNotZero(self): + axes = {"wght":(0.0, 1.0, 1.0)} + var = TupleVariation(axes, [(0,0), (9,8), (7,6)]) + self.assertTrue(var.hasImpact()) + + def test_hasImpact_allDeltasZero(self): + axes = {"wght":(0.0, 1.0, 1.0)} + var = TupleVariation(axes, [(0,0), (0,0), (0,0)]) + self.assertTrue(var.hasImpact()) + + def test_hasImpact_allDeltasNone(self): + axes = {"wght":(0.0, 1.0, 1.0)} + var = TupleVariation(axes, [None, None, None]) + self.assertFalse(var.hasImpact()) + + def test_toXML_badDeltaFormat(self): + writer = XMLWriter(BytesIO()) + g = TupleVariation(AXES, ["String"]) + with CapturingLogHandler(log, "ERROR") as captor: + g.toXML(writer, ["wdth"]) + self.assertIn("bad delta format", [r.msg for r in captor.records]) + self.assertEqual([ + '', + '', + '', + '', + ], TupleVariationTest.xml_lines(writer)) + + def test_toXML_constants(self): + writer = XMLWriter(BytesIO()) + g = TupleVariation(AXES, [42, None, 23, 0, -17, None]) + g.toXML(writer, ["wdth", "wght", "opsz"]) + self.assertEqual([ + '', + '', + '', + '', + '', + '', + '', + '', + '' + ], TupleVariationTest.xml_lines(writer)) + + def test_toXML_points(self): + writer = XMLWriter(BytesIO()) + g = TupleVariation(AXES, [(9,8), None, (7,6), (0,0), (-1,-2), None]) + g.toXML(writer, ["wdth", "wght", "opsz"]) + self.assertEqual([ + '', + '', + '', + '', + '', + '', + '', + '', + '' + ], TupleVariationTest.xml_lines(writer)) + + def 
test_toXML_allDeltasNone(self): + writer = XMLWriter(BytesIO()) + axes = {"wght":(0.0, 1.0, 1.0)} + g = TupleVariation(axes, [None] * 5) + g.toXML(writer, ["wght", "wdth"]) + self.assertEqual([ + '', + '', + '', + '' + ], TupleVariationTest.xml_lines(writer)) + + def test_fromXML_badDeltaFormat(self): + g = TupleVariation({}, []) + with CapturingLogHandler(log, "WARNING") as captor: + for name, attrs, content in parseXML(''): + g.fromXML(name, attrs, content) + self.assertIn("bad delta format: a, b", + [r.msg for r in captor.records]) + + def test_fromXML_constants(self): + g = TupleVariation({}, [None] * 4) + for name, attrs, content in parseXML( + '' + '' + '' + '' + ''): + g.fromXML(name, attrs, content) + self.assertEqual(AXES, g.axes) + self.assertEqual([None, 42, -23, None], g.coordinates) + + def test_fromXML_points(self): + g = TupleVariation({}, [None] * 4) + for name, attrs, content in parseXML( + '' + '' + '' + '' + ''): + g.fromXML(name, attrs, content) + self.assertEqual(AXES, g.axes) + self.assertEqual([None, (33, 44), (-2, 170), None], g.coordinates) + + def test_compile_sharedPeaks_nonIntermediate_sharedPoints(self): + var = TupleVariation( + {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedPeakIndices = { var.compileCoord(axisTags): 0x77 } + tup, deltas, _ = var.compile(axisTags, sharedPeakIndices, + sharedPoints={0,1,2}) + # len(deltas)=8; flags=None; tupleIndex=0x77 + # embeddedPeaks=[]; intermediateCoord=[] + self.assertEqual("00 08 00 77", hexencode(tup)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(deltas)) + + def test_compile_sharedPeaks_intermediate_sharedPoints(self): + var = TupleVariation( + {"wght": (0.3, 0.5, 0.7), "wdth": (0.1, 0.8, 0.9)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedPeakIndices = { var.compileCoord(axisTags): 0x77 } + tup, deltas, _ = var.compile(axisTags, sharedPeakIndices, + 
sharedPoints={0,1,2}) + # len(deltas)=8; flags=INTERMEDIATE_REGION; tupleIndex=0x77 + # embeddedPeak=[]; intermediateCoord=[(0.3, 0.1), (0.7, 0.9)] + self.assertEqual("00 08 40 77 13 33 06 66 2C CD 39 9A", hexencode(tup)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(deltas)) + + def test_compile_sharedPeaks_nonIntermediate_privatePoints(self): + var = TupleVariation( + {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedPeakIndices = { var.compileCoord(axisTags): 0x77 } + tup, deltas, _ = var.compile(axisTags, sharedPeakIndices, + sharedPoints=None) + # len(deltas)=9; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 + # embeddedPeak=[]; intermediateCoord=[] + self.assertEqual("00 09 20 77", hexencode(tup)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(deltas)) + + def test_compile_sharedPeaks_intermediate_privatePoints(self): + var = TupleVariation( + {"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 1.0)}, + [(7,4), (8,5), (9,6)]) + axisTags = ["wght", "wdth"] + sharedPeakIndices = { var.compileCoord(axisTags): 0x77 } + tuple, deltas, _ = var.compile(axisTags, + sharedPeakIndices, sharedPoints=None) + # len(deltas)=9; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 + # embeddedPeak=[]; intermediateCoord=[(0.0, 0.0), (1.0, 1.0)] + self.assertEqual("00 09 60 77 00 00 00 00 40 00 40 00", + hexencode(tuple)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(deltas)) + + def test_compile_embeddedPeak_nonIntermediate_sharedPoints(self): + var = TupleVariation( + {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + tup, deltas, _ = var.compile(axisTags=["wght", "wdth"], + sharedCoordIndices={}, sharedPoints={0, 1, 2}) + # len(deltas)=8; flags=EMBEDDED_PEAK_TUPLE + # 
embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[] + self.assertEqual("00 08 80 00 20 00 33 33", hexencode(tup)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(deltas)) + + def test_compile_embeddedPeak_nonIntermediate_sharedConstants(self): + var = TupleVariation( + {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [3, 1, 4]) + tup, deltas, _ = var.compile(axisTags=["wght", "wdth"], + sharedCoordIndices={}, sharedPoints={0, 1, 2}) + # len(deltas)=4; flags=EMBEDDED_PEAK_TUPLE + # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[] + self.assertEqual("00 04 80 00 20 00 33 33", hexencode(tup)) + self.assertEqual("02 03 01 04", # delta: [3, 1, 4] + hexencode(deltas)) + + def test_compile_embeddedPeak_intermediate_sharedPoints(self): + var = TupleVariation( + {"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + tup, deltas, _ = var.compile(axisTags=["wght", "wdth"], + sharedCoordIndices={}, + sharedPoints={0, 1, 2}) + # len(deltas)=8; flags=EMBEDDED_PEAK_TUPLE + # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[(0.0, 0.0), (1.0, 0.8)] + self.assertEqual("00 08 C0 00 20 00 33 33 00 00 00 00 40 00 33 33", + hexencode(tup)) + self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(deltas)) + + def test_compile_embeddedPeak_nonIntermediate_privatePoints(self): + var = TupleVariation( + {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [(7,4), (8,5), (9,6)]) + tup, deltas, _ = var.compile( + axisTags=["wght", "wdth"], sharedCoordIndices={}, sharedPoints=None) + # len(deltas)=9; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_PEAK_TUPLE + # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[] + self.assertEqual("00 09 A0 00 20 00 33 33", hexencode(tup)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(deltas)) + + def test_compile_embeddedPeak_nonIntermediate_privateConstants(self): + 
var = TupleVariation( + {"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, + [7, 8, 9]) + tup, deltas, _ = var.compile( + axisTags=["wght", "wdth"], sharedCoordIndices={}, sharedPoints=None) + # len(deltas)=5; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_PEAK_TUPLE + # embeddedPeak=[(0.5, 0.8)]; intermediateCoord=[] + self.assertEqual("00 05 A0 00 20 00 33 33", hexencode(tup)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09", # delta: [7, 8, 9] + hexencode(deltas)) + + def test_compile_embeddedPeak_intermediate_privatePoints(self): + var = TupleVariation( + {"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)}, + [(7,4), (8,5), (9,6)]) + tup, deltas, _ = var.compile( + axisTags = ["wght", "wdth"], + sharedCoordIndices={}, sharedPoints=None) + # len(deltas)=9; + # flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_REGION|EMBEDDED_PEAK_TUPLE + # embeddedPeak=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)] + self.assertEqual("00 09 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A", + hexencode(tup)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09 " # deltaX: [7, 8, 9] + "02 04 05 06", # deltaY: [4, 5, 6] + hexencode(deltas)) + + def test_compile_embeddedPeak_intermediate_privateConstants(self): + var = TupleVariation( + {"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)}, + [7, 8, 9]) + tup, deltas, _ = var.compile( + axisTags = ["wght", "wdth"], + sharedCoordIndices={}, sharedPoints=None) + # len(deltas)=5; + # flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_REGION|EMBEDDED_PEAK_TUPLE + # embeddedPeak=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)] + self.assertEqual("00 05 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A", + hexencode(tup)) + self.assertEqual("00 " # all points in glyph + "02 07 08 09", # delta: [7, 8, 9] + hexencode(deltas)) + + def test_compileCoord(self): + var = TupleVariation({"wght": (-1.0, -1.0, -1.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) + self.assertEqual("C0 00 20 00", hexencode(var.compileCoord(["wght", "wdth"]))) + self.assertEqual("20 
00 C0 00", hexencode(var.compileCoord(["wdth", "wght"]))) + self.assertEqual("C0 00", hexencode(var.compileCoord(["wght"]))) + + def test_compileIntermediateCoord(self): + var = TupleVariation({"wght": (-1.0, -1.0, 0.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) + self.assertEqual("C0 00 19 9A 00 00 26 66", hexencode(var.compileIntermediateCoord(["wght", "wdth"]))) + self.assertEqual("19 9A C0 00 26 66 00 00", hexencode(var.compileIntermediateCoord(["wdth", "wght"]))) + self.assertEqual(None, var.compileIntermediateCoord(["wght"])) + self.assertEqual("19 9A 26 66", hexencode(var.compileIntermediateCoord(["wdth"]))) + + def test_decompileCoord(self): + decompileCoord = TupleVariation.decompileCoord_ + data = deHexStr("DE AD C0 00 20 00 DE AD") + self.assertEqual(({"wght": -1.0, "wdth": 0.5}, 6), decompileCoord(["wght", "wdth"], data, 2)) + + def test_decompileCoord_roundTrip(self): + # Make sure we are not affected by https://github.com/behdad/fonttools/issues/286 + data = deHexStr("7F B9 80 35") + values, _ = TupleVariation.decompileCoord_(["wght", "wdth"], data, 0) + axisValues = {axis:(val, val, val) for axis, val in values.items()} + var = TupleVariation(axisValues, [None] * 4) + self.assertEqual("7F B9 80 35", hexencode(var.compileCoord(["wght", "wdth"]))) + + def test_compilePoints(self): + compilePoints = lambda p: TupleVariation.compilePoints(set(p), numPointsInGlyph=999) + self.assertEqual("00", hexencode(compilePoints(range(999)))) # all points in glyph + self.assertEqual("01 00 07", hexencode(compilePoints([7]))) + self.assertEqual("01 80 FF FF", hexencode(compilePoints([65535]))) + self.assertEqual("02 01 09 06", hexencode(compilePoints([9, 15]))) + self.assertEqual("06 05 07 01 F7 02 01 F2", hexencode(compilePoints([7, 8, 255, 257, 258, 500]))) + self.assertEqual("03 01 07 01 80 01 EC", hexencode(compilePoints([7, 8, 500]))) + self.assertEqual("04 01 07 01 81 BE E7 0C 0F", hexencode(compilePoints([7, 8, 0xBEEF, 0xCAFE]))) + self.maxDiff = None + 
self.assertEqual("81 2C" + # 300 points (0x12c) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F" + (128 * " 01") + # second run, contains 128 points: [128 .. 255] + " 2B" + (44 * " 01"), # third run, contains 44 points: [256 .. 299] + hexencode(compilePoints(range(300)))) + self.assertEqual("81 8F" + # 399 points (0x18f) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F" + (128 * " 01") + # second run, contains 128 points: [128 .. 255] + " 7F" + (128 * " 01") + # third run, contains 128 points: [256 .. 383] + " 0E" + (15 * " 01"), # fourth run, contains 15 points: [384 .. 398] + hexencode(compilePoints(range(399)))) + + def test_decompilePoints(self): + numPointsInGlyph = 65536 + allPoints = list(range(numPointsInGlyph)) + def decompilePoints(data, offset): + points, offset = TupleVariation.decompilePoints_(numPointsInGlyph, deHexStr(data), offset, "gvar") + # Conversion to list needed for Python 3. + return (list(points), offset) + # all points in glyph + self.assertEqual((allPoints, 1), decompilePoints("00", 0)) + # all points in glyph (in overly verbose encoding, not explicitly prohibited by spec) + self.assertEqual((allPoints, 2), decompilePoints("80 00", 0)) + # 2 points; first run: [9, 9+6] + self.assertEqual(([9, 15], 4), decompilePoints("02 01 09 06", 0)) + # 2 points; first run: [0xBEEF, 0xCAFE]. (0x0C0F = 0xCAFE - 0xBEEF) + self.assertEqual(([0xBEEF, 0xCAFE], 6), decompilePoints("02 81 BE EF 0C 0F", 0)) + # 1 point; first run: [7] + self.assertEqual(([7], 3), decompilePoints("01 00 07", 0)) + # 1 point; first run: [7] in overly verbose encoding + self.assertEqual(([7], 4), decompilePoints("01 80 00 07", 0)) + # 1 point; first run: [65535]; requires words to be treated as unsigned numbers + self.assertEqual(([65535], 4), decompilePoints("01 80 FF FF", 0)) + # 4 points; first run: [7, 8]; second run: [255, 257]. 257 is stored in delta-encoded bytes (0xFF + 2). 
+ self.assertEqual(([7, 8, 263, 265], 7), decompilePoints("04 01 07 01 01 FF 02", 0)) + # combination of all encodings, preceded and followed by 4 bytes of unused data + data = "DE AD DE AD 04 01 07 01 81 BE E7 0C 0F DE AD DE AD" + self.assertEqual(([7, 8, 0xBEEF, 0xCAFE], 13), decompilePoints(data, 4)) + self.assertSetEqual(set(range(300)), set(decompilePoints( + "81 2C" + # 300 points (0x12c) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F" + (128 * " 01") + # second run, contains 128 points: [128 .. 255] + " AB" + (44 * " 00 01"), # third run, contains 44 points: [256 .. 299] + 0)[0])) + self.assertSetEqual(set(range(399)), set(decompilePoints( + "81 8F" + # 399 points (0x18f) in total + " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] + " 7F" + (128 * " 01") + # second run, contains 128 points: [128 .. 255] + " FF" + (128 * " 00 01") + # third run, contains 128 points: [256 .. 383] + " 8E" + (15 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] + 0)[0])) + + def test_decompilePoints_shouldAcceptBadPointNumbers(self): + decompilePoints = TupleVariation.decompilePoints_ + # 2 points; first run: [3, 9]. 
+ numPointsInGlyph = 8 + with CapturingLogHandler(log, "WARNING") as captor: + decompilePoints(numPointsInGlyph, + deHexStr("02 01 03 06"), 0, "cvar") + self.assertIn("point 9 out of range in 'cvar' table", + [r.msg for r in captor.records]) + + def test_decompilePoints_roundTrip(self): + numPointsInGlyph = 500 # greater than 255, so we also exercise code path for 16-bit encoding + compile = lambda points: TupleVariation.compilePoints(points, numPointsInGlyph) + decompile = lambda data: set(TupleVariation.decompilePoints_(numPointsInGlyph, data, 0, "gvar")[0]) + for i in range(50): + points = set(random.sample(range(numPointsInGlyph), 30)) + self.assertSetEqual(points, decompile(compile(points)), + "failed round-trip decompile/compilePoints; points=%s" % points) + allPoints = set(range(numPointsInGlyph)) + self.assertSetEqual(allPoints, decompile(compile(allPoints))) + + def test_compileDeltas_points(self): + var = TupleVariation({}, [(0,0), (1, 0), (2, 0), None, (4, 0), (5, 0)]) + points = {1, 2, 3, 4} + # deltaX for points: [1, 2, 4]; deltaY for points: [0, 0, 0] + self.assertEqual("02 01 02 04 82", hexencode(var.compileDeltas(points))) + + def test_compileDeltas_constants(self): + var = TupleVariation({}, [0, 1, 2, None, 4, 5]) + cvts = {1, 2, 3, 4} + # delta for cvts: [1, 2, 4] + self.assertEqual("02 01 02 04", hexencode(var.compileDeltas(cvts))) + + def test_compileDeltaValues(self): + compileDeltaValues = lambda values: hexencode(TupleVariation.compileDeltaValues_(values)) + # zeroes + self.assertEqual("80", compileDeltaValues([0])) + self.assertEqual("BF", compileDeltaValues([0] * 64)) + self.assertEqual("BF 80", compileDeltaValues([0] * 65)) + self.assertEqual("BF A3", compileDeltaValues([0] * 100)) + self.assertEqual("BF BF BF BF", compileDeltaValues([0] * 256)) + # bytes + self.assertEqual("00 01", compileDeltaValues([1])) + self.assertEqual("06 01 02 03 7F 80 FF FE", compileDeltaValues([1, 2, 3, 127, -128, -1, -2])) + self.assertEqual("3F" + (64 * " 
7F"), compileDeltaValues([127] * 64)) + self.assertEqual("3F" + (64 * " 7F") + " 00 7F", compileDeltaValues([127] * 65)) + # words + self.assertEqual("40 66 66", compileDeltaValues([0x6666])) + self.assertEqual("43 66 66 7F FF FF FF 80 00", compileDeltaValues([0x6666, 32767, -1, -32768])) + self.assertEqual("7F" + (64 * " 11 22"), compileDeltaValues([0x1122] * 64)) + self.assertEqual("7F" + (64 * " 11 22") + " 40 11 22", compileDeltaValues([0x1122] * 65)) + # bytes, zeroes, bytes: a single zero is more compact when encoded as part of the bytes run + self.assertEqual("04 7F 7F 00 7F 7F", compileDeltaValues([127, 127, 0, 127, 127])) + self.assertEqual("01 7F 7F 81 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 127, 127])) + self.assertEqual("01 7F 7F 82 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 127, 127])) + self.assertEqual("01 7F 7F 83 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 0, 127, 127])) + # bytes, zeroes + self.assertEqual("01 01 00", compileDeltaValues([1, 0])) + self.assertEqual("00 01 81", compileDeltaValues([1, 0, 0])) + # words, bytes, words: a single byte is more compact when encoded as part of the words run + self.assertEqual("42 66 66 00 02 77 77", compileDeltaValues([0x6666, 2, 0x7777])) + self.assertEqual("40 66 66 01 02 02 40 77 77", compileDeltaValues([0x6666, 2, 2, 0x7777])) + # words, zeroes, words + self.assertEqual("40 66 66 80 40 77 77", compileDeltaValues([0x6666, 0, 0x7777])) + self.assertEqual("40 66 66 81 40 77 77", compileDeltaValues([0x6666, 0, 0, 0x7777])) + self.assertEqual("40 66 66 82 40 77 77", compileDeltaValues([0x6666, 0, 0, 0, 0x7777])) + # words, zeroes, bytes + self.assertEqual("40 66 66 80 02 01 02 03", compileDeltaValues([0x6666, 0, 1, 2, 3])) + self.assertEqual("40 66 66 81 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 1, 2, 3])) + self.assertEqual("40 66 66 82 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 0, 1, 2, 3])) + # words, zeroes + self.assertEqual("40 66 66 80", compileDeltaValues([0x6666, 0])) 
+ self.assertEqual("40 66 66 81", compileDeltaValues([0x6666, 0, 0])) + # bytes or words from floats + self.assertEqual("00 01", compileDeltaValues([1.1])) + self.assertEqual("00 02", compileDeltaValues([1.9])) + self.assertEqual("40 66 66", compileDeltaValues([0x6666 + 0.1])) + self.assertEqual("40 66 66", compileDeltaValues([0x6665 + 0.9])) + + def test_decompileDeltas(self): + decompileDeltas = TupleVariation.decompileDeltas_ + # 83 = zero values (0x80), count = 4 (1 + 0x83 & 0x3F) + self.assertEqual(([0, 0, 0, 0], 1), decompileDeltas(4, deHexStr("83"), 0)) + # 41 01 02 FF FF = signed 16-bit values (0x40), count = 2 (1 + 0x41 & 0x3F) + self.assertEqual(([258, -1], 5), decompileDeltas(2, deHexStr("41 01 02 FF FF"), 0)) + # 01 81 07 = signed 8-bit values, count = 2 (1 + 0x01 & 0x3F) + self.assertEqual(([-127, 7], 3), decompileDeltas(2, deHexStr("01 81 07"), 0)) + # combination of all three encodings, preceded and followed by 4 bytes of unused data + data = deHexStr("DE AD BE EF 83 40 01 02 01 81 80 DE AD BE EF") + self.assertEqual(([0, 0, 0, 0, 258, -127, -128], 11), decompileDeltas(7, data, 4)) + + def test_decompileDeltas_roundTrip(self): + numDeltas = 30 + compile = TupleVariation.compileDeltaValues_ + decompile = lambda data: TupleVariation.decompileDeltas_(numDeltas, data, 0)[0] + for i in range(50): + deltas = random.sample(range(-128, 127), 10) + deltas.extend(random.sample(range(-32768, 32767), 10)) + deltas.extend([0] * 10) + random.shuffle(deltas) + self.assertListEqual(deltas, decompile(compile(deltas))) + + def test_compileSharedTuples(self): + # Below, the peak coordinate {"wght": 1.0, "wdth": 0.7} appears + # three times; {"wght": 1.0, "wdth": 0.8} appears twice. + # Because the start and end of variation ranges is not encoded + # into the shared pool, they should get ignored. 
+ deltas = [None] * 4 + variations = [ + TupleVariation({ + "wght": (1.0, 1.0, 1.0), + "wdth": (0.5, 0.7, 1.0) + }, deltas), + TupleVariation({ + "wght": (1.0, 1.0, 1.0), + "wdth": (0.2, 0.7, 1.0) + }, deltas), + TupleVariation({ + "wght": (1.0, 1.0, 1.0), + "wdth": (0.2, 0.8, 1.0) + }, deltas), + TupleVariation({ + "wght": (1.0, 1.0, 1.0), + "wdth": (0.3, 0.7, 1.0) + }, deltas), + TupleVariation({ + "wght": (1.0, 1.0, 1.0), + "wdth": (0.3, 0.8, 1.0) + }, deltas), + TupleVariation({ + "wght": (1.0, 1.0, 1.0), + "wdth": (0.3, 0.9, 1.0) + }, deltas) + ] + result = compileSharedTuples(["wght", "wdth"], variations) + self.assertEqual([hexencode(c) for c in result], + ["40 00 2C CD", "40 00 33 33"]) + + def test_decompileSharedTuples_Skia(self): + sharedTuples = decompileSharedTuples( + axisTags=["wght", "wdth"], sharedTupleCount=8, + data=SKIA_GVAR_SHARED_TUPLES_DATA, offset=0) + self.assertEqual(sharedTuples, SKIA_GVAR_SHARED_TUPLES) + + def test_decompileSharedTuples_empty(self): + self.assertEqual(decompileSharedTuples(["wght"], 0, b"", 0), []) + + def test_compileTupleVariationStore_allVariationsRedundant(self): + axes = {"wght": (0.3, 0.4, 0.5), "opsz": (0.7, 0.8, 0.9)} + variations = [ + TupleVariation(axes, [None] * 4), + TupleVariation(axes, [None] * 4), + TupleVariation(axes, [None] * 4) + ] + self.assertEqual( + compileTupleVariationStore(variations, pointCount=8, + axisTags=["wght", "opsz"], + sharedTupleIndices={}), + (0, b"", b"")) + + def test_compileTupleVariationStore_noVariations(self): + self.assertEqual( + compileTupleVariationStore(variations=[], pointCount=8, + axisTags=["wght", "opsz"], + sharedTupleIndices={}), + (0, b"", b"")) + + def test_compileTupleVariationStore_roundTrip_cvar(self): + deltas = [1, 2, 3, 4] + variations = [ + TupleVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, + deltas), + TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, + deltas) + ] + tupleVariationCount, tuples, data = 
compileTupleVariationStore( + variations, pointCount=4, axisTags=["wght", "wdth"], + sharedTupleIndices={}) + self.assertEqual( + decompileTupleVariationStore("cvar", ["wght", "wdth"], + tupleVariationCount, pointCount=4, + sharedTuples={}, data=(tuples + data), + pos=0, dataPos=len(tuples)), + variations) + + def test_compileTupleVariationStore_roundTrip_gvar(self): + deltas = [(1,1), (2,2), (3,3), (4,4)] + variations = [ + TupleVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, + deltas), + TupleVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, + deltas) + ] + tupleVariationCount, tuples, data = compileTupleVariationStore( + variations, pointCount=4, axisTags=["wght", "wdth"], + sharedTupleIndices={}) + self.assertEqual( + decompileTupleVariationStore("gvar", ["wght", "wdth"], + tupleVariationCount, pointCount=4, + sharedTuples={}, data=(tuples + data), + pos=0, dataPos=len(tuples)), + variations) + + def test_decompileTupleVariationStore_Skia_I(self): + tvar = decompileTupleVariationStore( + tableTag="gvar", axisTags=["wght", "wdth"], + tupleVariationCount=8, pointCount=18, + sharedTuples=SKIA_GVAR_SHARED_TUPLES, + data=SKIA_GVAR_I_DATA, pos=4, dataPos=36) + self.assertEqual(len(tvar), 8) + self.assertEqual(tvar[0].axes, {"wght": (0.0, 1.0, 1.0)}) + self.assertEqual( + " ".join(["%d,%d" % c for c in tvar[0].coordinates]), + "257,0 -127,0 -128,58 -130,90 -130,62 -130,67 -130,32 -127,0 " + "257,0 259,14 260,64 260,21 260,69 258,124 0,0 130,0 0,0 0,0") + + def test_decompileTupleVariationStore_empty(self): + self.assertEqual( + decompileTupleVariationStore(tableTag="gvar", axisTags=[], + tupleVariationCount=0, pointCount=5, + sharedTuples=[], + data=b"", pos=4, dataPos=4), + []) + + def test_getTupleSize(self): + getTupleSize = TupleVariation.getTupleSize_ + numAxes = 3 + self.assertEqual(4 + numAxes * 2, getTupleSize(0x8042, numAxes)) + self.assertEqual(4 + numAxes * 4, getTupleSize(0x4077, numAxes)) + self.assertEqual(4, 
getTupleSize(0x2077, numAxes)) + self.assertEqual(4, getTupleSize(11, numAxes)) + + def test_inferRegion(self): + start, end = inferRegion_({"wght": -0.3, "wdth": 0.7}) + self.assertEqual(start, {"wght": -0.3, "wdth": 0.0}) + self.assertEqual(end, {"wght": 0.0, "wdth": 0.7}) + + @staticmethod + def xml_lines(writer): + content = writer.file.getvalue().decode("utf-8") + return [line.strip() for line in content.splitlines()][1:] + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_v_h_e_a_test.py fonttools-3.21.2/Tests/ttLib/tables/_v_h_e_a_test.py --- fonttools-3.0/Tests/ttLib/tables/_v_h_e_a_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_v_h_e_a_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,275 @@ +from __future__ import absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools.misc.loggingTools import CapturingLogHandler +from fontTools.misc.testTools import parseXML, getXML +from fontTools.misc.textTools import deHexStr +from fontTools.ttLib import TTFont, newTable +from fontTools.misc.fixedTools import log +import os +import unittest + + +CURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +DATA_DIR = os.path.join(CURR_DIR, 'data') + +VHEA_DATA_VERSION_11 = deHexStr( + '0001 1000 ' # 1.1 version + '01F4 ' # 500 ascent + 'FE0C ' # -500 descent + '0000 ' # 0 lineGap + '0BB8 ' # 3000 advanceHeightMax + 'FC16 ' # -1002 minTopSideBearing + 'FD5B ' # -677 minBottomSideBearing + '0B70 ' # 2928 yMaxExtent + '0000 ' # 0 caretSlopeRise + '0001 ' # 1 caretSlopeRun + '0000 ' # 0 caretOffset + '0000 ' # 0 reserved1 + '0000 ' # 0 reserved2 + '0000 ' # 0 reserved3 + '0000 ' # 0 reserved4 + '0000 ' # 0 metricDataFormat + '000C ' # 12 numberOfVMetrics +) + +VHEA_DATA_VERSION_10 = deHexStr('00010000') + VHEA_DATA_VERSION_11[4:] + +VHEA_VERSION_11_AS_DICT = { + 'tableTag': 'vhea', + 'tableVersion': 0x00011000, + 'ascent': 
500, + 'descent': -500, + 'lineGap': 0, + 'advanceHeightMax': 3000, + 'minTopSideBearing': -1002, + 'minBottomSideBearing': -677, + 'yMaxExtent': 2928, + 'caretSlopeRise': 0, + 'caretSlopeRun': 1, + 'caretOffset': 0, + 'reserved1': 0, + 'reserved2': 0, + 'reserved3': 0, + 'reserved4': 0, + 'metricDataFormat': 0, + 'numberOfVMetrics': 12, +} + +VHEA_VERSION_10_AS_DICT = dict(VHEA_VERSION_11_AS_DICT) +VHEA_VERSION_10_AS_DICT['tableVersion'] = 0x00010000 + +VHEA_XML_VERSION_11 = [ + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', + '', +] + +VHEA_XML_VERSION_11_AS_FLOAT = [ + '', +] + VHEA_XML_VERSION_11[1:] + +VHEA_XML_VERSION_10 = [ + '', +] + VHEA_XML_VERSION_11[1:] + +VHEA_XML_VERSION_10_AS_FLOAT = [ + '', +] + VHEA_XML_VERSION_11[1:] + + +class VheaCompileOrToXMLTest(unittest.TestCase): + + def setUp(self): + vhea = newTable('vhea') + vhea.tableVersion = 0x00010000 + vhea.ascent = 500 + vhea.descent = -500 + vhea.lineGap = 0 + vhea.advanceHeightMax = 3000 + vhea.minTopSideBearing = -1002 + vhea.minBottomSideBearing = -677 + vhea.yMaxExtent = 2928 + vhea.caretSlopeRise = 0 + vhea.caretSlopeRun = 1 + vhea.caretOffset = 0 + vhea.metricDataFormat = 0 + vhea.numberOfVMetrics = 12 + vhea.reserved1 = vhea.reserved2 = vhea.reserved3 = vhea.reserved4 = 0 + self.font = TTFont(sfntVersion='OTTO') + self.font['vhea'] = vhea + + def test_compile_caretOffset_as_reserved0(self): + vhea = self.font['vhea'] + del vhea.caretOffset + vhea.reserved0 = 0 + self.assertEqual(VHEA_DATA_VERSION_10, vhea.compile(self.font)) + + def test_compile_version_10(self): + vhea = self.font['vhea'] + vhea.tableVersion = 0x00010000 + self.assertEqual(VHEA_DATA_VERSION_10, vhea.compile(self.font)) + + def test_compile_version_10_as_float(self): + vhea = self.font['vhea'] + vhea.tableVersion = 1.0 + with CapturingLogHandler(log, "WARNING") as captor: + self.assertEqual(VHEA_DATA_VERSION_10, vhea.compile(self.font)) + self.assertTrue( + len([r for r in 
captor.records + if "Table version value is a float" in r.msg]) == 1) + + def test_compile_version_11(self): + vhea = self.font['vhea'] + vhea.tableVersion = 0x00011000 + self.assertEqual(VHEA_DATA_VERSION_11, vhea.compile(self.font)) + + def test_compile_version_11_as_float(self): + vhea = self.font['vhea'] + vhea.tableVersion = 1.0625 + with CapturingLogHandler(log, "WARNING") as captor: + self.assertEqual(VHEA_DATA_VERSION_11, vhea.compile(self.font)) + self.assertTrue( + len([r for r in captor.records + if "Table version value is a float" in r.msg]) == 1) + + def test_toXML_caretOffset_as_reserved0(self): + vhea = self.font['vhea'] + del vhea.caretOffset + vhea.reserved0 = 0 + self.assertEqual(getXML(vhea.toXML), VHEA_XML_VERSION_10) + + def test_toXML_version_10(self): + vhea = self.font['vhea'] + self.font['vhea'].tableVersion = 0x00010000 + self.assertEqual(getXML(vhea.toXML), VHEA_XML_VERSION_10) + + def test_toXML_version_10_as_float(self): + vhea = self.font['vhea'] + vhea.tableVersion = 1.0 + with CapturingLogHandler(log, "WARNING") as captor: + self.assertEqual(getXML(vhea.toXML), VHEA_XML_VERSION_10) + self.assertTrue( + len([r for r in captor.records + if "Table version value is a float" in r.msg]) == 1) + + def test_toXML_version_11(self): + vhea = self.font['vhea'] + self.font['vhea'].tableVersion = 0x00011000 + self.assertEqual(getXML(vhea.toXML), VHEA_XML_VERSION_11) + + def test_toXML_version_11_as_float(self): + vhea = self.font['vhea'] + vhea.tableVersion = 1.0625 + with CapturingLogHandler(log, "WARNING") as captor: + self.assertEqual(getXML(vhea.toXML), VHEA_XML_VERSION_11) + self.assertTrue( + len([r for r in captor.records + if "Table version value is a float" in r.msg]) == 1) + + +class VheaDecompileOrFromXMLTest(unittest.TestCase): + + def setUp(self): + vhea = newTable('vhea') + self.font = TTFont(sfntVersion='OTTO') + self.font['vhea'] = vhea + + def test_decompile_version_10(self): + vhea = self.font['vhea'] + 
vhea.decompile(VHEA_DATA_VERSION_10, self.font) + for key in vhea.__dict__: + self.assertEqual(getattr(vhea, key), VHEA_VERSION_10_AS_DICT[key]) + + def test_decompile_version_11(self): + vhea = self.font['vhea'] + vhea.decompile(VHEA_DATA_VERSION_11, self.font) + for key in vhea.__dict__: + self.assertEqual(getattr(vhea, key), VHEA_VERSION_11_AS_DICT[key]) + + def test_fromXML_version_10(self): + vhea = self.font['vhea'] + for name, attrs, content in parseXML(VHEA_XML_VERSION_10): + vhea.fromXML(name, attrs, content, self.font) + for key in vhea.__dict__: + self.assertEqual(getattr(vhea, key), VHEA_VERSION_10_AS_DICT[key]) + + def test_fromXML_version_10_as_float(self): + vhea = self.font['vhea'] + with CapturingLogHandler(log, "WARNING") as captor: + for name, attrs, content in parseXML(VHEA_XML_VERSION_10_AS_FLOAT): + vhea.fromXML(name, attrs, content, self.font) + self.assertTrue( + len([r for r in captor.records + if "Table version value is a float" in r.msg]) == 1) + for key in vhea.__dict__: + self.assertEqual(getattr(vhea, key), VHEA_VERSION_10_AS_DICT[key]) + + def test_fromXML_version_11(self): + vhea = self.font['vhea'] + for name, attrs, content in parseXML(VHEA_XML_VERSION_11): + vhea.fromXML(name, attrs, content, self.font) + for key in vhea.__dict__: + self.assertEqual(getattr(vhea, key), VHEA_VERSION_11_AS_DICT[key]) + + def test_fromXML_version_11_as_float(self): + vhea = self.font['vhea'] + with CapturingLogHandler(log, "WARNING") as captor: + for name, attrs, content in parseXML(VHEA_XML_VERSION_11_AS_FLOAT): + vhea.fromXML(name, attrs, content, self.font) + self.assertTrue( + len([r for r in captor.records + if "Table version value is a float" in r.msg]) == 1) + for key in vhea.__dict__: + self.assertEqual(getattr(vhea, key), VHEA_VERSION_11_AS_DICT[key]) + + +class VheaRecalcTest(unittest.TestCase): + + def test_recalc_TTF(self): + font = TTFont() + font.importXML(os.path.join(DATA_DIR, '_v_h_e_a_recalc_TTF.ttx')) + vhea = font['vhea'] + 
vhea.recalc(font) + self.assertEqual(vhea.advanceHeightMax, 900) + self.assertEqual(vhea.minTopSideBearing, 200) + self.assertEqual(vhea.minBottomSideBearing, 377) + self.assertEqual(vhea.yMaxExtent, 312) + + def test_recalc_OTF(self): + font = TTFont() + font.importXML(os.path.join(DATA_DIR, '_v_h_e_a_recalc_OTF.ttx')) + vhea = font['vhea'] + vhea.recalc(font) + self.assertEqual(vhea.advanceHeightMax, 900) + self.assertEqual(vhea.minTopSideBearing, 200) + self.assertEqual(vhea.minBottomSideBearing, 377) + self.assertEqual(vhea.yMaxExtent, 312) + + def test_recalc_empty(self): + font = TTFont() + font.importXML(os.path.join(DATA_DIR, '_v_h_e_a_recalc_empty.ttx')) + vhea = font['vhea'] + vhea.recalc(font) + self.assertEqual(vhea.advanceHeightMax, 900) + self.assertEqual(vhea.minTopSideBearing, 0) + self.assertEqual(vhea.minBottomSideBearing, 0) + self.assertEqual(vhea.yMaxExtent, 0) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/tables/_v_m_t_x_test.py fonttools-3.21.2/Tests/ttLib/tables/_v_m_t_x_test.py --- fonttools-3.0/Tests/ttLib/tables/_v_m_t_x_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/tables/_v_m_t_x_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,19 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.misc.py23 import * +from fontTools.ttLib.tables._v_m_t_x import table__v_m_t_x +import _h_m_t_x_test +import unittest + + +class VmtxTableTest(_h_m_t_x_test.HmtxTableTest): + + @classmethod + def setUpClass(cls): + cls.tableClass = table__v_m_t_x + cls.tag = "vmtx" + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttLib/woff2_test.py fonttools-3.21.2/Tests/ttLib/woff2_test.py --- fonttools-3.0/Tests/ttLib/woff2_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttLib/woff2_test.py 2018-01-08 12:40:40.000000000 +0000 
@@ -0,0 +1,840 @@ +from __future__ import print_function, division, absolute_import, unicode_literals +from fontTools.misc.py23 import * +from fontTools import ttLib +from fontTools.ttLib.woff2 import ( + WOFF2Reader, woff2DirectorySize, woff2DirectoryFormat, + woff2FlagsSize, woff2UnknownTagSize, woff2Base128MaxSize, WOFF2DirectoryEntry, + getKnownTagIndex, packBase128, base128Size, woff2UnknownTagIndex, + WOFF2FlavorData, woff2TransformedTableTags, WOFF2GlyfTable, WOFF2LocaTable, + WOFF2Writer, unpackBase128, unpack255UShort, pack255UShort) +import unittest +from fontTools.misc import sstruct +import struct +import os +import random +import copy +from collections import OrderedDict + +haveBrotli = False +try: + import brotli + haveBrotli = True +except ImportError: + pass + + +# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires +# deprecation warnings if a program uses the old name. +if not hasattr(unittest.TestCase, 'assertRaisesRegex'): + unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp + + +current_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) +data_dir = os.path.join(current_dir, 'data') +TTX = os.path.join(data_dir, 'TestTTF-Regular.ttx') +OTX = os.path.join(data_dir, 'TestOTF-Regular.otx') +METADATA = os.path.join(data_dir, 'test_woff2_metadata.xml') + +TT_WOFF2 = BytesIO() +CFF_WOFF2 = BytesIO() + + +def setUpModule(): + if not haveBrotli: + raise unittest.SkipTest("No module named brotli") + assert os.path.exists(TTX) + assert os.path.exists(OTX) + # import TT-flavoured test font and save it as WOFF2 + ttf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + ttf.importXML(TTX) + ttf.flavor = "woff2" + ttf.save(TT_WOFF2, reorderTables=None) + # import CFF-flavoured test font and save it as WOFF2 + otf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + otf.importXML(OTX) + otf.flavor = "woff2" + otf.save(CFF_WOFF2, reorderTables=None) + + +class 
WOFF2ReaderTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.file = BytesIO(CFF_WOFF2.getvalue()) + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + cls.font.importXML(OTX) + + def setUp(self): + self.file.seek(0) + + def test_bad_signature(self): + with self.assertRaisesRegex(ttLib.TTLibError, 'bad signature'): + WOFF2Reader(BytesIO(b"wOFF")) + + def test_not_enough_data_header(self): + incomplete_header = self.file.read(woff2DirectorySize - 1) + with self.assertRaisesRegex(ttLib.TTLibError, 'not enough data'): + WOFF2Reader(BytesIO(incomplete_header)) + + def test_incorrect_compressed_size(self): + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + header['totalCompressedSize'] = 0 + data = sstruct.pack(woff2DirectoryFormat, header) + with self.assertRaises((brotli.error, ttLib.TTLibError)): + WOFF2Reader(BytesIO(data + self.file.read())) + + def test_incorrect_uncompressed_size(self): + decompress_backup = brotli.decompress + brotli.decompress = lambda data: b"" # return empty byte string + with self.assertRaisesRegex(ttLib.TTLibError, 'unexpected size for decompressed'): + WOFF2Reader(self.file) + brotli.decompress = decompress_backup + + def test_incorrect_file_size(self): + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + header['length'] -= 1 + data = sstruct.pack(woff2DirectoryFormat, header) + with self.assertRaisesRegex( + ttLib.TTLibError, "doesn't match the actual file size"): + WOFF2Reader(BytesIO(data + self.file.read())) + + def test_num_tables(self): + tags = [t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')] + data = self.file.read(woff2DirectorySize) + header = sstruct.unpack(woff2DirectoryFormat, data) + self.assertEqual(header['numTables'], len(tags)) + + def test_table_tags(self): + tags = set([t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')]) + reader = WOFF2Reader(self.file) + 
self.assertEqual(set(reader.keys()), tags) + + def test_get_normal_tables(self): + woff2Reader = WOFF2Reader(self.file) + specialTags = woff2TransformedTableTags + ('head', 'GlyphOrder', 'DSIG') + for tag in [t for t in self.font.keys() if t not in specialTags]: + origData = self.font.getTableData(tag) + decompressedData = woff2Reader[tag] + self.assertEqual(origData, decompressedData) + + def test_reconstruct_unknown(self): + reader = WOFF2Reader(self.file) + with self.assertRaisesRegex(ttLib.TTLibError, 'transform for table .* unknown'): + reader.reconstructTable('ZZZZ') + + +class WOFF2ReaderTTFTest(WOFF2ReaderTest): + """ Tests specific to TT-flavored fonts. """ + + @classmethod + def setUpClass(cls): + cls.file = BytesIO(TT_WOFF2.getvalue()) + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + cls.font.importXML(TTX) + + def setUp(self): + self.file.seek(0) + + def test_reconstruct_glyf(self): + woff2Reader = WOFF2Reader(self.file) + reconstructedData = woff2Reader['glyf'] + self.assertEqual(self.font.getTableData('glyf'), reconstructedData) + + def test_reconstruct_loca(self): + woff2Reader = WOFF2Reader(self.file) + reconstructedData = woff2Reader['loca'] + self.assertEqual(self.font.getTableData('loca'), reconstructedData) + self.assertTrue(hasattr(woff2Reader.tables['glyf'], 'data')) + + def test_reconstruct_loca_not_match_orig_size(self): + reader = WOFF2Reader(self.file) + reader.tables['loca'].origLength -= 1 + with self.assertRaisesRegex( + ttLib.TTLibError, "'loca' table doesn't match original size"): + reader.reconstructTable('loca') + + +def normalise_table(font, tag, padding=4): + """ Return normalised table data. Keep 'font' instance unmodified. 
""" + assert tag in ('glyf', 'loca', 'head') + assert tag in font + if tag == 'head': + origHeadFlags = font['head'].flags + font['head'].flags |= (1 << 11) + tableData = font['head'].compile(font) + if font.sfntVersion in ("\x00\x01\x00\x00", "true"): + assert {'glyf', 'loca', 'head'}.issubset(font.keys()) + origIndexFormat = font['head'].indexToLocFormat + if hasattr(font['loca'], 'locations'): + origLocations = font['loca'].locations[:] + else: + origLocations = [] + glyfTable = ttLib.newTable('glyf') + glyfTable.decompile(font.getTableData('glyf'), font) + glyfTable.padding = padding + if tag == 'glyf': + tableData = glyfTable.compile(font) + elif tag == 'loca': + glyfTable.compile(font) + tableData = font['loca'].compile(font) + if tag == 'head': + glyfTable.compile(font) + font['loca'].compile(font) + tableData = font['head'].compile(font) + font['head'].indexToLocFormat = origIndexFormat + font['loca'].set(origLocations) + if tag == 'head': + font['head'].flags = origHeadFlags + return tableData + + +def normalise_font(font, padding=4): + """ Return normalised font data. Keep 'font' instance unmodified. 
""" + # drop DSIG but keep a copy + DSIG_copy = copy.deepcopy(font['DSIG']) + del font['DSIG'] + # ovverride TTFont attributes + origFlavor = font.flavor + origRecalcBBoxes = font.recalcBBoxes + origRecalcTimestamp = font.recalcTimestamp + origLazy = font.lazy + font.flavor = None + font.recalcBBoxes = False + font.recalcTimestamp = False + font.lazy = True + # save font to temporary stream + infile = BytesIO() + font.save(infile) + infile.seek(0) + # reorder tables alphabetically + outfile = BytesIO() + reader = ttLib.sfnt.SFNTReader(infile) + writer = ttLib.sfnt.SFNTWriter( + outfile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) + for tag in sorted(reader.keys()): + if tag in woff2TransformedTableTags + ('head',): + writer[tag] = normalise_table(font, tag, padding) + else: + writer[tag] = reader[tag] + writer.close() + # restore font attributes + font['DSIG'] = DSIG_copy + font.flavor = origFlavor + font.recalcBBoxes = origRecalcBBoxes + font.recalcTimestamp = origRecalcTimestamp + font.lazy = origLazy + return outfile.getvalue() + + +class WOFF2DirectoryEntryTest(unittest.TestCase): + + def setUp(self): + self.entry = WOFF2DirectoryEntry() + + def test_not_enough_data_table_flags(self): + with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'flags'"): + self.entry.fromString(b"") + + def test_not_enough_data_table_tag(self): + incompleteData = bytearray([0x3F, 0, 0, 0]) + with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'tag'"): + self.entry.fromString(bytes(incompleteData)) + + def test_table_reserved_flags(self): + with self.assertRaisesRegex(ttLib.TTLibError, "bits 6-7 are reserved"): + self.entry.fromString(bytechr(0xC0)) + + def test_loca_zero_transformLength(self): + data = bytechr(getKnownTagIndex('loca')) # flags + data += packBase128(random.randint(1, 100)) # origLength + data += packBase128(1) # non-zero transformLength + with self.assertRaisesRegex( + ttLib.TTLibError, "transformLength of the 
'loca' table must be 0"): + self.entry.fromString(data) + + def test_fromFile(self): + unknownTag = Tag('ZZZZ') + data = bytechr(getKnownTagIndex(unknownTag)) + data += unknownTag.tobytes() + data += packBase128(random.randint(1, 100)) + expectedPos = len(data) + f = BytesIO(data + b'\0'*100) + self.entry.fromFile(f) + self.assertEqual(f.tell(), expectedPos) + + def test_transformed_toString(self): + self.entry.tag = Tag('glyf') + self.entry.flags = getKnownTagIndex(self.entry.tag) + self.entry.origLength = random.randint(101, 200) + self.entry.length = random.randint(1, 100) + expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength) + + base128Size(self.entry.length)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + def test_known_toString(self): + self.entry.tag = Tag('head') + self.entry.flags = getKnownTagIndex(self.entry.tag) + self.entry.origLength = 54 + expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + def test_unknown_toString(self): + self.entry.tag = Tag('ZZZZ') + self.entry.flags = woff2UnknownTagIndex + self.entry.origLength = random.randint(1, 100) + expectedSize = (woff2FlagsSize + woff2UnknownTagSize + + base128Size(self.entry.origLength)) + data = self.entry.toString() + self.assertEqual(len(data), expectedSize) + + +class DummyReader(WOFF2Reader): + + def __init__(self, file, checkChecksums=1, fontNumber=-1): + self.file = file + for attr in ('majorVersion', 'minorVersion', 'metaOffset', 'metaLength', + 'metaOrigLength', 'privLength', 'privOffset'): + setattr(self, attr, 0) + + +class WOFF2FlavorDataTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + assert os.path.exists(METADATA) + with open(METADATA, 'rb') as f: + cls.xml_metadata = f.read() + cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) + # make random byte strings; font data must be 4-byte aligned + 
cls.fontdata = bytes(bytearray(random.sample(range(0, 256), 80))) + cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) + + def setUp(self): + self.file = BytesIO(self.fontdata) + self.file.seek(0, 2) + + def test_get_metaData_no_privData(self): + self.file.write(self.compressed_metadata) + reader = DummyReader(self.file) + reader.metaOffset = len(self.fontdata) + reader.metaLength = len(self.compressed_metadata) + reader.metaOrigLength = len(self.xml_metadata) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.xml_metadata, flavorData.metaData) + + def test_get_privData_no_metaData(self): + self.file.write(self.privData) + reader = DummyReader(self.file) + reader.privOffset = len(self.fontdata) + reader.privLength = len(self.privData) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.privData, flavorData.privData) + + def test_get_metaData_and_privData(self): + self.file.write(self.compressed_metadata + self.privData) + reader = DummyReader(self.file) + reader.metaOffset = len(self.fontdata) + reader.metaLength = len(self.compressed_metadata) + reader.metaOrigLength = len(self.xml_metadata) + reader.privOffset = reader.metaOffset + reader.metaLength + reader.privLength = len(self.privData) + flavorData = WOFF2FlavorData(reader) + self.assertEqual(self.xml_metadata, flavorData.metaData) + self.assertEqual(self.privData, flavorData.privData) + + def test_get_major_minorVersion(self): + reader = DummyReader(self.file) + reader.majorVersion = reader.minorVersion = 1 + flavorData = WOFF2FlavorData(reader) + self.assertEqual(flavorData.majorVersion, 1) + self.assertEqual(flavorData.minorVersion, 1) + + +class WOFF2WriterTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") + cls.font.importXML(OTX) + cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] + cls.numTables = len(cls.tags) + cls.file = BytesIO(CFF_WOFF2.getvalue()) + 
cls.file.seek(0, 2) + cls.length = (cls.file.tell() + 3) & ~3 + cls.setUpFlavorData() + + @classmethod + def setUpFlavorData(cls): + assert os.path.exists(METADATA) + with open(METADATA, 'rb') as f: + cls.xml_metadata = f.read() + cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) + cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) + + def setUp(self): + self.file.seek(0) + self.writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion) + + def test_DSIG_dropped(self): + self.writer['DSIG'] = b"\0" + self.assertEqual(len(self.writer.tables), 0) + self.assertEqual(self.writer.numTables, self.numTables-1) + + def test_no_rewrite_table(self): + self.writer['ZZZZ'] = b"\0" + with self.assertRaisesRegex(ttLib.TTLibError, "cannot rewrite"): + self.writer['ZZZZ'] = b"\0" + + def test_num_tables(self): + self.writer['ABCD'] = b"\0" + with self.assertRaisesRegex(ttLib.TTLibError, "wrong number of tables"): + self.writer.close() + + def test_required_tables(self): + font = ttLib.TTFont(flavor="woff2") + with self.assertRaisesRegex(ttLib.TTLibError, "missing required table"): + font.save(BytesIO()) + + def test_head_transform_flag(self): + headData = self.font.getTableData('head') + origFlags = byteord(headData[16]) + woff2font = ttLib.TTFont(self.file) + newHeadData = woff2font.getTableData('head') + modifiedFlags = byteord(newHeadData[16]) + self.assertNotEqual(origFlags, modifiedFlags) + restoredFlags = modifiedFlags & ~0x08 # turn off bit 11 + self.assertEqual(origFlags, restoredFlags) + + def test_tables_sorted_alphabetically(self): + expected = sorted([t for t in self.tags if t != 'DSIG']) + woff2font = ttLib.TTFont(self.file) + self.assertEqual(expected, list(woff2font.reader.keys())) + + def test_checksums(self): + normFile = BytesIO(normalise_font(self.font, padding=4)) + normFile.seek(0) + normFont = ttLib.TTFont(normFile, checkChecksums=2) + w2font = ttLib.TTFont(self.file) + # force reconstructing glyf 
table using 4-byte padding + w2font.reader.padding = 4 + for tag in [t for t in self.tags if t != 'DSIG']: + w2data = w2font.reader[tag] + normData = normFont.reader[tag] + if tag == "head": + w2data = w2data[:8] + b'\0\0\0\0' + w2data[12:] + normData = normData[:8] + b'\0\0\0\0' + normData[12:] + w2CheckSum = ttLib.sfnt.calcChecksum(w2data) + normCheckSum = ttLib.sfnt.calcChecksum(normData) + self.assertEqual(w2CheckSum, normCheckSum) + normCheckSumAdjustment = normFont['head'].checkSumAdjustment + self.assertEqual(normCheckSumAdjustment, w2font['head'].checkSumAdjustment) + + def test_calcSFNTChecksumsLengthsAndOffsets(self): + normFont = ttLib.TTFont(BytesIO(normalise_font(self.font, padding=4))) + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer._normaliseGlyfAndLoca(padding=4) + self.writer._setHeadTransformFlag() + self.writer.tables = OrderedDict(sorted(self.writer.tables.items())) + self.writer._calcSFNTChecksumsLengthsAndOffsets() + for tag, entry in normFont.reader.tables.items(): + self.assertEqual(entry.offset, self.writer.tables[tag].origOffset) + self.assertEqual(entry.length, self.writer.tables[tag].origLength) + self.assertEqual(entry.checkSum, self.writer.tables[tag].checkSum) + + def test_bad_sfntVersion(self): + for i in range(self.numTables): + self.writer[bytechr(65 + i)*4] = b"\0" + self.writer.sfntVersion = 'ZZZZ' + with self.assertRaisesRegex(ttLib.TTLibError, "bad sfntVersion"): + self.writer.close() + + def test_calcTotalSize_no_flavorData(self): + expected = self.length + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_metaData(self): + expected = self.length + len(self.compressed_metadata) + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.metaData = self.xml_metadata + 
self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_privData(self): + expected = self.length + len(self.privData) + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.privData = self.privData + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_calcTotalSize_with_metaData_and_privData(self): + metaDataLength = (len(self.compressed_metadata) + 3) & ~3 + expected = self.length + metaDataLength + len(self.privData) + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.metaData = self.xml_metadata + flavorData.privData = self.privData + self.writer.file = BytesIO() + for tag in self.tags: + self.writer[tag] = self.font.getTableData(tag) + self.writer.close() + self.assertEqual(expected, self.writer.length) + self.assertEqual(expected, self.writer.file.tell()) + + def test_getVersion(self): + # no version + self.assertEqual((0, 0), self.writer._getVersion()) + # version from head.fontRevision + fontRevision = self.font['head'].fontRevision + versionTuple = tuple(int(i) for i in str(fontRevision).split(".")) + entry = self.writer.tables['head'] = ttLib.newTable('head') + entry.data = self.font.getTableData('head') + self.assertEqual(versionTuple, self.writer._getVersion()) + # version from writer.flavorData + flavorData = self.writer.flavorData = WOFF2FlavorData() + flavorData.majorVersion, flavorData.minorVersion = (10, 11) + self.assertEqual((10, 11), self.writer._getVersion()) + + +class WOFF2WriterTTFTest(WOFF2WriterTest): + + @classmethod + def setUpClass(cls): + cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, 
flavor="woff2") + cls.font.importXML(TTX) + cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] + cls.numTables = len(cls.tags) + cls.file = BytesIO(TT_WOFF2.getvalue()) + cls.file.seek(0, 2) + cls.length = (cls.file.tell() + 3) & ~3 + cls.setUpFlavorData() + + def test_normaliseGlyfAndLoca(self): + normTables = {} + for tag in ('head', 'loca', 'glyf'): + normTables[tag] = normalise_table(self.font, tag, padding=4) + for tag in self.tags: + tableData = self.font.getTableData(tag) + self.writer[tag] = tableData + if tag in normTables: + self.assertNotEqual(tableData, normTables[tag]) + self.writer._normaliseGlyfAndLoca(padding=4) + self.writer._setHeadTransformFlag() + for tag in normTables: + self.assertEqual(self.writer.tables[tag].data, normTables[tag]) + + +class WOFF2LocaTableTest(unittest.TestCase): + + def setUp(self): + self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font['head'] = ttLib.newTable('head') + font['loca'] = WOFF2LocaTable() + font['glyf'] = WOFF2GlyfTable() + + def test_compile_short_loca(self): + locaTable = self.font['loca'] + locaTable.set(list(range(0, 0x20000, 2))) + self.font['glyf'].indexFormat = 0 + locaData = locaTable.compile(self.font) + self.assertEqual(len(locaData), 0x20000) + + def test_compile_short_loca_overflow(self): + locaTable = self.font['loca'] + locaTable.set(list(range(0x20000 + 1))) + self.font['glyf'].indexFormat = 0 + with self.assertRaisesRegex( + ttLib.TTLibError, "indexFormat is 0 but local offsets > 0x20000"): + locaTable.compile(self.font) + + def test_compile_short_loca_not_multiples_of_2(self): + locaTable = self.font['loca'] + locaTable.set([1, 3, 5, 7]) + self.font['glyf'].indexFormat = 0 + with self.assertRaisesRegex(ttLib.TTLibError, "offsets not multiples of 2"): + locaTable.compile(self.font) + + def test_compile_long_loca(self): + locaTable = self.font['loca'] + locaTable.set(list(range(0x20001))) + self.font['glyf'].indexFormat = 1 + locaData = 
locaTable.compile(self.font) + self.assertEqual(len(locaData), 0x20001 * 4) + + def test_compile_set_indexToLocFormat_0(self): + locaTable = self.font['loca'] + # offsets are all multiples of 2 and max length is < 0x10000 + locaTable.set(list(range(0, 0x20000, 2))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(0, newIndexFormat) + + def test_compile_set_indexToLocFormat_1(self): + locaTable = self.font['loca'] + # offsets are not multiples of 2 + locaTable.set(list(range(10))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(1, newIndexFormat) + # max length is >= 0x10000 + locaTable.set(list(range(0, 0x20000 + 1, 2))) + locaTable.compile(self.font) + newIndexFormat = self.font['head'].indexToLocFormat + self.assertEqual(1, newIndexFormat) + + +class WOFF2GlyfTableTest(unittest.TestCase): + + @classmethod + def setUpClass(cls): + font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font.importXML(TTX) + cls.tables = {} + cls.transformedTags = ('maxp', 'head', 'loca', 'glyf') + for tag in reversed(cls.transformedTags): # compile in inverse order + cls.tables[tag] = font.getTableData(tag) + infile = BytesIO(TT_WOFF2.getvalue()) + reader = WOFF2Reader(infile) + cls.transformedGlyfData = reader.tables['glyf'].loadData( + reader.transformBuffer) + cls.glyphOrder = ['.notdef'] + ["glyph%.5d" % i for i in range(1, font['maxp'].numGlyphs)] + + def setUp(self): + self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) + font.setGlyphOrder(self.glyphOrder) + font['head'] = ttLib.newTable('head') + font['maxp'] = ttLib.newTable('maxp') + font['loca'] = WOFF2LocaTable() + font['glyf'] = WOFF2GlyfTable() + for tag in self.transformedTags: + font[tag].decompile(self.tables[tag], font) + + def test_reconstruct_glyf_padded_4(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + 
glyfTable.padding = 4 + data = glyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) + self.assertEqual(normGlyfData, data) + + def test_reconstruct_glyf_padded_2(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 2 + data = glyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) + self.assertEqual(normGlyfData, data) + + def test_reconstruct_glyf_unpadded(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + data = glyfTable.compile(self.font) + self.assertEqual(self.tables['glyf'], data) + + def test_reconstruct_glyf_incorrect_glyphOrder(self): + glyfTable = WOFF2GlyfTable() + badGlyphOrder = self.font.getGlyphOrder()[:-1] + self.font.setGlyphOrder(badGlyphOrder) + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.reconstruct(self.transformedGlyfData, self.font) + + def test_reconstruct_glyf_missing_glyphOrder(self): + glyfTable = WOFF2GlyfTable() + del self.font.glyphOrder + numGlyphs = self.font['maxp'].numGlyphs + del self.font['maxp'] + glyfTable.reconstruct(self.transformedGlyfData, self.font) + expected = [".notdef"] + expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)]) + self.assertEqual(expected, glyfTable.glyphOrder) + + def test_reconstruct_loca_padded_4(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 4 + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) + self.assertEqual(normLocaData, data) + + def test_reconstruct_loca_padded_2(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + 
glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.padding = 2 + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) + self.assertEqual(normLocaData, data) + + def test_reconstruct_loca_unpadded(self): + locaTable = self.font['loca'] = WOFF2LocaTable() + glyfTable = self.font['glyf'] = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + glyfTable.compile(self.font) + data = locaTable.compile(self.font) + self.assertEqual(self.tables['loca'], data) + + def test_reconstruct_glyf_header_not_enough_data(self): + with self.assertRaisesRegex(ttLib.TTLibError, "not enough 'glyf' data"): + WOFF2GlyfTable().reconstruct(b"", self.font) + + def test_reconstruct_glyf_table_incorrect_size(self): + msg = "incorrect size of transformed 'glyf'" + with self.assertRaisesRegex(ttLib.TTLibError, msg): + WOFF2GlyfTable().reconstruct(self.transformedGlyfData + b"\x00", self.font) + with self.assertRaisesRegex(ttLib.TTLibError, msg): + WOFF2GlyfTable().reconstruct(self.transformedGlyfData[:-1], self.font) + + def test_transform_glyf(self): + glyfTable = self.font['glyf'] + data = glyfTable.transform(self.font) + self.assertEqual(self.transformedGlyfData, data) + + def test_transform_glyf_incorrect_glyphOrder(self): + glyfTable = self.font['glyf'] + badGlyphOrder = self.font.getGlyphOrder()[:-1] + del glyfTable.glyphOrder + self.font.setGlyphOrder(badGlyphOrder) + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.transform(self.font) + glyfTable.glyphOrder = badGlyphOrder + with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): + glyfTable.transform(self.font) + + def test_transform_glyf_missing_glyphOrder(self): + glyfTable = self.font['glyf'] + del glyfTable.glyphOrder + del self.font.glyphOrder + numGlyphs = self.font['maxp'].numGlyphs + del self.font['maxp'] + glyfTable.transform(self.font) + expected = 
[".notdef"] + expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)]) + self.assertEqual(expected, glyfTable.glyphOrder) + + def test_roundtrip_glyf_reconstruct_and_transform(self): + glyfTable = WOFF2GlyfTable() + glyfTable.reconstruct(self.transformedGlyfData, self.font) + data = glyfTable.transform(self.font) + self.assertEqual(self.transformedGlyfData, data) + + def test_roundtrip_glyf_transform_and_reconstruct(self): + glyfTable = self.font['glyf'] + transformedData = glyfTable.transform(self.font) + newGlyfTable = WOFF2GlyfTable() + newGlyfTable.reconstruct(transformedData, self.font) + newGlyfTable.padding = 4 + reconstructedData = newGlyfTable.compile(self.font) + normGlyfData = normalise_table(self.font, 'glyf', newGlyfTable.padding) + self.assertEqual(normGlyfData, reconstructedData) + + +class Base128Test(unittest.TestCase): + + def test_unpackBase128(self): + self.assertEqual(unpackBase128(b'\x3f\x00\x00'), (63, b"\x00\x00")) + self.assertEqual(unpackBase128(b'\x8f\xff\xff\xff\x7f')[0], 4294967295) + + self.assertRaisesRegex( + ttLib.TTLibError, + "UIntBase128 value must not start with leading zeros", + unpackBase128, b'\x80\x80\x3f') + + self.assertRaisesRegex( + ttLib.TTLibError, + "UIntBase128-encoded sequence is longer than 5 bytes", + unpackBase128, b'\x8f\xff\xff\xff\xff\x7f') + + self.assertRaisesRegex( + ttLib.TTLibError, + "UIntBase128 value exceeds 2\*\*32-1", + unpackBase128, b'\x90\x80\x80\x80\x00') + + self.assertRaisesRegex( + ttLib.TTLibError, + "not enough data to unpack UIntBase128", + unpackBase128, b'') + + def test_base128Size(self): + self.assertEqual(base128Size(0), 1) + self.assertEqual(base128Size(24567), 3) + self.assertEqual(base128Size(2**32-1), 5) + + def test_packBase128(self): + self.assertEqual(packBase128(63), b"\x3f") + self.assertEqual(packBase128(2**32-1), b'\x8f\xff\xff\xff\x7f') + self.assertRaisesRegex( + ttLib.TTLibError, + "UIntBase128 format requires 0 <= integer <= 2\*\*32-1", + packBase128, 2**32+1) + 
self.assertRaisesRegex( + ttLib.TTLibError, + "UIntBase128 format requires 0 <= integer <= 2\*\*32-1", + packBase128, -1) + + +class UShort255Test(unittest.TestCase): + + def test_unpack255UShort(self): + self.assertEqual(unpack255UShort(bytechr(252))[0], 252) + # some numbers (e.g. 506) can have multiple encodings + self.assertEqual( + unpack255UShort(struct.pack(b"BB", 254, 0))[0], 506) + self.assertEqual( + unpack255UShort(struct.pack(b"BB", 255, 253))[0], 506) + self.assertEqual( + unpack255UShort(struct.pack(b"BBB", 253, 1, 250))[0], 506) + + self.assertRaisesRegex( + ttLib.TTLibError, + "not enough data to unpack 255UInt16", + unpack255UShort, struct.pack(b"BB", 253, 0)) + + self.assertRaisesRegex( + ttLib.TTLibError, + "not enough data to unpack 255UInt16", + unpack255UShort, struct.pack(b"B", 254)) + + self.assertRaisesRegex( + ttLib.TTLibError, + "not enough data to unpack 255UInt16", + unpack255UShort, struct.pack(b"B", 255)) + + def test_pack255UShort(self): + self.assertEqual(pack255UShort(252), b'\xfc') + self.assertEqual(pack255UShort(505), b'\xff\xfc') + self.assertEqual(pack255UShort(506), b'\xfe\x00') + self.assertEqual(pack255UShort(762), b'\xfd\x02\xfa') + + self.assertRaisesRegex( + ttLib.TTLibError, + "255UInt16 format requires 0 <= integer <= 65535", + pack255UShort, -1) + + self.assertRaisesRegex( + ttLib.TTLibError, + "255UInt16 format requires 0 <= integer <= 65535", + pack255UShort, 0xFFFF+1) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/ttx/data/TestBOM.ttx fonttools-3.21.2/Tests/ttx/data/TestBOM.ttx --- fonttools-3.0/Tests/ttx/data/TestBOM.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttx/data/TestBOM.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,3 @@ + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttx/data/TestDFONT.dfont and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttx/data/TestDFONT.dfont differ diff -Nru 
fonttools-3.0/Tests/ttx/data/TestNoSFNT.ttx fonttools-3.21.2/Tests/ttx/data/TestNoSFNT.ttx --- fonttools-3.0/Tests/ttx/data/TestNoSFNT.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttx/data/TestNoSFNT.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,2 @@ + + diff -Nru fonttools-3.0/Tests/ttx/data/TestNoXML.ttx fonttools-3.21.2/Tests/ttx/data/TestNoXML.ttx --- fonttools-3.0/Tests/ttx/data/TestNoXML.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttx/data/TestNoXML.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,2 @@ + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttx/data/TestOTF.otf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttx/data/TestOTF.otf differ diff -Nru fonttools-3.0/Tests/ttx/data/TestOTF.ttx fonttools-3.21.2/Tests/ttx/data/TestOTF.ttx --- fonttools-3.0/Tests/ttx/data/TestOTF.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttx/data/TestOTF.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,519 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright (c) 2015 by FontTools. No rights reserved. + + + Test OTF + + + Regular + + + FontTools: Test OTF: 2015 + + + Test OTF + + + Version 1.000 + + + TestOTF-Regular + + + Test OTF is not a trademark of FontTools. + + + FontTools + + + FontTools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + + + Test TTF + + + Copyright (c) 2015 by FontTools. No rights reserved. + + + Test OTF + + + Regular + + + FontTools: Test OTF: 2015 + + + Test OTF + + + Version 1.000 + + + TestOTF-Regular + + + Test OTF is not a trademark of FontTools. 
+ + + FontTools + + + FontTools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 131 122 -131 hlineto + return + + + + + + 500 450 hmoveto + 750 -400 -750 vlineto + 50 50 rmoveto + 650 300 -650 vlineto + endchar + + + 0 endchar + + + 250 endchar + + + 723 55 hmoveto + -107 callsubr + 241 -122 rmoveto + -107 callsubr + 241 -122 rmoveto + -107 callsubr + endchar + + + 241 55 hmoveto + -107 callsubr + endchar + + + 250 endchar + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttx/data/TestTTC.ttc and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttx/data/TestTTC.ttc differ Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttx/data/TestTTF.ttf and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttx/data/TestTTF.ttf differ diff -Nru fonttools-3.0/Tests/ttx/data/TestTTF.ttx fonttools-3.21.2/Tests/ttx/data/TestTTF.ttx --- fonttools-3.0/Tests/ttx/data/TestTTF.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttx/data/TestTTF.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,553 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + + + + + + + + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + + + + + + + + + + + + SVTCA[0] /* SetFPVectorToAxis */ + SVTCA[1] /* SetFPVectorToAxis */ + + + + + + + + + + Copyright (c) 2015 by FontTools. No rights reserved. + + + Test TTF + + + Regular + + + FontTools: Test TTF: 2015 + + + Test TTF + + + Version 1.000 + + + TestTTF-Regular + + + Test TTF is not a trademark of FontTools. + + + FontTools + + + FontTools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + + + Test TTF + + + Copyright (c) 2015 by FontTools. No rights reserved. + + + Test TTF + + + Regular + + + FontTools: Test TTF: 2015 + + + Test TTF + + + Version 1.000 + + + TestTTF-Regular + + + Test TTF is not a trademark of FontTools. 
+ + + FontTools + + + FontTools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools + + + https://github.com/behdad/fonttools/blob/master/LICENSE.txt + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttx/data/TestWOFF2.woff2 and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttx/data/TestWOFF2.woff2 differ Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Tests/ttx/data/TestWOFF.woff and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Tests/ttx/data/TestWOFF.woff differ diff -Nru fonttools-3.0/Tests/ttx/ttx_test.py fonttools-3.21.2/Tests/ttx/ttx_test.py --- fonttools-3.0/Tests/ttx/ttx_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/ttx/ttx_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,187 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools import ttx +import getopt +import os +import shutil +import sys +import tempfile +import unittest + + +class TTXTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + @staticmethod + def getpath(testfile): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", testfile) + + def temp_dir(self): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + + def temp_font(self, font_path, file_name): + self.temp_dir() + temppath = os.path.join(self.tempdir, file_name) + shutil.copy2(font_path, temppath) + return temppath + +# ----- +# Tests +# ----- + + def test_parseOptions_no_args(self): + with self.assertRaises(getopt.GetoptError) as cm: + ttx.parseOptions([]) + self.assertTrue('Must specify at least one input file' in str(cm.exception)) + + def test_parseOptions_invalid_path(self): + file_path = 'invalid_font_path' + with self.assertRaises(getopt.GetoptError) as cm: + ttx.parseOptions([file_path]) + self.assertTrue('File not found: "%s"' % file_path in str(cm.exception)) + + def test_parseOptions_font2ttx_1st_time(self): + file_name = 'TestOTF.otf' + font_path = self.getpath(file_name) + temp_path = self.temp_font(font_path, file_name) + jobs, _ = ttx.parseOptions([temp_path]) + self.assertEqual(jobs[0][0].__name__, 'ttDump') + self.assertEqual(jobs[0][1:], + (os.path.join(self.tempdir, file_name), + os.path.join(self.tempdir, file_name.split('.')[0] + '.ttx'))) + + def test_parseOptions_font2ttx_2nd_time(self): + file_name = 'TestTTF.ttf' + font_path = self.getpath(file_name) + temp_path = self.temp_font(font_path, file_name) + _, _ = ttx.parseOptions([temp_path]) # this is NOT a mistake + jobs, _ = ttx.parseOptions([temp_path]) + self.assertEqual(jobs[0][0].__name__, 'ttDump') + self.assertEqual(jobs[0][1:], + (os.path.join(self.tempdir, file_name), + os.path.join(self.tempdir, file_name.split('.')[0] + '#1.ttx'))) + + def test_parseOptions_ttx2font_1st_time(self): + 
file_name = 'TestTTF.ttx' + font_path = self.getpath(file_name) + temp_path = self.temp_font(font_path, file_name) + jobs, _ = ttx.parseOptions([temp_path]) + self.assertEqual(jobs[0][0].__name__, 'ttCompile') + self.assertEqual(jobs[0][1:], + (os.path.join(self.tempdir, file_name), + os.path.join(self.tempdir, file_name.split('.')[0] + '.ttf'))) + + def test_parseOptions_ttx2font_2nd_time(self): + file_name = 'TestOTF.ttx' + font_path = self.getpath(file_name) + temp_path = self.temp_font(font_path, file_name) + _, _ = ttx.parseOptions([temp_path]) # this is NOT a mistake + jobs, _ = ttx.parseOptions([temp_path]) + self.assertEqual(jobs[0][0].__name__, 'ttCompile') + self.assertEqual(jobs[0][1:], + (os.path.join(self.tempdir, file_name), + os.path.join(self.tempdir, file_name.split('.')[0] + '#1.otf'))) + + def test_parseOptions_multiple_fonts(self): + file_names = ['TestOTF.otf', 'TestTTF.ttf'] + font_paths = [self.getpath(file_name) for file_name in file_names] + temp_paths = [self.temp_font(font_path, file_name) \ + for font_path, file_name in zip(font_paths, file_names)] + jobs, _ = ttx.parseOptions(temp_paths) + for i in range(len(jobs)): + self.assertEqual(jobs[i][0].__name__, 'ttDump') + self.assertEqual(jobs[i][1:], + (os.path.join(self.tempdir, file_names[i]), + os.path.join(self.tempdir, file_names[i].split('.')[0] + '.ttx'))) + + def test_parseOptions_mixed_files(self): + operations = ['ttDump', 'ttCompile'] + extensions = ['.ttx', '.ttf'] + file_names = ['TestOTF.otf', 'TestTTF.ttx'] + font_paths = [self.getpath(file_name) for file_name in file_names] + temp_paths = [self.temp_font(font_path, file_name) \ + for font_path, file_name in zip(font_paths, file_names)] + jobs, _ = ttx.parseOptions(temp_paths) + for i in range(len(jobs)): + self.assertEqual(jobs[i][0].__name__, operations[i]) + self.assertEqual(jobs[i][1:], + (os.path.join(self.tempdir, file_names[i]), + os.path.join(self.tempdir, file_names[i].split('.')[0] + extensions[i]))) + + def 
test_guessFileType_ttf(self): + file_name = 'TestTTF.ttf' + font_path = self.getpath(file_name) + self.assertEqual(ttx.guessFileType(font_path), 'TTF') + + def test_guessFileType_otf(self): + file_name = 'TestOTF.otf' + font_path = self.getpath(file_name) + self.assertEqual(ttx.guessFileType(font_path), 'OTF') + + def test_guessFileType_woff(self): + file_name = 'TestWOFF.woff' + font_path = self.getpath(file_name) + self.assertEqual(ttx.guessFileType(font_path), 'WOFF') + + def test_guessFileType_woff2(self): + file_name = 'TestWOFF2.woff2' + font_path = self.getpath(file_name) + self.assertEqual(ttx.guessFileType(font_path), 'WOFF2') + + def test_guessFileType_ttc(self): + file_name = 'TestTTC.ttc' + font_path = self.getpath(file_name) + self.assertEqual(ttx.guessFileType(font_path), 'TTC') + + def test_guessFileType_dfont(self): + file_name = 'TestDFONT.dfont' + font_path = self.getpath(file_name) + self.assertEqual(ttx.guessFileType(font_path), 'TTF') + + def test_guessFileType_ttx_ttf(self): + file_name = 'TestTTF.ttx' + font_path = self.getpath(file_name) + self.assertEqual(ttx.guessFileType(font_path), 'TTX') + + def test_guessFileType_ttx_otf(self): + file_name = 'TestOTF.ttx' + font_path = self.getpath(file_name) + self.assertEqual(ttx.guessFileType(font_path), 'OTX') + + def test_guessFileType_ttx_bom(self): + file_name = 'TestBOM.ttx' + font_path = self.getpath(file_name) + self.assertEqual(ttx.guessFileType(font_path), 'TTX') + + def test_guessFileType_ttx_no_sfntVersion(self): + file_name = 'TestNoSFNT.ttx' + font_path = self.getpath(file_name) + self.assertEqual(ttx.guessFileType(font_path), 'TTX') + + def test_guessFileType_ttx_no_xml(self): + file_name = 'TestNoXML.ttx' + font_path = self.getpath(file_name) + self.assertIsNone(ttx.guessFileType(font_path)) + + def test_guessFileType_invalid_path(self): + font_path = 'invalid_font_path' + self.assertIsNone(ttx.guessFileType(font_path)) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff 
-Nru fonttools-3.0/Tests/unicodedata_test.py fonttools-3.21.2/Tests/unicodedata_test.py --- fonttools-3.0/Tests/unicodedata_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/unicodedata_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,208 @@ +from __future__ import ( + print_function, division, absolute_import, unicode_literals) +from fontTools.misc.py23 import * + +from fontTools import unicodedata + +import pytest + + +def test_script(): + assert unicodedata.script("a") == "Latn" + assert unicodedata.script(unichr(0)) == "Zyyy" + assert unicodedata.script(unichr(0x0378)) == "Zzzz" + assert unicodedata.script(unichr(0x10FFFF)) == "Zzzz" + + # these were randomly sampled, one character per script + assert unicodedata.script(unichr(0x1E918)) == 'Adlm' + assert unicodedata.script(unichr(0x1170D)) == 'Ahom' + assert unicodedata.script(unichr(0x145A0)) == 'Hluw' + assert unicodedata.script(unichr(0x0607)) == 'Arab' + assert unicodedata.script(unichr(0x056C)) == 'Armn' + assert unicodedata.script(unichr(0x10B27)) == 'Avst' + assert unicodedata.script(unichr(0x1B41)) == 'Bali' + assert unicodedata.script(unichr(0x168AD)) == 'Bamu' + assert unicodedata.script(unichr(0x16ADD)) == 'Bass' + assert unicodedata.script(unichr(0x1BE5)) == 'Batk' + assert unicodedata.script(unichr(0x09F3)) == 'Beng' + assert unicodedata.script(unichr(0x11C5B)) == 'Bhks' + assert unicodedata.script(unichr(0x3126)) == 'Bopo' + assert unicodedata.script(unichr(0x1103B)) == 'Brah' + assert unicodedata.script(unichr(0x2849)) == 'Brai' + assert unicodedata.script(unichr(0x1A0A)) == 'Bugi' + assert unicodedata.script(unichr(0x174E)) == 'Buhd' + assert unicodedata.script(unichr(0x18EE)) == 'Cans' + assert unicodedata.script(unichr(0x102B7)) == 'Cari' + assert unicodedata.script(unichr(0x1053D)) == 'Aghb' + assert unicodedata.script(unichr(0x11123)) == 'Cakm' + assert unicodedata.script(unichr(0xAA1F)) == 'Cham' + assert unicodedata.script(unichr(0xAB95)) == 'Cher' + assert 
unicodedata.script(unichr(0x1F0C7)) == 'Zyyy' + assert unicodedata.script(unichr(0x2C85)) == 'Copt' + assert unicodedata.script(unichr(0x12014)) == 'Xsux' + assert unicodedata.script(unichr(0x1082E)) == 'Cprt' + assert unicodedata.script(unichr(0xA686)) == 'Cyrl' + assert unicodedata.script(unichr(0x10417)) == 'Dsrt' + assert unicodedata.script(unichr(0x093E)) == 'Deva' + assert unicodedata.script(unichr(0x1BC4B)) == 'Dupl' + assert unicodedata.script(unichr(0x1310C)) == 'Egyp' + assert unicodedata.script(unichr(0x1051C)) == 'Elba' + assert unicodedata.script(unichr(0x2DA6)) == 'Ethi' + assert unicodedata.script(unichr(0x10AD)) == 'Geor' + assert unicodedata.script(unichr(0x2C52)) == 'Glag' + assert unicodedata.script(unichr(0x10343)) == 'Goth' + assert unicodedata.script(unichr(0x11371)) == 'Gran' + assert unicodedata.script(unichr(0x03D0)) == 'Grek' + assert unicodedata.script(unichr(0x0AAA)) == 'Gujr' + assert unicodedata.script(unichr(0x0A4C)) == 'Guru' + assert unicodedata.script(unichr(0x23C9F)) == 'Hani' + assert unicodedata.script(unichr(0xC259)) == 'Hang' + assert unicodedata.script(unichr(0x1722)) == 'Hano' + assert unicodedata.script(unichr(0x108F5)) == 'Hatr' + assert unicodedata.script(unichr(0x05C2)) == 'Hebr' + assert unicodedata.script(unichr(0x1B072)) == 'Hira' + assert unicodedata.script(unichr(0x10847)) == 'Armi' + assert unicodedata.script(unichr(0x033A)) == 'Zinh' + assert unicodedata.script(unichr(0x10B66)) == 'Phli' + assert unicodedata.script(unichr(0x10B4B)) == 'Prti' + assert unicodedata.script(unichr(0xA98A)) == 'Java' + assert unicodedata.script(unichr(0x110B2)) == 'Kthi' + assert unicodedata.script(unichr(0x0CC6)) == 'Knda' + assert unicodedata.script(unichr(0x3337)) == 'Kana' + assert unicodedata.script(unichr(0xA915)) == 'Kali' + assert unicodedata.script(unichr(0x10A2E)) == 'Khar' + assert unicodedata.script(unichr(0x17AA)) == 'Khmr' + assert unicodedata.script(unichr(0x11225)) == 'Khoj' + assert unicodedata.script(unichr(0x112B6)) 
== 'Sind' + assert unicodedata.script(unichr(0x0ED7)) == 'Laoo' + assert unicodedata.script(unichr(0xAB3C)) == 'Latn' + assert unicodedata.script(unichr(0x1C48)) == 'Lepc' + assert unicodedata.script(unichr(0x1923)) == 'Limb' + assert unicodedata.script(unichr(0x1071D)) == 'Lina' + assert unicodedata.script(unichr(0x100EC)) == 'Linb' + assert unicodedata.script(unichr(0xA4E9)) == 'Lisu' + assert unicodedata.script(unichr(0x10284)) == 'Lyci' + assert unicodedata.script(unichr(0x10926)) == 'Lydi' + assert unicodedata.script(unichr(0x11161)) == 'Mahj' + assert unicodedata.script(unichr(0x0D56)) == 'Mlym' + assert unicodedata.script(unichr(0x0856)) == 'Mand' + assert unicodedata.script(unichr(0x10AF0)) == 'Mani' + assert unicodedata.script(unichr(0x11CB0)) == 'Marc' + assert unicodedata.script(unichr(0x11D28)) == 'Gonm' + assert unicodedata.script(unichr(0xABDD)) == 'Mtei' + assert unicodedata.script(unichr(0x1E897)) == 'Mend' + assert unicodedata.script(unichr(0x109B0)) == 'Merc' + assert unicodedata.script(unichr(0x10993)) == 'Mero' + assert unicodedata.script(unichr(0x16F5D)) == 'Plrd' + assert unicodedata.script(unichr(0x1160B)) == 'Modi' + assert unicodedata.script(unichr(0x18A8)) == 'Mong' + assert unicodedata.script(unichr(0x16A48)) == 'Mroo' + assert unicodedata.script(unichr(0x1128C)) == 'Mult' + assert unicodedata.script(unichr(0x105B)) == 'Mymr' + assert unicodedata.script(unichr(0x108AF)) == 'Nbat' + assert unicodedata.script(unichr(0x19B3)) == 'Talu' + assert unicodedata.script(unichr(0x1143D)) == 'Newa' + assert unicodedata.script(unichr(0x07F4)) == 'Nkoo' + assert unicodedata.script(unichr(0x1B192)) == 'Nshu' + assert unicodedata.script(unichr(0x169C)) == 'Ogam' + assert unicodedata.script(unichr(0x1C56)) == 'Olck' + assert unicodedata.script(unichr(0x10CE9)) == 'Hung' + assert unicodedata.script(unichr(0x10316)) == 'Ital' + assert unicodedata.script(unichr(0x10A93)) == 'Narb' + assert unicodedata.script(unichr(0x1035A)) == 'Perm' + assert 
unicodedata.script(unichr(0x103D5)) == 'Xpeo' + assert unicodedata.script(unichr(0x10A65)) == 'Sarb' + assert unicodedata.script(unichr(0x10C09)) == 'Orkh' + assert unicodedata.script(unichr(0x0B60)) == 'Orya' + assert unicodedata.script(unichr(0x104CF)) == 'Osge' + assert unicodedata.script(unichr(0x104A8)) == 'Osma' + assert unicodedata.script(unichr(0x16B12)) == 'Hmng' + assert unicodedata.script(unichr(0x10879)) == 'Palm' + assert unicodedata.script(unichr(0x11AF1)) == 'Pauc' + assert unicodedata.script(unichr(0xA869)) == 'Phag' + assert unicodedata.script(unichr(0x10909)) == 'Phnx' + assert unicodedata.script(unichr(0x10B81)) == 'Phlp' + assert unicodedata.script(unichr(0xA941)) == 'Rjng' + assert unicodedata.script(unichr(0x16C3)) == 'Runr' + assert unicodedata.script(unichr(0x0814)) == 'Samr' + assert unicodedata.script(unichr(0xA88C)) == 'Saur' + assert unicodedata.script(unichr(0x111C8)) == 'Shrd' + assert unicodedata.script(unichr(0x1045F)) == 'Shaw' + assert unicodedata.script(unichr(0x115AD)) == 'Sidd' + assert unicodedata.script(unichr(0x1D8C0)) == 'Sgnw' + assert unicodedata.script(unichr(0x0DB9)) == 'Sinh' + assert unicodedata.script(unichr(0x110F9)) == 'Sora' + assert unicodedata.script(unichr(0x11A60)) == 'Soyo' + assert unicodedata.script(unichr(0x1B94)) == 'Sund' + assert unicodedata.script(unichr(0xA81F)) == 'Sylo' + assert unicodedata.script(unichr(0x0740)) == 'Syrc' + assert unicodedata.script(unichr(0x1714)) == 'Tglg' + assert unicodedata.script(unichr(0x1761)) == 'Tagb' + assert unicodedata.script(unichr(0x1965)) == 'Tale' + assert unicodedata.script(unichr(0x1A32)) == 'Lana' + assert unicodedata.script(unichr(0xAA86)) == 'Tavt' + assert unicodedata.script(unichr(0x116A5)) == 'Takr' + assert unicodedata.script(unichr(0x0B8E)) == 'Taml' + assert unicodedata.script(unichr(0x1754D)) == 'Tang' + assert unicodedata.script(unichr(0x0C40)) == 'Telu' + assert unicodedata.script(unichr(0x07A4)) == 'Thaa' + assert unicodedata.script(unichr(0x0E42)) == 
'Thai' + assert unicodedata.script(unichr(0x0F09)) == 'Tibt' + assert unicodedata.script(unichr(0x2D3A)) == 'Tfng' + assert unicodedata.script(unichr(0x114B0)) == 'Tirh' + assert unicodedata.script(unichr(0x1038B)) == 'Ugar' + assert unicodedata.script(unichr(0xA585)) == 'Vaii' + assert unicodedata.script(unichr(0x118CF)) == 'Wara' + assert unicodedata.script(unichr(0xA066)) == 'Yiii' + assert unicodedata.script(unichr(0x11A31)) == 'Zanb' + + +def test_script_extension(): + assert unicodedata.script_extension("a") == {"Latn"} + assert unicodedata.script_extension(unichr(0)) == {"Zyyy"} + assert unicodedata.script_extension(unichr(0x0378)) == {"Zzzz"} + assert unicodedata.script_extension(unichr(0x10FFFF)) == {"Zzzz"} + + assert unicodedata.script_extension("\u0660") == {'Arab', 'Thaa'} + assert unicodedata.script_extension("\u0964") == { + 'Beng', 'Deva', 'Gran', 'Gujr', 'Guru', 'Knda', 'Mahj', 'Mlym', + 'Orya', 'Sind', 'Sinh', 'Sylo', 'Takr', 'Taml', 'Telu', 'Tirh'} + + +def test_script_name(): + assert unicodedata.script_name("Latn") == "Latin" + assert unicodedata.script_name("Zyyy") == "Common" + assert unicodedata.script_name("Zzzz") == "Unknown" + # underscores in long names are replaced by spaces + assert unicodedata.script_name("Egyp") == "Egyptian Hieroglyphs" + + with pytest.raises(KeyError): + unicodedata.script_name("QQQQ") + assert unicodedata.script_name("QQQQ", default="Unknown") + + +def test_script_code(): + assert unicodedata.script_code("Latin") == "Latn" + assert unicodedata.script_code("Common") == "Zyyy" + assert unicodedata.script_code("Unknown") == "Zzzz" + # case, whitespace, underscores and hyphens are ignored + assert unicodedata.script_code("Egyptian Hieroglyphs") == "Egyp" + assert unicodedata.script_code("Egyptian_Hieroglyphs") == "Egyp" + assert unicodedata.script_code("egyptianhieroglyphs") == "Egyp" + assert unicodedata.script_code("Egyptian-Hieroglyphs") == "Egyp" + + with pytest.raises(KeyError): + unicodedata.script_code("Does 
not exist") + assert unicodedata.script_code("Does not exist", default="Zzzz") == "Zzzz" + + +def test_block(): + assert unicodedata.block("\x00") == "Basic Latin" + assert unicodedata.block("\x7F") == "Basic Latin" + assert unicodedata.block("\x80") == "Latin-1 Supplement" + assert unicodedata.block("\u1c90") == "No_Block" + + +if __name__ == "__main__": + import sys + sys.exit(pytest.main(sys.argv)) diff -Nru fonttools-3.0/Tests/varLib/builder_test.py fonttools-3.21.2/Tests/varLib/builder_test.py --- fonttools-3.0/Tests/varLib/builder_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/builder_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,64 @@ +from __future__ import print_function, division, absolute_import +from fontTools.varLib.builder import buildVarData +import pytest + + +@pytest.mark.parametrize("region_indices, items, expected_num_shorts", [ + ([], [], 0), + ([0], [[1]], 0), + ([0], [[128]], 1), + ([0, 1, 2], [[128, 1, 2], [3, -129, 5], [6, 7, 8]], 2), + ([0, 1, 2], [[0, 128, 2], [3, 4, 5], [6, 7, -129]], 3), +], ids=[ + "0_regions_0_deltas", + "1_region_1_uint8", + "1_region_1_short", + "3_regions_2_shorts_ordered", + "3_regions_2_shorts_unordered", +]) +def test_buildVarData_no_optimize(region_indices, items, expected_num_shorts): + data = buildVarData(region_indices, items, optimize=False) + + assert data.ItemCount == len(items) + assert data.NumShorts == expected_num_shorts + assert data.VarRegionCount == len(region_indices) + assert data.VarRegionIndex == region_indices + assert data.Item == items + + +@pytest.mark.parametrize([ + "region_indices", "items", "expected_num_shorts", + "expected_regions", "expected_items" +], [ + ([0, 1, 2], [[0, 1, 2], [3, 4, 5], [6, 7, 8]], 0, + [0, 1, 2], [[0, 1, 2], [3, 4, 5], [6, 7, 8]]), + ([0, 1, 2], [[0, 128, 2], [3, 4, 5], [6, 7, 8]], 1, + [1, 0, 2], [[128, 0, 2], [4, 3, 5], [7, 6, 8]]), + ([0, 1, 2], [[0, 1, 128], [3, 4, 5], [6, -129, 8]], 2, + [1, 2, 0], [[1, 128, 0], [4, 5, 
3], [-129, 8, 6]]), + ([0, 1, 2], [[128, 1, -129], [3, 4, 5], [6, 7, 8]], 2, + [0, 2, 1], [[128, -129, 1], [3, 5, 4], [6, 8, 7]]), + ([0, 1, 2], [[0, 1, 128], [3, -129, 5], [256, 7, 8]], 3, + [0, 1, 2], [[0, 1, 128], [3, -129, 5], [256, 7, 8]]), +], ids=[ + "0/3_shorts_no_reorder", + "1/3_shorts_reorder", + "2/3_shorts_reorder", + "2/3_shorts_same_row_reorder", + "3/3_shorts_no_reorder", +]) +def test_buildVarData_optimize( + region_indices, items, expected_num_shorts, expected_regions, + expected_items): + data = buildVarData(region_indices, items, optimize=True) + + assert data.ItemCount == len(items) + assert data.NumShorts == expected_num_shorts + assert data.VarRegionCount == len(region_indices) + assert data.VarRegionIndex == expected_regions + assert data.Item == expected_items + + +if __name__ == "__main__": + import sys + sys.exit(pytest.main(sys.argv)) diff -Nru fonttools-3.0/Tests/varLib/data/BuildAvarEmptyAxis.designspace fonttools-3.21.2/Tests/varLib/data/BuildAvarEmptyAxis.designspace --- fonttools-3.0/Tests/varLib/data/BuildAvarEmptyAxis.designspace 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/BuildAvarEmptyAxis.designspace 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,59 @@ + + + + + + + + + Weight + + + Width + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/BuildAvarIdentityMaps.designspace fonttools-3.21.2/Tests/varLib/data/BuildAvarIdentityMaps.designspace --- fonttools-3.0/Tests/varLib/data/BuildAvarIdentityMaps.designspace 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/BuildAvarIdentityMaps.designspace 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,80 @@ + + + + + + + + + + + + + + Weight + + + + + + + Width + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/BuildAvarSingleAxis.designspace 
fonttools-3.21.2/Tests/varLib/data/BuildAvarSingleAxis.designspace --- fonttools-3.0/Tests/varLib/data/BuildAvarSingleAxis.designspace 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/BuildAvarSingleAxis.designspace 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + Weight + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/Build.designspace fonttools-3.21.2/Tests/varLib/data/Build.designspace --- fonttools-3.0/Tests/varLib/data/Build.designspace 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/Build.designspace 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,300 @@ + + + + + + Contrast + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/Designspace2.designspace fonttools-3.21.2/Tests/varLib/data/Designspace2.designspace --- fonttools-3.0/Tests/varLib/data/Designspace2.designspace 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/Designspace2.designspace 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,8 @@ + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/Designspace.designspace fonttools-3.21.2/Tests/varLib/data/Designspace.designspace --- fonttools-3.0/Tests/varLib/data/Designspace.designspace 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/Designspace.designspace 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 
+1,39 @@ + + + + + + + + + + + Contrast + Kontrast + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/InterpolateLayout2.designspace fonttools-3.21.2/Tests/varLib/data/InterpolateLayout2.designspace --- fonttools-3.0/Tests/varLib/data/InterpolateLayout2.designspace 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/InterpolateLayout2.designspace 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/InterpolateLayout3.designspace fonttools-3.21.2/Tests/varLib/data/InterpolateLayout3.designspace --- fonttools-3.0/Tests/varLib/data/InterpolateLayout3.designspace 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/InterpolateLayout3.designspace 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/InterpolateLayout.designspace fonttools-3.21.2/Tests/varLib/data/InterpolateLayout.designspace --- fonttools-3.0/Tests/varLib/data/InterpolateLayout.designspace 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/InterpolateLayout.designspace 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,65 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_otf/TestFamily2-Master0.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_otf/TestFamily2-Master0.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_otf/TestFamily2-Master0.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_otf/TestFamily2-Master0.ttx 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,855 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Paul D. Hunt + + + Alternate a + + + Test Family 2 + + + Regular + + + Version 2.020;ADBO;Test Family 2 Regular + + + Test Family 2 + + + Version 2.020 + + + TestFamily2-Master0 + + + Paul D. Hunt + + + Master 0 + + + Alternate a + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + rmoveto + 18 0 14 14 0 18 rrcurveto + 0 18 -14 14 -18 0 rrcurveto + -18 0 -14 -14 0 -18 rrcurveto + 0 -18 14 -14 18 0 rrcurveto + return + + + rmoveto + 64 0 60 36 50 40 rrcurveto + 2 0 rlineto + 4 -64 rlineto + 26 0 rlineto + 0 310 rlineto + 0 96 -34 84 -112 0 rrcurveto + -78 0 -66 -40 -30 -22 rrcurveto + 16 -24 rlineto + 30 24 58 34 68 0 rrcurveto + 100 0 20 -86 -2 -78 rrcurveto + -216 -24 -98 -50 0 -107 rrcurveto + 0 -91 64 -38 74 0 rrcurveto + 2 28 rmoveto + -58 0 -50 28 0 74 rrcurveto + 0 82 72 48 210 24 rrcurveto + 0 -174 rlineto + -64 -54 -52 -28 -58 0 rrcurveto + endchar + + + -40 22 -58 0 rrcurveto + -116 0 -98 -98 0 -154 rrcurveto + 0 -162 78 -88 120 0 rrcurveto + 2 28 rmoveto + -108 0 -60 90 0 132 rrcurveto + 0 124 78 100 102 0 rrcurveto + 50 0 44 -18 54 -48 rrcurveto + 0 -296 rlineto + -54 -54 -50 -30 -56 0 rrcurveto + endchar + + + 0 rlineto + 0 114 rlineto + 0 72 24 42 54 0 rrcurveto + 18 0 20 -4 20 -10 rrcurveto + 10 26 rlineto + -22 10 -24 6 -20 0 rrcurveto + -68 0 -42 -44 0 -94 rrcurveto + 0 -118 rlineto + -66 -4 rlineto + 0 -24 rlineto + 66 0 rlineto + 0 -450 rlineto + 30 0 rlineto + 0 450 rlineto + return + + + 580 rmoveto + 63 0 16 66 4 56 rrcurveto + -26 2 rlineto + -2 -52 -16 -46 -37 0 rrcurveto + -59 0 -20 100 -76 0 rrcurveto + -64 0 -16 -65 
-4 -57 rrcurveto + 26 -2 rlineto + 2 54 16 44 38 0 rrcurveto + 58 0 20 -100 77 0 rrcurveto + return + + + -10 26 rlineto + -18 -8 -26 -8 -18 0 rrcurveto + -70 0 -14 44 0 62 rrcurveto + 0 328 rlineto + 142 0 rlineto + 0 28 rlineto + -142 0 rlineto + 0 140 rlineto + -26 0 rlineto + -4 -140 rlineto + return + + + 540 252 -12 rmoveto + 66 0 54 36 40 40 rrcurveto + 2 0 rlineto + 4 -64 rlineto + 26 0 rlineto + 0 return + + + 0 21 rrcurveto + 0 20 -13 11 -18 0 rrcurveto + -16 0 -13 -11 0 -20 rrcurveto + 0 -21 13 return + + + + + + 624 96 0 rmoveto + 432 0 rlineto + 0 660 rlineto + -432 0 rlineto + 214 -294 rmoveto + -56 92 rlineto + -94 168 rlineto + 302 0 rlineto + -94 -168 rlineto + -54 -92 rlineto + -180 -292 rmoveto + 0 536 rlineto + 154 -270 rlineto + 200 -266 rmoveto + -152 266 rlineto + 152 270 rlineto + -344 -578 rmoveto + 102 176 rlineto + 64 106 rlineto + 4 0 rlineto + 62 -106 rlineto + 100 -176 rlineto + endchar + + + 520 476 0 rmoveto + 34 0 rlineto + -236 660 rlineto + -28 0 rlineto + -236 -660 rlineto + 32 0 rlineto + 83 236 rlineto + 269 0 rlineto + -212 160 rmoveto + 28 80 24 68 24 82 rrcurveto + 4 0 rlineto + 24 -82 24 -68 28 -80 rrcurveto + 46 -132 rlineto + -249 0 rlineto + endchar + + + 444 400 0 rmoveto + 34 0 rlineto + -198 510 rlineto + -29 0 rlineto + -197 -510 rlineto + 32 0 rlineto + 67 176 rlineto + 225 0 rlineto + -176 128 rmoveto + 23 62 18 48 21 61 rrcurveto + 4 0 rlineto + 21 -60 18 -48 23 -63 rrcurveto + 38 -100 rlineto + -204 0 rlineto + endchar + + + 486 198 -12 -106 callsubr + + + -101 callsubr + 478 rlineto + -28 0 rlineto + -2 -46 rlineto + -2 0 rlineto + -46 36 -105 callsubr + + + 562 550 16 rmoveto + -39 15 -44 27 -47 39 rrcurveto + 53 67 39 86 26 92 rrcurveto + -30 0 rlineto + -24 -88 -35 -77 -50 -62 rrcurveto + -70 64 -72 88 -47 90 rrcurveto + 76 58 78 57 0 84 rrcurveto + 0 66 -36 50 -68 0 rrcurveto + -76 0 -54 -60 0 -84 rrcurveto + 0 -52 17 -57 28 -57 rrcurveto + -70 -53 -67 -58 0 -85 rrcurveto + 0 -110 86 -68 100 0 rrcurveto + 73 
0 56 35 48 51 rrcurveto + 51 -43 46 -28 40 -15 rrcurveto + -378 542 rmoveto + 0 62 36 52 62 0 rrcurveto + 56 0 20 -46 0 -44 rrcurveto + 0 -72 -66 -50 -69 -52 rrcurveto + -24 52 -15 51 0 47 rrcurveto + -90 -362 rmoveto + 0 71 54 51 63 49 rrcurveto + 48 -91 73 -88 72 -67 rrcurveto + -43 -45 -53 -32 -58 0 rrcurveto + -84 0 -72 60 0 92 rrcurveto + endchar + + + 486 319 -103 callsubr + -121 -592 -106 callsubr + + + -101 callsubr + 722 rlineto + -30 0 rlineto + 0 -202 rlineto + 2 -90 rlineto + -50 38 -105 callsubr + + + 252 244 450 rmoveto + 0 28 rlineto + -114 -104 callsubr + endchar + + + 518 508 6 rmoveto + -102 callsubr + -192 -104 callsubr + 192 0 rlineto + 0 -324 rlineto + 0 -82 24 -56 88 0 rrcurveto + 16 0 30 8 28 10 rrcurveto + endchar + + + 526 96 0 rmoveto + 30 0 rlineto + 0 366 rlineto + 62 64 44 32 60 0 rrcurveto + 82 0 34 -52 0 -106 rrcurveto + 0 -304 rlineto + 30 0 rlineto + 0 308 rlineto + 0 124 -46 58 -98 0 rrcurveto + -66 0 -50 -38 -50 -50 rrcurveto + -2 0 rlineto + -4 76 rlineto + -26 0 rlineto + endchar + + + 200 endchar + + + 302 218 -12 rmoveto + 16 0 30 8 28 10 rrcurveto + -102 callsubr + -76 -4 rlineto + 0 -24 rlineto + 76 0 rlineto + 0 -324 rlineto + 0 -82 24 -56 88 0 rrcurveto + endchar + + + 0 77 -103 callsubr + endchar + + + 0 -86 602 -107 callsubr + 172 0 -107 callsubr + endchar + + + 0 -86 -188 -107 callsubr + 172 0 -107 callsubr + endchar + + + 0 77 -220 rmoveto + 63 0 16 66 4 56 rrcurveto + -26 2 rlineto + -2 -52 -16 -46 -37 0 rrcurveto + -59 0 -20 100 -76 0 rrcurveto + -64 0 -16 -65 -4 -57 rrcurveto + 26 -2 rlineto + 2 54 16 44 38 0 rrcurveto + 58 0 20 -100 77 0 rrcurveto + endchar + + + 592 295 426 rmoveto + 18 0 13 12 0 20 rrcurveto + 0 20 -13 12 -18 0 rrcurveto + -16 0 -13 -12 0 -20 rrcurveto + 0 -20 13 -12 16 0 rrcurveto + -106 -26 rmoveto + 18 0 12 12 0 19 rrcurveto + 0 22 -13 10 -17 0 rrcurveto + -16 0 -13 -10 0 -22 rrcurveto + 0 -19 13 -12 16 0 rrcurveto + 212 -1 rmoveto + 19 0 11 13 0 19 rrcurveto + 0 21 -13 10 -17 0 rrcurveto + 
-15 0 -13 -10 0 -21 rrcurveto + 0 -19 13 -13 15 0 rrcurveto + -291 -81 rmoveto + 19 0 12 12 -100 callsubr + -12 16 0 rrcurveto + 370 -1 rmoveto + 19 0 11 12 0 20 rrcurveto + 0 20 -13 11 -17 0 rrcurveto + -15 0 -14 -11 0 -20 rrcurveto + 0 -20 14 -12 15 0 rrcurveto + -398 -110 rmoveto + 19 0 13 12 0 20 rrcurveto + 0 21 -13 10 -19 0 rrcurveto + -15 0 -13 -10 0 -21 rrcurveto + 0 -20 13 -12 15 0 rrcurveto + 426 0 rmoveto + 18 0 12 12 0 20 rrcurveto + 0 21 -15 10 -15 0 rrcurveto + -17 0 -13 -10 0 -21 rrcurveto + 0 -20 13 -12 17 0 rrcurveto + -398 -110 rmoveto + 19 0 12 13 0 19 rrcurveto + 0 21 -13 11 -18 0 rrcurveto + -16 0 -13 -11 0 -21 rrcurveto + 0 -19 13 -13 16 0 rrcurveto + 370 0 rmoveto + 19 0 11 13 0 19 rrcurveto + 0 21 -13 11 -17 0 rrcurveto + -15 0 -14 -11 0 -21 rrcurveto + 0 -19 14 -13 15 0 rrcurveto + -291 -82 rmoveto + 18 0 12 12 0 22 rrcurveto + 0 19 -13 10 -17 0 rrcurveto + -16 0 -13 -10 0 -19 rrcurveto + 0 -22 13 -12 16 0 rrcurveto + 212 0 rmoveto + 19 0 11 12 0 22 rrcurveto + 0 19 -13 10 -17 0 rrcurveto + -15 0 -13 -10 0 -19 rrcurveto + 0 -22 13 -12 15 0 rrcurveto + -106 -27 rmoveto + 18 0 13 11 -100 callsubr + -11 16 0 rrcurveto + endchar + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_otf/TestFamily2-Master1.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_otf/TestFamily2-Master1.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_otf/TestFamily2-Master1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_otf/TestFamily2-Master1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,693 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Paul D. Hunt + + + Test Family 2 + + + Regular + + + Version 2.020;ADBO;Test Family 2 Regular + + + Test Family 2 + + + Version 2.020 + + + TestFamily2-Master1 + + + Paul D. Hunt + + + Master 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + rmoveto + 47 0 33 35 0 45 rrcurveto + 0 45 -33 35 -47 0 rrcurveto + -47 0 -33 -35 0 -45 rrcurveto + 0 -45 33 -35 47 0 rrcurveto + return + + + rmoveto + 54 0 44 24 40 36 rrcurveto + 4 0 rlineto + 12 -48 rlineto + 140 0 rlineto + 0 278 rlineto + 0 164 -78 70 -130 0 rrcurveto + -78 0 -72 -24 -70 -42 rrcurveto + 60 -112 rlineto + 52 28 38 14 36 0 rrcurveto + 44 0 22 -16 4 -36 rrcurveto + -192 -20 -80 -58 0 -104 rrcurveto + 0 -82 56 -72 94 0 rrcurveto + 60 132 rmoveto + -30 0 -16 13 0 23 rrcurveto + 0 28 26 26 82 12 rrcurveto + 0 -68 rlineto + -18 -20 -16 -14 -28 0 rrcurveto + endchar + + + 0 rlineto + 0 12 rlineto + 0 47 20 15 32 0 rrcurveto + 16 0 18 -4 16 -6 rrcurveto + 30 126 rlineto + -22 8 -38 10 -46 0 rrcurveto + -148 0 -50 -95 0 -107 rrcurveto + 0 -7 rlineto + -66 -5 rlineto + 0 -128 rlineto + 66 0 rlineto + 0 -366 rlineto + 172 0 rlineto + 0 366 rlineto + return + + + -98 0 -164 rrcurveto + 0 -162 80 -100 124 0 rrcurveto + 46 140 rmoveto + -46 0 -28 34 0 90 rrcurveto + 0 88 34 32 36 0 rrcurveto + 22 0 26 -6 20 -18 rrcurveto + 0 -184 rlineto + -18 -28 -20 -8 -26 0 rrcurveto + endchar + + + -26 124 rlineto + -12 -4 -16 -4 -16 0 rrcurveto + -32 0 -28 18 0 55 rrcurveto + 0 171 rlineto + 114 0 rlineto 
+ 0 134 rlineto + -114 0 rlineto + 0 130 rlineto + -142 0 rlineto + -20 -130 rlineto + return + + + 113 rrcurveto + -106 6 rlineto + -4 -36 -10 -10 -16 0 rrcurveto + -26 0 -38 56 -60 0 rrcurveto + -80 0 -50 -45 -2 -113 rrcurveto + 106 -6 rlineto + 4 36 10 10 16 0 rrcurveto + 26 0 38 -56 60 0 rrcurveto + return + + + 580 240 -12 rmoveto + 44 0 48 24 34 34 rrcurveto + 4 0 rlineto + 12 -46 rlineto + 140 0 rlineto + 0 return + + + 0 rrcurveto + -23 0 -21 -16 0 -28 rrcurveto + 0 -30 21 -17 23 0 rrcurveto + return + + + rrcurveto + 0 28 -19 16 -26 0 rrcurveto + -23 0 -20 -16 0 -28 rrcurveto + 0 return + + + 0 rlineto + 0 -174 rlineto + 0 -122 54 -82 130 0 rrcurveto + return + + + rmoveto + 26 0 19 17 0 30 rrcurveto + 0 28 -21 16 -24 0 rrcurveto + return + + + + + + 704 76 0 rmoveto + 552 0 rlineto + 0 660 rlineto + -552 0 rlineto + 274 -236 rmoveto + -40 96 rlineto + -18 36 rlineto + 120 0 rlineto + -18 -36 rlineto + -40 -96 rlineto + -166 -252 rmoveto + 0 336 rlineto + 82 -168 rlineto + 246 -168 rmoveto + -82 168 rlineto + 82 168 rlineto + -228 -404 rmoveto + 26 56 rlineto + 36 96 rlineto + 4 0 rlineto + 36 -96 rlineto + 26 -56 rlineto + endchar + + + 584 412 0 rmoveto + 182 0 rlineto + -198 650 rlineto + -208 0 rlineto + -198 -650 rlineto + 176 0 rlineto + 32 138 rlineto + 182 0 rlineto + -140 178 rmoveto + 16 62 16 78 14 66 rrcurveto + 4 0 rlineto + 16 -65 16 -79 16 -62 rrcurveto + 11 -45 rlineto + -120 0 rlineto + endchar + + + 516 346 0 rmoveto + 180 0 rlineto + -165 532 rlineto + -206 0 rlineto + -165 -532 rlineto + 174 0 rlineto + 21 94 rlineto + 140 0 rlineto + -106 150 rmoveto + 11 48 11 66 11 51 rrcurveto + 4 0 rlineto + 13 -50 11 -67 11 -48 rrcurveto + 6 -28 rlineto + -84 0 rlineto + endchar + + + 536 188 -12 -106 callsubr + + + -101 callsubr + 500 rlineto + -134 0 rlineto + -14 -50 rlineto + -4 0 rlineto + -38 44 -40 18 -48 0 rrcurveto + -102 0 -106 -104 callsubr + + + 690 670 126 rmoveto + -31 4 -38 12 -39 19 rrcurveto + 49 66 34 71 23 76 rrcurveto + -156 0 
rlineto + -15 -56 -25 -48 -30 -40 rrcurveto + -41 29 -40 33 -33 35 rrcurveto + 66 43 64 53 0 85 rrcurveto + 0 94 -68 60 -104 0 rrcurveto + -116 0 -72 -82 0 -94 rrcurveto + 0 -39 14 -45 25 -45 rrcurveto + -62 -38 -53 -52 0 -91 rrcurveto + 0 -98 73 -90 151 0 rrcurveto + 83 0 70 24 57 39 rrcurveto + 58 -31 59 -22 57 -10 rrcurveto + -391 498 rmoveto + 0 42 24 22 27 0 rrcurveto + 25 0 13 -14 0 -28 rrcurveto + 0 -38 -30 -25 -41 -24 rrcurveto + -12 23 -6 22 0 20 rrcurveto + -55 -300 rmoveto + 0 23 12 19 18 19 rrcurveto + 34 -40 40 -38 44 -35 rrcurveto + -22 -10 -21 -6 -21 0 rrcurveto + -52 0 -32 28 0 40 rrcurveto + endchar + + + 536 330 572 rmoveto + 80 0 50 45 2 -102 callsubr + -142 -584 -106 callsubr + + + -101 callsubr + 696 rlineto + -172 0 rlineto + 0 -162 rlineto + 6 -72 rlineto + -30 30 -32 20 -54 0 rrcurveto + -102 0 -102 -104 callsubr + + + 360 344 366 rmoveto + 0 134 rlineto + -84 -105 callsubr + endchar + + + 724 706 6 rmoveto + -103 callsubr + -154 -105 callsubr + 144 -98 callsubr + 55 0 37 10 26 8 rrcurveto + endchar + + + 582 58 0 rmoveto + 172 0 rlineto + 0 328 rlineto + 26 24 18 14 32 0 rrcurveto + 34 0 16 -16 0 -64 rrcurveto + 0 -286 rlineto + 172 0 rlineto + 0 308 rlineto + 0 124 -46 80 -110 0 rrcurveto + -68 0 -50 -34 -40 -38 rrcurveto + -4 0 rlineto + -12 60 rlineto + -140 0 rlineto + endchar + + + 200 endchar + + + 400 264 -12 rmoveto + 55 0 37 10 26 8 rrcurveto + -103 callsubr + -76 -6 rlineto + 0 -128 rlineto + 66 -98 callsubr + endchar + + + 0 64 572 rmoveto + 80 0 50 45 2 -102 callsubr + endchar + + + 0 -114 562 -107 callsubr + 228 0 -107 callsubr + endchar + + + 0 -114 -224 -107 callsubr + 228 0 -107 callsubr + endchar + + + 0 64 -228 rmoveto + 80 0 50 45 2 -102 callsubr + endchar + + + 574 287 421 rmoveto + 27 0 18 18 0 29 -99 callsubr + -29 20 -18 23 0 rrcurveto + -105 -26 rmoveto + 26 0 19 17 0 28 rrcurveto + 0 30 -21 16 -24 0 rrcurveto + -24 0 -21 -16 0 -30 rrcurveto + 0 -28 21 -17 24 0 rrcurveto + 210 -1 rmoveto + 28 0 18 17 0 29 rrcurveto + 
0 29 -21 17 -25 0 rrcurveto + -23 0 -21 -17 0 -29 rrcurveto + 0 -29 21 -17 23 0 rrcurveto + -288 -81 rmoveto + 27 0 18 18 0 29 rrcurveto + 0 30 -19 15 -26 0 rrcurveto + -22 0 -21 -15 0 -30 rrcurveto + 0 -29 21 -18 22 0 rrcurveto + 368 0 rmoveto + 26 0 18 17 0 29 rrcurveto + 0 29 -20 16 -24 0 rrcurveto + -25 0 -21 -16 0 -29 rrcurveto + 0 -29 21 -17 25 0 rrcurveto + -396 -109 rmoveto + 28 0 18 17 0 30 rrcurveto + 0 28 -20 16 -26 -100 callsubr + 422 0 -97 callsubr + -25 0 -20 -16 0 -28 rrcurveto + 0 -30 20 -17 25 0 rrcurveto + -394 -108 rmoveto + 27 0 18 16 0 29 rrcurveto + 0 29 -19 17 -26 0 rrcurveto + -22 0 -21 -17 0 -29 rrcurveto + 0 -29 21 -16 22 0 rrcurveto + 368 0 rmoveto + 26 0 18 16 0 29 rrcurveto + 0 29 -20 17 -24 0 rrcurveto + -25 0 -21 -17 0 -29 rrcurveto + 0 -29 21 -16 25 0 rrcurveto + -290 -82 -97 callsubr + -24 0 -21 -16 0 -28 rrcurveto + 0 -30 21 -17 24 0 rrcurveto + 210 0 rmoveto + 28 0 18 17 0 30 rrcurveto + 0 28 -21 16 -25 -100 callsubr + -105 -27 rmoveto + 27 0 18 17 0 30 -99 callsubr + -30 20 -17 23 0 rrcurveto + endchar + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily2-Master0.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily2-Master0.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily2-Master0.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily2-Master0.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1149 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Paul D. Hunt + + + Alternate a + + + Test Family 2 + + + Regular + + + Version 2.020;ADBO;Test Family 2 Regular + + + Test Family 2 + + + Version 2.020 + + + TestFamily2-Master0 + + + Paul D. 
Hunt + + + Master 0 + + + Alternate a + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily2-Master1.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily2-Master1.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily2-Master1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily2-Master1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,986 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Paul D. Hunt + + + Test Family 2 + + + Regular + + + Version 2.020;ADBO;Test Family 2 Regular + + + Test Family 2 + + + Version 2.020 + + + TestFamily2-Master1 + + + Paul D. Hunt + + + Master 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Bold.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Bold.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Bold.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Bold.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,520 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright 2015 Google Inc. All Rights Reserved. + + + Test Family 3 + + + Bold + + + 1.902;GOOG;TestFamily3-Bold + + + Test Family 3 Bold + + + Version 1.902 + + + TestFamily3-Bold + + + Noto is a trademark of Google Inc. + + + Monotype Imaging Inc. + + + Monotype Design Team + + + Designed by Monotype design team. + + + http://www.google.com/get/noto/ + + + http://www.monotype.com/studio + + + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. 
+ + + http://scripts.sil.org/OFL + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedBold.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedBold.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedBold.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedBold.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,526 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright 2015 Google Inc. All Rights Reserved. + + + Test Family 3 Condensed Bold + + + Regular + + + 1.902;GOOG;TestFamily3-CondensedBold + + + Test Family 3 Condensed Bold + + + Version 1.902 + + + TestFamily3-CondensedBold + + + Noto is a trademark of Google Inc. + + + Monotype Imaging Inc. + + + Monotype Design Team + + + Designed by Monotype design team. + + + http://www.google.com/get/noto/ + + + http://www.monotype.com/studio + + + This Font Software is licensed under the SIL Open Font License, Version 1.1. 
This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + + + http://scripts.sil.org/OFL + + + Test Family 3 + + + Condensed Bold + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedLight.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedLight.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedLight.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedLight.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,526 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright 2015 Google Inc. All Rights Reserved. 
+ + + Test Family 3 Condensed Light + + + Regular + + + 1.902;GOOG;TestFamily3-CondensedLight + + + Test Family 3 Condensed Light + + + Version 1.902 + + + TestFamily3-CondensedLight + + + Noto is a trademark of Google Inc. + + + Monotype Imaging Inc. + + + Monotype Design Team + + + Designed by Monotype design team. + + + http://www.google.com/get/noto/ + + + http://www.monotype.com/studio + + + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + + + http://scripts.sil.org/OFL + + + Test Family 3 + + + Condensed Light + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedSemiBold.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedSemiBold.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedSemiBold.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-CondensedSemiBold.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,526 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright 2015 Google Inc. All Rights Reserved. + + + Test Family 3 Condensed SemiBold + + + Regular + + + 1.902;GOOG;TestFamily3-CondensedSemiBold + + + Test Family 3 Condensed SemiBold + + + Version 1.902 + + + TestFamily3-CondensedSemiBold + + + Noto is a trademark of Google Inc. + + + Monotype Imaging Inc. + + + Monotype Design Team + + + Designed by Monotype design team. + + + http://www.google.com/get/noto/ + + + http://www.monotype.com/studio + + + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. 
+ + + http://scripts.sil.org/OFL + + + Test Family 3 + + + Condensed SemiBold + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Condensed.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Condensed.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Condensed.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Condensed.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,526 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright 2015 Google Inc. All Rights Reserved. + + + Test Family 3 Condensed + + + Regular + + + 1.902;GOOG;TestFamily3-Condensed + + + Test Family 3 Condensed + + + Version 1.902 + + + TestFamily3-Condensed + + + Noto is a trademark of Google Inc. + + + Monotype Imaging Inc. + + + Monotype Design Team + + + Designed by Monotype design team. + + + http://www.google.com/get/noto/ + + + http://www.monotype.com/studio + + + This Font Software is licensed under the SIL Open Font License, Version 1.1. 
This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + + + http://scripts.sil.org/OFL + + + Test Family 3 + + + Condensed + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Light.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Light.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Light.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Light.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,526 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright 2015 Google Inc. All Rights Reserved. + + + Test Family 3 Light + + + Regular + + + 1.902;GOOG;TestFamily3-Light + + + Test Family 3 Light + + + Version 1.902 + + + TestFamily3-Light + + + Noto is a trademark of Google Inc. + + + Monotype Imaging Inc. 
+ + + Monotype Design Team + + + Designed by Monotype design team. + + + http://www.google.com/get/noto/ + + + http://www.monotype.com/studio + + + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + + + http://scripts.sil.org/OFL + + + Test Family 3 + + + Light + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Regular.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Regular.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-Regular.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,520 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright 2015 Google Inc. All Rights Reserved. 
+ + + Test Family 3 + + + Regular + + + 1.902;GOOG;TestFamily3-Regular + + + Test Family 3 Regular + + + Version 1.902 + + + TestFamily3-Regular + + + Noto is a trademark of Google Inc. + + + Monotype Imaging Inc. + + + Monotype Design Team + + + Designed by Monotype design team. + + + http://www.google.com/get/noto/ + + + http://www.monotype.com/studio + + + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + + + http://scripts.sil.org/OFL + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-SemiBold.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-SemiBold.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-SemiBold.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily3-SemiBold.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,526 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copyright 2015 Google Inc. All Rights Reserved. + + + Test Family 3 SemiBold + + + Regular + + + 1.902;GOOG;TestFamily3-SemiBold + + + Test Family 3 SemiBold + + + Version 1.902 + + + TestFamily3-SemiBold + + + Noto is a trademark of Google Inc. + + + Monotype Imaging Inc. + + + Monotype Design Team + + + Designed by Monotype design team. + + + http://www.google.com/get/noto/ + + + http://www.monotype.com/studio + + + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + + + http://scripts.sil.org/OFL + + + Test Family 3 + + + SemiBold + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master0.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master0.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master0.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master0.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,520 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + FDEF[ ] /* FunctionDefinition */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + MDAP[0] /* MoveDirectAbsPt */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Family + + + Regular + + + Version 1.001;ADBO;Test Family Regular + + + Test Family + + + Version 1.001 + + + TestFamily-Master0 + + + Frank Grießhammer + + + Master 0 + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master1.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master1.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master1.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master1.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,520 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + FDEF[ ] /* FunctionDefinition */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + MDAP[0] /* MoveDirectAbsPt */ + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Family + + + Regular + + + Version 1.001;ADBO;Test Family Regular + + + Test Family + + + Version 1.001 + + + TestFamily-Master1 + + + Frank Grießhammer + + + Master 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master2.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master2.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,504 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + Test Family + + + Regular + + + Version 1.001;ADBO;Test Family Regular + + + Test Family + + + Version 1.001 + + + TestFamily-Master2 + + + Frank Grießhammer + + + Master 2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master3.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master3.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master3.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master3.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,504 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Family + + + Regular + + + Version 1.001;ADBO;Test Family Regular + + + Test Family + + + Version 1.001 + + + TestFamily-Master3 + + + Frank Grießhammer + + + Master 3 + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master4.ttx 
fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master4.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master4.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_interpolatable_ttf/TestFamily-Master4.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,504 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Family + + + Regular + + + Version 1.001;ADBO;Test Family Regular + + + Test Family + + + Version 1.001 + + + TestFamily-Master4 + + + Frank Grießhammer + + + Master 4 + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ttx_varfont_ttf/Mutator_IUP.ttx fonttools-3.21.2/Tests/varLib/data/master_ttx_varfont_ttf/Mutator_IUP.ttx --- fonttools-3.0/Tests/varLib/data/master_ttx_varfont_ttf/Mutator_IUP.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ttx_varfont_ttf/Mutator_IUP.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,549 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + VarFont + + + Regular + + + VarFont Regular: 2017 + + + VarFont Regular + + + VarFont-Regular + + + Width + + + Ascender + + + Regular + + + VarFont + + + Regular + + + VarFont Regular: 2017 + + + VarFont Regular + + + VarFont-Regular + + + Width + + + Ascender + + + Regular + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + wdth + 60.0 + 100.0 + 100.0 + 256 + + + + + ASCN + 608.0 + 608.0 + 648.0 + 257 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/features.fea fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/features.fea --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/features.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/features.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,81 @@ +table head { + FontRevision 2.020; +} head; + + +table name { + nameid 9 "Paul D. Hunt"; + nameid 9 1 "Paul D. 
Hunt"; +} name; + + +table hhea { + Ascender 984; + Descender -273; + LineGap 0; +} hhea; + + +table BASE { + HorizAxis.BaseTagList ideo romn; + HorizAxis.BaseScriptList + latn romn -170 0, + grek romn -170 0, + cyrl romn -170 0, + DFLT romn -170 0; +} BASE; + + +table OS/2 { + Panose 2 11 3 3 3 4 3 2 2 4; + XHeight 478; + WeightClass 200; + + TypoAscender 750; + TypoDescender -250; + TypoLineGap 0; + winAscent 984; + winDescent 273; + + CapHeight 660; + WidthClass 5; + Vendor "ADBO"; + FSType 0; +} OS/2; + + +languagesystem DFLT dflt; +languagesystem latn dflt; + +# GSUB ========================================= +# Merging of GSUB is not performed. The variable +# font will inherit the GSUB table from the +# base master. + +feature c2sc { + sub A by A.sc; # GSUB LookupType 1 +} c2sc; + +feature ss01 { + featureNames { + name "Alternate a"; + name 1 0 0 "Alternate a";}; + sub a by a.alt; +} ss01; + +feature ccmp { + sub ampersand by a n d; # GSUB LookupType 2 +} ccmp; + +feature salt { + sub a from [a.alt A.sc]; # GSUB LookupType 3 +} salt; + +feature liga { + sub f t by f_t; # GSUB LookupType 4 +} liga; + +feature calt { + sub a' t by a.alt; # GSUB LookupType 6 +} calt; + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,140 @@ + + + + + ascender + 722 + capHeight + 660 + descender + -222 + familyName + Test Family 2 + italicAngle + 0 + openTypeHheaAscender + 984 + openTypeHheaDescender + -273 + openTypeHheaLineGap + 0 + openTypeNameDesigner + Paul D. 
Hunt + openTypeOS2CodePageRanges + + 0 + 1 + 29 + + openTypeOS2Panose + + 2 + 11 + 5 + 3 + 3 + 4 + 3 + 2 + 2 + 4 + + openTypeOS2TypoAscender + 750 + openTypeOS2TypoDescender + -250 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + + openTypeOS2VendorID + ADBO + openTypeOS2WeightClass + 200 + openTypeOS2WinAscent + 984 + openTypeOS2WinDescent + 273 + postscriptBlueFuzz + 0 + postscriptBlueScale + 0.0625 + postscriptBlueValues + + -12 + 0 + 478 + 490 + 510 + 522 + 570 + 582 + 640 + 652 + 660 + 672 + 722 + 734 + + postscriptFamilyBlues + + -12 + 0 + 486 + 498 + 518 + 530 + 574 + 586 + 638 + 650 + 656 + 668 + 712 + 724 + + postscriptFamilyOtherBlues + + -217 + -205 + + postscriptFontName + TestFamily2-Master0 + postscriptForceBold + + postscriptOtherBlues + + -234 + -222 + + postscriptStemSnapH + + 28 + 40 + + postscriptStemSnapV + + 32 + 48 + + postscriptUnderlinePosition + -75 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family 2 + styleName + Master 0 + unitsPerEm + 1000 + versionMajor + 2 + versionMinor + 20 + xHeight + 478 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/a.alt.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/a.alt.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/a.alt.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/a.alt.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/a.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/a.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/a.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/a.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/A_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/A_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/A_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/A_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/ampersand.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/ampersand.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/ampersand.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/ampersand.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/A_.sc.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/A_.sc.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/A_.sc.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/A_.sc.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/atilde.glif 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/atilde.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/atilde.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/atilde.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/circledotted.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/circledotted.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/circledotted.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/circledotted.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,175 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,42 @@ + + + + + .notdef + _notdef.glif + A + A_.glif + A.sc + A_.sc.glif + a + a.glif + a.alt + a.alt.glif + ampersand + ampersand.glif + atilde + atilde.glif + circledotted + circledotted.glif + d + d.glif + dieresisbelowcmb + dieresisbelowcmb.glif + dieresiscmb + dieresiscmb.glif + f + f.glif + f_t + f_t.glif + n + n.glif 
+ space + space.glif + t + t.glif + tildebelowcmb + tildebelowcmb.glif + tildecmb + tildecmb.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/d.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/d.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/d.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/d.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/dieresisbelowcmb.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/dieresisbelowcmb.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/dieresisbelowcmb.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/dieresisbelowcmb.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/dieresiscmb.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/dieresiscmb.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/dieresiscmb.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/dieresiscmb.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/f.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/f.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/f.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/f.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/f_t.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/f_t.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/f_t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/f_t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/n.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/n.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/n.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/n.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/_notdef.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/_notdef.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/_notdef.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/_notdef.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/space.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/space.glif --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/space.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/space.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/t.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/t.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/tildebelowcmb.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/tildebelowcmb.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/tildebelowcmb.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/tildebelowcmb.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/tildecmb.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/tildecmb.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/tildecmb.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/glyphs/tildecmb.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/lib.plist --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,40 @@ + + + + + public.glyphOrder + + .notdef + space + A + a + d + f + n + t + f_t + a.alt + A.sc + atilde + ampersand + circledotted + tildecmb + dieresiscmb + tildebelowcmb + dieresisbelowcmb + + public.postscriptNames + + circledotted + uni25CC + dieresisbelowcmb + uni0324 + dieresiscmb + uni0308 + tildebelowcmb + uni0330 + tildecmb + uni0303 + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master0.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 2 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/features.fea fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/features.fea --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/features.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/features.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,54 @@ +table head { + FontRevision 2.020; +} head; + + +table name { + nameid 9 "Paul D. Hunt"; + nameid 9 1 "Paul D. 
Hunt"; +} name; + + +table hhea { + Ascender 984; + Descender -273; + LineGap 0; +} hhea; + + +table BASE { + HorizAxis.BaseTagList ideo romn; + HorizAxis.BaseScriptList + latn romn -170 0, + grek romn -170 0, + cyrl romn -170 0, + DFLT romn -170 0; +} BASE; + + +table OS/2 { + Panose 2 11 8 3 3 4 3 2 2 4; + XHeight 500; + WeightClass 900; + + TypoAscender 750; + TypoDescender -250; + TypoLineGap 0; + winAscent 984; + winDescent 273; + + CapHeight 660; + WidthClass 5; + Vendor "ADBO"; + FSType 0; +} OS/2; + + +languagesystem DFLT dflt; +languagesystem latn dflt; + +# GSUB ========================================= +# No merging of GSUB is performed. The variable +# font will inherit the GSUB table from the +# base master. + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,140 @@ + + + + + ascender + 696 + capHeight + 650 + descender + -176 + familyName + Test Family 2 + italicAngle + 0 + openTypeHheaAscender + 984 + openTypeHheaDescender + -273 + openTypeHheaLineGap + 0 + openTypeNameDesigner + Paul D. 
Hunt + openTypeOS2CodePageRanges + + 0 + 1 + 29 + + openTypeOS2Panose + + 2 + 11 + 5 + 3 + 3 + 4 + 3 + 2 + 2 + 4 + + openTypeOS2TypoAscender + 750 + openTypeOS2TypoDescender + -250 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + + openTypeOS2VendorID + ADBO + openTypeOS2WeightClass + 900 + openTypeOS2WinAscent + 984 + openTypeOS2WinDescent + 273 + postscriptBlueFuzz + 0 + postscriptBlueScale + 0.0625 + postscriptBlueValues + + -12 + 0 + 500 + 512 + 532 + 544 + 580 + 592 + 634 + 646 + 650 + 662 + 696 + 708 + + postscriptFamilyBlues + + -12 + 0 + 486 + 498 + 518 + 530 + 574 + 586 + 638 + 650 + 656 + 668 + 712 + 724 + + postscriptFamilyOtherBlues + + -217 + -205 + + postscriptFontName + TestFamily2-Master1 + postscriptForceBold + + postscriptOtherBlues + + -188 + -176 + + postscriptStemSnapH + + 134 + 144 + + postscriptStemSnapV + + 172 + 176 + + postscriptUnderlinePosition + -75 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family 2 + styleName + Master 1 + unitsPerEm + 1000 + versionMajor + 2 + versionMinor + 20 + xHeight + 500 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/a.alt.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/a.alt.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/a.alt.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/a.alt.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/a.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/a.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/a.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/a.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/A_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/A_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/A_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/A_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/ampersand.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/ampersand.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/ampersand.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/ampersand.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/A_.sc.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/A_.sc.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/A_.sc.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/A_.sc.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/atilde.glif 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/atilde.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/atilde.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/atilde.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,9 @@ + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/circledotted.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/circledotted.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/circledotted.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/circledotted.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,175 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,42 @@ + + + + + .notdef + _notdef.glif + A + A_.glif + A.sc + A_.sc.glif + a + a.glif + a.alt + a.alt.glif + ampersand + ampersand.glif + atilde + atilde.glif + circledotted + circledotted.glif + d + d.glif + dieresisbelowcmb + dieresisbelowcmb.glif + dieresiscmb + dieresiscmb.glif + f + f.glif + f_t + f_t.glif + n + n.glif 
+ space + space.glif + t + t.glif + tildebelowcmb + tildebelowcmb.glif + tildecmb + tildecmb.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/d.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/d.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/d.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/d.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/dieresisbelowcmb.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/dieresisbelowcmb.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/dieresisbelowcmb.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/dieresisbelowcmb.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/dieresiscmb.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/dieresiscmb.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/dieresiscmb.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/dieresiscmb.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/f.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/f.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/f.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/f.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/f_t.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/f_t.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/f_t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/f_t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/n.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/n.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/n.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/n.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/_notdef.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/_notdef.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/_notdef.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/_notdef.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/space.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/space.glif --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/space.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/space.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/t.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/t.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/tildebelowcmb.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/tildebelowcmb.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/tildebelowcmb.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/tildebelowcmb.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,7 @@ + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/tildecmb.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/tildecmb.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/tildecmb.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/glyphs/tildecmb.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/lib.plist --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,40 @@ + + + + + public.glyphOrder + + .notdef + space + A + a + d + f + n + t + f_t + a.alt + A.sc + atilde + ampersand + circledotted + tildecmb + dieresiscmb + tildebelowcmb + dieresisbelowcmb + + public.postscriptNames + + circledotted + uni25CC + dieresisbelowcmb + uni0324 + dieresiscmb + uni0308 + tildebelowcmb + uni0330 + tildecmb + uni0303 + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily2-Master1.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 2 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,153 @@ + + + + + ascender + 760 + capHeight + 714 + copyright + Copyright 2015 Google Inc. All Rights Reserved. + descender + -240 + familyName + Test Family 3 + guidelines + + openTypeHeadCreated + 2016/03/15 19:50:39 + openTypeHheaAscender + 1069 + openTypeHheaDescender + -293 + openTypeHheaLineGap + 0 + openTypeNameDescription + Designed by Monotype design team. 
+ openTypeNameDesigner + Monotype Design Team + openTypeNameDesignerURL + http://www.monotype.com/studio + openTypeNameLicense + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + openTypeNameLicenseURL + http://scripts.sil.org/OFL + openTypeNameManufacturer + Monotype Imaging Inc. + openTypeNameManufacturerURL + http://www.google.com/get/noto/ + openTypeNameVersion + Version 1.902 + openTypeOS2Panose + + 2 + 11 + 10 + 2 + 4 + 5 + 4 + 2 + 2 + 4 + + openTypeOS2Selection + + 8 + + openTypeOS2Type + + openTypeOS2TypoAscender + 1069 + openTypeOS2TypoDescender + -293 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 9 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 62 + 64 + 67 + 69 + 91 + 116 + + openTypeOS2VendorID + GOOG + openTypeOS2WinAscent + 1069 + openTypeOS2WinDescent + 293 + postscriptBlueValues + + -15 + 0 + 553 + 563 + 591 + 601 + 714 + 725 + 760 + 766 + + postscriptFamilyBlues + + postscriptFamilyOtherBlues + + postscriptOtherBlues + + -256 + -240 + + postscriptStemSnapH + + 115 + 143 + 158 + + postscriptStemSnapV + + 132 + 176 + 194 + + postscriptUnderlinePosition + -100 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family 3 + styleMapStyleName + bold + styleName + Bold + trademark + Noto is a trademark of Google Inc. 
+ unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 902 + xHeight + 553 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + F + F_.glif + T + T_.glif + l + l.glif + n + n.glif + o + o.glif + s + s.glif + t + t.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/F_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/F_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/F_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/F_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/layerinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/layerinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/layerinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/layerinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/l.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/l.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/l.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/l.glif 2018-01-08 12:40:40.000000000 +0000 @@ 
-0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/n.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/n.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/n.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/n.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/o.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/o.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/o.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/o.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/s.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/s.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/s.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/s.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/t.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/t.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/T_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/T_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/T_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/glyphs/T_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/groups.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/groups.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/groups.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/groups.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + public.kern1.T.left + + T + + public.kern1.d + + l + + public.kern1.n.left + + n + + public.kern1.o.left + + o + + public.kern1.t.left + + t + + public.kern2.T.right + + T + + public.kern2.b + + l + + public.kern2.n.right + + n + + public.kern2.o.right + + o + + public.kern2.s.right + + s + + public.kern2.t.right + + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/kerning.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/kerning.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/kerning.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/kerning.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + public.kern1.T.left + + public.kern2.T.right + 20 + public.kern2.n.right + -50 + public.kern2.o.right + -70 + public.kern2.s.right + -60 + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/layercontents.plist 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/layercontents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/layercontents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/layercontents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + public.default + glyphs + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + public.glyphOrder + + F + T + l + n + o + s + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Bold.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 3 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,153 @@ + + + + + ascender + 760 + capHeight + 714 + copyright + Copyright 2015 Google Inc. All Rights Reserved. 
+ descender + -240 + familyName + Test Family 3 + guidelines + + openTypeHeadCreated + 2016/03/15 19:50:39 + openTypeHheaAscender + 1069 + openTypeHheaDescender + -293 + openTypeHheaLineGap + 0 + openTypeNameDescription + Designed by Monotype design team. + openTypeNameDesigner + Monotype Design Team + openTypeNameDesignerURL + http://www.monotype.com/studio + openTypeNameLicense + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + openTypeNameLicenseURL + http://scripts.sil.org/OFL + openTypeNameManufacturer + Monotype Imaging Inc. + openTypeNameManufacturerURL + http://www.google.com/get/noto/ + openTypeNameVersion + Version 1.902 + openTypeOS2Panose + + 2 + 11 + 10 + 6 + 4 + 5 + 4 + 2 + 2 + 4 + + openTypeOS2Selection + + 8 + + openTypeOS2Type + + openTypeOS2TypoAscender + 1069 + openTypeOS2TypoDescender + -293 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 9 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 62 + 64 + 67 + 69 + 91 + 116 + + openTypeOS2VendorID + GOOG + openTypeOS2WinAscent + 1069 + openTypeOS2WinDescent + 293 + postscriptBlueValues + + -15 + 0 + 553 + 564 + 589 + 599 + 714 + 726 + 760 + 766 + + postscriptFamilyBlues + + postscriptFamilyOtherBlues + + postscriptOtherBlues + + -256 + -240 + + postscriptStemSnapH + + 115 + 133 + 146 + + postscriptStemSnapV + + 117 + 150 + 170 + + postscriptUnderlinePosition + -100 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family 3 Condensed Bold + styleMapStyleName + regular + styleName + Condensed Bold + trademark + Noto is a trademark of Google Inc. 
+ unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 902 + xHeight + 553 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + F + F_.glif + T + T_.glif + l + l.glif + n + n.glif + o + o.glif + s + s.glif + t + t.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/F_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/F_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/F_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/F_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/layerinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/layerinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/layerinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/layerinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/l.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/l.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/l.glif 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/l.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/n.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/n.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/n.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/n.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/o.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/o.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/o.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/o.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/s.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/s.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/s.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/s.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/t.glif 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/t.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/T_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/T_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/T_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/glyphs/T_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/groups.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/groups.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/groups.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/groups.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + public.kern1.T.left + + T + + public.kern1.d + + l + + public.kern1.n.left + + n + + public.kern1.o.left + + o + + public.kern1.t.left + + t + + public.kern2.T.right + + T + + public.kern2.b + + l + + public.kern2.n.right + + n + + public.kern2.o.right + + o + + public.kern2.s.right + + s + + public.kern2.t.right + + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/kerning.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/kerning.plist --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/kerning.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/kerning.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + public.kern1.T.left + + public.kern2.T.right + 12 + public.kern2.n.right + -30 + public.kern2.o.right + -42 + public.kern2.s.right + -36 + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/layercontents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/layercontents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/layercontents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/layercontents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + public.default + glyphs + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + public.glyphOrder + + F + T + l + n + o + s + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedBold.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 3 + + diff -Nru 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,149 @@ + + + + + ascender + 760 + capHeight + 714 + copyright + Copyright 2015 Google Inc. All Rights Reserved. + descender + -232 + familyName + Test Family 3 + guidelines + + openTypeHeadCreated + 2016/03/15 19:50:39 + openTypeHheaAscender + 1069 + openTypeHheaDescender + -293 + openTypeHheaLineGap + 0 + openTypeNameDescription + Designed by Monotype design team. + openTypeNameDesigner + Monotype Design Team + openTypeNameDesignerURL + http://www.monotype.com/studio + openTypeNameLicense + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + openTypeNameLicenseURL + http://scripts.sil.org/OFL + openTypeNameManufacturer + Monotype Imaging Inc. 
+ openTypeNameManufacturerURL + http://www.google.com/get/noto/ + openTypeNameVersion + Version 1.902 + openTypeOS2Panose + + 2 + 11 + 2 + 6 + 4 + 5 + 4 + 2 + 2 + 4 + + openTypeOS2Selection + + 8 + + openTypeOS2Type + + openTypeOS2TypoAscender + 1069 + openTypeOS2TypoDescender + -293 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 9 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 62 + 64 + 67 + 69 + 91 + 116 + + openTypeOS2VendorID + GOOG + openTypeOS2WinAscent + 1069 + openTypeOS2WinDescent + 293 + postscriptBlueValues + + -15 + 0 + 527 + 537 + 559 + 569 + 714 + 724 + 760 + 765 + + postscriptFamilyBlues + + postscriptFamilyOtherBlues + + postscriptOtherBlues + + -242 + -232 + + postscriptStemSnapH + + 26 + + postscriptStemSnapV + + 26 + + postscriptUnderlinePosition + -100 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family 3 Condensed Light + styleMapStyleName + regular + styleName + Condensed Light + trademark + Noto is a trademark of Google Inc. 
+ unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 902 + xHeight + 527 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + F + F_.glif + T + T_.glif + l + l.glif + n + n.glif + o + o.glif + s + s.glif + t + t.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/F_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/F_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/F_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/F_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/layerinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/layerinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/layerinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/layerinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/l.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/l.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/l.glif 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/l.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/n.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/n.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/n.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/n.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/o.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/o.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/o.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/o.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/s.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/s.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/s.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/s.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/t.glif 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/t.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/T_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/T_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/T_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/glyphs/T_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/groups.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/groups.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/groups.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/groups.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + public.kern1.T.left + + T + + public.kern1.d + + l + + public.kern1.n.left + + n + + public.kern1.o.left + + o + + public.kern1.t.left + + t + + public.kern2.T.right + + T + + public.kern2.b + + l + + public.kern2.n.right + + n + + public.kern2.o.right + + o + + public.kern2.s.right + + s + + public.kern2.t.right + + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/kerning.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/kerning.plist --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/kerning.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/kerning.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + public.kern1.T.left + + public.kern2.T.right + 12 + public.kern2.n.right + -30 + public.kern2.o.right + -42 + public.kern2.s.right + -36 + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/layercontents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/layercontents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/layercontents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/layercontents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + public.default + glyphs + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + public.glyphOrder + + F + T + l + n + o + s + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedLight.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 3 + 
+ diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,153 @@ + + + + + ascender + 760 + capHeight + 714 + copyright + Copyright 2015 Google Inc. All Rights Reserved. + descender + -240 + familyName + Test Family 3 + guidelines + + openTypeHeadCreated + 2016/03/15 19:50:39 + openTypeHheaAscender + 1069 + openTypeHheaDescender + -293 + openTypeHheaLineGap + 0 + openTypeNameDescription + Designed by Monotype design team. + openTypeNameDesigner + Monotype Design Team + openTypeNameDesignerURL + http://www.monotype.com/studio + openTypeNameLicense + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + openTypeNameLicenseURL + http://scripts.sil.org/OFL + openTypeNameManufacturer + Monotype Imaging Inc. 
+ openTypeNameManufacturerURL + http://www.google.com/get/noto/ + openTypeNameVersion + Version 1.902 + openTypeOS2Panose + + 2 + 11 + 8 + 6 + 4 + 5 + 4 + 2 + 2 + 4 + + openTypeOS2Selection + + 8 + + openTypeOS2Type + + openTypeOS2TypoAscender + 1069 + openTypeOS2TypoDescender + -293 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 9 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 62 + 64 + 67 + 69 + 91 + 116 + + openTypeOS2VendorID + GOOG + openTypeOS2WinAscent + 1069 + openTypeOS2WinDescent + 293 + postscriptBlueValues + + -15 + 0 + 547 + 558 + 582 + 592 + 714 + 726 + 760 + 766 + + postscriptFamilyBlues + + postscriptFamilyOtherBlues + + postscriptOtherBlues + + -256 + -240 + + postscriptStemSnapH + + 95 + 108 + 119 + + postscriptStemSnapV + + 97 + 121 + 137 + + postscriptUnderlinePosition + -100 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family 3 Condensed SemiBold + styleMapStyleName + regular + styleName + Condensed SemiBold + trademark + Noto is a trademark of Google Inc. 
+ unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 902 + xHeight + 547 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + F + F_.glif + T + T_.glif + l + l.glif + n + n.glif + o + o.glif + s + s.glif + t + t.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/F_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/F_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/F_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/F_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/layerinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/layerinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/layerinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/layerinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/l.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/l.glif --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/l.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/l.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/n.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/n.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/n.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/n.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/o.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/o.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/o.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/o.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/s.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/s.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/s.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/s.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/t.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/t.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/T_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/T_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/T_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/glyphs/T_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/groups.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/groups.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/groups.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/groups.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + public.kern1.T.left + + T + + public.kern1.d + + l + + public.kern1.n.left + + n + + public.kern1.o.left + + o + + public.kern1.t.left + + t + + public.kern2.T.right + + T + + public.kern2.b + + l + + public.kern2.n.right + + n + + public.kern2.o.right + + o + + public.kern2.s.right + + s + + public.kern2.t.right + + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/kerning.plist 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/kerning.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/kerning.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/kerning.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + public.kern1.T.left + + public.kern2.T.right + 12 + public.kern2.n.right + -30 + public.kern2.o.right + -42 + public.kern2.s.right + -36 + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/layercontents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/layercontents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/layercontents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/layercontents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + public.default + glyphs + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + public.glyphOrder + + F + T + l + n + o + s + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-CondensedSemiBold.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 3 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,153 @@ + + + + + ascender + 760 + capHeight + 714 + copyright + Copyright 2015 Google Inc. All Rights Reserved. + descender + -240 + familyName + Test Family 3 + guidelines + + openTypeHeadCreated + 2016/03/15 19:50:39 + openTypeHheaAscender + 1069 + openTypeHheaDescender + -293 + openTypeHheaLineGap + 0 + openTypeNameDescription + Designed by Monotype design team. + openTypeNameDesigner + Monotype Design Team + openTypeNameDesignerURL + http://www.monotype.com/studio + openTypeNameLicense + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + openTypeNameLicenseURL + http://scripts.sil.org/OFL + openTypeNameManufacturer + Monotype Imaging Inc. 
+ openTypeNameManufacturerURL + http://www.google.com/get/noto/ + openTypeNameVersion + Version 1.902 + openTypeOS2Panose + + 2 + 11 + 5 + 6 + 4 + 5 + 4 + 2 + 2 + 4 + + openTypeOS2Selection + + 8 + + openTypeOS2Type + + openTypeOS2TypoAscender + 1069 + openTypeOS2TypoDescender + -293 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 9 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 62 + 64 + 67 + 69 + 91 + 116 + + openTypeOS2VendorID + GOOG + openTypeOS2WinAscent + 1069 + openTypeOS2WinDescent + 293 + postscriptBlueValues + + -15 + 0 + 537 + 548 + 571 + 581 + 714 + 725 + 760 + 766 + + postscriptFamilyBlues + + postscriptFamilyOtherBlues + + postscriptOtherBlues + + -256 + -240 + + postscriptStemSnapH + + 60 + 68 + 76 + + postscriptStemSnapV + + 64 + 75 + 84 + + postscriptUnderlinePosition + -100 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family 3 Condensed + styleMapStyleName + regular + styleName + Condensed + trademark + Noto is a trademark of Google Inc. 
+ unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 902 + xHeight + 537 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + F + F_.glif + T + T_.glif + l + l.glif + n + n.glif + o + o.glif + s + s.glif + t + t.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/F_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/F_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/F_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/F_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/layerinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/layerinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/layerinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/layerinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/l.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/l.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/l.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/l.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/n.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/n.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/n.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/n.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/o.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/o.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/o.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/o.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/s.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/s.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/s.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/s.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/t.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/t.glif --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/T_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/T_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/T_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/glyphs/T_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/groups.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/groups.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/groups.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/groups.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + public.kern1.T.left + + T + + public.kern1.d + + l + + public.kern1.n.left + + n + + public.kern1.o.left + + o + + public.kern1.t.left + + t + + public.kern2.T.right + + T + + public.kern2.b + + l + + public.kern2.n.right + + n + + public.kern2.o.right + + o + + public.kern2.s.right + + s + + public.kern2.t.right + + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/kerning.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/kerning.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/kerning.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/kerning.plist 
2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + public.kern1.T.left + + public.kern2.T.right + 12 + public.kern2.n.right + -30 + public.kern2.o.right + -42 + public.kern2.s.right + -36 + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/layercontents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/layercontents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/layercontents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/layercontents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + public.default + glyphs + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + public.glyphOrder + + F + T + l + n + o + s + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Condensed.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 3 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/fontinfo.plist 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,149 @@ + + + + + ascender + 760 + capHeight + 714 + copyright + Copyright 2015 Google Inc. All Rights Reserved. + descender + -240 + familyName + Test Family 3 + guidelines + + openTypeHeadCreated + 2016/03/15 19:50:39 + openTypeHheaAscender + 1069 + openTypeHheaDescender + -293 + openTypeHheaLineGap + 0 + openTypeNameDescription + Designed by Monotype design team. + openTypeNameDesigner + Monotype Design Team + openTypeNameDesignerURL + http://www.monotype.com/studio + openTypeNameLicense + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + openTypeNameLicenseURL + http://scripts.sil.org/OFL + openTypeNameManufacturer + Monotype Imaging Inc. 
+ openTypeNameManufacturerURL + http://www.google.com/get/noto/ + openTypeNameVersion + Version 1.902 + openTypeOS2Panose + + 2 + 11 + 2 + 2 + 4 + 5 + 4 + 2 + 2 + 4 + + openTypeOS2Selection + + 8 + + openTypeOS2Type + + openTypeOS2TypoAscender + 1069 + openTypeOS2TypoDescender + -293 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 9 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 62 + 64 + 67 + 69 + 91 + 116 + + openTypeOS2VendorID + GOOG + openTypeOS2WinAscent + 1069 + openTypeOS2WinDescent + 293 + postscriptBlueValues + + -15 + 0 + 528 + 537 + 559 + 569 + 714 + 724 + 760 + 766 + + postscriptFamilyBlues + + postscriptFamilyOtherBlues + + postscriptOtherBlues + + -256 + -240 + + postscriptStemSnapH + + 25 + + postscriptStemSnapV + + 26 + + postscriptUnderlinePosition + -100 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family 3 Light + styleMapStyleName + regular + styleName + Light + trademark + Noto is a trademark of Google Inc. + unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 902 + xHeight + 528 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + F + F_.glif + T + T_.glif + l + l.glif + n + n.glif + o + o.glif + s + s.glif + t + t.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/F_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/F_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/F_.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/F_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/layerinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/layerinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/layerinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/layerinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/l.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/l.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/l.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/l.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/n.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/n.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/n.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/n.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/o.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/o.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/o.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/o.glif 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/s.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/s.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/s.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/s.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/t.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/t.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/T_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/T_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/T_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/glyphs/T_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/groups.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/groups.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/groups.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/groups.plist 
2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + public.kern1.T.left + + T + + public.kern1.d + + l + + public.kern1.n.left + + n + + public.kern1.o.left + + o + + public.kern1.t.left + + t + + public.kern2.T.right + + T + + public.kern2.b + + l + + public.kern2.n.right + + n + + public.kern2.o.right + + o + + public.kern2.s.right + + s + + public.kern2.t.right + + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/kerning.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/kerning.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/kerning.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/kerning.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + public.kern1.T.left + + public.kern2.T.right + 20 + public.kern2.n.right + -50 + public.kern2.o.right + -70 + public.kern2.s.right + -60 + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/layercontents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/layercontents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/layercontents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/layercontents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + public.default + glyphs + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + public.glyphOrder + + F + T + l + n + o + s + t + + + diff -Nru 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Light.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 3 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,153 @@ + + + + + ascender + 760 + capHeight + 714 + copyright + Copyright 2015 Google Inc. All Rights Reserved. + descender + -240 + familyName + Test Family 3 + guidelines + + openTypeHeadCreated + 2016/03/15 19:50:39 + openTypeHheaAscender + 1069 + openTypeHheaDescender + -293 + openTypeHheaLineGap + 0 + openTypeNameDescription + Designed by Monotype design team. + openTypeNameDesigner + Monotype Design Team + openTypeNameDesignerURL + http://www.monotype.com/studio + openTypeNameLicense + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + openTypeNameLicenseURL + http://scripts.sil.org/OFL + openTypeNameManufacturer + Monotype Imaging Inc. 
+ openTypeNameManufacturerURL + http://www.google.com/get/noto/ + openTypeNameVersion + Version 1.902 + openTypeOS2Panose + + 2 + 11 + 5 + 2 + 4 + 5 + 4 + 2 + 2 + 4 + + openTypeOS2Selection + + 8 + + openTypeOS2Type + + openTypeOS2TypoAscender + 1069 + openTypeOS2TypoDescender + -293 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 9 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 62 + 64 + 67 + 69 + 91 + 116 + + openTypeOS2VendorID + GOOG + openTypeOS2WinAscent + 1069 + openTypeOS2WinDescent + 293 + postscriptBlueValues + + -15 + 0 + 536 + 547 + 572 + 582 + 714 + 726 + 760 + 766 + + postscriptFamilyBlues + + postscriptFamilyOtherBlues + + postscriptOtherBlues + + -256 + -240 + + postscriptStemSnapH + + 60 + 68 + 79 + + postscriptStemSnapV + + 64 + 75 + 90 + + postscriptUnderlinePosition + -100 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family 3 + styleMapStyleName + regular + styleName + Regular + trademark + Noto is a trademark of Google Inc. 
+ unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 902 + xHeight + 536 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + F + F_.glif + T + T_.glif + l + l.glif + n + n.glif + o + o.glif + s + s.glif + t + t.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/F_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/F_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/F_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/F_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/layerinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/layerinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/layerinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/layerinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/l.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/l.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/l.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/l.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/n.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/n.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/n.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/n.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/o.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/o.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/o.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/o.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/s.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/s.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/s.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/s.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/t.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/t.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/t.glif 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/T_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/T_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/T_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/glyphs/T_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/groups.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/groups.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/groups.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/groups.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + public.kern1.T.left + + T + + public.kern1.d + + l + + public.kern1.n.left + + n + + public.kern1.o.left + + o + + public.kern1.t.left + + t + + public.kern2.T.right + + T + + public.kern2.b + + l + + public.kern2.n.right + + n + + public.kern2.o.right + + o + + public.kern2.s.right + + s + + public.kern2.t.right + + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/kerning.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/kerning.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/kerning.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/kerning.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + public.kern1.T.left + + public.kern2.T.right + 20 + 
public.kern2.n.right + -50 + public.kern2.o.right + -70 + public.kern2.s.right + -60 + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/layercontents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/layercontents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/layercontents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/layercontents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + public.default + glyphs + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + public.glyphOrder + + F + T + l + n + o + s + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-Regular.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 3 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/fontinfo.plist 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,153 @@ + + + + + ascender + 760 + capHeight + 714 + copyright + Copyright 2015 Google Inc. All Rights Reserved. + descender + -240 + familyName + Test Family 3 + guidelines + + openTypeHeadCreated + 2016/03/15 19:50:39 + openTypeHheaAscender + 1069 + openTypeHheaDescender + -293 + openTypeHheaLineGap + 0 + openTypeNameDescription + Designed by Monotype design team. + openTypeNameDesigner + Monotype Design Team + openTypeNameDesignerURL + http://www.monotype.com/studio + openTypeNameLicense + This Font Software is licensed under the SIL Open Font License, Version 1.1. This Font Software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the SIL Open Font License for the specific language, permissions and limitations governing your use of this Font Software. + openTypeNameLicenseURL + http://scripts.sil.org/OFL + openTypeNameManufacturer + Monotype Imaging Inc. + openTypeNameManufacturerURL + http://www.google.com/get/noto/ + openTypeNameVersion + Version 1.902 + openTypeOS2Panose + + 2 + 11 + 8 + 2 + 4 + 5 + 4 + 2 + 2 + 4 + + openTypeOS2Selection + + 8 + + openTypeOS2Type + + openTypeOS2TypoAscender + 1069 + openTypeOS2TypoDescender + -293 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 9 + 29 + 30 + 31 + 32 + 33 + 34 + 35 + 36 + 62 + 64 + 67 + 69 + 91 + 116 + + openTypeOS2VendorID + GOOG + openTypeOS2WinAscent + 1069 + openTypeOS2WinDescent + 293 + postscriptBlueValues + + -15 + 0 + 546 + 557 + 582 + 593 + 714 + 726 + 760 + 766 + + postscriptFamilyBlues + + postscriptFamilyOtherBlues + + postscriptOtherBlues + + -256 + -240 + + postscriptStemSnapH + + 103 + 112 + 124 + + postscriptStemSnapV + + 117 + 144 + 151 + + postscriptUnderlinePosition + -100 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family 3 SemiBold + styleMapStyleName + regular + styleName + SemiBold + trademark + Noto is a trademark of Google 
Inc. + unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 902 + xHeight + 546 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,20 @@ + + + + + F + F_.glif + T + T_.glif + l + l.glif + n + n.glif + o + o.glif + s + s.glif + t + t.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/F_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/F_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/F_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/F_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/layerinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/layerinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/layerinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/layerinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/l.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/l.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/l.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/l.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/n.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/n.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/n.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/n.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/o.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/o.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/o.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/o.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/s.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/s.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/s.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/s.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/t.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/t.glif --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/t.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/t.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/T_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/T_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/T_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/glyphs/T_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/groups.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/groups.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/groups.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/groups.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + public.kern1.T.left + + T + + public.kern1.d + + l + + public.kern1.n.left + + n + + public.kern1.o.left + + o + + public.kern1.t.left + + t + + public.kern2.T.right + + T + + public.kern2.b + + l + + public.kern2.n.right + + n + + public.kern2.o.right + + o + + public.kern2.s.right + + s + + public.kern2.t.right + + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/kerning.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/kerning.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/kerning.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/kerning.plist 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,17 @@ + + + + + public.kern1.T.left + + public.kern2.T.right + 20 + public.kern2.n.right + -50 + public.kern2.o.right + -70 + public.kern2.s.right + -60 + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/layercontents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/layercontents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/layercontents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/layercontents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + + public.default + glyphs + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,16 @@ + + + + + public.glyphOrder + + F + T + l + n + o + s + t + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily3-SemiBold.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 3 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/features.fea fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/features.fea --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/features.fea 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/features.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,140 @@ + + + + + ascender + 738 + capHeight + 677 + descender + -245 + familyName + Test Family + openTypeHheaAscender + 918 + openTypeHheaDescender + -335 + openTypeHheaLineGap + 0 + openTypeNameDesigner + Frank Grießhammer + openTypeOS2CodePageRanges + + 0 + 1 + 29 + + openTypeOS2Panose + + 2 + 4 + 2 + 3 + 5 + 4 + 5 + 2 + 2 + 4 + + openTypeOS2TypoAscender + 730 + openTypeOS2TypoDescender + -270 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + + openTypeOS2VendorID + ADBO + openTypeOS2WeightClass + 200 + openTypeOS2WinAscent + 918 + openTypeOS2WinDescent + 335 + postscriptBlueFuzz + 0 + postscriptBlueScale + 0.0375 + postscriptBlueShift + 7 + postscriptBlueValues + + -13 + 0 + 470 + 483 + 534 + 547 + 556 + 569 + 654 + 667 + 677 + 690 + 738 + 758 + + postscriptFamilyBlues + + -20 + 0 + 473 + 491 + 525 + 540 + 549 + 562 + 644 + 659 + 669 + 689 + 729 + 749 + + postscriptFamilyOtherBlues + + -249 + -239 + + postscriptFontName + TestFamily-Master0 + postscriptForceBold + + postscriptOtherBlues + + -255 + -245 + + postscriptStemSnapH + + 26 + 20 + + postscriptStemSnapV + + 28 + 32 + + postscriptUnderlinePosition + -75 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family + styleName + Master 0 + unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 1 + xHeight + 470 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/a.glif 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/a.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/a.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/a.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/A_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/A_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/A_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/A_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + .notdef + _notdef.glif + A + A_.glif + a + a.glif + dollar + dollar.glif + dollar.nostroke + dollar.nostroke.glif + space + space.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/dollar.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/dollar.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/dollar.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/dollar.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/dollar.nostroke.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/dollar.nostroke.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/dollar.nostroke.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/dollar.nostroke.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/_notdef.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/_notdef.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/_notdef.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/_notdef.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/space.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/space.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/space.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/glyphs/space.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/lib.plist 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,15 @@ + + + + + public.glyphOrder + + .notdef + space + A + a + dollar + dollar.nostroke + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master0.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 2 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/features.fea fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/features.fea --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/features.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/features.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,140 @@ + + + + + ascender + 738 + capHeight + 677 + descender + -245 + familyName + Test Family + openTypeHheaAscender + 918 + openTypeHheaDescender + -335 + openTypeHheaLineGap + 0 + openTypeNameDesigner + Frank 
Grießhammer + openTypeOS2CodePageRanges + + 0 + 1 + 29 + + openTypeOS2Panose + + 2 + 4 + 6 + 3 + 5 + 4 + 5 + 2 + 2 + 4 + + openTypeOS2TypoAscender + 730 + openTypeOS2TypoDescender + -270 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + + openTypeOS2VendorID + ADBO + openTypeOS2WeightClass + 400 + openTypeOS2WinAscent + 918 + openTypeOS2WinDescent + 335 + postscriptBlueFuzz + 0 + postscriptBlueScale + 0.0375 + postscriptBlueShift + 7 + postscriptBlueValues + + -15 + 0 + 474 + 487 + 527 + 540 + 550 + 563 + 647 + 660 + 670 + 685 + 730 + 750 + + postscriptFamilyBlues + + -20 + 0 + 473 + 491 + 525 + 540 + 549 + 562 + 644 + 659 + 669 + 689 + 729 + 749 + + postscriptFamilyOtherBlues + + -249 + -239 + + postscriptFontName + TestFamily-Master1 + postscriptForceBold + + postscriptOtherBlues + + -250 + -240 + + postscriptStemSnapH + + 55 + 40 + + postscriptStemSnapV + + 80 + 90 + + postscriptUnderlinePosition + -75 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family + styleName + Master 1 + unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 1 + xHeight + 474 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/a.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/a.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/a.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/a.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/A_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/A_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/A_.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/A_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + .notdef + _notdef.glif + A + A_.glif + a + a.glif + dollar + dollar.glif + dollar.nostroke + dollar.nostroke.glif + space + space.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/dollar.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/dollar.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/dollar.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/dollar.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/dollar.nostroke.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/dollar.nostroke.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/dollar.nostroke.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/dollar.nostroke.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/_notdef.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/_notdef.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/_notdef.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/_notdef.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/space.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/space.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/space.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/glyphs/space.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,15 @@ + + + + + public.glyphOrder + + .notdef + space + A + a + dollar + dollar.nostroke + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master1.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + 
creator + org.robofab.ufoLib + formatVersion + 2 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/features.fea fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/features.fea --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/features.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/features.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,140 @@ + + + + + ascender + 738 + capHeight + 677 + descender + -245 + familyName + Test Family + openTypeHheaAscender + 918 + openTypeHheaDescender + -335 + openTypeHheaLineGap + 0 + openTypeNameDesigner + Frank Grießhammer + openTypeOS2CodePageRanges + + 0 + 1 + 29 + + openTypeOS2Panose + + 2 + 4 + 9 + 3 + 5 + 4 + 5 + 2 + 2 + 4 + + openTypeOS2TypoAscender + 730 + openTypeOS2TypoDescender + -270 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + + openTypeOS2VendorID + ADBO + openTypeOS2WeightClass + 900 + openTypeOS2WinAscent + 918 + openTypeOS2WinDescent + 335 + postscriptBlueFuzz + 0 + postscriptBlueScale + 0.0375 + postscriptBlueShift + 7 + postscriptBlueValues + + -20 + 0 + 487 + 503 + 515 + 531 + 536 + 552 + 624 + 640 + 652 + 672 + 711 + 731 + + postscriptFamilyBlues + + -20 + 0 + 473 + 491 + 525 + 540 + 549 + 562 + 644 + 659 + 669 + 689 + 729 + 749 + + postscriptFamilyOtherBlues + + -249 + -239 + + postscriptFontName + TestFamily-Master2 + postscriptForceBold + + postscriptOtherBlues + + -232 + -222 + + postscriptStemSnapH + + 74 + 60 + + 
postscriptStemSnapV + + 190 + 200 + + postscriptUnderlinePosition + -75 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family + styleName + Master 2 + unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 1 + xHeight + 487 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/a.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/a.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/a.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/a.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/A_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/A_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/A_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/A_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + .notdef + _notdef.glif + A + A_.glif + a + a.glif + dollar + dollar.glif + dollar.nostroke + dollar.nostroke.glif + space + space.glif + + diff -Nru 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/dollar.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/dollar.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/dollar.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/dollar.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/dollar.nostroke.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/dollar.nostroke.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/dollar.nostroke.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/dollar.nostroke.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/_notdef.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/_notdef.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/_notdef.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/_notdef.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/space.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/space.glif --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/space.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/glyphs/space.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,15 @@ + + + + + public.glyphOrder + + .notdef + space + A + a + dollar + dollar.nostroke + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master2.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 2 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/features.fea fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/features.fea --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/features.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/features.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 
+0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,140 @@ + + + + + ascender + 738 + capHeight + 677 + descender + -245 + familyName + Test Family + openTypeHheaAscender + 918 + openTypeHheaDescender + -335 + openTypeHheaLineGap + 0 + openTypeNameDesigner + Frank Grießhammer + openTypeOS2CodePageRanges + + 0 + 1 + 29 + + openTypeOS2Panose + + 2 + 4 + 9 + 3 + 5 + 4 + 5 + 2 + 2 + 4 + + openTypeOS2TypoAscender + 730 + openTypeOS2TypoDescender + -270 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + + openTypeOS2VendorID + ADBO + openTypeOS2WeightClass + 900 + openTypeOS2WinAscent + 918 + openTypeOS2WinDescent + 335 + postscriptBlueFuzz + 0 + postscriptBlueScale + 0.0375 + postscriptBlueShift + 7 + postscriptBlueValues + + -20 + 0 + 487 + 503 + 515 + 531 + 536 + 552 + 624 + 640 + 652 + 672 + 711 + 731 + + postscriptFamilyBlues + + -20 + 0 + 473 + 491 + 525 + 540 + 549 + 562 + 644 + 659 + 669 + 689 + 729 + 749 + + postscriptFamilyOtherBlues + + -249 + -239 + + postscriptFontName + TestFamily-Master3 + postscriptForceBold + + postscriptOtherBlues + + -232 + -222 + + postscriptStemSnapH + + 50 + 38 + + postscriptStemSnapV + + 190 + 200 + + postscriptUnderlinePosition + -75 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family + styleName + Master 3 + unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 1 + xHeight + 487 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/a.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/a.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/a.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/a.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/A_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/A_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/A_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/A_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/contents.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + .notdef + _notdef.glif + A + A_.glif + a + a.glif + dollar + dollar.glif + dollar.nostroke + dollar.nostroke.glif + space + space.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/dollar.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/dollar.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/dollar.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/dollar.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/dollar.nostroke.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/dollar.nostroke.glif --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/dollar.nostroke.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/dollar.nostroke.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/_notdef.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/_notdef.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/_notdef.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/_notdef.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/space.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/space.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/space.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/glyphs/space.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,15 @@ + + + + + public.glyphOrder + + .notdef + space + A + a + dollar + dollar.nostroke + + + diff -Nru 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master3.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 2 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/features.fea fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/features.fea --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/features.fea 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/features.fea 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/fontinfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/fontinfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/fontinfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/fontinfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,140 @@ + + + + + ascender + 738 + capHeight + 677 + descender + -245 + familyName + Test Family + openTypeHheaAscender + 918 + openTypeHheaDescender + -335 + openTypeHheaLineGap + 0 + openTypeNameDesigner + Frank Grießhammer + openTypeOS2CodePageRanges + + 0 + 1 + 29 + + openTypeOS2Panose + + 2 + 4 + 6 + 3 + 5 + 4 + 5 + 2 + 2 + 4 + + openTypeOS2TypoAscender + 730 + openTypeOS2TypoDescender + -270 + openTypeOS2TypoLineGap + 0 + openTypeOS2UnicodeRanges + + 0 + 1 + + openTypeOS2VendorID + ADBO + openTypeOS2WeightClass + 400 + openTypeOS2WinAscent + 918 + openTypeOS2WinDescent + 335 + postscriptBlueFuzz + 0 + postscriptBlueScale + 0.0375 + 
postscriptBlueShift + 7 + postscriptBlueValues + + -15 + 0 + 474 + 487 + 527 + 540 + 550 + 563 + 647 + 660 + 670 + 685 + 730 + 750 + + postscriptFamilyBlues + + -20 + 0 + 473 + 491 + 525 + 540 + 549 + 562 + 644 + 659 + 669 + 689 + 729 + 749 + + postscriptFamilyOtherBlues + + -249 + -239 + + postscriptFontName + TestFamily-Master4 + postscriptForceBold + + postscriptOtherBlues + + -250 + -240 + + postscriptStemSnapH + + 55 + 40 + + postscriptStemSnapV + + 80 + 90 + + postscriptUnderlinePosition + -75 + postscriptUnderlineThickness + 50 + styleMapFamilyName + Test Family + styleName + Master 4 + unitsPerEm + 1000 + versionMajor + 1 + versionMinor + 1 + xHeight + 474 + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/a.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/a.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/a.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/a.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/A_.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/A_.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/A_.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/A_.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/contents.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/contents.plist --- 
fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/contents.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/contents.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + .notdef + _notdef.glif + A + A_.glif + a + a.glif + dollar + dollar.glif + dollar.nostroke + dollar.nostroke.glif + space + space.glif + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/dollar.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/dollar.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/dollar.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/dollar.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,75 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/dollar.nostroke.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/dollar.nostroke.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/dollar.nostroke.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/dollar.nostroke.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/_notdef.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/_notdef.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/_notdef.glif 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/_notdef.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/space.glif fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/space.glif --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/space.glif 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/glyphs/space.glif 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,5 @@ + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/lib.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/lib.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/lib.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/lib.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,15 @@ + + + + + public.glyphOrder + + .notdef + space + A + a + dollar + dollar.nostroke + + + diff -Nru fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/metainfo.plist fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/metainfo.plist --- fonttools-3.0/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/metainfo.plist 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/master_ufo/TestFamily-Master4.ufo/metainfo.plist 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ + + + + + creator + org.robofab.ufoLib + formatVersion + 2 + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/Build3.ttx fonttools-3.21.2/Tests/varLib/data/test_results/Build3.ttx --- fonttools-3.0/Tests/varLib/data/test_results/Build3.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/Build3.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 
+1,750 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + wght + 0x0 + 0.0 + 0.0 + 1000.0 + 257 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/BuildAvarEmptyAxis.ttx fonttools-3.21.2/Tests/varLib/data/test_results/BuildAvarEmptyAxis.ttx --- fonttools-3.0/Tests/varLib/data/test_results/BuildAvarEmptyAxis.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/BuildAvarEmptyAxis.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff -Nru 
fonttools-3.0/Tests/varLib/data/test_results/BuildAvarIdentityMaps.ttx fonttools-3.21.2/Tests/varLib/data/test_results/BuildAvarIdentityMaps.ttx --- fonttools-3.0/Tests/varLib/data/test_results/BuildAvarIdentityMaps.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/BuildAvarIdentityMaps.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/BuildAvarSingleAxis.ttx fonttools-3.21.2/Tests/varLib/data/test_results/BuildAvarSingleAxis.ttx --- fonttools-3.0/Tests/varLib/data/test_results/BuildAvarSingleAxis.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/BuildAvarSingleAxis.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/BuildMain.ttx fonttools-3.21.2/Tests/varLib/data/test_results/BuildMain.ttx --- fonttools-3.0/Tests/varLib/data/test_results/BuildMain.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/BuildMain.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,2337 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + FDEF[ ] /* FunctionDefinition */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + MDAP[0] /* MoveDirectAbsPt */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Weight + + + Contrast + + + ExtraLight + + + TestFamily-ExtraLight + + + Light + + + TestFamily-Light + + + Regular + + + TestFamily-Regular + + + Semibold + + + TestFamily-Semibold + + + Bold + + + TestFamily-Bold + + + Black + + + TestFamily-Black + + + Black Medium Contrast + + + TestFamily-BlackMediumContrast + + + Black High Contrast + + + TestFamily-BlackHighContrast + + + Weight + + + Contrast + + + Test Family + + + Regular + + + Version 1.001;ADBO;Test Family Regular + + + Test Family + + + Version 1.001 + + + TestFamily-Master1 + + + Frank Grießhammer + + + Master 1 + + + Weight + + + Contrast + + + ExtraLight + + + TestFamily-ExtraLight + + + Light + + + TestFamily-Light + + + Regular + + + TestFamily-Regular + + + Semibold + + + TestFamily-Semibold + + + Bold + + + TestFamily-Bold + + + Black + + + TestFamily-Black + + + Black Medium Contrast + + + TestFamily-BlackMediumContrast + + + Black High Contrast + + + TestFamily-BlackHighContrast + + + Weight + + + Contrast + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + wght + 0x0 + 0.0 + 368.0 + 1000.0 + 256 + + + + + cntr + 0x0 + 
0.0 + 0.0 + 100.0 + 257 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/Build.ttx fonttools-3.21.2/Tests/varLib/data/test_results/Build.ttx --- fonttools-3.0/Tests/varLib/data/test_results/Build.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/Build.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1689 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + wght + 0x0 + 0.0 + 368.0 + 1000.0 + 256 + + + + + cntr + 0x0 + 0.0 + 0.0 + 100.0 + 257 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayout2.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayout2.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayout2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayout2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,4 @@ + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_diff2.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_diff2.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_diff2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_diff2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_diff.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_diff.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_diff.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_diff.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_same.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_same.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_same.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_1_same.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_diff2.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_diff2.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_diff2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_diff2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,72 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_diff.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_diff.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_diff.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_diff.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_same.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_same.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_same.ttx 1970-01-01 00:00:00.000000000 +0000 +++ 
fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_class_same.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_diff2.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_diff2.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_diff2.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_diff2.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_diff.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_diff.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_diff.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_diff.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_same.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_same.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_same.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_2_spec_same.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_3_diff.ttx 
fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_3_diff.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_3_diff.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_3_diff.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_3_same.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_3_same.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_3_same.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_3_same.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_4_diff.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_4_diff.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_4_diff.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_4_diff.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_4_same.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_4_same.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_4_same.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_4_same.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_5_diff.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_5_diff.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_5_diff.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_5_diff.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_5_same.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_5_same.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_5_same.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_5_same.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_6_diff.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_6_diff.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_6_diff.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_6_diff.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_6_same.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_6_same.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_6_same.ttx 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_6_same.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_8_diff.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_8_diff.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_8_diff.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_8_diff.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,116 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_8_same.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_8_same.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_8_same.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_8_same.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,116 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_size_feat_same.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_size_feat_same.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutGPOS_size_feat_same.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutGPOS_size_feat_same.ttx 2018-01-08 
12:40:40.000000000 +0000 @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutMain.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutMain.ttx --- fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayoutMain.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayoutMain.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,499 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + FDEF[ ] /* FunctionDefinition */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + MDAP[0] /* MoveDirectAbsPt */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Test Family + + + Regular + + + Version 1.001;ADBO;Test Family Regular + + + Test Family + + + Version 1.001 + + + TestFamily-Master1 + + + Frank Grießhammer + + + Master 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayout.ttx fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayout.ttx --- 
fonttools-3.0/Tests/varLib/data/test_results/InterpolateLayout.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/InterpolateLayout.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,161 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/Mutator_IUP-instance.ttx fonttools-3.21.2/Tests/varLib/data/test_results/Mutator_IUP-instance.ttx --- fonttools-3.0/Tests/varLib/data/test_results/Mutator_IUP-instance.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/Mutator_IUP-instance.ttx 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,300 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + VarFont + + + Regular + + + VarFont Regular: 2017 + + + VarFont Regular + + + VarFont-Regular + + + Width + + + Ascender + + + Regular + + + VarFont + + + Regular + + + VarFont Regular: 2017 + + + VarFont Regular + + + VarFont-Regular + + + Width + + + Ascender + + + Regular + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/data/test_results/Mutator.ttx fonttools-3.21.2/Tests/varLib/data/test_results/Mutator.ttx --- fonttools-3.0/Tests/varLib/data/test_results/Mutator.ttx 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/data/test_results/Mutator.ttx 2018-01-08 12:40:40.000000000 +0000 @@ 
-0,0 +1,619 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + FDEF[ ] /* FunctionDefinition */ + POP[ ] /* PopTopStack */ + ENDF[ ] /* EndFunctionDefinition */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + PUSHB[ ] /* 1 value pushed */ + 0 + MDAP[0] /* MoveDirectAbsPt */ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Weight + + + Contrast + + + ExtraLight + + + TestFamily-ExtraLight + + + Light + + + TestFamily-Light + + + Regular + + + TestFamily-Regular + + + Semibold + + + TestFamily-Semibold + + + Bold + + + TestFamily-Bold + + + Black + + + TestFamily-Black + + + Black Medium Contrast + + + TestFamily-BlackMediumContrast + + + Black High Contrast + + + TestFamily-BlackHighContrast + + + Weight + + + Contrast + + + Test Family + + + Regular + + + Version 1.001;ADBO;Test Family Regular + + + Test Family + + + Version 1.001 + + + TestFamily-Master1 + + + Frank Grießhammer + + + Master 1 + + + Weight + + + Contrast + + + ExtraLight + + + TestFamily-ExtraLight + + + Light + + + TestFamily-Light + + + Regular + + + TestFamily-Regular + + + Semibold + + + TestFamily-Semibold + + + Bold + + + TestFamily-Bold + + + Black + + + TestFamily-Black + + + Black Medium Contrast + + + TestFamily-BlackMediumContrast + + + 
Black High Contrast + + + TestFamily-BlackHighContrast + + + Weight + + + Contrast + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff -Nru fonttools-3.0/Tests/varLib/designspace_test.py fonttools-3.21.2/Tests/varLib/designspace_test.py --- fonttools-3.0/Tests/varLib/designspace_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/designspace_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,69 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.varLib import designspace +import os +import unittest + + +class DesignspaceTest(unittest.TestCase): + def test_load(self): + self.maxDiff = None + self.assertEqual( + designspace.load(_getpath("Designspace.designspace")), + + {'sources': + [{'location': {'weight': 0.0}, + 'groups': {'copy': True}, + 'filename': 'DesignspaceTest-Light.ufo', + 'info': {'copy': True}, + 'name': 'master_1', + 'lib': {'copy': True}}, + {'location': {'weight': 1.0}, + 'name': 'master_2', + 'filename': 'DesignspaceTest-Bold.ufo'}], + + 'instances': + [{'location': {'weight': 0.5}, + 'familyname': 'DesignspaceTest', + 'filename': 'instance/DesignspaceTest-Medium.ufo', + 'kerning': {}, + 'info': {}, + 'stylename': 'Medium'}], + + 'axes': + [{'name': 'weight', + 'map': [{'input': 0.0, 'output': 10.0}, + {'input': 401.0, 'output': 66.0}, + {'input': 1000.0, 'output': 990.0}], + 'tag': 'wght', + 'maximum': 1000.0, + 'minimum': 0.0, + 'default': 0.0}, + {'maximum': 1000.0, + 'default': 250.0, + 'minimum': 0.0, + 'name': 'width', + 'tag': 'wdth'}, + {'name': 'contrast', + 'tag': 'cntr', + 'maximum': 100.0, + 'minimum': 0.0, + 'default': 0.0, + 'labelname': {'de': 'Kontrast', 'en': 'Contrast'}}] + } + ) + + def test_load2(self): + self.assertEqual( + designspace.load(_getpath("Designspace2.designspace")), + {'sources': [], 'instances': [{}]}) + + +def _getpath(testfile): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", 
testfile) + + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/varLib/interpolatable_test.py fonttools-3.21.2/Tests/varLib/interpolatable_test.py --- fonttools-3.0/Tests/varLib/interpolatable_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/interpolatable_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,102 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.varLib.interpolatable import main as interpolatable_main +import os +import shutil +import sys +import tempfile +import unittest + +try: + import scipy +except: + scipy = None + +try: + import munkres +except ImportError: + munkres = None + + +@unittest.skipUnless(scipy or munkres, "scipy or munkres not installed") +class InterpolatableTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + @staticmethod + def get_test_input(test_file_or_folder): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", test_file_or_folder) + + @staticmethod + def get_file_list(folder, suffix, prefix=''): + all_files = os.listdir(folder) + file_list = [] + for p in all_files: + if p.startswith(prefix) and p.endswith(suffix): + file_list.append(os.path.abspath(os.path.join(folder, p))) + return file_list + + def temp_path(self, suffix): + self.temp_dir() + self.num_tempfiles += 1 + return os.path.join(self.tempdir, + "tmp%d%s" % (self.num_tempfiles, suffix)) + + def temp_dir(self): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + + def compile_font(self, path, suffix, temp_dir): + ttx_filename = os.path.basename(path) + savepath = os.path.join(temp_dir, ttx_filename.replace('.ttx', suffix)) + font = TTFont(recalcBBoxes=False, recalcTimestamp=False) + font.importXML(path) + font.save(savepath, reorderTables=None) + return font, savepath + +# ----- +# Tests +# ----- + + def test_interpolatable_ttf(self): + suffix = '.ttf' + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for path in ttx_paths: + self.compile_font(path, suffix, self.tempdir) + + ttf_paths = self.get_file_list(self.tempdir, suffix) + self.assertIsNone(interpolatable_main(ttf_paths)) + + + def test_interpolatable_otf(self): + suffix = '.otf' + ttx_dir = self.get_test_input('master_ttx_interpolatable_otf') + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for path in ttx_paths: + self.compile_font(path, suffix, self.tempdir) + + otf_paths = self.get_file_list(self.tempdir, suffix) + 
self.assertIsNone(interpolatable_main(otf_paths)) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/varLib/interpolate_layout_test.py fonttools-3.21.2/Tests/varLib/interpolate_layout_test.py --- fonttools-3.0/Tests/varLib/interpolate_layout_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/interpolate_layout_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,890 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.varLib import build +from fontTools.varLib.interpolate_layout import interpolate_layout +from fontTools.varLib.interpolate_layout import main as interpolate_layout_main +from fontTools.feaLib.builder import addOpenTypeFeaturesFromString +import difflib +import os +import shutil +import sys +import tempfile +import unittest + + +class InterpolateLayoutTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + @staticmethod + def get_test_input(test_file_or_folder): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", test_file_or_folder) + + @staticmethod + def get_test_output(test_file_or_folder): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", "test_results", test_file_or_folder) + + @staticmethod + def get_file_list(folder, suffix, prefix=''): + all_files = os.listdir(folder) + file_list = [] + for p in all_files: + if p.startswith(prefix) and p.endswith(suffix): + file_list.append(os.path.abspath(os.path.join(folder, p))) + return file_list + + def temp_path(self, suffix): + self.temp_dir() + self.num_tempfiles += 1 + return os.path.join(self.tempdir, + "tmp%d%s" % (self.num_tempfiles, suffix)) + + def temp_dir(self): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + + def read_ttx(self, path): + lines = [] + with open(path, "r", encoding="utf-8") as ttx: + for line in ttx.readlines(): + # Elide ttFont attributes because ttLibVersion may change, + # and use os-native line separators so we can run difflib. 
+ if line.startswith("" + os.linesep) + else: + lines.append(line.rstrip() + os.linesep) + return lines + + def expect_ttx(self, font, expected_ttx, tables): + path = self.temp_path(suffix=".ttx") + font.saveXML(path, tables=tables) + actual = self.read_ttx(path) + expected = self.read_ttx(expected_ttx) + if actual != expected: + for line in difflib.unified_diff( + expected, actual, fromfile=expected_ttx, tofile=path): + sys.stdout.write(line) + self.fail("TTX output is different from expected") + + def check_ttx_dump(self, font, expected_ttx, tables, suffix): + """Ensure the TTX dump is the same after saving and reloading the font.""" + path = self.temp_path(suffix=suffix) + font.save(path) + self.expect_ttx(TTFont(path), expected_ttx, tables) + + def compile_font(self, path, suffix, temp_dir, features=None): + ttx_filename = os.path.basename(path) + savepath = os.path.join(temp_dir, ttx_filename.replace('.ttx', suffix)) + font = TTFont(recalcBBoxes=False, recalcTimestamp=False) + font.importXML(path) + if features: + addOpenTypeFeaturesFromString(font, features) + font.save(savepath, reorderTables=None) + return font, savepath + +# ----- +# Tests +# ----- + + def test_varlib_interpolate_layout_GSUB_only_ttf(self): + """Only GSUB, and only in the base master. + + The variable font will inherit the GSUB table from the + base master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for path in ttx_paths: + self.compile_font(path, suffix, self.tempdir) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GSUB'] + expected_ttx_path = self.get_test_output('InterpolateLayout.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_no_GSUB_ttf(self): + """The base master has no GSUB table. + + The variable font will end up without a GSUB table. + """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout2.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for path in ttx_paths: + self.compile_font(path, suffix, self.tempdir) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GSUB'] + expected_ttx_path = self.get_test_output('InterpolateLayout2.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GSUB_only_no_axes_ttf(self): + """Only GSUB, and only in the base master. + Designspace file has no element. + + The variable font will inherit the GSUB table from the + base master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout3.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for path in ttx_paths: + self.compile_font(path, suffix, self.tempdir) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GSUB'] + expected_ttx_path = self.get_test_output('InterpolateLayout.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_size_feat_same_val_ttf(self): + """Only GPOS; 'size' feature; same values in all masters. + """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str = """ + feature size { + parameters 10.0 0; + } size; + """ + features = [fea_str] * 2 + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_size_feat_same.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_1_same_val_ttf(self): + """Only GPOS; LookupType 1; same values in all masters. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str = """ + feature xxxx { + pos A <-80 0 -160 0>; + } xxxx; + """ + features = [fea_str] * 2 + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_1_same.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_1_diff_val_ttf(self): + """Only GPOS; LookupType 1; different values in each master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + feature xxxx { + pos A <-80 0 -160 0>; + } xxxx; + """ + fea_str_1 = """ + feature xxxx { + pos A <-97 0 -195 0>; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_1_diff.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_1_diff2_val_ttf(self): + """Only GPOS; LookupType 1; different values and items in each master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + feature xxxx { + pos A <-80 0 -160 0>; + pos a <-55 0 -105 0>; + } xxxx; + """ + fea_str_1 = """ + feature xxxx { + pos A <-97 0 -195 0>; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_1_diff2.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_2_spec_pairs_same_val_ttf(self): + """Only GPOS; LookupType 2 specific pairs; same values in all masters. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str = """ + feature xxxx { + pos A a -53; + } xxxx; + """ + features = [fea_str] * 2 + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_spec_same.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_2_spec_pairs_diff_val_ttf(self): + """Only GPOS; LookupType 2 specific pairs; different values in each master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + feature xxxx { + pos A a -53; + } xxxx; + """ + fea_str_1 = """ + feature xxxx { + pos A a -27; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_spec_diff.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_2_spec_pairs_diff2_val_ttf(self): + """Only GPOS; LookupType 2 specific pairs; different values and items in each master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + feature xxxx { + pos A a -53; + } xxxx; + """ + fea_str_1 = """ + feature xxxx { + pos A a -27; + pos a a 19; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_spec_diff2.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_2_class_pairs_same_val_ttf(self): + """Only GPOS; LookupType 2 class pairs; same values in all masters. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str = """ + feature xxxx { + pos [A] [a] -53; + } xxxx; + """ + features = [fea_str] * 2 + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_class_same.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_2_class_pairs_diff_val_ttf(self): + """Only GPOS; LookupType 2 class pairs; different values in each master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + feature xxxx { + pos [A] [a] -53; + } xxxx; + """ + fea_str_1 = """ + feature xxxx { + pos [A] [a] -27; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_class_diff.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_2_class_pairs_diff2_val_ttf(self): + """Only GPOS; LookupType 2 class pairs; different values and items in each master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + feature xxxx { + pos [A] [a] -53; + } xxxx; + """ + fea_str_1 = """ + feature xxxx { + pos [A] [a] -27; + pos [a] [a] 19; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_2_class_diff2.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_3_same_val_ttf(self): + """Only GPOS; LookupType 3; same values in all masters. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str = """ + feature xxxx { + pos cursive a ; + } xxxx; + """ + features = [fea_str] * 2 + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_3_same.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_3_diff_val_ttf(self): + """Only GPOS; LookupType 3; different values in each master. + """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + feature xxxx { + pos cursive a ; + } xxxx; + """ + fea_str_1 = """ + feature xxxx { + pos cursive a ; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_3_diff.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def 
test_varlib_interpolate_layout_GPOS_only_LookupType_4_same_val_ttf(self): + """Only GPOS; LookupType 4; same values in all masters. + """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str = """ + markClass uni0303 @MARKS_ABOVE; + feature xxxx { + pos base a mark @MARKS_ABOVE; + } xxxx; + """ + features = [fea_str] * 2 + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_4_same.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_4_diff_val_ttf(self): + """Only GPOS; LookupType 4; different values in each master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + markClass uni0303 @MARKS_ABOVE; + feature xxxx { + pos base a mark @MARKS_ABOVE; + } xxxx; + """ + fea_str_1 = """ + markClass uni0303 @MARKS_ABOVE; + feature xxxx { + pos base a mark @MARKS_ABOVE; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_4_diff.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_5_same_val_ttf(self): + """Only GPOS; LookupType 5; same values in all masters. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str = """ + markClass uni0330 @MARKS_BELOW; + feature xxxx { + pos ligature f_t mark @MARKS_BELOW + ligComponent mark @MARKS_BELOW; + } xxxx; + """ + features = [fea_str] * 2 + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_5_same.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_5_diff_val_ttf(self): + """Only GPOS; LookupType 5; different values in each master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + markClass uni0330 @MARKS_BELOW; + feature xxxx { + pos ligature f_t mark @MARKS_BELOW + ligComponent mark @MARKS_BELOW; + } xxxx; + """ + fea_str_1 = """ + markClass uni0330 @MARKS_BELOW; + feature xxxx { + pos ligature f_t mark @MARKS_BELOW + ligComponent mark @MARKS_BELOW; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_5_diff.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_6_same_val_ttf(self): + """Only GPOS; LookupType 6; same values in all masters. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str = """ + markClass uni0303 @MARKS_ABOVE; + feature xxxx { + pos mark uni0308 mark @MARKS_ABOVE; + } xxxx; + """ + features = [fea_str] * 2 + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_6_same.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_6_diff_val_ttf(self): + """Only GPOS; LookupType 6; different values in each master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + markClass uni0303 @MARKS_ABOVE; + feature xxxx { + pos mark uni0308 mark @MARKS_ABOVE; + } xxxx; + """ + fea_str_1 = """ + markClass uni0303 @MARKS_ABOVE; + feature xxxx { + pos mark uni0308 mark @MARKS_ABOVE; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_6_diff.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_8_same_val_ttf(self): + """Only GPOS; LookupType 8; same values in all masters. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str = """ + markClass uni0303 @MARKS_ABOVE; + lookup CNTXT_PAIR_POS { + pos A a -23; + } CNTXT_PAIR_POS; + + lookup CNTXT_MARK_TO_BASE { + pos base a mark @MARKS_ABOVE; + } CNTXT_MARK_TO_BASE; + + feature xxxx { + pos A' lookup CNTXT_PAIR_POS a' @MARKS_ABOVE' lookup CNTXT_MARK_TO_BASE; + } xxxx; + """ + features = [fea_str] * 2 + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_8_same.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_GPOS_only_LookupType_8_diff_val_ttf(self): + """Only GPOS; LookupType 8; different values in each master. 
+ """ + suffix = '.ttf' + ds_path = self.get_test_input('InterpolateLayout.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + fea_str_0 = """ + markClass uni0303 @MARKS_ABOVE; + lookup CNTXT_PAIR_POS { + pos A a -23; + } CNTXT_PAIR_POS; + + lookup CNTXT_MARK_TO_BASE { + pos base a mark @MARKS_ABOVE; + } CNTXT_MARK_TO_BASE; + + feature xxxx { + pos A' lookup CNTXT_PAIR_POS a' @MARKS_ABOVE' lookup CNTXT_MARK_TO_BASE; + } xxxx; + """ + fea_str_1 = """ + markClass uni0303 @MARKS_ABOVE; + lookup CNTXT_PAIR_POS { + pos A a 57; + } CNTXT_PAIR_POS; + + lookup CNTXT_MARK_TO_BASE { + pos base a mark @MARKS_ABOVE; + } CNTXT_MARK_TO_BASE; + + feature xxxx { + pos A' lookup CNTXT_PAIR_POS a' @MARKS_ABOVE' lookup CNTXT_MARK_TO_BASE; + } xxxx; + """ + features = [fea_str_0, fea_str_1] + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily2-') + for i, path in enumerate(ttx_paths): + self.compile_font(path, suffix, self.tempdir, features[i]) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + instfont = interpolate_layout(ds_path, {'weight': 500}, finder) + + tables = ['GPOS'] + expected_ttx_path = self.get_test_output('InterpolateLayoutGPOS_8_diff.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + self.check_ttx_dump(instfont, expected_ttx_path, tables, suffix) + + + def test_varlib_interpolate_layout_main_ttf(self): + """Mostly for testing varLib.interpolate_layout.main() + """ + suffix = '.ttf' + ds_path = self.get_test_input('Build.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + self.temp_dir() + ttf_dir = os.path.join(self.tempdir, 'master_ttf_interpolatable') + os.makedirs(ttf_dir) + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily-') + for path in ttx_paths: + self.compile_font(path, suffix, ttf_dir) + + finder = lambda s: 
s.replace(ufo_dir, ttf_dir).replace('.ufo', suffix) + varfont, _, _ = build(ds_path, finder) + varfont_name = 'InterpolateLayoutMain' + varfont_path = os.path.join(self.tempdir, varfont_name + suffix) + varfont.save(varfont_path) + + ds_copy = os.path.splitext(varfont_path)[0] + '.designspace' + shutil.copy2(ds_path, ds_copy) + args = [ds_copy, 'weight=500', 'contrast=50'] + interpolate_layout_main(args) + + instfont_path = os.path.splitext(varfont_path)[0] + '-instance' + suffix + instfont = TTFont(instfont_path) + tables = [table_tag for table_tag in instfont.keys() if table_tag != 'head'] + expected_ttx_path = self.get_test_output(varfont_name + '.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/varLib/models_test.py fonttools-3.21.2/Tests/varLib/models_test.py --- fonttools-3.0/Tests/varLib/models_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/models_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,77 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.varLib.models import ( + normalizeLocation, supportScalar, VariationModel) + + +def test_normalizeLocation(): + axes = {"wght": (100, 400, 900)} + assert normalizeLocation({"wght": 400}, axes) == {'wght': 0.0} + assert normalizeLocation({"wght": 100}, axes) == {'wght': -1.0} + assert normalizeLocation({"wght": 900}, axes) == {'wght': 1.0} + assert normalizeLocation({"wght": 650}, axes) == {'wght': 0.5} + assert normalizeLocation({"wght": 1000}, axes) == {'wght': 1.0} + assert normalizeLocation({"wght": 0}, axes) == {'wght': -1.0} + + axes = {"wght": (0, 0, 1000)} + assert normalizeLocation({"wght": 0}, axes) == {'wght': 0.0} + assert normalizeLocation({"wght": -1}, axes) == {'wght': 0.0} + assert normalizeLocation({"wght": 1000}, axes) == {'wght': 1.0} + assert normalizeLocation({"wght": 500}, axes) == 
{'wght': 0.5} + assert normalizeLocation({"wght": 1001}, axes) == {'wght': 1.0} + + axes = {"wght": (0, 1000, 1000)} + assert normalizeLocation({"wght": 0}, axes) == {'wght': -1.0} + assert normalizeLocation({"wght": -1}, axes) == {'wght': -1.0} + assert normalizeLocation({"wght": 500}, axes) == {'wght': -0.5} + assert normalizeLocation({"wght": 1000}, axes) == {'wght': 0.0} + assert normalizeLocation({"wght": 1001}, axes) == {'wght': 0.0} + + +def test_supportScalar(): + assert supportScalar({}, {}) == 1.0 + assert supportScalar({'wght':.2}, {}) == 1.0 + assert supportScalar({'wght':.2}, {'wght':(0,2,3)}) == 0.1 + assert supportScalar({'wght':2.5}, {'wght':(0,2,4)}) == 0.75 + + +def test_VariationModel(): + locations = [ + {'wght':100}, + {'wght':-100}, + {'wght':-180}, + {'wdth':+.3}, + {'wght':+120,'wdth':.3}, + {'wght':+120,'wdth':.2}, + {}, + {'wght':+180,'wdth':.3}, + {'wght':+180}, + ] + model = VariationModel(locations, axisOrder=['wght']) + + assert model.locations == [ + {}, + {'wght': -100}, + {'wght': -180}, + {'wght': 100}, + {'wght': 180}, + {'wdth': 0.3}, + {'wdth': 0.3, 'wght': 180}, + {'wdth': 0.3, 'wght': 120}, + {'wdth': 0.2, 'wght': 120}] + + assert model.deltaWeights == [ + {}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0}, + {0: 1.0, 4: 1.0, 5: 1.0}, + {0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.25}, + {0: 1.0, + 3: 0.75, + 4: 0.25, + 5: 0.6666666666666667, + 6: 0.16666666666666669, + 7: 0.6666666666666667}] diff -Nru fonttools-3.0/Tests/varLib/mutator_test.py fonttools-3.21.2/Tests/varLib/mutator_test.py --- fonttools-3.0/Tests/varLib/mutator_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/mutator_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,144 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.varLib import build +from fontTools.varLib.mutator import main as mutator +import difflib +import 
os +import shutil +import sys +import tempfile +import unittest + + +class MutatorTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. + if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + @staticmethod + def get_test_input(test_file_or_folder): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", test_file_or_folder) + + @staticmethod + def get_test_output(test_file_or_folder): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", "test_results", test_file_or_folder) + + @staticmethod + def get_file_list(folder, suffix, prefix=''): + all_files = os.listdir(folder) + file_list = [] + for p in all_files: + if p.startswith(prefix) and p.endswith(suffix): + file_list.append(os.path.abspath(os.path.join(folder, p))) + return file_list + + def temp_path(self, suffix): + self.temp_dir() + self.num_tempfiles += 1 + return os.path.join(self.tempdir, + "tmp%d%s" % (self.num_tempfiles, suffix)) + + def temp_dir(self): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + + def read_ttx(self, path): + lines = [] + with open(path, "r", encoding="utf-8") as ttx: + for line in ttx.readlines(): + # Elide ttFont attributes because ttLibVersion may change, + # and use os-native line separators so we can run difflib. 
+ if line.startswith("" + os.linesep) + else: + lines.append(line.rstrip() + os.linesep) + return lines + + def expect_ttx(self, font, expected_ttx, tables): + path = self.temp_path(suffix=".ttx") + font.saveXML(path, tables=tables) + actual = self.read_ttx(path) + expected = self.read_ttx(expected_ttx) + if actual != expected: + for line in difflib.unified_diff( + expected, actual, fromfile=expected_ttx, tofile=path): + sys.stdout.write(line) + self.fail("TTX output is different from expected") + + def compile_font(self, path, suffix, temp_dir): + ttx_filename = os.path.basename(path) + savepath = os.path.join(temp_dir, ttx_filename.replace('.ttx', suffix)) + font = TTFont(recalcBBoxes=False, recalcTimestamp=False) + font.importXML(path) + font.save(savepath, reorderTables=None) + return font, savepath + +# ----- +# Tests +# ----- + + def test_varlib_mutator_ttf(self): + suffix = '.ttf' + ds_path = self.get_test_input('Build.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily-') + for path in ttx_paths: + self.compile_font(path, suffix, self.tempdir) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + varfont, _, _ = build(ds_path, finder) + varfont_name = 'Mutator' + varfont_path = os.path.join(self.tempdir, varfont_name + suffix) + varfont.save(varfont_path) + + args = [varfont_path, 'wght=500', 'cntr=50'] + mutator(args) + + instfont_path = os.path.splitext(varfont_path)[0] + '-instance' + suffix + instfont = TTFont(instfont_path) + tables = [table_tag for table_tag in instfont.keys() if table_tag != 'head'] + expected_ttx_path = self.get_test_output(varfont_name + '.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + + def test_varlib_mutator_iup_ttf(self): + suffix = '.ttf' + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = 
self.get_test_input('master_ttx_varfont_ttf') + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'Mutator_IUP') + for path in ttx_paths: + self.compile_font(path, suffix, self.tempdir) + + varfont_name = 'Mutator_IUP' + varfont_path = os.path.join(self.tempdir, varfont_name + suffix) + + args = [varfont_path, 'wdth=80', 'ASCN=628'] + mutator(args) + + instfont_path = os.path.splitext(varfont_path)[0] + '-instance' + suffix + instfont = TTFont(instfont_path) + tables = [table_tag for table_tag in instfont.keys() if table_tag != 'head'] + expected_ttx_path = self.get_test_output(varfont_name + '-instance.ttx') + self.expect_ttx(instfont, expected_ttx_path, tables) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/varLib/varLib_test.py fonttools-3.21.2/Tests/varLib/varLib_test.py --- fonttools-3.0/Tests/varLib/varLib_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/varLib/varLib_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,215 @@ +from __future__ import print_function, division, absolute_import +from fontTools.misc.py23 import * +from fontTools.ttLib import TTFont +from fontTools.varLib import build +from fontTools.varLib import main as varLib_main +import difflib +import os +import shutil +import sys +import tempfile +import unittest + + +class BuildTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + @staticmethod + def get_test_input(test_file_or_folder): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", test_file_or_folder) + + @staticmethod + def get_test_output(test_file_or_folder): + path, _ = os.path.split(__file__) + return os.path.join(path, "data", "test_results", test_file_or_folder) + + @staticmethod + def get_file_list(folder, suffix, prefix=''): + all_files = os.listdir(folder) + file_list = [] + for p in all_files: + if p.startswith(prefix) and p.endswith(suffix): + file_list.append(os.path.abspath(os.path.join(folder, p))) + return file_list + + def temp_path(self, suffix): + self.temp_dir() + self.num_tempfiles += 1 + return os.path.join(self.tempdir, + "tmp%d%s" % (self.num_tempfiles, suffix)) + + def temp_dir(self): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + + def read_ttx(self, path): + lines = [] + with open(path, "r", encoding="utf-8") as ttx: + for line in ttx.readlines(): + # Elide ttFont attributes because ttLibVersion may change, + # and use os-native line separators so we can run difflib. 
+ if line.startswith("" + os.linesep) + else: + lines.append(line.rstrip() + os.linesep) + return lines + + def expect_ttx(self, font, expected_ttx, tables): + path = self.temp_path(suffix=".ttx") + font.saveXML(path, tables=tables) + actual = self.read_ttx(path) + expected = self.read_ttx(expected_ttx) + if actual != expected: + for line in difflib.unified_diff( + expected, actual, fromfile=expected_ttx, tofile=path): + sys.stdout.write(line) + self.fail("TTX output is different from expected") + + def check_ttx_dump(self, font, expected_ttx, tables, suffix): + """Ensure the TTX dump is the same after saving and reloading the font.""" + path = self.temp_path(suffix=suffix) + font.save(path) + self.expect_ttx(TTFont(path), expected_ttx, tables) + + def compile_font(self, path, suffix, temp_dir): + ttx_filename = os.path.basename(path) + savepath = os.path.join(temp_dir, ttx_filename.replace('.ttx', suffix)) + font = TTFont(recalcBBoxes=False, recalcTimestamp=False) + font.importXML(path) + font.save(savepath, reorderTables=None) + return font, savepath + + def _run_varlib_build_test(self, designspace_name, font_name, tables, + expected_ttx_name): + suffix = '.ttf' + ds_path = self.get_test_input(designspace_name + '.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + self.temp_dir() + ttx_paths = self.get_file_list(ttx_dir, '.ttx', font_name + '-') + for path in ttx_paths: + self.compile_font(path, suffix, self.tempdir) + + finder = lambda s: s.replace(ufo_dir, self.tempdir).replace('.ufo', suffix) + varfont, model, _ = build(ds_path, finder) + + expected_ttx_path = self.get_test_output(expected_ttx_name + '.ttx') + self.expect_ttx(varfont, expected_ttx_path, tables) + self.check_ttx_dump(varfont, expected_ttx_path, tables, suffix) +# ----- +# Tests +# ----- + + def test_varlib_build_ttf(self): + """Designspace file contains element.""" + self._run_varlib_build_test( + 
designspace_name='Build', + font_name='TestFamily', + tables=['GDEF', 'HVAR', 'MVAR', 'fvar', 'gvar'], + expected_ttx_name='Build' + ) + + def test_varlib_build_no_axes_ttf(self): + """Designspace file does not contain an element.""" + self._run_varlib_build_test( + designspace_name='InterpolateLayout3', + font_name='TestFamily2', + tables=['GDEF', 'HVAR', 'MVAR', 'fvar', 'gvar'], + expected_ttx_name='Build3' + ) + + def test_varlib_avar_single_axis(self): + """Designspace file contains a 'weight' axis with elements + modifying the normalization mapping. An 'avar' table is generated. + """ + test_name = 'BuildAvarSingleAxis' + self._run_varlib_build_test( + designspace_name=test_name, + font_name='TestFamily3', + tables=['avar'], + expected_ttx_name=test_name + ) + + def test_varlib_avar_with_identity_maps(self): + """Designspace file contains two 'weight' and 'width' axes both with + elements. + + The 'width' axis only contains identity mappings, however the resulting + avar segment will not be empty but will contain the default axis value + maps: {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}. + + This is to to work around an issue with some rasterizers: + https://github.com/googlei18n/fontmake/issues/295 + https://github.com/fonttools/fonttools/issues/1011 + """ + test_name = 'BuildAvarIdentityMaps' + self._run_varlib_build_test( + designspace_name=test_name, + font_name='TestFamily3', + tables=['avar'], + expected_ttx_name=test_name + ) + + def test_varlib_avar_empty_axis(self): + """Designspace file contains two 'weight' and 'width' axes, but + only one axis ('weight') has some elements. + + Even if no elements are defined for the 'width' axis, the + resulting avar segment still contains the default axis value maps: + {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}. 
+ + This is again to to work around an issue with some rasterizers: + https://github.com/googlei18n/fontmake/issues/295 + https://github.com/fonttools/fonttools/issues/1011 + """ + test_name = 'BuildAvarEmptyAxis' + self._run_varlib_build_test( + designspace_name=test_name, + font_name='TestFamily3', + tables=['avar'], + expected_ttx_name=test_name + ) + + def test_varlib_main_ttf(self): + """Mostly for testing varLib.main() + """ + suffix = '.ttf' + ds_path = self.get_test_input('Build.designspace') + ufo_dir = self.get_test_input('master_ufo') + ttx_dir = self.get_test_input('master_ttx_interpolatable_ttf') + + self.temp_dir() + ttf_dir = os.path.join(self.tempdir, 'master_ttf_interpolatable') + os.makedirs(ttf_dir) + ttx_paths = self.get_file_list(ttx_dir, '.ttx', 'TestFamily-') + for path in ttx_paths: + self.compile_font(path, suffix, ttf_dir) + + ds_copy = os.path.join(self.tempdir, 'BuildMain.designspace') + shutil.copy2(ds_path, ds_copy) + varLib_main([ds_copy]) + + varfont_path = os.path.splitext(ds_copy)[0] + '-VF' + suffix + varfont = TTFont(varfont_path) + tables = [table_tag for table_tag in varfont.keys() if table_tag != 'head'] + expected_ttx_path = self.get_test_output('BuildMain.ttx') + self.expect_ttx(varfont, expected_ttx_path, tables) + + +if __name__ == "__main__": + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/voltLib/lexer_test.py fonttools-3.21.2/Tests/voltLib/lexer_test.py --- fonttools-3.0/Tests/voltLib/lexer_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/voltLib/lexer_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,35 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.voltLib.error import VoltLibError +from fontTools.voltLib.lexer import Lexer +import unittest + + +def lex(s): + return [(typ, tok) for (typ, tok, _) in Lexer(s, "test.vtp")] + + +class LexerTest(unittest.TestCase): + def __init__(self, methodName): + 
unittest.TestCase.__init__(self, methodName) + + def test_empty(self): + self.assertEqual(lex(""), []) + self.assertEqual(lex("\t"), []) + + def test_string(self): + self.assertEqual(lex('"foo" "bar"'), + [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")]) + self.assertRaises(VoltLibError, lambda: lex('"foo\n bar"')) + + def test_name(self): + self.assertEqual(lex('DEF_FOO bar.alt1'), + [(Lexer.NAME, "DEF_FOO"), (Lexer.NAME, "bar.alt1")]) + + def test_number(self): + self.assertEqual(lex("123 -456"), + [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)]) + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tests/voltLib/parser_test.py fonttools-3.21.2/Tests/voltLib/parser_test.py --- fonttools-3.0/Tests/voltLib/parser_test.py 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/Tests/voltLib/parser_test.py 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,1034 @@ +from __future__ import print_function, division, absolute_import +from __future__ import unicode_literals +from fontTools.voltLib.error import VoltLibError +from fontTools.voltLib.parser import Parser +from io import open +import os +import shutil +import tempfile +import unittest + + +class ParserTest(unittest.TestCase): + def __init__(self, methodName): + unittest.TestCase.__init__(self, methodName) + # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, + # and fires deprecation warnings if a program uses the old name. 
+ if not hasattr(self, "assertRaisesRegex"): + self.assertRaisesRegex = self.assertRaisesRegexp + + def test_def_glyph_base(self): + [def_glyph] = self.parse( + 'DEF_GLYPH ".notdef" ID 0 TYPE BASE END_GLYPH' + ).statements + self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode, + def_glyph.type, def_glyph.components), + (".notdef", 0, None, "BASE", None)) + + def test_def_glyph_base_with_unicode(self): + [def_glyph] = self.parse( + 'DEF_GLYPH "space" ID 3 UNICODE 32 TYPE BASE END_GLYPH' + ).statements + self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode, + def_glyph.type, def_glyph.components), + ("space", 3, [0x0020], "BASE", None)) + + def test_def_glyph_base_with_unicodevalues(self): + [def_glyph] = self.parse( + 'DEF_GLYPH "CR" ID 2 UNICODEVALUES "U+0009" ' + 'TYPE BASE END_GLYPH' + ).statements + self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode, + def_glyph.type, def_glyph.components), + ("CR", 2, [0x0009], "BASE", None)) + + def test_def_glyph_base_with_mult_unicodevalues(self): + [def_glyph] = self.parse( + 'DEF_GLYPH "CR" ID 2 UNICODEVALUES "U+0009,U+000D" ' + 'TYPE BASE END_GLYPH' + ).statements + self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode, + def_glyph.type, def_glyph.components), + ("CR", 2, [0x0009, 0x000D], "BASE", None)) + + def test_def_glyph_base_with_empty_unicodevalues(self): + [def_glyph] = self.parse( + 'DEF_GLYPH "i.locl" ID 269 UNICODEVALUES "" ' + 'TYPE BASE END_GLYPH' + ).statements + self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode, + def_glyph.type, def_glyph.components), + ("i.locl", 269, None, "BASE", None)) + + def test_def_glyph_base_2_components(self): + [def_glyph] = self.parse( + 'DEF_GLYPH "glyphBase" ID 320 TYPE BASE COMPONENTS 2 END_GLYPH' + ).statements + self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode, + def_glyph.type, def_glyph.components), + ("glyphBase", 320, None, "BASE", 2)) + + def 
test_def_glyph_ligature_2_components(self): + [def_glyph] = self.parse( + 'DEF_GLYPH "f_f" ID 320 TYPE LIGATURE COMPONENTS 2 END_GLYPH' + ).statements + self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode, + def_glyph.type, def_glyph.components), + ("f_f", 320, None, "LIGATURE", 2)) + + def test_def_glyph_no_type(self): + [def_glyph] = self.parse( + 'DEF_GLYPH "glyph20" ID 20 END_GLYPH' + ).statements + self.assertEqual((def_glyph.name, def_glyph.id, def_glyph.unicode, + def_glyph.type, def_glyph.components), + ("glyph20", 20, None, None, None)) + + def test_def_glyph_case_sensitive(self): + def_glyphs = self.parse( + 'DEF_GLYPH "A" ID 3 UNICODE 65 TYPE BASE END_GLYPH\n' + 'DEF_GLYPH "a" ID 4 UNICODE 97 TYPE BASE END_GLYPH\n' + ).statements + self.assertEqual((def_glyphs[0].name, def_glyphs[0].id, + def_glyphs[0].unicode, def_glyphs[0].type, + def_glyphs[0].components), + ("A", 3, [0x41], "BASE", None)) + self.assertEqual((def_glyphs[1].name, def_glyphs[1].id, + def_glyphs[1].unicode, def_glyphs[1].type, + def_glyphs[1].components), + ("a", 4, [0x61], "BASE", None)) + + def test_def_group_glyphs(self): + [def_group] = self.parse( + 'DEF_GROUP "aaccented"\n' + 'ENUM GLYPH "aacute" GLYPH "abreve" GLYPH "acircumflex" ' + 'GLYPH "adieresis" GLYPH "ae" GLYPH "agrave" GLYPH "amacron" ' + 'GLYPH "aogonek" GLYPH "aring" GLYPH "atilde" END_ENUM\n' + 'END_GROUP\n' + ).statements + self.assertEqual((def_group.name, def_group.enum), + ("aaccented", + ("aacute", "abreve", "acircumflex", "adieresis", + "ae", "agrave", "amacron", "aogonek", "aring", + "atilde"))) + + def test_def_group_groups(self): + [group1, group2, test_group] = self.parse( + 'DEF_GROUP "Group1"\n' + 'ENUM GLYPH "a" GLYPH "b" GLYPH "c" GLYPH "d" END_ENUM\n' + 'END_GROUP\n' + 'DEF_GROUP "Group2"\n' + 'ENUM GLYPH "e" GLYPH "f" GLYPH "g" GLYPH "h" END_ENUM\n' + 'END_GROUP\n' + 'DEF_GROUP "TestGroup"\n' + 'ENUM GROUP "Group1" GROUP "Group2" END_ENUM\n' + 'END_GROUP\n' + ).statements + 
self.assertEqual( + (test_group.name, test_group.enum), + ("TestGroup", + (("Group1",), ("Group2",)))) + + def test_def_group_groups_not_yet_defined(self): + [group1, test_group1, test_group2, test_group3, group2] = self.parse( + 'DEF_GROUP "Group1"\n' + 'ENUM GLYPH "a" GLYPH "b" GLYPH "c" GLYPH "d" END_ENUM\n' + 'END_GROUP\n' + 'DEF_GROUP "TestGroup1"\n' + 'ENUM GROUP "Group1" GROUP "Group2" END_ENUM\n' + 'END_GROUP\n' + 'DEF_GROUP "TestGroup2"\n' + 'ENUM GROUP "Group2" END_ENUM\n' + 'END_GROUP\n' + 'DEF_GROUP "TestGroup3"\n' + 'ENUM GROUP "Group2" GROUP "Group1" END_ENUM\n' + 'END_GROUP\n' + 'DEF_GROUP "Group2"\n' + 'ENUM GLYPH "e" GLYPH "f" GLYPH "g" GLYPH "h" END_ENUM\n' + 'END_GROUP\n' + ).statements + self.assertEqual( + (test_group1.name, test_group1.enum), + ("TestGroup1", + (("Group1", ), ("Group2", )))) + self.assertEqual( + (test_group2.name, test_group2.enum), + ("TestGroup2", + (("Group2", ), ))) + self.assertEqual( + (test_group3.name, test_group3.enum), + ("TestGroup3", + (("Group2", ), ("Group1", )))) + + # def test_def_group_groups_undefined(self): + # with self.assertRaisesRegex( + # VoltLibError, + # r'Group "Group2" is used but undefined.'): + # [group1, test_group, group2] = self.parse( + # 'DEF_GROUP "Group1"\n' + # 'ENUM GLYPH "a" GLYPH "b" GLYPH "c" GLYPH "d" END_ENUM\n' + # 'END_GROUP\n' + # 'DEF_GROUP "TestGroup"\n' + # 'ENUM GROUP "Group1" GROUP "Group2" END_ENUM\n' + # 'END_GROUP\n' + # ).statements + + def test_def_group_glyphs_and_group(self): + [def_group1, def_group2] = self.parse( + 'DEF_GROUP "aaccented"\n' + 'ENUM GLYPH "aacute" GLYPH "abreve" GLYPH "acircumflex" ' + 'GLYPH "adieresis" GLYPH "ae" GLYPH "agrave" GLYPH "amacron" ' + 'GLYPH "aogonek" GLYPH "aring" GLYPH "atilde" END_ENUM\n' + 'END_GROUP\n' + 'DEF_GROUP "KERN_lc_a_2ND"\n' + 'ENUM GLYPH "a" GROUP "aaccented" END_ENUM\n' + 'END_GROUP' + ).statements + self.assertEqual((def_group2.name, def_group2.enum), + ("KERN_lc_a_2ND", + ("a", ("aaccented", )))) + + def 
test_def_group_range(self): + [def_group] = self.parse( + 'DEF_GROUP "KERN_lc_a_2ND"\n' + 'ENUM RANGE "a" TO "atilde" GLYPH "b" RANGE "c" TO "cdotaccent" ' + 'END_ENUM\n' + 'END_GROUP' + ).statements + self.assertEqual((def_group.name, def_group.enum), + ("KERN_lc_a_2ND", + (("a", "atilde"), "b", ("c", "cdotaccent")))) + + def test_group_duplicate(self): + self.assertRaisesRegex( + VoltLibError, + 'Glyph group "dupe" already defined, ' + 'group names are case insensitive', + self.parse, 'DEF_GROUP "dupe"\n' + 'ENUM GLYPH "a" GLYPH "b" END_ENUM\n' + 'END_GROUP\n' + 'DEF_GROUP "dupe"\n' + 'ENUM GLYPH "x" END_ENUM\n' + 'END_GROUP\n' + ) + + def test_group_duplicate_case_insensitive(self): + self.assertRaisesRegex( + VoltLibError, + 'Glyph group "Dupe" already defined, ' + 'group names are case insensitive', + self.parse, 'DEF_GROUP "dupe"\n' + 'ENUM GLYPH "a" GLYPH "b" END_ENUM\n' + 'END_GROUP\n' + 'DEF_GROUP "Dupe"\n' + 'ENUM GLYPH "x" END_ENUM\n' + 'END_GROUP\n' + ) + + def test_script_without_langsys(self): + [script] = self.parse( + 'DEF_SCRIPT NAME "Latin" TAG "latn"\n' + 'END_SCRIPT' + ).statements + self.assertEqual((script.name, script.tag, script.langs), + ("Latin", "latn", [])) + + def test_langsys_normal(self): + [def_script] = self.parse( + 'DEF_SCRIPT NAME "Latin" TAG "latn"\n' + 'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n' + 'END_LANGSYS\n' + 'DEF_LANGSYS NAME "Moldavian" TAG "MOL "\n' + 'END_LANGSYS\n' + 'END_SCRIPT' + ).statements + self.assertEqual((def_script.name, def_script.tag), + ("Latin", + "latn")) + def_lang = def_script.langs[0] + self.assertEqual((def_lang.name, def_lang.tag), + ("Romanian", + "ROM ")) + def_lang = def_script.langs[1] + self.assertEqual((def_lang.name, def_lang.tag), + ("Moldavian", + "MOL ")) + + def test_langsys_no_script_name(self): + [langsys] = self.parse( + 'DEF_SCRIPT TAG "latn"\n' + 'DEF_LANGSYS NAME "Default" TAG "dflt"\n' + 'END_LANGSYS\n' + 'END_SCRIPT' + ).statements + self.assertEqual((langsys.name, langsys.tag), 
+ (None, + "latn")) + lang = langsys.langs[0] + self.assertEqual((lang.name, lang.tag), + ("Default", + "dflt")) + + def test_langsys_no_script_tag_fails(self): + with self.assertRaisesRegex( + VoltLibError, + r'.*Expected "TAG"'): + [langsys] = self.parse( + 'DEF_SCRIPT NAME "Latin"\n' + 'DEF_LANGSYS NAME "Default" TAG "dflt"\n' + 'END_LANGSYS\n' + 'END_SCRIPT' + ).statements + + def test_langsys_duplicate_script(self): + with self.assertRaisesRegex( + VoltLibError, + 'Script "DFLT" already defined, ' + 'script tags are case insensitive'): + [langsys1, langsys2] = self.parse( + 'DEF_SCRIPT NAME "Default" TAG "DFLT"\n' + 'DEF_LANGSYS NAME "Default" TAG "dflt"\n' + 'END_LANGSYS\n' + 'END_SCRIPT\n' + 'DEF_SCRIPT TAG "DFLT"\n' + 'DEF_LANGSYS NAME "Default" TAG "dflt"\n' + 'END_LANGSYS\n' + 'END_SCRIPT' + ).statements + + def test_langsys_duplicate_lang(self): + with self.assertRaisesRegex( + VoltLibError, + 'Language "dflt" already defined in script "DFLT", ' + 'language tags are case insensitive'): + [langsys] = self.parse( + 'DEF_SCRIPT NAME "Default" TAG "DFLT"\n' + 'DEF_LANGSYS NAME "Default" TAG "dflt"\n' + 'END_LANGSYS\n' + 'DEF_LANGSYS NAME "Default" TAG "dflt"\n' + 'END_LANGSYS\n' + 'END_SCRIPT\n' + ).statements + + def test_langsys_lang_in_separate_scripts(self): + [langsys1, langsys2] = self.parse( + 'DEF_SCRIPT NAME "Default" TAG "DFLT"\n' + 'DEF_LANGSYS NAME "Default" TAG "dflt"\n' + 'END_LANGSYS\n' + 'DEF_LANGSYS NAME "Default" TAG "ROM "\n' + 'END_LANGSYS\n' + 'END_SCRIPT\n' + 'DEF_SCRIPT NAME "Latin" TAG "latn"\n' + 'DEF_LANGSYS NAME "Default" TAG "dflt"\n' + 'END_LANGSYS\n' + 'DEF_LANGSYS NAME "Default" TAG "ROM "\n' + 'END_LANGSYS\n' + 'END_SCRIPT' + ).statements + self.assertEqual((langsys1.langs[0].tag, langsys1.langs[1].tag), + ("dflt", "ROM ")) + self.assertEqual((langsys2.langs[0].tag, langsys2.langs[1].tag), + ("dflt", "ROM ")) + + def test_langsys_no_lang_name(self): + [langsys] = self.parse( + 'DEF_SCRIPT NAME "Latin" TAG "latn"\n' + 
'DEF_LANGSYS TAG "dflt"\n' + 'END_LANGSYS\n' + 'END_SCRIPT' + ).statements + self.assertEqual((langsys.name, langsys.tag), + ("Latin", + "latn")) + lang = langsys.langs[0] + self.assertEqual((lang.name, lang.tag), + (None, + "dflt")) + + def test_langsys_no_langsys_tag_fails(self): + with self.assertRaisesRegex( + VoltLibError, + r'.*Expected "TAG"'): + [langsys] = self.parse( + 'DEF_SCRIPT NAME "Latin" TAG "latn"\n' + 'DEF_LANGSYS NAME "Default"\n' + 'END_LANGSYS\n' + 'END_SCRIPT' + ).statements + + def test_feature(self): + [def_script] = self.parse( + 'DEF_SCRIPT NAME "Latin" TAG "latn"\n' + 'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n' + 'DEF_FEATURE NAME "Fractions" TAG "frac"\n' + 'LOOKUP "fraclookup"\n' + 'END_FEATURE\n' + 'END_LANGSYS\n' + 'END_SCRIPT' + ).statements + def_feature = def_script.langs[0].features[0] + self.assertEqual((def_feature.name, def_feature.tag, + def_feature.lookups), + ("Fractions", + "frac", + ["fraclookup"])) + [def_script] = self.parse( + 'DEF_SCRIPT NAME "Latin" TAG "latn"\n' + 'DEF_LANGSYS NAME "Romanian" TAG "ROM "\n' + 'DEF_FEATURE NAME "Kerning" TAG "kern"\n' + 'LOOKUP "kern1" LOOKUP "kern2"\n' + 'END_FEATURE\n' + 'END_LANGSYS\n' + 'END_SCRIPT' + ).statements + def_feature = def_script.langs[0].features[0] + self.assertEqual((def_feature.name, def_feature.tag, + def_feature.lookups), + ("Kerning", + "kern", + ["kern1", "kern2"])) + + def test_lookup_duplicate(self): + with self.assertRaisesRegex( + VoltLibError, + 'Lookup "dupe" already defined, ' + 'lookup names are case insensitive', + ): + [lookup1, lookup2] = self.parse( + 'DEF_LOOKUP "dupe"\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "a"\n' + 'WITH GLYPH "a.alt"\n' + 'END_SUB\n' + 'END_SUBSTITUTION\n' + 'DEF_LOOKUP "dupe"\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "b"\n' + 'WITH GLYPH "b.alt"\n' + 'END_SUB\n' + 'END_SUBSTITUTION\n' + ).statements + + def test_lookup_duplicate_insensitive_case(self): + with self.assertRaisesRegex( + VoltLibError, + 'Lookup "Dupe" already defined, ' + 
'lookup names are case insensitive', + ): + [lookup1, lookup2] = self.parse( + 'DEF_LOOKUP "dupe"\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "a"\n' + 'WITH GLYPH "a.alt"\n' + 'END_SUB\n' + 'END_SUBSTITUTION\n' + 'DEF_LOOKUP "Dupe"\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "b"\n' + 'WITH GLYPH "b.alt"\n' + 'END_SUB\n' + 'END_SUBSTITUTION\n' + ).statements + + def test_lookup_name_starts_with_letter(self): + with self.assertRaisesRegex( + VoltLibError, + 'Lookup name "\\\lookupname" must start with a letter' + ): + [lookup] = self.parse( + 'DEF_LOOKUP "\lookupname"\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "a"\n' + 'WITH GLYPH "a.alt"\n' + 'END_SUB\n' + 'END_SUBSTITUTION\n' + ).statements + + def test_substitution_empty(self): + with self.assertRaisesRegex( + VoltLibError, + r'Expected SUB'): + [lookup] = self.parse( + 'DEF_LOOKUP "empty_substitution" PROCESS_BASE PROCESS_MARKS ' + 'ALL DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'END_SUBSTITUTION' + ).statements + + def test_substitution_invalid_many_to_many(self): + with self.assertRaisesRegex( + VoltLibError, + r'Invalid substitution type'): + [lookup] = self.parse( + 'DEF_LOOKUP "invalid_substitution" PROCESS_BASE PROCESS_MARKS ' + 'ALL DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "f" GLYPH "i"\n' + 'WITH GLYPH "f.alt" GLYPH "i.alt"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + + def test_substitution_invalid_reverse_chaining_single(self): + with self.assertRaisesRegex( + VoltLibError, + r'Invalid substitution type'): + [lookup] = self.parse( + 'DEF_LOOKUP "invalid_substitution" PROCESS_BASE PROCESS_MARKS ' + 'ALL DIRECTION LTR REVERSAL\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "f" GLYPH "i"\n' + 'WITH GLYPH "f_i"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + + def test_substitution_invalid_mixed(self): + with self.assertRaisesRegex( + VoltLibError, + r'Invalid substitution type'): + [lookup] = 
self.parse( + 'DEF_LOOKUP "invalid_substitution" PROCESS_BASE PROCESS_MARKS ' + 'ALL DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "fi"\n' + 'WITH GLYPH "f" GLYPH "i"\n' + 'END_SUB\n' + 'SUB GLYPH "f" GLYPH "l"\n' + 'WITH GLYPH "f_l"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + + def test_substitution_single(self): + [lookup] = self.parse( + 'DEF_LOOKUP "smcp" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "a"\n' + 'WITH GLYPH "a.sc"\n' + 'END_SUB\n' + 'SUB GLYPH "b"\n' + 'WITH GLYPH "b.sc"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + self.assertEqual((lookup.name, list(lookup.sub.mapping.items())), + ("smcp", [(("a",), ("a.sc",)), (("b",), ("b.sc",))])) + + def test_substitution_single_in_context(self): + [group, lookup] = self.parse( + 'DEF_GROUP "Denominators" ENUM GLYPH "one.dnom" GLYPH "two.dnom" ' + 'END_ENUM END_GROUP\n' + 'DEF_LOOKUP "fracdnom" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR\n' + 'IN_CONTEXT LEFT ENUM GROUP "Denominators" GLYPH "fraction" ' + 'END_ENUM\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "one"\n' + 'WITH GLYPH "one.dnom"\n' + 'END_SUB\n' + 'SUB GLYPH "two"\n' + 'WITH GLYPH "two.dnom"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + context = lookup.context[0] + self.assertEqual( + (lookup.name, list(lookup.sub.mapping.items()), + context.ex_or_in, context.left, context.right), + ("fracdnom", [(("one",), ("one.dnom",)), (("two",), ("two.dnom",))], + "IN_CONTEXT", [((("Denominators",), "fraction"),)], []) + ) + + def test_substitution_single_in_contexts(self): + [group, lookup] = self.parse( + 'DEF_GROUP "Hebrew" ENUM GLYPH "uni05D0" GLYPH "uni05D1" ' + 'END_ENUM END_GROUP\n' + 'DEF_LOOKUP "HebrewCurrency" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'RIGHT GROUP "Hebrew"\n' + 'RIGHT GLYPH "one.Hebr"\n' + 'END_CONTEXT\n' + 'IN_CONTEXT\n' + 'LEFT GROUP 
"Hebrew"\n' + 'LEFT GLYPH "one.Hebr"\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "dollar"\n' + 'WITH GLYPH "dollar.Hebr"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + context1 = lookup.context[0] + context2 = lookup.context[1] + self.assertEqual( + (lookup.name, context1.ex_or_in, context1.left, + context1.right, context2.ex_or_in, + context2.left, context2.right), + ("HebrewCurrency", "IN_CONTEXT", [], + [(("Hebrew",),), ("one.Hebr",)], "IN_CONTEXT", + [(("Hebrew",),), ("one.Hebr",)], [])) + + def test_substitution_skip_base(self): + [group, lookup] = self.parse( + 'DEF_GROUP "SomeMarks" ENUM GLYPH "marka" GLYPH "markb" ' + 'END_ENUM END_GROUP\n' + 'DEF_LOOKUP "SomeSub" SKIP_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "A"\n' + 'WITH GLYPH "A.c2sc"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + process_base = lookup.process_base + self.assertEqual( + (lookup.name, process_base), + ("SomeSub", False)) + + def test_substitution_process_base(self): + [group, lookup] = self.parse( + 'DEF_GROUP "SomeMarks" ENUM GLYPH "marka" GLYPH "markb" ' + 'END_ENUM END_GROUP\n' + 'DEF_LOOKUP "SomeSub" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "A"\n' + 'WITH GLYPH "A.c2sc"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + process_base = lookup.process_base + self.assertEqual( + (lookup.name, process_base), + ("SomeSub", True)) + + def test_substitution_skip_marks(self): + [group, lookup] = self.parse( + 'DEF_GROUP "SomeMarks" ENUM GLYPH "marka" GLYPH "markb" ' + 'END_ENUM END_GROUP\n' + 'DEF_LOOKUP "SomeSub" PROCESS_BASE SKIP_MARKS ' + 'DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "A"\n' + 'WITH GLYPH "A.c2sc"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + process_marks = lookup.process_marks + self.assertEqual( + (lookup.name, process_marks), + 
("SomeSub", False)) + + def test_substitution_process_marks(self): + [group, lookup] = self.parse( + 'DEF_GROUP "SomeMarks" ENUM GLYPH "acutecmb" GLYPH "gravecmb" ' + 'END_ENUM END_GROUP\n' + 'DEF_LOOKUP "SomeSub" PROCESS_BASE ' + 'PROCESS_MARKS "SomeMarks" \n' + 'DIRECTION RTL\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "A"\n' + 'WITH GLYPH "A.c2sc"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + process_marks = lookup.process_marks + self.assertEqual( + (lookup.name, process_marks), + ("SomeSub", "SomeMarks")) + + def test_substitution_process_all_marks(self): + [group, lookup] = self.parse( + 'DEF_GROUP "SomeMarks" ENUM GLYPH "acutecmb" GLYPH "gravecmb" ' + 'END_ENUM END_GROUP\n' + 'DEF_LOOKUP "SomeSub" PROCESS_BASE ' + 'PROCESS_MARKS ALL \n' + 'DIRECTION RTL\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "A"\n' + 'WITH GLYPH "A.c2sc"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + process_marks = lookup.process_marks + self.assertEqual( + (lookup.name, process_marks), + ("SomeSub", True)) + + def test_substitution_no_reversal(self): + # TODO: check right context with no reversal + [lookup] = self.parse( + 'DEF_LOOKUP "Lookup" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'RIGHT ENUM GLYPH "a" GLYPH "b" END_ENUM\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "a"\n' + 'WITH GLYPH "a.alt"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + self.assertEqual( + (lookup.name, lookup.reversal), + ("Lookup", None) + ) + + def test_substitution_reversal(self): + [lookup] = self.parse( + 'DEF_LOOKUP "RevLookup" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR REVERSAL\n' + 'IN_CONTEXT\n' + 'RIGHT ENUM GLYPH "a" GLYPH "b" END_ENUM\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GROUP "DFLT_Num_standardFigures"\n' + 'WITH GROUP "DFLT_Num_numerators"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + self.assertEqual( + (lookup.name, lookup.reversal), + ("RevLookup", True) + ) + + def test_substitution_single_to_multiple(self): + 
[lookup] = self.parse( + 'DEF_LOOKUP "ccmp" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "aacute"\n' + 'WITH GLYPH "a" GLYPH "acutecomb"\n' + 'END_SUB\n' + 'SUB GLYPH "agrave"\n' + 'WITH GLYPH "a" GLYPH "gravecomb"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + self.assertEqual((lookup.name, list(lookup.sub.mapping.items())), + ("ccmp", + [(("aacute",), ("a", "acutecomb")), + (("agrave",), ("a", "gravecomb"))] + )) + + def test_substitution_multiple_to_single(self): + [lookup] = self.parse( + 'DEF_LOOKUP "liga" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB GLYPH "f" GLYPH "i"\n' + 'WITH GLYPH "f_i"\n' + 'END_SUB\n' + 'SUB GLYPH "f" GLYPH "t"\n' + 'WITH GLYPH "f_t"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + self.assertEqual((lookup.name, list(lookup.sub.mapping.items())), + ("liga", + [(("f", "i"), ("f_i",)), + (("f", "t"), ("f_t",))])) + + def test_substitution_reverse_chaining_single(self): + [lookup] = self.parse( + 'DEF_LOOKUP "numr" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR REVERSAL\n' + 'IN_CONTEXT\n' + 'RIGHT ENUM ' + 'GLYPH "fraction" ' + 'RANGE "zero.numr" TO "nine.numr" ' + 'END_ENUM\n' + 'END_CONTEXT\n' + 'AS_SUBSTITUTION\n' + 'SUB RANGE "zero" TO "nine"\n' + 'WITH RANGE "zero.numr" TO "nine.numr"\n' + 'END_SUB\n' + 'END_SUBSTITUTION' + ).statements + self.assertEqual( + (lookup.name, lookup.context[0].right, + list(lookup.sub.mapping.items())), + ("numr", [(("fraction", ("zero.numr", "nine.numr")),)], + [((("zero", "nine"),), (("zero.numr", "nine.numr"),))])) + + # GPOS + # ATTACH_CURSIVE + # ATTACH + # ADJUST_PAIR + # ADJUST_SINGLE + def test_position_empty(self): + with self.assertRaisesRegex( + VoltLibError, + 'Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE'): + [lookup] = self.parse( + 'DEF_LOOKUP "empty_position" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION 
LTR\n' + 'EXCEPT_CONTEXT\n' + 'LEFT GLYPH "glyph"\n' + 'END_CONTEXT\n' + 'AS_POSITION\n' + 'END_POSITION' + ).statements + + def test_position_attach(self): + [lookup, anchor1, anchor2, anchor3, anchor4] = self.parse( + 'DEF_LOOKUP "anchor_top" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION RTL\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_POSITION\n' + 'ATTACH GLYPH "a" GLYPH "e"\n' + 'TO GLYPH "acutecomb" AT ANCHOR "top" ' + 'GLYPH "gravecomb" AT ANCHOR "top"\n' + 'END_ATTACH\n' + 'END_POSITION\n' + 'DEF_ANCHOR "MARK_top" ON 120 GLYPH acutecomb COMPONENT 1 ' + 'AT POS DX 0 DY 450 END_POS END_ANCHOR\n' + 'DEF_ANCHOR "MARK_top" ON 121 GLYPH gravecomb COMPONENT 1 ' + 'AT POS DX 0 DY 450 END_POS END_ANCHOR\n' + 'DEF_ANCHOR "top" ON 31 GLYPH a COMPONENT 1 ' + 'AT POS DX 210 DY 450 END_POS END_ANCHOR\n' + 'DEF_ANCHOR "top" ON 35 GLYPH e COMPONENT 1 ' + 'AT POS DX 215 DY 450 END_POS END_ANCHOR\n' + ).statements + self.assertEqual( + (lookup.name, lookup.pos.coverage, lookup.pos.coverage_to), + ("anchor_top", ("a", "e"), [(("acutecomb",), "top"), + (("gravecomb",), "top")]) + ) + self.assertEqual( + (anchor1.name, anchor1.gid, anchor1.glyph_name, anchor1.component, + anchor1.locked, anchor1.pos), + ("MARK_top", 120, "acutecomb", 1, False, (None, 0, 450, {}, {}, + {})) + ) + self.assertEqual( + (anchor2.name, anchor2.gid, anchor2.glyph_name, anchor2.component, + anchor2.locked, anchor2.pos), + ("MARK_top", 121, "gravecomb", 1, False, (None, 0, 450, {}, {}, + {})) + ) + self.assertEqual( + (anchor3.name, anchor3.gid, anchor3.glyph_name, anchor3.component, + anchor3.locked, anchor3.pos), + ("top", 31, "a", 1, False, (None, 210, 450, {}, {}, {})) + ) + self.assertEqual( + (anchor4.name, anchor4.gid, anchor4.glyph_name, anchor4.component, + anchor4.locked, anchor4.pos), + ("top", 35, "e", 1, False, (None, 215, 450, {}, {}, {})) + ) + + def test_position_attach_cursive(self): + [lookup] = self.parse( + 'DEF_LOOKUP "SomeLookup" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION RTL\n' 
+ 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_POSITION\n' + 'ATTACH_CURSIVE EXIT GLYPH "a" GLYPH "b" ENTER GLYPH "c"\n' + 'END_ATTACH\n' + 'END_POSITION\n' + ).statements + self.assertEqual( + (lookup.name, + lookup.pos.coverages_exit, lookup.pos.coverages_enter), + ("SomeLookup", + [("a", "b")], [("c",)]) + ) + + def test_position_adjust_pair(self): + [lookup] = self.parse( + 'DEF_LOOKUP "kern1" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION RTL\n' + 'IN_CONTEXT\n' + 'END_CONTEXT\n' + 'AS_POSITION\n' + 'ADJUST_PAIR\n' + ' FIRST GLYPH "A"\n' + ' SECOND GLYPH "V"\n' + ' 1 2 BY POS ADV -30 END_POS POS END_POS\n' + ' 2 1 BY POS ADV -30 END_POS POS END_POS\n' + 'END_ADJUST\n' + 'END_POSITION\n' + ).statements + self.assertEqual( + (lookup.name, lookup.pos.coverages_1, lookup.pos.coverages_2, + lookup.pos.adjust_pair), + ("kern1", [("A",)], [("V",)], + {(1, 2): ((-30, None, None, {}, {}, {}), + (None, None, None, {}, {}, {})), + (2, 1): ((-30, None, None, {}, {}, {}), + (None, None, None, {}, {}, {}))}) + ) + + def test_position_adjust_single(self): + [lookup] = self.parse( + 'DEF_LOOKUP "TestLookup" PROCESS_BASE PROCESS_MARKS ALL ' + 'DIRECTION LTR\n' + 'IN_CONTEXT\n' + # 'LEFT GLYPH "leftGlyph"\n' + # 'RIGHT GLYPH "rightGlyph"\n' + 'END_CONTEXT\n' + 'AS_POSITION\n' + 'ADJUST_SINGLE' + ' GLYPH "glyph1" BY POS ADV 0 DX 123 END_POS\n' + ' GLYPH "glyph2" BY POS ADV 0 DX 456 END_POS\n' + 'END_ADJUST\n' + 'END_POSITION\n' + ).statements + self.assertEqual( + (lookup.name, lookup.pos.adjust_single), + ("TestLookup", + [(("glyph1",), (0, 123, None, {}, {}, {})), + (("glyph2",), (0, 456, None, {}, {}, {}))]) + ) + + def test_def_anchor(self): + [anchor1, anchor2, anchor3] = self.parse( + 'DEF_ANCHOR "top" ON 120 GLYPH a ' + 'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n' + 'DEF_ANCHOR "MARK_top" ON 120 GLYPH acutecomb ' + 'COMPONENT 1 AT POS DX 0 DY 450 END_POS END_ANCHOR\n' + 'DEF_ANCHOR "bottom" ON 120 GLYPH a ' + 'COMPONENT 1 AT POS DX 250 DY 0 END_POS END_ANCHOR\n' + 
).statements + self.assertEqual( + (anchor1.name, anchor1.gid, anchor1.glyph_name, anchor1.component, + anchor1.locked, anchor1.pos), + ("top", 120, "a", 1, + False, (None, 250, 450, {}, {}, {})) + ) + self.assertEqual( + (anchor2.name, anchor2.gid, anchor2.glyph_name, anchor2.component, + anchor2.locked, anchor2.pos), + ("MARK_top", 120, "acutecomb", 1, + False, (None, 0, 450, {}, {}, {})) + ) + self.assertEqual( + (anchor3.name, anchor3.gid, anchor3.glyph_name, anchor3.component, + anchor3.locked, anchor3.pos), + ("bottom", 120, "a", 1, + False, (None, 250, 0, {}, {}, {})) + ) + + def test_def_anchor_duplicate(self): + self.assertRaisesRegex( + VoltLibError, + 'Anchor "dupe" already defined, ' + 'anchor names are case insensitive', + self.parse, + 'DEF_ANCHOR "dupe" ON 120 GLYPH a ' + 'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n' + 'DEF_ANCHOR "dupe" ON 120 GLYPH a ' + 'COMPONENT 1 AT POS DX 250 DY 450 END_POS END_ANCHOR\n' + ) + + def test_def_anchor_locked(self): + [anchor] = self.parse( + 'DEF_ANCHOR "top" ON 120 GLYPH a ' + 'COMPONENT 1 LOCKED AT POS DX 250 DY 450 END_POS END_ANCHOR\n' + ).statements + self.assertEqual( + (anchor.name, anchor.gid, anchor.glyph_name, anchor.component, + anchor.locked, anchor.pos), + ("top", 120, "a", 1, + True, (None, 250, 450, {}, {}, {})) + ) + + def test_anchor_adjust_device(self): + [anchor] = self.parse( + 'DEF_ANCHOR "MARK_top" ON 123 GLYPH diacglyph ' + 'COMPONENT 1 AT POS DX 0 DY 456 ADJUST_BY 12 AT 34 ' + 'ADJUST_BY 56 AT 78 END_POS END_ANCHOR' + ).statements + self.assertEqual( + (anchor.name, anchor.pos), + ("MARK_top", (None, 0, 456, {}, {}, {34: 12, 78: 56})) + ) + + def test_ppem(self): + [grid_ppem, pres_ppem, ppos_ppem] = self.parse( + 'GRID_PPEM 20\n' + 'PRESENTATION_PPEM 72\n' + 'PPOSITIONING_PPEM 144\n' + ).statements + self.assertEqual( + ((grid_ppem.name, grid_ppem.value), + (pres_ppem.name, pres_ppem.value), + (ppos_ppem.name, ppos_ppem.value)), + (("GRID_PPEM", 20), ("PRESENTATION_PPEM", 72), + 
("PPOSITIONING_PPEM", 144)) + ) + + def test_compiler_flags(self): + [setting1, setting2] = self.parse( + 'COMPILER_USEEXTENSIONLOOKUPS\n' + 'COMPILER_USEPAIRPOSFORMAT2\n' + ).statements + self.assertEqual( + ((setting1.name, setting1.value), + (setting2.name, setting2.value)), + (("COMPILER_USEEXTENSIONLOOKUPS", True), + ("COMPILER_USEPAIRPOSFORMAT2", True)) + ) + + def test_cmap(self): + [cmap_format1, cmap_format2, cmap_format3] = self.parse( + 'CMAP_FORMAT 0 3 4\n' + 'CMAP_FORMAT 1 0 6\n' + 'CMAP_FORMAT 3 1 4\n' + ).statements + self.assertEqual( + ((cmap_format1.name, cmap_format1.value), + (cmap_format2.name, cmap_format2.value), + (cmap_format3.name, cmap_format3.value)), + (("CMAP_FORMAT", (0, 3, 4)), + ("CMAP_FORMAT", (1, 0, 6)), + ("CMAP_FORMAT", (3, 1, 4))) + ) + + def setUp(self): + self.tempdir = None + self.num_tempfiles = 0 + + def tearDown(self): + if self.tempdir: + shutil.rmtree(self.tempdir) + + def parse(self, text): + if not self.tempdir: + self.tempdir = tempfile.mkdtemp() + self.num_tempfiles += 1 + path = os.path.join(self.tempdir, "tmp%d.vtp" % self.num_tempfiles) + with open(path, "w") as outfile: + outfile.write(text) + return Parser(path).parse() + +if __name__ == "__main__": + import sys + sys.exit(unittest.main()) diff -Nru fonttools-3.0/Tools/fontTools/afmLib.py fonttools-3.21.2/Tools/fontTools/afmLib.py --- fonttools-3.0/Tools/fontTools/afmLib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/afmLib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,376 +0,0 @@ -"""Module for reading and writing AFM files.""" - -# XXX reads AFM's generated by Fog, not tested with much else. -# It does not implement the full spec (Adobe Technote 5004, Adobe Font Metrics -# File Format Specification). Still, it should read most "common" AFM files. 
- -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import re - -# every single line starts with a "word" -identifierRE = re.compile("^([A-Za-z]+).*") - -# regular expression to parse char lines -charRE = re.compile( - "(-?\d+)" # charnum - "\s*;\s*WX\s+" # ; WX - "(-?\d+)" # width - "\s*;\s*N\s+" # ; N - "([.A-Za-z0-9_]+)" # charname - "\s*;\s*B\s+" # ; B - "(-?\d+)" # left - "\s+" - "(-?\d+)" # bottom - "\s+" - "(-?\d+)" # right - "\s+" - "(-?\d+)" # top - "\s*;\s*" # ; - ) - -# regular expression to parse kerning lines -kernRE = re.compile( - "([.A-Za-z0-9_]+)" # leftchar - "\s+" - "([.A-Za-z0-9_]+)" # rightchar - "\s+" - "(-?\d+)" # value - "\s*" - ) - -# regular expressions to parse composite info lines of the form: -# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ; -compositeRE = re.compile( - "([.A-Za-z0-9_]+)" # char name - "\s+" - "(\d+)" # number of parts - "\s*;\s*" - ) -componentRE = re.compile( - "PCC\s+" # PPC - "([.A-Za-z0-9_]+)" # base char name - "\s+" - "(-?\d+)" # x offset - "\s+" - "(-?\d+)" # y offset - "\s*;\s*" - ) - -preferredAttributeOrder = [ - "FontName", - "FullName", - "FamilyName", - "Weight", - "ItalicAngle", - "IsFixedPitch", - "FontBBox", - "UnderlinePosition", - "UnderlineThickness", - "Version", - "Notice", - "EncodingScheme", - "CapHeight", - "XHeight", - "Ascender", - "Descender", -] - - -class error(Exception): - pass - - -class AFM(object): - - _attrs = None - - _keywords = ['StartFontMetrics', - 'EndFontMetrics', - 'StartCharMetrics', - 'EndCharMetrics', - 'StartKernData', - 'StartKernPairs', - 'EndKernPairs', - 'EndKernData', - 'StartComposites', - 'EndComposites', - ] - - def __init__(self, path=None): - self._attrs = {} - self._chars = {} - self._kerning = {} - self._index = {} - self._comments = [] - self._composites = {} - if path is not None: - self.read(path) - - def read(self, path): - lines = readlines(path) - for line in lines: - if not line.strip(): - continue - m = 
identifierRE.match(line) - if m is None: - raise error("syntax error in AFM file: " + repr(line)) - - pos = m.regs[1][1] - word = line[:pos] - rest = line[pos:].strip() - if word in self._keywords: - continue - if word == "C": - self.parsechar(rest) - elif word == "KPX": - self.parsekernpair(rest) - elif word == "CC": - self.parsecomposite(rest) - else: - self.parseattr(word, rest) - - def parsechar(self, rest): - m = charRE.match(rest) - if m is None: - raise error("syntax error in AFM file: " + repr(rest)) - things = [] - for fr, to in m.regs[1:]: - things.append(rest[fr:to]) - charname = things[2] - del things[2] - charnum, width, l, b, r, t = (int(thing) for thing in things) - self._chars[charname] = charnum, width, (l, b, r, t) - - def parsekernpair(self, rest): - m = kernRE.match(rest) - if m is None: - raise error("syntax error in AFM file: " + repr(rest)) - things = [] - for fr, to in m.regs[1:]: - things.append(rest[fr:to]) - leftchar, rightchar, value = things - value = int(value) - self._kerning[(leftchar, rightchar)] = value - - def parseattr(self, word, rest): - if word == "FontBBox": - l, b, r, t = [int(thing) for thing in rest.split()] - self._attrs[word] = l, b, r, t - elif word == "Comment": - self._comments.append(rest) - else: - try: - value = int(rest) - except (ValueError, OverflowError): - self._attrs[word] = rest - else: - self._attrs[word] = value - - def parsecomposite(self, rest): - m = compositeRE.match(rest) - if m is None: - raise error("syntax error in AFM file: " + repr(rest)) - charname = m.group(1) - ncomponents = int(m.group(2)) - rest = rest[m.regs[0][1]:] - components = [] - while True: - m = componentRE.match(rest) - if m is None: - raise error("syntax error in AFM file: " + repr(rest)) - basechar = m.group(1) - xoffset = int(m.group(2)) - yoffset = int(m.group(3)) - components.append((basechar, xoffset, yoffset)) - rest = rest[m.regs[0][1]:] - if not rest: - break - assert len(components) == ncomponents - 
self._composites[charname] = components - - def write(self, path, sep='\r'): - import time - lines = [ "StartFontMetrics 2.0", - "Comment Generated by afmLib; at %s" % ( - time.strftime("%m/%d/%Y %H:%M:%S", - time.localtime(time.time())))] - - # write comments, assuming (possibly wrongly!) they should - # all appear at the top - for comment in self._comments: - lines.append("Comment " + comment) - - # write attributes, first the ones we know about, in - # a preferred order - attrs = self._attrs - for attr in preferredAttributeOrder: - if attr in attrs: - value = attrs[attr] - if attr == "FontBBox": - value = "%s %s %s %s" % value - lines.append(attr + " " + str(value)) - # then write the attributes we don't know about, - # in alphabetical order - items = sorted(attrs.items()) - for attr, value in items: - if attr in preferredAttributeOrder: - continue - lines.append(attr + " " + str(value)) - - # write char metrics - lines.append("StartCharMetrics " + repr(len(self._chars))) - items = [(charnum, (charname, width, box)) for charname, (charnum, width, box) in self._chars.items()] - - def myKey(a): - """Custom key function to make sure unencoded chars (-1) - end up at the end of the list after sorting.""" - if a[0] == -1: - a = (0xffff,) + a[1:] # 0xffff is an arbitrary large number - return a - items.sort(key=myKey) - - for charnum, (charname, width, (l, b, r, t)) in items: - lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" % - (charnum, width, charname, l, b, r, t)) - lines.append("EndCharMetrics") - - # write kerning info - lines.append("StartKernData") - lines.append("StartKernPairs " + repr(len(self._kerning))) - items = sorted(self._kerning.items()) - for (leftchar, rightchar), value in items: - lines.append("KPX %s %s %d" % (leftchar, rightchar, value)) - lines.append("EndKernPairs") - lines.append("EndKernData") - - if self._composites: - composites = sorted(self._composites.items()) - lines.append("StartComposites %s" % len(self._composites)) - for 
charname, components in composites: - line = "CC %s %s ;" % (charname, len(components)) - for basechar, xoffset, yoffset in components: - line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset) - lines.append(line) - lines.append("EndComposites") - - lines.append("EndFontMetrics") - - writelines(path, lines, sep) - - def has_kernpair(self, pair): - return pair in self._kerning - - def kernpairs(self): - return list(self._kerning.keys()) - - def has_char(self, char): - return char in self._chars - - def chars(self): - return list(self._chars.keys()) - - def comments(self): - return self._comments - - def addComment(self, comment): - self._comments.append(comment) - - def addComposite(self, glyphName, components): - self._composites[glyphName] = components - - def __getattr__(self, attr): - if attr in self._attrs: - return self._attrs[attr] - else: - raise AttributeError(attr) - - def __setattr__(self, attr, value): - # all attrs *not* starting with "_" are consider to be AFM keywords - if attr[:1] == "_": - self.__dict__[attr] = value - else: - self._attrs[attr] = value - - def __delattr__(self, attr): - # all attrs *not* starting with "_" are consider to be AFM keywords - if attr[:1] == "_": - try: - del self.__dict__[attr] - except KeyError: - raise AttributeError(attr) - else: - try: - del self._attrs[attr] - except KeyError: - raise AttributeError(attr) - - def __getitem__(self, key): - if isinstance(key, tuple): - # key is a tuple, return the kernpair - return self._kerning[key] - else: - # return the metrics instead - return self._chars[key] - - def __setitem__(self, key, value): - if isinstance(key, tuple): - # key is a tuple, set kernpair - self._kerning[key] = value - else: - # set char metrics - self._chars[key] = value - - def __delitem__(self, key): - if isinstance(key, tuple): - # key is a tuple, del kernpair - del self._kerning[key] - else: - # del char metrics - del self._chars[key] - - def __repr__(self): - if hasattr(self, "FullName"): - 
return '' % self.FullName - else: - return '' % id(self) - - -def readlines(path): - f = open(path, 'rb') - data = f.read() - f.close() - # read any text file, regardless whether it's formatted for Mac, Unix or Dos - sep = "" - if '\r' in data: - sep = sep + '\r' # mac or dos - if '\n' in data: - sep = sep + '\n' # unix or dos - return data.split(sep) - -def writelines(path, lines, sep='\r'): - f = open(path, 'wb') - for line in lines: - f.write(line + sep) - f.close() - - -if __name__ == "__main__": - import EasyDialogs - path = EasyDialogs.AskFileForOpen() - if path: - afm = AFM(path) - char = 'A' - if afm.has_char(char): - print(afm[char]) # print charnum, width and boundingbox - pair = ('A', 'V') - if afm.has_kernpair(pair): - print(afm[pair]) # print kerning value for pair - print(afm.Version) # various other afm entries have become attributes - print(afm.Weight) - # afm.comments() returns a list of all Comment lines found in the AFM - print(afm.comments()) - #print afm.chars() - #print afm.kernpairs() - print(afm) - afm.write(path + ".muck") diff -Nru fonttools-3.0/Tools/fontTools/agl.py fonttools-3.21.2/Tools/fontTools/agl.py --- fonttools-3.0/Tools/fontTools/agl.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/agl.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,738 +0,0 @@ -# The table below is taken from -# http://www.adobe.com/devnet/opentype/archives/aglfn.txt - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -_aglText = """\ -# ----------------------------------------------------------- -# Copyright 2003, 2005-2008, 2010 Adobe Systems Incorporated. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or -# without modification, are permitted provided that the -# following conditions are met: -# -# Redistributions of source code must retain the above -# copyright notice, this list of conditions and the following -# disclaimer. 
-# -# Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials -# provided with the distribution. -# -# Neither the name of Adobe Systems Incorporated nor the names -# of its contributors may be used to endorse or promote -# products derived from this software without specific prior -# written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND -# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# ----------------------------------------------------------- -# Name: Adobe Glyph List For New Fonts -# Table version: 1.7 -# Date: November 6, 2008 -# URL: http://sourceforge.net/adobe/aglfn/ -# -# Description: -# -# AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph -# names that are recommended for new fonts, which are compatible with -# the AGL (Adobe Glyph List) Specification, and which should be used -# as described in Section 6 of that document. AGLFN comprises the set -# of glyph names from AGL that map via the AGL Specification rules to -# the semantically correct UV (Unicode Value). 
For example, "Asmall" -# is omitted because AGL maps this glyph name to the PUA (Private Use -# Area) value U+F761, rather than to the UV that maps from the glyph -# name "A." Also omitted is "ffi," because AGL maps this to the -# Alphabetic Presentation Forms value U+FB03, rather than decomposing -# it into the following sequence of three UVs: U+0066, U+0066, and -# U+0069. The name "arrowvertex" has been omitted because this glyph -# now has a real UV, and AGL is now incorrect in mapping it to the PUA -# value U+F8E6. If you do not find an appropriate name for your glyph -# in this list, then please refer to Section 6 of the AGL -# Specification. -# -# Format: three semicolon-delimited fields: -# (1) Standard UV or CUS UV--four uppercase hexadecimal digits -# (2) Glyph name--upper/lowercase letters and digits -# (3) Character names: Unicode character names for standard UVs, and -# descriptive names for CUS UVs--uppercase letters, hyphen, and -# space -# -# The records are sorted by glyph name in increasing ASCII order, -# entries with the same glyph name are sorted in decreasing priority -# order, the UVs and Unicode character names are provided for -# convenience, lines starting with "#" are comments, and blank lines -# should be ignored. -# -# Revision History: -# -# 1.7 [6 November 2008] -# - Reverted to the original 1.4 and earlier mappings for Delta, -# Omega, and mu. -# - Removed mappings for "afii" names. These should now be assigned -# "uni" names. -# - Removed mappings for "commaaccent" names. These should now be -# assigned "uni" names. -# -# 1.6 [30 January 2006] -# - Completed work intended in 1.5. -# -# 1.5 [23 November 2005] -# - Removed duplicated block at end of file. 
-# - Changed mappings: -# 2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA -# 2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA -# 03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU -# - Corrected statement above about why "ffi" is omitted. -# -# 1.4 [24 September 2003] -# - Changed version to 1.4, to avoid confusion with the AGL 1.3. -# - Fixed spelling errors in the header. -# - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode -# value in some fonts. -# -# 1.1 [17 April 2003] -# - Renamed [Tt]cedilla back to [Tt]commaaccent. -# -# 1.0 [31 January 2003] -# - Original version. -# - Derived from the AGLv1.2 by: -# removing the PUA area codes; -# removing duplicate Unicode mappings; and -# renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla" -# -0041;A;LATIN CAPITAL LETTER A -00C6;AE;LATIN CAPITAL LETTER AE -01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE -00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE -0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE -00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX -00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS -00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE -0391;Alpha;GREEK CAPITAL LETTER ALPHA -0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS -0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON -0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK -00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE -01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE -00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE -0042;B;LATIN CAPITAL LETTER B -0392;Beta;GREEK CAPITAL LETTER BETA -0043;C;LATIN CAPITAL LETTER C -0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE -010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON -00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA -0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX -010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE -03A7;Chi;GREEK CAPITAL LETTER CHI -0044;D;LATIN CAPITAL LETTER D -010E;Dcaron;LATIN 
CAPITAL LETTER D WITH CARON -0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE -2206;Delta;INCREMENT -0045;E;LATIN CAPITAL LETTER E -00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE -0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE -011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON -00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX -00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS -0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE -00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE -0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON -014A;Eng;LATIN CAPITAL LETTER ENG -0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK -0395;Epsilon;GREEK CAPITAL LETTER EPSILON -0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS -0397;Eta;GREEK CAPITAL LETTER ETA -0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS -00D0;Eth;LATIN CAPITAL LETTER ETH -20AC;Euro;EURO SIGN -0046;F;LATIN CAPITAL LETTER F -0047;G;LATIN CAPITAL LETTER G -0393;Gamma;GREEK CAPITAL LETTER GAMMA -011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE -01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON -011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX -0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE -0048;H;LATIN CAPITAL LETTER H -25CF;H18533;BLACK CIRCLE -25AA;H18543;BLACK SMALL SQUARE -25AB;H18551;WHITE SMALL SQUARE -25A1;H22073;WHITE SQUARE -0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE -0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX -0049;I;LATIN CAPITAL LETTER I -0132;IJ;LATIN CAPITAL LIGATURE IJ -00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE -012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE -00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX -00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS -0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE -2111;Ifraktur;BLACK-LETTER CAPITAL I -00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE -012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON -012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK -0399;Iota;GREEK CAPITAL LETTER IOTA -03AA;Iotadieresis;GREEK CAPITAL 
LETTER IOTA WITH DIALYTIKA -038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS -0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE -004A;J;LATIN CAPITAL LETTER J -0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX -004B;K;LATIN CAPITAL LETTER K -039A;Kappa;GREEK CAPITAL LETTER KAPPA -004C;L;LATIN CAPITAL LETTER L -0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE -039B;Lambda;GREEK CAPITAL LETTER LAMDA -013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON -013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT -0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE -004D;M;LATIN CAPITAL LETTER M -039C;Mu;GREEK CAPITAL LETTER MU -004E;N;LATIN CAPITAL LETTER N -0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE -0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON -00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE -039D;Nu;GREEK CAPITAL LETTER NU -004F;O;LATIN CAPITAL LETTER O -0152;OE;LATIN CAPITAL LIGATURE OE -00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE -014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE -00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX -00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS -00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE -01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN -0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE -014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON -2126;Omega;OHM SIGN -038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS -039F;Omicron;GREEK CAPITAL LETTER OMICRON -038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS -00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE -01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE -00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE -0050;P;LATIN CAPITAL LETTER P -03A6;Phi;GREEK CAPITAL LETTER PHI -03A0;Pi;GREEK CAPITAL LETTER PI -03A8;Psi;GREEK CAPITAL LETTER PSI -0051;Q;LATIN CAPITAL LETTER Q -0052;R;LATIN CAPITAL LETTER R -0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE -0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON -211C;Rfraktur;BLACK-LETTER CAPITAL R -03A1;Rho;GREEK CAPITAL LETTER RHO 
-0053;S;LATIN CAPITAL LETTER S -250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT -2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT -2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT -2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT -253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL -252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL -2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL -251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT -2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT -2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL -2502;SF110000;BOX DRAWINGS LIGHT VERTICAL -2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE -2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE -2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE -2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE -2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT -2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL -2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT -255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT -255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE -255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE -255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE -255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE -255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT -2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT -2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL -2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL -2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT -2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL -256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL -2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE -2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE -2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE -2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE -2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE -2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE -2552;SF510000;BOX DRAWINGS DOWN SINGLE AND 
RIGHT DOUBLE -2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE -256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE -256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE -015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE -0160;Scaron;LATIN CAPITAL LETTER S WITH CARON -015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA -015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX -03A3;Sigma;GREEK CAPITAL LETTER SIGMA -0054;T;LATIN CAPITAL LETTER T -03A4;Tau;GREEK CAPITAL LETTER TAU -0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE -0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON -0398;Theta;GREEK CAPITAL LETTER THETA -00DE;Thorn;LATIN CAPITAL LETTER THORN -0055;U;LATIN CAPITAL LETTER U -00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE -016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE -00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX -00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS -00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE -01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN -0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE -016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON -0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK -03A5;Upsilon;GREEK CAPITAL LETTER UPSILON -03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL -03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA -038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS -016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE -0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE -0056;V;LATIN CAPITAL LETTER V -0057;W;LATIN CAPITAL LETTER W -1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE -0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX -1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS -1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE -0058;X;LATIN CAPITAL LETTER X -039E;Xi;GREEK CAPITAL LETTER XI -0059;Y;LATIN CAPITAL LETTER Y -00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE -0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX -0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS 
-1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE -005A;Z;LATIN CAPITAL LETTER Z -0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE -017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON -017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE -0396;Zeta;GREEK CAPITAL LETTER ZETA -0061;a;LATIN SMALL LETTER A -00E1;aacute;LATIN SMALL LETTER A WITH ACUTE -0103;abreve;LATIN SMALL LETTER A WITH BREVE -00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX -00B4;acute;ACUTE ACCENT -0301;acutecomb;COMBINING ACUTE ACCENT -00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS -00E6;ae;LATIN SMALL LETTER AE -01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE -00E0;agrave;LATIN SMALL LETTER A WITH GRAVE -2135;aleph;ALEF SYMBOL -03B1;alpha;GREEK SMALL LETTER ALPHA -03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS -0101;amacron;LATIN SMALL LETTER A WITH MACRON -0026;ampersand;AMPERSAND -2220;angle;ANGLE -2329;angleleft;LEFT-POINTING ANGLE BRACKET -232A;angleright;RIGHT-POINTING ANGLE BRACKET -0387;anoteleia;GREEK ANO TELEIA -0105;aogonek;LATIN SMALL LETTER A WITH OGONEK -2248;approxequal;ALMOST EQUAL TO -00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE -01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE -2194;arrowboth;LEFT RIGHT ARROW -21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW -21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW -21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW -21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW -21D1;arrowdblup;UPWARDS DOUBLE ARROW -2193;arrowdown;DOWNWARDS ARROW -2190;arrowleft;LEFTWARDS ARROW -2192;arrowright;RIGHTWARDS ARROW -2191;arrowup;UPWARDS ARROW -2195;arrowupdn;UP DOWN ARROW -21A8;arrowupdnbse;UP DOWN ARROW WITH BASE -005E;asciicircum;CIRCUMFLEX ACCENT -007E;asciitilde;TILDE -002A;asterisk;ASTERISK -2217;asteriskmath;ASTERISK OPERATOR -0040;at;COMMERCIAL AT -00E3;atilde;LATIN SMALL LETTER A WITH TILDE -0062;b;LATIN SMALL LETTER B -005C;backslash;REVERSE SOLIDUS -007C;bar;VERTICAL LINE -03B2;beta;GREEK SMALL LETTER BETA -2588;block;FULL BLOCK -007B;braceleft;LEFT CURLY 
BRACKET -007D;braceright;RIGHT CURLY BRACKET -005B;bracketleft;LEFT SQUARE BRACKET -005D;bracketright;RIGHT SQUARE BRACKET -02D8;breve;BREVE -00A6;brokenbar;BROKEN BAR -2022;bullet;BULLET -0063;c;LATIN SMALL LETTER C -0107;cacute;LATIN SMALL LETTER C WITH ACUTE -02C7;caron;CARON -21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS -010D;ccaron;LATIN SMALL LETTER C WITH CARON -00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA -0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX -010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE -00B8;cedilla;CEDILLA -00A2;cent;CENT SIGN -03C7;chi;GREEK SMALL LETTER CHI -25CB;circle;WHITE CIRCLE -2297;circlemultiply;CIRCLED TIMES -2295;circleplus;CIRCLED PLUS -02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT -2663;club;BLACK CLUB SUIT -003A;colon;COLON -20A1;colonmonetary;COLON SIGN -002C;comma;COMMA -2245;congruent;APPROXIMATELY EQUAL TO -00A9;copyright;COPYRIGHT SIGN -00A4;currency;CURRENCY SIGN -0064;d;LATIN SMALL LETTER D -2020;dagger;DAGGER -2021;daggerdbl;DOUBLE DAGGER -010F;dcaron;LATIN SMALL LETTER D WITH CARON -0111;dcroat;LATIN SMALL LETTER D WITH STROKE -00B0;degree;DEGREE SIGN -03B4;delta;GREEK SMALL LETTER DELTA -2666;diamond;BLACK DIAMOND SUIT -00A8;dieresis;DIAERESIS -0385;dieresistonos;GREEK DIALYTIKA TONOS -00F7;divide;DIVISION SIGN -2593;dkshade;DARK SHADE -2584;dnblock;LOWER HALF BLOCK -0024;dollar;DOLLAR SIGN -20AB;dong;DONG SIGN -02D9;dotaccent;DOT ABOVE -0323;dotbelowcomb;COMBINING DOT BELOW -0131;dotlessi;LATIN SMALL LETTER DOTLESS I -22C5;dotmath;DOT OPERATOR -0065;e;LATIN SMALL LETTER E -00E9;eacute;LATIN SMALL LETTER E WITH ACUTE -0115;ebreve;LATIN SMALL LETTER E WITH BREVE -011B;ecaron;LATIN SMALL LETTER E WITH CARON -00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX -00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS -0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE -00E8;egrave;LATIN SMALL LETTER E WITH GRAVE -0038;eight;DIGIT EIGHT -2208;element;ELEMENT OF -2026;ellipsis;HORIZONTAL 
ELLIPSIS -0113;emacron;LATIN SMALL LETTER E WITH MACRON -2014;emdash;EM DASH -2205;emptyset;EMPTY SET -2013;endash;EN DASH -014B;eng;LATIN SMALL LETTER ENG -0119;eogonek;LATIN SMALL LETTER E WITH OGONEK -03B5;epsilon;GREEK SMALL LETTER EPSILON -03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS -003D;equal;EQUALS SIGN -2261;equivalence;IDENTICAL TO -212E;estimated;ESTIMATED SYMBOL -03B7;eta;GREEK SMALL LETTER ETA -03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS -00F0;eth;LATIN SMALL LETTER ETH -0021;exclam;EXCLAMATION MARK -203C;exclamdbl;DOUBLE EXCLAMATION MARK -00A1;exclamdown;INVERTED EXCLAMATION MARK -2203;existential;THERE EXISTS -0066;f;LATIN SMALL LETTER F -2640;female;FEMALE SIGN -2012;figuredash;FIGURE DASH -25A0;filledbox;BLACK SQUARE -25AC;filledrect;BLACK RECTANGLE -0035;five;DIGIT FIVE -215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS -0192;florin;LATIN SMALL LETTER F WITH HOOK -0034;four;DIGIT FOUR -2044;fraction;FRACTION SLASH -20A3;franc;FRENCH FRANC SIGN -0067;g;LATIN SMALL LETTER G -03B3;gamma;GREEK SMALL LETTER GAMMA -011F;gbreve;LATIN SMALL LETTER G WITH BREVE -01E7;gcaron;LATIN SMALL LETTER G WITH CARON -011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX -0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE -00DF;germandbls;LATIN SMALL LETTER SHARP S -2207;gradient;NABLA -0060;grave;GRAVE ACCENT -0300;gravecomb;COMBINING GRAVE ACCENT -003E;greater;GREATER-THAN SIGN -2265;greaterequal;GREATER-THAN OR EQUAL TO -00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK -00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK -2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK -203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK -0068;h;LATIN SMALL LETTER H -0127;hbar;LATIN SMALL LETTER H WITH STROKE -0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX -2665;heart;BLACK HEART SUIT -0309;hookabovecomb;COMBINING HOOK ABOVE -2302;house;HOUSE -02DD;hungarumlaut;DOUBLE ACUTE ACCENT -002D;hyphen;HYPHEN-MINUS 
-0069;i;LATIN SMALL LETTER I -00ED;iacute;LATIN SMALL LETTER I WITH ACUTE -012D;ibreve;LATIN SMALL LETTER I WITH BREVE -00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX -00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS -00EC;igrave;LATIN SMALL LETTER I WITH GRAVE -0133;ij;LATIN SMALL LIGATURE IJ -012B;imacron;LATIN SMALL LETTER I WITH MACRON -221E;infinity;INFINITY -222B;integral;INTEGRAL -2321;integralbt;BOTTOM HALF INTEGRAL -2320;integraltp;TOP HALF INTEGRAL -2229;intersection;INTERSECTION -25D8;invbullet;INVERSE BULLET -25D9;invcircle;INVERSE WHITE CIRCLE -263B;invsmileface;BLACK SMILING FACE -012F;iogonek;LATIN SMALL LETTER I WITH OGONEK -03B9;iota;GREEK SMALL LETTER IOTA -03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA -0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS -03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS -0129;itilde;LATIN SMALL LETTER I WITH TILDE -006A;j;LATIN SMALL LETTER J -0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX -006B;k;LATIN SMALL LETTER K -03BA;kappa;GREEK SMALL LETTER KAPPA -0138;kgreenlandic;LATIN SMALL LETTER KRA -006C;l;LATIN SMALL LETTER L -013A;lacute;LATIN SMALL LETTER L WITH ACUTE -03BB;lambda;GREEK SMALL LETTER LAMDA -013E;lcaron;LATIN SMALL LETTER L WITH CARON -0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT -003C;less;LESS-THAN SIGN -2264;lessequal;LESS-THAN OR EQUAL TO -258C;lfblock;LEFT HALF BLOCK -20A4;lira;LIRA SIGN -2227;logicaland;LOGICAL AND -00AC;logicalnot;NOT SIGN -2228;logicalor;LOGICAL OR -017F;longs;LATIN SMALL LETTER LONG S -25CA;lozenge;LOZENGE -0142;lslash;LATIN SMALL LETTER L WITH STROKE -2591;ltshade;LIGHT SHADE -006D;m;LATIN SMALL LETTER M -00AF;macron;MACRON -2642;male;MALE SIGN -2212;minus;MINUS SIGN -2032;minute;PRIME -00B5;mu;MICRO SIGN -00D7;multiply;MULTIPLICATION SIGN -266A;musicalnote;EIGHTH NOTE -266B;musicalnotedbl;BEAMED EIGHTH NOTES -006E;n;LATIN SMALL LETTER N -0144;nacute;LATIN SMALL LETTER N WITH ACUTE -0149;napostrophe;LATIN SMALL LETTER N 
PRECEDED BY APOSTROPHE -0148;ncaron;LATIN SMALL LETTER N WITH CARON -0039;nine;DIGIT NINE -2209;notelement;NOT AN ELEMENT OF -2260;notequal;NOT EQUAL TO -2284;notsubset;NOT A SUBSET OF -00F1;ntilde;LATIN SMALL LETTER N WITH TILDE -03BD;nu;GREEK SMALL LETTER NU -0023;numbersign;NUMBER SIGN -006F;o;LATIN SMALL LETTER O -00F3;oacute;LATIN SMALL LETTER O WITH ACUTE -014F;obreve;LATIN SMALL LETTER O WITH BREVE -00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX -00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS -0153;oe;LATIN SMALL LIGATURE OE -02DB;ogonek;OGONEK -00F2;ograve;LATIN SMALL LETTER O WITH GRAVE -01A1;ohorn;LATIN SMALL LETTER O WITH HORN -0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE -014D;omacron;LATIN SMALL LETTER O WITH MACRON -03C9;omega;GREEK SMALL LETTER OMEGA -03D6;omega1;GREEK PI SYMBOL -03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS -03BF;omicron;GREEK SMALL LETTER OMICRON -03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS -0031;one;DIGIT ONE -2024;onedotenleader;ONE DOT LEADER -215B;oneeighth;VULGAR FRACTION ONE EIGHTH -00BD;onehalf;VULGAR FRACTION ONE HALF -00BC;onequarter;VULGAR FRACTION ONE QUARTER -2153;onethird;VULGAR FRACTION ONE THIRD -25E6;openbullet;WHITE BULLET -00AA;ordfeminine;FEMININE ORDINAL INDICATOR -00BA;ordmasculine;MASCULINE ORDINAL INDICATOR -221F;orthogonal;RIGHT ANGLE -00F8;oslash;LATIN SMALL LETTER O WITH STROKE -01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE -00F5;otilde;LATIN SMALL LETTER O WITH TILDE -0070;p;LATIN SMALL LETTER P -00B6;paragraph;PILCROW SIGN -0028;parenleft;LEFT PARENTHESIS -0029;parenright;RIGHT PARENTHESIS -2202;partialdiff;PARTIAL DIFFERENTIAL -0025;percent;PERCENT SIGN -002E;period;FULL STOP -00B7;periodcentered;MIDDLE DOT -22A5;perpendicular;UP TACK -2030;perthousand;PER MILLE SIGN -20A7;peseta;PESETA SIGN -03C6;phi;GREEK SMALL LETTER PHI -03D5;phi1;GREEK PHI SYMBOL -03C0;pi;GREEK SMALL LETTER PI -002B;plus;PLUS SIGN -00B1;plusminus;PLUS-MINUS SIGN 
-211E;prescription;PRESCRIPTION TAKE -220F;product;N-ARY PRODUCT -2282;propersubset;SUBSET OF -2283;propersuperset;SUPERSET OF -221D;proportional;PROPORTIONAL TO -03C8;psi;GREEK SMALL LETTER PSI -0071;q;LATIN SMALL LETTER Q -003F;question;QUESTION MARK -00BF;questiondown;INVERTED QUESTION MARK -0022;quotedbl;QUOTATION MARK -201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK -201C;quotedblleft;LEFT DOUBLE QUOTATION MARK -201D;quotedblright;RIGHT DOUBLE QUOTATION MARK -2018;quoteleft;LEFT SINGLE QUOTATION MARK -201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK -2019;quoteright;RIGHT SINGLE QUOTATION MARK -201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK -0027;quotesingle;APOSTROPHE -0072;r;LATIN SMALL LETTER R -0155;racute;LATIN SMALL LETTER R WITH ACUTE -221A;radical;SQUARE ROOT -0159;rcaron;LATIN SMALL LETTER R WITH CARON -2286;reflexsubset;SUBSET OF OR EQUAL TO -2287;reflexsuperset;SUPERSET OF OR EQUAL TO -00AE;registered;REGISTERED SIGN -2310;revlogicalnot;REVERSED NOT SIGN -03C1;rho;GREEK SMALL LETTER RHO -02DA;ring;RING ABOVE -2590;rtblock;RIGHT HALF BLOCK -0073;s;LATIN SMALL LETTER S -015B;sacute;LATIN SMALL LETTER S WITH ACUTE -0161;scaron;LATIN SMALL LETTER S WITH CARON -015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA -015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX -2033;second;DOUBLE PRIME -00A7;section;SECTION SIGN -003B;semicolon;SEMICOLON -0037;seven;DIGIT SEVEN -215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS -2592;shade;MEDIUM SHADE -03C3;sigma;GREEK SMALL LETTER SIGMA -03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA -223C;similar;TILDE OPERATOR -0036;six;DIGIT SIX -002F;slash;SOLIDUS -263A;smileface;WHITE SMILING FACE -0020;space;SPACE -2660;spade;BLACK SPADE SUIT -00A3;sterling;POUND SIGN -220B;suchthat;CONTAINS AS MEMBER -2211;summation;N-ARY SUMMATION -263C;sun;WHITE SUN WITH RAYS -0074;t;LATIN SMALL LETTER T -03C4;tau;GREEK SMALL LETTER TAU -0167;tbar;LATIN SMALL LETTER T WITH STROKE -0165;tcaron;LATIN SMALL LETTER T WITH CARON 
-2234;therefore;THEREFORE -03B8;theta;GREEK SMALL LETTER THETA -03D1;theta1;GREEK THETA SYMBOL -00FE;thorn;LATIN SMALL LETTER THORN -0033;three;DIGIT THREE -215C;threeeighths;VULGAR FRACTION THREE EIGHTHS -00BE;threequarters;VULGAR FRACTION THREE QUARTERS -02DC;tilde;SMALL TILDE -0303;tildecomb;COMBINING TILDE -0384;tonos;GREEK TONOS -2122;trademark;TRADE MARK SIGN -25BC;triagdn;BLACK DOWN-POINTING TRIANGLE -25C4;triaglf;BLACK LEFT-POINTING POINTER -25BA;triagrt;BLACK RIGHT-POINTING POINTER -25B2;triagup;BLACK UP-POINTING TRIANGLE -0032;two;DIGIT TWO -2025;twodotenleader;TWO DOT LEADER -2154;twothirds;VULGAR FRACTION TWO THIRDS -0075;u;LATIN SMALL LETTER U -00FA;uacute;LATIN SMALL LETTER U WITH ACUTE -016D;ubreve;LATIN SMALL LETTER U WITH BREVE -00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX -00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS -00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE -01B0;uhorn;LATIN SMALL LETTER U WITH HORN -0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE -016B;umacron;LATIN SMALL LETTER U WITH MACRON -005F;underscore;LOW LINE -2017;underscoredbl;DOUBLE LOW LINE -222A;union;UNION -2200;universal;FOR ALL -0173;uogonek;LATIN SMALL LETTER U WITH OGONEK -2580;upblock;UPPER HALF BLOCK -03C5;upsilon;GREEK SMALL LETTER UPSILON -03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA -03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS -03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS -016F;uring;LATIN SMALL LETTER U WITH RING ABOVE -0169;utilde;LATIN SMALL LETTER U WITH TILDE -0076;v;LATIN SMALL LETTER V -0077;w;LATIN SMALL LETTER W -1E83;wacute;LATIN SMALL LETTER W WITH ACUTE -0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX -1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS -2118;weierstrass;SCRIPT CAPITAL P -1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE -0078;x;LATIN SMALL LETTER X -03BE;xi;GREEK SMALL LETTER XI -0079;y;LATIN SMALL LETTER Y -00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE 
-0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX -00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS -00A5;yen;YEN SIGN -1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE -007A;z;LATIN SMALL LETTER Z -017A;zacute;LATIN SMALL LETTER Z WITH ACUTE -017E;zcaron;LATIN SMALL LETTER Z WITH CARON -017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE -0030;zero;DIGIT ZERO -03B6;zeta;GREEK SMALL LETTER ZETA -#END -""" - - -class AGLError(Exception): - pass - -AGL2UV = {} -UV2AGL = {} - -def _builddicts(): - import re - - lines = _aglText.splitlines() - - parseAGL_RE = re.compile("([0-9A-F]{4});([A-Za-z_0-9.]+);.*?$") - - for line in lines: - if not line or line[:1] == '#': - continue - m = parseAGL_RE.match(line) - if not m: - raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20])) - unicode = m.group(1) - assert len(unicode) == 4 - unicode = int(unicode, 16) - glyphName = m.group(2) - if glyphName in AGL2UV: - # the above table contains identical duplicates - assert AGL2UV[glyphName] == unicode - else: - AGL2UV[glyphName] = unicode - UV2AGL[unicode] = glyphName - -_builddicts() diff -Nru fonttools-3.0/Tools/fontTools/cffLib.py fonttools-3.21.2/Tools/fontTools/cffLib.py --- fonttools-3.0/Tools/fontTools/cffLib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/cffLib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1810 +0,0 @@ -"""cffLib.py -- read/write tools for Adobe CFF fonts.""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc import psCharStrings -from fontTools.misc.textTools import safeEval -import struct - -DEBUG = 0 - - -cffHeaderFormat = """ - major: B - minor: B - hdrSize: B - offSize: B -""" - -class CFFFontSet(object): - - def __init__(self): - pass - - def decompile(self, file, otFont): - sstruct.unpack(cffHeaderFormat, file.read(4), self) - assert self.major == 1 and self.minor == 0, \ - "unknown CFF format: %d.%d" 
% (self.major, self.minor) - - file.seek(self.hdrSize) - self.fontNames = list(Index(file)) - self.topDictIndex = TopDictIndex(file) - self.strings = IndexedStrings(file) - self.GlobalSubrs = GlobalSubrsIndex(file) - self.topDictIndex.strings = self.strings - self.topDictIndex.GlobalSubrs = self.GlobalSubrs - - def __len__(self): - return len(self.fontNames) - - def keys(self): - return list(self.fontNames) - - def values(self): - return self.topDictIndex - - def __getitem__(self, name): - try: - index = self.fontNames.index(name) - except ValueError: - raise KeyError(name) - return self.topDictIndex[index] - - def compile(self, file, otFont): - strings = IndexedStrings() - writer = CFFWriter() - writer.add(sstruct.pack(cffHeaderFormat, self)) - fontNames = Index() - for name in self.fontNames: - fontNames.append(name) - writer.add(fontNames.getCompiler(strings, None)) - topCompiler = self.topDictIndex.getCompiler(strings, None) - writer.add(topCompiler) - writer.add(strings.getCompiler()) - writer.add(self.GlobalSubrs.getCompiler(strings, None)) - - for topDict in self.topDictIndex: - if not hasattr(topDict, "charset") or topDict.charset is None: - charset = otFont.getGlyphOrder() - topDict.charset = charset - - for child in topCompiler.getChildren(strings): - writer.add(child) - - writer.toFile(file) - - def toXML(self, xmlWriter, progress=None): - for fontName in self.fontNames: - xmlWriter.begintag("CFFFont", name=tostr(fontName)) - xmlWriter.newline() - font = self[fontName] - font.toXML(xmlWriter, progress) - xmlWriter.endtag("CFFFont") - xmlWriter.newline() - xmlWriter.newline() - xmlWriter.begintag("GlobalSubrs") - xmlWriter.newline() - self.GlobalSubrs.toXML(xmlWriter, progress) - xmlWriter.endtag("GlobalSubrs") - xmlWriter.newline() - - def fromXML(self, name, attrs, content): - if not hasattr(self, "GlobalSubrs"): - self.GlobalSubrs = GlobalSubrsIndex() - self.major = 1 - self.minor = 0 - self.hdrSize = 4 - self.offSize = 4 # XXX ?? 
- if name == "CFFFont": - if not hasattr(self, "fontNames"): - self.fontNames = [] - self.topDictIndex = TopDictIndex() - fontName = attrs["name"] - topDict = TopDict(GlobalSubrs=self.GlobalSubrs) - topDict.charset = None # gets filled in later - self.fontNames.append(fontName) - self.topDictIndex.append(topDict) - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - topDict.fromXML(name, attrs, content) - elif name == "GlobalSubrs": - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - subr = psCharStrings.T2CharString() - subr.fromXML(name, attrs, content) - self.GlobalSubrs.append(subr) - - -class CFFWriter(object): - - def __init__(self): - self.data = [] - - def add(self, table): - self.data.append(table) - - def toFile(self, file): - lastPosList = None - count = 1 - while True: - if DEBUG: - print("CFFWriter.toFile() iteration:", count) - count = count + 1 - pos = 0 - posList = [pos] - for item in self.data: - if hasattr(item, "getDataLength"): - endPos = pos + item.getDataLength() - else: - endPos = pos + len(item) - if hasattr(item, "setPos"): - item.setPos(pos, endPos) - pos = endPos - posList.append(pos) - if posList == lastPosList: - break - lastPosList = posList - if DEBUG: - print("CFFWriter.toFile() writing to file.") - begin = file.tell() - posList = [0] - for item in self.data: - if hasattr(item, "toFile"): - item.toFile(file) - else: - file.write(item) - posList.append(file.tell() - begin) - assert posList == lastPosList - - -def calcOffSize(largestOffset): - if largestOffset < 0x100: - offSize = 1 - elif largestOffset < 0x10000: - offSize = 2 - elif largestOffset < 0x1000000: - offSize = 3 - else: - offSize = 4 - return offSize - - -class IndexCompiler(object): - - def __init__(self, items, strings, parent): - self.items = self.getItems(items, strings) - self.parent = parent - - def getItems(self, items, strings): - return items - - def 
getOffsets(self): - pos = 1 - offsets = [pos] - for item in self.items: - if hasattr(item, "getDataLength"): - pos = pos + item.getDataLength() - else: - pos = pos + len(item) - offsets.append(pos) - return offsets - - def getDataLength(self): - lastOffset = self.getOffsets()[-1] - offSize = calcOffSize(lastOffset) - dataLength = ( - 2 + # count - 1 + # offSize - (len(self.items) + 1) * offSize + # the offsets - lastOffset - 1 # size of object data - ) - return dataLength - - def toFile(self, file): - offsets = self.getOffsets() - writeCard16(file, len(self.items)) - offSize = calcOffSize(offsets[-1]) - writeCard8(file, offSize) - offSize = -offSize - pack = struct.pack - for offset in offsets: - binOffset = pack(">l", offset)[offSize:] - assert len(binOffset) == -offSize - file.write(binOffset) - for item in self.items: - if hasattr(item, "toFile"): - item.toFile(file) - else: - file.write(tobytes(item, encoding="latin1")) - - -class IndexedStringsCompiler(IndexCompiler): - - def getItems(self, items, strings): - return items.strings - - -class TopDictIndexCompiler(IndexCompiler): - - def getItems(self, items, strings): - out = [] - for item in items: - out.append(item.getCompiler(strings, self)) - return out - - def getChildren(self, strings): - children = [] - for topDict in self.items: - children.extend(topDict.getChildren(strings)) - return children - - -class FDArrayIndexCompiler(IndexCompiler): - - def getItems(self, items, strings): - out = [] - for item in items: - out.append(item.getCompiler(strings, self)) - return out - - def getChildren(self, strings): - children = [] - for fontDict in self.items: - children.extend(fontDict.getChildren(strings)) - return children - - def toFile(self, file): - offsets = self.getOffsets() - writeCard16(file, len(self.items)) - offSize = calcOffSize(offsets[-1]) - writeCard8(file, offSize) - offSize = -offSize - pack = struct.pack - for offset in offsets: - binOffset = pack(">l", offset)[offSize:] - assert len(binOffset) 
== -offSize - file.write(binOffset) - for item in self.items: - if hasattr(item, "toFile"): - item.toFile(file) - else: - file.write(item) - - def setPos(self, pos, endPos): - self.parent.rawDict["FDArray"] = pos - - -class GlobalSubrsCompiler(IndexCompiler): - def getItems(self, items, strings): - out = [] - for cs in items: - cs.compile() - out.append(cs.bytecode) - return out - -class SubrsCompiler(GlobalSubrsCompiler): - def setPos(self, pos, endPos): - offset = pos - self.parent.pos - self.parent.rawDict["Subrs"] = offset - -class CharStringsCompiler(GlobalSubrsCompiler): - def setPos(self, pos, endPos): - self.parent.rawDict["CharStrings"] = pos - - -class Index(object): - - """This class represents what the CFF spec calls an INDEX.""" - - compilerClass = IndexCompiler - - def __init__(self, file=None): - self.items = [] - name = self.__class__.__name__ - if file is None: - return - if DEBUG: - print("loading %s at %s" % (name, file.tell())) - self.file = file - count = readCard16(file) - if count == 0: - return - self.items = [None] * count - offSize = readCard8(file) - if DEBUG: - print(" index count: %s offSize: %s" % (count, offSize)) - assert offSize <= 4, "offSize too large: %s" % offSize - self.offsets = offsets = [] - pad = b'\0' * (4 - offSize) - for index in range(count+1): - chunk = file.read(offSize) - chunk = pad + chunk - offset, = struct.unpack(">L", chunk) - offsets.append(int(offset)) - self.offsetBase = file.tell() - 1 - file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot - if DEBUG: - print(" end of %s at %s" % (name, file.tell())) - - def __len__(self): - return len(self.items) - - def __getitem__(self, index): - item = self.items[index] - if item is not None: - return item - offset = self.offsets[index] + self.offsetBase - size = self.offsets[index+1] - self.offsets[index] - file = self.file - file.seek(offset) - data = file.read(size) - assert len(data) == size - item = self.produceItem(index, data, file, offset, 
size) - self.items[index] = item - return item - - def produceItem(self, index, data, file, offset, size): - return data - - def append(self, item): - self.items.append(item) - - def getCompiler(self, strings, parent): - return self.compilerClass(self, strings, parent) - - -class GlobalSubrsIndex(Index): - - compilerClass = GlobalSubrsCompiler - - def __init__(self, file=None, globalSubrs=None, private=None, fdSelect=None, fdArray=None): - Index.__init__(self, file) - self.globalSubrs = globalSubrs - self.private = private - if fdSelect: - self.fdSelect = fdSelect - if fdArray: - self.fdArray = fdArray - - def produceItem(self, index, data, file, offset, size): - if self.private is not None: - private = self.private - elif hasattr(self, 'fdArray') and self.fdArray is not None: - private = self.fdArray[self.fdSelect[index]].Private - else: - private = None - return psCharStrings.T2CharString(data, private=private, globalSubrs=self.globalSubrs) - - def toXML(self, xmlWriter, progress): - xmlWriter.comment("The 'index' attribute is only for humans; it is ignored when parsed.") - xmlWriter.newline() - for i in range(len(self)): - subr = self[i] - if subr.needsDecompilation(): - xmlWriter.begintag("CharString", index=i, raw=1) - else: - xmlWriter.begintag("CharString", index=i) - xmlWriter.newline() - subr.toXML(xmlWriter) - xmlWriter.endtag("CharString") - xmlWriter.newline() - - def fromXML(self, name, attrs, content): - if name != "CharString": - return - subr = psCharStrings.T2CharString() - subr.fromXML(name, attrs, content) - self.append(subr) - - def getItemAndSelector(self, index): - sel = None - if hasattr(self, 'fdSelect'): - sel = self.fdSelect[index] - return self[index], sel - - -class SubrsIndex(GlobalSubrsIndex): - compilerClass = SubrsCompiler - - -class TopDictIndex(Index): - - compilerClass = TopDictIndexCompiler - - def produceItem(self, index, data, file, offset, size): - top = TopDict(self.strings, file, offset, self.GlobalSubrs) - 
top.decompile(data) - return top - - def toXML(self, xmlWriter, progress): - for i in range(len(self)): - xmlWriter.begintag("FontDict", index=i) - xmlWriter.newline() - self[i].toXML(xmlWriter, progress) - xmlWriter.endtag("FontDict") - xmlWriter.newline() - - -class FDArrayIndex(TopDictIndex): - - compilerClass = FDArrayIndexCompiler - - def fromXML(self, name, attrs, content): - if name != "FontDict": - return - fontDict = FontDict() - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - fontDict.fromXML(name, attrs, content) - self.append(fontDict) - - -class FDSelect: - def __init__(self, file=None, numGlyphs=None, format=None): - if file: - # read data in from file - self.format = readCard8(file) - if self.format == 0: - from array import array - self.gidArray = array("B", file.read(numGlyphs)).tolist() - elif self.format == 3: - gidArray = [None] * numGlyphs - nRanges = readCard16(file) - fd = None - prev = None - for i in range(nRanges): - first = readCard16(file) - if prev is not None: - for glyphID in range(prev, first): - gidArray[glyphID] = fd - prev = first - fd = readCard8(file) - if prev is not None: - first = readCard16(file) - for glyphID in range(prev, first): - gidArray[glyphID] = fd - self.gidArray = gidArray - else: - assert False, "unsupported FDSelect format: %s" % format - else: - # reading from XML. Make empty gidArray,, and leave format as passed in. - # format is None will result in the smallest representation being used. 
- self.format = format - self.gidArray = [] - - def __len__(self): - return len(self.gidArray) - - def __getitem__(self, index): - return self.gidArray[index] - - def __setitem__(self, index, fdSelectValue): - self.gidArray[index] = fdSelectValue - - def append(self, fdSelectValue): - self.gidArray.append(fdSelectValue) - - -class CharStrings(object): - - def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray): - if file is not None: - self.charStringsIndex = SubrsIndex(file, globalSubrs, private, fdSelect, fdArray) - self.charStrings = charStrings = {} - for i in range(len(charset)): - charStrings[charset[i]] = i - self.charStringsAreIndexed = 1 - else: - self.charStrings = {} - self.charStringsAreIndexed = 0 - self.globalSubrs = globalSubrs - self.private = private - if fdSelect is not None: - self.fdSelect = fdSelect - if fdArray is not None: - self.fdArray = fdArray - - def keys(self): - return list(self.charStrings.keys()) - - def values(self): - if self.charStringsAreIndexed: - return self.charStringsIndex - else: - return list(self.charStrings.values()) - - def has_key(self, name): - return name in self.charStrings - - __contains__ = has_key - - def __len__(self): - return len(self.charStrings) - - def __getitem__(self, name): - charString = self.charStrings[name] - if self.charStringsAreIndexed: - charString = self.charStringsIndex[charString] - return charString - - def __setitem__(self, name, charString): - if self.charStringsAreIndexed: - index = self.charStrings[name] - self.charStringsIndex[index] = charString - else: - self.charStrings[name] = charString - - def getItemAndSelector(self, name): - if self.charStringsAreIndexed: - index = self.charStrings[name] - return self.charStringsIndex.getItemAndSelector(index) - else: - if hasattr(self, 'fdSelect'): - sel = self.fdSelect[index] # index is not defined at this point. Read R. ? 
- else: - raise KeyError("fdSelect array not yet defined.") - return self.charStrings[name], sel - - def toXML(self, xmlWriter, progress): - names = sorted(self.keys()) - i = 0 - step = 10 - numGlyphs = len(names) - for name in names: - charStr, fdSelectIndex = self.getItemAndSelector(name) - if charStr.needsDecompilation(): - raw = [("raw", 1)] - else: - raw = [] - if fdSelectIndex is None: - xmlWriter.begintag("CharString", [('name', name)] + raw) - else: - xmlWriter.begintag("CharString", - [('name', name), ('fdSelectIndex', fdSelectIndex)] + raw) - xmlWriter.newline() - charStr.toXML(xmlWriter) - xmlWriter.endtag("CharString") - xmlWriter.newline() - if not i % step and progress is not None: - progress.setLabel("Dumping 'CFF ' table... (%s)" % name) - progress.increment(step / numGlyphs) - i = i + 1 - - def fromXML(self, name, attrs, content): - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - if name != "CharString": - continue - fdID = -1 - if hasattr(self, "fdArray"): - fdID = safeEval(attrs["fdSelectIndex"]) - private = self.fdArray[fdID].Private - else: - private = self.private - - glyphName = attrs["name"] - charString = psCharStrings.T2CharString( - private=private, - globalSubrs=self.globalSubrs) - charString.fromXML(name, attrs, content) - if fdID >= 0: - charString.fdSelectIndex = fdID - self[glyphName] = charString - - -def readCard8(file): - return byteord(file.read(1)) - -def readCard16(file): - value, = struct.unpack(">H", file.read(2)) - return value - -def writeCard8(file, value): - file.write(bytechr(value)) - -def writeCard16(file, value): - file.write(struct.pack(">H", value)) - -def packCard8(value): - return bytechr(value) - -def packCard16(value): - return struct.pack(">H", value) - -def buildOperatorDict(table): - d = {} - for op, name, arg, default, conv in table: - d[op] = (name, arg) - return d - -def buildOpcodeDict(table): - d = {} - for op, name, arg, default, conv in table: 
- if isinstance(op, tuple): - op = bytechr(op[0]) + bytechr(op[1]) - else: - op = bytechr(op) - d[name] = (op, arg) - return d - -def buildOrder(table): - l = [] - for op, name, arg, default, conv in table: - l.append(name) - return l - -def buildDefaults(table): - d = {} - for op, name, arg, default, conv in table: - if default is not None: - d[name] = default - return d - -def buildConverters(table): - d = {} - for op, name, arg, default, conv in table: - d[name] = conv - return d - - -class SimpleConverter(object): - def read(self, parent, value): - return value - def write(self, parent, value): - return value - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, value=value) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - return attrs["value"] - -class ASCIIConverter(SimpleConverter): - def read(self, parent, value): - return tostr(value, encoding='ascii') - def write(self, parent, value): - return tobytes(value, encoding='ascii') - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, value=tounicode(value, encoding="ascii")) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - return tobytes(attrs["value"], encoding=("ascii")) - -class Latin1Converter(SimpleConverter): - def read(self, parent, value): - return tostr(value, encoding='latin1') - def write(self, parent, value): - return tobytes(value, encoding='latin1') - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, value=tounicode(value, encoding="latin1")) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - return tobytes(attrs["value"], encoding=("latin1")) - - -def parseNum(s): - try: - value = int(s) - except: - value = float(s) - return value - -class NumberConverter(SimpleConverter): - def xmlRead(self, name, attrs, content, parent): - return parseNum(attrs["value"]) - -class ArrayConverter(SimpleConverter): - def xmlWrite(self, xmlWriter, 
name, value, progress): - value = " ".join(map(str, value)) - xmlWriter.simpletag(name, value=value) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - values = attrs["value"].split() - return [parseNum(value) for value in values] - -class TableConverter(SimpleConverter): - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.begintag(name) - xmlWriter.newline() - value.toXML(xmlWriter, progress) - xmlWriter.endtag(name) - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - ob = self.getClass()() - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - ob.fromXML(name, attrs, content) - return ob - -class PrivateDictConverter(TableConverter): - def getClass(self): - return PrivateDict - def read(self, parent, value): - size, offset = value - file = parent.file - priv = PrivateDict(parent.strings, file, offset) - file.seek(offset) - data = file.read(size) - assert len(data) == size - priv.decompile(data) - return priv - def write(self, parent, value): - return (0, 0) # dummy value - -class SubrsConverter(TableConverter): - def getClass(self): - return SubrsIndex - def read(self, parent, value): - file = parent.file - file.seek(parent.offset + value) # Offset(self) - return SubrsIndex(file) - def write(self, parent, value): - return 0 # dummy value - -class CharStringsConverter(TableConverter): - def read(self, parent, value): - file = parent.file - charset = parent.charset - globalSubrs = parent.GlobalSubrs - if hasattr(parent, "ROS"): - fdSelect, fdArray = parent.FDSelect, parent.FDArray - private = None - else: - fdSelect, fdArray = None, None - private = parent.Private - file.seek(value) # Offset(0) - return CharStrings(file, charset, globalSubrs, private, fdSelect, fdArray) - def write(self, parent, value): - return 0 # dummy value - def xmlRead(self, name, attrs, content, parent): - if hasattr(parent, "ROS"): - # if it is a CID-keyed font, then the 
private Dict is extracted from the parent.FDArray - private, fdSelect, fdArray = None, parent.FDSelect, parent.FDArray - else: - # if it is a name-keyed font, then the private dict is in the top dict, and there is no fdArray. - private, fdSelect, fdArray = parent.Private, None, None - charStrings = CharStrings(None, None, parent.GlobalSubrs, private, fdSelect, fdArray) - charStrings.fromXML(name, attrs, content) - return charStrings - -class CharsetConverter(object): - def read(self, parent, value): - isCID = hasattr(parent, "ROS") - if value > 2: - numGlyphs = parent.numGlyphs - file = parent.file - file.seek(value) - if DEBUG: - print("loading charset at %s" % value) - format = readCard8(file) - if format == 0: - charset = parseCharset0(numGlyphs, file, parent.strings, isCID) - elif format == 1 or format == 2: - charset = parseCharset(numGlyphs, file, parent.strings, isCID, format) - else: - raise NotImplementedError - assert len(charset) == numGlyphs - if DEBUG: - print(" charset end at %s" % file.tell()) - else: # offset == 0 -> no charset data. - if isCID or "CharStrings" not in parent.rawDict: - assert value == 0 # We get here only when processing fontDicts from the FDArray of CFF-CID fonts. Only the real topDict references the chrset. - charset = None - elif value == 0: - charset = cffISOAdobeStrings - elif value == 1: - charset = cffIExpertStrings - elif value == 2: - charset = cffExpertSubsetStrings - return charset - - def write(self, parent, value): - return 0 # dummy value - def xmlWrite(self, xmlWriter, name, value, progress): - # XXX only write charset when not in OT/TTX context, where we - # dump charset as a separate "GlyphOrder" table. 
- ##xmlWriter.simpletag("charset") - xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element") - xmlWriter.newline() - def xmlRead(self, name, attrs, content, parent): - if 0: - return safeEval(attrs["value"]) - - -class CharsetCompiler(object): - - def __init__(self, strings, charset, parent): - assert charset[0] == '.notdef' - isCID = hasattr(parent.dictObj, "ROS") - data0 = packCharset0(charset, isCID, strings) - data = packCharset(charset, isCID, strings) - if len(data) < len(data0): - self.data = data - else: - self.data = data0 - self.parent = parent - - def setPos(self, pos, endPos): - self.parent.rawDict["charset"] = pos - - def getDataLength(self): - return len(self.data) - - def toFile(self, file): - file.write(self.data) - - -def getCIDfromName(name, strings): - return int(name[3:]) - -def getSIDfromName(name, strings): - return strings.getSID(name) - -def packCharset0(charset, isCID, strings): - fmt = 0 - data = [packCard8(fmt)] - if isCID: - getNameID = getCIDfromName - else: - getNameID = getSIDfromName - - for name in charset[1:]: - data.append(packCard16(getNameID(name,strings))) - return bytesjoin(data) - - -def packCharset(charset, isCID, strings): - fmt = 1 - ranges = [] - first = None - end = 0 - if isCID: - getNameID = getCIDfromName - else: - getNameID = getSIDfromName - - for name in charset[1:]: - SID = getNameID(name, strings) - if first is None: - first = SID - elif end + 1 != SID: - nLeft = end - first - if nLeft > 255: - fmt = 2 - ranges.append((first, nLeft)) - first = SID - end = SID - if end: - nLeft = end - first - if nLeft > 255: - fmt = 2 - ranges.append((first, nLeft)) - - data = [packCard8(fmt)] - if fmt == 1: - nLeftFunc = packCard8 - else: - nLeftFunc = packCard16 - for first, nLeft in ranges: - data.append(packCard16(first) + nLeftFunc(nLeft)) - return bytesjoin(data) - -def parseCharset0(numGlyphs, file, strings, isCID): - charset = [".notdef"] - if isCID: - for i in range(numGlyphs - 1): - CID = 
readCard16(file) - charset.append("cid" + str(CID).zfill(5)) - else: - for i in range(numGlyphs - 1): - SID = readCard16(file) - charset.append(strings[SID]) - return charset - -def parseCharset(numGlyphs, file, strings, isCID, fmt): - charset = ['.notdef'] - count = 1 - if fmt == 1: - nLeftFunc = readCard8 - else: - nLeftFunc = readCard16 - while count < numGlyphs: - first = readCard16(file) - nLeft = nLeftFunc(file) - if isCID: - for CID in range(first, first+nLeft+1): - charset.append("cid" + str(CID).zfill(5)) - else: - for SID in range(first, first+nLeft+1): - charset.append(strings[SID]) - count = count + nLeft + 1 - return charset - - -class EncodingCompiler(object): - - def __init__(self, strings, encoding, parent): - assert not isinstance(encoding, basestring) - data0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings) - data1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings) - if len(data0) < len(data1): - self.data = data0 - else: - self.data = data1 - self.parent = parent - - def setPos(self, pos, endPos): - self.parent.rawDict["Encoding"] = pos - - def getDataLength(self): - return len(self.data) - - def toFile(self, file): - file.write(self.data) - - -class EncodingConverter(SimpleConverter): - - def read(self, parent, value): - if value == 0: - return "StandardEncoding" - elif value == 1: - return "ExpertEncoding" - else: - assert value > 1 - file = parent.file - file.seek(value) - if DEBUG: - print("loading Encoding at %s" % value) - fmt = readCard8(file) - haveSupplement = fmt & 0x80 - if haveSupplement: - raise NotImplementedError("Encoding supplements are not yet supported") - fmt = fmt & 0x7f - if fmt == 0: - encoding = parseEncoding0(parent.charset, file, haveSupplement, - parent.strings) - elif fmt == 1: - encoding = parseEncoding1(parent.charset, file, haveSupplement, - parent.strings) - return encoding - - def write(self, parent, value): - if value == "StandardEncoding": - return 0 - elif value == 
"ExpertEncoding": - return 1 - return 0 # dummy value - - def xmlWrite(self, xmlWriter, name, value, progress): - if value in ("StandardEncoding", "ExpertEncoding"): - xmlWriter.simpletag(name, name=value) - xmlWriter.newline() - return - xmlWriter.begintag(name) - xmlWriter.newline() - for code in range(len(value)): - glyphName = value[code] - if glyphName != ".notdef": - xmlWriter.simpletag("map", code=hex(code), name=glyphName) - xmlWriter.newline() - xmlWriter.endtag(name) - xmlWriter.newline() - - def xmlRead(self, name, attrs, content, parent): - if "name" in attrs: - return attrs["name"] - encoding = [".notdef"] * 256 - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - code = safeEval(attrs["code"]) - glyphName = attrs["name"] - encoding[code] = glyphName - return encoding - - -def parseEncoding0(charset, file, haveSupplement, strings): - nCodes = readCard8(file) - encoding = [".notdef"] * 256 - for glyphID in range(1, nCodes + 1): - code = readCard8(file) - if code != 0: - encoding[code] = charset[glyphID] - return encoding - -def parseEncoding1(charset, file, haveSupplement, strings): - nRanges = readCard8(file) - encoding = [".notdef"] * 256 - glyphID = 1 - for i in range(nRanges): - code = readCard8(file) - nLeft = readCard8(file) - for glyphID in range(glyphID, glyphID + nLeft + 1): - encoding[code] = charset[glyphID] - code = code + 1 - glyphID = glyphID + 1 - return encoding - -def packEncoding0(charset, encoding, strings): - fmt = 0 - m = {} - for code in range(len(encoding)): - name = encoding[code] - if name != ".notdef": - m[name] = code - codes = [] - for name in charset[1:]: - code = m.get(name) - codes.append(code) - - while codes and codes[-1] is None: - codes.pop() - - data = [packCard8(fmt), packCard8(len(codes))] - for code in codes: - if code is None: - code = 0 - data.append(packCard8(code)) - return bytesjoin(data) - -def packEncoding1(charset, encoding, strings): - fmt = 1 - m = 
{} - for code in range(len(encoding)): - name = encoding[code] - if name != ".notdef": - m[name] = code - ranges = [] - first = None - end = 0 - for name in charset[1:]: - code = m.get(name, -1) - if first is None: - first = code - elif end + 1 != code: - nLeft = end - first - ranges.append((first, nLeft)) - first = code - end = code - nLeft = end - first - ranges.append((first, nLeft)) - - # remove unencoded glyphs at the end. - while ranges and ranges[-1][0] == -1: - ranges.pop() - - data = [packCard8(fmt), packCard8(len(ranges))] - for first, nLeft in ranges: - if first == -1: # unencoded - first = 0 - data.append(packCard8(first) + packCard8(nLeft)) - return bytesjoin(data) - - -class FDArrayConverter(TableConverter): - - def read(self, parent, value): - file = parent.file - file.seek(value) - fdArray = FDArrayIndex(file) - fdArray.strings = parent.strings - fdArray.GlobalSubrs = parent.GlobalSubrs - return fdArray - - def write(self, parent, value): - return 0 # dummy value - - def xmlRead(self, name, attrs, content, parent): - fdArray = FDArrayIndex() - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - fdArray.fromXML(name, attrs, content) - return fdArray - - -class FDSelectConverter(object): - - def read(self, parent, value): - file = parent.file - file.seek(value) - fdSelect = FDSelect(file, parent.numGlyphs) - return fdSelect - - def write(self, parent, value): - return 0 # dummy value - - # The FDSelect glyph data is written out to XML in the charstring keys, - # so we write out only the format selector - def xmlWrite(self, xmlWriter, name, value, progress): - xmlWriter.simpletag(name, [('format', value.format)]) - xmlWriter.newline() - - def xmlRead(self, name, attrs, content, parent): - fmt = safeEval(attrs["format"]) - file = None - numGlyphs = None - fdSelect = FDSelect(file, numGlyphs, fmt) - return fdSelect - - -def packFDSelect0(fdSelectArray): - fmt = 0 - data = [packCard8(fmt)] - for 
index in fdSelectArray: - data.append(packCard8(index)) - return bytesjoin(data) - - -def packFDSelect3(fdSelectArray): - fmt = 3 - fdRanges = [] - first = None - end = 0 - lenArray = len(fdSelectArray) - lastFDIndex = -1 - for i in range(lenArray): - fdIndex = fdSelectArray[i] - if lastFDIndex != fdIndex: - fdRanges.append([i, fdIndex]) - lastFDIndex = fdIndex - sentinelGID = i + 1 - - data = [packCard8(fmt)] - data.append(packCard16( len(fdRanges) )) - for fdRange in fdRanges: - data.append(packCard16(fdRange[0])) - data.append(packCard8(fdRange[1])) - data.append(packCard16(sentinelGID)) - return bytesjoin(data) - - -class FDSelectCompiler(object): - - def __init__(self, fdSelect, parent): - fmt = fdSelect.format - fdSelectArray = fdSelect.gidArray - if fmt == 0: - self.data = packFDSelect0(fdSelectArray) - elif fmt == 3: - self.data = packFDSelect3(fdSelectArray) - else: - # choose smaller of the two formats - data0 = packFDSelect0(fdSelectArray) - data3 = packFDSelect3(fdSelectArray) - if len(data0) < len(data3): - self.data = data0 - fdSelect.format = 0 - else: - self.data = data3 - fdSelect.format = 3 - - self.parent = parent - - def setPos(self, pos, endPos): - self.parent.rawDict["FDSelect"] = pos - - def getDataLength(self): - return len(self.data) - - def toFile(self, file): - file.write(self.data) - - -class ROSConverter(SimpleConverter): - - def xmlWrite(self, xmlWriter, name, value, progress): - registry, order, supplement = value - xmlWriter.simpletag(name, [('Registry', tostr(registry)), ('Order', tostr(order)), - ('Supplement', supplement)]) - xmlWriter.newline() - - def xmlRead(self, name, attrs, content, parent): - return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement'])) - - -topDictOperators = [ -# opcode name argument type default converter - ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()), - ((12, 20), 'SyntheticBase', 'number', None, None), - (0, 'version', 'SID', None, None), - (1, 'Notice', 'SID', None, 
Latin1Converter()), - ((12, 0), 'Copyright', 'SID', None, Latin1Converter()), - (2, 'FullName', 'SID', None, None), - ((12, 38), 'FontName', 'SID', None, None), - (3, 'FamilyName', 'SID', None, None), - (4, 'Weight', 'SID', None, None), - ((12, 1), 'isFixedPitch', 'number', 0, None), - ((12, 2), 'ItalicAngle', 'number', 0, None), - ((12, 3), 'UnderlinePosition', 'number', None, None), - ((12, 4), 'UnderlineThickness', 'number', 50, None), - ((12, 5), 'PaintType', 'number', 0, None), - ((12, 6), 'CharstringType', 'number', 2, None), - ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None), - (13, 'UniqueID', 'number', None, None), - (5, 'FontBBox', 'array', [0, 0, 0, 0], None), - ((12, 8), 'StrokeWidth', 'number', 0, None), - (14, 'XUID', 'array', None, None), - ((12, 21), 'PostScript', 'SID', None, None), - ((12, 22), 'BaseFontName', 'SID', None, None), - ((12, 23), 'BaseFontBlend', 'delta', None, None), - ((12, 31), 'CIDFontVersion', 'number', 0, None), - ((12, 32), 'CIDFontRevision', 'number', 0, None), - ((12, 33), 'CIDFontType', 'number', 0, None), - ((12, 34), 'CIDCount', 'number', 8720, None), - (15, 'charset', 'number', 0, CharsetConverter()), - ((12, 35), 'UIDBase', 'number', None, None), - (16, 'Encoding', 'number', 0, EncodingConverter()), - (18, 'Private', ('number', 'number'), None, PrivateDictConverter()), - ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()), - ((12, 36), 'FDArray', 'number', None, FDArrayConverter()), - (17, 'CharStrings', 'number', None, CharStringsConverter()), -] - -# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order, -# in order for the font to compile back from xml. 
- - -privateDictOperators = [ -# opcode name argument type default converter - (6, 'BlueValues', 'delta', None, None), - (7, 'OtherBlues', 'delta', None, None), - (8, 'FamilyBlues', 'delta', None, None), - (9, 'FamilyOtherBlues', 'delta', None, None), - ((12, 9), 'BlueScale', 'number', 0.039625, None), - ((12, 10), 'BlueShift', 'number', 7, None), - ((12, 11), 'BlueFuzz', 'number', 1, None), - (10, 'StdHW', 'number', None, None), - (11, 'StdVW', 'number', None, None), - ((12, 12), 'StemSnapH', 'delta', None, None), - ((12, 13), 'StemSnapV', 'delta', None, None), - ((12, 14), 'ForceBold', 'number', 0, None), - ((12, 15), 'ForceBoldThreshold', 'number', None, None), # deprecated - ((12, 16), 'lenIV', 'number', None, None), # deprecated - ((12, 17), 'LanguageGroup', 'number', 0, None), - ((12, 18), 'ExpansionFactor', 'number', 0.06, None), - ((12, 19), 'initialRandomSeed', 'number', 0, None), - (20, 'defaultWidthX', 'number', 0, None), - (21, 'nominalWidthX', 'number', 0, None), - (19, 'Subrs', 'number', None, SubrsConverter()), -] - -def addConverters(table): - for i in range(len(table)): - op, name, arg, default, conv = table[i] - if conv is not None: - continue - if arg in ("delta", "array"): - conv = ArrayConverter() - elif arg == "number": - conv = NumberConverter() - elif arg == "SID": - conv = ASCIIConverter() - else: - assert False - table[i] = op, name, arg, default, conv - -addConverters(privateDictOperators) -addConverters(topDictOperators) - - -class TopDictDecompiler(psCharStrings.DictDecompiler): - operators = buildOperatorDict(topDictOperators) - - -class PrivateDictDecompiler(psCharStrings.DictDecompiler): - operators = buildOperatorDict(privateDictOperators) - - -class DictCompiler(object): - - def __init__(self, dictObj, strings, parent): - assert isinstance(strings, IndexedStrings) - self.dictObj = dictObj - self.strings = strings - self.parent = parent - rawDict = {} - for name in dictObj.order: - value = getattr(dictObj, name, None) - if value is 
None: - continue - conv = dictObj.converters[name] - value = conv.write(dictObj, value) - if value == dictObj.defaults.get(name): - continue - rawDict[name] = value - self.rawDict = rawDict - - def setPos(self, pos, endPos): - pass - - def getDataLength(self): - return len(self.compile("getDataLength")) - - def compile(self, reason): - if DEBUG: - print("-- compiling %s for %s" % (self.__class__.__name__, reason)) - print("in baseDict: ", self) - rawDict = self.rawDict - data = [] - for name in self.dictObj.order: - value = rawDict.get(name) - if value is None: - continue - op, argType = self.opcodes[name] - if isinstance(argType, tuple): - l = len(argType) - assert len(value) == l, "value doesn't match arg type" - for i in range(l): - arg = argType[i] - v = value[i] - arghandler = getattr(self, "arg_" + arg) - data.append(arghandler(v)) - else: - arghandler = getattr(self, "arg_" + argType) - data.append(arghandler(value)) - data.append(op) - return bytesjoin(data) - - def toFile(self, file): - file.write(self.compile("toFile")) - - def arg_number(self, num): - return encodeNumber(num) - def arg_SID(self, s): - return psCharStrings.encodeIntCFF(self.strings.getSID(s)) - def arg_array(self, value): - data = [] - for num in value: - data.append(encodeNumber(num)) - return bytesjoin(data) - def arg_delta(self, value): - out = [] - last = 0 - for v in value: - out.append(v - last) - last = v - data = [] - for num in out: - data.append(encodeNumber(num)) - return bytesjoin(data) - - -def encodeNumber(num): - if isinstance(num, float): - return psCharStrings.encodeFloat(num) - else: - return psCharStrings.encodeIntCFF(num) - - -class TopDictCompiler(DictCompiler): - - opcodes = buildOpcodeDict(topDictOperators) - - def getChildren(self, strings): - children = [] - if hasattr(self.dictObj, "charset") and self.dictObj.charset: - children.append(CharsetCompiler(strings, self.dictObj.charset, self)) - if hasattr(self.dictObj, "Encoding"): - encoding = self.dictObj.Encoding 
- if not isinstance(encoding, basestring): - children.append(EncodingCompiler(strings, encoding, self)) - if hasattr(self.dictObj, "FDSelect"): - # I have not yet supported merging a ttx CFF-CID font, as there are interesting - # issues about merging the FDArrays. Here I assume that - # either the font was read from XML, and teh FDSelect indices are all - # in the charstring data, or the FDSelect array is already fully defined. - fdSelect = self.dictObj.FDSelect - if len(fdSelect) == 0: # probably read in from XML; assume fdIndex in CharString data - charStrings = self.dictObj.CharStrings - for name in self.dictObj.charset: - fdSelect.append(charStrings[name].fdSelectIndex) - fdSelectComp = FDSelectCompiler(fdSelect, self) - children.append(fdSelectComp) - if hasattr(self.dictObj, "CharStrings"): - items = [] - charStrings = self.dictObj.CharStrings - for name in self.dictObj.charset: - items.append(charStrings[name]) - charStringsComp = CharStringsCompiler(items, strings, self) - children.append(charStringsComp) - if hasattr(self.dictObj, "FDArray"): - # I have not yet supported merging a ttx CFF-CID font, as there are interesting - # issues about merging the FDArrays. Here I assume that the FDArray info is correct - # and complete. 
- fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self) - children.append(fdArrayIndexComp) - children.extend(fdArrayIndexComp.getChildren(strings)) - if hasattr(self.dictObj, "Private"): - privComp = self.dictObj.Private.getCompiler(strings, self) - children.append(privComp) - children.extend(privComp.getChildren(strings)) - return children - - -class FontDictCompiler(DictCompiler): - - opcodes = buildOpcodeDict(topDictOperators) - - def getChildren(self, strings): - children = [] - if hasattr(self.dictObj, "Private"): - privComp = self.dictObj.Private.getCompiler(strings, self) - children.append(privComp) - children.extend(privComp.getChildren(strings)) - return children - - -class PrivateDictCompiler(DictCompiler): - - opcodes = buildOpcodeDict(privateDictOperators) - - def setPos(self, pos, endPos): - size = endPos - pos - self.parent.rawDict["Private"] = size, pos - self.pos = pos - - def getChildren(self, strings): - children = [] - if hasattr(self.dictObj, "Subrs"): - children.append(self.dictObj.Subrs.getCompiler(strings, self)) - return children - - -class BaseDict(object): - - def __init__(self, strings=None, file=None, offset=None): - self.rawDict = {} - if DEBUG: - print("loading %s at %s" % (self.__class__.__name__, offset)) - self.file = file - self.offset = offset - self.strings = strings - self.skipNames = [] - - def decompile(self, data): - if DEBUG: - print(" length %s is %s" % (self.__class__.__name__, len(data))) - dec = self.decompilerClass(self.strings) - dec.decompile(data) - self.rawDict = dec.getDict() - self.postDecompile() - - def postDecompile(self): - pass - - def getCompiler(self, strings, parent): - return self.compilerClass(self, strings, parent) - - def __getattr__(self, name): - value = self.rawDict.get(name) - if value is None: - value = self.defaults.get(name) - if value is None: - raise AttributeError(name) - conv = self.converters[name] - value = conv.read(self, value) - setattr(self, name, value) - return value - 
- def toXML(self, xmlWriter, progress): - for name in self.order: - if name in self.skipNames: - continue - value = getattr(self, name, None) - if value is None: - continue - conv = self.converters[name] - conv.xmlWrite(xmlWriter, name, value, progress) - - def fromXML(self, name, attrs, content): - conv = self.converters[name] - value = conv.xmlRead(name, attrs, content, self) - setattr(self, name, value) - - -class TopDict(BaseDict): - - defaults = buildDefaults(topDictOperators) - converters = buildConverters(topDictOperators) - order = buildOrder(topDictOperators) - decompilerClass = TopDictDecompiler - compilerClass = TopDictCompiler - - def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): - BaseDict.__init__(self, strings, file, offset) - self.GlobalSubrs = GlobalSubrs - - def getGlyphOrder(self): - return self.charset - - def postDecompile(self): - offset = self.rawDict.get("CharStrings") - if offset is None: - return - # get the number of glyphs beforehand. - self.file.seek(offset) - self.numGlyphs = readCard16(self.file) - - def toXML(self, xmlWriter, progress): - if hasattr(self, "CharStrings"): - self.decompileAllCharStrings(progress) - if hasattr(self, "ROS"): - self.skipNames = ['Encoding'] - if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"): - # these values have default values, but I only want them to show up - # in CID fonts. - self.skipNames = ['CIDFontVersion', 'CIDFontRevision', 'CIDFontType', - 'CIDCount'] - BaseDict.toXML(self, xmlWriter, progress) - - def decompileAllCharStrings(self, progress): - # XXX only when doing ttdump -i? 
- i = 0 - for charString in self.CharStrings.values(): - try: - charString.decompile() - except: - print("Error in charstring ", i) - import sys - typ, value = sys.exc_info()[0:2] - raise typ(value) - if not i % 30 and progress: - progress.increment(0) # update - i = i + 1 - - -class FontDict(BaseDict): - - defaults = buildDefaults(topDictOperators) - converters = buildConverters(topDictOperators) - order = buildOrder(topDictOperators) - decompilerClass = None - compilerClass = FontDictCompiler - - def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None): - BaseDict.__init__(self, strings, file, offset) - self.GlobalSubrs = GlobalSubrs - - def getGlyphOrder(self): - return self.charset - - def toXML(self, xmlWriter, progress): - self.skipNames = ['Encoding'] - BaseDict.toXML(self, xmlWriter, progress) - - -class PrivateDict(BaseDict): - defaults = buildDefaults(privateDictOperators) - converters = buildConverters(privateDictOperators) - order = buildOrder(privateDictOperators) - decompilerClass = PrivateDictDecompiler - compilerClass = PrivateDictCompiler - - -class IndexedStrings(object): - - """SID -> string mapping.""" - - def __init__(self, file=None): - if file is None: - strings = [] - else: - strings = [tostr(s, encoding="latin1") for s in Index(file)] - self.strings = strings - - def getCompiler(self): - return IndexedStringsCompiler(self, None, None) - - def __len__(self): - return len(self.strings) - - def __getitem__(self, SID): - if SID < cffStandardStringCount: - return cffStandardStrings[SID] - else: - return self.strings[SID - cffStandardStringCount] - - def getSID(self, s): - if not hasattr(self, "stringMapping"): - self.buildStringMapping() - if s in cffStandardStringMapping: - SID = cffStandardStringMapping[s] - elif s in self.stringMapping: - SID = self.stringMapping[s] - else: - SID = len(self.strings) + cffStandardStringCount - self.strings.append(s) - self.stringMapping[s] = SID - return SID - - def getStrings(self): - return 
self.strings - - def buildStringMapping(self): - self.stringMapping = {} - for index in range(len(self.strings)): - self.stringMapping[self.strings[index]] = index + cffStandardStringCount - - -# The 391 Standard Strings as used in the CFF format. -# from Adobe Technical None #5176, version 1.0, 18 March 1998 - -cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', - 'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', - 'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', - 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', - 'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', - 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', - 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', - 'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', - 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', - 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', - 'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', - 'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', - 'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', - 'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', - 'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', - 'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', - 'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', - 'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', - 'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', - 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', - 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', - 'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', - 'copyright', 
'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', - 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', - 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', - 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute', - 'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', - 'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', - 'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', - 'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', - 'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', - 'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', - 'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', - 'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', - 'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle', - 'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', - 'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', - 'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', - 'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', - 'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', - 'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', - 'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', - 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', - 'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', - 'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', - 'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', - 'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', - 'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', - 'questiondownsmall', 
'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', - 'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', - 'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', - 'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior', - 'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', - 'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', - 'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', - 'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', - 'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', - 'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', - 'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', - 'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', - 'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002', - '001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', - 'Semibold' -] - -cffStandardStringCount = 391 -assert len(cffStandardStrings) == cffStandardStringCount -# build reverse mapping -cffStandardStringMapping = {} -for _i in range(cffStandardStringCount): - cffStandardStringMapping[cffStandardStrings[_i]] = _i - -cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign", -"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright", -"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two", -"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon", -"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", -"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", -"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", -"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", -"k", "l", "m", "n", "o", "p", 
"q", "r", "s", "t", "u", "v", "w", "x", "y", "z", -"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", -"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle", -"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl", -"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet", -"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis", -"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde", -"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut", -"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE", -"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls", -"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", -"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn", -"threequarters", "twosuperior", "registered", "minus", "eth", "multiply", -"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", -"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", -"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", -"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", -"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute", -"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute", -"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis", -"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde", -"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis", -"zcaron"] - -cffISOAdobeStringCount = 229 -assert len(cffISOAdobeStrings) == cffISOAdobeStringCount - -cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall", -"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall", -"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader", 
-"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle", -"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", -"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon", -"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall", -"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", -"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", -"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", -"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall", -"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", -"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", -"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", -"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall", -"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall", -"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall", -"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall", -"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth", -"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", -"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior", -"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior", -"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior", -"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior", -"centinferior", "dollarinferior", "periodinferior", "commainferior", -"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall", -"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", -"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall", -"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall", 
-"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall", -"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall", -"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall", -"Ydieresissmall"] - -cffExpertStringCount = 166 -assert len(cffIExpertStrings) == cffExpertStringCount - -cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle", -"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader", -"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle", -"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", -"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", -"semicolon", "commasuperior", "threequartersemdash", "periodsuperior", -"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", -"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", -"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior", -"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah", -"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf", -"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths", -"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior", -"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior", -"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior", -"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior", -"eightinferior", "nineinferior", "centinferior", "dollarinferior", -"periodinferior", "commainferior"] - -cffExpertSubsetStringCount = 87 -assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount diff -Nru fonttools-3.0/Tools/fontTools/encodings/codecs.py fonttools-3.21.2/Tools/fontTools/encodings/codecs.py --- fonttools-3.0/Tools/fontTools/encodings/codecs.py 2015-08-31 17:57:15.000000000 +0000 +++ 
fonttools-3.21.2/Tools/fontTools/encodings/codecs.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,135 +0,0 @@ -"""Extend the Python codecs module with a few encodings that are used in OpenType (name table) -but missing from Python. See https://github.com/behdad/fonttools/issues/236 for details.""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import codecs -import encodings - -class ExtendCodec(codecs.Codec): - - def __init__(self, name, base_encoding, mapping): - self.name = name - self.base_encoding = base_encoding - self.mapping = mapping - self.reverse = {v:k for k,v in mapping.items()} - self.max_len = max(len(v) for v in mapping.values()) - self.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode) - codecs.register_error(name, self.error) - - def encode(self, input, errors='strict'): - assert errors == 'strict' - #return codecs.encode(input, self.base_encoding, self.name), len(input) - - # The above line could totally be all we needed, relying on the error - # handling to replace the unencodable Unicode characters with our extended - # byte sequences. - # - # However, there seems to be a design bug in Python (probably intentional): - # the error handler for encoding is supposed to return a **Unicode** character, - # that then needs to be encodable itself... Ugh. - # - # So we implement what codecs.encode() should have been doing: which is expect - # error handler to return bytes() to be added to the output. - # - # This seems to have been fixed in Python 3.3. We should try using that and - # use fallback only if that failed. 
- # https://docs.python.org/3.3/library/codecs.html#codecs.register_error - - length = len(input) - out = b'' - while input: - try: - part = codecs.encode(input, self.base_encoding) - out += part - input = '' # All converted - except UnicodeEncodeError as e: - # Convert the correct part - out += codecs.encode(input[:e.start], self.base_encoding) - replacement, pos = self.error(e) - out += replacement - input = input[pos:] - return out, length - - def decode(self, input, errors='strict'): - assert errors == 'strict' - return codecs.decode(input, self.base_encoding, self.name), len(input) - - def error(self, e): - if isinstance(e, UnicodeDecodeError): - for end in range(e.start + 1, e.end + 1): - s = e.object[e.start:end] - if s in self.mapping: - return self.mapping[s], end - elif isinstance(e, UnicodeEncodeError): - for end in range(e.start + 1, e.start + self.max_len + 1): - s = e.object[e.start:end] - if s in self.reverse: - return self.reverse[s], end - e.encoding = self.name - raise e - - -_extended_encodings = { - "x_mac_japanese_ttx": ("shift_jis", { - b"\xFC": unichr(0x007C), - b"\x7E": unichr(0x007E), - b"\x80": unichr(0x005C), - b"\xA0": unichr(0x00A0), - b"\xFD": unichr(0x00A9), - b"\xFE": unichr(0x2122), - b"\xFF": unichr(0x2026), - }), - "x_mac_trad_chinese_ttx": ("big5", { - b"\x80": unichr(0x005C), - b"\xA0": unichr(0x00A0), - b"\xFD": unichr(0x00A9), - b"\xFE": unichr(0x2122), - b"\xFF": unichr(0x2026), - }), - "x_mac_korean_ttx": ("euc_kr", { - b"\x80": unichr(0x00A0), - b"\x81": unichr(0x20A9), - b"\x82": unichr(0x2014), - b"\x83": unichr(0x00A9), - b"\xFE": unichr(0x2122), - b"\xFF": unichr(0x2026), - }), - "x_mac_simp_chinese_ttx": ("gb2312", { - b"\x80": unichr(0x00FC), - b"\xA0": unichr(0x00A0), - b"\xFD": unichr(0x00A9), - b"\xFE": unichr(0x2122), - b"\xFF": unichr(0x2026), - }), -} - -_cache = {} - -def search_function(name): - name = encodings.normalize_encoding(name) # Rather undocumented... 
- if name in _extended_encodings: - if name not in _cache: - base_encoding, mapping = _extended_encodings[name] - assert(name[-4:] == "_ttx") - # Python 2 didn't have any of the encodings that we are implementing - # in this file. Python 3 added aliases for the East Asian ones, mapping - # them "temporarily" to the same base encoding as us, with a comment - # suggesting that full implementation will appear some time later. - # As such, try the Python version of the x_mac_... first, if that is found, - # use *that* as our base encoding. This would make our encoding upgrade - # to the full encoding when and if Python finally implements that. - # http://bugs.python.org/issue24041 - base_encodings = [name[:-4], base_encoding] - for base_encoding in base_encodings: - try: - codecs.lookup(base_encoding) - except LookupError: - continue - _cache[name] = ExtendCodec(name, base_encoding, mapping) - break - return _cache[name].info - - return None - -codecs.register(search_function) diff -Nru fonttools-3.0/Tools/fontTools/encodings/codecs_test.py fonttools-3.21.2/Tools/fontTools/encodings/codecs_test.py --- fonttools-3.0/Tools/fontTools/encodings/codecs_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/encodings/codecs_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,25 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -import unittest -import fontTools.encodings.codecs # Not to be confused with "import codecs" - -class ExtendedCodecsTest(unittest.TestCase): - - def test_decode_mac_japanese(self): - self.assertEqual(b'x\xfe\xfdy'.decode("x_mac_japanese_ttx"), - unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)) - - def test_encode_mac_japanese(self): - self.assertEqual(b'x\xfe\xfdy', - (unichr(0x78)+unichr(0x2122)+unichr(0x00A9)+unichr(0x79)).encode("x_mac_japanese_ttx")) - - def test_decode_mac_trad_chinese(self): - 
self.assertEqual(b'\x80'.decode("x_mac_trad_chinese_ttx"), - unichr(0x5C)) - - def test_decode_mac_romanian(self): - self.assertEqual(b'x\xfb'.decode("mac_romanian"), - unichr(0x78)+unichr(0x02DA)) - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/encodings/__init__.py fonttools-3.21.2/Tools/fontTools/encodings/__init__.py --- fonttools-3.0/Tools/fontTools/encodings/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/encodings/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -"""Empty __init__.py file to signal Python this directory is a package.""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * diff -Nru fonttools-3.0/Tools/fontTools/encodings/MacRoman.py fonttools-3.21.2/Tools/fontTools/encodings/MacRoman.py --- fonttools-3.0/Tools/fontTools/encodings/MacRoman.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/encodings/MacRoman.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,39 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -MacRoman = [ - 'NUL', 'Eth', 'eth', 'Lslash', 'lslash', 'Scaron', 'scaron', 'Yacute', - 'yacute', 'HT', 'LF', 'Thorn', 'thorn', 'CR', 'Zcaron', 'zcaron', 'DLE', 'DC1', - 'DC2', 'DC3', 'DC4', 'onehalf', 'onequarter', 'onesuperior', 'threequarters', - 'threesuperior', 'twosuperior', 'brokenbar', 'minus', 'multiply', 'RS', 'US', - 'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand', - 'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma', - 'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five', - 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal', - 'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', - 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', - 'bracketleft', 'backslash', 
'bracketright', 'asciicircum', 'underscore', - 'grave', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', - 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', - 'braceright', 'asciitilde', 'DEL', 'Adieresis', 'Aring', 'Ccedilla', 'Eacute', - 'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex', - 'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave', 'ecircumflex', - 'edieresis', 'iacute', 'igrave', 'icircumflex', 'idieresis', 'ntilde', - 'oacute', 'ograve', 'ocircumflex', 'odieresis', 'otilde', 'uacute', 'ugrave', - 'ucircumflex', 'udieresis', 'dagger', 'degree', 'cent', 'sterling', 'section', - 'bullet', 'paragraph', 'germandbls', 'registered', 'copyright', 'trademark', - 'acute', 'dieresis', 'notequal', 'AE', 'Oslash', 'infinity', 'plusminus', - 'lessequal', 'greaterequal', 'yen', 'mu', 'partialdiff', 'summation', - 'product', 'pi', 'integral', 'ordfeminine', 'ordmasculine', 'Omega', 'ae', - 'oslash', 'questiondown', 'exclamdown', 'logicalnot', 'radical', 'florin', - 'approxequal', 'Delta', 'guillemotleft', 'guillemotright', 'ellipsis', - 'nbspace', 'Agrave', 'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', - 'quotedblleft', 'quotedblright', 'quoteleft', 'quoteright', 'divide', 'lozenge', - 'ydieresis', 'Ydieresis', 'fraction', 'currency', 'guilsinglleft', - 'guilsinglright', 'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase', - 'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute', - 'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Oacute', - 'Ocircumflex', 'apple', 'Ograve', 'Uacute', 'Ucircumflex', 'Ugrave', 'dotlessi', - 'circumflex', 'tilde', 'macron', 'breve', 'dotaccent', 'ring', 'cedilla', - 'hungarumlaut', 'ogonek', 'caron' - ] diff -Nru fonttools-3.0/Tools/fontTools/encodings/StandardEncoding.py fonttools-3.21.2/Tools/fontTools/encodings/StandardEncoding.py --- fonttools-3.0/Tools/fontTools/encodings/StandardEncoding.py 
2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/encodings/StandardEncoding.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -StandardEncoding = [ - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', 'space', 'exclam', 'quotedbl', - 'numbersign', 'dollar', 'percent', 'ampersand', - 'quoteright', 'parenleft', 'parenright', 'asterisk', 'plus', - 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', 'two', - 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', - 'colon', 'semicolon', 'less', 'equal', 'greater', - 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', - 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', - 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', - 'bracketright', 'asciicircum', 'underscore', 'quoteleft', - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', - 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', - 'y', 'z', 'braceleft', 'bar', 'braceright', 'asciitilde', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', 'exclamdown', - 'cent', 'sterling', 'fraction', 'yen', 'florin', 'section', - 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', - 'guilsinglleft', 'guilsinglright', 'fi', 'fl', '.notdef', - 'endash', 'dagger', 
'daggerdbl', 'periodcentered', - '.notdef', 'paragraph', 'bullet', 'quotesinglbase', - 'quotedblbase', 'quotedblright', 'guillemotright', - 'ellipsis', 'perthousand', '.notdef', 'questiondown', - '.notdef', 'grave', 'acute', 'circumflex', 'tilde', - 'macron', 'breve', 'dotaccent', 'dieresis', '.notdef', - 'ring', 'cedilla', '.notdef', 'hungarumlaut', 'ogonek', - 'caron', 'emdash', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', '.notdef', - '.notdef', '.notdef', '.notdef', 'AE', '.notdef', - 'ordfeminine', '.notdef', '.notdef', '.notdef', '.notdef', - 'Lslash', 'Oslash', 'OE', 'ordmasculine', '.notdef', - '.notdef', '.notdef', '.notdef', '.notdef', 'ae', '.notdef', - '.notdef', '.notdef', 'dotlessi', '.notdef', '.notdef', - 'lslash', 'oslash', 'oe', 'germandbls', '.notdef', - '.notdef', '.notdef', '.notdef' - ] diff -Nru fonttools-3.0/Tools/fontTools/feaLib/ast.py fonttools-3.21.2/Tools/fontTools/feaLib/ast.py --- fonttools-3.0/Tools/fontTools/feaLib/ast.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/ast.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,98 +0,0 @@ -from __future__ import print_function, division, absolute_import -from __future__ import unicode_literals - - -class FeatureFile(object): - def __init__(self): - self.statements = [] - - -class FeatureBlock(object): - def __init__(self, location, name, use_extension): - self.location = location - self.name, self.use_extension = name, use_extension - self.statements = [] - - -class LookupBlock(object): - def __init__(self, location, name, use_extension): - self.location = location - self.name, self.use_extension = name, use_extension - self.statements = [] - - -class GlyphClassDefinition(object): - def __init__(self, location, name, glyphs): - self.location = location - self.name = name - self.glyphs = glyphs - - -class AlternateSubstitution(object): - def __init__(self, location, glyph, 
from_class): - self.location = location - self.glyph, self.from_class = (glyph, from_class) - - -class AnchorDefinition(object): - def __init__(self, location, name, x, y, contourpoint): - self.location = location - self.name, self.x, self.y, self.contourpoint = name, x, y, contourpoint - - -class LanguageStatement(object): - def __init__(self, location, language, include_default, required): - self.location = location - self.language = language - self.include_default = include_default - self.required = required - - -class LanguageSystemStatement(object): - def __init__(self, location, script, language): - self.location = location - self.script, self.language = (script, language) - - -class IgnoreSubstitutionRule(object): - def __init__(self, location, prefix, glyphs, suffix): - self.location = location - self.prefix, self.glyphs, self.suffix = (prefix, glyphs, suffix) - - -class LookupReferenceStatement(object): - def __init__(self, location, lookup): - self.location, self.lookup = (location, lookup) - - -class ScriptStatement(object): - def __init__(self, location, script): - self.location = location - self.script = script - - -class SubtableStatement(object): - def __init__(self, location): - self.location = location - - -class SubstitutionRule(object): - def __init__(self, location, old, new): - self.location, self.old, self.new = (location, old, new) - self.old_prefix = [] - self.old_suffix = [] - self.lookups = [None] * len(old) - - -class ValueRecord(object): - def __init__(self, location, xPlacement, yPlacement, xAdvance, yAdvance): - self.location = location - self.xPlacement, self.yPlacement = (xPlacement, yPlacement) - self.xAdvance, self.yAdvance = (xAdvance, yAdvance) - - -class ValueRecordDefinition(object): - def __init__(self, location, name, value): - self.location = location - self.name = name - self.value = value diff -Nru fonttools-3.0/Tools/fontTools/feaLib/__init__.py fonttools-3.21.2/Tools/fontTools/feaLib/__init__.py --- 
fonttools-3.0/Tools/fontTools/feaLib/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -"""fontTools.feaLib -- a package for dealing with OpenType feature files.""" - -# The structure of OpenType feature files is defined here: -# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html diff -Nru fonttools-3.0/Tools/fontTools/feaLib/lexer.py fonttools-3.21.2/Tools/fontTools/feaLib/lexer.py --- fonttools-3.0/Tools/fontTools/feaLib/lexer.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/lexer.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,203 +0,0 @@ -from __future__ import print_function, division, absolute_import -from __future__ import unicode_literals -import codecs -import os - - -class LexerError(Exception): - def __init__(self, message, location): - Exception.__init__(self, message) - self.location = location - - def __str__(self): - message = Exception.__str__(self) - if self.location: - path, line, column = self.location - return "%s:%d:%d: %s" % (path, line, column, message) - else: - return message - - -class Lexer(object): - NUMBER = "NUMBER" - STRING = "STRING" - NAME = "NAME" - FILENAME = "FILENAME" - GLYPHCLASS = "GLYPHCLASS" - CID = "CID" - SYMBOL = "SYMBOL" - COMMENT = "COMMENT" - NEWLINE = "NEWLINE" - - CHAR_WHITESPACE_ = " \t" - CHAR_NEWLINE_ = "\r\n" - CHAR_SYMBOL_ = ";:-+'{}[]<>()=" - CHAR_DIGIT_ = "0123456789" - CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef" - CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" - CHAR_NAME_START_ = CHAR_LETTER_ + "_.\\" - CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_." 
- - MODE_NORMAL_ = "NORMAL" - MODE_FILENAME_ = "FILENAME" - - def __init__(self, text, filename): - self.filename_ = filename - self.line_ = 1 - self.pos_ = 0 - self.line_start_ = 0 - self.text_ = text - self.text_length_ = len(text) - self.mode_ = Lexer.MODE_NORMAL_ - - def __iter__(self): - return self - - def next(self): # Python 2 - return self.__next__() - - def __next__(self): # Python 3 - while True: - token_type, token, location = self.next_() - if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}: - return (token_type, token, location) - - def next_(self): - self.scan_over_(Lexer.CHAR_WHITESPACE_) - column = self.pos_ - self.line_start_ + 1 - location = (self.filename_, self.line_, column) - start = self.pos_ - text = self.text_ - limit = len(text) - if start >= limit: - raise StopIteration() - cur_char = text[start] - next_char = text[start + 1] if start + 1 < limit else None - - if cur_char == "\n": - self.pos_ += 1 - self.line_ += 1 - self.line_start_ = self.pos_ - return (Lexer.NEWLINE, None, location) - if cur_char == "\r": - self.pos_ += (2 if next_char == "\n" else 1) - self.line_ += 1 - self.line_start_ = self.pos_ - return (Lexer.NEWLINE, None, location) - if cur_char == "#": - self.scan_until_(Lexer.CHAR_NEWLINE_) - return (Lexer.COMMENT, text[start:self.pos_], location) - - if self.mode_ is Lexer.MODE_FILENAME_: - if cur_char != "(": - raise LexerError("Expected '(' before file name", location) - self.scan_until_(")") - cur_char = text[self.pos_] if self.pos_ < limit else None - if cur_char != ")": - raise LexerError("Expected ')' after file name", location) - self.pos_ += 1 - self.mode_ = Lexer.MODE_NORMAL_ - return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location) - - if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_: - self.pos_ += 1 - self.scan_over_(Lexer.CHAR_DIGIT_) - return (Lexer.CID, int(text[start + 1:self.pos_], 10), location) - if cur_char == "@": - self.pos_ += 1 - self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - 
glyphclass = text[start + 1:self.pos_] - if len(glyphclass) < 1: - raise LexerError("Expected glyph class name", location) - if len(glyphclass) > 30: - raise LexerError( - "Glyph class names must not be longer than 30 characters", - location) - return (Lexer.GLYPHCLASS, glyphclass, location) - if cur_char in Lexer.CHAR_NAME_START_: - self.pos_ += 1 - self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_) - token = text[start:self.pos_] - if token == "include": - self.mode_ = Lexer.MODE_FILENAME_ - return (Lexer.NAME, token, location) - if cur_char == "0" and next_char in "xX": - self.pos_ += 2 - self.scan_over_(Lexer.CHAR_HEXDIGIT_) - return (Lexer.NUMBER, int(text[start:self.pos_], 16), location) - if cur_char in Lexer.CHAR_DIGIT_: - self.scan_over_(Lexer.CHAR_DIGIT_) - return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) - if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_: - self.pos_ += 1 - self.scan_over_(Lexer.CHAR_DIGIT_) - return (Lexer.NUMBER, int(text[start:self.pos_], 10), location) - if cur_char in Lexer.CHAR_SYMBOL_: - self.pos_ += 1 - return (Lexer.SYMBOL, cur_char, location) - if cur_char == '"': - self.pos_ += 1 - self.scan_until_('"\r\n') - if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"': - self.pos_ += 1 - return (Lexer.STRING, text[start + 1:self.pos_ - 1], location) - else: - raise LexerError("Expected '\"' to terminate string", location) - raise LexerError("Unexpected character: '%s'" % cur_char, location) - - def scan_over_(self, valid): - p = self.pos_ - while p < self.text_length_ and self.text_[p] in valid: - p += 1 - self.pos_ = p - - def scan_until_(self, stop_at): - p = self.pos_ - while p < self.text_length_ and self.text_[p] not in stop_at: - p += 1 - self.pos_ = p - - -class IncludingLexer(object): - def __init__(self, filename): - self.lexers_ = [self.make_lexer_(filename, (filename, 0, 0))] - - def __iter__(self): - return self - - def next(self): # Python 2 - return self.__next__() - - def __next__(self): # 
Python 3 - while self.lexers_: - lexer = self.lexers_[-1] - try: - token_type, token, location = lexer.next() - except StopIteration: - self.lexers_.pop() - continue - if token_type is Lexer.NAME and token == "include": - fname_type, fname_token, fname_location = lexer.next() - if fname_type is not Lexer.FILENAME: - raise LexerError("Expected file name", fname_location) - semi_type, semi_token, semi_location = lexer.next() - if semi_type is not Lexer.SYMBOL or semi_token != ";": - raise LexerError("Expected ';'", semi_location) - curpath, _ = os.path.split(lexer.filename_) - path = os.path.join(curpath, fname_token) - if len(self.lexers_) >= 5: - raise LexerError("Too many recursive includes", - fname_location) - self.lexers_.append(self.make_lexer_(path, fname_location)) - continue - else: - return (token_type, token, location) - raise StopIteration() - - @staticmethod - def make_lexer_(filename, location): - try: - with codecs.open(filename, "rb", "utf-8") as f: - return Lexer(f.read(), filename) - except IOError as err: - raise LexerError(str(err), location) diff -Nru fonttools-3.0/Tools/fontTools/feaLib/lexer_test.py fonttools-3.21.2/Tools/fontTools/feaLib/lexer_test.py --- fonttools-3.0/Tools/fontTools/feaLib/lexer_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/lexer_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,160 +0,0 @@ -from __future__ import print_function, division, absolute_import -from __future__ import unicode_literals -from fontTools.feaLib.lexer import IncludingLexer, Lexer, LexerError -import os -import unittest - - -def lex(s): - return [(typ, tok) for (typ, tok, _) in Lexer(s, "test.fea")] - - -class LexerErrorTest(unittest.TestCase): - def test_str(self): - err = LexerError("Squeak!", ("foo.fea", 23, 42)) - self.assertEqual(str(err), "foo.fea:23:42: Squeak!") - - def test_str_nolocation(self): - err = LexerError("Squeak!", None) - self.assertEqual(str(err), "Squeak!") - - -class 
LexerTest(unittest.TestCase): - def __init__(self, methodName): - unittest.TestCase.__init__(self, methodName) - # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, - # and fires deprecation warnings if a program uses the old name. - if not hasattr(self, "assertRaisesRegex"): - self.assertRaisesRegex = self.assertRaisesRegexp - - def test_empty(self): - self.assertEqual(lex(""), []) - self.assertEqual(lex(" \t "), []) - - def test_name(self): - self.assertEqual(lex("a17"), [(Lexer.NAME, "a17")]) - self.assertEqual(lex(".notdef"), [(Lexer.NAME, ".notdef")]) - self.assertEqual(lex("two.oldstyle"), [(Lexer.NAME, "two.oldstyle")]) - self.assertEqual(lex("_"), [(Lexer.NAME, "_")]) - self.assertEqual(lex("\\table"), [(Lexer.NAME, "\\table")]) - - def test_cid(self): - self.assertEqual(lex("\\0 \\987"), [(Lexer.CID, 0), (Lexer.CID, 987)]) - - def test_glyphclass(self): - self.assertEqual(lex("@Vowel.sc"), [(Lexer.GLYPHCLASS, "Vowel.sc")]) - self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@(a)") - self.assertRaisesRegex(LexerError, "Expected glyph class", lex, "@ A") - self.assertRaisesRegex(LexerError, "not be longer than 30 characters", - lex, "@a123456789.a123456789.a123456789.x") - - def test_include(self): - self.assertEqual(lex("include (~/foo/bar baz.fea);"), [ - (Lexer.NAME, "include"), - (Lexer.FILENAME, "~/foo/bar baz.fea"), - (Lexer.SYMBOL, ";") - ]) - self.assertEqual(lex("include # Comment\n (foo) \n;"), [ - (Lexer.NAME, "include"), - (Lexer.FILENAME, "foo"), - (Lexer.SYMBOL, ";") - ]) - self.assertRaises(LexerError, lex, "include blah") - self.assertRaises(LexerError, lex, "include (blah") - - def test_number(self): - self.assertEqual(lex("123 -456"), - [(Lexer.NUMBER, 123), (Lexer.NUMBER, -456)]) - self.assertEqual(lex("0xCAFED00D"), [(Lexer.NUMBER, 0xCAFED00D)]) - self.assertEqual(lex("0xcafed00d"), [(Lexer.NUMBER, 0xCAFED00D)]) - - def test_symbol(self): - self.assertEqual(lex("a'"), [(Lexer.NAME, "a"), (Lexer.SYMBOL, "'")]) - 
self.assertEqual( - lex("foo - -2"), - [(Lexer.NAME, "foo"), (Lexer.SYMBOL, "-"), (Lexer.NUMBER, -2)]) - - def test_comment(self): - self.assertEqual(lex("# Comment\n#"), []) - - def test_string(self): - self.assertEqual(lex('"foo" "bar"'), - [(Lexer.STRING, "foo"), (Lexer.STRING, "bar")]) - self.assertRaises(LexerError, lambda: lex('"foo\n bar"')) - - def test_bad_character(self): - self.assertRaises(LexerError, lambda: lex("123 \u0001")) - - def test_newline(self): - lines = lambda s: [loc[1] for (_, _, loc) in Lexer(s, "test.fea")] - self.assertEqual(lines("FOO\n\nBAR\nBAZ"), [1, 3, 4]) # Unix - self.assertEqual(lines("FOO\r\rBAR\rBAZ"), [1, 3, 4]) # Macintosh - self.assertEqual(lines("FOO\r\n\r\n BAR\r\nBAZ"), [1, 3, 4]) # Windows - self.assertEqual(lines("FOO\n\rBAR\r\nBAZ"), [1, 3, 4]) # mixed - - def test_location(self): - locs = lambda s: ["%s:%d:%d" % loc - for (_, _, loc) in Lexer(s, "test.fea")] - self.assertEqual(locs("a b # Comment\n12 @x"), [ - "test.fea:1:1", "test.fea:1:3", "test.fea:2:1", - "test.fea:2:4" - ]) - - def test_scan_over_(self): - lexer = Lexer("abbacabba12", "test.fea") - self.assertEqual(lexer.pos_, 0) - lexer.scan_over_("xyz") - self.assertEqual(lexer.pos_, 0) - lexer.scan_over_("abc") - self.assertEqual(lexer.pos_, 9) - lexer.scan_over_("abc") - self.assertEqual(lexer.pos_, 9) - lexer.scan_over_("0123456789") - self.assertEqual(lexer.pos_, 11) - - def test_scan_until_(self): - lexer = Lexer("foo'bar", "test.fea") - self.assertEqual(lexer.pos_, 0) - lexer.scan_until_("'") - self.assertEqual(lexer.pos_, 3) - lexer.scan_until_("'") - self.assertEqual(lexer.pos_, 3) - - -class IncludingLexerTest(unittest.TestCase): - @staticmethod - def getpath(filename): - path, _ = os.path.split(__file__) - return os.path.join(path, "testdata", filename) - - def test_include(self): - lexer = IncludingLexer(self.getpath("include4.fea")) - result = ['%s %s:%d' % (token, os.path.split(loc[0])[1], loc[1]) - for _, token, loc in lexer] - 
self.assertEqual(result, [ - "I4a include4.fea:1", - "I3a include3.fea:1", - "I2a include2.fea:1", - "I1a include1.fea:1", - "I0 include0.fea:1", - "I1b include1.fea:3", - "I2b include2.fea:3", - "I3b include3.fea:3", - "I4b include4.fea:3" - ]) - - def test_include_limit(self): - lexer = IncludingLexer(self.getpath("include6.fea")) - self.assertRaises(LexerError, lambda: list(lexer)) - - def test_include_self(self): - lexer = IncludingLexer(self.getpath("includeself.fea")) - self.assertRaises(LexerError, lambda: list(lexer)) - - def test_include_missing_file(self): - lexer = IncludingLexer(self.getpath("includemissingfile.fea")) - self.assertRaises(LexerError, lambda: list(lexer)) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/feaLib/parser.py fonttools-3.21.2/Tools/fontTools/feaLib/parser.py --- fonttools-3.0/Tools/fontTools/feaLib/parser.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/parser.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,466 +0,0 @@ -from __future__ import print_function, division, absolute_import -from __future__ import unicode_literals -from fontTools.feaLib.lexer import Lexer, IncludingLexer -import fontTools.feaLib.ast as ast -import os -import re - - -class ParserError(Exception): - def __init__(self, message, location): - Exception.__init__(self, message) - self.location = location - - def __str__(self): - message = Exception.__str__(self) - if self.location: - path, line, column = self.location - return "%s:%d:%d: %s" % (path, line, column, message) - else: - return message - - -class Parser(object): - def __init__(self, path): - self.doc_ = ast.FeatureFile() - self.anchors_ = SymbolTable() - self.glyphclasses_ = SymbolTable() - self.lookups_ = SymbolTable() - self.valuerecords_ = SymbolTable() - self.symbol_tables_ = { - self.anchors_, self.glyphclasses_, - self.lookups_, self.valuerecords_ - } - self.next_token_type_, self.next_token_ = (None, None) - 
self.next_token_location_ = None - self.lexer_ = IncludingLexer(path) - self.advance_lexer_() - - def parse(self): - statements = self.doc_.statements - while self.next_token_type_ is not None: - self.advance_lexer_() - if self.cur_token_type_ is Lexer.GLYPHCLASS: - statements.append(self.parse_glyphclass_definition_()) - elif self.is_cur_keyword_("anchorDef"): - statements.append(self.parse_anchordef_()) - elif self.is_cur_keyword_("languagesystem"): - statements.append(self.parse_languagesystem_()) - elif self.is_cur_keyword_("lookup"): - statements.append(self.parse_lookup_(vertical=False)) - elif self.is_cur_keyword_("feature"): - statements.append(self.parse_feature_block_()) - elif self.is_cur_keyword_("valueRecordDef"): - statements.append( - self.parse_valuerecord_definition_(vertical=False)) - else: - raise ParserError("Expected feature, languagesystem, " - "lookup, or glyph class definition", - self.cur_token_location_) - return self.doc_ - - def parse_anchordef_(self): - assert self.is_cur_keyword_("anchorDef") - location = self.cur_token_location_ - x, y = self.expect_number_(), self.expect_number_() - contourpoint = None - if self.next_token_ == "contourpoint": - self.expect_keyword_("contourpoint") - contourpoint = self.expect_number_() - name = self.expect_name_() - self.expect_symbol_(";") - anchordef = ast.AnchorDefinition(location, name, x, y, contourpoint) - self.anchors_.define(name, anchordef) - return anchordef - - def parse_glyphclass_definition_(self): - location, name = self.cur_token_location_, self.cur_token_ - self.expect_symbol_("=") - glyphs = self.parse_glyphclass_(accept_glyphname=False) - self.expect_symbol_(";") - if self.glyphclasses_.resolve(name) is not None: - raise ParserError("Glyph class @%s already defined" % name, - location) - glyphclass = ast.GlyphClassDefinition(location, name, glyphs) - self.glyphclasses_.define(name, glyphclass) - return glyphclass - - def parse_glyphclass_(self, accept_glyphname): - result = set() - 
if accept_glyphname and self.next_token_type_ is Lexer.NAME: - result.add(self.expect_name_()) - return result - if self.next_token_type_ is Lexer.GLYPHCLASS: - self.advance_lexer_() - gc = self.glyphclasses_.resolve(self.cur_token_) - if gc is None: - raise ParserError("Unknown glyph class @%s" % self.cur_token_, - self.cur_token_location_) - result.update(gc.glyphs) - return result - - self.expect_symbol_("[") - while self.next_token_ != "]": - self.advance_lexer_() - if self.cur_token_type_ is Lexer.NAME: - if self.next_token_ == "-": - range_location_ = self.cur_token_location_ - range_start = self.cur_token_ - self.expect_symbol_("-") - range_end = self.expect_name_() - result.update(self.make_glyph_range_(range_location_, - range_start, - range_end)) - else: - result.add(self.cur_token_) - elif self.cur_token_type_ is Lexer.GLYPHCLASS: - gc = self.glyphclasses_.resolve(self.cur_token_) - if gc is None: - raise ParserError( - "Unknown glyph class @%s" % self.cur_token_, - self.cur_token_location_) - result.update(gc.glyphs) - else: - raise ParserError( - "Expected glyph name, glyph range, " - "or glyph class reference", - self.cur_token_location_) - self.expect_symbol_("]") - return result - - def parse_glyph_pattern_(self): - prefix, glyphs, lookups, suffix = ([], [], [], []) - while self.next_token_ not in {"by", "from", ";"}: - gc = self.parse_glyphclass_(accept_glyphname=True) - marked = False - if self.next_token_ == "'": - self.expect_symbol_("'") - marked = True - if marked: - glyphs.append(gc) - elif glyphs: - suffix.append(gc) - else: - prefix.append(gc) - - lookup = None - if self.next_token_ == "lookup": - self.expect_keyword_("lookup") - if not marked: - raise ParserError("Lookups can only follow marked glyphs", - self.cur_token_location_) - lookup_name = self.expect_name_() - lookup = self.lookups_.resolve(lookup_name) - if lookup is None: - raise ParserError('Unknown lookup "%s"' % lookup_name, - self.cur_token_location_) - if marked: - 
lookups.append(lookup) - - if not glyphs and not suffix: # eg., "sub f f i by" - assert lookups == [] - return ([], prefix, [None] * len(prefix), []) - else: - return (prefix, glyphs, lookups, suffix) - - def parse_ignore_(self): - assert self.is_cur_keyword_("ignore") - location = self.cur_token_location_ - self.advance_lexer_() - if self.cur_token_ in ["substitute", "sub"]: - prefix, glyphs, lookups, suffix = self.parse_glyph_pattern_() - self.expect_symbol_(";") - return ast.IgnoreSubstitutionRule(location, prefix, glyphs, suffix) - raise ParserError("Expected \"substitute\"", self.next_token_location_) - - def parse_language_(self): - assert self.is_cur_keyword_("language") - location, language = self.cur_token_location_, self.expect_tag_() - include_default, required = (True, False) - if self.next_token_ in {"exclude_dflt", "include_dflt"}: - include_default = (self.expect_name_() == "include_dflt") - if self.next_token_ == "required": - self.expect_keyword_("required") - required = True - self.expect_symbol_(";") - return ast.LanguageStatement(location, language.strip(), - include_default, required) - - def parse_lookup_(self, vertical): - assert self.is_cur_keyword_("lookup") - location, name = self.cur_token_location_, self.expect_name_() - - if self.next_token_ == ";": - lookup = self.lookups_.resolve(name) - if lookup is None: - raise ParserError("Unknown lookup \"%s\"" % name, - self.cur_token_location_) - self.expect_symbol_(";") - return ast.LookupReferenceStatement(location, lookup) - - use_extension = False - if self.next_token_ == "useExtension": - self.expect_keyword_("useExtension") - use_extension = True - - block = ast.LookupBlock(location, name, use_extension) - self.parse_block_(block, vertical) - self.lookups_.define(name, block) - return block - - def parse_script_(self): - assert self.is_cur_keyword_("script") - location, script = self.cur_token_location_, self.expect_tag_() - self.expect_symbol_(";") - return ast.ScriptStatement(location, 
script) - - def parse_substitute_(self): - assert self.cur_token_ in {"substitute", "sub"} - location = self.cur_token_location_ - old_prefix, old, lookups, old_suffix = self.parse_glyph_pattern_() - - new = [] - if self.next_token_ == "by": - keyword = self.expect_keyword_("by") - while self.next_token_ != ";": - new.append(self.parse_glyphclass_(accept_glyphname=True)) - elif self.next_token_ == "from": - keyword = self.expect_keyword_("from") - new = [self.parse_glyphclass_(accept_glyphname=False)] - else: - keyword = None - self.expect_symbol_(";") - if len(new) is 0 and not any(lookups): - raise ParserError( - 'Expected "by", "from" or explicit lookup references', - self.cur_token_location_) - - if keyword == "from": - if len(old) != 1 or len(old[0]) != 1: - raise ParserError('Expected a single glyph before "from"', - location) - if len(new) != 1: - raise ParserError('Expected a single glyphclass after "from"', - location) - return ast.AlternateSubstitution(location, list(old[0])[0], new[0]) - - rule = ast.SubstitutionRule(location, old, new) - rule.old_prefix, rule.old_suffix = old_prefix, old_suffix - rule.lookups = lookups - return rule - - def parse_subtable_(self): - assert self.is_cur_keyword_("subtable") - location = self.cur_token_location_ - self.expect_symbol_(";") - return ast.SubtableStatement(location) - - def parse_valuerecord_(self, vertical): - if self.next_token_type_ is Lexer.NUMBER: - number, location = self.expect_number_(), self.cur_token_location_ - if vertical: - val = ast.ValueRecord(location, 0, 0, 0, number) - else: - val = ast.ValueRecord(location, 0, 0, number, 0) - return val - self.expect_symbol_("<") - location = self.cur_token_location_ - if self.next_token_type_ is Lexer.NAME: - name = self.expect_name_() - vrd = self.valuerecords_.resolve(name) - if vrd is None: - raise ParserError("Unknown valueRecordDef \"%s\"" % name, - self.cur_token_location_) - value = vrd.value - xPlacement, yPlacement = (value.xPlacement, 
value.yPlacement) - xAdvance, yAdvance = (value.xAdvance, value.yAdvance) - else: - xPlacement, yPlacement, xAdvance, yAdvance = ( - self.expect_number_(), self.expect_number_(), - self.expect_number_(), self.expect_number_()) - self.expect_symbol_(">") - return ast.ValueRecord( - location, xPlacement, yPlacement, xAdvance, yAdvance) - - def parse_valuerecord_definition_(self, vertical): - assert self.is_cur_keyword_("valueRecordDef") - location = self.cur_token_location_ - value = self.parse_valuerecord_(vertical) - name = self.expect_name_() - self.expect_symbol_(";") - vrd = ast.ValueRecordDefinition(location, name, value) - self.valuerecords_.define(name, vrd) - return vrd - - def parse_languagesystem_(self): - assert self.cur_token_ == "languagesystem" - location = self.cur_token_location_ - script, language = self.expect_tag_(), self.expect_tag_() - self.expect_symbol_(";") - return ast.LanguageSystemStatement(location, script, language) - - def parse_feature_block_(self): - assert self.cur_token_ == "feature" - location = self.cur_token_location_ - tag = self.expect_tag_() - vertical = (tag == "vkrn") - - use_extension = False - if self.next_token_ == "useExtension": - self.expect_keyword_("useExtension") - use_extension = True - - block = ast.FeatureBlock(location, tag, use_extension) - self.parse_block_(block, vertical) - return block - - def parse_block_(self, block, vertical): - self.expect_symbol_("{") - for symtab in self.symbol_tables_: - symtab.enter_scope() - - statements = block.statements - while self.next_token_ != "}": - self.advance_lexer_() - if self.cur_token_type_ is Lexer.GLYPHCLASS: - statements.append(self.parse_glyphclass_definition_()) - elif self.is_cur_keyword_("anchorDef"): - statements.append(self.parse_anchordef_()) - elif self.is_cur_keyword_("ignore"): - statements.append(self.parse_ignore_()) - elif self.is_cur_keyword_("language"): - statements.append(self.parse_language_()) - elif self.is_cur_keyword_("lookup"): - 
statements.append(self.parse_lookup_(vertical)) - elif self.is_cur_keyword_("script"): - statements.append(self.parse_script_()) - elif (self.is_cur_keyword_("substitute") or - self.is_cur_keyword_("sub")): - statements.append(self.parse_substitute_()) - elif self.is_cur_keyword_("subtable"): - statements.append(self.parse_subtable_()) - elif self.is_cur_keyword_("valueRecordDef"): - statements.append(self.parse_valuerecord_definition_(vertical)) - else: - raise ParserError( - "Expected glyph class definition or statement", - self.cur_token_location_) - - self.expect_symbol_("}") - for symtab in self.symbol_tables_: - symtab.exit_scope() - - name = self.expect_name_() - if name != block.name.strip(): - raise ParserError("Expected \"%s\"" % block.name.strip(), - self.cur_token_location_) - self.expect_symbol_(";") - - def is_cur_keyword_(self, k): - return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k) - - def expect_tag_(self): - self.advance_lexer_() - if self.cur_token_type_ is not Lexer.NAME: - raise ParserError("Expected a tag", self.cur_token_location_) - if len(self.cur_token_) > 4: - raise ParserError("Tags can not be longer than 4 characters", - self.cur_token_location_) - return (self.cur_token_ + " ")[:4] - - def expect_symbol_(self, symbol): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol: - return symbol - raise ParserError("Expected '%s'" % symbol, self.cur_token_location_) - - def expect_keyword_(self, keyword): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword: - return self.cur_token_ - raise ParserError("Expected \"%s\"" % keyword, - self.cur_token_location_) - - def expect_name_(self): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.NAME: - return self.cur_token_ - raise ParserError("Expected a name", self.cur_token_location_) - - def expect_number_(self): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.NUMBER: - 
return self.cur_token_ - raise ParserError("Expected a number", self.cur_token_location_) - - def advance_lexer_(self): - self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( - self.next_token_type_, self.next_token_, self.next_token_location_) - try: - (self.next_token_type_, self.next_token_, - self.next_token_location_) = self.lexer_.next() - except StopIteration: - self.next_token_type_, self.next_token_ = (None, None) - - def make_glyph_range_(self, location, start, limit): - """("a.sc", "d.sc") --> {"a.sc", "b.sc", "c.sc", "d.sc"}""" - result = set() - if len(start) != len(limit): - raise ParserError( - "Bad range: \"%s\" and \"%s\" should have the same length" % - (start, limit), location) - rev = lambda s: ''.join(reversed(list(s))) # string reversal - prefix = os.path.commonprefix([start, limit]) - suffix = rev(os.path.commonprefix([rev(start), rev(limit)])) - if len(suffix) > 0: - start_range = start[len(prefix):-len(suffix)] - limit_range = limit[len(prefix):-len(suffix)] - else: - start_range = start[len(prefix):] - limit_range = limit[len(prefix):] - - if start_range >= limit_range: - raise ParserError("Start of range must be smaller than its end", - location) - - uppercase = re.compile(r'^[A-Z]$') - if uppercase.match(start_range) and uppercase.match(limit_range): - for c in range(ord(start_range), ord(limit_range) + 1): - result.add("%s%c%s" % (prefix, c, suffix)) - return result - - lowercase = re.compile(r'^[a-z]$') - if lowercase.match(start_range) and lowercase.match(limit_range): - for c in range(ord(start_range), ord(limit_range) + 1): - result.add("%s%c%s" % (prefix, c, suffix)) - return result - - digits = re.compile(r'^[0-9]{1,3}$') - if digits.match(start_range) and digits.match(limit_range): - for i in range(int(start_range, 10), int(limit_range, 10) + 1): - number = ("000" + str(i))[-len(start_range):] - result.add("%s%s%s" % (prefix, number, suffix)) - return result - - raise ParserError("Bad range: \"%s-%s\"" % (start, 
limit), location) - - -class SymbolTable(object): - def __init__(self): - self.scopes_ = [{}] - - def enter_scope(self): - self.scopes_.append({}) - - def exit_scope(self): - self.scopes_.pop() - - def define(self, name, item): - self.scopes_[-1][name] = item - - def resolve(self, name): - for scope in reversed(self.scopes_): - item = scope.get(name) - if item: - return item - return None diff -Nru fonttools-3.0/Tools/fontTools/feaLib/parser_test.py fonttools-3.21.2/Tools/fontTools/feaLib/parser_test.py --- fonttools-3.0/Tools/fontTools/feaLib/parser_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/parser_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,448 +0,0 @@ -from __future__ import print_function, division, absolute_import -from __future__ import unicode_literals -from fontTools.feaLib.lexer import LexerError -from fontTools.feaLib.parser import Parser, ParserError, SymbolTable -from fontTools.misc.py23 import * -import fontTools.feaLib.ast as ast -import codecs -import os -import shutil -import sys -import tempfile -import unittest - - -class ParserTest(unittest.TestCase): - def __init__(self, methodName): - unittest.TestCase.__init__(self, methodName) - # Python 3 renamed assertRaisesRegexp to assertRaisesRegex, - # and fires deprecation warnings if a program uses the old name. 
- if not hasattr(self, "assertRaisesRegex"): - self.assertRaisesRegex = self.assertRaisesRegexp - - def test_anchordef(self): - [foo] = self.parse("anchorDef 123 456 foo;").statements - self.assertEqual(type(foo), ast.AnchorDefinition) - self.assertEqual(foo.name, "foo") - self.assertEqual(foo.x, 123) - self.assertEqual(foo.y, 456) - self.assertEqual(foo.contourpoint, None) - - def test_anchordef_contourpoint(self): - [foo] = self.parse("anchorDef 123 456 contourpoint 5 foo;").statements - self.assertEqual(type(foo), ast.AnchorDefinition) - self.assertEqual(foo.name, "foo") - self.assertEqual(foo.x, 123) - self.assertEqual(foo.y, 456) - self.assertEqual(foo.contourpoint, 5) - - def test_feature_block(self): - [liga] = self.parse("feature liga {} liga;").statements - self.assertEqual(liga.name, "liga") - self.assertFalse(liga.use_extension) - - def test_feature_block_useExtension(self): - [liga] = self.parse("feature liga useExtension {} liga;").statements - self.assertEqual(liga.name, "liga") - self.assertTrue(liga.use_extension) - - def test_glyphclass(self): - [gc] = self.parse("@dash = [endash emdash figuredash];").statements - self.assertEqual(gc.name, "dash") - self.assertEqual(gc.glyphs, {"endash", "emdash", "figuredash"}) - - def test_glyphclass_bad(self): - self.assertRaisesRegex( - ParserError, - "Expected glyph name, glyph range, or glyph class reference", - self.parse, "@bad = [a 123];") - - def test_glyphclass_duplicate(self): - self.assertRaisesRegex( - ParserError, "Glyph class @dup already defined", - self.parse, "@dup = [a b]; @dup = [x];") - - def test_glyphclass_empty(self): - [gc] = self.parse("@empty_set = [];").statements - self.assertEqual(gc.name, "empty_set") - self.assertEqual(gc.glyphs, set()) - - def test_glyphclass_equality(self): - [foo, bar] = self.parse("@foo = [a b]; @bar = @foo;").statements - self.assertEqual(foo.glyphs, {"a", "b"}) - self.assertEqual(bar.glyphs, {"a", "b"}) - - def test_glyphclass_range_uppercase(self): - [gc] = 
self.parse("@swashes = [X.swash-Z.swash];").statements - self.assertEqual(gc.name, "swashes") - self.assertEqual(gc.glyphs, {"X.swash", "Y.swash", "Z.swash"}) - - def test_glyphclass_range_lowercase(self): - [gc] = self.parse("@defg.sc = [d.sc-g.sc];").statements - self.assertEqual(gc.name, "defg.sc") - self.assertEqual(gc.glyphs, {"d.sc", "e.sc", "f.sc", "g.sc"}) - - def test_glyphclass_range_digit1(self): - [gc] = self.parse("@range = [foo.2-foo.5];").statements - self.assertEqual(gc.glyphs, {"foo.2", "foo.3", "foo.4", "foo.5"}) - - def test_glyphclass_range_digit2(self): - [gc] = self.parse("@range = [foo.09-foo.11];").statements - self.assertEqual(gc.glyphs, {"foo.09", "foo.10", "foo.11"}) - - def test_glyphclass_range_digit3(self): - [gc] = self.parse("@range = [foo.123-foo.125];").statements - self.assertEqual(gc.glyphs, {"foo.123", "foo.124", "foo.125"}) - - def test_glyphclass_range_bad(self): - self.assertRaisesRegex( - ParserError, - "Bad range: \"a\" and \"foobar\" should have the same length", - self.parse, "@bad = [a-foobar];") - self.assertRaisesRegex( - ParserError, "Bad range: \"A.swash-z.swash\"", - self.parse, "@bad = [A.swash-z.swash];") - self.assertRaisesRegex( - ParserError, "Start of range must be smaller than its end", - self.parse, "@bad = [B.swash-A.swash];") - self.assertRaisesRegex( - ParserError, "Bad range: \"foo.1234-foo.9876\"", - self.parse, "@bad = [foo.1234-foo.9876];") - - def test_glyphclass_range_mixed(self): - [gc] = self.parse("@range = [a foo.09-foo.11 X.sc-Z.sc];").statements - self.assertEqual(gc.glyphs, { - "a", "foo.09", "foo.10", "foo.11", "X.sc", "Y.sc", "Z.sc" - }) - - def test_glyphclass_reference(self): - [vowels_lc, vowels_uc, vowels] = self.parse( - "@Vowels.lc = [a e i o u]; @Vowels.uc = [A E I O U];" - "@Vowels = [@Vowels.lc @Vowels.uc y Y];").statements - self.assertEqual(vowels_lc.glyphs, set(list("aeiou"))) - self.assertEqual(vowels_uc.glyphs, set(list("AEIOU"))) - self.assertEqual(vowels.glyphs, 
set(list("aeiouyAEIOUY"))) - self.assertRaisesRegex( - ParserError, "Unknown glyph class @unknown", - self.parse, "@bad = [@unknown];") - - def test_glyphclass_scoping(self): - [foo, liga, smcp] = self.parse( - "@foo = [a b];" - "feature liga { @bar = [@foo l]; } liga;" - "feature smcp { @bar = [@foo s]; } smcp;" - ).statements - self.assertEqual(foo.glyphs, {"a", "b"}) - self.assertEqual(liga.statements[0].glyphs, {"a", "b", "l"}) - self.assertEqual(smcp.statements[0].glyphs, {"a", "b", "s"}) - - def test_ignore_sub(self): - doc = self.parse("feature test {ignore sub e t' c;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.IgnoreSubstitutionRule) - self.assertEqual(s.prefix, [{"e"}]) - self.assertEqual(s.glyphs, [{"t"}]) - self.assertEqual(s.suffix, [{"c"}]) - - def test_ignore_substitute(self): - doc = self.parse( - "feature test {" - " ignore substitute f [a e] d' [a u]' [e y];" - "} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.IgnoreSubstitutionRule) - self.assertEqual(s.prefix, [{"f"}, {"a", "e"}]) - self.assertEqual(s.glyphs, [{"d"}, {"a", "u"}]) - self.assertEqual(s.suffix, [{"e", "y"}]) - - def test_language(self): - doc = self.parse("feature test {language DEU;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertTrue(s.include_default) - self.assertFalse(s.required) - - def test_language_exclude_dflt(self): - doc = self.parse("feature test {language DEU exclude_dflt;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertFalse(s.include_default) - self.assertFalse(s.required) - - def test_language_exclude_dflt_required(self): - doc = self.parse("feature test {" - " language DEU exclude_dflt required;" - "} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - 
self.assertEqual(s.language, "DEU") - self.assertFalse(s.include_default) - self.assertTrue(s.required) - - def test_language_include_dflt(self): - doc = self.parse("feature test {language DEU include_dflt;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertTrue(s.include_default) - self.assertFalse(s.required) - - def test_language_include_dflt_required(self): - doc = self.parse("feature test {" - " language DEU include_dflt required;" - "} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.LanguageStatement) - self.assertEqual(s.language, "DEU") - self.assertTrue(s.include_default) - self.assertTrue(s.required) - - def test_lookup_block(self): - [lookup] = self.parse("lookup Ligatures {} Ligatures;").statements - self.assertEqual(lookup.name, "Ligatures") - self.assertFalse(lookup.use_extension) - - def test_lookup_block_useExtension(self): - [lookup] = self.parse("lookup Foo useExtension {} Foo;").statements - self.assertEqual(lookup.name, "Foo") - self.assertTrue(lookup.use_extension) - - def test_lookup_block_name_mismatch(self): - self.assertRaisesRegex( - ParserError, 'Expected "Foo"', - self.parse, "lookup Foo {} Bar;") - - def test_lookup_block_with_horizontal_valueRecordDef(self): - doc = self.parse("feature liga {" - " lookup look {" - " valueRecordDef 123 foo;" - " } look;" - "} liga;") - [liga] = doc.statements - [look] = liga.statements - [foo] = look.statements - self.assertEqual(foo.value.xAdvance, 123) - self.assertEqual(foo.value.yAdvance, 0) - - def test_lookup_block_with_vertical_valueRecordDef(self): - doc = self.parse("feature vkrn {" - " lookup look {" - " valueRecordDef 123 foo;" - " } look;" - "} vkrn;") - [vkrn] = doc.statements - [look] = vkrn.statements - [foo] = look.statements - self.assertEqual(foo.value.xAdvance, 0) - self.assertEqual(foo.value.yAdvance, 123) - - def test_lookup_reference(self): - [foo, bar] = 
self.parse("lookup Foo {} Foo;" - "feature Bar {lookup Foo;} Bar;").statements - [ref] = bar.statements - self.assertEqual(type(ref), ast.LookupReferenceStatement) - self.assertEqual(ref.lookup, foo) - - def test_lookup_reference_unknown(self): - self.assertRaisesRegex( - ParserError, 'Unknown lookup "Huh"', - self.parse, "feature liga {lookup Huh;} liga;") - - def test_script(self): - doc = self.parse("feature test {script cyrl;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), ast.ScriptStatement) - self.assertEqual(s.script, "cyrl") - - def test_substitute_single_format_a(self): # GSUB LookupType 1 - doc = self.parse("feature smcp {substitute a by a.sc;} smcp;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"a"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"a.sc"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_single_format_b(self): # GSUB LookupType 1 - doc = self.parse( - "feature smcp {" - " substitute [one.fitted one.oldstyle] by one;" - "} smcp;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"one.fitted", "one.oldstyle"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"one"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_single_format_c(self): # GSUB LookupType 1 - doc = self.parse( - "feature smcp {" - " substitute [a-d] by [A.sc-D.sc];" - "} smcp;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"a", "b", "c", "d"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"A.sc", "B.sc", "C.sc", "D.sc"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_multiple(self): # GSUB LookupType 2 - doc = self.parse("lookup Look {substitute f_f_i by f f i;} Look;") - sub = doc.statements[0].statements[0] - self.assertEqual(type(sub), 
ast.SubstitutionRule) - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"f_f_i"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"f"}, {"f"}, {"i"}]) - self.assertEqual(sub.lookups, [None]) - - def test_substitute_from(self): # GSUB LookupType 3 - doc = self.parse("feature test {" - " substitute a from [a.1 a.2 a.3];" - "} test;") - sub = doc.statements[0].statements[0] - self.assertEqual(type(sub), ast.AlternateSubstitution) - self.assertEqual(sub.glyph, "a") - self.assertEqual(sub.from_class, {"a.1", "a.2", "a.3"}) - - def test_substitute_from_glyphclass(self): # GSUB LookupType 3 - doc = self.parse("feature test {" - " @Ampersands = [ampersand.1 ampersand.2];" - " substitute ampersand from @Ampersands;" - "} test;") - [glyphclass, sub] = doc.statements[0].statements - self.assertEqual(type(sub), ast.AlternateSubstitution) - self.assertEqual(sub.glyph, "ampersand") - self.assertEqual(sub.from_class, {"ampersand.1", "ampersand.2"}) - - def test_substitute_ligature(self): # GSUB LookupType 4 - doc = self.parse("feature liga {substitute f f i by f_f_i;} liga;") - sub = doc.statements[0].statements[0] - self.assertEqual(sub.old_prefix, []) - self.assertEqual(sub.old, [{"f"}, {"f"}, {"i"}]) - self.assertEqual(sub.old_suffix, []) - self.assertEqual(sub.new, [{"f_f_i"}]) - self.assertEqual(sub.lookups, [None, None, None]) - - def test_substitute_lookups(self): - doc = Parser(self.getpath("spec5fi.fea")).parse() - [ligs, sub, feature] = doc.statements - self.assertEqual(feature.statements[0].lookups, [ligs, None, sub]) - self.assertEqual(feature.statements[1].lookups, [ligs, None, sub]) - - def test_substitute_missing_by(self): - self.assertRaisesRegex( - ParserError, 'Expected "by", "from" or explicit lookup references', - self.parse, "feature liga {substitute f f i;} liga;") - - def test_subtable(self): - doc = self.parse("feature test {subtable;} test;") - s = doc.statements[0].statements[0] - self.assertEqual(type(s), 
ast.SubtableStatement) - - def test_valuerecord_format_a_horizontal(self): - doc = self.parse("feature liga {valueRecordDef 123 foo;} liga;") - value = doc.statements[0].statements[0].value - self.assertEqual(value.xPlacement, 0) - self.assertEqual(value.yPlacement, 0) - self.assertEqual(value.xAdvance, 123) - self.assertEqual(value.yAdvance, 0) - - def test_valuerecord_format_a_vertical(self): - doc = self.parse("feature vkrn {valueRecordDef 123 foo;} vkrn;") - value = doc.statements[0].statements[0].value - self.assertEqual(value.xPlacement, 0) - self.assertEqual(value.yPlacement, 0) - self.assertEqual(value.xAdvance, 0) - self.assertEqual(value.yAdvance, 123) - - def test_valuerecord_format_b(self): - doc = self.parse("feature liga {valueRecordDef <1 2 3 4> foo;} liga;") - value = doc.statements[0].statements[0].value - self.assertEqual(value.xPlacement, 1) - self.assertEqual(value.yPlacement, 2) - self.assertEqual(value.xAdvance, 3) - self.assertEqual(value.yAdvance, 4) - - def test_valuerecord_named(self): - doc = self.parse("valueRecordDef <1 2 3 4> foo;" - "feature liga {valueRecordDef bar;} liga;") - value = doc.statements[1].statements[0].value - self.assertEqual(value.xPlacement, 1) - self.assertEqual(value.yPlacement, 2) - self.assertEqual(value.xAdvance, 3) - self.assertEqual(value.yAdvance, 4) - - def test_valuerecord_named_unknown(self): - self.assertRaisesRegex( - ParserError, "Unknown valueRecordDef \"unknown\"", - self.parse, "valueRecordDef foo;") - - def test_valuerecord_scoping(self): - [foo, liga, smcp] = self.parse( - "valueRecordDef 789 foo;" - "feature liga {valueRecordDef bar;} liga;" - "feature smcp {valueRecordDef bar;} smcp;" - ).statements - self.assertEqual(foo.value.xAdvance, 789) - self.assertEqual(liga.statements[0].value.xAdvance, 789) - self.assertEqual(smcp.statements[0].value.xAdvance, 789) - - def test_languagesystem(self): - [langsys] = self.parse("languagesystem latn DEU;").statements - self.assertEqual(langsys.script, 
"latn") - self.assertEqual(langsys.language, "DEU ") - self.assertRaisesRegex( - ParserError, "Expected ';'", - self.parse, "languagesystem latn DEU") - self.assertRaisesRegex( - ParserError, "longer than 4 characters", - self.parse, "languagesystem foobar DEU") - self.assertRaisesRegex( - ParserError, "longer than 4 characters", - self.parse, "languagesystem latn FOOBAR") - - def setUp(self): - self.tempdir = None - self.num_tempfiles = 0 - - def tearDown(self): - if self.tempdir: - shutil.rmtree(self.tempdir) - - def parse(self, text): - if not self.tempdir: - self.tempdir = tempfile.mkdtemp() - self.num_tempfiles += 1 - path = os.path.join(self.tempdir, "tmp%d.fea" % self.num_tempfiles) - with codecs.open(path, "wb", "utf-8") as outfile: - outfile.write(text) - return Parser(path).parse() - - @staticmethod - def getpath(testfile): - path, _ = os.path.split(__file__) - return os.path.join(path, "testdata", testfile) - - -class SymbolTableTest(unittest.TestCase): - def test_scopes(self): - symtab = SymbolTable() - symtab.define("foo", 23) - self.assertEqual(symtab.resolve("foo"), 23) - symtab.enter_scope() - self.assertEqual(symtab.resolve("foo"), 23) - symtab.define("foo", 42) - self.assertEqual(symtab.resolve("foo"), 42) - symtab.exit_scope() - self.assertEqual(symtab.resolve("foo"), 23) - - def test_resolve_undefined(self): - self.assertEqual(SymbolTable().resolve("abc"), None) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/include0.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include0.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/include0.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include0.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -I0 diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/include1.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include1.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/include1.fea 
2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include1.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I1a -include(include0.fea); -I1b diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/include2.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include2.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/include2.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include2.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I2a -include(include1.fea); -I2b diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/include3.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include3.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/include3.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include3.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -I3a -include(include2.fea); -I3b - diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/include4.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include4.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/include4.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include4.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -I4a -include(include3.fea); -I4b - diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/include5.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include5.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/include5.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include5.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I5a -include(include4.fea); -I5b diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/include6.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include6.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/include6.fea 2015-08-31 17:57:15.000000000 +0000 +++ 
fonttools-3.21.2/Tools/fontTools/feaLib/testdata/include6.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -I6a -include(include5.fea); -I6b diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/includemissingfile.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/includemissingfile.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/includemissingfile.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/testdata/includemissingfile.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -include(missingfile.fea); diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/includeself.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/includeself.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/includeself.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/testdata/includeself.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -include(includeself.fea); diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/mini.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/mini.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/mini.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/testdata/mini.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,19 +0,0 @@ -# Example file from OpenType Feature File specification, section 1. 
-# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html - -# Script and language coverage -languagesystem DFLT dflt; -languagesystem latn dflt; - -# Ligature formation -feature liga { - substitute f i by f_i; - substitute f l by f_l; -} liga; - -# Kerning -feature kern { - position A Y -100; - position a y -80; - position s f' <0 0 10 0> t; -} kern; diff -Nru fonttools-3.0/Tools/fontTools/feaLib/testdata/spec5fi.fea fonttools-3.21.2/Tools/fontTools/feaLib/testdata/spec5fi.fea --- fonttools-3.0/Tools/fontTools/feaLib/testdata/spec5fi.fea 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/feaLib/testdata/spec5fi.fea 1970-01-01 00:00:00.000000000 +0000 @@ -1,18 +0,0 @@ -# OpenType Feature File specification, section 5.f.i, example 1 -# "Specifying a Chain Sub rule and marking sub-runs" -# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html - -lookup CNTXT_LIGS { - substitute f i by f_i; - substitute c t by c_t; - } CNTXT_LIGS; - -lookup CNTXT_SUB { - substitute n by n.end; - substitute s by s.end; - } CNTXT_SUB; - -feature test { - substitute [a e i o u] f' lookup CNTXT_LIGS i' n' lookup CNTXT_SUB; - substitute [a e i o u] c' lookup CNTXT_LIGS t' s' lookup CNTXT_SUB; -} test; diff -Nru fonttools-3.0/Tools/fontTools/__init__.py fonttools-3.21.2/Tools/fontTools/__init__.py --- fonttools-3.0/Tools/fontTools/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -version = "3.0" diff -Nru fonttools-3.0/Tools/fontTools/inspect.py fonttools-3.21.2/Tools/fontTools/inspect.py --- fonttools-3.0/Tools/fontTools/inspect.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/inspect.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,265 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. 
-# -# Google Author(s): Behdad Esfahbod - -"""GUI font inspector. -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import misc, ttLib, cffLib -import pygtk -pygtk.require('2.0') -import gtk -import sys - - -class Row(object): - def __init__(self, parent, index, key, value, font): - self._parent = parent - self._index = index - self._key = key - self._value = value - self._font = font - - if isinstance(value, ttLib.TTFont): - self._add_font(value) - return - - if not isinstance(value, basestring): - # Try sequences - is_sequence = True - try: - len(value) - iter(value) - # It's hard to differentiate list-type sequences - # from dict-type ones. Try fetching item 0. - value[0] - except (TypeError, AttributeError, KeyError, IndexError): - is_sequence = False - if is_sequence: - self._add_list(key, value) - return - if hasattr(value, '__dict__'): - self._add_object(key, value) - return - if hasattr(value, 'items'): - self._add_dict(key, value) - return - - if isinstance(value, basestring): - self._value_str = '"'+value+'"' - self._children = [] - return - - # Everything else - self._children = [] - - def _filter_items(self): - items = [] - for k,v in self._items: - if isinstance(v, ttLib.TTFont): - continue - if k in ['reader', 'file', 'tableTag', 'compileStatus', 'recurse']: - continue - if isinstance(k, basestring) and k[0] == '_': - continue - items.append((k,v)) - self._items = items - - def _add_font(self, font): - self._items = [(tag,font[tag]) for tag in font.keys()] - - def _add_object(self, key, value): - # Make sure item is decompiled - try: - value["asdf"] - except (AttributeError, KeyError, TypeError, ttLib.TTLibError): - pass - if isinstance(value, ttLib.getTableModule('glyf').Glyph): - # Glyph type needs explicit expanding to be useful - value.expand(self._font['glyf']) - if isinstance(value, misc.psCharStrings.T2CharString): - try: - value.decompile() - except TypeError: # 
Subroutines can't be decompiled - pass - if isinstance(value, cffLib.BaseDict): - for k in value.rawDict.keys(): - getattr(value, k) - if isinstance(value, cffLib.Index): - # Load all items - for i in range(len(value)): - value[i] - # Discard offsets as should not be needed anymore - if hasattr(value, 'offsets'): - del value.offsets - - self._value_str = value.__class__.__name__ - if isinstance(value, ttLib.tables.DefaultTable.DefaultTable): - self._value_str += ' (%d Bytes)' % self._font.reader.tables[key].length - self._items = sorted(value.__dict__.items()) - self._filter_items() - - def _add_dict(self, key, value): - self._value_str = '%s of %d items' % (value.__class__.__name__, len(value)) - self._items = sorted(value.items()) - - def _add_list(self, key, value): - if len(value) and len(value) <= 32: - self._value_str = str(value) - else: - self._value_str = '%s of %d items' % (value.__class__.__name__, len(value)) - self._items = list(enumerate(value)) - - def __len__(self): - if hasattr(self, '_children'): - return len(self._children) - if hasattr(self, '_items'): - return len(self._items) - assert False - - def _ensure_children(self): - if hasattr(self, '_children'): - return - children = [] - for i,(k,v) in enumerate(self._items): - children.append(Row(self, i, k, v, self._font)) - self._children = children - del self._items - - def __getitem__(self, n): - if n >= len(self): - return None - if not hasattr(self, '_children'): - self._children = [None] * len(self) - c = self._children[n] - if c is None: - k,v = self._items[n] - c = self._children[n] = Row(self, n, k, v, self._font) - self._items[n] = None - return c - - def get_parent(self): - return self._parent - - def get_index(self): - return self._index - - def get_key(self): - return self._key - - def get_value(self): - return self._value - - def get_value_str(self): - if hasattr(self,'_value_str'): - return self._value_str - return str(self._value) - -class FontTreeModel(gtk.GenericTreeModel): - - 
__gtype_name__ = 'FontTreeModel' - - def __init__(self, font): - super(FontTreeModel, self).__init__() - self._columns = (str, str) - self.font = font - self._root = Row(None, 0, "font", font, font) - - def on_get_flags(self): - return 0 - - def on_get_n_columns(self): - return len(self._columns) - - def on_get_column_type(self, index): - return self._columns[index] - - def on_get_iter(self, path): - rowref = self._root - while path: - rowref = rowref[path[0]] - path = path[1:] - return rowref - - def on_get_path(self, rowref): - path = [] - while rowref != self._root: - path.append(rowref.get_index()) - rowref = rowref.get_parent() - path.reverse() - return tuple(path) - - def on_get_value(self, rowref, column): - if column == 0: - return rowref.get_key() - else: - return rowref.get_value_str() - - def on_iter_next(self, rowref): - return rowref.get_parent()[rowref.get_index() + 1] - - def on_iter_children(self, rowref): - return rowref[0] - - def on_iter_has_child(self, rowref): - return bool(len(rowref)) - - def on_iter_n_children(self, rowref): - return len(rowref) - - def on_iter_nth_child(self, rowref, n): - if not rowref: rowref = self._root - return rowref[n] - - def on_iter_parent(self, rowref): - return rowref.get_parent() - -class Inspect(object): - - def _delete_event(self, widget, event, data=None): - gtk.main_quit() - return False - - def __init__(self, fontfile): - - self.window = gtk.Window(gtk.WINDOW_TOPLEVEL) - self.window.set_title("%s - pyftinspect" % fontfile) - self.window.connect("delete_event", self._delete_event) - self.window.set_size_request(400, 600) - - self.scrolled_window = gtk.ScrolledWindow() - self.window.add(self.scrolled_window) - - self.font = ttLib.TTFont(fontfile, lazy=True) - self.treemodel = FontTreeModel(self.font) - self.treeview = gtk.TreeView(self.treemodel) - #self.treeview.set_reorderable(True) - - for i in range(2): - col_name = ('Key', 'Value')[i] - col = gtk.TreeViewColumn(col_name) - col.set_sort_column_id(-1) - 
self.treeview.append_column(col) - - cell = gtk.CellRendererText() - col.pack_start(cell, True) - col.add_attribute(cell, 'text', i) - - self.treeview.set_search_column(1) - self.scrolled_window.add(self.treeview) - self.window.show_all() - -def main(args=None): - if args is None: - args = sys.argv[1:] - if len(args) < 1: - print("usage: pyftinspect font...", file=sys.stderr) - sys.exit(1) - for arg in args: - Inspect(arg) - gtk.main() - -if __name__ == "__main__": - main() diff -Nru fonttools-3.0/Tools/fontTools/merge.py fonttools-3.21.2/Tools/fontTools/merge.py --- fonttools-3.0/Tools/fontTools/merge.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/merge.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,949 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Behdad Esfahbod, Roozbeh Pournader - -"""Font merger. -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.timeTools import timestampNow -from fontTools import ttLib, cffLib -from fontTools.ttLib.tables import otTables, _h_e_a_d -from fontTools.ttLib.tables.DefaultTable import DefaultTable -from functools import reduce -import sys -import time -import operator - - -def _add_method(*clazzes, **kwargs): - """Returns a decorator function that adds a new method to one or - more classes.""" - allowDefault = kwargs.get('allowDefaultTable', False) - def wrapper(method): - for clazz in clazzes: - assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.' - assert method.__name__ not in clazz.__dict__, \ - "Oops, class '%s' has method '%s'." 
% (clazz.__name__, method.__name__) - setattr(clazz, method.__name__, method) - return None - return wrapper - -# General utility functions for merging values from different fonts - -def equal(lst): - lst = list(lst) - t = iter(lst) - first = next(t) - assert all(item == first for item in t), "Expected all items to be equal: %s" % lst - return first - -def first(lst): - return next(iter(lst)) - -def recalculate(lst): - return NotImplemented - -def current_time(lst): - return timestampNow() - -def bitwise_and(lst): - return reduce(operator.and_, lst) - -def bitwise_or(lst): - return reduce(operator.or_, lst) - -def avg_int(lst): - lst = list(lst) - return sum(lst) // len(lst) - -def onlyExisting(func): - """Returns a filter func that when called with a list, - only calls func on the non-NotImplemented items of the list, - and only so if there's at least one item remaining. - Otherwise returns NotImplemented.""" - - def wrapper(lst): - items = [item for item in lst if item is not NotImplemented] - return func(items) if items else NotImplemented - - return wrapper - -def sumLists(lst): - l = [] - for item in lst: - l.extend(item) - return l - -def sumDicts(lst): - d = {} - for item in lst: - d.update(item) - return d - -def mergeObjects(lst): - lst = [item for item in lst if item is not NotImplemented] - if not lst: - return NotImplemented - lst = [item for item in lst if item is not None] - if not lst: - return None - - clazz = lst[0].__class__ - assert all(type(item) == clazz for item in lst), lst - - logic = clazz.mergeMap - returnTable = clazz() - returnDict = {} - - allKeys = set.union(set(), *(vars(table).keys() for table in lst)) - for key in allKeys: - try: - mergeLogic = logic[key] - except KeyError: - try: - mergeLogic = logic['*'] - except KeyError: - raise Exception("Don't know how to merge key %s of class %s" % - (key, clazz.__name__)) - if mergeLogic is NotImplemented: - continue - value = mergeLogic(getattr(table, key, NotImplemented) for table in lst) 
- if value is not NotImplemented: - returnDict[key] = value - - returnTable.__dict__ = returnDict - - return returnTable - -def mergeBits(bitmap): - - def wrapper(lst): - lst = list(lst) - returnValue = 0 - for bitNumber in range(bitmap['size']): - try: - mergeLogic = bitmap[bitNumber] - except KeyError: - try: - mergeLogic = bitmap['*'] - except KeyError: - raise Exception("Don't know how to merge bit %s" % bitNumber) - shiftedBit = 1 << bitNumber - mergedValue = mergeLogic(bool(item & shiftedBit) for item in lst) - returnValue |= mergedValue << bitNumber - return returnValue - - return wrapper - - -@_add_method(DefaultTable, allowDefaultTable=True) -def merge(self, m, tables): - if not hasattr(self, 'mergeMap'): - m.log("Don't know how to merge '%s'." % self.tableTag) - return NotImplemented - - logic = self.mergeMap - - if isinstance(logic, dict): - return m.mergeObjects(self, self.mergeMap, tables) - else: - return logic(tables) - - -ttLib.getTableClass('maxp').mergeMap = { - '*': max, - 'tableTag': equal, - 'tableVersion': equal, - 'numGlyphs': sum, - 'maxStorage': first, - 'maxFunctionDefs': first, - 'maxInstructionDefs': first, - # TODO When we correctly merge hinting data, update these values: - # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions -} - -headFlagsMergeBitMap = { - 'size': 16, - '*': bitwise_or, - 1: bitwise_and, # Baseline at y = 0 - 2: bitwise_and, # lsb at x = 0 - 3: bitwise_and, # Force ppem to integer values. FIXME? - 5: bitwise_and, # Font is vertical - 6: lambda bit: 0, # Always set to zero - 11: bitwise_and, # Font data is 'lossless' - 13: bitwise_and, # Optimized for ClearType - 14: bitwise_and, # Last resort font. FIXME? 
equal or first may be better - 15: lambda bit: 0, # Always set to zero -} - -ttLib.getTableClass('head').mergeMap = { - 'tableTag': equal, - 'tableVersion': max, - 'fontRevision': max, - 'checkSumAdjustment': lambda lst: 0, # We need *something* here - 'magicNumber': equal, - 'flags': mergeBits(headFlagsMergeBitMap), - 'unitsPerEm': equal, - 'created': current_time, - 'modified': current_time, - 'xMin': min, - 'yMin': min, - 'xMax': max, - 'yMax': max, - 'macStyle': first, - 'lowestRecPPEM': max, - 'fontDirectionHint': lambda lst: 2, - 'indexToLocFormat': recalculate, - 'glyphDataFormat': equal, -} - -ttLib.getTableClass('hhea').mergeMap = { - '*': equal, - 'tableTag': equal, - 'tableVersion': max, - 'ascent': max, - 'descent': min, - 'lineGap': max, - 'advanceWidthMax': max, - 'minLeftSideBearing': min, - 'minRightSideBearing': min, - 'xMaxExtent': max, - 'caretSlopeRise': first, - 'caretSlopeRun': first, - 'caretOffset': first, - 'numberOfHMetrics': recalculate, -} - -os2FsTypeMergeBitMap = { - 'size': 16, - '*': lambda bit: 0, - 1: bitwise_or, # no embedding permitted - 2: bitwise_and, # allow previewing and printing documents - 3: bitwise_and, # allow editing documents - 8: bitwise_or, # no subsetting permitted - 9: bitwise_or, # no embedding of outlines permitted -} - -def mergeOs2FsType(lst): - lst = list(lst) - if all(item == 0 for item in lst): - return 0 - - # Compute least restrictive logic for each fsType value - for i in range(len(lst)): - # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set - if lst[i] & 0x000C: - lst[i] &= ~0x0002 - # set bit 2 (allow previewing) if bit 3 is set (allow editing) - elif lst[i] & 0x0008: - lst[i] |= 0x0004 - # set bits 2 and 3 if everything is allowed - elif lst[i] == 0: - lst[i] = 0x000C - - fsType = mergeBits(os2FsTypeMergeBitMap)(lst) - # unset bits 2 and 3 if bit 1 is set (some font is "no embedding") - if fsType & 0x0002: - fsType &= ~0x000C - return fsType - - -ttLib.getTableClass('OS/2').mergeMap = { 
- '*': first, - 'tableTag': equal, - 'version': max, - 'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this - 'fsType': mergeOs2FsType, # Will be overwritten - 'panose': first, # FIXME: should really be the first Latin font - 'ulUnicodeRange1': bitwise_or, - 'ulUnicodeRange2': bitwise_or, - 'ulUnicodeRange3': bitwise_or, - 'ulUnicodeRange4': bitwise_or, - 'fsFirstCharIndex': min, - 'fsLastCharIndex': max, - 'sTypoAscender': max, - 'sTypoDescender': min, - 'sTypoLineGap': max, - 'usWinAscent': max, - 'usWinDescent': max, - # Version 2,3,4 - 'ulCodePageRange1': onlyExisting(bitwise_or), - 'ulCodePageRange2': onlyExisting(bitwise_or), - 'usMaxContex': onlyExisting(max), - # TODO version 5 -} - -@_add_method(ttLib.getTableClass('OS/2')) -def merge(self, m, tables): - DefaultTable.merge(self, m, tables) - if self.version < 2: - # bits 8 and 9 are reserved and should be set to zero - self.fsType &= ~0x0300 - if self.version >= 3: - # Only one of bits 1, 2, and 3 may be set. We already take - # care of bit 1 implications in mergeOs2FsType. So unset - # bit 2 if bit 3 is already set. - if self.fsType & 0x0008: - self.fsType &= ~0x0004 - return self - -ttLib.getTableClass('post').mergeMap = { - '*': first, - 'tableTag': equal, - 'formatType': max, - 'isFixedPitch': min, - 'minMemType42': max, - 'maxMemType42': lambda lst: 0, - 'minMemType1': max, - 'maxMemType1': lambda lst: 0, - 'mapping': onlyExisting(sumDicts), - 'extraNames': lambda lst: [], -} - -ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = { - 'tableTag': equal, - 'metrics': sumDicts, -} - -ttLib.getTableClass('gasp').mergeMap = { - 'tableTag': equal, - 'version': max, - 'gaspRange': first, # FIXME? Appears irreconcilable -} - -ttLib.getTableClass('name').mergeMap = { - 'tableTag': equal, - 'names': first, # FIXME? Does mixing name records make sense? 
-} - -ttLib.getTableClass('loca').mergeMap = { - '*': recalculate, - 'tableTag': equal, -} - -ttLib.getTableClass('glyf').mergeMap = { - 'tableTag': equal, - 'glyphs': sumDicts, - 'glyphOrder': sumLists, -} - -@_add_method(ttLib.getTableClass('glyf')) -def merge(self, m, tables): - for i,table in enumerate(tables): - for g in table.glyphs.values(): - if i: - # Drop hints for all but first font, since - # we don't map functions / CVT values. - g.removeHinting() - # Expand composite glyphs to load their - # composite glyph names. - if g.isComposite(): - g.expand(table) - return DefaultTable.merge(self, m, tables) - -ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst) -ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst) -ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst) - -@_add_method(ttLib.getTableClass('cmap')) -def merge(self, m, tables): - # TODO Handle format=14. - cmapTables = [(t,fontIdx) for fontIdx,table in enumerate(tables) for t in table.tables if t.isUnicode()] - # TODO Better handle format-4 and format-12 coexisting in same font. - # TODO Insert both a format-4 and format-12 if needed. - module = ttLib.getTableModule('cmap') - assert all(t.format in [4, 12] for t,_ in cmapTables) - format = max(t.format for t,_ in cmapTables) - cmapTable = module.cmap_classes[format](format) - cmapTable.cmap = {} - cmapTable.platformID = 3 - cmapTable.platEncID = max(t.platEncID for t,_ in cmapTables) - cmapTable.language = 0 - cmap = cmapTable.cmap - for table,fontIdx in cmapTables: - # TODO handle duplicates. - for uni,gid in table.cmap.items(): - oldgid = cmap.get(uni, None) - if oldgid is None: - cmap[uni] = gid - elif oldgid != gid: - # Char previously mapped to oldgid, now to gid. - # Record, to fix up in GSUB 'locl' later. 
- assert m.duplicateGlyphsPerFont[fontIdx].get(oldgid, gid) == gid - m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid - self.tableVersion = 0 - self.tables = [cmapTable] - self.numSubTables = len(self.tables) - return self - - -otTables.ScriptList.mergeMap = { - 'ScriptCount': sum, - 'ScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.ScriptTag), -} -otTables.BaseScriptList.mergeMap = { - 'BaseScriptCount': sum, - 'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag), -} - -otTables.FeatureList.mergeMap = { - 'FeatureCount': sum, - 'FeatureRecord': sumLists, -} - -otTables.LookupList.mergeMap = { - 'LookupCount': sum, - 'Lookup': sumLists, -} - -otTables.Coverage.mergeMap = { - 'glyphs': sumLists, -} - -otTables.ClassDef.mergeMap = { - 'classDefs': sumDicts, -} - -otTables.LigCaretList.mergeMap = { - 'Coverage': mergeObjects, - 'LigGlyphCount': sum, - 'LigGlyph': sumLists, -} - -otTables.AttachList.mergeMap = { - 'Coverage': mergeObjects, - 'GlyphCount': sum, - 'AttachPoint': sumLists, -} - -# XXX Renumber MarkFilterSets of lookups -otTables.MarkGlyphSetsDef.mergeMap = { - 'MarkSetTableFormat': equal, - 'MarkSetCount': sum, - 'Coverage': sumLists, -} - -otTables.Axis.mergeMap = { - '*': mergeObjects, -} - -# XXX Fix BASE table merging -otTables.BaseTagList.mergeMap = { - 'BaseTagCount': sum, - 'BaselineTag': sumLists, -} - -otTables.GDEF.mergeMap = \ -otTables.GSUB.mergeMap = \ -otTables.GPOS.mergeMap = \ -otTables.BASE.mergeMap = \ -otTables.JSTF.mergeMap = \ -otTables.MATH.mergeMap = \ -{ - '*': mergeObjects, - 'Version': max, -} - -ttLib.getTableClass('GDEF').mergeMap = \ -ttLib.getTableClass('GSUB').mergeMap = \ -ttLib.getTableClass('GPOS').mergeMap = \ -ttLib.getTableClass('BASE').mergeMap = \ -ttLib.getTableClass('JSTF').mergeMap = \ -ttLib.getTableClass('MATH').mergeMap = \ -{ - 'tableTag': onlyExisting(equal), # XXX clean me up - 'table': mergeObjects, -} - -@_add_method(ttLib.getTableClass('GSUB')) -def 
merge(self, m, tables): - - assert len(tables) == len(m.duplicateGlyphsPerFont) - for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)): - if not dups: continue - assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB" % (i + 1) - lookupMap = {id(v):v for v in table.table.LookupList.Lookup} - featureMap = {id(v):v for v in table.table.FeatureList.FeatureRecord} - synthFeature = None - synthLookup = None - for script in table.table.ScriptList.ScriptRecord: - if script.ScriptTag == 'DFLT': continue # XXX - for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]: - feature = [featureMap[v] for v in langsys.FeatureIndex if featureMap[v].FeatureTag == 'locl'] - assert len(feature) <= 1 - if feature: - feature = feature[0] - else: - if not synthFeature: - synthFeature = otTables.FeatureRecord() - synthFeature.FeatureTag = 'locl' - f = synthFeature.Feature = otTables.Feature() - f.FeatureParams = None - f.LookupCount = 0 - f.LookupListIndex = [] - langsys.FeatureIndex.append(id(synthFeature)) - featureMap[id(synthFeature)] = synthFeature - langsys.FeatureIndex.sort(key=lambda v: featureMap[v].FeatureTag) - table.table.FeatureList.FeatureRecord.append(synthFeature) - table.table.FeatureList.FeatureCount += 1 - feature = synthFeature - - if not synthLookup: - subtable = otTables.SingleSubst() - subtable.mapping = dups - synthLookup = otTables.Lookup() - synthLookup.LookupFlag = 0 - synthLookup.LookupType = 1 - synthLookup.SubTableCount = 1 - synthLookup.SubTable = [subtable] - table.table.LookupList.Lookup.append(synthLookup) - table.table.LookupList.LookupCount += 1 - - feature.Feature.LookupListIndex[:0] = [id(synthLookup)] - feature.Feature.LookupCount += 1 - - DefaultTable.merge(self, m, tables) - return self - -@_add_method(otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - 
otTables.ReverseChainSingleSubst, - otTables.SinglePos, - otTables.PairPos, - otTables.CursivePos, - otTables.MarkBasePos, - otTables.MarkLigPos, - otTables.MarkMarkPos) -def mapLookups(self, lookupMap): - pass - -# Copied and trimmed down from subset.py -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def __merge_classify_context(self): - - class ContextHelper(object): - def __init__(self, klass, Format): - if klass.__name__.endswith('Subst'): - Typ = 'Sub' - Type = 'Subst' - else: - Typ = 'Pos' - Type = 'Pos' - if klass.__name__.startswith('Chain'): - Chain = 'Chain' - else: - Chain = '' - ChainTyp = Chain+Typ - - self.Typ = Typ - self.Type = Type - self.Chain = Chain - self.ChainTyp = ChainTyp - - self.LookupRecord = Type+'LookupRecord' - - if Format == 1: - self.Rule = ChainTyp+'Rule' - self.RuleSet = ChainTyp+'RuleSet' - elif Format == 2: - self.Rule = ChainTyp+'ClassRule' - self.RuleSet = ChainTyp+'ClassSet' - - if self.Format not in [1, 2, 3]: - return None # Don't shoot the messenger; let it go - if not hasattr(self.__class__, "__ContextHelpers"): - self.__class__.__ContextHelpers = {} - if self.Format not in self.__class__.__ContextHelpers: - helper = ContextHelper(self.__class__, self.Format) - self.__class__.__ContextHelpers[self.Format] = helper - return self.__class__.__ContextHelpers[self.Format] - - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def mapLookups(self, lookupMap): - c = self.__merge_classify_context() - - if self.Format in [1, 2]: - for rs in getattr(self, c.RuleSet): - if not rs: continue - for r in getattr(rs, c.Rule): - if not r: continue - for ll in getattr(r, c.LookupRecord): - if not ll: continue - ll.LookupListIndex = lookupMap[ll.LookupListIndex] - elif self.Format == 3: - for ll in getattr(self, c.LookupRecord): - if not ll: continue - ll.LookupListIndex = lookupMap[ll.LookupListIndex] - 
else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def mapLookups(self, lookupMap): - if self.Format == 1: - self.ExtSubTable.mapLookups(lookupMap) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.Lookup) -def mapLookups(self, lookupMap): - for st in self.SubTable: - if not st: continue - st.mapLookups(lookupMap) - -@_add_method(otTables.LookupList) -def mapLookups(self, lookupMap): - for l in self.Lookup: - if not l: continue - l.mapLookups(lookupMap) - -@_add_method(otTables.Feature) -def mapLookups(self, lookupMap): - self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex] - -@_add_method(otTables.FeatureList) -def mapLookups(self, lookupMap): - for f in self.FeatureRecord: - if not f or not f.Feature: continue - f.Feature.mapLookups(lookupMap) - -@_add_method(otTables.DefaultLangSys, - otTables.LangSys) -def mapFeatures(self, featureMap): - self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex] - if self.ReqFeatureIndex != 65535: - self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex] - -@_add_method(otTables.Script) -def mapFeatures(self, featureMap): - if self.DefaultLangSys: - self.DefaultLangSys.mapFeatures(featureMap) - for l in self.LangSysRecord: - if not l or not l.LangSys: continue - l.LangSys.mapFeatures(featureMap) - -@_add_method(otTables.ScriptList) -def mapFeatures(self, featureMap): - for s in self.ScriptRecord: - if not s or not s.Script: continue - s.Script.mapFeatures(featureMap) - - -class Options(object): - - class UnknownOptionError(Exception): - pass - - def __init__(self, **kwargs): - - self.set(**kwargs) - - def set(self, **kwargs): - for k,v in kwargs.items(): - if not hasattr(self, k): - raise self.UnknownOptionError("Unknown option '%s'" % k) - setattr(self, k, v) - - def parse_opts(self, argv, ignore_unknown=False): - ret = [] - opts = {} - for a in argv: - orig_a = a - if not a.startswith('--'): - ret.append(a) - 
continue - a = a[2:] - i = a.find('=') - op = '=' - if i == -1: - if a.startswith("no-"): - k = a[3:] - v = False - else: - k = a - v = True - else: - k = a[:i] - if k[-1] in "-+": - op = k[-1]+'=' # Ops is '-=' or '+=' now. - k = k[:-1] - v = a[i+1:] - k = k.replace('-', '_') - if not hasattr(self, k): - if ignore_unknown is True or k in ignore_unknown: - ret.append(orig_a) - continue - else: - raise self.UnknownOptionError("Unknown option '%s'" % a) - - ov = getattr(self, k) - if isinstance(ov, bool): - v = bool(v) - elif isinstance(ov, int): - v = int(v) - elif isinstance(ov, list): - vv = v.split(',') - if vv == ['']: - vv = [] - vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] - if op == '=': - v = vv - elif op == '+=': - v = ov - v.extend(vv) - elif op == '-=': - v = ov - for x in vv: - if x in v: - v.remove(x) - else: - assert 0 - - opts[k] = v - self.set(**opts) - - return ret - - -class Merger(object): - - def __init__(self, options=None, log=None): - - if not log: - log = Logger() - if not options: - options = Options() - - self.options = options - self.log = log - - def merge(self, fontfiles): - - mega = ttLib.TTFont() - - # - # Settle on a mega glyph order. - # - fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] - glyphOrders = [font.getGlyphOrder() for font in fonts] - megaGlyphOrder = self._mergeGlyphOrders(glyphOrders) - # Reload fonts and set new glyph names on them. - # TODO Is it necessary to reload font? I think it is. At least - # it's safer, in case tables were loaded to provide glyph names. 
- fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles] - for font,glyphOrder in zip(fonts, glyphOrders): - font.setGlyphOrder(glyphOrder) - mega.setGlyphOrder(megaGlyphOrder) - - for font in fonts: - self._preMerge(font) - - self.duplicateGlyphsPerFont = [{} for f in fonts] - - allTags = reduce(set.union, (list(font.keys()) for font in fonts), set()) - allTags.remove('GlyphOrder') - - # Make sure we process cmap before GSUB as we have a dependency there. - if 'GSUB' in allTags: - allTags.remove('GSUB') - allTags = ['GSUB'] + list(allTags) - if 'cmap' in allTags: - allTags.remove('cmap') - allTags = ['cmap'] + list(allTags) - - for tag in allTags: - - tables = [font.get(tag, NotImplemented) for font in fonts] - - clazz = ttLib.getTableClass(tag) - table = clazz(tag).merge(self, tables) - # XXX Clean this up and use: table = mergeObjects(tables) - - if table is not NotImplemented and table is not False: - mega[tag] = table - self.log("Merged '%s'." % tag) - else: - self.log("Dropped '%s'." % tag) - self.log.lapse("merge '%s'" % tag) - - del self.duplicateGlyphsPerFont - - self._postMerge(mega) - - return mega - - def _mergeGlyphOrders(self, glyphOrders): - """Modifies passed-in glyphOrders to reflect new glyph names. - Returns glyphOrder for the merged font.""" - # Simply append font index to the glyph name for now. - # TODO Even this simplistic numbering can result in conflicts. - # But then again, we have to improve this soon anyway. - mega = [] - for n,glyphOrder in enumerate(glyphOrders): - for i,glyphName in enumerate(glyphOrder): - glyphName += "#" + repr(n) - glyphOrder[i] = glyphName - mega.append(glyphName) - return mega - - def mergeObjects(self, returnTable, logic, tables): - # Right now we don't use self at all. Will use in the future - # for options and logging. 
- - allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented)) - for key in allKeys: - try: - mergeLogic = logic[key] - except KeyError: - try: - mergeLogic = logic['*'] - except KeyError: - raise Exception("Don't know how to merge key %s of class %s" % - (key, returnTable.__class__.__name__)) - if mergeLogic is NotImplemented: - continue - value = mergeLogic(getattr(table, key, NotImplemented) for table in tables) - if value is not NotImplemented: - setattr(returnTable, key, value) - - return returnTable - - def _preMerge(self, font): - - # Map indices to references - - GDEF = font.get('GDEF') - GSUB = font.get('GSUB') - GPOS = font.get('GPOS') - - for t in [GSUB, GPOS]: - if not t: continue - - if t.table.LookupList: - lookupMap = {i:id(v) for i,v in enumerate(t.table.LookupList.Lookup)} - t.table.LookupList.mapLookups(lookupMap) - if t.table.FeatureList: - # XXX Handle present FeatureList but absent LookupList - t.table.FeatureList.mapLookups(lookupMap) - - if t.table.FeatureList and t.table.ScriptList: - featureMap = {i:id(v) for i,v in enumerate(t.table.FeatureList.FeatureRecord)} - t.table.ScriptList.mapFeatures(featureMap) - - # TODO GDEF/Lookup MarkFilteringSets - # TODO FeatureParams nameIDs - - def _postMerge(self, font): - - # Map references back to indices - - GDEF = font.get('GDEF') - GSUB = font.get('GSUB') - GPOS = font.get('GPOS') - - for t in [GSUB, GPOS]: - if not t: continue - - if t.table.LookupList: - lookupMap = {id(v):i for i,v in enumerate(t.table.LookupList.Lookup)} - t.table.LookupList.mapLookups(lookupMap) - if t.table.FeatureList: - # XXX Handle present FeatureList but absent LookupList - t.table.FeatureList.mapLookups(lookupMap) - - if t.table.FeatureList and t.table.ScriptList: - # XXX Handle present ScriptList but absent FeatureList - featureMap = {id(v):i for i,v in enumerate(t.table.FeatureList.FeatureRecord)} - t.table.ScriptList.mapFeatures(featureMap) - - # TODO GDEF/Lookup 
MarkFilteringSets - # TODO FeatureParams nameIDs - - -class Logger(object): - - def __init__(self, verbose=False, xml=False, timing=False): - self.verbose = verbose - self.xml = xml - self.timing = timing - self.last_time = self.start_time = time.time() - - def parse_opts(self, argv): - argv = argv[:] - for v in ['verbose', 'xml', 'timing']: - if "--"+v in argv: - setattr(self, v, True) - argv.remove("--"+v) - return argv - - def __call__(self, *things): - if not self.verbose: - return - print(' '.join(str(x) for x in things)) - - def lapse(self, *things): - if not self.timing: - return - new_time = time.time() - print("Took %0.3fs to %s" %(new_time - self.last_time, - ' '.join(str(x) for x in things))) - self.last_time = new_time - - def font(self, font, file=sys.stdout): - if not self.xml: - return - from fontTools.misc import xmlWriter - writer = xmlWriter.XMLWriter(file) - font.disassembleInstructions = False # Work around ttLib bug - for tag in font.keys(): - writer.begintag(tag) - writer.newline() - font[tag].toXML(writer, font) - writer.endtag(tag) - writer.newline() - - -__all__ = [ - 'Options', - 'Merger', - 'Logger', - 'main' -] - -def main(args=None): - - if args is None: - args = sys.argv[1:] - - log = Logger() - args = log.parse_opts(args) - - options = Options() - args = options.parse_opts(args) - - if len(args) < 1: - print("usage: pyftmerge font...", file=sys.stderr) - sys.exit(1) - - merger = Merger(options=options, log=log) - font = merger.merge(args) - outfile = 'merged.ttf' - font.save(outfile) - log.lapse("compile and save font") - - log.last_time = log.start_time - log.lapse("make one with everything(TOTAL TIME)") - -if __name__ == "__main__": - main() diff -Nru fonttools-3.0/Tools/fontTools/misc/arrayTools.py fonttools-3.21.2/Tools/fontTools/misc/arrayTools.py --- fonttools-3.0/Tools/fontTools/misc/arrayTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/arrayTools.py 1970-01-01 00:00:00.000000000 +0000 @@ 
-1,185 +0,0 @@ -# -# Various array and rectangle tools, but mostly rectangles, hence the -# name of this module (not). -# - - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import math - -def calcBounds(array): - """Return the bounding rectangle of a 2D points array as a tuple: - (xMin, yMin, xMax, yMax) - """ - if len(array) == 0: - return 0, 0, 0, 0 - xs = [x for x, y in array] - ys = [y for x, y in array] - return min(xs), min(ys), max(xs), max(ys) - -def calcIntBounds(array): - """Return the integer bounding rectangle of a 2D points array as a - tuple: (xMin, yMin, xMax, yMax) - """ - xMin, yMin, xMax, yMax = calcBounds(array) - xMin = int(math.floor(xMin)) - xMax = int(math.ceil(xMax)) - yMin = int(math.floor(yMin)) - yMax = int(math.ceil(yMax)) - return xMin, yMin, xMax, yMax - - -def updateBounds(bounds, p, min=min, max=max): - """Return the bounding recangle of rectangle bounds and point (x, y).""" - (x, y) = p - xMin, yMin, xMax, yMax = bounds - return min(xMin, x), min(yMin, y), max(xMax, x), max(yMax, y) - -def pointInRect(p, rect): - """Return True when point (x, y) is inside rect.""" - (x, y) = p - xMin, yMin, xMax, yMax = rect - return (xMin <= x <= xMax) and (yMin <= y <= yMax) - -def pointsInRect(array, rect): - """Find out which points or array are inside rect. - Returns an array with a boolean for each point. 
- """ - if len(array) < 1: - return [] - xMin, yMin, xMax, yMax = rect - return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array] - -def vectorLength(vector): - """Return the length of the given vector.""" - x, y = vector - return math.sqrt(x**2 + y**2) - -def asInt16(array): - """Round and cast to 16 bit integer.""" - return [int(math.floor(i+0.5)) for i in array] - - -def normRect(rect): - """Normalize the rectangle so that the following holds: - xMin <= xMax and yMin <= yMax - """ - (xMin, yMin, xMax, yMax) = rect - return min(xMin, xMax), min(yMin, yMax), max(xMin, xMax), max(yMin, yMax) - -def scaleRect(rect, x, y): - """Scale the rectangle by x, y.""" - (xMin, yMin, xMax, yMax) = rect - return xMin * x, yMin * y, xMax * x, yMax * y - -def offsetRect(rect, dx, dy): - """Offset the rectangle by dx, dy.""" - (xMin, yMin, xMax, yMax) = rect - return xMin+dx, yMin+dy, xMax+dx, yMax+dy - -def insetRect(rect, dx, dy): - """Inset the rectangle by dx, dy on all sides.""" - (xMin, yMin, xMax, yMax) = rect - return xMin+dx, yMin+dy, xMax-dx, yMax-dy - -def sectRect(rect1, rect2): - """Return a boolean and a rectangle. If the input rectangles intersect, return - True and the intersecting rectangle. Return False and (0, 0, 0, 0) if the input - rectangles don't intersect. - """ - (xMin1, yMin1, xMax1, yMax1) = rect1 - (xMin2, yMin2, xMax2, yMax2) = rect2 - xMin, yMin, xMax, yMax = (max(xMin1, xMin2), max(yMin1, yMin2), - min(xMax1, xMax2), min(yMax1, yMax2)) - if xMin >= xMax or yMin >= yMax: - return False, (0, 0, 0, 0) - return True, (xMin, yMin, xMax, yMax) - -def unionRect(rect1, rect2): - """Return the smallest rectangle in which both input rectangles are fully - enclosed. In other words, return the total bounding rectangle of both input - rectangles. 
- """ - (xMin1, yMin1, xMax1, yMax1) = rect1 - (xMin2, yMin2, xMax2, yMax2) = rect2 - xMin, yMin, xMax, yMax = (min(xMin1, xMin2), min(yMin1, yMin2), - max(xMax1, xMax2), max(yMax1, yMax2)) - return (xMin, yMin, xMax, yMax) - -def rectCenter(rect0): - """Return the center of the rectangle as an (x, y) coordinate.""" - (xMin, yMin, xMax, yMax) = rect0 - return (xMin+xMax)/2, (yMin+yMax)/2 - -def intRect(rect1): - """Return the rectangle, rounded off to integer values, but guaranteeing that - the resulting rectangle is NOT smaller than the original. - """ - (xMin, yMin, xMax, yMax) = rect1 - xMin = int(math.floor(xMin)) - yMin = int(math.floor(yMin)) - xMax = int(math.ceil(xMax)) - yMax = int(math.ceil(yMax)) - return (xMin, yMin, xMax, yMax) - - -def _test(): - """ - >>> import math - >>> calcBounds([]) - (0, 0, 0, 0) - >>> calcBounds([(0, 40), (0, 100), (50, 50), (80, 10)]) - (0, 10, 80, 100) - >>> updateBounds((0, 0, 0, 0), (100, 100)) - (0, 0, 100, 100) - >>> pointInRect((50, 50), (0, 0, 100, 100)) - True - >>> pointInRect((0, 0), (0, 0, 100, 100)) - True - >>> pointInRect((100, 100), (0, 0, 100, 100)) - True - >>> not pointInRect((101, 100), (0, 0, 100, 100)) - True - >>> list(pointsInRect([(50, 50), (0, 0), (100, 100), (101, 100)], (0, 0, 100, 100))) - [True, True, True, False] - >>> vectorLength((3, 4)) - 5.0 - >>> vectorLength((1, 1)) == math.sqrt(2) - True - >>> list(asInt16([0, 0.1, 0.5, 0.9])) - [0, 0, 1, 1] - >>> normRect((0, 10, 100, 200)) - (0, 10, 100, 200) - >>> normRect((100, 200, 0, 10)) - (0, 10, 100, 200) - >>> scaleRect((10, 20, 50, 150), 1.5, 2) - (15.0, 40, 75.0, 300) - >>> offsetRect((10, 20, 30, 40), 5, 6) - (15, 26, 35, 46) - >>> insetRect((10, 20, 50, 60), 5, 10) - (15, 30, 45, 50) - >>> insetRect((10, 20, 50, 60), -5, -10) - (5, 10, 55, 70) - >>> intersects, rect = sectRect((0, 10, 20, 30), (0, 40, 20, 50)) - >>> not intersects - True - >>> intersects, rect = sectRect((0, 10, 20, 30), (5, 20, 35, 50)) - >>> intersects - 1 - >>> rect - (5, 
20, 20, 30) - >>> unionRect((0, 10, 20, 30), (0, 40, 20, 50)) - (0, 10, 20, 50) - >>> rectCenter((0, 0, 100, 200)) - (50.0, 100.0) - >>> rectCenter((0, 0, 100, 199.0)) - (50.0, 99.5) - >>> intRect((0.9, 2.9, 3.1, 4.1)) - (0, 2, 4, 5) - """ - -if __name__ == "__main__": - import sys - import doctest - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Tools/fontTools/misc/bezierTools.py fonttools-3.21.2/Tools/fontTools/misc/bezierTools.py --- fonttools-3.0/Tools/fontTools/misc/bezierTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/bezierTools.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,414 +0,0 @@ -"""fontTools.misc.bezierTools.py -- tools for working with bezier path segments. -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -__all__ = [ - "calcQuadraticBounds", - "calcCubicBounds", - "splitLine", - "splitQuadratic", - "splitCubic", - "splitQuadraticAtT", - "splitCubicAtT", - "solveQuadratic", - "solveCubic", -] - -from fontTools.misc.arrayTools import calcBounds - -epsilon = 1e-12 - - -def calcQuadraticBounds(pt1, pt2, pt3): - """Return the bounding rectangle for a qudratic bezier segment. - pt1 and pt3 are the "anchor" points, pt2 is the "handle". - - >>> calcQuadraticBounds((0, 0), (50, 100), (100, 0)) - (0, 0, 100, 50.0) - >>> calcQuadraticBounds((0, 0), (100, 0), (100, 100)) - (0.0, 0.0, 100, 100) - """ - (ax, ay), (bx, by), (cx, cy) = calcQuadraticParameters(pt1, pt2, pt3) - ax2 = ax*2.0 - ay2 = ay*2.0 - roots = [] - if ax2 != 0: - roots.append(-bx/ax2) - if ay2 != 0: - roots.append(-by/ay2) - points = [(ax*t*t + bx*t + cx, ay*t*t + by*t + cy) for t in roots if 0 <= t < 1] + [pt1, pt3] - return calcBounds(points) - - -def calcCubicBounds(pt1, pt2, pt3, pt4): - """Return the bounding rectangle for a cubic bezier segment. - pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles". 
- - >>> calcCubicBounds((0, 0), (25, 100), (75, 100), (100, 0)) - (0, 0, 100, 75.0) - >>> calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100)) - (0.0, 0.0, 100, 100) - >>> print("%f %f %f %f" % calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0))) - 35.566243 0.000000 64.433757 75.000000 - """ - (ax, ay), (bx, by), (cx, cy), (dx, dy) = calcCubicParameters(pt1, pt2, pt3, pt4) - # calc first derivative - ax3 = ax * 3.0 - ay3 = ay * 3.0 - bx2 = bx * 2.0 - by2 = by * 2.0 - xRoots = [t for t in solveQuadratic(ax3, bx2, cx) if 0 <= t < 1] - yRoots = [t for t in solveQuadratic(ay3, by2, cy) if 0 <= t < 1] - roots = xRoots + yRoots - - points = [(ax*t*t*t + bx*t*t + cx * t + dx, ay*t*t*t + by*t*t + cy * t + dy) for t in roots] + [pt1, pt4] - return calcBounds(points) - - -def splitLine(pt1, pt2, where, isHorizontal): - """Split the line between pt1 and pt2 at position 'where', which - is an x coordinate if isHorizontal is False, a y coordinate if - isHorizontal is True. Return a list of two line segments if the - line was successfully split, or a list containing the original - line. 
- - >>> printSegments(splitLine((0, 0), (100, 100), 50, True)) - ((0, 0), (50, 50)) - ((50, 50), (100, 100)) - >>> printSegments(splitLine((0, 0), (100, 100), 100, True)) - ((0, 0), (100, 100)) - >>> printSegments(splitLine((0, 0), (100, 100), 0, True)) - ((0, 0), (0, 0)) - ((0, 0), (100, 100)) - >>> printSegments(splitLine((0, 0), (100, 100), 0, False)) - ((0, 0), (0, 0)) - ((0, 0), (100, 100)) - >>> printSegments(splitLine((100, 0), (0, 0), 50, False)) - ((100, 0), (50, 0)) - ((50, 0), (0, 0)) - >>> printSegments(splitLine((0, 100), (0, 0), 50, True)) - ((0, 100), (0, 50)) - ((0, 50), (0, 0)) - """ - pt1x, pt1y = pt1 - pt2x, pt2y = pt2 - - ax = (pt2x - pt1x) - ay = (pt2y - pt1y) - - bx = pt1x - by = pt1y - - a = (ax, ay)[isHorizontal] - - if a == 0: - return [(pt1, pt2)] - t = (where - (bx, by)[isHorizontal]) / a - if 0 <= t < 1: - midPt = ax * t + bx, ay * t + by - return [(pt1, midPt), (midPt, pt2)] - else: - return [(pt1, pt2)] - - -def splitQuadratic(pt1, pt2, pt3, where, isHorizontal): - """Split the quadratic curve between pt1, pt2 and pt3 at position 'where', - which is an x coordinate if isHorizontal is False, a y coordinate if - isHorizontal is True. Return a list of curve segments. 
- - >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 150, False)) - ((0, 0), (50, 100), (100, 0)) - >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, False)) - ((0, 0), (25, 50), (50, 50)) - ((50, 50), (75, 50), (100, 0)) - >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, False)) - ((0, 0), (12.5, 25), (25, 37.5)) - ((25, 37.5), (62.5, 75), (100, 0)) - >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, True)) - ((0, 0), (7.32233, 14.6447), (14.6447, 25)) - ((14.6447, 25), (50, 75), (85.3553, 25)) - ((85.3553, 25), (92.6777, 14.6447), (100, -7.10543e-15)) - >>> # XXX I'm not at all sure if the following behavior is desirable: - >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, True)) - ((0, 0), (25, 50), (50, 50)) - ((50, 50), (50, 50), (50, 50)) - ((50, 50), (75, 50), (100, 0)) - """ - a, b, c = calcQuadraticParameters(pt1, pt2, pt3) - solutions = solveQuadratic(a[isHorizontal], b[isHorizontal], - c[isHorizontal] - where) - solutions = sorted([t for t in solutions if 0 <= t < 1]) - if not solutions: - return [(pt1, pt2, pt3)] - return _splitQuadraticAtT(a, b, c, *solutions) - - -def splitCubic(pt1, pt2, pt3, pt4, where, isHorizontal): - """Split the cubic curve between pt1, pt2, pt3 and pt4 at position 'where', - which is an x coordinate if isHorizontal is False, a y coordinate if - isHorizontal is True. Return a list of curve segments. 
- - >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 150, False)) - ((0, 0), (25, 100), (75, 100), (100, 0)) - >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 50, False)) - ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) - ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) - >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 25, True)) - ((0, 0), (2.29379, 9.17517), (4.79804, 17.5085), (7.47414, 25)) - ((7.47414, 25), (31.2886, 91.6667), (68.7114, 91.6667), (92.5259, 25)) - ((92.5259, 25), (95.202, 17.5085), (97.7062, 9.17517), (100, 1.77636e-15)) - """ - a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) - solutions = solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal], - d[isHorizontal] - where) - solutions = sorted([t for t in solutions if 0 <= t < 1]) - if not solutions: - return [(pt1, pt2, pt3, pt4)] - return _splitCubicAtT(a, b, c, d, *solutions) - - -def splitQuadraticAtT(pt1, pt2, pt3, *ts): - """Split the quadratic curve between pt1, pt2 and pt3 at one or more - values of t. Return a list of curve segments. - - >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5)) - ((0, 0), (25, 50), (50, 50)) - ((50, 50), (75, 50), (100, 0)) - >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75)) - ((0, 0), (25, 50), (50, 50)) - ((50, 50), (62.5, 50), (75, 37.5)) - ((75, 37.5), (87.5, 25), (100, 0)) - """ - a, b, c = calcQuadraticParameters(pt1, pt2, pt3) - return _splitQuadraticAtT(a, b, c, *ts) - - -def splitCubicAtT(pt1, pt2, pt3, pt4, *ts): - """Split the cubic curve between pt1, pt2, pt3 and pt4 at one or more - values of t. Return a list of curve segments. 
- - >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5)) - ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) - ((50, 75), (68.75, 75), (87.5, 50), (100, 0)) - >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75)) - ((0, 0), (12.5, 50), (31.25, 75), (50, 75)) - ((50, 75), (59.375, 75), (68.75, 68.75), (77.3438, 56.25)) - ((77.3438, 56.25), (85.9375, 43.75), (93.75, 25), (100, 0)) - """ - a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4) - return _splitCubicAtT(a, b, c, d, *ts) - - -def _splitQuadraticAtT(a, b, c, *ts): - ts = list(ts) - segments = [] - ts.insert(0, 0.0) - ts.append(1.0) - ax, ay = a - bx, by = b - cx, cy = c - for i in range(len(ts) - 1): - t1 = ts[i] - t2 = ts[i+1] - delta = (t2 - t1) - # calc new a, b and c - a1x = ax * delta**2 - a1y = ay * delta**2 - b1x = (2*ax*t1 + bx) * delta - b1y = (2*ay*t1 + by) * delta - c1x = ax*t1**2 + bx*t1 + cx - c1y = ay*t1**2 + by*t1 + cy - - pt1, pt2, pt3 = calcQuadraticPoints((a1x, a1y), (b1x, b1y), (c1x, c1y)) - segments.append((pt1, pt2, pt3)) - return segments - - -def _splitCubicAtT(a, b, c, d, *ts): - ts = list(ts) - ts.insert(0, 0.0) - ts.append(1.0) - segments = [] - ax, ay = a - bx, by = b - cx, cy = c - dx, dy = d - for i in range(len(ts) - 1): - t1 = ts[i] - t2 = ts[i+1] - delta = (t2 - t1) - # calc new a, b, c and d - a1x = ax * delta**3 - a1y = ay * delta**3 - b1x = (3*ax*t1 + bx) * delta**2 - b1y = (3*ay*t1 + by) * delta**2 - c1x = (2*bx*t1 + cx + 3*ax*t1**2) * delta - c1y = (2*by*t1 + cy + 3*ay*t1**2) * delta - d1x = ax*t1**3 + bx*t1**2 + cx*t1 + dx - d1y = ay*t1**3 + by*t1**2 + cy*t1 + dy - pt1, pt2, pt3, pt4 = calcCubicPoints((a1x, a1y), (b1x, b1y), (c1x, c1y), (d1x, d1y)) - segments.append((pt1, pt2, pt3, pt4)) - return segments - - -# -# Equation solvers. -# - -from math import sqrt, acos, cos, pi - - -def solveQuadratic(a, b, c, - sqrt=sqrt): - """Solve a quadratic equation where a, b and c are real. 
- a*x*x + b*x + c = 0 - This function returns a list of roots. Note that the returned list - is neither guaranteed to be sorted nor to contain unique values! - """ - if abs(a) < epsilon: - if abs(b) < epsilon: - # We have a non-equation; therefore, we have no valid solution - roots = [] - else: - # We have a linear equation with 1 root. - roots = [-c/b] - else: - # We have a true quadratic equation. Apply the quadratic formula to find two roots. - DD = b*b - 4.0*a*c - if DD >= 0.0: - rDD = sqrt(DD) - roots = [(-b+rDD)/2.0/a, (-b-rDD)/2.0/a] - else: - # complex roots, ignore - roots = [] - return roots - - -def solveCubic(a, b, c, d): - """Solve a cubic equation where a, b, c and d are real. - a*x*x*x + b*x*x + c*x + d = 0 - This function returns a list of roots. Note that the returned list - is neither guaranteed to be sorted nor to contain unique values! - """ - # - # adapted from: - # CUBIC.C - Solve a cubic polynomial - # public domain by Ross Cottrell - # found at: http://www.strangecreations.com/library/snippets/Cubic.C - # - if abs(a) < epsilon: - # don't just test for zero; for very small values of 'a' solveCubic() - # returns unreliable results, so we fall back to quad. 
- return solveQuadratic(b, c, d) - a = float(a) - a1 = b/a - a2 = c/a - a3 = d/a - - Q = (a1*a1 - 3.0*a2)/9.0 - R = (2.0*a1*a1*a1 - 9.0*a1*a2 + 27.0*a3)/54.0 - R2_Q3 = R*R - Q*Q*Q - - if R2_Q3 < 0: - theta = acos(R/sqrt(Q*Q*Q)) - rQ2 = -2.0*sqrt(Q) - x0 = rQ2*cos(theta/3.0) - a1/3.0 - x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1/3.0 - x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1/3.0 - return [x0, x1, x2] - else: - if Q == 0 and R == 0: - x = 0 - else: - x = pow(sqrt(R2_Q3)+abs(R), 1/3.0) - x = x + Q/x - if R >= 0.0: - x = -x - x = x - a1/3.0 - return [x] - - -# -# Conversion routines for points to parameters and vice versa -# - -def calcQuadraticParameters(pt1, pt2, pt3): - x2, y2 = pt2 - x3, y3 = pt3 - cx, cy = pt1 - bx = (x2 - cx) * 2.0 - by = (y2 - cy) * 2.0 - ax = x3 - cx - bx - ay = y3 - cy - by - return (ax, ay), (bx, by), (cx, cy) - - -def calcCubicParameters(pt1, pt2, pt3, pt4): - x2, y2 = pt2 - x3, y3 = pt3 - x4, y4 = pt4 - dx, dy = pt1 - cx = (x2 -dx) * 3.0 - cy = (y2 -dy) * 3.0 - bx = (x3 - x2) * 3.0 - cx - by = (y3 - y2) * 3.0 - cy - ax = x4 - dx - cx - bx - ay = y4 - dy - cy - by - return (ax, ay), (bx, by), (cx, cy), (dx, dy) - - -def calcQuadraticPoints(a, b, c): - ax, ay = a - bx, by = b - cx, cy = c - x1 = cx - y1 = cy - x2 = (bx * 0.5) + cx - y2 = (by * 0.5) + cy - x3 = ax + bx + cx - y3 = ay + by + cy - return (x1, y1), (x2, y2), (x3, y3) - - -def calcCubicPoints(a, b, c, d): - ax, ay = a - bx, by = b - cx, cy = c - dx, dy = d - x1 = dx - y1 = dy - x2 = (cx / 3.0) + dx - y2 = (cy / 3.0) + dy - x3 = (bx + cx) / 3.0 + x2 - y3 = (by + cy) / 3.0 + y2 - x4 = ax + dx + cx + bx - y4 = ay + dy + cy + by - return (x1, y1), (x2, y2), (x3, y3), (x4, y4) - - -def _segmentrepr(obj): - """ - >>> _segmentrepr([1, [2, 3], [], [[2, [3, 4], [0.1, 2.2]]]]) - '(1, (2, 3), (), ((2, (3, 4), (0.1, 2.2))))' - """ - try: - it = iter(obj) - except TypeError: - return "%g" % obj - else: - return "(%s)" % ", ".join([_segmentrepr(x) for x in it]) - - -def printSegments(segments): - 
"""Helper for the doctests, displaying each segment in a list of - segments on a single line as a tuple. - """ - for segment in segments: - print(_segmentrepr(segment)) - -if __name__ == "__main__": - import sys - import doctest - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Tools/fontTools/misc/eexec.py fonttools-3.21.2/Tools/fontTools/misc/eexec.py --- fonttools-3.0/Tools/fontTools/misc/eexec.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/eexec.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,55 +0,0 @@ -"""fontTools.misc.eexec.py -- Module implementing the eexec and -charstring encryption algorithm as used by PostScript Type 1 fonts. -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -def _decryptChar(cipher, R): - cipher = byteord(cipher) - plain = ( (cipher ^ (R>>8)) ) & 0xFF - R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF - return bytechr(plain), R - -def _encryptChar(plain, R): - plain = byteord(plain) - cipher = ( (plain ^ (R>>8)) ) & 0xFF - R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF - return bytechr(cipher), R - - -def decrypt(cipherstring, R): - plainList = [] - for cipher in cipherstring: - plain, R = _decryptChar(cipher, R) - plainList.append(plain) - plainstring = strjoin(plainList) - return plainstring, int(R) - -def encrypt(plainstring, R): - cipherList = [] - for plain in plainstring: - cipher, R = _encryptChar(plain, R) - cipherList.append(cipher) - cipherstring = strjoin(cipherList) - return cipherstring, int(R) - - -def hexString(s): - import binascii - return binascii.hexlify(s) - -def deHexString(h): - import binascii - h = strjoin(h.split()) - return binascii.unhexlify(h) - - -def _test(): - testStr = "\0\0asdadads asds\265" - print(decrypt, decrypt(testStr, 12321)) - print(encrypt, encrypt(testStr, 12321)) - - -if __name__ == "__main__": - _test() diff -Nru fonttools-3.0/Tools/fontTools/misc/encodingTools.py 
fonttools-3.21.2/Tools/fontTools/misc/encodingTools.py --- fonttools-3.0/Tools/fontTools/misc/encodingTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/encodingTools.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,73 +0,0 @@ -"""fontTools.misc.encodingTools.py -- tools for working with OpenType encodings. -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import fontTools.encodings.codecs - -# Map keyed by platformID, then platEncID, then possibly langID -_encodingMap = { - 0: { # Unicode - 0: 'utf_16_be', - 1: 'utf_16_be', - 2: 'utf_16_be', - 3: 'utf_16_be', - 4: 'utf_16_be', - 5: 'utf_16_be', - 6: 'utf_16_be', - }, - 1: { # Macintosh - # See - # https://github.com/behdad/fonttools/issues/236 - 0: { # Macintosh, platEncID==0, keyed by langID - 15: "mac_iceland", - 17: "mac_turkish", - 18: "mac_croatian", - 24: "mac_latin2", - 25: "mac_latin2", - 26: "mac_latin2", - 27: "mac_latin2", - 28: "mac_latin2", - 36: "mac_latin2", - 37: "mac_romanian", - 38: "mac_latin2", - 39: "mac_latin2", - 40: "mac_latin2", - Ellipsis: 'mac_roman', # Other - }, - 1: 'x_mac_japanese_ttx', - 2: 'x_mac_trad_chinese_ttx', - 3: 'x_mac_korean_ttx', - 6: 'mac_greek', - 7: 'mac_cyrillic', - 25: 'x_mac_simp_chinese_ttx', - 29: 'mac_latin2', - 35: 'mac_turkish', - 37: 'mac_iceland', - }, - 2: { # ISO - 0: 'ascii', - 1: 'utf_16_be', - 2: 'latin1', - }, - 3: { # Microsoft - 0: 'utf_16_be', - 1: 'utf_16_be', - 2: 'shift_jis', - 3: 'gb2312', - 4: 'big5', - 5: 'euc_kr', - 6: 'johab', - 10: 'utf_16_be', - }, -} - -def getEncoding(platformID, platEncID, langID, default=None): - """Returns the Python encoding name for OpenType platformID/encodingID/langID - triplet. If encoding for these values is not known, by default None is - returned. That can be overriden by passing a value to the default argument. 
- """ - encoding = _encodingMap.get(platformID, {}).get(platEncID, default) - if isinstance(encoding, dict): - encoding = encoding.get(langID, encoding[Ellipsis]) - return encoding diff -Nru fonttools-3.0/Tools/fontTools/misc/encodingTools_test.py fonttools-3.21.2/Tools/fontTools/misc/encodingTools_test.py --- fonttools-3.0/Tools/fontTools/misc/encodingTools_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/encodingTools_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,31 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -import unittest -from .encodingTools import getEncoding - -class EncodingTest(unittest.TestCase): - - def test_encoding_unicode(self): - - self.assertEqual(getEncoding(3, 0, None), "utf_16_be") # MS Symbol is Unicode as well - self.assertEqual(getEncoding(3, 1, None), "utf_16_be") - self.assertEqual(getEncoding(3, 10, None), "utf_16_be") - self.assertEqual(getEncoding(0, 3, None), "utf_16_be") - - def test_encoding_macroman_misc(self): - self.assertEqual(getEncoding(1, 0, 17), "mac_turkish") - self.assertEqual(getEncoding(1, 0, 37), "mac_romanian") - self.assertEqual(getEncoding(1, 0, 45), "mac_roman") - - def test_extended_mac_encodings(self): - encoding = getEncoding(1, 1, 0) # Mac Japanese - decoded = b'\xfe'.decode(encoding) - self.assertEqual(decoded, unichr(0x2122)) - - def test_extended_unknown(self): - self.assertEqual(getEncoding(10, 11, 12), None) - self.assertEqual(getEncoding(10, 11, 12, "ascii"), "ascii") - self.assertEqual(getEncoding(10, 11, 12, default="ascii"), "ascii") - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/misc/fixedTools.py fonttools-3.21.2/Tools/fontTools/misc/fixedTools.py --- fonttools-3.0/Tools/fontTools/misc/fixedTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/fixedTools.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 
@@ -"""fontTools.misc.fixedTools.py -- tools for working with fixed numbers. -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -__all__ = [ - "fixedToFloat", - "floatToFixed", -] - -def fixedToFloat(value, precisionBits): - """Converts a fixed-point number to a float, choosing the float - that has the shortest decimal reprentation. Eg. to convert a - fixed number in a 2.14 format, use precisionBits=14. This is - pretty slow compared to a simple division. Use sporadically. - - precisionBits is only supported up to 16. - """ - if not value: return 0.0 - - scale = 1 << precisionBits - value /= scale - eps = .5 / scale - lo = value - eps - hi = value + eps - # If the range of valid choices spans an integer, return the integer. - if int(lo) != int(hi): - return float(round(value)) - fmt = "%.8f" - lo = fmt % lo - hi = fmt % hi - assert len(lo) == len(hi) and lo != hi - for i in range(len(lo)): - if lo[i] != hi[i]: - break - period = lo.find('.') - assert period < i - fmt = "%%.%df" % (i - period) - value = fmt % value - return float(value) - -def floatToFixed(value, precisionBits): - """Converts a float to a fixed-point number given the number of - precisionBits. Ie. 
int(round(value * (1<h", data[index:index+2]) - return value, index+2 - -def read_longInt(self, b0, data, index): - value, = struct.unpack(">l", data[index:index+4]) - return value, index+4 - -def read_fixed1616(self, b0, data, index): - value, = struct.unpack(">l", data[index:index+4]) - return value / 65536, index+4 - -def read_reserved(self, b0, data, index): - assert NotImplementedError - return NotImplemented, index - -def read_realNumber(self, b0, data, index): - number = '' - while True: - b = byteord(data[index]) - index = index + 1 - nibble0 = (b & 0xf0) >> 4 - nibble1 = b & 0x0f - if nibble0 == 0xf: - break - number = number + realNibbles[nibble0] - if nibble1 == 0xf: - break - number = number + realNibbles[nibble1] - return float(number), index - - -t1OperandEncoding = [None] * 256 -t1OperandEncoding[0:32] = (32) * [read_operator] -t1OperandEncoding[32:247] = (247 - 32) * [read_byte] -t1OperandEncoding[247:251] = (251 - 247) * [read_smallInt1] -t1OperandEncoding[251:255] = (255 - 251) * [read_smallInt2] -t1OperandEncoding[255] = read_longInt -assert len(t1OperandEncoding) == 256 - -t2OperandEncoding = t1OperandEncoding[:] -t2OperandEncoding[28] = read_shortInt -t2OperandEncoding[255] = read_fixed1616 - -cffDictOperandEncoding = t2OperandEncoding[:] -cffDictOperandEncoding[29] = read_longInt -cffDictOperandEncoding[30] = read_realNumber -cffDictOperandEncoding[255] = read_reserved - - -realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', - '.', 'E', 'E-', None, '-'] -realNibblesDict = {v:i for i,v in enumerate(realNibbles)} - - -class ByteCodeBase(object): - pass - - -def buildOperatorDict(operatorList): - oper = {} - opc = {} - for item in operatorList: - if len(item) == 2: - oper[item[0]] = item[1] - else: - oper[item[0]] = item[1:] - if isinstance(item[0], tuple): - opc[item[1]] = item[0] - else: - opc[item[1]] = (item[0],) - return oper, opc - - -t2Operators = [ -# opcode name - (1, 'hstem'), - (3, 'vstem'), - (4, 'vmoveto'), - (5, 
'rlineto'), - (6, 'hlineto'), - (7, 'vlineto'), - (8, 'rrcurveto'), - (10, 'callsubr'), - (11, 'return'), - (14, 'endchar'), - (16, 'blend'), - (18, 'hstemhm'), - (19, 'hintmask'), - (20, 'cntrmask'), - (21, 'rmoveto'), - (22, 'hmoveto'), - (23, 'vstemhm'), - (24, 'rcurveline'), - (25, 'rlinecurve'), - (26, 'vvcurveto'), - (27, 'hhcurveto'), -# (28, 'shortint'), # not really an operator - (29, 'callgsubr'), - (30, 'vhcurveto'), - (31, 'hvcurveto'), - ((12, 0), 'ignore'), # dotsection. Yes, there a few very early OTF/CFF - # fonts with this deprecated operator. Just ignore it. - ((12, 3), 'and'), - ((12, 4), 'or'), - ((12, 5), 'not'), - ((12, 8), 'store'), - ((12, 9), 'abs'), - ((12, 10), 'add'), - ((12, 11), 'sub'), - ((12, 12), 'div'), - ((12, 13), 'load'), - ((12, 14), 'neg'), - ((12, 15), 'eq'), - ((12, 18), 'drop'), - ((12, 20), 'put'), - ((12, 21), 'get'), - ((12, 22), 'ifelse'), - ((12, 23), 'random'), - ((12, 24), 'mul'), - ((12, 26), 'sqrt'), - ((12, 27), 'dup'), - ((12, 28), 'exch'), - ((12, 29), 'index'), - ((12, 30), 'roll'), - ((12, 34), 'hflex'), - ((12, 35), 'flex'), - ((12, 36), 'hflex1'), - ((12, 37), 'flex1'), -] - - -def getIntEncoder(format): - if format == "cff": - fourByteOp = bytechr(29) - elif format == "t1": - fourByteOp = bytechr(255) - else: - assert format == "t2" - fourByteOp = None - - def encodeInt(value, fourByteOp=fourByteOp, bytechr=bytechr, - pack=struct.pack, unpack=struct.unpack): - if -107 <= value <= 107: - code = bytechr(value + 139) - elif 108 <= value <= 1131: - value = value - 108 - code = bytechr((value >> 8) + 247) + bytechr(value & 0xFF) - elif -1131 <= value <= -108: - value = -value - 108 - code = bytechr((value >> 8) + 251) + bytechr(value & 0xFF) - elif fourByteOp is None: - # T2 only supports 2 byte ints - if -32768 <= value <= 32767: - code = bytechr(28) + pack(">h", value) - else: - # Backwards compatible hack: due to a previous bug in FontTools, - # 16.16 fixed numbers were written out as 4-byte ints. 
When - # these numbers were small, they were wrongly written back as - # small ints instead of 4-byte ints, breaking round-tripping. - # This here workaround doesn't do it any better, since we can't - # distinguish anymore between small ints that were supposed to - # be small fixed numbers and small ints that were just small - # ints. Hence the warning. - import sys - sys.stderr.write("Warning: 4-byte T2 number got passed to the " - "IntType handler. This should happen only when reading in " - "old XML files.\n") - code = bytechr(255) + pack(">l", value) - else: - code = fourByteOp + pack(">l", value) - return code - - return encodeInt - - -encodeIntCFF = getIntEncoder("cff") -encodeIntT1 = getIntEncoder("t1") -encodeIntT2 = getIntEncoder("t2") - -def encodeFixed(f, pack=struct.pack): - # For T2 only - return b"\xff" + pack(">l", int(round(f * 65536))) - -def encodeFloat(f): - # For CFF only, used in cffLib - s = str(f).upper() - if s[:2] == "0.": - s = s[1:] - elif s[:3] == "-0.": - s = "-" + s[2:] - nibbles = [] - while s: - c = s[0] - s = s[1:] - if c == "E" and s[:1] == "-": - s = s[1:] - c = "E-" - nibbles.append(realNibblesDict[c]) - nibbles.append(0xf) - if len(nibbles) % 2: - nibbles.append(0xf) - d = bytechr(30) - for i in range(0, len(nibbles), 2): - d = d + bytechr(nibbles[i] << 4 | nibbles[i+1]) - return d - - -class CharStringCompileError(Exception): pass - - -class T2CharString(ByteCodeBase): - - operandEncoding = t2OperandEncoding - operators, opcodes = buildOperatorDict(t2Operators) - - def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None): - if program is None: - program = [] - self.bytecode = bytecode - self.program = program - self.private = private - self.globalSubrs = globalSubrs if globalSubrs is not None else [] - - def __repr__(self): - if self.bytecode is None: - return "<%s (source) at %x>" % (self.__class__.__name__, id(self)) - else: - return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self)) - - def 
getIntEncoder(self): - return encodeIntT2 - - def getFixedEncoder(self): - return encodeFixed - - def decompile(self): - if not self.needsDecompilation(): - return - subrs = getattr(self.private, "Subrs", []) - decompiler = SimpleT2Decompiler(subrs, self.globalSubrs) - decompiler.execute(self) - - def draw(self, pen): - subrs = getattr(self.private, "Subrs", []) - extractor = T2OutlineExtractor(pen, subrs, self.globalSubrs, - self.private.nominalWidthX, self.private.defaultWidthX) - extractor.execute(self) - self.width = extractor.width - - def compile(self): - if self.bytecode is not None: - return - assert self.program, "illegal CharString: decompiled to empty program" - assert self.program[-1] in ("endchar", "return", "callsubr", "callgsubr", - "seac"), "illegal CharString" - bytecode = [] - opcodes = self.opcodes - program = self.program - encodeInt = self.getIntEncoder() - encodeFixed = self.getFixedEncoder() - i = 0 - end = len(program) - while i < end: - token = program[i] - i = i + 1 - tp = type(token) - if issubclass(tp, basestring): - try: - bytecode.extend(bytechr(b) for b in opcodes[token]) - except KeyError: - raise CharStringCompileError("illegal operator: %s" % token) - if token in ('hintmask', 'cntrmask'): - bytecode.append(program[i]) # hint mask - i = i + 1 - elif tp == int: - bytecode.append(encodeInt(token)) - elif tp == float: - bytecode.append(encodeFixed(token)) - else: - assert 0, "unsupported type: %s" % tp - try: - bytecode = bytesjoin(bytecode) - except TypeError: - print(bytecode) - raise - self.setBytecode(bytecode) - - def needsDecompilation(self): - return self.bytecode is not None - - def setProgram(self, program): - self.program = program - self.bytecode = None - - def setBytecode(self, bytecode): - self.bytecode = bytecode - self.program = None - - def getToken(self, index, - len=len, byteord=byteord, basestring=basestring, - isinstance=isinstance): - if self.bytecode is not None: - if index >= len(self.bytecode): - return None, 0, 
0 - b0 = byteord(self.bytecode[index]) - index = index + 1 - handler = self.operandEncoding[b0] - token, index = handler(self, b0, self.bytecode, index) - else: - if index >= len(self.program): - return None, 0, 0 - token = self.program[index] - index = index + 1 - isOperator = isinstance(token, basestring) - return token, isOperator, index - - def getBytes(self, index, nBytes): - if self.bytecode is not None: - newIndex = index + nBytes - bytes = self.bytecode[index:newIndex] - index = newIndex - else: - bytes = self.program[index] - index = index + 1 - assert len(bytes) == nBytes - return bytes, index - - def handle_operator(self, operator): - return operator - - def toXML(self, xmlWriter): - from fontTools.misc.textTools import num2binary - if self.bytecode is not None: - xmlWriter.dumphex(self.bytecode) - else: - index = 0 - args = [] - while True: - token, isOperator, index = self.getToken(index) - if token is None: - break - if isOperator: - args = [str(arg) for arg in args] - if token in ('hintmask', 'cntrmask'): - hintMask, isOperator, index = self.getToken(index) - bits = [] - for byte in hintMask: - bits.append(num2binary(byteord(byte), 8)) - hintMask = strjoin(bits) - line = ' '.join(args + [token, hintMask]) - else: - line = ' '.join(args + [token]) - xmlWriter.write(line) - xmlWriter.newline() - args = [] - else: - args.append(token) - - def fromXML(self, name, attrs, content): - from fontTools.misc.textTools import binary2num, readHex - if attrs.get("raw"): - self.setBytecode(readHex(content)) - return - content = strjoin(content) - content = content.split() - program = [] - end = len(content) - i = 0 - while i < end: - token = content[i] - i = i + 1 - try: - token = int(token) - except ValueError: - try: - token = float(token) - except ValueError: - program.append(token) - if token in ('hintmask', 'cntrmask'): - mask = content[i] - maskBytes = b"" - for j in range(0, len(mask), 8): - maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8])) - 
program.append(maskBytes) - i = i + 1 - else: - program.append(token) - else: - program.append(token) - self.setProgram(program) - - -t1Operators = [ -# opcode name - (1, 'hstem'), - (3, 'vstem'), - (4, 'vmoveto'), - (5, 'rlineto'), - (6, 'hlineto'), - (7, 'vlineto'), - (8, 'rrcurveto'), - (9, 'closepath'), - (10, 'callsubr'), - (11, 'return'), - (13, 'hsbw'), - (14, 'endchar'), - (21, 'rmoveto'), - (22, 'hmoveto'), - (30, 'vhcurveto'), - (31, 'hvcurveto'), - ((12, 0), 'dotsection'), - ((12, 1), 'vstem3'), - ((12, 2), 'hstem3'), - ((12, 6), 'seac'), - ((12, 7), 'sbw'), - ((12, 12), 'div'), - ((12, 16), 'callothersubr'), - ((12, 17), 'pop'), - ((12, 33), 'setcurrentpoint'), -] - -class T1CharString(T2CharString): - - operandEncoding = t1OperandEncoding - operators, opcodes = buildOperatorDict(t1Operators) - - def __init__(self, bytecode=None, program=None, subrs=None): - if program is None: - program = [] - self.bytecode = bytecode - self.program = program - self.subrs = subrs - - def getIntEncoder(self): - return encodeIntT1 - - def getFixedEncoder(self): - def encodeFixed(value): - raise TypeError("Type 1 charstrings don't support floating point operands") - - def decompile(self): - if self.bytecode is None: - return - program = [] - index = 0 - while True: - token, isOperator, index = self.getToken(index) - if token is None: - break - program.append(token) - self.setProgram(program) - - def draw(self, pen): - extractor = T1OutlineExtractor(pen, self.subrs) - extractor.execute(self) - self.width = extractor.width - - -class SimpleT2Decompiler(object): - - def __init__(self, localSubrs, globalSubrs): - self.localSubrs = localSubrs - self.localBias = calcSubrBias(localSubrs) - self.globalSubrs = globalSubrs - self.globalBias = calcSubrBias(globalSubrs) - self.reset() - - def reset(self): - self.callingStack = [] - self.operandStack = [] - self.hintCount = 0 - self.hintMaskBytes = 0 - - def execute(self, charString): - self.callingStack.append(charString) - 
needsDecompilation = charString.needsDecompilation() - if needsDecompilation: - program = [] - pushToProgram = program.append - else: - pushToProgram = lambda x: None - pushToStack = self.operandStack.append - index = 0 - while True: - token, isOperator, index = charString.getToken(index) - if token is None: - break # we're done! - pushToProgram(token) - if isOperator: - handlerName = "op_" + token - handler = getattr(self, handlerName, None) - if handler is not None: - rv = handler(index) - if rv: - hintMaskBytes, index = rv - pushToProgram(hintMaskBytes) - else: - self.popall() - else: - pushToStack(token) - if needsDecompilation: - assert program, "illegal CharString: decompiled to empty program" - assert program[-1] in ("endchar", "return", "callsubr", "callgsubr", - "seac"), "illegal CharString" - charString.setProgram(program) - del self.callingStack[-1] - - def pop(self): - value = self.operandStack[-1] - del self.operandStack[-1] - return value - - def popall(self): - stack = self.operandStack[:] - self.operandStack[:] = [] - return stack - - def push(self, value): - self.operandStack.append(value) - - def op_return(self, index): - if self.operandStack: - pass - - def op_endchar(self, index): - pass - - def op_ignore(self, index): - pass - - def op_callsubr(self, index): - subrIndex = self.pop() - subr = self.localSubrs[subrIndex+self.localBias] - self.execute(subr) - - def op_callgsubr(self, index): - subrIndex = self.pop() - subr = self.globalSubrs[subrIndex+self.globalBias] - self.execute(subr) - - def op_hstem(self, index): - self.countHints() - def op_vstem(self, index): - self.countHints() - def op_hstemhm(self, index): - self.countHints() - def op_vstemhm(self, index): - self.countHints() - - def op_hintmask(self, index): - if not self.hintMaskBytes: - self.countHints() - self.hintMaskBytes = (self.hintCount + 7) // 8 - hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes) - return hintMaskBytes, index - - op_cntrmask = 
op_hintmask - - def countHints(self): - args = self.popall() - self.hintCount = self.hintCount + len(args) // 2 - - # misc - def op_and(self, index): - raise NotImplementedError - def op_or(self, index): - raise NotImplementedError - def op_not(self, index): - raise NotImplementedError - def op_store(self, index): - raise NotImplementedError - def op_abs(self, index): - raise NotImplementedError - def op_add(self, index): - raise NotImplementedError - def op_sub(self, index): - raise NotImplementedError - def op_div(self, index): - raise NotImplementedError - def op_load(self, index): - raise NotImplementedError - def op_neg(self, index): - raise NotImplementedError - def op_eq(self, index): - raise NotImplementedError - def op_drop(self, index): - raise NotImplementedError - def op_put(self, index): - raise NotImplementedError - def op_get(self, index): - raise NotImplementedError - def op_ifelse(self, index): - raise NotImplementedError - def op_random(self, index): - raise NotImplementedError - def op_mul(self, index): - raise NotImplementedError - def op_sqrt(self, index): - raise NotImplementedError - def op_dup(self, index): - raise NotImplementedError - def op_exch(self, index): - raise NotImplementedError - def op_index(self, index): - raise NotImplementedError - def op_roll(self, index): - raise NotImplementedError - -class T2OutlineExtractor(SimpleT2Decompiler): - - def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX): - SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) - self.pen = pen - self.nominalWidthX = nominalWidthX - self.defaultWidthX = defaultWidthX - - def reset(self): - SimpleT2Decompiler.reset(self) - self.hints = [] - self.gotWidth = 0 - self.width = 0 - self.currentPoint = (0, 0) - self.sawMoveTo = 0 - - def _nextPoint(self, point): - x, y = self.currentPoint - point = x + point[0], y + point[1] - self.currentPoint = point - return point - - def rMoveTo(self, point): - 
self.pen.moveTo(self._nextPoint(point)) - self.sawMoveTo = 1 - - def rLineTo(self, point): - if not self.sawMoveTo: - self.rMoveTo((0, 0)) - self.pen.lineTo(self._nextPoint(point)) - - def rCurveTo(self, pt1, pt2, pt3): - if not self.sawMoveTo: - self.rMoveTo((0, 0)) - nextPoint = self._nextPoint - self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), nextPoint(pt3)) - - def closePath(self): - if self.sawMoveTo: - self.pen.closePath() - self.sawMoveTo = 0 - - def endPath(self): - # In T2 there are no open paths, so always do a closePath when - # finishing a sub path. - self.closePath() - - def popallWidth(self, evenOdd=0): - args = self.popall() - if not self.gotWidth: - if evenOdd ^ (len(args) % 2): - self.width = self.nominalWidthX + args[0] - args = args[1:] - else: - self.width = self.defaultWidthX - self.gotWidth = 1 - return args - - def countHints(self): - args = self.popallWidth() - self.hintCount = self.hintCount + len(args) // 2 - - # - # hint operators - # - #def op_hstem(self, index): - # self.countHints() - #def op_vstem(self, index): - # self.countHints() - #def op_hstemhm(self, index): - # self.countHints() - #def op_vstemhm(self, index): - # self.countHints() - #def op_hintmask(self, index): - # self.countHints() - #def op_cntrmask(self, index): - # self.countHints() - - # - # path constructors, moveto - # - def op_rmoveto(self, index): - self.endPath() - self.rMoveTo(self.popallWidth()) - def op_hmoveto(self, index): - self.endPath() - self.rMoveTo((self.popallWidth(1)[0], 0)) - def op_vmoveto(self, index): - self.endPath() - self.rMoveTo((0, self.popallWidth(1)[0])) - def op_endchar(self, index): - self.endPath() - args = self.popallWidth() - if args: - from fontTools.encodings.StandardEncoding import StandardEncoding - # endchar can do seac accent bulding; The T2 spec says it's deprecated, - # but recent software that shall remain nameless does output it. 
- adx, ady, bchar, achar = args - baseGlyph = StandardEncoding[bchar] - self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) - accentGlyph = StandardEncoding[achar] - self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) - - # - # path constructors, lines - # - def op_rlineto(self, index): - args = self.popall() - for i in range(0, len(args), 2): - point = args[i:i+2] - self.rLineTo(point) - - def op_hlineto(self, index): - self.alternatingLineto(1) - def op_vlineto(self, index): - self.alternatingLineto(0) - - # - # path constructors, curves - # - def op_rrcurveto(self, index): - """{dxa dya dxb dyb dxc dyc}+ rrcurveto""" - args = self.popall() - for i in range(0, len(args), 6): - dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6] - self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc)) - - def op_rcurveline(self, index): - """{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline""" - args = self.popall() - for i in range(0, len(args)-2, 6): - dxb, dyb, dxc, dyc, dxd, dyd = args[i:i+6] - self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) - self.rLineTo(args[-2:]) - - def op_rlinecurve(self, index): - """{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve""" - args = self.popall() - lineArgs = args[:-6] - for i in range(0, len(lineArgs), 2): - self.rLineTo(lineArgs[i:i+2]) - dxb, dyb, dxc, dyc, dxd, dyd = args[-6:] - self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd)) - - def op_vvcurveto(self, index): - "dx1? {dya dxb dyb dyc}+ vvcurveto" - args = self.popall() - if len(args) % 2: - dx1 = args[0] - args = args[1:] - else: - dx1 = 0 - for i in range(0, len(args), 4): - dya, dxb, dyb, dyc = args[i:i+4] - self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc)) - dx1 = 0 - - def op_hhcurveto(self, index): - """dy1? 
{dxa dxb dyb dxc}+ hhcurveto""" - args = self.popall() - if len(args) % 2: - dy1 = args[0] - args = args[1:] - else: - dy1 = 0 - for i in range(0, len(args), 4): - dxa, dxb, dyb, dxc = args[i:i+4] - self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0)) - dy1 = 0 - - def op_vhcurveto(self, index): - """dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30) - {dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto - """ - args = self.popall() - while args: - args = self.vcurveto(args) - if args: - args = self.hcurveto(args) - - def op_hvcurveto(self, index): - """dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf? - {dxa dxb dyb dyc dyd dxe dye dxf}+ dyf? - """ - args = self.popall() - while args: - args = self.hcurveto(args) - if args: - args = self.vcurveto(args) - - # - # path constructors, flex - # - def op_hflex(self, index): - dx1, dx2, dy2, dx3, dx4, dx5, dx6 = self.popall() - dy1 = dy3 = dy4 = dy6 = 0 - dy5 = -dy2 - self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) - self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) - def op_flex(self, index): - dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall() - self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) - self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) - def op_hflex1(self, index): - dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall() - dy3 = dy4 = 0 - dy6 = -(dy1 + dy2 + dy3 + dy4 + dy5) - - self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) - self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) - def op_flex1(self, index): - dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall() - dx = dx1 + dx2 + dx3 + dx4 + dx5 - dy = dy1 + dy2 + dy3 + dy4 + dy5 - if abs(dx) > abs(dy): - dx6 = d6 - dy6 = -dy - else: - dx6 = -dx - dy6 = d6 - self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3)) - self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6)) - - # - # MultipleMaster. Well... 
- # - def op_blend(self, index): - self.popall() - - # misc - def op_and(self, index): - raise NotImplementedError - def op_or(self, index): - raise NotImplementedError - def op_not(self, index): - raise NotImplementedError - def op_store(self, index): - raise NotImplementedError - def op_abs(self, index): - raise NotImplementedError - def op_add(self, index): - raise NotImplementedError - def op_sub(self, index): - raise NotImplementedError - def op_div(self, index): - num2 = self.pop() - num1 = self.pop() - d1 = num1//num2 - d2 = num1/num2 - if d1 == d2: - self.push(d1) - else: - self.push(d2) - def op_load(self, index): - raise NotImplementedError - def op_neg(self, index): - raise NotImplementedError - def op_eq(self, index): - raise NotImplementedError - def op_drop(self, index): - raise NotImplementedError - def op_put(self, index): - raise NotImplementedError - def op_get(self, index): - raise NotImplementedError - def op_ifelse(self, index): - raise NotImplementedError - def op_random(self, index): - raise NotImplementedError - def op_mul(self, index): - raise NotImplementedError - def op_sqrt(self, index): - raise NotImplementedError - def op_dup(self, index): - raise NotImplementedError - def op_exch(self, index): - raise NotImplementedError - def op_index(self, index): - raise NotImplementedError - def op_roll(self, index): - raise NotImplementedError - - # - # miscellaneous helpers - # - def alternatingLineto(self, isHorizontal): - args = self.popall() - for arg in args: - if isHorizontal: - point = (arg, 0) - else: - point = (0, arg) - self.rLineTo(point) - isHorizontal = not isHorizontal - - def vcurveto(self, args): - dya, dxb, dyb, dxc = args[:4] - args = args[4:] - if len(args) == 1: - dyc = args[0] - args = [] - else: - dyc = 0 - self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc)) - return args - - def hcurveto(self, args): - dxa, dxb, dyb, dyc = args[:4] - args = args[4:] - if len(args) == 1: - dxc = args[0] - args = [] - else: - dxc = 0 - 
self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc)) - return args - - -class T1OutlineExtractor(T2OutlineExtractor): - - def __init__(self, pen, subrs): - self.pen = pen - self.subrs = subrs - self.reset() - - def reset(self): - self.flexing = 0 - self.width = 0 - self.sbx = 0 - T2OutlineExtractor.reset(self) - - def endPath(self): - if self.sawMoveTo: - self.pen.endPath() - self.sawMoveTo = 0 - - def popallWidth(self, evenOdd=0): - return self.popall() - - def exch(self): - stack = self.operandStack - stack[-1], stack[-2] = stack[-2], stack[-1] - - # - # path constructors - # - def op_rmoveto(self, index): - if self.flexing: - return - self.endPath() - self.rMoveTo(self.popall()) - def op_hmoveto(self, index): - if self.flexing: - # We must add a parameter to the stack if we are flexing - self.push(0) - return - self.endPath() - self.rMoveTo((self.popall()[0], 0)) - def op_vmoveto(self, index): - if self.flexing: - # We must add a parameter to the stack if we are flexing - self.push(0) - self.exch() - return - self.endPath() - self.rMoveTo((0, self.popall()[0])) - def op_closepath(self, index): - self.closePath() - def op_setcurrentpoint(self, index): - args = self.popall() - x, y = args - self.currentPoint = x, y - - def op_endchar(self, index): - self.endPath() - - def op_hsbw(self, index): - sbx, wx = self.popall() - self.width = wx - self.sbx = sbx - self.currentPoint = sbx, self.currentPoint[1] - def op_sbw(self, index): - self.popall() # XXX - - # - def op_callsubr(self, index): - subrIndex = self.pop() - subr = self.subrs[subrIndex] - self.execute(subr) - def op_callothersubr(self, index): - subrIndex = self.pop() - nArgs = self.pop() - #print nArgs, subrIndex, "callothersubr" - if subrIndex == 0 and nArgs == 3: - self.doFlex() - self.flexing = 0 - elif subrIndex == 1 and nArgs == 0: - self.flexing = 1 - # ignore... - def op_pop(self, index): - pass # ignore... 
- - def doFlex(self): - finaly = self.pop() - finalx = self.pop() - self.pop() # flex height is unused - - p3y = self.pop() - p3x = self.pop() - bcp4y = self.pop() - bcp4x = self.pop() - bcp3y = self.pop() - bcp3x = self.pop() - p2y = self.pop() - p2x = self.pop() - bcp2y = self.pop() - bcp2x = self.pop() - bcp1y = self.pop() - bcp1x = self.pop() - rpy = self.pop() - rpx = self.pop() - - # call rrcurveto - self.push(bcp1x+rpx) - self.push(bcp1y+rpy) - self.push(bcp2x) - self.push(bcp2y) - self.push(p2x) - self.push(p2y) - self.op_rrcurveto(None) - - # call rrcurveto - self.push(bcp3x) - self.push(bcp3y) - self.push(bcp4x) - self.push(bcp4y) - self.push(p3x) - self.push(p3y) - self.op_rrcurveto(None) - - # Push back final coords so subr 0 can find them - self.push(finalx) - self.push(finaly) - - def op_dotsection(self, index): - self.popall() # XXX - def op_hstem3(self, index): - self.popall() # XXX - def op_seac(self, index): - "asb adx ady bchar achar seac" - from fontTools.encodings.StandardEncoding import StandardEncoding - asb, adx, ady, bchar, achar = self.popall() - baseGlyph = StandardEncoding[bchar] - self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0)) - accentGlyph = StandardEncoding[achar] - adx = adx + self.sbx - asb # seac weirdness - self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady)) - def op_vstem3(self, index): - self.popall() # XXX - - -class DictDecompiler(ByteCodeBase): - - operandEncoding = cffDictOperandEncoding - - def __init__(self, strings): - self.stack = [] - self.strings = strings - self.dict = {} - - def getDict(self): - assert len(self.stack) == 0, "non-empty stack" - return self.dict - - def decompile(self, data): - index = 0 - lenData = len(data) - push = self.stack.append - while index < lenData: - b0 = byteord(data[index]) - index = index + 1 - handler = self.operandEncoding[b0] - value, index = handler(self, b0, data, index) - if value is not None: - push(value) - - def pop(self): - value = self.stack[-1] - del 
self.stack[-1] - return value - - def popall(self): - args = self.stack[:] - del self.stack[:] - return args - - def handle_operator(self, operator): - operator, argType = operator - if isinstance(argType, type(())): - value = () - for i in range(len(argType)-1, -1, -1): - arg = argType[i] - arghandler = getattr(self, "arg_" + arg) - value = (arghandler(operator),) + value - else: - arghandler = getattr(self, "arg_" + argType) - value = arghandler(operator) - self.dict[operator] = value - - def arg_number(self, name): - return self.pop() - def arg_SID(self, name): - return self.strings[self.pop()] - def arg_array(self, name): - return self.popall() - def arg_delta(self, name): - out = [] - current = 0 - for v in self.popall(): - current = current + v - out.append(current) - return out - - -def calcSubrBias(subrs): - nSubrs = len(subrs) - if nSubrs < 1240: - bias = 107 - elif nSubrs < 33900: - bias = 1131 - else: - bias = 32768 - return bias diff -Nru fonttools-3.0/Tools/fontTools/misc/psLib.py fonttools-3.21.2/Tools/fontTools/misc/psLib.py --- fonttools-3.0/Tools/fontTools/misc/psLib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/psLib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,350 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import eexec -from .psOperators import * -import re -import collections -from string import whitespace - - -ps_special = '()<>[]{}%' # / is one too, but we take care of that one differently - -skipwhiteRE = re.compile("[%s]*" % whitespace) -endofthingPat = "[^][(){}<>/%%%s]*" % whitespace -endofthingRE = re.compile(endofthingPat) -commentRE = re.compile("%[^\n\r]*") - -# XXX This not entirely correct as it doesn't allow *nested* embedded parens: -stringPat = r""" - \( - ( - ( - [^()]* \ [()] - ) - | - ( - [^()]* \( [^()]* \) - ) - )* - [^()]* - \) -""" -stringPat = "".join(stringPat.split()) -stringRE = re.compile(stringPat) 
- -hexstringRE = re.compile("<[%s0-9A-Fa-f]*>" % whitespace) - -class PSTokenError(Exception): pass -class PSError(Exception): pass - - -class PSTokenizer(BytesIO): - - def getnexttoken(self, - # localize some stuff, for performance - len=len, - ps_special=ps_special, - stringmatch=stringRE.match, - hexstringmatch=hexstringRE.match, - commentmatch=commentRE.match, - endmatch=endofthingRE.match, - whitematch=skipwhiteRE.match): - - _, nextpos = whitematch(self.buf, self.pos).span() - self.pos = nextpos - if self.pos >= self.len: - return None, None - pos = self.pos - buf = self.buf - char = buf[pos] - if char in ps_special: - if char in '{}[]': - tokentype = 'do_special' - token = char - elif char == '%': - tokentype = 'do_comment' - _, nextpos = commentmatch(buf, pos).span() - token = buf[pos:nextpos] - elif char == '(': - tokentype = 'do_string' - m = stringmatch(buf, pos) - if m is None: - raise PSTokenError('bad string at character %d' % pos) - _, nextpos = m.span() - token = buf[pos:nextpos] - elif char == '<': - tokentype = 'do_hexstring' - m = hexstringmatch(buf, pos) - if m is None: - raise PSTokenError('bad hexstring at character %d' % pos) - _, nextpos = m.span() - token = buf[pos:nextpos] - else: - raise PSTokenError('bad token at character %d' % pos) - else: - if char == '/': - tokentype = 'do_literal' - m = endmatch(buf, pos+1) - else: - tokentype = '' - m = endmatch(buf, pos) - if m is None: - raise PSTokenError('bad token at character %d' % pos) - _, nextpos = m.span() - token = buf[pos:nextpos] - self.pos = pos + len(token) - return tokentype, token - - def skipwhite(self, whitematch=skipwhiteRE.match): - _, nextpos = whitematch(self.buf, self.pos).span() - self.pos = nextpos - - def starteexec(self): - self.pos = self.pos + 1 - #self.skipwhite() - self.dirtybuf = self.buf[self.pos:] - self.buf, R = eexec.decrypt(self.dirtybuf, 55665) - self.len = len(self.buf) - self.pos = 4 - - def stopeexec(self): - if not hasattr(self, 'dirtybuf'): - return - 
self.buf = self.dirtybuf - del self.dirtybuf - - def flush(self): - if self.buflist: - self.buf = self.buf + "".join(self.buflist) - self.buflist = [] - - -class PSInterpreter(PSOperators): - - def __init__(self): - systemdict = {} - userdict = {} - self.dictstack = [systemdict, userdict] - self.stack = [] - self.proclevel = 0 - self.procmark = ps_procmark() - self.fillsystemdict() - - def fillsystemdict(self): - systemdict = self.dictstack[0] - systemdict['['] = systemdict['mark'] = self.mark = ps_mark() - systemdict[']'] = ps_operator(']', self.do_makearray) - systemdict['true'] = ps_boolean(1) - systemdict['false'] = ps_boolean(0) - systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding) - systemdict['FontDirectory'] = ps_dict({}) - self.suckoperators(systemdict, self.__class__) - - def suckoperators(self, systemdict, klass): - for name in dir(klass): - attr = getattr(self, name) - if isinstance(attr, collections.Callable) and name[:3] == 'ps_': - name = name[3:] - systemdict[name] = ps_operator(name, attr) - for baseclass in klass.__bases__: - self.suckoperators(systemdict, baseclass) - - def interpret(self, data, getattr=getattr): - tokenizer = self.tokenizer = PSTokenizer(data) - getnexttoken = tokenizer.getnexttoken - do_token = self.do_token - handle_object = self.handle_object - try: - while 1: - tokentype, token = getnexttoken() - #print token - if not token: - break - if tokentype: - handler = getattr(self, tokentype) - object = handler(token) - else: - object = do_token(token) - if object is not None: - handle_object(object) - tokenizer.close() - self.tokenizer = None - finally: - if self.tokenizer is not None: - if 0: - print('ps error:\n- - - - - - -') - print(self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos]) - print('>>>') - print(self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50]) - print('- - - - - - -') - - def handle_object(self, object): - if not (self.proclevel or object.literal or object.type == 'proceduretype'): - 
if object.type != 'operatortype': - object = self.resolve_name(object.value) - if object.literal: - self.push(object) - else: - if object.type == 'proceduretype': - self.call_procedure(object) - else: - object.function() - else: - self.push(object) - - def call_procedure(self, proc): - handle_object = self.handle_object - for item in proc.value: - handle_object(item) - - def resolve_name(self, name): - dictstack = self.dictstack - for i in range(len(dictstack)-1, -1, -1): - if name in dictstack[i]: - return dictstack[i][name] - raise PSError('name error: ' + str(name)) - - def do_token(self, token, - int=int, - float=float, - ps_name=ps_name, - ps_integer=ps_integer, - ps_real=ps_real): - try: - num = int(token) - except (ValueError, OverflowError): - try: - num = float(token) - except (ValueError, OverflowError): - if '#' in token: - hashpos = token.find('#') - try: - base = int(token[:hashpos]) - num = int(token[hashpos+1:], base) - except (ValueError, OverflowError): - return ps_name(token) - else: - return ps_integer(num) - else: - return ps_name(token) - else: - return ps_real(num) - else: - return ps_integer(num) - - def do_comment(self, token): - pass - - def do_literal(self, token): - return ps_literal(token[1:]) - - def do_string(self, token): - return ps_string(token[1:-1]) - - def do_hexstring(self, token): - hexStr = "".join(token[1:-1].split()) - if len(hexStr) % 2: - hexStr = hexStr + '0' - cleanstr = [] - for i in range(0, len(hexStr), 2): - cleanstr.append(chr(int(hexStr[i:i+2], 16))) - cleanstr = "".join(cleanstr) - return ps_string(cleanstr) - - def do_special(self, token): - if token == '{': - self.proclevel = self.proclevel + 1 - return self.procmark - elif token == '}': - proc = [] - while 1: - topobject = self.pop() - if topobject == self.procmark: - break - proc.append(topobject) - self.proclevel = self.proclevel - 1 - proc.reverse() - return ps_procedure(proc) - elif token == '[': - return self.mark - elif token == ']': - return ps_name(']') 
- else: - raise PSTokenError('huh?') - - def push(self, object): - self.stack.append(object) - - def pop(self, *types): - stack = self.stack - if not stack: - raise PSError('stack underflow') - object = stack[-1] - if types: - if object.type not in types: - raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type)) - del stack[-1] - return object - - def do_makearray(self): - array = [] - while 1: - topobject = self.pop() - if topobject == self.mark: - break - array.append(topobject) - array.reverse() - self.push(ps_array(array)) - - def close(self): - """Remove circular references.""" - del self.stack - del self.dictstack - - -def unpack_item(item): - tp = type(item.value) - if tp == dict: - newitem = {} - for key, value in item.value.items(): - newitem[key] = unpack_item(value) - elif tp == list: - newitem = [None] * len(item.value) - for i in range(len(item.value)): - newitem[i] = unpack_item(item.value[i]) - if item.type == 'proceduretype': - newitem = tuple(newitem) - else: - newitem = item.value - return newitem - -def suckfont(data): - m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data) - if m: - fontName = m.group(1) - else: - fontName = None - interpreter = PSInterpreter() - interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop") - interpreter.interpret(data) - fontdir = interpreter.dictstack[0]['FontDirectory'].value - if fontName in fontdir: - rawfont = fontdir[fontName] - else: - # fall back, in case fontName wasn't found - fontNames = list(fontdir.keys()) - if len(fontNames) > 1: - fontNames.remove("Helvetica") - fontNames.sort() - rawfont = fontdir[fontNames[0]] - interpreter.close() - return unpack_item(rawfont) - - -if __name__ == "__main__": - import EasyDialogs - path = EasyDialogs.AskFileForOpen() - if path: - from fontTools import t1Lib - data, kind = t1Lib.read(path) - font = suckfont(data) diff -Nru fonttools-3.0/Tools/fontTools/misc/psOperators.py 
fonttools-3.21.2/Tools/fontTools/misc/psOperators.py --- fonttools-3.0/Tools/fontTools/misc/psOperators.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/psOperators.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,540 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -_accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"} - - -class ps_object(object): - - literal = 1 - access = 0 - value = None - - def __init__(self, value): - self.value = value - self.type = self.__class__.__name__[3:] + "type" - - def __repr__(self): - return "<%s %s>" % (self.__class__.__name__[3:], repr(self.value)) - - -class ps_operator(ps_object): - - literal = 0 - - def __init__(self, name, function): - self.name = name - self.function = function - self.type = self.__class__.__name__[3:] + "type" - def __repr__(self): - return "" % self.name - -class ps_procedure(ps_object): - literal = 0 - def __repr__(self): - return "" - def __str__(self): - psstring = '{' - for i in range(len(self.value)): - if i: - psstring = psstring + ' ' + str(self.value[i]) - else: - psstring = psstring + str(self.value[i]) - return psstring + '}' - -class ps_name(ps_object): - literal = 0 - def __str__(self): - if self.literal: - return '/' + self.value - else: - return self.value - -class ps_literal(ps_object): - def __str__(self): - return '/' + self.value - -class ps_array(ps_object): - def __str__(self): - psstring = '[' - for i in range(len(self.value)): - item = self.value[i] - access = _accessstrings[item.access] - if access: - access = ' ' + access - if i: - psstring = psstring + ' ' + str(item) + access - else: - psstring = psstring + str(item) + access - return psstring + ']' - def __repr__(self): - return "" - -_type1_pre_eexec_order = [ - "FontInfo", - "FontName", - "Encoding", - "PaintType", - "FontType", - "FontMatrix", - "FontBBox", - "UniqueID", - "Metrics", - "StrokeWidth" - ] - 
-_type1_fontinfo_order = [ - "version", - "Notice", - "FullName", - "FamilyName", - "Weight", - "ItalicAngle", - "isFixedPitch", - "UnderlinePosition", - "UnderlineThickness" - ] - -_type1_post_eexec_order = [ - "Private", - "CharStrings", - "FID" - ] - -def _type1_item_repr(key, value): - psstring = "" - access = _accessstrings[value.access] - if access: - access = access + ' ' - if key == 'CharStrings': - psstring = psstring + "/%s %s def\n" % (key, _type1_CharString_repr(value.value)) - elif key == 'Encoding': - psstring = psstring + _type1_Encoding_repr(value, access) - else: - psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access) - return psstring - -def _type1_Encoding_repr(encoding, access): - encoding = encoding.value - psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n" - for i in range(256): - name = encoding[i].value - if name != '.notdef': - psstring = psstring + "dup %d /%s put\n" % (i, name) - return psstring + access + "def\n" - -def _type1_CharString_repr(charstrings): - items = sorted(charstrings.items()) - return 'xxx' - -class ps_font(ps_object): - def __str__(self): - psstring = "%d dict dup begin\n" % len(self.value) - for key in _type1_pre_eexec_order: - try: - value = self.value[key] - except KeyError: - pass - else: - psstring = psstring + _type1_item_repr(key, value) - items = sorted(self.value.items()) - for key, value in items: - if key not in _type1_pre_eexec_order + _type1_post_eexec_order: - psstring = psstring + _type1_item_repr(key, value) - psstring = psstring + "currentdict end\ncurrentfile eexec\ndup " - for key in _type1_post_eexec_order: - try: - value = self.value[key] - except KeyError: - pass - else: - psstring = psstring + _type1_item_repr(key, value) - return psstring + 'dup/FontName get exch definefont pop\nmark currentfile closefile\n' + \ - 8 * (64 * '0' + '\n') + 'cleartomark' + '\n' - def __repr__(self): - return '' - -class ps_file(ps_object): - pass - -class 
ps_dict(ps_object): - def __str__(self): - psstring = "%d dict dup begin\n" % len(self.value) - items = sorted(self.value.items()) - for key, value in items: - access = _accessstrings[value.access] - if access: - access = access + ' ' - psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access) - return psstring + 'end ' - def __repr__(self): - return "" - -class ps_mark(ps_object): - def __init__(self): - self.value = 'mark' - self.type = self.__class__.__name__[3:] + "type" - -class ps_procmark(ps_object): - def __init__(self): - self.value = 'procmark' - self.type = self.__class__.__name__[3:] + "type" - -class ps_null(ps_object): - def __init__(self): - self.type = self.__class__.__name__[3:] + "type" - -class ps_boolean(ps_object): - def __str__(self): - if self.value: - return 'true' - else: - return 'false' - -class ps_string(ps_object): - def __str__(self): - return "(%s)" % repr(self.value)[1:-1] - -class ps_integer(ps_object): - def __str__(self): - return repr(self.value) - -class ps_real(ps_object): - def __str__(self): - return repr(self.value) - - -class PSOperators(object): - - def ps_def(self): - obj = self.pop() - name = self.pop() - self.dictstack[-1][name.value] = obj - - def ps_bind(self): - proc = self.pop('proceduretype') - self.proc_bind(proc) - self.push(proc) - - def proc_bind(self, proc): - for i in range(len(proc.value)): - item = proc.value[i] - if item.type == 'proceduretype': - self.proc_bind(item) - else: - if not item.literal: - try: - obj = self.resolve_name(item.value) - except: - pass - else: - if obj.type == 'operatortype': - proc.value[i] = obj - - def ps_exch(self): - if len(self.stack) < 2: - raise RuntimeError('stack underflow') - obj1 = self.pop() - obj2 = self.pop() - self.push(obj1) - self.push(obj2) - - def ps_dup(self): - if not self.stack: - raise RuntimeError('stack underflow') - self.push(self.stack[-1]) - - def ps_exec(self): - obj = self.pop() - if obj.type == 'proceduretype': - self.call_procedure(obj) 
- else: - self.handle_object(obj) - - def ps_count(self): - self.push(ps_integer(len(self.stack))) - - def ps_eq(self): - any1 = self.pop() - any2 = self.pop() - self.push(ps_boolean(any1.value == any2.value)) - - def ps_ne(self): - any1 = self.pop() - any2 = self.pop() - self.push(ps_boolean(any1.value != any2.value)) - - def ps_cvx(self): - obj = self.pop() - obj.literal = 0 - self.push(obj) - - def ps_matrix(self): - matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)] - self.push(ps_array(matrix)) - - def ps_string(self): - num = self.pop('integertype').value - self.push(ps_string('\0' * num)) - - def ps_type(self): - obj = self.pop() - self.push(ps_string(obj.type)) - - def ps_store(self): - value = self.pop() - key = self.pop() - name = key.value - for i in range(len(self.dictstack)-1, -1, -1): - if name in self.dictstack[i]: - self.dictstack[i][name] = value - break - self.dictstack[-1][name] = value - - def ps_where(self): - name = self.pop() - # XXX - self.push(ps_boolean(0)) - - def ps_systemdict(self): - self.push(ps_dict(self.dictstack[0])) - - def ps_userdict(self): - self.push(ps_dict(self.dictstack[1])) - - def ps_currentdict(self): - self.push(ps_dict(self.dictstack[-1])) - - def ps_currentfile(self): - self.push(ps_file(self.tokenizer)) - - def ps_eexec(self): - f = self.pop('filetype').value - f.starteexec() - - def ps_closefile(self): - f = self.pop('filetype').value - f.skipwhite() - f.stopeexec() - - def ps_cleartomark(self): - obj = self.pop() - while obj != self.mark: - obj = self.pop() - - def ps_readstring(self, - ps_boolean=ps_boolean, - len=len): - s = self.pop('stringtype') - oldstr = s.value - f = self.pop('filetype') - #pad = file.value.read(1) - # for StringIO, this is faster - f.value.pos = f.value.pos + 1 - newstr = f.value.read(len(oldstr)) - s.value = newstr - self.push(s) - self.push(ps_boolean(len(oldstr) == len(newstr))) - - def ps_known(self): - key = self.pop() - d = 
self.pop('dicttype', 'fonttype') - self.push(ps_boolean(key.value in d.value)) - - def ps_if(self): - proc = self.pop('proceduretype') - if self.pop('booleantype').value: - self.call_procedure(proc) - - def ps_ifelse(self): - proc2 = self.pop('proceduretype') - proc1 = self.pop('proceduretype') - if self.pop('booleantype').value: - self.call_procedure(proc1) - else: - self.call_procedure(proc2) - - def ps_readonly(self): - obj = self.pop() - if obj.access < 1: - obj.access = 1 - self.push(obj) - - def ps_executeonly(self): - obj = self.pop() - if obj.access < 2: - obj.access = 2 - self.push(obj) - - def ps_noaccess(self): - obj = self.pop() - if obj.access < 3: - obj.access = 3 - self.push(obj) - - def ps_not(self): - obj = self.pop('booleantype', 'integertype') - if obj.type == 'booleantype': - self.push(ps_boolean(not obj.value)) - else: - self.push(ps_integer(~obj.value)) - - def ps_print(self): - str = self.pop('stringtype') - print('PS output --->', str.value) - - def ps_anchorsearch(self): - seek = self.pop('stringtype') - s = self.pop('stringtype') - seeklen = len(seek.value) - if s.value[:seeklen] == seek.value: - self.push(ps_string(s.value[seeklen:])) - self.push(seek) - self.push(ps_boolean(1)) - else: - self.push(s) - self.push(ps_boolean(0)) - - def ps_array(self): - num = self.pop('integertype') - array = ps_array([None] * num.value) - self.push(array) - - def ps_astore(self): - array = self.pop('arraytype') - for i in range(len(array.value)-1, -1, -1): - array.value[i] = self.pop() - self.push(array) - - def ps_load(self): - name = self.pop() - self.push(self.resolve_name(name.value)) - - def ps_put(self): - obj1 = self.pop() - obj2 = self.pop() - obj3 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype') - tp = obj3.type - if tp == 'arraytype' or tp == 'proceduretype': - obj3.value[obj2.value] = obj1 - elif tp == 'dicttype': - obj3.value[obj2.value] = obj1 - elif tp == 'stringtype': - index = obj2.value - obj3.value = obj3.value[:index] 
+ chr(obj1.value) + obj3.value[index+1:] - - def ps_get(self): - obj1 = self.pop() - if obj1.value == "Encoding": - pass - obj2 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype', 'fonttype') - tp = obj2.type - if tp in ('arraytype', 'proceduretype'): - self.push(obj2.value[obj1.value]) - elif tp in ('dicttype', 'fonttype'): - self.push(obj2.value[obj1.value]) - elif tp == 'stringtype': - self.push(ps_integer(ord(obj2.value[obj1.value]))) - else: - assert False, "shouldn't get here" - - def ps_getinterval(self): - obj1 = self.pop('integertype') - obj2 = self.pop('integertype') - obj3 = self.pop('arraytype', 'stringtype') - tp = obj3.type - if tp == 'arraytype': - self.push(ps_array(obj3.value[obj2.value:obj2.value + obj1.value])) - elif tp == 'stringtype': - self.push(ps_string(obj3.value[obj2.value:obj2.value + obj1.value])) - - def ps_putinterval(self): - obj1 = self.pop('arraytype', 'stringtype') - obj2 = self.pop('integertype') - obj3 = self.pop('arraytype', 'stringtype') - tp = obj3.type - if tp == 'arraytype': - obj3.value[obj2.value:obj2.value + len(obj1.value)] = obj1.value - elif tp == 'stringtype': - newstr = obj3.value[:obj2.value] - newstr = newstr + obj1.value - newstr = newstr + obj3.value[obj2.value + len(obj1.value):] - obj3.value = newstr - - def ps_cvn(self): - self.push(ps_name(self.pop('stringtype').value)) - - def ps_index(self): - n = self.pop('integertype').value - if n < 0: - raise RuntimeError('index may not be negative') - self.push(self.stack[-1-n]) - - def ps_for(self): - proc = self.pop('proceduretype') - limit = self.pop('integertype', 'realtype').value - increment = self.pop('integertype', 'realtype').value - i = self.pop('integertype', 'realtype').value - while 1: - if increment > 0: - if i > limit: - break - else: - if i < limit: - break - if type(i) == type(0.0): - self.push(ps_real(i)) - else: - self.push(ps_integer(i)) - self.call_procedure(proc) - i = i + increment - - def ps_forall(self): - proc = 
self.pop('proceduretype') - obj = self.pop('arraytype', 'stringtype', 'dicttype') - tp = obj.type - if tp == 'arraytype': - for item in obj.value: - self.push(item) - self.call_procedure(proc) - elif tp == 'stringtype': - for item in obj.value: - self.push(ps_integer(ord(item))) - self.call_procedure(proc) - elif tp == 'dicttype': - for key, value in obj.value.items(): - self.push(ps_name(key)) - self.push(value) - self.call_procedure(proc) - - def ps_definefont(self): - font = self.pop('dicttype') - name = self.pop() - font = ps_font(font.value) - self.dictstack[0]['FontDirectory'].value[name.value] = font - self.push(font) - - def ps_findfont(self): - name = self.pop() - font = self.dictstack[0]['FontDirectory'].value[name.value] - self.push(font) - - def ps_pop(self): - self.pop() - - def ps_dict(self): - self.pop('integertype') - self.push(ps_dict({})) - - def ps_begin(self): - self.dictstack.append(self.pop('dicttype').value) - - def ps_end(self): - if len(self.dictstack) > 2: - del self.dictstack[-1] - else: - raise RuntimeError('dictstack underflow') - -notdef = '.notdef' -from fontTools.encodings.StandardEncoding import StandardEncoding -ps_StandardEncoding = list(map(ps_name, StandardEncoding)) diff -Nru fonttools-3.0/Tools/fontTools/misc/py23.py fonttools-3.21.2/Tools/fontTools/misc/py23.py --- fonttools-3.0/Tools/fontTools/misc/py23.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/py23.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,161 +0,0 @@ -"""Python 2/3 compat layer.""" - -from __future__ import print_function, division, absolute_import -import sys - -try: - basestring -except NameError: - basestring = str - -try: - unicode -except NameError: - unicode = str - -try: - unichr - - if sys.maxunicode < 0x10FFFF: - # workarounds for Python 2 "narrow" builds with UCS2-only support. - - _narrow_unichr = unichr - - def unichr(i): - """ - Return the unicode character whose Unicode code is the integer 'i'. 
- The valid range is 0 to 0x10FFFF inclusive. - - >>> _narrow_unichr(0xFFFF + 1) - Traceback (most recent call last): - File "", line 1, in ? - ValueError: unichr() arg not in range(0x10000) (narrow Python build) - >>> unichr(0xFFFF + 1) == u'\U00010000' - True - >>> unichr(1114111) == u'\U0010FFFF' - True - >>> unichr(0x10FFFF + 1) - Traceback (most recent call last): - File "", line 1, in ? - ValueError: unichr() arg not in range(0x110000) - """ - try: - return _narrow_unichr(i) - except ValueError: - try: - padded_hex_str = hex(i)[2:].zfill(8) - escape_str = "\\U" + padded_hex_str - return escape_str.decode("unicode-escape") - except UnicodeDecodeError: - raise ValueError('unichr() arg not in range(0x110000)') - - import re - _unicode_escape_RE = re.compile(r'\\U[A-Fa-f0-9]{8}') - - def byteord(c): - """ - Given a 8-bit or unicode character, return an integer representing the - Unicode code point of the character. If a unicode argument is given, the - character's code point must be in the range 0 to 0x10FFFF inclusive. - - >>> ord(u'\U00010000') - Traceback (most recent call last): - File "", line 1, in ? - TypeError: ord() expected a character, but string of length 2 found - >>> byteord(u'\U00010000') == 0xFFFF + 1 - True - >>> byteord(u'\U0010FFFF') == 1114111 - True - """ - try: - return ord(c) - except TypeError as e: - try: - escape_str = c.encode('unicode-escape') - if not _unicode_escape_RE.match(escape_str): - raise - hex_str = escape_str[3:] - return int(hex_str, 16) - except: - raise TypeError(e) - - else: - byteord = ord - bytechr = chr - -except NameError: - unichr = chr - def bytechr(n): - return bytes([n]) - def byteord(c): - return c if isinstance(c, int) else ord(c) - - -# the 'io' module provides the same I/O interface on both 2 and 3. -# here we define an alias of io.StringIO to disambiguate it eternally... 
-from io import BytesIO -from io import StringIO as UnicodeIO -try: - # in python 2, by 'StringIO' we still mean a stream of *byte* strings - from StringIO import StringIO -except ImportError: - # in Python 3, we mean instead a stream of *unicode* strings - StringIO = UnicodeIO - - -def strjoin(iterable, joiner=''): - return tostr(joiner).join(iterable) - -def tobytes(s, encoding='ascii', errors='strict'): - if not isinstance(s, bytes): - return s.encode(encoding, errors) - else: - return s -def tounicode(s, encoding='ascii', errors='strict'): - if not isinstance(s, unicode): - return s.decode(encoding, errors) - else: - return s - -if str == bytes: - class Tag(str): - def tobytes(self): - if isinstance(self, bytes): - return self - else: - return self.encode('latin1') - - tostr = tobytes - - bytesjoin = strjoin -else: - class Tag(str): - - @staticmethod - def transcode(blob): - if not isinstance(blob, str): - blob = blob.decode('latin-1') - return blob - - def __new__(self, content): - return str.__new__(self, self.transcode(content)) - def __ne__(self, other): - return not self.__eq__(other) - def __eq__(self, other): - return str.__eq__(self, self.transcode(other)) - - def __hash__(self): - return str.__hash__(self) - - def tobytes(self): - return self.encode('latin-1') - - tostr = tounicode - - def bytesjoin(iterable, joiner=b''): - return tobytes(joiner).join(tobytes(item) for item in iterable) - - -if __name__ == "__main__": - import doctest, sys - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Tools/fontTools/misc/sstruct.py fonttools-3.21.2/Tools/fontTools/misc/sstruct.py --- fonttools-3.0/Tools/fontTools/misc/sstruct.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/sstruct.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,211 +0,0 @@ -"""sstruct.py -- SuperStruct - -Higher level layer on top of the struct module, enabling to -bind names to struct elements. 
The interface is similar to -struct, except the objects passed and returned are not tuples -(or argument lists), but dictionaries or instances. - -Just like struct, we use fmt strings to describe a data -structure, except we use one line per element. Lines are -separated by newlines or semi-colons. Each line contains -either one of the special struct characters ('@', '=', '<', -'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f'). -Repetitions, like the struct module offers them are not useful -in this context, except for fixed length strings (eg. 'myInt:5h' -is not allowed but 'myString:5s' is). The 'x' fmt character -(pad byte) is treated as 'special', since it is by definition -anonymous. Extra whitespace is allowed everywhere. - -The sstruct module offers one feature that the "normal" struct -module doesn't: support for fixed point numbers. These are spelled -as "n.mF", where n is the number of bits before the point, and m -the number of bits after the point. Fixed point numbers get -converted to floats. - -pack(fmt, object): - 'object' is either a dictionary or an instance (or actually - anything that has a __dict__ attribute). If it is a dictionary, - its keys are used for names. If it is an instance, it's - attributes are used to grab struct elements from. Returns - a string containing the data. - -unpack(fmt, data, object=None) - If 'object' is omitted (or None), a new dictionary will be - returned. If 'object' is a dictionary, it will be used to add - struct elements to. If it is an instance (or in fact anything - that has a __dict__ attribute), an attribute will be added for - each struct element. In the latter two cases, 'object' itself - is returned. - -unpack2(fmt, data, object=None) - Convenience function. Same as unpack, except data may be longer - than needed. The returned value is a tuple: (object, leftoverdata). - -calcsize(fmt) - like struct.calcsize(), but uses our own fmt strings: - it returns the size of the data in bytes. 
-""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi -import struct -import re - -__version__ = "1.2" -__copyright__ = "Copyright 1998, Just van Rossum " - - -class Error(Exception): - pass - -def pack(fmt, obj): - formatstring, names, fixes = getformat(fmt) - elements = [] - if not isinstance(obj, dict): - obj = obj.__dict__ - for name in names: - value = obj[name] - if name in fixes: - # fixed point conversion - value = fl2fi(value, fixes[name]) - elif isinstance(value, basestring): - value = tobytes(value) - elements.append(value) - data = struct.pack(*(formatstring,) + tuple(elements)) - return data - -def unpack(fmt, data, obj=None): - if obj is None: - obj = {} - data = tobytes(data) - formatstring, names, fixes = getformat(fmt) - if isinstance(obj, dict): - d = obj - else: - d = obj.__dict__ - elements = struct.unpack(formatstring, data) - for i in range(len(names)): - name = names[i] - value = elements[i] - if name in fixes: - # fixed point conversion - value = fi2fl(value, fixes[name]) - elif isinstance(value, bytes): - try: - value = tostr(value) - except UnicodeDecodeError: - pass - d[name] = value - return obj - -def unpack2(fmt, data, obj=None): - length = calcsize(fmt) - return unpack(fmt, data[:length], obj), data[length:] - -def calcsize(fmt): - formatstring, names, fixes = getformat(fmt) - return struct.calcsize(formatstring) - - -# matches "name:formatchar" (whitespace is allowed) -_elementRE = re.compile( - "\s*" # whitespace - "([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier) - "\s*:\s*" # whitespace : whitespace - "([cbBhHiIlLqQfd]|[0-9]+[ps]|" # formatchar... 
- "([0-9]+)\.([0-9]+)(F))" # ...formatchar - "\s*" # whitespace - "(#.*)?$" # [comment] + end of string - ) - -# matches the special struct fmt chars and 'x' (pad byte) -_extraRE = re.compile("\s*([x@=<>!])\s*(#.*)?$") - -# matches an "empty" string, possibly containing whitespace and/or a comment -_emptyRE = re.compile("\s*(#.*)?$") - -_fixedpointmappings = { - 8: "b", - 16: "h", - 32: "l"} - -_formatcache = {} - -def getformat(fmt): - try: - formatstring, names, fixes = _formatcache[fmt] - except KeyError: - lines = re.split("[\n;]", fmt) - formatstring = "" - names = [] - fixes = {} - for line in lines: - if _emptyRE.match(line): - continue - m = _extraRE.match(line) - if m: - formatchar = m.group(1) - if formatchar != 'x' and formatstring: - raise Error("a special fmt char must be first") - else: - m = _elementRE.match(line) - if not m: - raise Error("syntax error in fmt: '%s'" % line) - name = m.group(1) - names.append(name) - formatchar = m.group(2) - if m.group(3): - # fixed point - before = int(m.group(3)) - after = int(m.group(4)) - bits = before + after - if bits not in [8, 16, 32]: - raise Error("fixed point must be 8, 16 or 32 bits long") - formatchar = _fixedpointmappings[bits] - assert m.group(5) == "F" - fixes[name] = after - formatstring = formatstring + formatchar - _formatcache[fmt] = formatstring, names, fixes - return formatstring, names, fixes - -def _test(): - fmt = """ - # comments are allowed - > # big endian (see documentation for struct) - # empty lines are allowed: - - ashort: h - along: l - abyte: b # a byte - achar: c - astr: 5s - afloat: f; adouble: d # multiple "statements" are allowed - afixed: 16.16F - """ - - print('size:', calcsize(fmt)) - - class foo(object): - pass - - i = foo() - - i.ashort = 0x7fff - i.along = 0x7fffffff - i.abyte = 0x7f - i.achar = "a" - i.astr = "12345" - i.afloat = 0.5 - i.adouble = 0.5 - i.afixed = 1.5 - - data = pack(fmt, i) - print('data:', repr(data)) - print(unpack(fmt, data)) - i2 = foo() - 
unpack(fmt, data, i2) - print(vars(i2)) - -if __name__ == "__main__": - _test() diff -Nru fonttools-3.0/Tools/fontTools/misc/textTools.py fonttools-3.21.2/Tools/fontTools/misc/textTools.py --- fonttools-3.0/Tools/fontTools/misc/textTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/textTools.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -"""fontTools.misc.textTools.py -- miscellaneous routines.""" - - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import string - - -def safeEval(data, eval=eval): - """A (kindof) safe replacement for eval.""" - return eval(data, {"__builtins__":{"True":True,"False":False}}) - - -def readHex(content): - """Convert a list of hex strings to binary data.""" - return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, basestring))) - -def deHexStr(hexdata): - """Convert a hex string to binary data.""" - hexdata = strjoin(hexdata.split()) - if len(hexdata) % 2: - hexdata = hexdata + "0" - data = [] - for i in range(0, len(hexdata), 2): - data.append(bytechr(int(hexdata[i:i+2], 16))) - return bytesjoin(data) - - -def hexStr(data): - """Convert binary data to a hex string.""" - h = string.hexdigits - r = '' - for c in data: - i = byteord(c) - r = r + h[(i >> 4) & 0xF] + h[i & 0xF] - return r - - -def num2binary(l, bits=32): - items = [] - binary = "" - for i in range(bits): - if l & 0x1: - binary = "1" + binary - else: - binary = "0" + binary - l = l >> 1 - if not ((i+1) % 8): - items.append(binary) - binary = "" - if binary: - items.append(binary) - items.reverse() - assert l in (0, -1), "number doesn't fit in number of bits" - return ' '.join(items) - - -def binary2num(bin): - bin = strjoin(bin.split()) - l = 0 - for digit in bin: - l = l << 1 - if digit != "0": - l = l | 0x1 - return l - - -def caselessSort(alist): - """Return a sorted copy of a list. If there are only strings - in the list, it will not consider case. 
- """ - - try: - return sorted(alist, key=lambda a: (a.lower(), a)) - except TypeError: - return sorted(alist) - - -def pad(data, size): - r""" Pad byte string 'data' with null bytes until its length is a - multiple of 'size'. - - >>> len(pad(b'abcd', 4)) - 4 - >>> len(pad(b'abcde', 2)) - 6 - >>> len(pad(b'abcde', 4)) - 8 - >>> pad(b'abcdef', 4) == b'abcdef\x00\x00' - True - """ - data = tobytes(data) - if size > 1: - while len(data) % size != 0: - data += b"\0" - return data - - -if __name__ == "__main__": - import doctest - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Tools/fontTools/misc/timeTools.py fonttools-3.21.2/Tools/fontTools/misc/timeTools.py --- fonttools-3.0/Tools/fontTools/misc/timeTools.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/timeTools.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -"""fontTools.misc.timeTools.py -- tools for working with OpenType timestamps. -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import time -import calendar - - -epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0)) - -def timestampToString(value): - return time.asctime(time.gmtime(max(0, value + epoch_diff))) - -def timestampFromString(value): - return calendar.timegm(time.strptime(value)) - epoch_diff - -def timestampNow(): - return int(time.time() - epoch_diff) - -def timestampSinceEpoch(value): - return int(value - epoch_diff) diff -Nru fonttools-3.0/Tools/fontTools/misc/transform.py fonttools-3.21.2/Tools/fontTools/misc/transform.py --- fonttools-3.0/Tools/fontTools/misc/transform.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/transform.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,357 +0,0 @@ -"""Affine 2D transformation matrix class. - -The Transform class implements various transformation matrix operations, -both on the matrix itself, as well as on 2D coordinates. 
- -Transform instances are effectively immutable: all methods that operate on the -transformation itself always return a new instance. This has as the -interesting side effect that Transform instances are hashable, ie. they can be -used as dictionary keys. - -This module exports the following symbols: - - Transform -- this is the main class - Identity -- Transform instance set to the identity transformation - Offset -- Convenience function that returns a translating transformation - Scale -- Convenience function that returns a scaling transformation - -Examples: - - >>> t = Transform(2, 0, 0, 3, 0, 0) - >>> t.transformPoint((100, 100)) - (200, 300) - >>> t = Scale(2, 3) - >>> t.transformPoint((100, 100)) - (200, 300) - >>> t.transformPoint((0, 0)) - (0, 0) - >>> t = Offset(2, 3) - >>> t.transformPoint((100, 100)) - (102, 103) - >>> t.transformPoint((0, 0)) - (2, 3) - >>> t2 = t.scale(0.5) - >>> t2.transformPoint((100, 100)) - (52.0, 53.0) - >>> import math - >>> t3 = t2.rotate(math.pi / 2) - >>> t3.transformPoint((0, 0)) - (2.0, 3.0) - >>> t3.transformPoint((100, 100)) - (-48.0, 53.0) - >>> t = Identity.scale(0.5).translate(100, 200).skew(0.1, 0.2) - >>> t.transformPoints([(0, 0), (1, 1), (100, 100)]) - [(50.0, 100.0), (50.550167336042726, 100.60135501775433), (105.01673360427253, 160.13550177543362)] - >>> -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -__all__ = ["Transform", "Identity", "Offset", "Scale"] - - -_EPSILON = 1e-15 -_ONE_EPSILON = 1 - _EPSILON -_MINUS_ONE_EPSILON = -1 + _EPSILON - - -def _normSinCos(v): - if abs(v) < _EPSILON: - v = 0 - elif v > _ONE_EPSILON: - v = 1 - elif v < _MINUS_ONE_EPSILON: - v = -1 - return v - - -class Transform(object): - - """2x2 transformation matrix plus offset, a.k.a. Affine transform. - Transform instances are immutable: all transforming methods, eg. - rotate(), return a new Transform instance. 
- - Examples: - >>> t = Transform() - >>> t - - >>> t.scale(2) - - >>> t.scale(2.5, 5.5) - - >>> - >>> t.scale(2, 3).transformPoint((100, 100)) - (200, 300) - """ - - def __init__(self, xx=1, xy=0, yx=0, yy=1, dx=0, dy=0): - """Transform's constructor takes six arguments, all of which are - optional, and can be used as keyword arguments: - >>> Transform(12) - - >>> Transform(dx=12) - - >>> Transform(yx=12) - - >>> - """ - self.__affine = xx, xy, yx, yy, dx, dy - - def transformPoint(self, p): - """Transform a point. - - Example: - >>> t = Transform() - >>> t = t.scale(2.5, 5.5) - >>> t.transformPoint((100, 100)) - (250.0, 550.0) - """ - (x, y) = p - xx, xy, yx, yy, dx, dy = self.__affine - return (xx*x + yx*y + dx, xy*x + yy*y + dy) - - def transformPoints(self, points): - """Transform a list of points. - - Example: - >>> t = Scale(2, 3) - >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)]) - [(0, 0), (0, 300), (200, 300), (200, 0)] - >>> - """ - xx, xy, yx, yy, dx, dy = self.__affine - return [(xx*x + yx*y + dx, xy*x + yy*y + dy) for x, y in points] - - def translate(self, x=0, y=0): - """Return a new transformation, translated (offset) by x, y. - - Example: - >>> t = Transform() - >>> t.translate(20, 30) - - >>> - """ - return self.transform((1, 0, 0, 1, x, y)) - - def scale(self, x=1, y=None): - """Return a new transformation, scaled by x, y. The 'y' argument - may be None, which implies to use the x value for y as well. - - Example: - >>> t = Transform() - >>> t.scale(5) - - >>> t.scale(5, 6) - - >>> - """ - if y is None: - y = x - return self.transform((x, 0, 0, y, 0, 0)) - - def rotate(self, angle): - """Return a new transformation, rotated by 'angle' (radians). 
- - Example: - >>> import math - >>> t = Transform() - >>> t.rotate(math.pi / 2) - - >>> - """ - import math - c = _normSinCos(math.cos(angle)) - s = _normSinCos(math.sin(angle)) - return self.transform((c, s, -s, c, 0, 0)) - - def skew(self, x=0, y=0): - """Return a new transformation, skewed by x and y. - - Example: - >>> import math - >>> t = Transform() - >>> t.skew(math.pi / 4) - - >>> - """ - import math - return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0)) - - def transform(self, other): - """Return a new transformation, transformed by another - transformation. - - Example: - >>> t = Transform(2, 0, 0, 3, 1, 6) - >>> t.transform((4, 3, 2, 1, 5, 6)) - - >>> - """ - xx1, xy1, yx1, yy1, dx1, dy1 = other - xx2, xy2, yx2, yy2, dx2, dy2 = self.__affine - return self.__class__( - xx1*xx2 + xy1*yx2, - xx1*xy2 + xy1*yy2, - yx1*xx2 + yy1*yx2, - yx1*xy2 + yy1*yy2, - xx2*dx1 + yx2*dy1 + dx2, - xy2*dx1 + yy2*dy1 + dy2) - - def reverseTransform(self, other): - """Return a new transformation, which is the other transformation - transformed by self. self.reverseTransform(other) is equivalent to - other.transform(self). - - Example: - >>> t = Transform(2, 0, 0, 3, 1, 6) - >>> t.reverseTransform((4, 3, 2, 1, 5, 6)) - - >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6)) - - >>> - """ - xx1, xy1, yx1, yy1, dx1, dy1 = self.__affine - xx2, xy2, yx2, yy2, dx2, dy2 = other - return self.__class__( - xx1*xx2 + xy1*yx2, - xx1*xy2 + xy1*yy2, - yx1*xx2 + yy1*yx2, - yx1*xy2 + yy1*yy2, - xx2*dx1 + yx2*dy1 + dx2, - xy2*dx1 + yy2*dy1 + dy2) - - def inverse(self): - """Return the inverse transformation. 
- - Example: - >>> t = Identity.translate(2, 3).scale(4, 5) - >>> t.transformPoint((10, 20)) - (42, 103) - >>> it = t.inverse() - >>> it.transformPoint((42, 103)) - (10.0, 20.0) - >>> - """ - if self.__affine == (1, 0, 0, 1, 0, 0): - return self - xx, xy, yx, yy, dx, dy = self.__affine - det = xx*yy - yx*xy - xx, xy, yx, yy = yy/det, -xy/det, -yx/det, xx/det - dx, dy = -xx*dx - yx*dy, -xy*dx - yy*dy - return self.__class__(xx, xy, yx, yy, dx, dy) - - def toPS(self): - """Return a PostScript representation: - >>> t = Identity.scale(2, 3).translate(4, 5) - >>> t.toPS() - '[2 0 0 3 8 15]' - >>> - """ - return "[%s %s %s %s %s %s]" % self.__affine - - def __len__(self): - """Transform instances also behave like sequences of length 6: - >>> len(Identity) - 6 - >>> - """ - return 6 - - def __getitem__(self, index): - """Transform instances also behave like sequences of length 6: - >>> list(Identity) - [1, 0, 0, 1, 0, 0] - >>> tuple(Identity) - (1, 0, 0, 1, 0, 0) - >>> - """ - return self.__affine[index] - - def __ne__(self, other): - return not self.__eq__(other) - def __eq__(self, other): - """Transform instances are comparable: - >>> t1 = Identity.scale(2, 3).translate(4, 6) - >>> t2 = Identity.translate(8, 18).scale(2, 3) - >>> t1 == t2 - 1 - >>> - - But beware of floating point rounding errors: - >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) - >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) - >>> t1 - - >>> t2 - - >>> t1 == t2 - 0 - >>> - """ - xx1, xy1, yx1, yy1, dx1, dy1 = self.__affine - xx2, xy2, yx2, yy2, dx2, dy2 = other - return (xx1, xy1, yx1, yy1, dx1, dy1) == \ - (xx2, xy2, yx2, yy2, dx2, dy2) - - def __hash__(self): - """Transform instances are hashable, meaning you can use them as - keys in dictionaries: - >>> d = {Scale(12, 13): None} - >>> d - {: None} - >>> - - But again, beware of floating point rounding errors: - >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) - >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) - >>> t1 - 
- >>> t2 - - >>> d = {t1: None} - >>> d - {: None} - >>> d[t2] - Traceback (most recent call last): - File "", line 1, in ? - KeyError: - >>> - """ - return hash(self.__affine) - - def __repr__(self): - return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) \ - + self.__affine) - - -Identity = Transform() - -def Offset(x=0, y=0): - """Return the identity transformation offset by x, y. - - Example: - >>> Offset(2, 3) - - >>> - """ - return Transform(1, 0, 0, 1, x, y) - -def Scale(x, y=None): - """Return the identity transformation scaled by x, y. The 'y' argument - may be None, which implies to use the x value for y as well. - - Example: - >>> Scale(2, 3) - - >>> - """ - if y is None: - y = x - return Transform(x, 0, 0, y, 0, 0) - - -if __name__ == "__main__": - import sys - import doctest - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Tools/fontTools/misc/xmlReader.py fonttools-3.21.2/Tools/fontTools/misc/xmlReader.py --- fonttools-3.0/Tools/fontTools/misc/xmlReader.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/xmlReader.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,131 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib -from fontTools.misc.textTools import safeEval -from fontTools.ttLib.tables.DefaultTable import DefaultTable -import os - - -class TTXParseError(Exception): pass - -BUFSIZE = 0x4000 - - -class XMLReader(object): - - def __init__(self, fileName, ttFont, progress=None, quiet=False): - self.ttFont = ttFont - self.fileName = fileName - self.progress = progress - self.quiet = quiet - self.root = None - self.contentStack = [] - self.stackSize = 0 - - def read(self): - if self.progress: - import stat - self.progress.set(0, os.stat(self.fileName)[stat.ST_SIZE] // 100 or 1) - file = open(self.fileName, 'rb') - self._parseFile(file) - file.close() - - def _parseFile(self, file): - from xml.parsers.expat import 
ParserCreate - parser = ParserCreate() - parser.StartElementHandler = self._startElementHandler - parser.EndElementHandler = self._endElementHandler - parser.CharacterDataHandler = self._characterDataHandler - - pos = 0 - while True: - chunk = file.read(BUFSIZE) - if not chunk: - parser.Parse(chunk, 1) - break - pos = pos + len(chunk) - if self.progress: - self.progress.set(pos // 100) - parser.Parse(chunk, 0) - - def _startElementHandler(self, name, attrs): - stackSize = self.stackSize - self.stackSize = stackSize + 1 - if not stackSize: - if name != "ttFont": - raise TTXParseError("illegal root tag: %s" % name) - sfntVersion = attrs.get("sfntVersion") - if sfntVersion is not None: - if len(sfntVersion) != 4: - sfntVersion = safeEval('"' + sfntVersion + '"') - self.ttFont.sfntVersion = sfntVersion - self.contentStack.append([]) - elif stackSize == 1: - subFile = attrs.get("src") - if subFile is not None: - subFile = os.path.join(os.path.dirname(self.fileName), subFile) - subReader = XMLReader(subFile, self.ttFont, self.progress, self.quiet) - subReader.read() - self.contentStack.append([]) - return - tag = ttLib.xmlToTag(name) - msg = "Parsing '%s' table..." % tag - if self.progress: - self.progress.setlabel(msg) - elif self.ttFont.verbose: - ttLib.debugmsg(msg) - else: - if not self.quiet: - print(msg) - if tag == "GlyphOrder": - tableClass = ttLib.GlyphOrder - elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])): - tableClass = DefaultTable - else: - tableClass = ttLib.getTableClass(tag) - if tableClass is None: - tableClass = DefaultTable - if tag == 'loca' and tag in self.ttFont: - # Special-case the 'loca' table as we need the - # original if the 'glyf' table isn't recompiled. 
- self.currentTable = self.ttFont[tag] - else: - self.currentTable = tableClass(tag) - self.ttFont[tag] = self.currentTable - self.contentStack.append([]) - elif stackSize == 2: - self.contentStack.append([]) - self.root = (name, attrs, self.contentStack[-1]) - else: - l = [] - self.contentStack[-1].append((name, attrs, l)) - self.contentStack.append(l) - - def _characterDataHandler(self, data): - if self.stackSize > 1: - self.contentStack[-1].append(data) - - def _endElementHandler(self, name): - self.stackSize = self.stackSize - 1 - del self.contentStack[-1] - if self.stackSize == 1: - self.root = None - elif self.stackSize == 2: - name, attrs, content = self.root - self.currentTable.fromXML(name, attrs, content, self.ttFont) - self.root = None - - -class ProgressPrinter(object): - - def __init__(self, title, maxval=100): - print(title) - - def set(self, val, maxval=None): - pass - - def increment(self, val=1): - pass - - def setLabel(self, text): - print(text) diff -Nru fonttools-3.0/Tools/fontTools/misc/xmlReader_test.py fonttools-3.21.2/Tools/fontTools/misc/xmlReader_test.py --- fonttools-3.0/Tools/fontTools/misc/xmlReader_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/xmlReader_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -import os -import unittest -from fontTools.ttLib import TTFont -from .xmlReader import XMLReader -import tempfile - - -class TestXMLReader(unittest.TestCase): - - def test_decode_utf8(self): - - class DebugXMLReader(XMLReader): - - def __init__(self, fileName, ttFont, progress=None, quiet=False): - super(DebugXMLReader, self).__init__( - fileName, ttFont, progress, quiet) - self.contents = [] - - def _endElementHandler(self, name): - if self.stackSize == 3: - name, attrs, content = self.root - self.contents.append(content) - 
super(DebugXMLReader, self)._endElementHandler(name) - - expected = 'fôôbär' - data = '''\ - - - - - %s - - - -''' % expected - - with tempfile.NamedTemporaryFile(delete=False) as tmp: - tmp.write(data.encode('utf-8')) - reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) - reader.read() - os.remove(tmp.name) - content = strjoin(reader.contents[0]).strip() - self.assertEqual(expected, content) - - def test_normalise_newlines(self): - - class DebugXMLReader(XMLReader): - - def __init__(self, fileName, ttFont, progress=None, quiet=False): - super(DebugXMLReader, self).__init__( - fileName, ttFont, progress, quiet) - self.newlines = [] - - def _characterDataHandler(self, data): - self.newlines.extend([c for c in data if c in ('\r', '\n')]) - - # notice how when CR is escaped, it is not normalised by the XML parser - data = ( - '\r' # \r -> \n - ' \r\n' # \r\n -> \n - ' a line of text\n' # \n - ' escaped CR and unix newline \n' # \n -> \r\n - ' escaped CR and macintosh newline \r' # \r -> \r\n - ' escaped CR and windows newline \r\n' # \r\n -> \r\n - ' \n' # \n - '') - with tempfile.NamedTemporaryFile(delete=False) as tmp: - tmp.write(data.encode('utf-8')) - reader = DebugXMLReader(tmp.name, TTFont(), quiet=True) - reader.read() - os.remove(tmp.name) - expected = ['\n'] * 3 + ['\r', '\n'] * 3 + ['\n'] - self.assertEqual(expected, reader.newlines) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/misc/xmlWriter.py fonttools-3.21.2/Tools/fontTools/misc/xmlWriter.py --- fonttools-3.0/Tools/fontTools/misc/xmlWriter.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/xmlWriter.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,180 +0,0 @@ -"""xmlWriter.py -- Simple XML authoring class""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import sys -import os -import string - -INDENT = " " - - -class XMLWriter(object): - - def __init__(self, fileOrPath, 
indentwhite=INDENT, idlefunc=None, encoding="utf_8"): - if encoding.lower().replace('-','').replace('_','') != 'utf8': - raise Exception('Only UTF-8 encoding is supported.') - if fileOrPath == '-': - fileOrPath = sys.stdout - if not hasattr(fileOrPath, "write"): - self.file = open(fileOrPath, "wb") - else: - # assume writable file object - self.file = fileOrPath - - # Figure out if writer expects bytes or unicodes - try: - # The bytes check should be first. See: - # https://github.com/behdad/fonttools/pull/233 - self.file.write(b'') - self.totype = tobytes - except TypeError: - # This better not fail. - self.file.write(tounicode('')) - self.totype = tounicode - self.indentwhite = self.totype(indentwhite) - self.newlinestr = self.totype(os.linesep) - self.indentlevel = 0 - self.stack = [] - self.needindent = 1 - self.idlefunc = idlefunc - self.idlecounter = 0 - self._writeraw('') - self.newline() - - def close(self): - self.file.close() - - def write(self, string, indent=True): - """Writes text.""" - self._writeraw(escape(string), indent=indent) - - def writecdata(self, string): - """Writes text in a CDATA section.""" - self._writeraw("") - - def write8bit(self, data, strip=False): - """Writes a bytes() sequence into the XML, escaping - non-ASCII bytes. 
When this is read in xmlReader, - the original bytes can be recovered by encoding to - 'latin-1'.""" - self._writeraw(escape8bit(data), strip=strip) - - def write_noindent(self, string): - """Writes text without indentation.""" - self._writeraw(escape(string), indent=False) - - def _writeraw(self, data, indent=True, strip=False): - """Writes bytes, possibly indented.""" - if indent and self.needindent: - self.file.write(self.indentlevel * self.indentwhite) - self.needindent = 0 - s = self.totype(data, encoding="utf_8") - if (strip): - s = s.strip() - self.file.write(s) - - def newline(self): - self.file.write(self.newlinestr) - self.needindent = 1 - idlecounter = self.idlecounter - if not idlecounter % 100 and self.idlefunc is not None: - self.idlefunc() - self.idlecounter = idlecounter + 1 - - def comment(self, data): - data = escape(data) - lines = data.split("\n") - self._writeraw("") - - def simpletag(self, _TAG_, *args, **kwargs): - attrdata = self.stringifyattrs(*args, **kwargs) - data = "<%s%s/>" % (_TAG_, attrdata) - self._writeraw(data) - - def begintag(self, _TAG_, *args, **kwargs): - attrdata = self.stringifyattrs(*args, **kwargs) - data = "<%s%s>" % (_TAG_, attrdata) - self._writeraw(data) - self.stack.append(_TAG_) - self.indent() - - def endtag(self, _TAG_): - assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag" - del self.stack[-1] - self.dedent() - data = "" % _TAG_ - self._writeraw(data) - - def dumphex(self, data): - linelength = 16 - hexlinelength = linelength * 2 - chunksize = 8 - for i in range(0, len(data), linelength): - hexline = hexStr(data[i:i+linelength]) - line = "" - white = "" - for j in range(0, hexlinelength, chunksize): - line = line + white + hexline[j:j+chunksize] - white = " " - self._writeraw(line) - self.newline() - - def indent(self): - self.indentlevel = self.indentlevel + 1 - - def dedent(self): - assert self.indentlevel > 0 - self.indentlevel = self.indentlevel - 1 - - def stringifyattrs(self, *args, 
**kwargs): - if kwargs: - assert not args - attributes = sorted(kwargs.items()) - elif args: - assert len(args) == 1 - attributes = args[0] - else: - return "" - data = "" - for attr, value in attributes: - if not isinstance(value, (bytes, unicode)): - value = str(value) - data = data + ' %s="%s"' % (attr, escapeattr(value)) - return data - - -def escape(data): - data = tostr(data, 'utf_8') - data = data.replace("&", "&") - data = data.replace("<", "<") - data = data.replace(">", ">") - data = data.replace("\r", " ") - return data - -def escapeattr(data): - data = escape(data) - data = data.replace('"', """) - return data - -def escape8bit(data): - """Input is Unicode string.""" - def escapechar(c): - n = ord(c) - if 32 <= n <= 127 and c not in "<&>": - return c - else: - return "&#" + repr(n) + ";" - return strjoin(map(escapechar, data.decode('latin-1'))) - -def hexStr(s): - h = string.hexdigits - r = '' - for c in s: - i = byteord(c) - r = r + h[(i >> 4) & 0xF] + h[i & 0xF] - return r diff -Nru fonttools-3.0/Tools/fontTools/misc/xmlWriter_test.py fonttools-3.21.2/Tools/fontTools/misc/xmlWriter_test.py --- fonttools-3.0/Tools/fontTools/misc/xmlWriter_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/misc/xmlWriter_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import os -import unittest -from .xmlWriter import XMLWriter - -linesep = tobytes(os.linesep) -HEADER = b'' + linesep - -class TestXMLWriter(unittest.TestCase): - - def test_comment_escaped(self): - writer = XMLWriter(BytesIO()) - writer.comment("This&that are ") - self.assertEqual(HEADER + b"", writer.file.getvalue()) - - def test_comment_multiline(self): - writer = XMLWriter(BytesIO()) - writer.comment("Hello world\nHow are you?") - self.assertEqual(HEADER + b"", - writer.file.getvalue()) - - def test_encoding_default(self): - writer = XMLWriter(BytesIO()) - 
self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_utf8(self): - # https://github.com/behdad/fonttools/issues/246 - writer = XMLWriter(BytesIO(), encoding="utf8") - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_UTF_8(self): - # https://github.com/behdad/fonttools/issues/246 - writer = XMLWriter(BytesIO(), encoding="UTF-8") - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_UTF8(self): - # https://github.com/behdad/fonttools/issues/246 - writer = XMLWriter(BytesIO(), encoding="UTF8") - self.assertEqual(b'' + linesep, - writer.file.getvalue()) - - def test_encoding_other(self): - self.assertRaises(Exception, XMLWriter, BytesIO(), - encoding="iso-8859-1") - - def test_write(self): - writer = XMLWriter(BytesIO()) - writer.write("foo&bar") - self.assertEqual(HEADER + b"foo&bar", writer.file.getvalue()) - - def test_indent_dedent(self): - writer = XMLWriter(BytesIO()) - writer.write("foo") - writer.newline() - writer.indent() - writer.write("bar") - writer.newline() - writer.dedent() - writer.write("baz") - self.assertEqual(HEADER + bytesjoin(["foo", " bar", "baz"], linesep), - writer.file.getvalue()) - - def test_writecdata(self): - writer = XMLWriter(BytesIO()) - writer.writecdata("foo&bar") - self.assertEqual(HEADER + b"", writer.file.getvalue()) - - def test_simpletag(self): - writer = XMLWriter(BytesIO()) - writer.simpletag("tag", a="1", b="2") - self.assertEqual(HEADER + b'', writer.file.getvalue()) - - def test_begintag_endtag(self): - writer = XMLWriter(BytesIO()) - writer.begintag("tag", attr="value") - writer.write("content") - writer.endtag("tag") - self.assertEqual(HEADER + b'content', writer.file.getvalue()) - - def test_dumphex(self): - writer = XMLWriter(BytesIO()) - writer.dumphex("Type is a beautiful group of letters, not a group of beautiful letters.") - self.assertEqual(HEADER + bytesjoin([ - "54797065 20697320 61206265 61757469", - "66756c20 67726f75 70206f66 
206c6574", - "74657273 2c206e6f 74206120 67726f75", - "70206f66 20626561 75746966 756c206c", - "65747465 72732e ", ""], joiner=linesep), writer.file.getvalue()) - - def test_stringifyattrs(self): - writer = XMLWriter(BytesIO()) - expected = ' attr="0"' - self.assertEqual(expected, writer.stringifyattrs(attr=0)) - self.assertEqual(expected, writer.stringifyattrs(attr=b'0')) - self.assertEqual(expected, writer.stringifyattrs(attr='0')) - self.assertEqual(expected, writer.stringifyattrs(attr=u'0')) - - def test_carriage_return_escaped(self): - writer = XMLWriter(BytesIO()) - writer.write("two lines\r\nseparated by Windows line endings") - self.assertEqual( - HEADER + b'two lines \nseparated by Windows line endings', - writer.file.getvalue()) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/pens/basePen.py fonttools-3.21.2/Tools/fontTools/pens/basePen.py --- fonttools-3.0/Tools/fontTools/pens/basePen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/basePen.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,363 +0,0 @@ -"""fontTools.pens.basePen.py -- Tools and base classes to build pen objects. - -The Pen Protocol - -A Pen is a kind of object that standardizes the way how to "draw" outlines: -it is a middle man between an outline and a drawing. In other words: -it is an abstraction for drawing outlines, making sure that outline objects -don't need to know the details about how and where they're being drawn, and -that drawings don't need to know the details of how outlines are stored. - -The most basic pattern is this: - - outline.draw(pen) # 'outline' draws itself onto 'pen' - -Pens can be used to render outlines to the screen, but also to construct -new outlines. Eg. an outline object can be both a drawable object (it has a -draw() method) as well as a pen itself: you *build* an outline using pen -methods. - -The AbstractPen class defines the Pen protocol. 
It implements almost -nothing (only no-op closePath() and endPath() methods), but is useful -for documentation purposes. Subclassing it basically tells the reader: -"this class implements the Pen protocol.". An examples of an AbstractPen -subclass is fontTools.pens.transformPen.TransformPen. - -The BasePen class is a base implementation useful for pens that actually -draw (for example a pen renders outlines using a native graphics engine). -BasePen contains a lot of base functionality, making it very easy to build -a pen that fully conforms to the pen protocol. Note that if you subclass -BasePen, you _don't_ override moveTo(), lineTo(), etc., but _moveTo(), -_lineTo(), etc. See the BasePen doc string for details. Examples of -BasePen subclasses are fontTools.pens.boundsPen.BoundsPen and -fontTools.pens.cocoaPen.CocoaPen. - -Coordinates are usually expressed as (x, y) tuples, but generally any -sequence of length 2 will do. -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -__all__ = ["AbstractPen", "NullPen", "BasePen", - "decomposeSuperBezierSegment", "decomposeQuadraticSegment"] - - -class AbstractPen(object): - - def moveTo(self, pt): - """Begin a new sub path, set the current point to 'pt'. You must - end each sub path with a call to pen.closePath() or pen.endPath(). - """ - raise NotImplementedError - - def lineTo(self, pt): - """Draw a straight line from the current point to 'pt'.""" - raise NotImplementedError - - def curveTo(self, *points): - """Draw a cubic bezier with an arbitrary number of control points. - - The last point specified is on-curve, all others are off-curve - (control) points. If the number of control points is > 2, the - segment is split into multiple bezier segments. This works - like this: - - Let n be the number of control points (which is the number of - arguments to this call minus 1). If n==2, a plain vanilla cubic - bezier is drawn. 
If n==1, we fall back to a quadratic segment and - if n==0 we draw a straight line. It gets interesting when n>2: - n-1 PostScript-style cubic segments will be drawn as if it were - one curve. See decomposeSuperBezierSegment(). - - The conversion algorithm used for n>2 is inspired by NURB - splines, and is conceptually equivalent to the TrueType "implied - points" principle. See also decomposeQuadraticSegment(). - """ - raise NotImplementedError - - def qCurveTo(self, *points): - """Draw a whole string of quadratic curve segments. - - The last point specified is on-curve, all others are off-curve - points. - - This method implements TrueType-style curves, breaking up curves - using 'implied points': between each two consequtive off-curve points, - there is one implied point exactly in the middle between them. See - also decomposeQuadraticSegment(). - - The last argument (normally the on-curve point) may be None. - This is to support contours that have NO on-curve points (a rarely - seen feature of TrueType outlines). - """ - raise NotImplementedError - - def closePath(self): - """Close the current sub path. You must call either pen.closePath() - or pen.endPath() after each sub path. - """ - pass - - def endPath(self): - """End the current sub path, but don't close it. You must call - either pen.closePath() or pen.endPath() after each sub path. - """ - pass - - def addComponent(self, glyphName, transformation): - """Add a sub glyph. The 'transformation' argument must be a 6-tuple - containing an affine transformation, or a Transform object from the - fontTools.misc.transform module. More precisely: it should be a - sequence containing 6 numbers. - """ - raise NotImplementedError - - -class NullPen(object): - - """A pen that does nothing. 
- """ - - def moveTo(self, pt): - pass - - def lineTo(self, pt): - pass - - def curveTo(self, *points): - pass - - def qCurveTo(self, *points): - pass - - def closePath(self): - pass - - def endPath(self): - pass - - def addComponent(self, glyphName, transformation): - pass - - -class BasePen(AbstractPen): - - """Base class for drawing pens. You must override _moveTo, _lineTo and - _curveToOne. You may additionally override _closePath, _endPath, - addComponent and/or _qCurveToOne. You should not override any other - methods. - """ - - def __init__(self, glyphSet): - self.glyphSet = glyphSet - self.__currentPoint = None - - # must override - - def _moveTo(self, pt): - raise NotImplementedError - - def _lineTo(self, pt): - raise NotImplementedError - - def _curveToOne(self, pt1, pt2, pt3): - raise NotImplementedError - - # may override - - def _closePath(self): - pass - - def _endPath(self): - pass - - def _qCurveToOne(self, pt1, pt2): - """This method implements the basic quadratic curve type. The - default implementation delegates the work to the cubic curve - function. Optionally override with a native implementation. - """ - pt0x, pt0y = self.__currentPoint - pt1x, pt1y = pt1 - pt2x, pt2y = pt2 - mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x) - mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y) - mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x) - mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y) - self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2) - - def addComponent(self, glyphName, transformation): - """This default implementation simply transforms the points - of the base glyph and draws it onto self. - """ - from fontTools.pens.transformPen import TransformPen - try: - glyph = self.glyphSet[glyphName] - except KeyError: - pass - else: - tPen = TransformPen(self, transformation) - glyph.draw(tPen) - - # don't override - - def _getCurrentPoint(self): - """Return the current point. 
This is not part of the public - interface, yet is useful for subclasses. - """ - return self.__currentPoint - - def closePath(self): - self._closePath() - self.__currentPoint = None - - def endPath(self): - self._endPath() - self.__currentPoint = None - - def moveTo(self, pt): - self._moveTo(pt) - self.__currentPoint = pt - - def lineTo(self, pt): - self._lineTo(pt) - self.__currentPoint = pt - - def curveTo(self, *points): - n = len(points) - 1 # 'n' is the number of control points - assert n >= 0 - if n == 2: - # The common case, we have exactly two BCP's, so this is a standard - # cubic bezier. Even though decomposeSuperBezierSegment() handles - # this case just fine, we special-case it anyway since it's so - # common. - self._curveToOne(*points) - self.__currentPoint = points[-1] - elif n > 2: - # n is the number of control points; split curve into n-1 cubic - # bezier segments. The algorithm used here is inspired by NURB - # splines and the TrueType "implied point" principle, and ensures - # the smoothest possible connection between two curve segments, - # with no disruption in the curvature. It is practical since it - # allows one to construct multiple bezier segments with a much - # smaller amount of points. - _curveToOne = self._curveToOne - for pt1, pt2, pt3 in decomposeSuperBezierSegment(points): - _curveToOne(pt1, pt2, pt3) - self.__currentPoint = pt3 - elif n == 1: - self.qCurveTo(*points) - elif n == 0: - self.lineTo(points[0]) - else: - raise AssertionError("can't get there from here") - - def qCurveTo(self, *points): - n = len(points) - 1 # 'n' is the number of control points - assert n >= 0 - if points[-1] is None: - # Special case for TrueType quadratics: it is possible to - # define a contour with NO on-curve points. BasePen supports - # this by allowing the final argument (the expected on-curve - # point) to be None. We simulate the feature by making the implied - # on-curve point between the last and the first off-curve points - # explicit. 
- x, y = points[-2] # last off-curve point - nx, ny = points[0] # first off-curve point - impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny)) - self.__currentPoint = impliedStartPoint - self._moveTo(impliedStartPoint) - points = points[:-1] + (impliedStartPoint,) - if n > 0: - # Split the string of points into discrete quadratic curve - # segments. Between any two consecutive off-curve points - # there's an implied on-curve point exactly in the middle. - # This is where the segment splits. - _qCurveToOne = self._qCurveToOne - for pt1, pt2 in decomposeQuadraticSegment(points): - _qCurveToOne(pt1, pt2) - self.__currentPoint = pt2 - else: - self.lineTo(points[0]) - - -def decomposeSuperBezierSegment(points): - """Split the SuperBezier described by 'points' into a list of regular - bezier segments. The 'points' argument must be a sequence with length - 3 or greater, containing (x, y) coordinates. The last point is the - destination on-curve point, the rest of the points are off-curve points. - The start point should not be supplied. - - This function returns a list of (pt1, pt2, pt3) tuples, which each - specify a regular curveto-style bezier segment. - """ - n = len(points) - 1 - assert n > 1 - bezierSegments = [] - pt1, pt2, pt3 = points[0], None, None - for i in range(2, n+1): - # calculate points in between control points. - nDivisions = min(i, 3, n-i+2) - for j in range(1, nDivisions): - factor = j / nDivisions - temp1 = points[i-1] - temp2 = points[i-2] - temp = (temp2[0] + factor * (temp1[0] - temp2[0]), - temp2[1] + factor * (temp1[1] - temp2[1])) - if pt2 is None: - pt2 = temp - else: - pt3 = (0.5 * (pt2[0] + temp[0]), - 0.5 * (pt2[1] + temp[1])) - bezierSegments.append((pt1, pt2, pt3)) - pt1, pt2, pt3 = temp, None, None - bezierSegments.append((pt1, points[-2], points[-1])) - return bezierSegments - - -def decomposeQuadraticSegment(points): - """Split the quadratic curve segment described by 'points' into a list - of "atomic" quadratic segments. 
The 'points' argument must be a sequence - with length 2 or greater, containing (x, y) coordinates. The last point - is the destination on-curve point, the rest of the points are off-curve - points. The start point should not be supplied. - - This function returns a list of (pt1, pt2) tuples, which each specify a - plain quadratic bezier segment. - """ - n = len(points) - 1 - assert n > 0 - quadSegments = [] - for i in range(n - 1): - x, y = points[i] - nx, ny = points[i+1] - impliedPt = (0.5 * (x + nx), 0.5 * (y + ny)) - quadSegments.append((points[i], impliedPt)) - quadSegments.append((points[-2], points[-1])) - return quadSegments - - -class _TestPen(BasePen): - """Test class that prints PostScript to stdout.""" - def _moveTo(self, pt): - print("%s %s moveto" % (pt[0], pt[1])) - def _lineTo(self, pt): - print("%s %s lineto" % (pt[0], pt[1])) - def _curveToOne(self, bcp1, bcp2, pt): - print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1], - bcp2[0], bcp2[1], pt[0], pt[1])) - def _closePath(self): - print("closepath") - - -if __name__ == "__main__": - pen = _TestPen(None) - pen.moveTo((0, 0)) - pen.lineTo((0, 100)) - pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) - pen.closePath() - - pen = _TestPen(None) - # testing the "no on-curve point" scenario - pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None) - pen.closePath() diff -Nru fonttools-3.0/Tools/fontTools/pens/basePen_test.py fonttools-3.21.2/Tools/fontTools/pens/basePen_test.py --- fonttools-3.0/Tools/fontTools/pens/basePen_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/basePen_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,171 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.basePen import \ - BasePen, decomposeSuperBezierSegment, decomposeQuadraticSegment -import unittest - - -class _TestPen(BasePen): - def __init__(self): - BasePen.__init__(self, glyphSet={}) - self._commands 
= [] - - def __repr__(self): - return " ".join(self._commands) - - def getCurrentPoint(self): - return self._getCurrentPoint() - - def _moveTo(self, pt): - self._commands.append("%s %s moveto" % (pt[0], pt[1])) - - def _lineTo(self, pt): - self._commands.append("%s %s lineto" % (pt[0], pt[1])) - - def _curveToOne(self, bcp1, bcp2, pt): - self._commands.append("%s %s %s %s %s %s curveto" % - (bcp1[0], bcp1[1], - bcp2[0], bcp2[1], - pt[0], pt[1])) - - def _closePath(self): - self._commands.append("closepath") - - def _endPath(self): - self._commands.append("endpath") - - -class _TestGlyph: - def draw(self, pen): - pen.moveTo((0.0, 0.0)) - pen.lineTo((0.0, 100.0)) - pen.curveTo((50.0, 75.0), (60.0, 50.0), (50.0, 25.0), (0.0, 0.0)) - pen.closePath() - - -class BasePenTest(unittest.TestCase): - def test_moveTo(self): - pen = _TestPen() - pen.moveTo((0.5, -4.3)) - self.assertEqual("0.5 -4.3 moveto", repr(pen)) - self.assertEqual((0.5, -4.3), pen.getCurrentPoint()) - - def test_lineTo(self): - pen = _TestPen() - pen.moveTo((4, 5)) - pen.lineTo((7, 8)) - self.assertEqual("4 5 moveto 7 8 lineto", repr(pen)) - self.assertEqual((7, 8), pen.getCurrentPoint()) - - def test_curveTo_zeroPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - self.assertRaises(AssertionError, pen.curveTo) - - def test_curveTo_onePoint(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.curveTo((1.0, 1.1)) - self.assertEqual("0.0 0.0 moveto 1.0 1.1 lineto", repr(pen)) - self.assertEqual((1.0, 1.1), pen.getCurrentPoint()) - - def test_curveTo_twoPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.curveTo((6.0, 3.0), (3.0, 6.0)) - self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", - repr(pen)) - self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) - - def test_curveTo_manyPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.curveTo((1.0, 1.1), (2.0, 2.1), (3.0, 3.1), (4.0, 4.1)) - self.assertEqual("0.0 0.0 moveto " - "1.0 1.1 1.5 1.6 2.0 2.1 curveto " - 
"2.5 2.6 3.0 3.1 4.0 4.1 curveto", repr(pen)) - self.assertEqual((4.0, 4.1), pen.getCurrentPoint()) - - def test_qCurveTo_zeroPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - self.assertRaises(AssertionError, pen.qCurveTo) - - def test_qCurveTo_onePoint(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.qCurveTo((77.7, 99.9)) - self.assertEqual("0.0 0.0 moveto 77.7 99.9 lineto", repr(pen)) - self.assertEqual((77.7, 99.9), pen.getCurrentPoint()) - - def test_qCurveTo_manyPoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.qCurveTo((6.0, 3.0), (3.0, 6.0)) - self.assertEqual("0.0 0.0 moveto 4.0 2.0 5.0 4.0 3.0 6.0 curveto", - repr(pen)) - self.assertEqual((3.0, 6.0), pen.getCurrentPoint()) - - def test_qCurveTo_onlyOffCurvePoints(self): - pen = _TestPen() - pen.moveTo((0.0, 0.0)) - pen.qCurveTo((6.0, -6.0), (12.0, 12.0), (18.0, -18.0), None) - self.assertEqual("0.0 0.0 moveto " - "12.0 -12.0 moveto " - "8.0 -8.0 7.0 -3.0 9.0 3.0 curveto " - "11.0 9.0 13.0 7.0 15.0 -3.0 curveto " - "17.0 -13.0 16.0 -16.0 12.0 -12.0 curveto", repr(pen)) - self.assertEqual((12.0, -12.0), pen.getCurrentPoint()) - - def test_closePath(self): - pen = _TestPen() - pen.lineTo((3, 4)) - pen.closePath() - self.assertEqual("3 4 lineto closepath", repr(pen)) - self.assertEqual(None, pen.getCurrentPoint()) - - def test_endPath(self): - pen = _TestPen() - pen.lineTo((3, 4)) - pen.endPath() - self.assertEqual("3 4 lineto endpath", repr(pen)) - self.assertEqual(None, pen.getCurrentPoint()) - - def test_addComponent(self): - pen = _TestPen() - pen.glyphSet["oslash"] = _TestGlyph() - pen.addComponent("oslash", (2, 3, 0.5, 2, -10, 0)) - self.assertEqual("-10.0 0.0 moveto " - "40.0 200.0 lineto " - "127.5 300.0 131.25 290.0 125.0 265.0 curveto " - "118.75 240.0 102.5 200.0 -10.0 0.0 curveto " - "closepath", repr(pen)) - self.assertEqual(None, pen.getCurrentPoint()) - - -class DecomposeSegmentTest(unittest.TestCase): - def test_decomposeSuperBezierSegment(self): - decompose = 
decomposeSuperBezierSegment - self.assertRaises(AssertionError, decompose, []) - self.assertRaises(AssertionError, decompose, [(0, 0)]) - self.assertRaises(AssertionError, decompose, [(0, 0), (1, 1)]) - self.assertEqual([((0, 0), (1, 1), (2, 2))], - decompose([(0, 0), (1, 1), (2, 2)])) - self.assertEqual( - [((0, 0), (2, -2), (4, 0)), ((6, 2), (8, 8), (12, -12))], - decompose([(0, 0), (4, -4), (8, 8), (12, -12)])) - - def test_decomposeQuadraticSegment(self): - decompose = decomposeQuadraticSegment - self.assertRaises(AssertionError, decompose, []) - self.assertRaises(AssertionError, decompose, [(0, 0)]) - self.assertEqual([((0,0), (4, 8))], decompose([(0, 0), (4, 8)])) - self.assertEqual([((0,0), (2, 4)), ((4, 8), (9, -9))], - decompose([(0, 0), (4, 8), (9, -9)])) - self.assertEqual( - [((0, 0), (2.0, 4.0)), ((4, 8), (6.5, -0.5)), ((9, -9), (10, 10))], - decompose([(0, 0), (4, 8), (9, -9), (10, 10)])) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/pens/boundsPen.py fonttools-3.21.2/Tools/fontTools/pens/boundsPen.py --- fonttools-3.0/Tools/fontTools/pens/boundsPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/boundsPen.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,78 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect -from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds -from fontTools.pens.basePen import BasePen - - -__all__ = ["BoundsPen", "ControlBoundsPen"] - - -class ControlBoundsPen(BasePen): - - """Pen to calculate the "control bounds" of a shape. This is the - bounding box of all control points, so may be larger than the - actual bounding box if there are curves that don't have points - on their extremes. - - When the shape has been drawn, the bounds are available as the - 'bounds' attribute of the pen object. 
It's a 4-tuple: - (xMin, yMin, xMax, yMax) - """ - - def __init__(self, glyphSet): - BasePen.__init__(self, glyphSet) - self.bounds = None - - def _moveTo(self, pt): - bounds = self.bounds - if bounds: - self.bounds = updateBounds(bounds, pt) - else: - x, y = pt - self.bounds = (x, y, x, y) - - def _lineTo(self, pt): - self.bounds = updateBounds(self.bounds, pt) - - def _curveToOne(self, bcp1, bcp2, pt): - bounds = self.bounds - bounds = updateBounds(bounds, bcp1) - bounds = updateBounds(bounds, bcp2) - bounds = updateBounds(bounds, pt) - self.bounds = bounds - - def _qCurveToOne(self, bcp, pt): - bounds = self.bounds - bounds = updateBounds(bounds, bcp) - bounds = updateBounds(bounds, pt) - self.bounds = bounds - - -class BoundsPen(ControlBoundsPen): - - """Pen to calculate the bounds of a shape. It calculates the - correct bounds even when the shape contains curves that don't - have points on their extremes. This is somewhat slower to compute - than the "control bounds". - - When the shape has been drawn, the bounds are available as the - 'bounds' attribute of the pen object. 
It's a 4-tuple: - (xMin, yMin, xMax, yMax) - """ - - def _curveToOne(self, bcp1, bcp2, pt): - bounds = self.bounds - bounds = updateBounds(bounds, pt) - if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds): - bounds = unionRect(bounds, calcCubicBounds( - self._getCurrentPoint(), bcp1, bcp2, pt)) - self.bounds = bounds - - def _qCurveToOne(self, bcp, pt): - bounds = self.bounds - bounds = updateBounds(bounds, pt) - if not pointInRect(bcp, bounds): - bounds = unionRect(bounds, calcQuadraticBounds( - self._getCurrentPoint(), bcp, pt)) - self.bounds = bounds diff -Nru fonttools-3.0/Tools/fontTools/pens/boundsPen_test.py fonttools-3.21.2/Tools/fontTools/pens/boundsPen_test.py --- fonttools-3.0/Tools/fontTools/pens/boundsPen_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/boundsPen_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,66 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen -import unittest - - -def draw_(pen): - pen.moveTo((0, 0)) - pen.lineTo((0, 100)) - pen.qCurveTo((50, 75), (60, 50), (50, 25), (0, 0)) - pen.curveTo((-50, 25), (-60, 50), (-50, 75), (0, 100)) - pen.closePath() - - -def bounds_(pen): - return " ".join(["%.0f" % c for c in pen.bounds]) - - -class BoundsPenTest(unittest.TestCase): - def test_draw(self): - pen = BoundsPen(None) - draw_(pen) - self.assertEqual("-55 0 58 100", bounds_(pen)) - - def test_empty(self): - pen = BoundsPen(None) - self.assertEqual(None, pen.bounds) - - def test_curve(self): - pen = BoundsPen(None) - pen.moveTo((0, 0)) - pen.curveTo((20, 10), (90, 40), (0, 0)) - self.assertEqual("0 0 45 20", bounds_(pen)) - - def test_quadraticCurve(self): - pen = BoundsPen(None) - pen.moveTo((0, 0)) - pen.qCurveTo((6, 6), (10, 0)) - self.assertEqual("0 0 10 3", bounds_(pen)) - - -class ControlBoundsPenTest(unittest.TestCase): - def test_draw(self): - pen = 
ControlBoundsPen(None) - draw_(pen) - self.assertEqual("-55 0 60 100", bounds_(pen)) - - def test_empty(self): - pen = ControlBoundsPen(None) - self.assertEqual(None, pen.bounds) - - def test_curve(self): - pen = ControlBoundsPen(None) - pen.moveTo((0, 0)) - pen.curveTo((20, 10), (90, 40), (0, 0)) - self.assertEqual("0 0 90 40", bounds_(pen)) - - def test_quadraticCurve(self): - pen = ControlBoundsPen(None) - pen.moveTo((0, 0)) - pen.qCurveTo((6, 6), (10, 0)) - self.assertEqual("0 0 10 6", bounds_(pen)) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/pens/cocoaPen.py fonttools-3.21.2/Tools/fontTools/pens/cocoaPen.py --- fonttools-3.0/Tools/fontTools/pens/cocoaPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/cocoaPen.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.basePen import BasePen - - -__all__ = ["CocoaPen"] - - -class CocoaPen(BasePen): - - def __init__(self, glyphSet, path=None): - BasePen.__init__(self, glyphSet) - if path is None: - from AppKit import NSBezierPath - path = NSBezierPath.bezierPath() - self.path = path - - def _moveTo(self, p): - self.path.moveToPoint_(p) - - def _lineTo(self, p): - self.path.lineToPoint_(p) - - def _curveToOne(self, p1, p2, p3): - self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2) - - def _closePath(self): - self.path.closePath() diff -Nru fonttools-3.0/Tools/fontTools/pens/__init__.py fonttools-3.21.2/Tools/fontTools/pens/__init__.py --- fonttools-3.0/Tools/fontTools/pens/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,4 +0,0 @@ -"""Empty __init__.py file to signal Python this directory is a package.""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * diff 
-Nru fonttools-3.0/Tools/fontTools/pens/pointInsidePen.py fonttools-3.21.2/Tools/fontTools/pens/pointInsidePen.py --- fonttools-3.0/Tools/fontTools/pens/pointInsidePen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/pointInsidePen.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,191 +0,0 @@ -"""fontTools.pens.pointInsidePen -- Pen implementing "point inside" testing -for shapes. -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.basePen import BasePen -from fontTools.misc.bezierTools import solveQuadratic, solveCubic - - -__all__ = ["PointInsidePen"] - - -# working around floating point errors -EPSILON = 1e-10 -ONE_PLUS_EPSILON = 1 + EPSILON -ZERO_MINUS_EPSILON = 0 - EPSILON - - -class PointInsidePen(BasePen): - - """This pen implements "point inside" testing: to test whether - a given point lies inside the shape (black) or outside (white). - Instances of this class can be recycled, as long as the - setTestPoint() method is used to set the new point to test. - - Typical usage: - - pen = PointInsidePen(glyphSet, (100, 200)) - outline.draw(pen) - isInside = pen.getResult() - - Both the even-odd algorithm and the non-zero-winding-rule - algorithm are implemented. The latter is the default, specify - True for the evenOdd argument of __init__ or setTestPoint - to use the even-odd algorithm. - """ - - # This class implements the classical "shoot a ray from the test point - # to infinity and count how many times it intersects the outline" (as well - # as the non-zero variant, where the counter is incremented if the outline - # intersects the ray in one direction and decremented if it intersects in - # the other direction). 
- # I found an amazingly clear explanation of the subtleties involved in - # implementing this correctly for polygons here: - # http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html - # I extended the principles outlined on that page to curves. - - def __init__(self, glyphSet, testPoint, evenOdd=0): - BasePen.__init__(self, glyphSet) - self.setTestPoint(testPoint, evenOdd) - - def setTestPoint(self, testPoint, evenOdd=0): - """Set the point to test. Call this _before_ the outline gets drawn.""" - self.testPoint = testPoint - self.evenOdd = evenOdd - self.firstPoint = None - self.intersectionCount = 0 - - def getResult(self): - """After the shape has been drawn, getResult() returns True if the test - point lies within the (black) shape, and False if it doesn't. - """ - if self.firstPoint is not None: - # always make sure the sub paths are closed; the algorithm only works - # for closed paths. - self.closePath() - if self.evenOdd: - result = self.intersectionCount % 2 - else: - result = self.intersectionCount - return not not result - - def _addIntersection(self, goingUp): - if self.evenOdd or goingUp: - self.intersectionCount += 1 - else: - self.intersectionCount -= 1 - - def _moveTo(self, point): - if self.firstPoint is not None: - # always make sure the sub paths are closed; the algorithm only works - # for closed paths. 
- self.closePath() - self.firstPoint = point - - def _lineTo(self, point): - x, y = self.testPoint - x1, y1 = self._getCurrentPoint() - x2, y2 = point - - if x1 < x and x2 < x: - return - if y1 < y and y2 < y: - return - if y1 >= y and y2 >= y: - return - - dx = x2 - x1 - dy = y2 - y1 - t = (y - y1) / dy - ix = dx * t + x1 - if ix < x: - return - self._addIntersection(y2 > y1) - - def _curveToOne(self, bcp1, bcp2, point): - x, y = self.testPoint - x1, y1 = self._getCurrentPoint() - x2, y2 = bcp1 - x3, y3 = bcp2 - x4, y4 = point - - if x1 < x and x2 < x and x3 < x and x4 < x: - return - if y1 < y and y2 < y and y3 < y and y4 < y: - return - if y1 >= y and y2 >= y and y3 >= y and y4 >= y: - return - - dy = y1 - cy = (y2 - dy) * 3.0 - by = (y3 - y2) * 3.0 - cy - ay = y4 - dy - cy - by - solutions = sorted(solveCubic(ay, by, cy, dy - y)) - solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] - if not solutions: - return - - dx = x1 - cx = (x2 - dx) * 3.0 - bx = (x3 - x2) * 3.0 - cx - ax = x4 - dx - cx - bx - - above = y1 >= y - lastT = None - for t in solutions: - if t == lastT: - continue - lastT = t - t2 = t * t - t3 = t2 * t - - direction = 3*ay*t2 + 2*by*t + cy - if direction == 0.0: - direction = 6*ay*t + 2*by - if direction == 0.0: - direction = ay - goingUp = direction > 0.0 - - xt = ax*t3 + bx*t2 + cx*t + dx - if xt < x: - above = goingUp - continue - - if t == 0.0: - if not goingUp: - self._addIntersection(goingUp) - elif t == 1.0: - if not above: - self._addIntersection(goingUp) - else: - if above != goingUp: - self._addIntersection(goingUp) - #else: - # we're not really intersecting, merely touching the 'top' - above = goingUp - - def _qCurveToOne_unfinished(self, bcp, point): - # XXX need to finish this, for now doing it through a cubic - # (BasePen implements _qCurveTo in terms of a cubic) will - # have to do. 
- x, y = self.testPoint - x1, y1 = self._getCurrentPoint() - x2, y2 = bcp - x3, y3 = point - c = y1 - b = (y2 - c) * 2.0 - a = y3 - c - b - solutions = sorted(solveQuadratic(a, b, c - y)) - solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON] - if not solutions: - return - # XXX - - def _closePath(self): - if self._getCurrentPoint() != self.firstPoint: - self.lineTo(self.firstPoint) - self.firstPoint = None - - _endPath = _closePath diff -Nru fonttools-3.0/Tools/fontTools/pens/pointInsidePen_test.py fonttools-3.21.2/Tools/fontTools/pens/pointInsidePen_test.py --- fonttools-3.0/Tools/fontTools/pens/pointInsidePen_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/pointInsidePen_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.pointInsidePen import PointInsidePen -import unittest - - -class PointInsidePenTest(unittest.TestCase): - def test_line(self): - def draw_triangles(pen): - pen.moveTo((0,0)); pen.lineTo((10,5)); pen.lineTo((10,0)) - pen.moveTo((9,1)); pen.lineTo((4,1)); pen.lineTo((9,4)) - pen.closePath() - - self.assertEqual( - " *********" - " ** *" - " ** *" - " * *" - " *", - self.render(draw_triangles, even_odd=True)) - - self.assertEqual( - " *********" - " *******" - " *****" - " ***" - " *", - self.render(draw_triangles, even_odd=False)) - - def test_curve(self): - def draw_curves(pen): - pen.moveTo((0,0)); pen.curveTo((9,1), (9,4), (0,5)) - pen.moveTo((10,5)); pen.curveTo((1,4), (1,1), (10,0)) - pen.closePath() - - self.assertEqual( - "*** ***" - "**** ****" - "*** ***" - "**** ****" - "*** ***", - self.render(draw_curves, even_odd=True)) - - self.assertEqual( - "*** ***" - "**********" - "**********" - "**********" - "*** ***", - self.render(draw_curves, even_odd=False)) - - def test_qCurve(self): - def draw_qCurves(pen): - pen.moveTo((0,0)); 
pen.qCurveTo((15,2), (0,5)) - pen.moveTo((10,5)); pen.qCurveTo((-5,3), (10,0)) - pen.closePath() - - self.assertEqual( - "*** **" - "**** ***" - "*** ***" - "*** ****" - "** ***", - self.render(draw_qCurves, even_odd=True)) - - self.assertEqual( - "*** **" - "**********" - "**********" - "**********" - "** ***", - self.render(draw_qCurves, even_odd=False)) - - @staticmethod - def render(draw_function, even_odd): - result = BytesIO() - for y in range(5): - for x in range(10): - pen = PointInsidePen(None, (x + 0.5, y + 0.5), even_odd) - draw_function(pen) - if pen.getResult(): - result.write(b"*") - else: - result.write(b" ") - return tounicode(result.getvalue()) - - -if __name__ == "__main__": - unittest.main() - diff -Nru fonttools-3.0/Tools/fontTools/pens/qtPen.py fonttools-3.21.2/Tools/fontTools/pens/qtPen.py --- fonttools-3.0/Tools/fontTools/pens/qtPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/qtPen.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,28 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.basePen import BasePen - - -__all__ = ["QtPen"] - - -class QtPen(BasePen): - - def __init__(self, glyphSet, path=None): - BasePen.__init__(self, glyphSet) - if path is None: - from PyQt5.QtGui import QPainterPath - path = QPainterPath() - self.path = path - - def _moveTo(self, p): - self.path.moveTo(*p) - - def _lineTo(self, p): - self.path.lineTo(*p) - - def _curveToOne(self, p1, p2, p3): - self.path.cubicTo(*p1+p2+p3) - - def _closePath(self): - self.path.closeSubpath() diff -Nru fonttools-3.0/Tools/fontTools/pens/reportLabPen.py fonttools-3.21.2/Tools/fontTools/pens/reportLabPen.py --- fonttools-3.0/Tools/fontTools/pens/reportLabPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/reportLabPen.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,72 +0,0 @@ -from __future__ import print_function, division, absolute_import -from 
fontTools.misc.py23 import * -from fontTools.pens.basePen import BasePen -from reportlab.graphics.shapes import Path - - -class ReportLabPen(BasePen): - - """A pen for drawing onto a reportlab.graphics.shapes.Path object.""" - - def __init__(self, glyphSet, path=None): - BasePen.__init__(self, glyphSet) - if path is None: - path = Path() - self.path = path - - def _moveTo(self, p): - (x,y) = p - self.path.moveTo(x,y) - - def _lineTo(self, p): - (x,y) = p - self.path.lineTo(x,y) - - def _curveToOne(self, p1, p2, p3): - (x1,y1) = p1 - (x2,y2) = p2 - (x3,y3) = p3 - self.path.curveTo(x1, y1, x2, y2, x3, y3) - - def _closePath(self): - self.path.closePath() - - -if __name__=="__main__": - import sys - if len(sys.argv) < 3: - print("Usage: reportLabPen.py []") - print(" If no image file name is created, by default .png is created.") - print(" example: reportLabPen.py Arial.TTF R test.png") - print(" (The file format will be PNG, regardless of the image file name supplied)") - sys.exit(0) - - from fontTools.ttLib import TTFont - from reportlab.lib import colors - - path = sys.argv[1] - glyphName = sys.argv[2] - if (len(sys.argv) > 3): - imageFile = sys.argv[3] - else: - imageFile = "%s.png" % glyphName - - font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font - gs = font.getGlyphSet() - pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5)) - g = gs[glyphName] - g.draw(pen) - - w, h = g.width, 1000 - from reportlab.graphics import renderPM - from reportlab.graphics.shapes import Group, Drawing, scale - - # Everything is wrapped in a group to allow transformations. 
- g = Group(pen.path) - g.translate(0, 200) - g.scale(0.3, 0.3) - - d = Drawing(w, h) - d.add(g) - - renderPM.drawToFile(d, imageFile, fmt="PNG") diff -Nru fonttools-3.0/Tools/fontTools/pens/transformPen.py fonttools-3.21.2/Tools/fontTools/pens/transformPen.py --- fonttools-3.0/Tools/fontTools/pens/transformPen.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/pens/transformPen.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.pens.basePen import AbstractPen - - -__all__ = ["TransformPen"] - - -class TransformPen(AbstractPen): - - """Pen that transforms all coordinates using a Affine transformation, - and passes them to another pen. - """ - - def __init__(self, outPen, transformation): - """The 'outPen' argument is another pen object. It will receive the - transformed coordinates. The 'transformation' argument can either - be a six-tuple, or a fontTools.misc.transform.Transform object. 
- """ - if not hasattr(transformation, "transformPoint"): - from fontTools.misc.transform import Transform - transformation = Transform(*transformation) - self._transformation = transformation - self._transformPoint = transformation.transformPoint - self._outPen = outPen - self._stack = [] - - def moveTo(self, pt): - self._outPen.moveTo(self._transformPoint(pt)) - - def lineTo(self, pt): - self._outPen.lineTo(self._transformPoint(pt)) - - def curveTo(self, *points): - self._outPen.curveTo(*self._transformPoints(points)) - - def qCurveTo(self, *points): - if points[-1] is None: - points = self._transformPoints(points[:-1]) + [None] - else: - points = self._transformPoints(points) - self._outPen.qCurveTo(*points) - - def _transformPoints(self, points): - new = [] - transformPoint = self._transformPoint - for pt in points: - new.append(transformPoint(pt)) - return new - - def closePath(self): - self._outPen.closePath() - - def addComponent(self, glyphName, transformation): - transformation = self._transformation.transform(transformation) - self._outPen.addComponent(glyphName, transformation) - - -if __name__ == "__main__": - from fontTools.pens.basePen import _TestPen - pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0)) - pen.moveTo((0, 0)) - pen.lineTo((0, 100)) - pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0)) - pen.closePath() diff -Nru fonttools-3.0/Tools/fontTools/subset.py fonttools-3.21.2/Tools/fontTools/subset.py --- fonttools-3.0/Tools/fontTools/subset.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/subset.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,2742 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. 
-# -# Google Author(s): Behdad Esfahbod - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib -from fontTools.ttLib.tables import otTables -from fontTools.misc import psCharStrings -import sys -import struct -import time -import array - -__usage__ = "pyftsubset font-file [glyph...] [--option=value]..." - -__doc__="""\ -pyftsubset -- OpenType font subsetter and optimizer - - pyftsubset is an OpenType font subsetter and optimizer, based on fontTools. - It accepts any TT- or CFF-flavored OpenType (.otf or .ttf) or WOFF (.woff) - font file. The subsetted glyph set is based on the specified glyphs - or characters, and specified OpenType layout features. - - The tool also performs some size-reducing optimizations, aimed for using - subset fonts as webfonts. Individual optimizations can be enabled or - disabled, and are enabled by default when they are safe. - -Usage: - """+__usage__+""" - - At least one glyph or one of --gids, --gids-file, --glyphs, --glyphs-file, - --text, --text-file, --unicodes, or --unicodes-file, must be specified. - -Arguments: - font-file - The input font file. - glyph - Specify one or more glyph identifiers to include in the subset. Must be - PS glyph names, or the special string '*' to keep the entire glyph set. - -Initial glyph set specification: - These options populate the initial glyph set. Same option can appear - multiple times, and the results are accummulated. - --gids=[,...] - Specify comma/whitespace-separated list of glyph IDs or ranges as - decimal numbers. For example, --gids=10-12,14 adds glyphs with - numbers 10, 11, 12, and 14. - --gids-file= - Like --gids but reads from a file. Anything after a '#' on any line - is ignored as comments. - --glyphs=[,...] - Specify comma/whitespace-separated PS glyph names to add to the subset. - Note that only PS glyph names are accepted, not gidNNN, U+XXXX, etc - that are accepted on the command line. 
The special string '*' wil keep - the entire glyph set. - --glyphs-file= - Like --glyphs but reads from a file. Anything after a '#' on any line - is ignored as comments. - --text= - Specify characters to include in the subset, as UTF-8 string. - --text-file= - Like --text but reads from a file. Newline character are not added to - the subset. - --unicodes=[,...] - Specify comma/whitespace-separated list of Unicode codepoints or - ranges as hex numbers, optionally prefixed with 'U+', 'u', etc. - For example, --unicodes=41-5a,61-7a adds ASCII letters, so does - the more verbose --unicodes=U+0041-005A,U+0061-007A. - The special strings '*' will choose all Unicode characters mapped - by the font. - --unicodes-file= - Like --unicodes, but reads from a file. Anything after a '#' on any - line in the file is ignored as comments. - --ignore-missing-glyphs - Do not fail if some requested glyphs or gids are not available in - the font. - --no-ignore-missing-glyphs - Stop and fail if some requested glyphs or gids are not available - in the font. [default] - --ignore-missing-unicodes [default] - Do not fail if some requested Unicode characters (including those - indirectly specified using --text or --text-file) are not available - in the font. - --no-ignore-missing-unicodes - Stop and fail if some requested Unicode characters are not available - in the font. - Note the default discrepancy between ignoring missing glyphs versus - unicodes. This is for historical reasons and in the future - --no-ignore-missing-unicodes might become default. - -Other options: - For the other options listed below, to see the current value of the option, - pass a value of '?' to it, with or without a '='. - Examples: - $ pyftsubset --glyph-names? - Current setting for 'glyph-names' is: False - $ ./pyftsubset --name-IDs=? - Current setting for 'name-IDs' is: [1, 2] - $ ./pyftsubset --hinting? --no-hinting --hinting? 
- Current setting for 'hinting' is: True - Current setting for 'hinting' is: False - -Output options: - --output-file= - The output font file. If not specified, the subsetted font - will be saved in as font-file.subset. - --flavor= - Specify flavor of output font file. May be 'woff' or 'woff2'. - Note that WOFF2 requires the Brotli Python extension, available - at https://github.com/google/brotli - -Glyph set expansion: - These options control how additional glyphs are added to the subset. - --notdef-glyph - Add the '.notdef' glyph to the subset (ie, keep it). [default] - --no-notdef-glyph - Drop the '.notdef' glyph unless specified in the glyph set. This - saves a few bytes, but is not possible for Postscript-flavored - fonts, as those require '.notdef'. For TrueType-flavored fonts, - this works fine as long as no unsupported glyphs are requested - from the font. - --notdef-outline - Keep the outline of '.notdef' glyph. The '.notdef' glyph outline is - used when glyphs not supported by the font are to be shown. It is not - needed otherwise. - --no-notdef-outline - When including a '.notdef' glyph, remove its outline. This saves - a few bytes. [default] - --recommended-glyphs - Add glyphs 0, 1, 2, and 3 to the subset, as recommended for - TrueType-flavored fonts: '.notdef', 'NULL' or '.null', 'CR', 'space'. - Some legacy software might require this, but no modern system does. - --no-recommended-glyphs - Do not add glyphs 0, 1, 2, and 3 to the subset, unless specified in - glyph set. [default] - --layout-features[+|-]=[,...] - Specify (=), add to (+=) or exclude from (-=) the comma-separated - set of OpenType layout feature tags that will be preserved. - Glyph variants used by the preserved features are added to the - specified subset glyph set. By default, 'calt', 'ccmp', 'clig', 'curs', - 'kern', 'liga', 'locl', 'mark', 'mkmk', 'rclt', 'rlig' and all features - required for script shaping are preserved. To see the full list, try - '--layout-features=?'. 
Use '*' to keep all features. - Multiple --layout-features options can be provided if necessary. - Examples: - --layout-features+=onum,pnum,ss01 - * Keep the default set of features and 'onum', 'pnum', 'ss01'. - --layout-features-='mark','mkmk' - * Keep the default set of features but drop 'mark' and 'mkmk'. - --layout-features='kern' - * Only keep the 'kern' feature, drop all others. - --layout-features='' - * Drop all features. - --layout-features='*' - * Keep all features. - --layout-features+=aalt --layout-features-=vrt2 - * Keep default set of features plus 'aalt', but drop 'vrt2'. - -Hinting options: - --hinting - Keep hinting [default] - --no-hinting - Drop glyph-specific hinting and font-wide hinting tables, as well - as remove hinting-related bits and pieces from other tables (eg. GPOS). - See --hinting-tables for list of tables that are dropped by default. - Instructions and hints are stripped from 'glyf' and 'CFF ' tables - respectively. This produces (sometimes up to 30%) smaller fonts that - are suitable for extremely high-resolution systems, like high-end - mobile devices and retina displays. - XXX Note: Currently there is a known bug in 'CFF ' hint stripping that - might make the font unusable as a webfont as they will be rejected by - OpenType Sanitizer used in common browsers. For more information see: - https://github.com/behdad/fonttools/issues/144 - The --desubroutinize options works around that bug. - -Optimization options: - --desubroutinize - Remove CFF use of subroutinizes. Subroutinization is a way to make CFF - fonts smaller. For small subsets however, desubroutinizing might make - the font smaller. It has even been reported that desubroutinized CFF - fonts compress better (produce smaller output) WOFF and WOFF2 fonts. - Also see note under --no-hinting. - --no-desubroutinize [default] - Leave CFF subroutinizes as is, only throw away unused subroutinizes. - -Font table options: - --drop-tables[+|-]=
[,
...] - Specify (=), add to (+=) or exclude from (-=) the comma-separated - set of tables that will be be dropped. - By default, the following tables are dropped: - 'BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ', 'PCLT', 'LTSH' - and Graphite tables: 'Feat', 'Glat', 'Gloc', 'Silf', 'Sill' - and color tables: 'CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'. - The tool will attempt to subset the remaining tables. - Examples: - --drop-tables-='SVG ' - * Drop the default set of tables but keep 'SVG '. - --drop-tables+=GSUB - * Drop the default set of tables and 'GSUB'. - --drop-tables=DSIG - * Only drop the 'DSIG' table, keep all others. - --drop-tables= - * Keep all tables. - --no-subset-tables+=
[,
...] - Add to the set of tables that will not be subsetted. - By default, the following tables are included in this list, as - they do not need subsetting (ignore the fact that 'loca' is listed - here): 'gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2', 'loca', - 'name', 'cvt ', 'fpgm', 'prep', 'VMDX', and 'DSIG'. Tables that the tool - does not know how to subset and are not specified here will be dropped - from the font. - Example: - --no-subset-tables+=FFTM - * Keep 'FFTM' table in the font by preventing subsetting. - --hinting-tables[-]=
[,
...] - Specify (=), add to (+=) or exclude from (-=) the list of font-wide - hinting tables that will be dropped if --no-hinting is specified, - Examples: - --hinting-tables-='VDMX' - * Drop font-wide hinting tables except 'VDMX'. - --hinting-tables='' - * Keep all font-wide hinting tables (but strip hints from glyphs). - --legacy-kern - Keep TrueType 'kern' table even when OpenType 'GPOS' is available. - --no-legacy-kern - Drop TrueType 'kern' table if OpenType 'GPOS' is available. [default] - -Font naming options: - These options control what is retained in the 'name' table. For numerical - codes, see: http://www.microsoft.com/typography/otspec/name.htm - --name-IDs[+|-]=[,...] - Specify (=), add to (+=) or exclude from (-=) the set of 'name' table - entry nameIDs that will be preserved. By default only nameID 1 (Family) - and nameID 2 (Style) are preserved. Use '*' to keep all entries. - Examples: - --name-IDs+=0,4,6 - * Also keep Copyright, Full name and PostScript name entry. - --name-IDs='' - * Drop all 'name' table entries. - --name-IDs='*' - * keep all 'name' table entries - --name-legacy - Keep legacy (non-Unicode) 'name' table entries (0.x, 1.x etc.). - XXX Note: This might be needed for some fonts that have no Unicode name - entires for English. See: https://github.com/behdad/fonttools/issues/146 - --no-name-legacy - Drop legacy (non-Unicode) 'name' table entries [default] - --name-languages[+|-]=[,] - Specify (=), add to (+=) or exclude from (-=) the set of 'name' table - langIDs that will be preserved. By default only records with langID - 0x0409 (English) are preserved. Use '*' to keep all langIDs. - --obfuscate-names - Make the font unusable as a system font by replacing name IDs 1, 2, 3, 4, - and 6 with dummy strings (it is still fully functional as webfont). - -Glyph naming and encoding options: - --glyph-names - Keep PS glyph names in TT-flavored fonts. In general glyph names are - not needed for correct use of the font. 
However, some PDF generators - and PDF viewers might rely on glyph names to extract Unicode text - from PDF documents. - --no-glyph-names - Drop PS glyph names in TT-flavored fonts, by using 'post' table - version 3.0. [default] - --legacy-cmap - Keep the legacy 'cmap' subtables (0.x, 1.x, 4.x etc.). - --no-legacy-cmap - Drop the legacy 'cmap' subtables. [default] - --symbol-cmap - Keep the 3.0 symbol 'cmap'. - --no-symbol-cmap - Drop the 3.0 symbol 'cmap'. [default] - -Other font-specific options: - --recalc-bounds - Recalculate font bounding boxes. - --no-recalc-bounds - Keep original font bounding boxes. This is faster and still safe - for all practical purposes. [default] - --recalc-timestamp - Set font 'modified' timestamp to current time. - --no-recalc-timestamp - Do not modify font 'modified' timestamp. [default] - --canonical-order - Order tables as recommended in the OpenType standard. This is not - required by the standard, nor by any known implementation. - --no-canonical-order - Keep original order of font tables. This is faster. [default] - -Application options: - --verbose - Display verbose information of the subsetting process. - --timing - Display detailed timing information of the subsetting process. - --xml - Display the TTX XML representation of subsetted font. - -Example: - Produce a subset containing the characters ' !"#$%' without performing - size-reducing optimizations: - - $ pyftsubset font.ttf --unicodes="U+0020-0025" \\ - --layout-features='*' --glyph-names --symbol-cmap --legacy-cmap \\ - --notdef-glyph --notdef-outline --recommended-glyphs \\ - --name-IDs='*' --name-legacy --name-languages='*' -""" - - -def _add_method(*clazzes): - """Returns a decorator function that adds a new method to one or - more classes.""" - def wrapper(method): - for clazz in clazzes: - assert clazz.__name__ != 'DefaultTable', \ - 'Oops, table class not found.' - assert not hasattr(clazz, method.__name__), \ - "Oops, class '%s' has method '%s'." 
% (clazz.__name__, - method.__name__) - setattr(clazz, method.__name__, method) - return None - return wrapper - -def _uniq_sort(l): - return sorted(set(l)) - -def _set_update(s, *others): - # Jython's set.update only takes one other argument. - # Emulate real set.update... - for other in others: - s.update(other) - -def _dict_subset(d, glyphs): - return {g:d[g] for g in glyphs} - - -@_add_method(otTables.Coverage) -def intersect(self, glyphs): - """Returns ascending list of matching coverage values.""" - return [i for i,g in enumerate(self.glyphs) if g in glyphs] - -@_add_method(otTables.Coverage) -def intersect_glyphs(self, glyphs): - """Returns set of intersecting glyphs.""" - return set(g for g in self.glyphs if g in glyphs) - -@_add_method(otTables.Coverage) -def subset(self, glyphs): - """Returns ascending list of remaining coverage values.""" - indices = self.intersect(glyphs) - self.glyphs = [g for g in self.glyphs if g in glyphs] - return indices - -@_add_method(otTables.Coverage) -def remap(self, coverage_map): - """Remaps coverage.""" - self.glyphs = [self.glyphs[i] for i in coverage_map] - -@_add_method(otTables.ClassDef) -def intersect(self, glyphs): - """Returns ascending list of matching class values.""" - return _uniq_sort( - ([0] if any(g not in self.classDefs for g in glyphs) else []) + - [v for g,v in self.classDefs.items() if g in glyphs]) - -@_add_method(otTables.ClassDef) -def intersect_class(self, glyphs, klass): - """Returns set of glyphs matching class.""" - if klass == 0: - return set(g for g in glyphs if g not in self.classDefs) - return set(g for g,v in self.classDefs.items() - if v == klass and g in glyphs) - -@_add_method(otTables.ClassDef) -def subset(self, glyphs, remap=False): - """Returns ascending list of remaining classes.""" - self.classDefs = {g:v for g,v in self.classDefs.items() if g in glyphs} - # Note: while class 0 has the special meaning of "not matched", - # if no glyph will ever /not match/, we can optimize class 0 out 
too. - indices = _uniq_sort( - ([0] if any(g not in self.classDefs for g in glyphs) else []) + - list(self.classDefs.values())) - if remap: - self.remap(indices) - return indices - -@_add_method(otTables.ClassDef) -def remap(self, class_map): - """Remaps classes.""" - self.classDefs = {g:class_map.index(v) for g,v in self.classDefs.items()} - -@_add_method(otTables.SingleSubst) -def closure_glyphs(self, s, cur_glyphs): - s.glyphs.update(v for g,v in self.mapping.items() if g in cur_glyphs) - -@_add_method(otTables.SingleSubst) -def subset_glyphs(self, s): - self.mapping = {g:v for g,v in self.mapping.items() - if g in s.glyphs and v in s.glyphs} - return bool(self.mapping) - -@_add_method(otTables.MultipleSubst) -def closure_glyphs(self, s, cur_glyphs): - indices = self.Coverage.intersect(cur_glyphs) - _set_update(s.glyphs, *(self.Sequence[i].Substitute for i in indices)) - -@_add_method(otTables.MultipleSubst) -def subset_glyphs(self, s): - indices = self.Coverage.subset(s.glyphs) - self.Sequence = [self.Sequence[i] for i in indices] - # Now drop rules generating glyphs we don't want - indices = [i for i,seq in enumerate(self.Sequence) - if all(sub in s.glyphs for sub in seq.Substitute)] - self.Sequence = [self.Sequence[i] for i in indices] - self.Coverage.remap(indices) - self.SequenceCount = len(self.Sequence) - return bool(self.SequenceCount) - -@_add_method(otTables.AlternateSubst) -def closure_glyphs(self, s, cur_glyphs): - _set_update(s.glyphs, *(vlist for g,vlist in self.alternates.items() - if g in cur_glyphs)) - -@_add_method(otTables.AlternateSubst) -def subset_glyphs(self, s): - self.alternates = {g:vlist - for g,vlist in self.alternates.items() - if g in s.glyphs and - all(v in s.glyphs for v in vlist)} - return bool(self.alternates) - -@_add_method(otTables.LigatureSubst) -def closure_glyphs(self, s, cur_glyphs): - _set_update(s.glyphs, *([seq.LigGlyph for seq in seqs - if all(c in s.glyphs for c in seq.Component)] - for g,seqs in 
self.ligatures.items() - if g in cur_glyphs)) - -@_add_method(otTables.LigatureSubst) -def subset_glyphs(self, s): - self.ligatures = {g:v for g,v in self.ligatures.items() - if g in s.glyphs} - self.ligatures = {g:[seq for seq in seqs - if seq.LigGlyph in s.glyphs and - all(c in s.glyphs for c in seq.Component)] - for g,seqs in self.ligatures.items()} - self.ligatures = {g:v for g,v in self.ligatures.items() if v} - return bool(self.ligatures) - -@_add_method(otTables.ReverseChainSingleSubst) -def closure_glyphs(self, s, cur_glyphs): - if self.Format == 1: - indices = self.Coverage.intersect(cur_glyphs) - if(not indices or - not all(c.intersect(s.glyphs) - for c in self.LookAheadCoverage + self.BacktrackCoverage)): - return - s.glyphs.update(self.Substitute[i] for i in indices) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ReverseChainSingleSubst) -def subset_glyphs(self, s): - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - self.Substitute = [self.Substitute[i] for i in indices] - # Now drop rules generating glyphs we don't want - indices = [i for i,sub in enumerate(self.Substitute) - if sub in s.glyphs] - self.Substitute = [self.Substitute[i] for i in indices] - self.Coverage.remap(indices) - self.GlyphCount = len(self.Substitute) - return bool(self.GlyphCount and - all(c.subset(s.glyphs) - for c in self.LookAheadCoverage+self.BacktrackCoverage)) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.SinglePos) -def subset_glyphs(self, s): - if self.Format == 1: - return len(self.Coverage.subset(s.glyphs)) - elif self.Format == 2: - indices = self.Coverage.subset(s.glyphs) - self.Value = [self.Value[i] for i in indices] - self.ValueCount = len(self.Value) - return bool(self.ValueCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.SinglePos) -def prune_post_subset(self, options): - if not options.hinting: - # Drop device tables - self.ValueFormat &= 
~0x00F0 - return True - -@_add_method(otTables.PairPos) -def subset_glyphs(self, s): - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - self.PairSet = [self.PairSet[i] for i in indices] - for p in self.PairSet: - p.PairValueRecord = [r for r in p.PairValueRecord - if r.SecondGlyph in s.glyphs] - p.PairValueCount = len(p.PairValueRecord) - # Remove empty pairsets - indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount] - self.Coverage.remap(indices) - self.PairSet = [self.PairSet[i] for i in indices] - self.PairSetCount = len(self.PairSet) - return bool(self.PairSetCount) - elif self.Format == 2: - class1_map = self.ClassDef1.subset(s.glyphs, remap=True) - class2_map = self.ClassDef2.subset(s.glyphs, remap=True) - self.Class1Record = [self.Class1Record[i] for i in class1_map] - for c in self.Class1Record: - c.Class2Record = [c.Class2Record[i] for i in class2_map] - self.Class1Count = len(class1_map) - self.Class2Count = len(class2_map) - return bool(self.Class1Count and - self.Class2Count and - self.Coverage.subset(s.glyphs)) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.PairPos) -def prune_post_subset(self, options): - if not options.hinting: - # Drop device tables - self.ValueFormat1 &= ~0x00F0 - self.ValueFormat2 &= ~0x00F0 - return True - -@_add_method(otTables.CursivePos) -def subset_glyphs(self, s): - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - self.EntryExitRecord = [self.EntryExitRecord[i] for i in indices] - self.EntryExitCount = len(self.EntryExitRecord) - return bool(self.EntryExitCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.Anchor) -def prune_hints(self): - # Drop device tables / contour anchor point - self.ensureDecompiled() - self.Format = 1 - -@_add_method(otTables.CursivePos) -def prune_post_subset(self, options): - if not options.hinting: - for rec in self.EntryExitRecord: - if rec.EntryAnchor: 
rec.EntryAnchor.prune_hints() - if rec.ExitAnchor: rec.ExitAnchor.prune_hints() - return True - -@_add_method(otTables.MarkBasePos) -def subset_glyphs(self, s): - if self.Format == 1: - mark_indices = self.MarkCoverage.subset(s.glyphs) - self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] - for i in mark_indices] - self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) - base_indices = self.BaseCoverage.subset(s.glyphs) - self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i] - for i in base_indices] - self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord) - # Prune empty classes - class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) - self.ClassCount = len(class_indices) - for m in self.MarkArray.MarkRecord: - m.Class = class_indices.index(m.Class) - for b in self.BaseArray.BaseRecord: - b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices] - return bool(self.ClassCount and - self.MarkArray.MarkCount and - self.BaseArray.BaseCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.MarkBasePos) -def prune_post_subset(self, options): - if not options.hinting: - for m in self.MarkArray.MarkRecord: - if m.MarkAnchor: - m.MarkAnchor.prune_hints() - for b in self.BaseArray.BaseRecord: - for a in b.BaseAnchor: - if a: - a.prune_hints() - return True - -@_add_method(otTables.MarkLigPos) -def subset_glyphs(self, s): - if self.Format == 1: - mark_indices = self.MarkCoverage.subset(s.glyphs) - self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i] - for i in mark_indices] - self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord) - ligature_indices = self.LigatureCoverage.subset(s.glyphs) - self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i] - for i in ligature_indices] - self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach) - # Prune empty classes - class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord) - self.ClassCount = len(class_indices) - for m 
in self.MarkArray.MarkRecord: - m.Class = class_indices.index(m.Class) - for l in self.LigatureArray.LigatureAttach: - for c in l.ComponentRecord: - c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices] - return bool(self.ClassCount and - self.MarkArray.MarkCount and - self.LigatureArray.LigatureCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.MarkLigPos) -def prune_post_subset(self, options): - if not options.hinting: - for m in self.MarkArray.MarkRecord: - if m.MarkAnchor: - m.MarkAnchor.prune_hints() - for l in self.LigatureArray.LigatureAttach: - for c in l.ComponentRecord: - for a in c.LigatureAnchor: - if a: - a.prune_hints() - return True - -@_add_method(otTables.MarkMarkPos) -def subset_glyphs(self, s): - if self.Format == 1: - mark1_indices = self.Mark1Coverage.subset(s.glyphs) - self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i] - for i in mark1_indices] - self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord) - mark2_indices = self.Mark2Coverage.subset(s.glyphs) - self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i] - for i in mark2_indices] - self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record) - # Prune empty classes - class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord) - self.ClassCount = len(class_indices) - for m in self.Mark1Array.MarkRecord: - m.Class = class_indices.index(m.Class) - for b in self.Mark2Array.Mark2Record: - b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices] - return bool(self.ClassCount and - self.Mark1Array.MarkCount and - self.Mark2Array.MarkCount) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.MarkMarkPos) -def prune_post_subset(self, options): - if not options.hinting: - # Drop device tables or contour anchor point - for m in self.Mark1Array.MarkRecord: - if m.MarkAnchor: - m.MarkAnchor.prune_hints() - for b in self.Mark2Array.Mark2Record: - for m in b.Mark2Anchor: - if m: - m.prune_hints() 
- return True - -@_add_method(otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - otTables.ReverseChainSingleSubst, - otTables.SinglePos, - otTables.PairPos, - otTables.CursivePos, - otTables.MarkBasePos, - otTables.MarkLigPos, - otTables.MarkMarkPos) -def subset_lookups(self, lookup_indices): - pass - -@_add_method(otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - otTables.ReverseChainSingleSubst, - otTables.SinglePos, - otTables.PairPos, - otTables.CursivePos, - otTables.MarkBasePos, - otTables.MarkLigPos, - otTables.MarkMarkPos) -def collect_lookups(self): - return [] - -@_add_method(otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - otTables.ReverseChainSingleSubst, - otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def prune_post_subset(self, options): - return True - -@_add_method(otTables.SingleSubst, - otTables.AlternateSubst, - otTables.ReverseChainSingleSubst) -def may_have_non_1to1(self): - return False - -@_add_method(otTables.MultipleSubst, - otTables.LigatureSubst, - otTables.ContextSubst, - otTables.ChainContextSubst) -def may_have_non_1to1(self): - return True - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def __subset_classify_context(self): - - class ContextHelper(object): - def __init__(self, klass, Format): - if klass.__name__.endswith('Subst'): - Typ = 'Sub' - Type = 'Subst' - else: - Typ = 'Pos' - Type = 'Pos' - if klass.__name__.startswith('Chain'): - Chain = 'Chain' - else: - Chain = '' - ChainTyp = Chain+Typ - - self.Typ = Typ - self.Type = Type - self.Chain = Chain - self.ChainTyp = ChainTyp - - self.LookupRecord = Type+'LookupRecord' - - if Format == 1: - Coverage = lambda r: r.Coverage - ChainCoverage = lambda r: r.Coverage - ContextData = lambda r:(None,) 
- ChainContextData = lambda r:(None, None, None) - RuleData = lambda r:(r.Input,) - ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) - SetRuleData = None - ChainSetRuleData = None - elif Format == 2: - Coverage = lambda r: r.Coverage - ChainCoverage = lambda r: r.Coverage - ContextData = lambda r:(r.ClassDef,) - ChainContextData = lambda r:(r.BacktrackClassDef, - r.InputClassDef, - r.LookAheadClassDef) - RuleData = lambda r:(r.Class,) - ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead) - def SetRuleData(r, d):(r.Class,) = d - def ChainSetRuleData(r, d):(r.Backtrack, r.Input, r.LookAhead) = d - elif Format == 3: - Coverage = lambda r: r.Coverage[0] - ChainCoverage = lambda r: r.InputCoverage[0] - ContextData = None - ChainContextData = None - RuleData = lambda r: r.Coverage - ChainRuleData = lambda r:(r.BacktrackCoverage + - r.InputCoverage + - r.LookAheadCoverage) - SetRuleData = None - ChainSetRuleData = None - else: - assert 0, "unknown format: %s" % Format - - if Chain: - self.Coverage = ChainCoverage - self.ContextData = ChainContextData - self.RuleData = ChainRuleData - self.SetRuleData = ChainSetRuleData - else: - self.Coverage = Coverage - self.ContextData = ContextData - self.RuleData = RuleData - self.SetRuleData = SetRuleData - - if Format == 1: - self.Rule = ChainTyp+'Rule' - self.RuleCount = ChainTyp+'RuleCount' - self.RuleSet = ChainTyp+'RuleSet' - self.RuleSetCount = ChainTyp+'RuleSetCount' - self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else [] - elif Format == 2: - self.Rule = ChainTyp+'ClassRule' - self.RuleCount = ChainTyp+'ClassRuleCount' - self.RuleSet = ChainTyp+'ClassSet' - self.RuleSetCount = ChainTyp+'ClassSetCount' - self.Intersect = lambda glyphs, c, r: (c.intersect_class(glyphs, r) if c - else (set(glyphs) if r == 0 else set())) - - self.ClassDef = 'InputClassDef' if Chain else 'ClassDef' - self.ClassDefIndex = 1 if Chain else 0 - self.Input = 'Input' if Chain else 'Class' - - if self.Format not in [1, 2, 
3]: - return None # Don't shoot the messenger; let it go - if not hasattr(self.__class__, "__ContextHelpers"): - self.__class__.__ContextHelpers = {} - if self.Format not in self.__class__.__ContextHelpers: - helper = ContextHelper(self.__class__, self.Format) - self.__class__.__ContextHelpers[self.Format] = helper - return self.__class__.__ContextHelpers[self.Format] - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst) -def closure_glyphs(self, s, cur_glyphs): - c = self.__subset_classify_context() - - indices = c.Coverage(self).intersect(cur_glyphs) - if not indices: - return [] - cur_glyphs = c.Coverage(self).intersect_glyphs(cur_glyphs) - - if self.Format == 1: - ContextData = c.ContextData(self) - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - for i in indices: - if i >= rssCount or not rss[i]: continue - for r in getattr(rss[i], c.Rule): - if not r: continue - if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) - for cd,klist in zip(ContextData, c.RuleData(r))): - continue - chaos = set() - for ll in getattr(r, c.LookupRecord): - if not ll: continue - seqi = ll.SequenceIndex - if seqi in chaos: - # TODO Can we improve this? 
- pos_glyphs = None - else: - if seqi == 0: - pos_glyphs = frozenset([c.Coverage(self).glyphs[i]]) - else: - pos_glyphs = frozenset([r.Input[seqi - 1]]) - lookup = s.table.LookupList.Lookup[ll.LookupListIndex] - chaos.add(seqi) - if lookup.may_have_non_1to1(): - chaos.update(range(seqi, len(r.Input)+2)) - lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) - elif self.Format == 2: - ClassDef = getattr(self, c.ClassDef) - indices = ClassDef.intersect(cur_glyphs) - ContextData = c.ContextData(self) - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - for i in indices: - if i >= rssCount or not rss[i]: continue - for r in getattr(rss[i], c.Rule): - if not r: continue - if not all(all(c.Intersect(s.glyphs, cd, k) for k in klist) - for cd,klist in zip(ContextData, c.RuleData(r))): - continue - chaos = set() - for ll in getattr(r, c.LookupRecord): - if not ll: continue - seqi = ll.SequenceIndex - if seqi in chaos: - # TODO Can we improve this? - pos_glyphs = None - else: - if seqi == 0: - pos_glyphs = frozenset(ClassDef.intersect_class(cur_glyphs, i)) - else: - pos_glyphs = frozenset(ClassDef.intersect_class(s.glyphs, getattr(r, c.Input)[seqi - 1])) - lookup = s.table.LookupList.Lookup[ll.LookupListIndex] - chaos.add(seqi) - if lookup.may_have_non_1to1(): - chaos.update(range(seqi, len(getattr(r, c.Input))+2)) - lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) - elif self.Format == 3: - if not all(x.intersect(s.glyphs) for x in c.RuleData(self)): - return [] - r = self - chaos = set() - for ll in getattr(r, c.LookupRecord): - if not ll: continue - seqi = ll.SequenceIndex - if seqi in chaos: - # TODO Can we improve this? 
- pos_glyphs = None - else: - if seqi == 0: - pos_glyphs = frozenset(cur_glyphs) - else: - pos_glyphs = frozenset(r.InputCoverage[seqi].intersect_glyphs(s.glyphs)) - lookup = s.table.LookupList.Lookup[ll.LookupListIndex] - chaos.add(seqi) - if lookup.may_have_non_1to1(): - chaos.update(range(seqi, len(r.InputCoverage)+1)) - lookup.closure_glyphs(s, cur_glyphs=pos_glyphs) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ContextSubst, - otTables.ContextPos, - otTables.ChainContextSubst, - otTables.ChainContextPos) -def subset_glyphs(self, s): - c = self.__subset_classify_context() - - if self.Format == 1: - indices = self.Coverage.subset(s.glyphs) - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - rss = [rss[i] for i in indices if i < rssCount] - for rs in rss: - if not rs: continue - ss = getattr(rs, c.Rule) - ss = [r for r in ss - if r and all(all(g in s.glyphs for g in glist) - for glist in c.RuleData(r))] - setattr(rs, c.Rule, ss) - setattr(rs, c.RuleCount, len(ss)) - # Prune empty rulesets - indices = [i for i,rs in enumerate(rss) if rs and getattr(rs, c.Rule)] - self.Coverage.remap(indices) - rss = [rss[i] for i in indices] - setattr(self, c.RuleSet, rss) - setattr(self, c.RuleSetCount, len(rss)) - return bool(rss) - elif self.Format == 2: - if not self.Coverage.subset(s.glyphs): - return False - ContextData = c.ContextData(self) - klass_maps = [x.subset(s.glyphs, remap=True) if x else None for x in ContextData] - - # Keep rulesets for class numbers that survived. - indices = klass_maps[c.ClassDefIndex] - rss = getattr(self, c.RuleSet) - rssCount = getattr(self, c.RuleSetCount) - rss = [rss[i] for i in indices if i < rssCount] - del rssCount - # Delete, but not renumber, unreachable rulesets. 
- indices = getattr(self, c.ClassDef).intersect(self.Coverage.glyphs) - rss = [rss if i in indices else None for i,rss in enumerate(rss)] - - for rs in rss: - if not rs: continue - ss = getattr(rs, c.Rule) - ss = [r for r in ss - if r and all(all(k in klass_map for k in klist) - for klass_map,klist in zip(klass_maps, c.RuleData(r)))] - setattr(rs, c.Rule, ss) - setattr(rs, c.RuleCount, len(ss)) - - # Remap rule classes - for r in ss: - c.SetRuleData(r, [[klass_map.index(k) for k in klist] - for klass_map,klist in zip(klass_maps, c.RuleData(r))]) - - # Prune empty rulesets - rss = [rs if rs and getattr(rs, c.Rule) else None for rs in rss] - while rss and rss[-1] is None: - del rss[-1] - setattr(self, c.RuleSet, rss) - setattr(self, c.RuleSetCount, len(rss)) - - # TODO: We can do a second round of remapping class values based - # on classes that are actually used in at least one rule. Right - # now we subset classes to c.glyphs only. Or better, rewrite - # the above to do that. - - return bool(rss) - elif self.Format == 3: - return all(x.subset(s.glyphs) for x in c.RuleData(self)) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def subset_lookups(self, lookup_indices): - c = self.__subset_classify_context() - - if self.Format in [1, 2]: - for rs in getattr(self, c.RuleSet): - if not rs: continue - for r in getattr(rs, c.Rule): - if not r: continue - setattr(r, c.LookupRecord, - [ll for ll in getattr(r, c.LookupRecord) - if ll and ll.LookupListIndex in lookup_indices]) - for ll in getattr(r, c.LookupRecord): - if not ll: continue - ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex) - elif self.Format == 3: - setattr(self, c.LookupRecord, - [ll for ll in getattr(self, c.LookupRecord) - if ll and ll.LookupListIndex in lookup_indices]) - for ll in getattr(self, c.LookupRecord): - if not ll: continue - ll.LookupListIndex = 
lookup_indices.index(ll.LookupListIndex) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos) -def collect_lookups(self): - c = self.__subset_classify_context() - - if self.Format in [1, 2]: - return [ll.LookupListIndex - for rs in getattr(self, c.RuleSet) if rs - for r in getattr(rs, c.Rule) if r - for ll in getattr(r, c.LookupRecord) if ll] - elif self.Format == 3: - return [ll.LookupListIndex - for ll in getattr(self, c.LookupRecord) if ll] - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst) -def closure_glyphs(self, s, cur_glyphs): - if self.Format == 1: - self.ExtSubTable.closure_glyphs(s, cur_glyphs) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst) -def may_have_non_1to1(self): - if self.Format == 1: - return self.ExtSubTable.may_have_non_1to1() - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def subset_glyphs(self, s): - if self.Format == 1: - return self.ExtSubTable.subset_glyphs(s) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def prune_post_subset(self, options): - if self.Format == 1: - return self.ExtSubTable.prune_post_subset(options) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def subset_lookups(self, lookup_indices): - if self.Format == 1: - return self.ExtSubTable.subset_lookups(lookup_indices) - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.ExtensionSubst, - otTables.ExtensionPos) -def collect_lookups(self): - if self.Format == 1: - return self.ExtSubTable.collect_lookups() - else: - assert 0, "unknown format: %s" % self.Format - -@_add_method(otTables.Lookup) -def 
closure_glyphs(self, s, cur_glyphs=None): - if cur_glyphs is None: - cur_glyphs = frozenset(s.glyphs) - - # Memoize - if (id(self), cur_glyphs) in s._doneLookups: - return - s._doneLookups.add((id(self), cur_glyphs)) - - if self in s._activeLookups: - raise Exception("Circular loop in lookup recursion") - s._activeLookups.append(self) - for st in self.SubTable: - if not st: continue - st.closure_glyphs(s, cur_glyphs) - assert(s._activeLookups[-1] == self) - del s._activeLookups[-1] - -@_add_method(otTables.Lookup) -def subset_glyphs(self, s): - self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)] - self.SubTableCount = len(self.SubTable) - return bool(self.SubTableCount) - -@_add_method(otTables.Lookup) -def prune_post_subset(self, options): - ret = False - for st in self.SubTable: - if not st: continue - if st.prune_post_subset(options): ret = True - return ret - -@_add_method(otTables.Lookup) -def subset_lookups(self, lookup_indices): - for s in self.SubTable: - s.subset_lookups(lookup_indices) - -@_add_method(otTables.Lookup) -def collect_lookups(self): - return _uniq_sort(sum((st.collect_lookups() for st in self.SubTable - if st), [])) - -@_add_method(otTables.Lookup) -def may_have_non_1to1(self): - return any(st.may_have_non_1to1() for st in self.SubTable if st) - -@_add_method(otTables.LookupList) -def subset_glyphs(self, s): - """Returns the indices of nonempty lookups.""" - return [i for i,l in enumerate(self.Lookup) if l and l.subset_glyphs(s)] - -@_add_method(otTables.LookupList) -def prune_post_subset(self, options): - ret = False - for l in self.Lookup: - if not l: continue - if l.prune_post_subset(options): ret = True - return ret - -@_add_method(otTables.LookupList) -def subset_lookups(self, lookup_indices): - self.ensureDecompiled() - self.Lookup = [self.Lookup[i] for i in lookup_indices - if i < self.LookupCount] - self.LookupCount = len(self.Lookup) - for l in self.Lookup: - l.subset_lookups(lookup_indices) - 
-@_add_method(otTables.LookupList) -def neuter_lookups(self, lookup_indices): - """Sets lookups not in lookup_indices to None.""" - self.ensureDecompiled() - self.Lookup = [l if i in lookup_indices else None for i,l in enumerate(self.Lookup)] - -@_add_method(otTables.LookupList) -def closure_lookups(self, lookup_indices): - lookup_indices = _uniq_sort(lookup_indices) - recurse = lookup_indices - while True: - recurse_lookups = sum((self.Lookup[i].collect_lookups() - for i in recurse if i < self.LookupCount), []) - recurse_lookups = [l for l in recurse_lookups - if l not in lookup_indices and l < self.LookupCount] - if not recurse_lookups: - return _uniq_sort(lookup_indices) - recurse_lookups = _uniq_sort(recurse_lookups) - lookup_indices.extend(recurse_lookups) - recurse = recurse_lookups - -@_add_method(otTables.Feature) -def subset_lookups(self, lookup_indices): - self.LookupListIndex = [l for l in self.LookupListIndex - if l in lookup_indices] - # Now map them. - self.LookupListIndex = [lookup_indices.index(l) - for l in self.LookupListIndex] - self.LookupCount = len(self.LookupListIndex) - return self.LookupCount or self.FeatureParams - -@_add_method(otTables.Feature) -def collect_lookups(self): - return self.LookupListIndex[:] - -@_add_method(otTables.FeatureList) -def subset_lookups(self, lookup_indices): - """Returns the indices of nonempty features.""" - # Note: Never ever drop feature 'pref', even if it's empty. - # HarfBuzz chooses shaper for Khmer based on presence of this - # feature. 
See thread at: - # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html - feature_indices = [i for i,f in enumerate(self.FeatureRecord) - if (f.Feature.subset_lookups(lookup_indices) or - f.FeatureTag == 'pref')] - self.subset_features(feature_indices) - return feature_indices - -@_add_method(otTables.FeatureList) -def collect_lookups(self, feature_indices): - return _uniq_sort(sum((self.FeatureRecord[i].Feature.collect_lookups() - for i in feature_indices - if i < self.FeatureCount), [])) - -@_add_method(otTables.FeatureList) -def subset_features(self, feature_indices): - self.ensureDecompiled() - self.FeatureRecord = [self.FeatureRecord[i] for i in feature_indices] - self.FeatureCount = len(self.FeatureRecord) - return bool(self.FeatureCount) - -@_add_method(otTables.DefaultLangSys, - otTables.LangSys) -def subset_features(self, feature_indices): - if self.ReqFeatureIndex in feature_indices: - self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex) - else: - self.ReqFeatureIndex = 65535 - self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices] - # Now map them. 
- self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex - if f in feature_indices] - self.FeatureCount = len(self.FeatureIndex) - return bool(self.FeatureCount or self.ReqFeatureIndex != 65535) - -@_add_method(otTables.DefaultLangSys, - otTables.LangSys) -def collect_features(self): - feature_indices = self.FeatureIndex[:] - if self.ReqFeatureIndex != 65535: - feature_indices.append(self.ReqFeatureIndex) - return _uniq_sort(feature_indices) - -@_add_method(otTables.Script) -def subset_features(self, feature_indices): - if(self.DefaultLangSys and - not self.DefaultLangSys.subset_features(feature_indices)): - self.DefaultLangSys = None - self.LangSysRecord = [l for l in self.LangSysRecord - if l.LangSys.subset_features(feature_indices)] - self.LangSysCount = len(self.LangSysRecord) - return bool(self.LangSysCount or self.DefaultLangSys) - -@_add_method(otTables.Script) -def collect_features(self): - feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord] - if self.DefaultLangSys: - feature_indices.append(self.DefaultLangSys.collect_features()) - return _uniq_sort(sum(feature_indices, [])) - -@_add_method(otTables.ScriptList) -def subset_features(self, feature_indices): - self.ScriptRecord = [s for s in self.ScriptRecord - if s.Script.subset_features(feature_indices)] - self.ScriptCount = len(self.ScriptRecord) - return bool(self.ScriptCount) - -@_add_method(otTables.ScriptList) -def collect_features(self): - return _uniq_sort(sum((s.Script.collect_features() - for s in self.ScriptRecord), [])) - -@_add_method(ttLib.getTableClass('GSUB')) -def closure_glyphs(self, s): - s.table = self.table - if self.table.ScriptList: - feature_indices = self.table.ScriptList.collect_features() - else: - feature_indices = [] - if self.table.FeatureList: - lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) - else: - lookup_indices = [] - if self.table.LookupList: - while True: - orig_glyphs = frozenset(s.glyphs) - 
s._activeLookups = [] - s._doneLookups = set() - for i in lookup_indices: - if i >= self.table.LookupList.LookupCount: continue - if not self.table.LookupList.Lookup[i]: continue - self.table.LookupList.Lookup[i].closure_glyphs(s) - del s._activeLookups, s._doneLookups - if orig_glyphs == s.glyphs: - break - del s.table - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def subset_glyphs(self, s): - s.glyphs = s.glyphs_gsubed - if self.table.LookupList: - lookup_indices = self.table.LookupList.subset_glyphs(s) - else: - lookup_indices = [] - self.subset_lookups(lookup_indices) - return True - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def subset_lookups(self, lookup_indices): - """Retains specified lookups, then removes empty features, language - systems, and scripts.""" - if self.table.LookupList: - self.table.LookupList.subset_lookups(lookup_indices) - if self.table.FeatureList: - feature_indices = self.table.FeatureList.subset_lookups(lookup_indices) - else: - feature_indices = [] - if self.table.ScriptList: - self.table.ScriptList.subset_features(feature_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def neuter_lookups(self, lookup_indices): - """Sets lookups not in lookup_indices to None.""" - if self.table.LookupList: - self.table.LookupList.neuter_lookups(lookup_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_lookups(self, remap=True): - """Remove (default) or neuter unreferenced lookups""" - if self.table.ScriptList: - feature_indices = self.table.ScriptList.collect_features() - else: - feature_indices = [] - if self.table.FeatureList: - lookup_indices = self.table.FeatureList.collect_lookups(feature_indices) - else: - lookup_indices = [] - if self.table.LookupList: - lookup_indices = self.table.LookupList.closure_lookups(lookup_indices) - else: - lookup_indices = [] - if remap: - 
self.subset_lookups(lookup_indices) - else: - self.neuter_lookups(lookup_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def subset_feature_tags(self, feature_tags): - if self.table.FeatureList: - feature_indices = \ - [i for i,f in enumerate(self.table.FeatureList.FeatureRecord) - if f.FeatureTag in feature_tags] - self.table.FeatureList.subset_features(feature_indices) - else: - feature_indices = [] - if self.table.ScriptList: - self.table.ScriptList.subset_features(feature_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_features(self): - """Remove unreferenced features""" - if self.table.ScriptList: - feature_indices = self.table.ScriptList.collect_features() - else: - feature_indices = [] - if self.table.FeatureList: - self.table.FeatureList.subset_features(feature_indices) - if self.table.ScriptList: - self.table.ScriptList.subset_features(feature_indices) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_pre_subset(self, options): - # Drop undesired features - if '*' not in options.layout_features: - self.subset_feature_tags(options.layout_features) - # Neuter unreferenced lookups - self.prune_lookups(remap=False) - return True - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def remove_redundant_langsys(self): - table = self.table - if not table.ScriptList or not table.FeatureList: - return - - features = table.FeatureList.FeatureRecord - - for s in table.ScriptList.ScriptRecord: - d = s.Script.DefaultLangSys - if not d: - continue - for lr in s.Script.LangSysRecord[:]: - l = lr.LangSys - # Compare d and l - if len(d.FeatureIndex) != len(l.FeatureIndex): - continue - if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535): - continue - - if d.ReqFeatureIndex != 65535: - if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]: - continue - - for i in range(len(d.FeatureIndex)): - if 
features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]: - break - else: - # LangSys and default are equal; delete LangSys - s.Script.LangSysRecord.remove(lr) - -@_add_method(ttLib.getTableClass('GSUB'), - ttLib.getTableClass('GPOS')) -def prune_post_subset(self, options): - table = self.table - - self.prune_lookups() # XXX Is this actually needed?! - - if table.LookupList: - table.LookupList.prune_post_subset(options) - # XXX Next two lines disabled because OTS is stupid and - # doesn't like NULL offsets here. - #if not table.LookupList.Lookup: - # table.LookupList = None - - if not table.LookupList: - table.FeatureList = None - - if table.FeatureList: - self.remove_redundant_langsys() - # Remove unreferenced features - self.prune_features() - - # XXX Next two lines disabled because OTS is stupid and - # doesn't like NULL offsets here. - #if table.FeatureList and not table.FeatureList.FeatureRecord: - # table.FeatureList = None - - # Never drop scripts themselves as them just being available - # holds semantic significance. - # XXX Next two lines disabled because OTS is stupid and - # doesn't like NULL offsets here. 
- #if table.ScriptList and not table.ScriptList.ScriptRecord: - # table.ScriptList = None - - return True - -@_add_method(ttLib.getTableClass('GDEF')) -def subset_glyphs(self, s): - glyphs = s.glyphs_gsubed - table = self.table - if table.LigCaretList: - indices = table.LigCaretList.Coverage.subset(glyphs) - table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i] - for i in indices] - table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph) - if table.MarkAttachClassDef: - table.MarkAttachClassDef.classDefs = \ - {g:v for g,v in table.MarkAttachClassDef.classDefs.items() - if g in glyphs} - if table.GlyphClassDef: - table.GlyphClassDef.classDefs = \ - {g:v for g,v in table.GlyphClassDef.classDefs.items() - if g in glyphs} - if table.AttachList: - indices = table.AttachList.Coverage.subset(glyphs) - GlyphCount = table.AttachList.GlyphCount - table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i] - for i in indices - if i < GlyphCount] - table.AttachList.GlyphCount = len(table.AttachList.AttachPoint) - if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef: - for coverage in table.MarkGlyphSetsDef.Coverage: - coverage.subset(glyphs) - # TODO: The following is disabled. If enabling, we need to go fixup all - # lookups that use MarkFilteringSet and map their set. 
- # indices = table.MarkGlyphSetsDef.Coverage = \ - # [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs] - return True - -@_add_method(ttLib.getTableClass('GDEF')) -def prune_post_subset(self, options): - table = self.table - # XXX check these against OTS - if table.LigCaretList and not table.LigCaretList.LigGlyphCount: - table.LigCaretList = None - if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs: - table.MarkAttachClassDef = None - if table.GlyphClassDef and not table.GlyphClassDef.classDefs: - table.GlyphClassDef = None - if table.AttachList and not table.AttachList.GlyphCount: - table.AttachList = None - if (hasattr(table, "MarkGlyphSetsDef") and - table.MarkGlyphSetsDef and - not table.MarkGlyphSetsDef.Coverage): - table.MarkGlyphSetsDef = None - if table.Version == 0x00010002/0x10000: - table.Version = 1.0 - return bool(table.LigCaretList or - table.MarkAttachClassDef or - table.GlyphClassDef or - table.AttachList or - (table.Version >= 0x00010002/0x10000 and table.MarkGlyphSetsDef)) - -@_add_method(ttLib.getTableClass('kern')) -def prune_pre_subset(self, options): - # Prune unknown kern table types - self.kernTables = [t for t in self.kernTables if hasattr(t, 'kernTable')] - return bool(self.kernTables) - -@_add_method(ttLib.getTableClass('kern')) -def subset_glyphs(self, s): - glyphs = s.glyphs_gsubed - for t in self.kernTables: - t.kernTable = {(a,b):v for (a,b),v in t.kernTable.items() - if a in glyphs and b in glyphs} - self.kernTables = [t for t in self.kernTables if t.kernTable] - return bool(self.kernTables) - -@_add_method(ttLib.getTableClass('vmtx')) -def subset_glyphs(self, s): - self.metrics = _dict_subset(self.metrics, s.glyphs) - return bool(self.metrics) - -@_add_method(ttLib.getTableClass('hmtx')) -def subset_glyphs(self, s): - self.metrics = _dict_subset(self.metrics, s.glyphs) - return True # Required table - -@_add_method(ttLib.getTableClass('hdmx')) -def subset_glyphs(self, s): - self.hdmx = {sz:_dict_subset(l, 
s.glyphs) for sz,l in self.hdmx.items()} - return bool(self.hdmx) - -@_add_method(ttLib.getTableClass('VORG')) -def subset_glyphs(self, s): - self.VOriginRecords = {g:v for g,v in self.VOriginRecords.items() - if g in s.glyphs} - self.numVertOriginYMetrics = len(self.VOriginRecords) - return True # Never drop; has default metrics - -@_add_method(ttLib.getTableClass('post')) -def prune_pre_subset(self, options): - if not options.glyph_names: - self.formatType = 3.0 - return True # Required table - -@_add_method(ttLib.getTableClass('post')) -def subset_glyphs(self, s): - self.extraNames = [] # This seems to do it - return True # Required table - -@_add_method(ttLib.getTableModule('glyf').Glyph) -def remapComponentsFast(self, indices): - if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: - return # Not composite - data = array.array("B", self.data) - i = 10 - more = 1 - while more: - flags =(data[i] << 8) | data[i+1] - glyphID =(data[i+2] << 8) | data[i+3] - # Remap - glyphID = indices.index(glyphID) - data[i+2] = glyphID >> 8 - data[i+3] = glyphID & 0xFF - i += 4 - flags = int(flags) - - if flags & 0x0001: i += 4 # ARG_1_AND_2_ARE_WORDS - else: i += 2 - if flags & 0x0008: i += 2 # WE_HAVE_A_SCALE - elif flags & 0x0040: i += 4 # WE_HAVE_AN_X_AND_Y_SCALE - elif flags & 0x0080: i += 8 # WE_HAVE_A_TWO_BY_TWO - more = flags & 0x0020 # MORE_COMPONENTS - - self.data = data.tostring() - -@_add_method(ttLib.getTableClass('glyf')) -def closure_glyphs(self, s): - decompose = s.glyphs - while True: - components = set() - for g in decompose: - if g not in self.glyphs: - continue - gl = self.glyphs[g] - for c in gl.getComponentNames(self): - if c not in s.glyphs: - components.add(c) - components = set(c for c in components if c not in s.glyphs) - if not components: - break - decompose = components - s.glyphs.update(components) - -@_add_method(ttLib.getTableClass('glyf')) -def prune_pre_subset(self, options): - if options.notdef_glyph and not options.notdef_outline: - 
g = self[self.glyphOrder[0]] - # Yay, easy! - g.__dict__.clear() - g.data = "" - return True - -@_add_method(ttLib.getTableClass('glyf')) -def subset_glyphs(self, s): - self.glyphs = _dict_subset(self.glyphs, s.glyphs) - indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs] - for v in self.glyphs.values(): - if hasattr(v, "data"): - v.remapComponentsFast(indices) - else: - pass # No need - self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs] - # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset. - return True - -@_add_method(ttLib.getTableClass('glyf')) -def prune_post_subset(self, options): - remove_hinting = not options.hinting - for v in self.glyphs.values(): - v.trim(remove_hinting=remove_hinting) - return True - -@_add_method(ttLib.getTableClass('CFF ')) -def prune_pre_subset(self, options): - cff = self.cff - # CFF table must have one font only - cff.fontNames = cff.fontNames[:1] - - if options.notdef_glyph and not options.notdef_outline: - for fontname in cff.keys(): - font = cff[fontname] - c,_ = font.CharStrings.getItemAndSelector('.notdef') - # XXX we should preserve the glyph width - c.bytecode = '\x0e' # endchar - c.program = None - - return True # bool(cff.fontNames) - -@_add_method(ttLib.getTableClass('CFF ')) -def subset_glyphs(self, s): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - - # Load all glyphs - for g in font.charset: - if g not in s.glyphs: continue - c,sel = cs.getItemAndSelector(g) - - if cs.charStringsAreIndexed: - indices = [i for i,g in enumerate(font.charset) if g in s.glyphs] - csi = cs.charStringsIndex - csi.items = [csi.items[i] for i in indices] - del csi.file, csi.offsets - if hasattr(font, "FDSelect"): - sel = font.FDSelect - # XXX We want to set sel.format to None, such that the - # most compact format is selected. However, OTS was - # broken and couldn't parse a FDSelect format 0 that - # happened before CharStrings. 
As such, always force - # format 3 until we fix cffLib to always generate - # FDSelect after CharStrings. - # https://github.com/khaledhosny/ots/pull/31 - #sel.format = None - sel.format = 3 - sel.gidArray = [sel.gidArray[i] for i in indices] - cs.charStrings = {g:indices.index(v) - for g,v in cs.charStrings.items() - if g in s.glyphs} - else: - cs.charStrings = {g:v - for g,v in cs.charStrings.items() - if g in s.glyphs} - font.charset = [g for g in font.charset if g in s.glyphs] - font.numGlyphs = len(font.charset) - - return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) - -@_add_method(psCharStrings.T2CharString) -def subset_subroutines(self, subrs, gsubrs): - p = self.program - assert len(p) - for i in range(1, len(p)): - if p[i] == 'callsubr': - assert isinstance(p[i-1], int) - p[i-1] = subrs._used.index(p[i-1] + subrs._old_bias) - subrs._new_bias - elif p[i] == 'callgsubr': - assert isinstance(p[i-1], int) - p[i-1] = gsubrs._used.index(p[i-1] + gsubrs._old_bias) - gsubrs._new_bias - -@_add_method(psCharStrings.T2CharString) -def drop_hints(self): - hints = self._hints - - if hints.has_hint: - self.program = self.program[hints.last_hint:] - if hasattr(self, 'width'): - # Insert width back if needed - if self.width != self.private.defaultWidthX: - self.program.insert(0, self.width - self.private.nominalWidthX) - - if hints.has_hintmask: - i = 0 - p = self.program - while i < len(p): - if p[i] in ['hintmask', 'cntrmask']: - assert i + 1 <= len(p) - del p[i:i+2] - continue - i += 1 - - # TODO: we currently don't drop calls to "empty" subroutines. 
- - assert len(self.program) - - del self._hints - -class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): - - def __init__(self, localSubrs, globalSubrs): - psCharStrings.SimpleT2Decompiler.__init__(self, - localSubrs, - globalSubrs) - for subrs in [localSubrs, globalSubrs]: - if subrs and not hasattr(subrs, "_used"): - subrs._used = set() - - def op_callsubr(self, index): - self.localSubrs._used.add(self.operandStack[-1]+self.localBias) - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - - def op_callgsubr(self, index): - self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias) - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - -class _DehintingT2Decompiler(psCharStrings.SimpleT2Decompiler): - - class Hints(object): - def __init__(self): - # Whether calling this charstring produces any hint stems - self.has_hint = False - # Index to start at to drop all hints - self.last_hint = 0 - # Index up to which we know more hints are possible. - # Only relevant if status is 0 or 1. - self.last_checked = 0 - # The status means: - # 0: after dropping hints, this charstring is empty - # 1: after dropping hints, there may be more hints - # continuing after this - # 2: no more hints possible after this charstring - self.status = 0 - # Has hintmask instructions; not recursive - self.has_hintmask = False - pass - - def __init__(self, css, localSubrs, globalSubrs): - self._css = css - psCharStrings.SimpleT2Decompiler.__init__(self, - localSubrs, - globalSubrs) - - def execute(self, charString): - old_hints = charString._hints if hasattr(charString, '_hints') else None - charString._hints = self.Hints() - - psCharStrings.SimpleT2Decompiler.execute(self, charString) - - hints = charString._hints - - if hints.has_hint or hints.has_hintmask: - self._css.add(charString) - - if hints.status != 2: - # Check from last_check, make sure we didn't have any operators. 
- for i in range(hints.last_checked, len(charString.program) - 1): - if isinstance(charString.program[i], str): - hints.status = 2 - break - else: - hints.status = 1 # There's *something* here - hints.last_checked = len(charString.program) - - if old_hints: - assert hints.__dict__ == old_hints.__dict__ - - def op_callsubr(self, index): - subr = self.localSubrs[self.operandStack[-1]+self.localBias] - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - self.processSubr(index, subr) - - def op_callgsubr(self, index): - subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - self.processSubr(index, subr) - - def op_hstem(self, index): - psCharStrings.SimpleT2Decompiler.op_hstem(self, index) - self.processHint(index) - def op_vstem(self, index): - psCharStrings.SimpleT2Decompiler.op_vstem(self, index) - self.processHint(index) - def op_hstemhm(self, index): - psCharStrings.SimpleT2Decompiler.op_hstemhm(self, index) - self.processHint(index) - def op_vstemhm(self, index): - psCharStrings.SimpleT2Decompiler.op_vstemhm(self, index) - self.processHint(index) - def op_hintmask(self, index): - psCharStrings.SimpleT2Decompiler.op_hintmask(self, index) - self.processHintmask(index) - def op_cntrmask(self, index): - psCharStrings.SimpleT2Decompiler.op_cntrmask(self, index) - self.processHintmask(index) - - def processHintmask(self, index): - cs = self.callingStack[-1] - hints = cs._hints - hints.has_hintmask = True - if hints.status != 2 and hints.has_hint: - # Check from last_check, see if we may be an implicit vstem - for i in range(hints.last_checked, index - 1): - if isinstance(cs.program[i], str): - hints.status = 2 - break - if hints.status != 2: - # We are an implicit vstem - hints.last_hint = index + 1 - hints.status = 0 - hints.last_checked = index + 1 - - def processHint(self, index): - cs = self.callingStack[-1] - hints = cs._hints - hints.has_hint = True - hints.last_hint = index - 
hints.last_checked = index - - def processSubr(self, index, subr): - cs = self.callingStack[-1] - hints = cs._hints - subr_hints = subr._hints - - if subr_hints.has_hint: - if hints.status != 2: - hints.has_hint = True - hints.last_checked = index - hints.status = subr_hints.status - # Decide where to chop off from - if subr_hints.status == 0: - hints.last_hint = index - else: - hints.last_hint = index - 2 # Leave the subr call in - else: - # In my understanding, this is a font bug. - # I.e., it has hint stems *after* path construction. - # I've seen this in widespread fonts. - # Best to ignore the hints I suppose... - pass - #assert 0 - else: - hints.status = max(hints.status, subr_hints.status) - if hints.status != 2: - # Check from last_check, make sure we didn't have - # any operators. - for i in range(hints.last_checked, index - 1): - if isinstance(cs.program[i], str): - hints.status = 2 - break - hints.last_checked = index - if hints.status != 2: - # Decide where to chop off from - if subr_hints.status == 0: - hints.last_hint = index - else: - hints.last_hint = index - 2 # Leave the subr call in - -class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler): - - def __init__(self, localSubrs, globalSubrs): - psCharStrings.SimpleT2Decompiler.__init__(self, - localSubrs, - globalSubrs) - - def execute(self, charString): - # Note: Currently we recompute _desubroutinized each time. - # This is more robust in some cases, but in other places we assume - # that each subroutine always expands to the same code, so - # maybe it doesn't matter. To speed up we can just not - # recompute _desubroutinized if it's there. For now I just - # double-check that it desubroutinized to the same thing. 
- old_desubroutinized = charString._desubroutinized if hasattr(charString, '_desubroutinized') else None - - charString._patches = [] - psCharStrings.SimpleT2Decompiler.execute(self, charString) - desubroutinized = charString.program[:] - for idx,expansion in reversed (charString._patches): - assert idx >= 2 - assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1] - assert type(desubroutinized[idx - 2]) == int - if expansion[-1] == 'return': - expansion = expansion[:-1] - desubroutinized[idx-2:idx] = expansion - if 'endchar' in desubroutinized: - # Cut off after first endchar - desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1] - else: - if not len(desubroutinized) or desubroutinized[-1] != 'return': - desubroutinized.append('return') - - charString._desubroutinized = desubroutinized - del charString._patches - - if old_desubroutinized: - assert desubroutinized == old_desubroutinized - - def op_callsubr(self, index): - subr = self.localSubrs[self.operandStack[-1]+self.localBias] - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - self.processSubr(index, subr) - - def op_callgsubr(self, index): - subr = self.globalSubrs[self.operandStack[-1]+self.globalBias] - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - self.processSubr(index, subr) - - def processSubr(self, index, subr): - cs = self.callingStack[-1] - cs._patches.append((index, subr._desubroutinized)) - - -@_add_method(ttLib.getTableClass('CFF ')) -def prune_post_subset(self, options): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - - # Drop unused FontDictionaries - if hasattr(font, "FDSelect"): - sel = font.FDSelect - indices = _uniq_sort(sel.gidArray) - sel.gidArray = [indices.index (ss) for ss in sel.gidArray] - arr = font.FDArray - arr.items = [arr[i] for i in indices] - del arr.file, arr.offsets - - # Desubroutinize if asked for - if options.desubroutinize: - for g in font.charset: 
- c,sel = cs.getItemAndSelector(g) - c.decompile() - subrs = getattr(c.private, "Subrs", []) - decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs) - decompiler.execute(c) - c.program = c._desubroutinized - - # Drop hints if not needed - if not options.hinting: - - # This can be tricky, but doesn't have to. What we do is: - # - # - Run all used glyph charstrings and recurse into subroutines, - # - For each charstring (including subroutines), if it has any - # of the hint stem operators, we mark it as such. - # Upon returning, for each charstring we note all the - # subroutine calls it makes that (recursively) contain a stem, - # - Dropping hinting then consists of the following two ops: - # * Drop the piece of the program in each charstring before the - # last call to a stem op or a stem-calling subroutine, - # * Drop all hintmask operations. - # - It's trickier... A hintmask right after hints and a few numbers - # will act as an implicit vstemhm. As such, we track whether - # we have seen any non-hint operators so far and do the right - # thing, recursively... 
Good luck understanding that :( - css = set() - for g in font.charset: - c,sel = cs.getItemAndSelector(g) - c.decompile() - subrs = getattr(c.private, "Subrs", []) - decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs) - decompiler.execute(c) - for charstring in css: - charstring.drop_hints() - del css - - # Drop font-wide hinting values - all_privs = [] - if hasattr(font, 'FDSelect'): - all_privs.extend(fd.Private for fd in font.FDArray) - else: - all_privs.append(font.Private) - for priv in all_privs: - for k in ['BlueValues', 'OtherBlues', - 'FamilyBlues', 'FamilyOtherBlues', - 'BlueScale', 'BlueShift', 'BlueFuzz', - 'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW']: - if hasattr(priv, k): - setattr(priv, k, None) - - # Renumber subroutines to remove unused ones - - # Mark all used subroutines - for g in font.charset: - c,sel = cs.getItemAndSelector(g) - subrs = getattr(c.private, "Subrs", []) - decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs) - decompiler.execute(c) - - all_subrs = [font.GlobalSubrs] - if hasattr(font, 'FDSelect'): - all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs) - elif hasattr(font.Private, 'Subrs') and font.Private.Subrs: - all_subrs.append(font.Private.Subrs) - - subrs = set(subrs) # Remove duplicates - - # Prepare - for subrs in all_subrs: - if not hasattr(subrs, '_used'): - subrs._used = set() - subrs._used = _uniq_sort(subrs._used) - subrs._old_bias = psCharStrings.calcSubrBias(subrs) - subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) - - # Renumber glyph charstrings - for g in font.charset: - c,sel = cs.getItemAndSelector(g) - subrs = getattr(c.private, "Subrs", []) - c.subset_subroutines (subrs, font.GlobalSubrs) - - # Renumber subroutines themselves - for subrs in all_subrs: - if subrs == font.GlobalSubrs: - if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'): - local_subrs = font.Private.Subrs - else: - local_subrs = [] - else: - 
local_subrs = subrs - - subrs.items = [subrs.items[i] for i in subrs._used] - del subrs.file - if hasattr(subrs, 'offsets'): - del subrs.offsets - - for subr in subrs.items: - subr.subset_subroutines (local_subrs, font.GlobalSubrs) - - # Cleanup - for subrs in all_subrs: - del subrs._used, subrs._old_bias, subrs._new_bias - - return True - -@_add_method(ttLib.getTableClass('cmap')) -def closure_glyphs(self, s): - tables = [t for t in self.tables if t.isUnicode()] - - # Close glyphs - for table in tables: - if table.format == 14: - for cmap in table.uvsDict.values(): - glyphs = {g for u,g in cmap if u in s.unicodes_requested} - if None in glyphs: - glyphs.remove(None) - s.glyphs.update(glyphs) - else: - cmap = table.cmap - intersection = s.unicodes_requested.intersection(cmap.keys()) - s.glyphs.update(cmap[u] for u in intersection) - - # Calculate unicodes_missing - s.unicodes_missing = s.unicodes_requested.copy() - for table in tables: - s.unicodes_missing.difference_update(table.cmap) - -@_add_method(ttLib.getTableClass('cmap')) -def prune_pre_subset(self, options): - if not options.legacy_cmap: - # Drop non-Unicode / non-Symbol cmaps - self.tables = [t for t in self.tables if t.isUnicode() or t.isSymbol()] - if not options.symbol_cmap: - self.tables = [t for t in self.tables if not t.isSymbol()] - # TODO(behdad) Only keep one subtable? - # For now, drop format=0 which can't be subset_glyphs easily? - self.tables = [t for t in self.tables if t.format != 0] - self.numSubTables = len(self.tables) - return True # Required table - -@_add_method(ttLib.getTableClass('cmap')) -def subset_glyphs(self, s): - s.glyphs = None # We use s.glyphs_requested and s.unicodes_requested only - for t in self.tables: - if t.format == 14: - # TODO(behdad) We drop all the default-UVS mappings - # for glyphs_requested. So it's the caller's responsibility to make - # sure those are included. 
- t.uvsDict = {v:[(u,g) for u,g in l - if g in s.glyphs_requested or u in s.unicodes_requested] - for v,l in t.uvsDict.items()} - t.uvsDict = {v:l for v,l in t.uvsDict.items() if l} - elif t.isUnicode(): - t.cmap = {u:g for u,g in t.cmap.items() - if g in s.glyphs_requested or u in s.unicodes_requested} - else: - t.cmap = {u:g for u,g in t.cmap.items() - if g in s.glyphs_requested} - self.tables = [t for t in self.tables - if (t.cmap if t.format != 14 else t.uvsDict)] - self.numSubTables = len(self.tables) - # TODO(behdad) Convert formats when needed. - # In particular, if we have a format=12 without non-BMP - # characters, either drop format=12 one or convert it - # to format=4 if there's not one. - return True # Required table - -@_add_method(ttLib.getTableClass('DSIG')) -def prune_pre_subset(self, options): - # Drop all signatures since they will be invalid - self.usNumSigs = 0 - self.signatureRecords = [] - return True - -@_add_method(ttLib.getTableClass('maxp')) -def prune_pre_subset(self, options): - if not options.hinting: - if self.tableVersion == 0x00010000: - self.maxZones = 1 - self.maxTwilightPoints = 0 - self.maxFunctionDefs = 0 - self.maxInstructionDefs = 0 - self.maxStackElements = 0 - self.maxSizeOfInstructions = 0 - return True - -@_add_method(ttLib.getTableClass('name')) -def prune_pre_subset(self, options): - if '*' not in options.name_IDs: - self.names = [n for n in self.names if n.nameID in options.name_IDs] - if not options.name_legacy: - # TODO(behdad) Sometimes (eg Apple Color Emoji) there's only a macroman - # entry for Latin and no Unicode names. - self.names = [n for n in self.names if n.isUnicode()] - # TODO(behdad) Option to keep only one platform's - if '*' not in options.name_languages: - # TODO(behdad) This is Windows-platform specific! 
- self.names = [n for n in self.names - if n.langID in options.name_languages] - if options.obfuscate_names: - namerecs = [] - for n in self.names: - if n.nameID in [1, 4]: - n.string = ".\x7f".encode('utf_16_be') if n.isUnicode() else ".\x7f" - elif n.nameID in [2, 6]: - n.string = "\x7f".encode('utf_16_be') if n.isUnicode() else "\x7f" - elif n.nameID == 3: - n.string = "" - elif n.nameID in [16, 17, 18]: - continue - namerecs.append(n) - self.names = namerecs - return True # Required table - - -# TODO(behdad) OS/2 ulUnicodeRange / ulCodePageRange? -# TODO(behdad) Drop AAT tables. -# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries. -# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left -# TODO(behdad) Drop GDEF subitems if unused by lookups -# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF) -# TODO(behdad) Text direction considerations. -# TODO(behdad) Text script / language considerations. -# TODO(behdad) Optionally drop 'kern' table if GPOS available -# TODO(behdad) Implement --unicode='*' to choose all cmap'ed -# TODO(behdad) Drop old-spec Indic scripts - - -class Options(object): - - class OptionError(Exception): pass - class UnknownOptionError(OptionError): pass - - _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', - 'EBSC', 'SVG ', 'PCLT', 'LTSH'] - _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill'] # Graphite - _drop_tables_default += ['CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL'] # Color - _no_subset_tables_default = ['gasp', 'head', 'hhea', 'maxp', - 'vhea', 'OS/2', 'loca', 'name', 'cvt ', - 'fpgm', 'prep', 'VDMX', 'DSIG'] - _hinting_tables_default = ['cvt ', 'fpgm', 'prep', 'hdmx', 'VDMX'] - - # Based on HarfBuzz shapers - _layout_features_groups = { - # Default shaper - 'common': ['ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'], - 'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'], - 'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'], - 'ltr': ['ltra', 'ltrm'], - 'rtl': ['rtla', 
'rtlm'], - # Complex shapers - 'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3', - 'cswh', 'mset'], - 'hangul': ['ljmo', 'vjmo', 'tjmo'], - 'tibetan': ['abvs', 'blws', 'abvm', 'blwm'], - 'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half', - 'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres', - 'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'], - } - _layout_features_default = _uniq_sort(sum( - iter(_layout_features_groups.values()), [])) - - drop_tables = _drop_tables_default - no_subset_tables = _no_subset_tables_default - hinting_tables = _hinting_tables_default - legacy_kern = False # drop 'kern' table if GPOS available - layout_features = _layout_features_default - ignore_missing_glyphs = False - ignore_missing_unicodes = True - hinting = True - glyph_names = False - legacy_cmap = False - symbol_cmap = False - name_IDs = [1, 2] # Family and Style - name_legacy = False - name_languages = [0x0409] # English - obfuscate_names = False # to make webfont unusable as a system font - notdef_glyph = True # gid0 for TrueType / .notdef for CFF - notdef_outline = False # No need for notdef to have an outline really - recommended_glyphs = False # gid1, gid2, gid3 for TrueType - recalc_bounds = False # Recalculate font bounding boxes - recalc_timestamp = False # Recalculate font modified timestamp - canonical_order = False # Order tables as recommended - flavor = None # May be 'woff' or 'woff2' - desubroutinize = False # Desubroutinize CFF CharStrings - - def __init__(self, **kwargs): - self.set(**kwargs) - - def set(self, **kwargs): - for k,v in kwargs.items(): - if not hasattr(self, k): - raise self.UnknownOptionError("Unknown option '%s'" % k) - setattr(self, k, v) - - def parse_opts(self, argv, ignore_unknown=False): - ret = [] - for a in argv: - orig_a = a - if not a.startswith('--'): - ret.append(a) - continue - a = a[2:] - i = a.find('=') - op = '=' - if i == -1: - if a.startswith("no-"): - k = a[3:] - v = False - else: - k = a 
- v = True - if k.endswith("?"): - k = k[:-1] - v = '?' - else: - k = a[:i] - if k[-1] in "-+": - op = k[-1]+'=' # Op is '-=' or '+=' now. - k = k[:-1] - v = a[i+1:] - ok = k - k = k.replace('-', '_') - if not hasattr(self, k): - if ignore_unknown is True or ok in ignore_unknown: - ret.append(orig_a) - continue - else: - raise self.UnknownOptionError("Unknown option '%s'" % a) - - ov = getattr(self, k) - if v == '?': - print("Current setting for '%s' is: %s" % (ok, ov)) - continue - if isinstance(ov, bool): - v = bool(v) - elif isinstance(ov, int): - v = int(v) - elif isinstance(ov, str): - v = str(v) # redundant - elif isinstance(ov, list): - if isinstance(v, bool): - raise self.OptionError("Option '%s' requires values to be specified using '='" % a) - vv = v.replace(',', ' ').split() - if vv == ['']: - vv = [] - vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv] - if op == '=': - v = vv - elif op == '+=': - v = ov - v.extend(vv) - elif op == '-=': - v = ov - for x in vv: - if x in v: - v.remove(x) - else: - assert False - - setattr(self, k, v) - - return ret - - -class Subsetter(object): - - class SubsettingError(Exception): pass - class MissingGlyphsSubsettingError(SubsettingError): pass - class MissingUnicodesSubsettingError(SubsettingError): pass - - def __init__(self, options=None, log=None): - - if not log: - log = Logger() - if not options: - options = Options() - - self.options = options - self.log = log - self.unicodes_requested = set() - self.glyph_names_requested = set() - self.glyph_ids_requested = set() - - def populate(self, glyphs=[], gids=[], unicodes=[], text=""): - self.unicodes_requested.update(unicodes) - if isinstance(text, bytes): - text = text.decode("utf_8") - for u in text: - self.unicodes_requested.add(ord(u)) - self.glyph_names_requested.update(glyphs) - self.glyph_ids_requested.update(gids) - - def _prune_pre_subset(self, font): - - for tag in font.keys(): - if tag == 'GlyphOrder': continue - - if(tag in 
self.options.drop_tables or - (tag in self.options.hinting_tables and not self.options.hinting) or - (tag == 'kern' and (not self.options.legacy_kern and 'GPOS' in font))): - self.log(tag, "dropped") - del font[tag] - continue - - clazz = ttLib.getTableClass(tag) - - if hasattr(clazz, 'prune_pre_subset'): - table = font[tag] - self.log.lapse("load '%s'" % tag) - retain = table.prune_pre_subset(self.options) - self.log.lapse("prune '%s'" % tag) - if not retain: - self.log(tag, "pruned to empty; dropped") - del font[tag] - continue - else: - self.log(tag, "pruned") - - def _closure_glyphs(self, font): - - realGlyphs = set(font.getGlyphOrder()) - glyph_order = font.getGlyphOrder() - - self.glyphs_requested = set() - self.glyphs_requested.update(self.glyph_names_requested) - self.glyphs_requested.update(glyph_order[i] - for i in self.glyph_ids_requested - if i < len(glyph_order)) - - self.glyphs_missing = set() - self.glyphs_missing.update(self.glyphs_requested.difference(realGlyphs)) - self.glyphs_missing.update(i for i in self.glyph_ids_requested - if i >= len(glyph_order)) - if self.glyphs_missing: - self.log("Missing requested glyphs: %s" % self.glyphs_missing) - if not self.options.ignore_missing_glyphs: - raise self.MissingGlyphsSubsettingError(self.glyphs_missing) - - self.glyphs = self.glyphs_requested.copy() - - self.unicodes_missing = set() - if 'cmap' in font: - font['cmap'].closure_glyphs(self) - self.glyphs.intersection_update(realGlyphs) - self.log.lapse("close glyph list over 'cmap'") - self.glyphs_cmaped = frozenset(self.glyphs) - if self.unicodes_missing: - missing = ["U+%04X" % u for u in self.unicodes_missing] - self.log("Missing glyphs for requested Unicodes: %s" % missing) - if not self.options.ignore_missing_unicodes: - raise self.MissingUnicodesSubsettingError(missing) - del missing - - if self.options.notdef_glyph: - if 'glyf' in font: - self.glyphs.add(font.getGlyphName(0)) - self.log("Added gid0 to subset") - else: - self.glyphs.add('.notdef') 
- self.log("Added .notdef to subset") - if self.options.recommended_glyphs: - if 'glyf' in font: - for i in range(min(4, len(font.getGlyphOrder()))): - self.glyphs.add(font.getGlyphName(i)) - self.log("Added first four glyphs to subset") - - if 'GSUB' in font: - self.log("Closing glyph list over 'GSUB': %d glyphs before" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - font['GSUB'].closure_glyphs(self) - self.glyphs.intersection_update(realGlyphs) - self.log("Closed glyph list over 'GSUB': %d glyphs after" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - self.log.lapse("close glyph list over 'GSUB'") - self.glyphs_gsubed = frozenset(self.glyphs) - - if 'glyf' in font: - self.log("Closing glyph list over 'glyf': %d glyphs before" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - font['glyf'].closure_glyphs(self) - self.glyphs.intersection_update(realGlyphs) - self.log("Closed glyph list over 'glyf': %d glyphs after" % - len(self.glyphs)) - self.log.glyphs(self.glyphs, font=font) - self.log.lapse("close glyph list over 'glyf'") - self.glyphs_glyfed = frozenset(self.glyphs) - - self.glyphs_all = frozenset(self.glyphs) - - self.log("Retaining %d glyphs: " % len(self.glyphs_all)) - - del self.glyphs - - def _subset_glyphs(self, font): - for tag in font.keys(): - if tag == 'GlyphOrder': continue - clazz = ttLib.getTableClass(tag) - - if tag in self.options.no_subset_tables: - self.log(tag, "subsetting not needed") - elif hasattr(clazz, 'subset_glyphs'): - table = font[tag] - self.glyphs = self.glyphs_all - retain = table.subset_glyphs(self) - del self.glyphs - self.log.lapse("subset '%s'" % tag) - if not retain: - self.log(tag, "subsetted to empty; dropped") - del font[tag] - else: - self.log(tag, "subsetted") - else: - self.log(tag, "NOT subset; don't know how to subset; dropped") - del font[tag] - - glyphOrder = font.getGlyphOrder() - glyphOrder = [g for g in glyphOrder if g in self.glyphs_all] - 
font.setGlyphOrder(glyphOrder) - font._buildReverseGlyphOrderDict() - self.log.lapse("subset GlyphOrder") - - def _prune_post_subset(self, font): - for tag in font.keys(): - if tag == 'GlyphOrder': continue - clazz = ttLib.getTableClass(tag) - if hasattr(clazz, 'prune_post_subset'): - table = font[tag] - retain = table.prune_post_subset(self.options) - self.log.lapse("prune '%s'" % tag) - if not retain: - self.log(tag, "pruned to empty; dropped") - del font[tag] - else: - self.log(tag, "pruned") - - def subset(self, font): - - self._prune_pre_subset(font) - self._closure_glyphs(font) - self._subset_glyphs(font) - self._prune_post_subset(font) - - -class Logger(object): - - def __init__(self, verbose=False, xml=False, timing=False): - self.verbose = verbose - self.xml = xml - self.timing = timing - self.last_time = self.start_time = time.time() - - def parse_opts(self, argv): - argv = argv[:] - for v in ['verbose', 'xml', 'timing']: - if "--"+v in argv: - setattr(self, v, True) - argv.remove("--"+v) - return argv - - def __call__(self, *things): - if not self.verbose: - return - print(' '.join(str(x) for x in things)) - - def lapse(self, *things): - if not self.timing: - return - new_time = time.time() - print("Took %0.3fs to %s" %(new_time - self.last_time, - ' '.join(str(x) for x in things))) - self.last_time = new_time - - def glyphs(self, glyphs, font=None): - if not self.verbose: - return - self("Glyph names:", sorted(glyphs)) - if font: - reverseGlyphMap = font.getReverseGlyphMap() - self("Glyph IDs: ", sorted(reverseGlyphMap[g] for g in glyphs)) - - def font(self, font, file=sys.stdout): - if not self.xml: - return - from fontTools.misc import xmlWriter - writer = xmlWriter.XMLWriter(file) - for tag in font.keys(): - writer.begintag(tag) - writer.newline() - font[tag].toXML(writer, font) - writer.endtag(tag) - writer.newline() - - -def load_font(fontFile, - options, - allowVID=False, - checkChecksums=False, - dontLoadGlyphNames=False, - lazy=True): - - font = 
ttLib.TTFont(fontFile, - allowVID=allowVID, - checkChecksums=checkChecksums, - recalcBBoxes=options.recalc_bounds, - recalcTimestamp=options.recalc_timestamp, - lazy=lazy) - - # Hack: - # - # If we don't need glyph names, change 'post' class to not try to - # load them. It avoid lots of headache with broken fonts as well - # as loading time. - # - # Ideally ttLib should provide a way to ask it to skip loading - # glyph names. But it currently doesn't provide such a thing. - # - if dontLoadGlyphNames: - post = ttLib.getTableClass('post') - saved = post.decode_format_2_0 - post.decode_format_2_0 = post.decode_format_3_0 - f = font['post'] - if f.formatType == 2.0: - f.formatType = 3.0 - post.decode_format_2_0 = saved - - return font - -def save_font(font, outfile, options): - if options.flavor and not hasattr(font, 'flavor'): - raise Exception("fonttools version does not support flavors.") - font.flavor = options.flavor - font.save(outfile, reorderTables=options.canonical_order) - -def parse_unicodes(s): - import re - s = re.sub (r"0[xX]", " ", s) - s = re.sub (r"[<+>,;&#\\xXuU\n ]", " ", s) - l = [] - for item in s.split(): - fields = item.split('-') - if len(fields) == 1: - l.append(int(item, 16)) - else: - start,end = fields - l.extend(range(int(start, 16), int(end, 16)+1)) - return l - -def parse_gids(s): - l = [] - for item in s.replace(',', ' ').split(): - fields = item.split('-') - if len(fields) == 1: - l.append(int(fields[0])) - else: - l.extend(range(int(fields[0]), int(fields[1])+1)) - return l - -def parse_glyphs(s): - return s.replace(',', ' ').split() - -def main(args=None): - - if args is None: - args = sys.argv[1:] - - if '--help' in args: - print(__doc__) - sys.exit(0) - - log = Logger() - args = log.parse_opts(args) - - options = Options() - args = options.parse_opts(args, - ignore_unknown=['gids', 'gids-file', - 'glyphs', 'glyphs-file', - 'text', 'text-file', - 'unicodes', 'unicodes-file', - 'output-file']) - - if len(args) < 2: - print("usage:", 
__usage__, file=sys.stderr) - print("Try pyftsubset --help for more information.", file=sys.stderr) - sys.exit(1) - - fontfile = args[0] - args = args[1:] - - subsetter = Subsetter(options=options, log=log) - outfile = fontfile + '.subset' - glyphs = [] - gids = [] - unicodes = [] - wildcard_glyphs = False - wildcard_unicodes = False - text = "" - for g in args: - if g == '*': - wildcard_glyphs = True - continue - if g.startswith('--output-file='): - outfile = g[14:] - continue - if g.startswith('--text='): - text += g[7:] - continue - if g.startswith('--text-file='): - text += open(g[12:]).read().replace('\n', '') - continue - if g.startswith('--unicodes='): - if g[11:] == '*': - wildcard_unicodes = True - else: - unicodes.extend(parse_unicodes(g[11:])) - continue - if g.startswith('--unicodes-file='): - for line in open(g[16:]).readlines(): - unicodes.extend(parse_unicodes(line.split('#')[0])) - continue - if g.startswith('--gids='): - gids.extend(parse_gids(g[7:])) - continue - if g.startswith('--gids-file='): - for line in open(g[12:]).readlines(): - gids.extend(parse_gids(line.split('#')[0])) - continue - if g.startswith('--glyphs='): - if g[9:] == '*': - wildcard_glyphs = True - else: - glyphs.extend(parse_glyphs(g[9:])) - continue - if g.startswith('--glyphs-file='): - for line in open(g[14:]).readlines(): - glyphs.extend(parse_glyphs(line.split('#')[0])) - continue - glyphs.append(g) - - dontLoadGlyphNames = not options.glyph_names and not glyphs - font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames) - log.lapse("load font") - if wildcard_glyphs: - glyphs.extend(font.getGlyphOrder()) - if wildcard_unicodes: - for t in font['cmap'].tables: - if t.isUnicode(): - unicodes.extend(t.cmap.keys()) - assert '' not in glyphs - - log.lapse("compile glyph list") - log("Text: '%s'" % text) - log("Unicodes:", unicodes) - log("Glyphs:", glyphs) - log("Gids:", gids) - - subsetter.populate(glyphs=glyphs, gids=gids, unicodes=unicodes, text=text) - 
subsetter.subset(font) - - save_font (font, outfile, options) - log.lapse("compile and save font") - - log.last_time = log.start_time - log.lapse("make one with everything(TOTAL TIME)") - - if log.verbose: - import os - log("Input font:% 7d bytes: %s" % (os.path.getsize(fontfile), fontfile)) - log("Subset font:% 7d bytes: %s" % (os.path.getsize(outfile), outfile)) - - log.font(font) - - font.close() - - -__all__ = [ - 'Options', - 'Subsetter', - 'Logger', - 'load_font', - 'save_font', - 'parse_gids', - 'parse_glyphs', - 'parse_unicodes', - 'main' -] - -if __name__ == '__main__': - main() diff -Nru fonttools-3.0/Tools/fontTools/t1Lib.py fonttools-3.21.2/Tools/fontTools/t1Lib.py --- fonttools-3.0/Tools/fontTools/t1Lib.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/t1Lib.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,371 +0,0 @@ -"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts - -Functions for reading and writing raw Type 1 data: - -read(path) - reads any Type 1 font file, returns the raw data and a type indicator: - 'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed - to by 'path'. - Raises an error when the file does not contain valid Type 1 data. - -write(path, data, kind='OTHER', dohex=False) - writes raw Type 1 data to the file pointed to by 'path'. - 'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'. - 'dohex' is a flag which determines whether the eexec encrypted - part should be written as hexadecimal or binary, but only if kind - is 'LWFN' or 'PFB'. 
-""" -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import eexec -from fontTools.misc.macCreatorType import getMacCreatorAndType -import os -import re - -__author__ = "jvr" -__version__ = "1.0b2" -DEBUG = 0 - - -try: - try: - from Carbon import Res - except ImportError: - import Res # MacPython < 2.2 -except ImportError: - haveMacSupport = 0 -else: - haveMacSupport = 1 - import MacOS - - -class T1Error(Exception): pass - - -class T1Font(object): - - """Type 1 font class. - - Uses a minimal interpeter that supports just about enough PS to parse - Type 1 fonts. - """ - - def __init__(self, path=None): - if path is not None: - self.data, type = read(path) - else: - pass # XXX - - def saveAs(self, path, type): - write(path, self.getData(), type) - - def getData(self): - # XXX Todo: if the data has been converted to Python object, - # recreate the PS stream - return self.data - - def getGlyphSet(self): - """Return a generic GlyphSet, which is a dict-like object - mapping glyph names to glyph objects. The returned glyph objects - have a .draw() method that supports the Pen protocol, and will - have an attribute named 'width', but only *after* the .draw() method - has been called. - - In the case of Type 1, the GlyphSet is simply the CharStrings dict. 
- """ - return self["CharStrings"] - - def __getitem__(self, key): - if not hasattr(self, "font"): - self.parse() - return self.font[key] - - def parse(self): - from fontTools.misc import psLib - from fontTools.misc import psCharStrings - self.font = psLib.suckfont(self.data) - charStrings = self.font["CharStrings"] - lenIV = self.font["Private"].get("lenIV", 4) - assert lenIV >= 0 - subrs = self.font["Private"]["Subrs"] - for glyphName, charString in charStrings.items(): - charString, R = eexec.decrypt(charString, 4330) - charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:], - subrs=subrs) - for i in range(len(subrs)): - charString, R = eexec.decrypt(subrs[i], 4330) - subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs) - del self.data - - -# low level T1 data read and write functions - -def read(path, onlyHeader=False): - """reads any Type 1 font file, returns raw data""" - normpath = path.lower() - creator, typ = getMacCreatorAndType(path) - if typ == 'LWFN': - return readLWFN(path, onlyHeader), 'LWFN' - if normpath[-4:] == '.pfb': - return readPFB(path, onlyHeader), 'PFB' - else: - return readOther(path), 'OTHER' - -def write(path, data, kind='OTHER', dohex=False): - assertType1(data) - kind = kind.upper() - try: - os.remove(path) - except os.error: - pass - err = 1 - try: - if kind == 'LWFN': - writeLWFN(path, data) - elif kind == 'PFB': - writePFB(path, data) - else: - writeOther(path, data, dohex) - err = 0 - finally: - if err and not DEBUG: - try: - os.remove(path) - except os.error: - pass - - -# -- internal -- - -LWFNCHUNKSIZE = 2000 -HEXLINELENGTH = 80 - - -def readLWFN(path, onlyHeader=False): - """reads an LWFN font file, returns raw data""" - resRef = Res.FSOpenResFile(path, 1) # read-only - try: - Res.UseResFile(resRef) - n = Res.Count1Resources('POST') - data = [] - for i in range(501, 501 + n): - res = Res.Get1Resource('POST', i) - code = byteord(res.data[0]) - if byteord(res.data[1]) != 0: - raise 
T1Error('corrupt LWFN file') - if code in [1, 2]: - if onlyHeader and code == 2: - break - data.append(res.data[2:]) - elif code in [3, 5]: - break - elif code == 4: - f = open(path, "rb") - data.append(f.read()) - f.close() - elif code == 0: - pass # comment, ignore - else: - raise T1Error('bad chunk code: ' + repr(code)) - finally: - Res.CloseResFile(resRef) - data = bytesjoin(data) - assertType1(data) - return data - -def readPFB(path, onlyHeader=False): - """reads a PFB font file, returns raw data""" - f = open(path, "rb") - data = [] - while True: - if f.read(1) != bytechr(128): - raise T1Error('corrupt PFB file') - code = byteord(f.read(1)) - if code in [1, 2]: - chunklen = stringToLong(f.read(4)) - chunk = f.read(chunklen) - assert len(chunk) == chunklen - data.append(chunk) - elif code == 3: - break - else: - raise T1Error('bad chunk code: ' + repr(code)) - if onlyHeader: - break - f.close() - data = bytesjoin(data) - assertType1(data) - return data - -def readOther(path): - """reads any (font) file, returns raw data""" - f = open(path, "rb") - data = f.read() - f.close() - assertType1(data) - - chunks = findEncryptedChunks(data) - data = [] - for isEncrypted, chunk in chunks: - if isEncrypted and isHex(chunk[:4]): - data.append(deHexString(chunk)) - else: - data.append(chunk) - return bytesjoin(data) - -# file writing tools - -def writeLWFN(path, data): - Res.FSpCreateResFile(path, "just", "LWFN", 0) - resRef = Res.FSOpenResFile(path, 2) # write-only - try: - Res.UseResFile(resRef) - resID = 501 - chunks = findEncryptedChunks(data) - for isEncrypted, chunk in chunks: - if isEncrypted: - code = 2 - else: - code = 1 - while chunk: - res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2]) - res.AddResource('POST', resID, '') - chunk = chunk[LWFNCHUNKSIZE - 2:] - resID = resID + 1 - res = Res.Resource(bytechr(5) + '\0') - res.AddResource('POST', resID, '') - finally: - Res.CloseResFile(resRef) - -def writePFB(path, data): - chunks = 
findEncryptedChunks(data) - f = open(path, "wb") - try: - for isEncrypted, chunk in chunks: - if isEncrypted: - code = 2 - else: - code = 1 - f.write(bytechr(128) + bytechr(code)) - f.write(longToString(len(chunk))) - f.write(chunk) - f.write(bytechr(128) + bytechr(3)) - finally: - f.close() - -def writeOther(path, data, dohex=False): - chunks = findEncryptedChunks(data) - f = open(path, "wb") - try: - hexlinelen = HEXLINELENGTH // 2 - for isEncrypted, chunk in chunks: - if isEncrypted: - code = 2 - else: - code = 1 - if code == 2 and dohex: - while chunk: - f.write(eexec.hexString(chunk[:hexlinelen])) - f.write('\r') - chunk = chunk[hexlinelen:] - else: - f.write(chunk) - finally: - f.close() - - -# decryption tools - -EEXECBEGIN = "currentfile eexec" -EEXECEND = '0' * 64 -EEXECINTERNALEND = "currentfile closefile" -EEXECBEGINMARKER = "%-- eexec start\r" -EEXECENDMARKER = "%-- eexec end\r" - -_ishexRE = re.compile('[0-9A-Fa-f]*$') - -def isHex(text): - return _ishexRE.match(text) is not None - - -def decryptType1(data): - chunks = findEncryptedChunks(data) - data = [] - for isEncrypted, chunk in chunks: - if isEncrypted: - if isHex(chunk[:4]): - chunk = deHexString(chunk) - decrypted, R = eexec.decrypt(chunk, 55665) - decrypted = decrypted[4:] - if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \ - and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND: - raise T1Error("invalid end of eexec part") - decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + '\r' - data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER) - else: - if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN: - data.append(chunk[:-len(EEXECBEGIN)-1]) - else: - data.append(chunk) - return bytesjoin(data) - -def findEncryptedChunks(data): - chunks = [] - while True: - eBegin = data.find(EEXECBEGIN) - if eBegin < 0: - break - eBegin = eBegin + len(EEXECBEGIN) + 1 - eEnd = data.find(EEXECEND, eBegin) - if eEnd < 0: - raise T1Error("can't find end of eexec part") - cypherText = 
data[eBegin:eEnd + 2] - if isHex(cypherText[:4]): - cypherText = deHexString(cypherText) - plainText, R = eexec.decrypt(cypherText, 55665) - eEndLocal = plainText.find(EEXECINTERNALEND) - if eEndLocal < 0: - raise T1Error("can't find end of eexec part") - chunks.append((0, data[:eBegin])) - chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1])) - data = data[eEnd:] - chunks.append((0, data)) - return chunks - -def deHexString(hexstring): - return eexec.deHexString(strjoin(hexstring.split())) - - -# Type 1 assertion - -_fontType1RE = re.compile(br"/FontType\s+1\s+def") - -def assertType1(data): - for head in [b'%!PS-AdobeFont', b'%!FontType1']: - if data[:len(head)] == head: - break - else: - raise T1Error("not a PostScript font") - if not _fontType1RE.search(data): - raise T1Error("not a Type 1 font") - if data.find(b"currentfile eexec") < 0: - raise T1Error("not an encrypted Type 1 font") - # XXX what else? - return data - - -# pfb helpers - -def longToString(long): - s = "" - for i in range(4): - s += bytechr((long & (0xff << (i * 8))) >> i * 8) - return s - -def stringToLong(s): - if len(s) != 4: - raise ValueError('string must be 4 bytes long') - l = 0 - for i in range(4): - l += byteord(s[i]) << (i * 8) - return l diff -Nru fonttools-3.0/Tools/fontTools/ttLib/__init__.py fonttools-3.21.2/Tools/fontTools/ttLib/__init__.py --- fonttools-3.0/Tools/fontTools/ttLib/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,991 +0,0 @@ -"""fontTools.ttLib -- a package for dealing with TrueType fonts. - -This package offers translators to convert TrueType fonts to Python -objects and vice versa, and additionally from Python to TTX (an XML-based -text format) and vice versa. 
- -Example interactive session: - -Python 1.5.2c1 (#43, Mar 9 1999, 13:06:43) [CW PPC w/GUSI w/MSL] -Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam ->>> from fontTools import ttLib ->>> tt = ttLib.TTFont("afont.ttf") ->>> tt['maxp'].numGlyphs -242 ->>> tt['OS/2'].achVendID -'B&H\000' ->>> tt['head'].unitsPerEm -2048 ->>> tt.saveXML("afont.ttx") -Dumping 'LTSH' table... -Dumping 'OS/2' table... -Dumping 'VDMX' table... -Dumping 'cmap' table... -Dumping 'cvt ' table... -Dumping 'fpgm' table... -Dumping 'glyf' table... -Dumping 'hdmx' table... -Dumping 'head' table... -Dumping 'hhea' table... -Dumping 'hmtx' table... -Dumping 'loca' table... -Dumping 'maxp' table... -Dumping 'name' table... -Dumping 'post' table... -Dumping 'prep' table... ->>> tt2 = ttLib.TTFont() ->>> tt2.importXML("afont.ttx") ->>> tt2['maxp'].numGlyphs -242 ->>> - -""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import os -import sys - -haveMacSupport = 0 -if sys.platform == "mac": - haveMacSupport = 1 -elif sys.platform == "darwin": - if sys.version_info[:3] != (2, 2, 0) and sys.version_info[:1] < (3,): - # Python 2.2's Mac support is broken, so don't enable it there. - # Python 3 does not have Res used by macUtils - haveMacSupport = 1 - - -class TTLibError(Exception): pass - - -class TTFont(object): - - """The main font object. It manages file input and output, and offers - a convenient way of accessing tables. - Tables will be only decompiled when necessary, ie. when they're actually - accessed. This means that simple operations can be extremely fast. - """ - - def __init__(self, file=None, res_name_or_index=None, - sfntVersion="\000\001\000\000", flavor=None, checkChecksums=False, - verbose=False, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False, - recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=False): - - """The constructor can be called with a few different arguments. 
- When reading a font from disk, 'file' should be either a pathname - pointing to a file, or a readable file object. - - It we're running on a Macintosh, 'res_name_or_index' maybe an sfnt - resource name or an sfnt resource index number or zero. The latter - case will cause TTLib to autodetect whether the file is a flat file - or a suitcase. (If it's a suitcase, only the first 'sfnt' resource - will be read!) - - The 'checkChecksums' argument is used to specify how sfnt - checksums are treated upon reading a file from disk: - 0: don't check (default) - 1: check, print warnings if a wrong checksum is found - 2: check, raise an exception if a wrong checksum is found. - - The TTFont constructor can also be called without a 'file' - argument: this is the way to create a new empty font. - In this case you can optionally supply the 'sfntVersion' argument, - and a 'flavor' which can be None, or 'woff'. - - If the recalcBBoxes argument is false, a number of things will *not* - be recalculated upon save/compile: - 1) glyph bounding boxes - 2) maxp font bounding box - 3) hhea min/max values - (1) is needed for certain kinds of CJK fonts (ask Werner Lemberg ;-). - Additionally, upon importing an TTX file, this option cause glyphs - to be compiled right away. This should reduce memory consumption - greatly, and therefore should have some impact on the time needed - to parse/compile large fonts. - - If the recalcTimestamp argument is false, the modified timestamp in the - 'head' table will *not* be recalculated upon save/compile. - - If the allowVID argument is set to true, then virtual GID's are - supported. Asking for a glyph ID with a glyph name or GID that is not in - the font will return a virtual GID. This is valid for GSUB and cmap - tables. For SING glyphlets, the cmap table is used to specify Unicode - values for virtual GI's used in GSUB/GPOS rules. 
If the gid N is requested - and does not exist in the font, or the glyphname has the form glyphN - and does not exist in the font, then N is used as the virtual GID. - Else, the first virtual GID is assigned as 0x1000 -1; for subsequent new - virtual GIDs, the next is one less than the previous. - - If ignoreDecompileErrors is set to True, exceptions raised in - individual tables during decompilation will be ignored, falling - back to the DefaultTable implementation, which simply keeps the - binary data. - - If lazy is set to True, many data structures are loaded lazily, upon - access only. If it is set to False, many data structures are loaded - immediately. The default is lazy=None which is somewhere in between. - """ - - from fontTools.ttLib import sfnt - self.verbose = verbose - self.quiet = quiet - self.lazy = lazy - self.recalcBBoxes = recalcBBoxes - self.recalcTimestamp = recalcTimestamp - self.tables = {} - self.reader = None - - # Permit the user to reference glyphs that are not int the font. - self.last_vid = 0xFFFE # Can't make it be 0xFFFF, as the world is full unsigned short integer counters that get incremented after the last seen GID value. - self.reverseVIDDict = {} - self.VIDDict = {} - self.allowVID = allowVID - self.ignoreDecompileErrors = ignoreDecompileErrors - - if not file: - self.sfntVersion = sfntVersion - self.flavor = flavor - self.flavorData = None - return - if not hasattr(file, "read"): - closeStream = True - # assume file is a string - if haveMacSupport and res_name_or_index is not None: - # on the mac, we deal with sfnt resources as well as flat files - from . import macUtils - if res_name_or_index == 0: - if macUtils.getSFNTResIndices(file): - # get the first available sfnt font. 
- file = macUtils.SFNTResourceReader(file, 1) - else: - file = open(file, "rb") - else: - file = macUtils.SFNTResourceReader(file, res_name_or_index) - else: - file = open(file, "rb") - - else: - # assume "file" is a readable file object - closeStream = False - # read input file in memory and wrap a stream around it to allow overwriting - tmp = BytesIO(file.read()) - if hasattr(file, 'name'): - # save reference to input file name - tmp.name = file.name - if closeStream: - file.close() - self.reader = sfnt.SFNTReader(tmp, checkChecksums, fontNumber=fontNumber) - self.sfntVersion = self.reader.sfntVersion - self.flavor = self.reader.flavor - self.flavorData = self.reader.flavorData - - def close(self): - """If we still have a reader object, close it.""" - if self.reader is not None: - self.reader.close() - - def save(self, file, makeSuitcase=False, reorderTables=True): - """Save the font to disk. Similarly to the constructor, - the 'file' argument can be either a pathname or a writable - file object. - - On the Mac, if makeSuitcase is true, a suitcase (resource fork) - file will we made instead of a flat .ttf file. - """ - from fontTools.ttLib import sfnt - if not hasattr(file, "write"): - closeStream = 1 - if os.name == "mac" and makeSuitcase: - from . 
import macUtils - file = macUtils.SFNTResourceWriter(file, self) - else: - file = open(file, "wb") - if os.name == "mac": - from fontTools.misc.macCreator import setMacCreatorAndType - setMacCreatorAndType(file.name, 'mdos', 'BINA') - else: - # assume "file" is a writable file object - closeStream = 0 - - tags = list(self.keys()) - if "GlyphOrder" in tags: - tags.remove("GlyphOrder") - numTables = len(tags) - # write to a temporary stream to allow saving to unseekable streams - tmp = BytesIO() - writer = sfnt.SFNTWriter(tmp, numTables, self.sfntVersion, self.flavor, self.flavorData) - - done = [] - for tag in tags: - self._writeTable(tag, writer, done) - - writer.close() - - if (reorderTables is None or writer.reordersTables() or - (reorderTables is False and self.reader is None)): - # don't reorder tables and save as is - file.write(tmp.getvalue()) - tmp.close() - else: - if reorderTables is False: - # sort tables using the original font's order - tableOrder = list(self.reader.keys()) - else: - # use the recommended order from the OpenType specification - tableOrder = None - tmp.flush() - tmp.seek(0) - tmp2 = BytesIO() - reorderFontTables(tmp, tmp2, tableOrder) - file.write(tmp2.getvalue()) - tmp.close() - tmp2.close() - - if closeStream: - file.close() - - def saveXML(self, fileOrPath, progress=None, quiet=False, - tables=None, skipTables=None, splitTables=False, disassembleInstructions=True, - bitmapGlyphDataFormat='raw'): - """Export the font as TTX (an XML-based text file), or as a series of text - files when splitTables is true. In the latter case, the 'fileOrPath' - argument should be a path to a directory. - The 'tables' argument must either be false (dump all tables) or a - list of tables to dump. The 'skipTables' argument may be a list of tables - to skip, but only when the 'tables' argument is false. 
- """ - from fontTools import version - from fontTools.misc import xmlWriter - - self.disassembleInstructions = disassembleInstructions - self.bitmapGlyphDataFormat = bitmapGlyphDataFormat - if not tables: - tables = list(self.keys()) - if "GlyphOrder" not in tables: - tables = ["GlyphOrder"] + tables - if skipTables: - for tag in skipTables: - if tag in tables: - tables.remove(tag) - numTables = len(tables) - if progress: - progress.set(0, numTables) - idlefunc = getattr(progress, "idle", None) - else: - idlefunc = None - - writer = xmlWriter.XMLWriter(fileOrPath, idlefunc=idlefunc) - writer.begintag("ttFont", sfntVersion=repr(self.sfntVersion)[1:-1], - ttLibVersion=version) - writer.newline() - - if not splitTables: - writer.newline() - else: - # 'fileOrPath' must now be a path - path, ext = os.path.splitext(fileOrPath) - fileNameTemplate = path + ".%s" + ext - - for i in range(numTables): - if progress: - progress.set(i) - tag = tables[i] - if splitTables: - tablePath = fileNameTemplate % tagToIdentifier(tag) - tableWriter = xmlWriter.XMLWriter(tablePath, idlefunc=idlefunc) - tableWriter.begintag("ttFont", ttLibVersion=version) - tableWriter.newline() - tableWriter.newline() - writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath)) - writer.newline() - else: - tableWriter = writer - self._tableToXML(tableWriter, tag, progress, quiet) - if splitTables: - tableWriter.endtag("ttFont") - tableWriter.newline() - tableWriter.close() - if progress: - progress.set((i + 1)) - writer.endtag("ttFont") - writer.newline() - writer.close() - if self.verbose: - debugmsg("Done dumping TTX") - - def _tableToXML(self, writer, tag, progress, quiet): - if tag in self: - table = self[tag] - report = "Dumping '%s' table..." % tag - else: - report = "No '%s' table found." 
% tag - if progress: - progress.setLabel(report) - elif self.verbose: - debugmsg(report) - else: - if not quiet: - print(report) - if tag not in self: - return - xmlTag = tagToXML(tag) - attrs = dict() - if hasattr(table, "ERROR"): - attrs['ERROR'] = "decompilation error" - from .tables.DefaultTable import DefaultTable - if table.__class__ == DefaultTable: - attrs['raw'] = True - writer.begintag(xmlTag, **attrs) - writer.newline() - if tag in ("glyf", "CFF "): - table.toXML(writer, self, progress) - else: - table.toXML(writer, self) - writer.endtag(xmlTag) - writer.newline() - writer.newline() - - def importXML(self, file, progress=None, quiet=False): - """Import a TTX file (an XML-based text format), so as to recreate - a font object. - """ - if "maxp" in self and "post" in self: - # Make sure the glyph order is loaded, as it otherwise gets - # lost if the XML doesn't contain the glyph order, yet does - # contain the table which was originally used to extract the - # glyph names from (ie. 'post', 'cmap' or 'CFF '). 
- self.getGlyphOrder() - - from fontTools.misc import xmlReader - - reader = xmlReader.XMLReader(file, self, progress, quiet) - reader.read() - - def isLoaded(self, tag): - """Return true if the table identified by 'tag' has been - decompiled and loaded into memory.""" - return tag in self.tables - - def has_key(self, tag): - if self.isLoaded(tag): - return True - elif self.reader and tag in self.reader: - return True - elif tag == "GlyphOrder": - return True - else: - return False - - __contains__ = has_key - - def keys(self): - keys = list(self.tables.keys()) - if self.reader: - for key in list(self.reader.keys()): - if key not in keys: - keys.append(key) - - if "GlyphOrder" in keys: - keys.remove("GlyphOrder") - keys = sortedTagList(keys) - return ["GlyphOrder"] + keys - - def __len__(self): - return len(list(self.keys())) - - def __getitem__(self, tag): - tag = Tag(tag) - try: - return self.tables[tag] - except KeyError: - if tag == "GlyphOrder": - table = GlyphOrder(tag) - self.tables[tag] = table - return table - if self.reader is not None: - import traceback - if self.verbose: - debugmsg("Reading '%s' table from disk" % tag) - data = self.reader[tag] - tableClass = getTableClass(tag) - table = tableClass(tag) - self.tables[tag] = table - if self.verbose: - debugmsg("Decompiling '%s' table" % tag) - try: - table.decompile(data, self) - except: - if not self.ignoreDecompileErrors: - raise - # fall back to DefaultTable, retaining the binary table data - print("An exception occurred during the decompilation of the '%s' table" % tag) - from .tables.DefaultTable import DefaultTable - file = StringIO() - traceback.print_exc(file=file) - table = DefaultTable(tag) - table.ERROR = file.getvalue() - self.tables[tag] = table - table.decompile(data, self) - return table - else: - raise KeyError("'%s' table not found" % tag) - - def __setitem__(self, tag, table): - self.tables[Tag(tag)] = table - - def __delitem__(self, tag): - if tag not in self: - raise KeyError("'%s' 
table not found" % tag) - if tag in self.tables: - del self.tables[tag] - if self.reader and tag in self.reader: - del self.reader[tag] - - def get(self, tag, default=None): - try: - return self[tag] - except KeyError: - return default - - def setGlyphOrder(self, glyphOrder): - self.glyphOrder = glyphOrder - - def getGlyphOrder(self): - try: - return self.glyphOrder - except AttributeError: - pass - if 'CFF ' in self: - cff = self['CFF '] - self.glyphOrder = cff.getGlyphOrder() - elif 'post' in self: - # TrueType font - glyphOrder = self['post'].getGlyphOrder() - if glyphOrder is None: - # - # No names found in the 'post' table. - # Try to create glyph names from the unicode cmap (if available) - # in combination with the Adobe Glyph List (AGL). - # - self._getGlyphNamesFromCmap() - else: - self.glyphOrder = glyphOrder - else: - self._getGlyphNamesFromCmap() - return self.glyphOrder - - def _getGlyphNamesFromCmap(self): - # - # This is rather convoluted, but then again, it's an interesting problem: - # - we need to use the unicode values found in the cmap table to - # build glyph names (eg. because there is only a minimal post table, - # or none at all). - # - but the cmap parser also needs glyph names to work with... - # So here's what we do: - # - make up glyph names based on glyphID - # - load a temporary cmap table based on those names - # - extract the unicode values, build the "real" glyph names - # - unload the temporary cmap table - # - if self.isLoaded("cmap"): - # Bootstrapping: we're getting called by the cmap parser - # itself. This means self.tables['cmap'] contains a partially - # loaded cmap, making it impossible to get at a unicode - # subtable here. We remove the partially loaded cmap and - # restore it later. - # This only happens if the cmap table is loaded before any - # other table that does f.getGlyphOrder() or f.getGlyphName(). 
- cmapLoading = self.tables['cmap'] - del self.tables['cmap'] - else: - cmapLoading = None - # Make up glyph names based on glyphID, which will be used by the - # temporary cmap and by the real cmap in case we don't find a unicode - # cmap. - numGlyphs = int(self['maxp'].numGlyphs) - glyphOrder = [None] * numGlyphs - glyphOrder[0] = ".notdef" - for i in range(1, numGlyphs): - glyphOrder[i] = "glyph%.5d" % i - # Set the glyph order, so the cmap parser has something - # to work with (so we don't get called recursively). - self.glyphOrder = glyphOrder - # Get a (new) temporary cmap (based on the just invented names) - try: - tempcmap = self['cmap'].getcmap(3, 1) - except KeyError: - tempcmap = None - if tempcmap is not None: - # we have a unicode cmap - from fontTools import agl - cmap = tempcmap.cmap - # create a reverse cmap dict - reversecmap = {} - for unicode, name in list(cmap.items()): - reversecmap[name] = unicode - allNames = {} - for i in range(numGlyphs): - tempName = glyphOrder[i] - if tempName in reversecmap: - unicode = reversecmap[tempName] - if unicode in agl.UV2AGL: - # get name from the Adobe Glyph List - glyphName = agl.UV2AGL[unicode] - else: - # create uni name - glyphName = "uni%04X" % unicode - tempName = glyphName - n = allNames.get(tempName, 0) - if n: - tempName = glyphName + "#" + str(n) - glyphOrder[i] = tempName - allNames[tempName] = n + 1 - # Delete the temporary cmap table from the cache, so it can - # be parsed again with the right names. - del self.tables['cmap'] - else: - pass # no unicode cmap available, stick with the invented names - self.glyphOrder = glyphOrder - if cmapLoading: - # restore partially loaded cmap, so it can continue loading - # using the proper names. 
- self.tables['cmap'] = cmapLoading - - def getGlyphNames(self): - """Get a list of glyph names, sorted alphabetically.""" - glyphNames = sorted(self.getGlyphOrder()[:]) - return glyphNames - - def getGlyphNames2(self): - """Get a list of glyph names, sorted alphabetically, - but not case sensitive. - """ - from fontTools.misc import textTools - return textTools.caselessSort(self.getGlyphOrder()) - - def getGlyphName(self, glyphID, requireReal=False): - try: - return self.getGlyphOrder()[glyphID] - except IndexError: - if requireReal or not self.allowVID: - # XXX The ??.W8.otf font that ships with OSX uses higher glyphIDs in - # the cmap table than there are glyphs. I don't think it's legal... - return "glyph%.5d" % glyphID - else: - # user intends virtual GID support - try: - glyphName = self.VIDDict[glyphID] - except KeyError: - glyphName ="glyph%.5d" % glyphID - self.last_vid = min(glyphID, self.last_vid ) - self.reverseVIDDict[glyphName] = glyphID - self.VIDDict[glyphID] = glyphName - return glyphName - - def getGlyphID(self, glyphName, requireReal=False): - if not hasattr(self, "_reverseGlyphOrderDict"): - self._buildReverseGlyphOrderDict() - glyphOrder = self.getGlyphOrder() - d = self._reverseGlyphOrderDict - if glyphName not in d: - if glyphName in glyphOrder: - self._buildReverseGlyphOrderDict() - return self.getGlyphID(glyphName) - else: - if requireReal: - raise KeyError(glyphName) - elif not self.allowVID: - # Handle glyphXXX only - if glyphName[:5] == "glyph": - try: - return int(glyphName[5:]) - except (NameError, ValueError): - raise KeyError(glyphName) - else: - # user intends virtual GID support - try: - glyphID = self.reverseVIDDict[glyphName] - except KeyError: - # if name is in glyphXXX format, use the specified name. 
- if glyphName[:5] == "glyph": - try: - glyphID = int(glyphName[5:]) - except (NameError, ValueError): - glyphID = None - if glyphID is None: - glyphID = self.last_vid -1 - self.last_vid = glyphID - self.reverseVIDDict[glyphName] = glyphID - self.VIDDict[glyphID] = glyphName - return glyphID - - glyphID = d[glyphName] - if glyphName != glyphOrder[glyphID]: - self._buildReverseGlyphOrderDict() - return self.getGlyphID(glyphName) - return glyphID - - def getReverseGlyphMap(self, rebuild=False): - if rebuild or not hasattr(self, "_reverseGlyphOrderDict"): - self._buildReverseGlyphOrderDict() - return self._reverseGlyphOrderDict - - def _buildReverseGlyphOrderDict(self): - self._reverseGlyphOrderDict = d = {} - glyphOrder = self.getGlyphOrder() - for glyphID in range(len(glyphOrder)): - d[glyphOrder[glyphID]] = glyphID - - def _writeTable(self, tag, writer, done): - """Internal helper function for self.save(). Keeps track of - inter-table dependencies. - """ - if tag in done: - return - tableClass = getTableClass(tag) - for masterTable in tableClass.dependencies: - if masterTable not in done: - if masterTable in self: - self._writeTable(masterTable, writer, done) - else: - done.append(masterTable) - tabledata = self.getTableData(tag) - if self.verbose: - debugmsg("writing '%s' table to disk" % tag) - writer[tag] = tabledata - done.append(tag) - - def getTableData(self, tag): - """Returns raw table data, whether compiled or directly read from disk. - """ - tag = Tag(tag) - if self.isLoaded(tag): - if self.verbose: - debugmsg("compiling '%s' table" % tag) - return self.tables[tag].compile(self) - elif self.reader and tag in self.reader: - if self.verbose: - debugmsg("Reading '%s' table from disk" % tag) - return self.reader[tag] - else: - raise KeyError(tag) - - def getGlyphSet(self, preferCFF=True): - """Return a generic GlyphSet, which is a dict-like object - mapping glyph names to glyph objects. 
The returned glyph objects - have a .draw() method that supports the Pen protocol, and will - have an attribute named 'width'. - - If the font is CFF-based, the outlines will be taken from the 'CFF ' - table. Otherwise the outlines will be taken from the 'glyf' table. - If the font contains both a 'CFF ' and a 'glyf' table, you can use - the 'preferCFF' argument to specify which one should be taken. - """ - glyphs = None - if (preferCFF and "CFF " in self) or "glyf" not in self: - glyphs = _TTGlyphSet(self, list(self["CFF "].cff.values())[0].CharStrings, _TTGlyphCFF) - - if glyphs is None and "glyf" in self: - glyphs = _TTGlyphSet(self, self["glyf"], _TTGlyphGlyf) - - if glyphs is None: - raise TTLibError("Font contains no outlines") - - return glyphs - - -class _TTGlyphSet(object): - - """Generic dict-like GlyphSet class that pulls metrics from hmtx and - glyph shape from TrueType or CFF. - """ - - def __init__(self, ttFont, glyphs, glyphType): - self._glyphs = glyphs - self._hmtx = ttFont['hmtx'] - self._glyphType = glyphType - - def keys(self): - return list(self._glyphs.keys()) - - def has_key(self, glyphName): - return glyphName in self._glyphs - - __contains__ = has_key - - def __getitem__(self, glyphName): - return self._glyphType(self, self._glyphs[glyphName], self._hmtx[glyphName]) - - def get(self, glyphName, default=None): - try: - return self[glyphName] - except KeyError: - return default - -class _TTGlyph(object): - - """Wrapper for a TrueType glyph that supports the Pen protocol, meaning - that it has a .draw() method that takes a pen object as its only - argument. Additionally there is a 'width' attribute. - """ - - def __init__(self, glyphset, glyph, metrics): - self._glyphset = glyphset - self._glyph = glyph - self.width, self.lsb = metrics - - def draw(self, pen): - """Draw the glyph onto Pen. See fontTools.pens.basePen for details - how that works. 
- """ - self._glyph.draw(pen) - -class _TTGlyphCFF(_TTGlyph): - pass - -class _TTGlyphGlyf(_TTGlyph): - - def draw(self, pen): - """Draw the glyph onto Pen. See fontTools.pens.basePen for details - how that works. - """ - glyfTable = self._glyphset._glyphs - glyph = self._glyph - offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0 - glyph.draw(pen, glyfTable, offset) - - -class GlyphOrder(object): - - """A pseudo table. The glyph order isn't in the font as a separate - table, but it's nice to present it as such in the TTX format. - """ - - def __init__(self, tag=None): - pass - - def toXML(self, writer, ttFont): - glyphOrder = ttFont.getGlyphOrder() - writer.comment("The 'id' attribute is only for humans; " - "it is ignored when parsed.") - writer.newline() - for i in range(len(glyphOrder)): - glyphName = glyphOrder[i] - writer.simpletag("GlyphID", id=i, name=glyphName) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "glyphOrder"): - self.glyphOrder = [] - ttFont.setGlyphOrder(self.glyphOrder) - if name == "GlyphID": - self.glyphOrder.append(attrs["name"]) - - -def getTableModule(tag): - """Fetch the packer/unpacker module for a table. - Return None when no module is found. - """ - from . import tables - pyTag = tagToIdentifier(tag) - try: - __import__("fontTools.ttLib.tables." + pyTag) - except ImportError as err: - # If pyTag is found in the ImportError message, - # means table is not implemented. If it's not - # there, then some other module is missing, don't - # suppress the error. - if str(err).find(pyTag) >= 0: - return None - else: - raise err - else: - return getattr(tables, pyTag) - - -def getTableClass(tag): - """Fetch the packer/unpacker class for a table. - Return None when no class is found. 
- """ - module = getTableModule(tag) - if module is None: - from .tables.DefaultTable import DefaultTable - return DefaultTable - pyTag = tagToIdentifier(tag) - tableClass = getattr(module, "table_" + pyTag) - return tableClass - - -def getClassTag(klass): - """Fetch the table tag for a class object.""" - name = klass.__name__ - assert name[:6] == 'table_' - name = name[6:] # Chop 'table_' - return identifierToTag(name) - - -def newTable(tag): - """Return a new instance of a table.""" - tableClass = getTableClass(tag) - return tableClass(tag) - - -def _escapechar(c): - """Helper function for tagToIdentifier()""" - import re - if re.match("[a-z0-9]", c): - return "_" + c - elif re.match("[A-Z]", c): - return c + "_" - else: - return hex(byteord(c))[2:] - - -def tagToIdentifier(tag): - """Convert a table tag to a valid (but UGLY) python identifier, - as well as a filename that's guaranteed to be unique even on a - caseless file system. Each character is mapped to two characters. - Lowercase letters get an underscore before the letter, uppercase - letters get an underscore after the letter. Trailing spaces are - trimmed. Illegal characters are escaped as two hex bytes. If the - result starts with a number (as the result of a hex escape), an - extra underscore is prepended. 
Examples: - 'glyf' -> '_g_l_y_f' - 'cvt ' -> '_c_v_t' - 'OS/2' -> 'O_S_2f_2' - """ - import re - tag = Tag(tag) - if tag == "GlyphOrder": - return tag - assert len(tag) == 4, "tag should be 4 characters long" - while len(tag) > 1 and tag[-1] == ' ': - tag = tag[:-1] - ident = "" - for c in tag: - ident = ident + _escapechar(c) - if re.match("[0-9]", ident): - ident = "_" + ident - return ident - - -def identifierToTag(ident): - """the opposite of tagToIdentifier()""" - if ident == "GlyphOrder": - return ident - if len(ident) % 2 and ident[0] == "_": - ident = ident[1:] - assert not (len(ident) % 2) - tag = "" - for i in range(0, len(ident), 2): - if ident[i] == "_": - tag = tag + ident[i+1] - elif ident[i+1] == "_": - tag = tag + ident[i] - else: - # assume hex - tag = tag + chr(int(ident[i:i+2], 16)) - # append trailing spaces - tag = tag + (4 - len(tag)) * ' ' - return Tag(tag) - - -def tagToXML(tag): - """Similarly to tagToIdentifier(), this converts a TT tag - to a valid XML element name. Since XML element names are - case sensitive, this is a fairly simple/readable translation. 
- """ - import re - tag = Tag(tag) - if tag == "OS/2": - return "OS_2" - elif tag == "GlyphOrder": - return tag - if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag): - return tag.strip() - else: - return tagToIdentifier(tag) - - -def xmlToTag(tag): - """The opposite of tagToXML()""" - if tag == "OS_2": - return Tag("OS/2") - if len(tag) == 8: - return identifierToTag(tag) - else: - return Tag(tag + " " * (4 - len(tag))) - - -def debugmsg(msg): - import time - print(msg + time.strftime(" (%H:%M:%S)", time.localtime(time.time()))) - - -# Table order as recommended in the OpenType specification 1.4 -TTFTableOrder = ["head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX", - "hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf", - "kern", "name", "post", "gasp", "PCLT"] - -OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post", - "CFF "] - -def sortedTagList(tagList, tableOrder=None): - """Return a sorted copy of tagList, sorted according to the OpenType - specification, or according to a custom tableOrder. If given and not - None, tableOrder needs to be a list of tag names. - """ - tagList = sorted(tagList) - if tableOrder is None: - if "DSIG" in tagList: - # DSIG should be last (XXX spec reference?) - tagList.remove("DSIG") - tagList.append("DSIG") - if "CFF " in tagList: - tableOrder = OTFTableOrder - else: - tableOrder = TTFTableOrder - orderedTables = [] - for tag in tableOrder: - if tag in tagList: - orderedTables.append(tag) - tagList.remove(tag) - orderedTables.extend(tagList) - return orderedTables - - -def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False): - """Rewrite a font file, ordering the tables as recommended by the - OpenType specification 1.4. 
- """ - from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter - reader = SFNTReader(inFile, checkChecksums=checkChecksums) - writer = SFNTWriter(outFile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) - tables = list(reader.keys()) - for tag in sortedTagList(tables, tableOrder): - writer[tag] = reader[tag] - writer.close() - - -def maxPowerOfTwo(x): - """Return the highest exponent of two, so that - (2 ** exponent) <= x. Return 0 if x is 0. - """ - exponent = 0 - while x: - x = x >> 1 - exponent = exponent + 1 - return max(exponent - 1, 0) - - -def getSearchRange(n, itemSize=16): - """Calculate searchRange, entrySelector, rangeShift. - """ - # itemSize defaults to 16, for backward compatibility - # with upstream fonttools. - exponent = maxPowerOfTwo(n) - searchRange = (2 ** exponent) * itemSize - entrySelector = exponent - rangeShift = max(0, n * itemSize - searchRange) - return searchRange, entrySelector, rangeShift diff -Nru fonttools-3.0/Tools/fontTools/ttLib/macUtils.py fonttools-3.21.2/Tools/fontTools/ttLib/macUtils.py --- fonttools-3.0/Tools/fontTools/ttLib/macUtils.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/macUtils.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,73 +0,0 @@ -"""ttLib.macUtils.py -- Various Mac-specific stuff.""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import sys -import os -if sys.platform not in ("mac", "darwin"): - raise ImportError("This module is Mac-only!") -try: - from Carbon import Res -except ImportError: - import Res - - -def MyOpenResFile(path): - mode = 1 # read only - try: - resref = Res.FSOpenResFile(path, mode) - except Res.Error: - # try data fork - resref = Res.FSOpenResourceFile(path, unicode(), mode) - return resref - - -def getSFNTResIndices(path): - """Determine whether a file has a resource fork or not.""" - try: - resref = MyOpenResFile(path) - except Res.Error: - return [] - 
Res.UseResFile(resref) - numSFNTs = Res.Count1Resources('sfnt') - Res.CloseResFile(resref) - return list(range(1, numSFNTs + 1)) - - -def openTTFonts(path): - """Given a pathname, return a list of TTFont objects. In the case - of a flat TTF/OTF file, the list will contain just one font object; - but in the case of a Mac font suitcase it will contain as many - font objects as there are sfnt resources in the file. - """ - from fontTools import ttLib - fonts = [] - sfnts = getSFNTResIndices(path) - if not sfnts: - fonts.append(ttLib.TTFont(path)) - else: - for index in sfnts: - fonts.append(ttLib.TTFont(path, index)) - if not fonts: - raise ttLib.TTLibError("no fonts found in file '%s'" % path) - return fonts - - -class SFNTResourceReader(object): - - """Simple (Mac-only) read-only file wrapper for 'sfnt' resources.""" - - def __init__(self, path, res_name_or_index): - resref = MyOpenResFile(path) - Res.UseResFile(resref) - if isinstance(res_name_or_index, basestring): - res = Res.Get1NamedResource('sfnt', res_name_or_index) - else: - res = Res.Get1IndResource('sfnt', res_name_or_index) - self.file = BytesIO(res.data) - Res.CloseResFile(resref) - self.name = path - - def __getattr__(self, attr): - # cheap inheritance - return getattr(self.file, attr) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/sfnt.py fonttools-3.21.2/Tools/fontTools/ttLib/sfnt.py --- fonttools-3.0/Tools/fontTools/ttLib/sfnt.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/sfnt.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,520 +0,0 @@ -"""ttLib/sfnt.py -- low-level module to deal with the sfnt file format. - -Defines two public classes: - SFNTReader - SFNTWriter - -(Normally you don't have to use these classes explicitly; they are -used automatically by ttLib.TTFont.) - -The reading and writing of sfnt files is separated in two distinct -classes, since whenever to number of tables changes or whenever -a table's length chages you need to rewrite the whole file anyway. 
-""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.ttLib import getSearchRange -import struct -from collections import OrderedDict - - -class SFNTReader(object): - - def __new__(cls, *args, **kwargs): - """ Return an instance of the SFNTReader sub-class which is compatible - with the input file type. - """ - if args and cls is SFNTReader: - infile = args[0] - sfntVersion = Tag(infile.read(4)) - infile.seek(0) - if sfntVersion == "wOF2": - # return new WOFF2Reader object - from fontTools.ttLib.woff2 import WOFF2Reader - return object.__new__(WOFF2Reader) - # return default object - return object.__new__(cls) - - def __init__(self, file, checkChecksums=1, fontNumber=-1): - self.file = file - self.checkChecksums = checkChecksums - - self.flavor = None - self.flavorData = None - self.DirectoryEntry = SFNTDirectoryEntry - self.sfntVersion = self.file.read(4) - self.file.seek(0) - if self.sfntVersion == b"ttcf": - data = self.file.read(ttcHeaderSize) - if len(data) != ttcHeaderSize: - from fontTools import ttLib - raise ttLib.TTLibError("Not a Font Collection (not enough data)") - sstruct.unpack(ttcHeaderFormat, data, self) - assert self.Version == 0x00010000 or self.Version == 0x00020000, "unrecognized TTC version 0x%08x" % self.Version - if not 0 <= fontNumber < self.numFonts: - from fontTools import ttLib - raise ttLib.TTLibError("specify a font number between 0 and %d (inclusive)" % (self.numFonts - 1)) - offsetTable = struct.unpack(">%dL" % self.numFonts, self.file.read(self.numFonts * 4)) - if self.Version == 0x00020000: - pass # ignoring version 2.0 signatures - self.file.seek(offsetTable[fontNumber]) - data = self.file.read(sfntDirectorySize) - if len(data) != sfntDirectorySize: - from fontTools import ttLib - raise ttLib.TTLibError("Not a Font Collection (not enough data)") - sstruct.unpack(sfntDirectoryFormat, data, self) - elif self.sfntVersion == 
b"wOFF": - self.flavor = "woff" - self.DirectoryEntry = WOFFDirectoryEntry - data = self.file.read(woffDirectorySize) - if len(data) != woffDirectorySize: - from fontTools import ttLib - raise ttLib.TTLibError("Not a WOFF font (not enough data)") - sstruct.unpack(woffDirectoryFormat, data, self) - else: - data = self.file.read(sfntDirectorySize) - if len(data) != sfntDirectorySize: - from fontTools import ttLib - raise ttLib.TTLibError("Not a TrueType or OpenType font (not enough data)") - sstruct.unpack(sfntDirectoryFormat, data, self) - self.sfntVersion = Tag(self.sfntVersion) - - if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"): - from fontTools import ttLib - raise ttLib.TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") - self.tables = OrderedDict() - for i in range(self.numTables): - entry = self.DirectoryEntry() - entry.fromFile(self.file) - tag = Tag(entry.tag) - self.tables[tag] = entry - - # Load flavor data if any - if self.flavor == "woff": - self.flavorData = WOFFFlavorData(self) - - def has_key(self, tag): - return tag in self.tables - - __contains__ = has_key - - def keys(self): - return self.tables.keys() - - def __getitem__(self, tag): - """Fetch the raw table data.""" - entry = self.tables[Tag(tag)] - data = entry.loadData (self.file) - if self.checkChecksums: - if tag == 'head': - # Beh: we have to special-case the 'head' table. - checksum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) - else: - checksum = calcChecksum(data) - if self.checkChecksums > 1: - # Be obnoxious, and barf when it's wrong - assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag - elif checksum != entry.checkSum: - # Be friendly, and just print a warning. 
- print("bad checksum for '%s' table" % tag) - return data - - def __delitem__(self, tag): - del self.tables[Tag(tag)] - - def close(self): - self.file.close() - - -class SFNTWriter(object): - - def __new__(cls, *args, **kwargs): - """ Return an instance of the SFNTWriter sub-class which is compatible - with the specified 'flavor'. - """ - flavor = None - if kwargs and 'flavor' in kwargs: - flavor = kwargs['flavor'] - elif args and len(args) > 3: - flavor = args[3] - if cls is SFNTWriter: - if flavor == "woff2": - # return new WOFF2Writer object - from fontTools.ttLib.woff2 import WOFF2Writer - return object.__new__(WOFF2Writer) - # return default object - return object.__new__(cls) - - def __init__(self, file, numTables, sfntVersion="\000\001\000\000", - flavor=None, flavorData=None): - self.file = file - self.numTables = numTables - self.sfntVersion = Tag(sfntVersion) - self.flavor = flavor - self.flavorData = flavorData - - if self.flavor == "woff": - self.directoryFormat = woffDirectoryFormat - self.directorySize = woffDirectorySize - self.DirectoryEntry = WOFFDirectoryEntry - - self.signature = "wOFF" - - # to calculate WOFF checksum adjustment, we also need the original SFNT offsets - self.origNextTableOffset = sfntDirectorySize + numTables * sfntDirectoryEntrySize - else: - assert not self.flavor, "Unknown flavor '%s'" % self.flavor - self.directoryFormat = sfntDirectoryFormat - self.directorySize = sfntDirectorySize - self.DirectoryEntry = SFNTDirectoryEntry - - self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(numTables, 16) - - self.nextTableOffset = self.directorySize + numTables * self.DirectoryEntry.formatSize - # clear out directory area - self.file.seek(self.nextTableOffset) - # make sure we're actually where we want to be. 
(old cStringIO bug) - self.file.write(b'\0' * (self.nextTableOffset - self.file.tell())) - self.tables = OrderedDict() - - def __setitem__(self, tag, data): - """Write raw table data to disk.""" - if tag in self.tables: - from fontTools import ttLib - raise ttLib.TTLibError("cannot rewrite '%s' table" % tag) - - entry = self.DirectoryEntry() - entry.tag = tag - entry.offset = self.nextTableOffset - if tag == 'head': - entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) - self.headTable = data - entry.uncompressed = True - else: - entry.checkSum = calcChecksum(data) - entry.saveData(self.file, data) - - if self.flavor == "woff": - entry.origOffset = self.origNextTableOffset - self.origNextTableOffset += (entry.origLength + 3) & ~3 - - self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3) - # Add NUL bytes to pad the table data to a 4-byte boundary. - # Don't depend on f.seek() as we need to add the padding even if no - # subsequent write follows (seek is lazy), ie. after the final table - # in the font. - self.file.write(b'\0' * (self.nextTableOffset - self.file.tell())) - assert self.nextTableOffset == self.file.tell() - - self.tables[tag] = entry - - def close(self): - """All tables must have been written to disk. Now write the - directory. 
- """ - tables = sorted(self.tables.items()) - if len(tables) != self.numTables: - from fontTools import ttLib - raise ttLib.TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(tables))) - - if self.flavor == "woff": - self.signature = b"wOFF" - self.reserved = 0 - - self.totalSfntSize = 12 - self.totalSfntSize += 16 * len(tables) - for tag, entry in tables: - self.totalSfntSize += (entry.origLength + 3) & ~3 - - data = self.flavorData if self.flavorData else WOFFFlavorData() - if data.majorVersion is not None and data.minorVersion is not None: - self.majorVersion = data.majorVersion - self.minorVersion = data.minorVersion - else: - if hasattr(self, 'headTable'): - self.majorVersion, self.minorVersion = struct.unpack(">HH", self.headTable[4:8]) - else: - self.majorVersion = self.minorVersion = 0 - if data.metaData: - self.metaOrigLength = len(data.metaData) - self.file.seek(0,2) - self.metaOffset = self.file.tell() - import zlib - compressedMetaData = zlib.compress(data.metaData) - self.metaLength = len(compressedMetaData) - self.file.write(compressedMetaData) - else: - self.metaOffset = self.metaLength = self.metaOrigLength = 0 - if data.privData: - self.file.seek(0,2) - off = self.file.tell() - paddedOff = (off + 3) & ~3 - self.file.write('\0' * (paddedOff - off)) - self.privOffset = self.file.tell() - self.privLength = len(data.privData) - self.file.write(data.privData) - else: - self.privOffset = self.privLength = 0 - - self.file.seek(0,2) - self.length = self.file.tell() - - else: - assert not self.flavor, "Unknown flavor '%s'" % self.flavor - pass - - directory = sstruct.pack(self.directoryFormat, self) - - self.file.seek(self.directorySize) - seenHead = 0 - for tag, entry in tables: - if tag == "head": - seenHead = 1 - directory = directory + entry.toString() - if seenHead: - self.writeMasterChecksum(directory) - self.file.seek(0) - self.file.write(directory) - - def _calcMasterChecksum(self, directory): - # calculate 
checkSumAdjustment - tags = list(self.tables.keys()) - checksums = [] - for i in range(len(tags)): - checksums.append(self.tables[tags[i]].checkSum) - - if self.DirectoryEntry != SFNTDirectoryEntry: - # Create a SFNT directory for checksum calculation purposes - self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16) - directory = sstruct.pack(sfntDirectoryFormat, self) - tables = sorted(self.tables.items()) - for tag, entry in tables: - sfntEntry = SFNTDirectoryEntry() - sfntEntry.tag = entry.tag - sfntEntry.checkSum = entry.checkSum - sfntEntry.offset = entry.origOffset - sfntEntry.length = entry.origLength - directory = directory + sfntEntry.toString() - - directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize - assert directory_end == len(directory) - - checksums.append(calcChecksum(directory)) - checksum = sum(checksums) & 0xffffffff - # BiboAfba! - checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff - return checksumadjustment - - def writeMasterChecksum(self, directory): - checksumadjustment = self._calcMasterChecksum(directory) - # write the checksum to the file - self.file.seek(self.tables['head'].offset + 8) - self.file.write(struct.pack(">L", checksumadjustment)) - - def reordersTables(self): - return False - - -# -- sfnt directory helpers and cruft - -ttcHeaderFormat = """ - > # big endian - TTCTag: 4s # "ttcf" - Version: L # 0x00010000 or 0x00020000 - numFonts: L # number of fonts - # OffsetTable[numFonts]: L # array with offsets from beginning of file - # ulDsigTag: L # version 2.0 only - # ulDsigLength: L # version 2.0 only - # ulDsigOffset: L # version 2.0 only -""" - -ttcHeaderSize = sstruct.calcsize(ttcHeaderFormat) - -sfntDirectoryFormat = """ - > # big endian - sfntVersion: 4s - numTables: H # number of tables - searchRange: H # (max2 <= numTables)*16 - entrySelector: H # log2(max2 <= numTables) - rangeShift: H # numTables*16-searchRange -""" - -sfntDirectorySize = 
sstruct.calcsize(sfntDirectoryFormat) - -sfntDirectoryEntryFormat = """ - > # big endian - tag: 4s - checkSum: L - offset: L - length: L -""" - -sfntDirectoryEntrySize = sstruct.calcsize(sfntDirectoryEntryFormat) - -woffDirectoryFormat = """ - > # big endian - signature: 4s # "wOFF" - sfntVersion: 4s - length: L # total woff file size - numTables: H # number of tables - reserved: H # set to 0 - totalSfntSize: L # uncompressed size - majorVersion: H # major version of WOFF file - minorVersion: H # minor version of WOFF file - metaOffset: L # offset to metadata block - metaLength: L # length of compressed metadata - metaOrigLength: L # length of uncompressed metadata - privOffset: L # offset to private data block - privLength: L # length of private data block -""" - -woffDirectorySize = sstruct.calcsize(woffDirectoryFormat) - -woffDirectoryEntryFormat = """ - > # big endian - tag: 4s - offset: L - length: L # compressed length - origLength: L # original length - checkSum: L # original checksum -""" - -woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat) - - -class DirectoryEntry(object): - - def __init__(self): - self.uncompressed = False # if True, always embed entry raw - - def fromFile(self, file): - sstruct.unpack(self.format, file.read(self.formatSize), self) - - def fromString(self, str): - sstruct.unpack(self.format, str, self) - - def toString(self): - return sstruct.pack(self.format, self) - - def __repr__(self): - if hasattr(self, "tag"): - return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self)) - else: - return "<%s at %x>" % (self.__class__.__name__, id(self)) - - def loadData(self, file): - file.seek(self.offset) - data = file.read(self.length) - assert len(data) == self.length - if hasattr(self.__class__, 'decodeData'): - data = self.decodeData(data) - return data - - def saveData(self, file, data): - if hasattr(self.__class__, 'encodeData'): - data = self.encodeData(data) - self.length = len(data) - 
file.seek(self.offset) - file.write(data) - - def decodeData(self, rawData): - return rawData - - def encodeData(self, data): - return data - -class SFNTDirectoryEntry(DirectoryEntry): - - format = sfntDirectoryEntryFormat - formatSize = sfntDirectoryEntrySize - -class WOFFDirectoryEntry(DirectoryEntry): - - format = woffDirectoryEntryFormat - formatSize = woffDirectoryEntrySize - zlibCompressionLevel = 6 - - def decodeData(self, rawData): - import zlib - if self.length == self.origLength: - data = rawData - else: - assert self.length < self.origLength - data = zlib.decompress(rawData) - assert len (data) == self.origLength - return data - - def encodeData(self, data): - import zlib - self.origLength = len(data) - if not self.uncompressed: - compressedData = zlib.compress(data, self.zlibCompressionLevel) - if self.uncompressed or len(compressedData) >= self.origLength: - # Encode uncompressed - rawData = data - self.length = self.origLength - else: - rawData = compressedData - self.length = len(rawData) - return rawData - -class WOFFFlavorData(): - - Flavor = 'woff' - - def __init__(self, reader=None): - self.majorVersion = None - self.minorVersion = None - self.metaData = None - self.privData = None - if reader: - self.majorVersion = reader.majorVersion - self.minorVersion = reader.minorVersion - if reader.metaLength: - reader.file.seek(reader.metaOffset) - rawData = reader.file.read(reader.metaLength) - assert len(rawData) == reader.metaLength - import zlib - data = zlib.decompress(rawData) - assert len(data) == reader.metaOrigLength - self.metaData = data - if reader.privLength: - reader.file.seek(reader.privOffset) - data = reader.file.read(reader.privLength) - assert len(data) == reader.privLength - self.privData = data - - -def calcChecksum(data): - """Calculate the checksum for an arbitrary block of data. - Optionally takes a 'start' argument, which allows you to - calculate a checksum in chunks by feeding it a previous - result. 
- - If the data length is not a multiple of four, it assumes - it is to be padded with null byte. - - >>> print(calcChecksum(b"abcd")) - 1633837924 - >>> print(calcChecksum(b"abcdxyz")) - 3655064932 - """ - remainder = len(data) % 4 - if remainder: - data += b"\0" * (4 - remainder) - value = 0 - blockSize = 4096 - assert blockSize % 4 == 0 - for i in range(0, len(data), blockSize): - block = data[i:i+blockSize] - longs = struct.unpack(">%dL" % (len(block) // 4), block) - value = (value + sum(longs)) & 0xffffffff - return value - - -if __name__ == "__main__": - import sys - import doctest - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/standardGlyphOrder.py fonttools-3.21.2/Tools/fontTools/ttLib/standardGlyphOrder.py --- fonttools-3.0/Tools/fontTools/ttLib/standardGlyphOrder.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/standardGlyphOrder.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,274 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -# -# 'post' table formats 1.0 and 2.0 rely on this list of "standard" -# glyphs. -# -# My list is correct according to the Apple documentation for the 'post' -# table: http://developer.apple.com/fonts/TTRefMan/RM06/Chap6post.html -# (However, it seems that TTFdump (from MS) and FontLab disagree, at -# least with respect to the last glyph, which they list as 'dslash' -# instead of 'dcroat'.) 
-# - -standardGlyphOrder = [ - ".notdef", # 0 - ".null", # 1 - "nonmarkingreturn", # 2 - "space", # 3 - "exclam", # 4 - "quotedbl", # 5 - "numbersign", # 6 - "dollar", # 7 - "percent", # 8 - "ampersand", # 9 - "quotesingle", # 10 - "parenleft", # 11 - "parenright", # 12 - "asterisk", # 13 - "plus", # 14 - "comma", # 15 - "hyphen", # 16 - "period", # 17 - "slash", # 18 - "zero", # 19 - "one", # 20 - "two", # 21 - "three", # 22 - "four", # 23 - "five", # 24 - "six", # 25 - "seven", # 26 - "eight", # 27 - "nine", # 28 - "colon", # 29 - "semicolon", # 30 - "less", # 31 - "equal", # 32 - "greater", # 33 - "question", # 34 - "at", # 35 - "A", # 36 - "B", # 37 - "C", # 38 - "D", # 39 - "E", # 40 - "F", # 41 - "G", # 42 - "H", # 43 - "I", # 44 - "J", # 45 - "K", # 46 - "L", # 47 - "M", # 48 - "N", # 49 - "O", # 50 - "P", # 51 - "Q", # 52 - "R", # 53 - "S", # 54 - "T", # 55 - "U", # 56 - "V", # 57 - "W", # 58 - "X", # 59 - "Y", # 60 - "Z", # 61 - "bracketleft", # 62 - "backslash", # 63 - "bracketright", # 64 - "asciicircum", # 65 - "underscore", # 66 - "grave", # 67 - "a", # 68 - "b", # 69 - "c", # 70 - "d", # 71 - "e", # 72 - "f", # 73 - "g", # 74 - "h", # 75 - "i", # 76 - "j", # 77 - "k", # 78 - "l", # 79 - "m", # 80 - "n", # 81 - "o", # 82 - "p", # 83 - "q", # 84 - "r", # 85 - "s", # 86 - "t", # 87 - "u", # 88 - "v", # 89 - "w", # 90 - "x", # 91 - "y", # 92 - "z", # 93 - "braceleft", # 94 - "bar", # 95 - "braceright", # 96 - "asciitilde", # 97 - "Adieresis", # 98 - "Aring", # 99 - "Ccedilla", # 100 - "Eacute", # 101 - "Ntilde", # 102 - "Odieresis", # 103 - "Udieresis", # 104 - "aacute", # 105 - "agrave", # 106 - "acircumflex", # 107 - "adieresis", # 108 - "atilde", # 109 - "aring", # 110 - "ccedilla", # 111 - "eacute", # 112 - "egrave", # 113 - "ecircumflex", # 114 - "edieresis", # 115 - "iacute", # 116 - "igrave", # 117 - "icircumflex", # 118 - "idieresis", # 119 - "ntilde", # 120 - "oacute", # 121 - "ograve", # 122 - "ocircumflex", # 123 - "odieresis", # 124 - 
"otilde", # 125 - "uacute", # 126 - "ugrave", # 127 - "ucircumflex", # 128 - "udieresis", # 129 - "dagger", # 130 - "degree", # 131 - "cent", # 132 - "sterling", # 133 - "section", # 134 - "bullet", # 135 - "paragraph", # 136 - "germandbls", # 137 - "registered", # 138 - "copyright", # 139 - "trademark", # 140 - "acute", # 141 - "dieresis", # 142 - "notequal", # 143 - "AE", # 144 - "Oslash", # 145 - "infinity", # 146 - "plusminus", # 147 - "lessequal", # 148 - "greaterequal", # 149 - "yen", # 150 - "mu", # 151 - "partialdiff", # 152 - "summation", # 153 - "product", # 154 - "pi", # 155 - "integral", # 156 - "ordfeminine", # 157 - "ordmasculine", # 158 - "Omega", # 159 - "ae", # 160 - "oslash", # 161 - "questiondown", # 162 - "exclamdown", # 163 - "logicalnot", # 164 - "radical", # 165 - "florin", # 166 - "approxequal", # 167 - "Delta", # 168 - "guillemotleft", # 169 - "guillemotright", # 170 - "ellipsis", # 171 - "nonbreakingspace", # 172 - "Agrave", # 173 - "Atilde", # 174 - "Otilde", # 175 - "OE", # 176 - "oe", # 177 - "endash", # 178 - "emdash", # 179 - "quotedblleft", # 180 - "quotedblright", # 181 - "quoteleft", # 182 - "quoteright", # 183 - "divide", # 184 - "lozenge", # 185 - "ydieresis", # 186 - "Ydieresis", # 187 - "fraction", # 188 - "currency", # 189 - "guilsinglleft", # 190 - "guilsinglright", # 191 - "fi", # 192 - "fl", # 193 - "daggerdbl", # 194 - "periodcentered", # 195 - "quotesinglbase", # 196 - "quotedblbase", # 197 - "perthousand", # 198 - "Acircumflex", # 199 - "Ecircumflex", # 200 - "Aacute", # 201 - "Edieresis", # 202 - "Egrave", # 203 - "Iacute", # 204 - "Icircumflex", # 205 - "Idieresis", # 206 - "Igrave", # 207 - "Oacute", # 208 - "Ocircumflex", # 209 - "apple", # 210 - "Ograve", # 211 - "Uacute", # 212 - "Ucircumflex", # 213 - "Ugrave", # 214 - "dotlessi", # 215 - "circumflex", # 216 - "tilde", # 217 - "macron", # 218 - "breve", # 219 - "dotaccent", # 220 - "ring", # 221 - "cedilla", # 222 - "hungarumlaut", # 223 - "ogonek", # 224 - 
"caron", # 225 - "Lslash", # 226 - "lslash", # 227 - "Scaron", # 228 - "scaron", # 229 - "Zcaron", # 230 - "zcaron", # 231 - "brokenbar", # 232 - "Eth", # 233 - "eth", # 234 - "Yacute", # 235 - "yacute", # 236 - "Thorn", # 237 - "thorn", # 238 - "minus", # 239 - "multiply", # 240 - "onesuperior", # 241 - "twosuperior", # 242 - "threesuperior", # 243 - "onehalf", # 244 - "onequarter", # 245 - "threequarters", # 246 - "franc", # 247 - "Gbreve", # 248 - "gbreve", # 249 - "Idotaccent", # 250 - "Scedilla", # 251 - "scedilla", # 252 - "Cacute", # 253 - "cacute", # 254 - "Ccaron", # 255 - "ccaron", # 256 - "dcroat" # 257 -] diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/asciiTable.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/asciiTable.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/asciiTable.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/asciiTable.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,22 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . import DefaultTable - - -class asciiTable(DefaultTable.DefaultTable): - - def toXML(self, writer, ttFont): - data = tostr(self.data) - # removing null bytes. XXX needed?? 
- data = data.split('\0') - data = strjoin(data) - writer.begintag("source") - writer.newline() - writer.write_noindent(data.replace("\r", "\n")) - writer.newline() - writer.endtag("source") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - lines = strjoin(content).replace("\r", "\n").split("\n") - self.data = tobytes("\r".join(lines[1:-1])) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_a_v_a_r.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_a_v_a_r.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_a_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_a_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,94 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib -from fontTools.misc import sstruct -from fontTools.misc.fixedTools import fixedToFloat, floatToFixed -from fontTools.misc.textTools import safeEval -from fontTools.ttLib import TTLibError -from . 
import DefaultTable -import array -import struct -import warnings - - -# Apple's documentation of 'avar': -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6avar.html - -AVAR_HEADER_FORMAT = """ - > # big endian - version: L - axisCount: L -""" - - -class table__a_v_a_r(DefaultTable.DefaultTable): - dependencies = ["fvar"] - - def __init__(self, tag=None): - DefaultTable.DefaultTable.__init__(self, tag) - self.segments = {} - - def compile(self, ttFont): - axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - header = {"version": 0x00010000, "axisCount": len(axisTags)} - result = [sstruct.pack(AVAR_HEADER_FORMAT, header)] - for axis in axisTags: - mappings = sorted(self.segments[axis].items()) - result.append(struct.pack(">H", len(mappings))) - for key, value in mappings: - fixedKey = floatToFixed(key, 14) - fixedValue = floatToFixed(value, 14) - result.append(struct.pack(">hh", fixedKey, fixedValue)) - return bytesjoin(result) - - def decompile(self, data, ttFont): - axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - header = {} - headerSize = sstruct.calcsize(AVAR_HEADER_FORMAT) - header = sstruct.unpack(AVAR_HEADER_FORMAT, data[0:headerSize]) - if header["version"] != 0x00010000: - raise TTLibError("unsupported 'avar' version %04x" % header["version"]) - pos = headerSize - for axis in axisTags: - segments = self.segments[axis] = {} - numPairs = struct.unpack(">H", data[pos:pos+2])[0] - pos = pos + 2 - for _ in range(numPairs): - fromValue, toValue = struct.unpack(">hh", data[pos:pos+4]) - segments[fixedToFloat(fromValue, 14)] = fixedToFloat(toValue, 14) - pos = pos + 4 - self.fixupSegments_(warn=warnings.warn) - - def toXML(self, writer, ttFont, progress=None): - axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - for axis in axisTags: - writer.begintag("segment", axis=axis) - writer.newline() - for key, value in sorted(self.segments[axis].items()): - writer.simpletag("mapping", **{"from": key, "to": value}) - 
writer.newline() - writer.endtag("segment") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "segment": - axis = attrs["axis"] - segment = self.segments[axis] = {} - for element in content: - if isinstance(element, tuple): - elementName, elementAttrs, _ = element - if elementName == "mapping": - fromValue = safeEval(elementAttrs["from"]) - toValue = safeEval(elementAttrs["to"]) - if fromValue in segment: - warnings.warn("duplicate entry for %s in axis '%s'" % - (fromValue, axis)) - segment[fromValue] = toValue - self.fixupSegments_(warn=warnings.warn) - - def fixupSegments_(self, warn): - for axis, mappings in self.segments.items(): - for k in [-1.0, 0.0, 1.0]: - if mappings.get(k) != k: - warn("avar axis '%s' should map %s to %s" % (axis, k, k)) - mappings[k] = k diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_a_v_a_r_test.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_a_v_a_r_test.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_a_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_a_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._a_v_a_r import table__a_v_a_r -from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis -import collections -import unittest - - -TEST_DATA = deHexStr( - "00 01 00 00 00 00 00 02 " - "00 04 C0 00 C0 00 00 00 00 00 13 33 33 33 40 00 40 00 " - "00 03 C0 00 C0 00 00 00 00 00 40 00 40 00") - - -class AxisVariationTableTest(unittest.TestCase): - def test_compile(self): - avar = table__a_v_a_r() - avar.segments["wdth"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} - avar.segments["wght"] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} - 
self.assertEqual(TEST_DATA, avar.compile(self.makeFont(["wdth", "wght"]))) - - def test_decompile(self): - avar = table__a_v_a_r() - avar.decompile(TEST_DATA, self.makeFont(["wdth", "wght"])) - self.assertEqual({ - "wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}, - "wght": {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} - }, avar.segments) - - def test_decompile_unsupportedVersion(self): - avar = table__a_v_a_r() - font = self.makeFont(["wdth", "wght"]) - self.assertRaises(TTLibError, avar.decompile, deHexStr("02 01 03 06 00 00 00 00"), font) - - def test_toXML(self): - avar = table__a_v_a_r() - avar.segments["opsz"] = {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0} - writer = XMLWriter(BytesIO()) - avar.toXML(writer, self.makeFont(["opsz"])) - self.assertEqual([ - '', - '', - '', - '', - '', - '' - ], self.xml_lines(writer)) - - def test_fromXML(self): - avar = table__a_v_a_r() - avar.fromXML("segment", {"axis":"wdth"}, [ - ("mapping", {"from": "-1.0", "to": "-1.0"}, []), - ("mapping", {"from": "0.0", "to": "0.0"}, []), - ("mapping", {"from": "0.7", "to": "0.2"}, []), - ("mapping", {"from": "1.0", "to": "1.0"}, []) - ], ttFont=None) - self.assertEqual({"wdth": {-1: -1, 0: 0, 0.7: 0.2, 1.0: 1.0}}, avar.segments) - - def test_fixupSegments(self): - avar = table__a_v_a_r() - avar.segments = {"wdth": {0.3: 0.8, 1.0: 0.7}} - warnings = [] - avar.fixupSegments_(lambda w: warnings.append(w)) - self.assertEqual({"wdth": {-1.0: -1.0, 0.0: 0.0, 0.3: 0.8, 1.0: 1.0}}, avar.segments) - self.assertEqual([ - "avar axis 'wdth' should map -1.0 to -1.0", - "avar axis 'wdth' should map 0.0 to 0.0", - "avar axis 'wdth' should map 1.0 to 1.0" - ], warnings) - - @staticmethod - def makeFont(axisTags): - """['opsz', 'wdth'] --> ttFont""" - fvar = table__f_v_a_r() - for tag in axisTags: - axis = Axis() - axis.axisTag = tag - fvar.axes.append(axis) - return {"fvar": fvar} - - @staticmethod - def xml_lines(writer): - content = writer.file.getvalue().decode("utf-8") - return [line.strip() for line in 
content.splitlines()][1:] - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/B_A_S_E_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/B_A_S_E_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/B_A_S_E_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/B_A_S_E_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from .otBase import BaseTTXConverter - - -class table_B_A_S_E_(BaseTTXConverter): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/BitmapGlyphMetrics.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/BitmapGlyphMetrics.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/BitmapGlyphMetrics.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/BitmapGlyphMetrics.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,58 +0,0 @@ -# Since bitmap glyph metrics are shared between EBLC and EBDT -# this class gets its own python file. 
-from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval - - -bigGlyphMetricsFormat = """ - > # big endian - height: B - width: B - horiBearingX: b - horiBearingY: b - horiAdvance: B - vertBearingX: b - vertBearingY: b - vertAdvance: B -""" - -smallGlyphMetricsFormat = """ - > # big endian - height: B - width: B - BearingX: b - BearingY: b - Advance: B -""" - -class BitmapGlyphMetrics(object): - - def toXML(self, writer, ttFont): - writer.begintag(self.__class__.__name__) - writer.newline() - for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]: - writer.simpletag(metricName, value=getattr(self, metricName)) - writer.newline() - writer.endtag(self.__class__.__name__) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1]) - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - # Make sure this is a metric that is needed by GlyphMetrics. - if name in metricNames: - vars(self)[name] = safeEval(attrs['value']) - else: - print("Warning: unknown name '%s' being ignored in %s." % name, self.__class__.__name__) - - -class BigGlyphMetrics(BitmapGlyphMetrics): - binaryFormat = bigGlyphMetricsFormat - -class SmallGlyphMetrics(BitmapGlyphMetrics): - binaryFormat = smallGlyphMetricsFormat diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/C_B_D_T_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/C_B_D_T_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/C_B_D_T_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/C_B_D_T_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. 
-# -# Google Author(s): Matt Fontaine - - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from . import E_B_D_T_ -from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat -from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin -import struct - -class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_): - - # Change the data locator table being referenced. - locatorName = 'CBLC' - - # Modify the format class accessor for color bitmap use. - def getImageFormatClass(self, imageFormat): - try: - return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat) - except KeyError: - return cbdt_bitmap_classes[imageFormat] - -# Helper method for removing export features not supported by color bitmaps. -# Write data in the parent class will default to raw if an option is unsupported. -def _removeUnsupportedForColor(dataFunctions): - dataFunctions = dict(dataFunctions) - del dataFunctions['row'] - return dataFunctions - -class ColorBitmapGlyph(BitmapGlyph): - - fileExtension = '.png' - xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions) - -class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph): - - def decompile(self): - self.metrics = SmallGlyphMetrics() - dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) - (dataLen,) = struct.unpack(">L", data[:4]) - data = data[4:] - - # For the image data cut it to the size specified by dataLen. 
- assert dataLen <= len(data), "Data overun in format 17" - self.imageData = data[:dataLen] - - def compile(self, ttFont): - dataList = [] - dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) - dataList.append(struct.pack(">L", len(self.imageData))) - dataList.append(self.imageData) - return bytesjoin(dataList) - -class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph): - - def decompile(self): - self.metrics = BigGlyphMetrics() - dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) - (dataLen,) = struct.unpack(">L", data[:4]) - data = data[4:] - - # For the image data cut it to the size specified by dataLen. - assert dataLen <= len(data), "Data overun in format 18" - self.imageData = data[:dataLen] - - def compile(self, ttFont): - dataList = [] - dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) - dataList.append(struct.pack(">L", len(self.imageData))) - dataList.append(self.imageData) - return bytesjoin(dataList) - -class cbdt_bitmap_format_19(ColorBitmapGlyph): - - def decompile(self): - (dataLen,) = struct.unpack(">L", self.data[:4]) - data = self.data[4:] - - assert dataLen <= len(data), "Data overun in format 19" - self.imageData = data[:dataLen] - - def compile(self, ttFont): - return struct.pack(">L", len(self.imageData)) + self.imageData - -# Dict for CBDT extended formats. -cbdt_bitmap_classes = { - 17: cbdt_bitmap_format_17, - 18: cbdt_bitmap_format_18, - 19: cbdt_bitmap_format_19, -} diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/C_B_L_C_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/C_B_L_C_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/C_B_L_C_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/C_B_L_C_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. 
-# -# Google Author(s): Matt Fontaine - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . import E_B_L_C_ - -class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_): - - dependencies = ['CBDT'] diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/C_F_F_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/C_F_F_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/C_F_F_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/C_F_F_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import cffLib -from . import DefaultTable - - -class table_C_F_F_(DefaultTable.DefaultTable): - - def __init__(self, tag): - DefaultTable.DefaultTable.__init__(self, tag) - self.cff = cffLib.CFFFontSet() - self._gaveGlyphOrder = False - - def decompile(self, data, otFont): - self.cff.decompile(BytesIO(data), otFont) - assert len(self.cff) == 1, "can't deal with multi-font CFF tables." 
- - def compile(self, otFont): - f = BytesIO() - self.cff.compile(f, otFont) - return f.getvalue() - - def haveGlyphNames(self): - if hasattr(self.cff[self.cff.fontNames[0]], "ROS"): - return False # CID-keyed font - else: - return True - - def getGlyphOrder(self): - if self._gaveGlyphOrder: - from fontTools import ttLib - raise ttLib.TTLibError("illegal use of getGlyphOrder()") - self._gaveGlyphOrder = True - return self.cff[self.cff.fontNames[0]].getGlyphOrder() - - def setGlyphOrder(self, glyphOrder): - pass - # XXX - #self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder) - - def toXML(self, writer, otFont, progress=None): - self.cff.toXML(writer, progress) - - def fromXML(self, name, attrs, content, otFont): - if not hasattr(self, "cff"): - self.cff = cffLib.CFFFontSet() - self.cff.fromXML(name, attrs, content) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_c_m_a_p.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_c_m_a_p.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_c_m_a_p.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_c_m_a_p.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1294 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval, readHex -from fontTools.misc.encodingTools import getEncoding -from fontTools.ttLib import getSearchRange -from fontTools.unicode import Unicode -from . 
import DefaultTable -import sys -import struct -import array -import operator - - -class table__c_m_a_p(DefaultTable.DefaultTable): - - def getcmap(self, platformID, platEncID): - for subtable in self.tables: - if (subtable.platformID == platformID and - subtable.platEncID == platEncID): - return subtable - return None # not found - - def decompile(self, data, ttFont): - tableVersion, numSubTables = struct.unpack(">HH", data[:4]) - self.tableVersion = int(tableVersion) - self.tables = tables = [] - seenOffsets = {} - for i in range(numSubTables): - platformID, platEncID, offset = struct.unpack( - ">HHl", data[4+i*8:4+(i+1)*8]) - platformID, platEncID = int(platformID), int(platEncID) - format, length = struct.unpack(">HH", data[offset:offset+4]) - if format in [8,10,12,13]: - format, reserved, length = struct.unpack(">HHL", data[offset:offset+8]) - elif format in [14]: - format, length = struct.unpack(">HL", data[offset:offset+6]) - - if not length: - print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s, format %s offset %s. Skipping table." % (platformID, platEncID,format, offset)) - continue - table = CmapSubtable.newSubtable(format) - table.platformID = platformID - table.platEncID = platEncID - # Note that by default we decompile only the subtable header info; - # any other data gets decompiled only when an attribute of the - # subtable is referenced. - table.decompileHeader(data[offset:offset+int(length)], ttFont) - if offset in seenOffsets: - table.data = None # Mark as decompiled - table.cmap = tables[seenOffsets[offset]].cmap - else: - seenOffsets[offset] = i - tables.append(table) - - def compile(self, ttFont): - self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__() - numSubTables = len(self.tables) - totalOffset = 4 + 8 * numSubTables - data = struct.pack(">HH", self.tableVersion, numSubTables) - tableData = b"" - seen = {} # Some tables are the same object reference. Don't compile them twice. 
- done = {} # Some tables are different objects, but compile to the same data chunk - for table in self.tables: - try: - offset = seen[id(table.cmap)] - except KeyError: - chunk = table.compile(ttFont) - if chunk in done: - offset = done[chunk] - else: - offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData) - tableData = tableData + chunk - data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset) - return data + tableData - - def toXML(self, writer, ttFont): - writer.simpletag("tableVersion", version=self.tableVersion) - writer.newline() - for table in self.tables: - table.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name == "tableVersion": - self.tableVersion = safeEval(attrs["version"]) - return - if name[:12] != "cmap_format_": - return - if not hasattr(self, "tables"): - self.tables = [] - format = safeEval(name[12:]) - table = CmapSubtable.newSubtable(format) - table.platformID = safeEval(attrs["platformID"]) - table.platEncID = safeEval(attrs["platEncID"]) - table.fromXML(name, attrs, content, ttFont) - self.tables.append(table) - - -class CmapSubtable(object): - - @staticmethod - def getSubtableClass(format): - """Return the subtable class for a format.""" - return cmap_classes.get(format, cmap_format_unknown) - - @staticmethod - def newSubtable(format): - """Return a new instance of a subtable for format.""" - subtableClass = CmapSubtable.getSubtableClass(format) - return subtableClass(format) - - def __init__(self, format): - self.format = format - self.data = None - self.ttFont = None - - def __getattr__(self, attr): - # allow lazy decompilation of subtables. - if attr[:2] == '__': # don't handle requests for member functions like '__lt__' - raise AttributeError(attr) - if self.data is None: - raise AttributeError(attr) - self.decompile(None, None) # use saved data. - self.data = None # Once this table has been decompiled, make sure we don't - # just return the original data. 
Also avoids recursion when - # called with an attribute that the cmap subtable doesn't have. - return getattr(self, attr) - - def decompileHeader(self, data, ttFont): - format, length, language = struct.unpack(">HHH", data[:6]) - assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length) - self.format = int(format) - self.length = int(length) - self.language = int(language) - self.data = data[6:] - self.ttFont = ttFont - - def toXML(self, writer, ttFont): - writer.begintag(self.__class__.__name__, [ - ("platformID", self.platformID), - ("platEncID", self.platEncID), - ("language", self.language), - ]) - writer.newline() - codes = sorted(self.cmap.items()) - self._writeCodes(codes, writer) - writer.endtag(self.__class__.__name__) - writer.newline() - - def getEncoding(self, default=None): - """Returns the Python encoding name for this cmap subtable based on its platformID, - platEncID, and language. If encoding for these values is not known, by default - None is returned. That can be overriden by passing a value to the default - argument. - - Note that if you want to choose a "preferred" cmap subtable, most of the time - self.isUnicode() is what you want as that one only returns true for the modern, - commonly used, Unicode-compatible triplets, not the legacy ones. 
- """ - return getEncoding(self.platformID, self.platEncID, self.language, default) - - def isUnicode(self): - return (self.platformID == 0 or - (self.platformID == 3 and self.platEncID in [0, 1, 10])) - - def isSymbol(self): - return self.platformID == 3 and self.platEncID == 0 - - def _writeCodes(self, codes, writer): - isUnicode = self.isUnicode() - for code, name in codes: - writer.simpletag("map", code=hex(code), name=name) - if isUnicode: - writer.comment(Unicode[code]) - writer.newline() - - def __lt__(self, other): - if not isinstance(other, CmapSubtable): - return NotImplemented - - # implemented so that list.sort() sorts according to the spec. - selfTuple = ( - getattr(self, "platformID", None), - getattr(self, "platEncID", None), - getattr(self, "language", None), - self.__dict__) - otherTuple = ( - getattr(other, "platformID", None), - getattr(other, "platEncID", None), - getattr(other, "language", None), - other.__dict__) - return selfTuple < otherTuple - - -class cmap_format_0(CmapSubtable): - - def decompile(self, data, ttFont): - # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. - # If not, someone is calling the subtable decompile() directly, and must provide both args. 
- if data is not None and ttFont is not None: - self.decompileHeader(data, ttFont) - else: - assert (data is None and ttFont is None), "Need both data and ttFont arguments" - data = self.data # decompileHeader assigns the data after the header to self.data - assert 262 == self.length, "Format 0 cmap subtable not 262 bytes" - glyphIdArray = array.array("B") - glyphIdArray.fromstring(self.data) - self.cmap = cmap = {} - lenArray = len(glyphIdArray) - charCodes = list(range(lenArray)) - names = map(self.ttFont.getGlyphName, glyphIdArray) - list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) - - def compile(self, ttFont): - if self.data: - return struct.pack(">HHH", 0, 262, self.language) + self.data - - charCodeList = sorted(self.cmap.items()) - charCodes = [entry[0] for entry in charCodeList] - valueList = [entry[1] for entry in charCodeList] - assert charCodes == list(range(256)) - valueList = map(ttFont.getGlyphID, valueList) - - glyphIdArray = array.array("B", valueList) - data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring() - assert len(data) == 262 - return data - - def fromXML(self, name, attrs, content, ttFont): - self.language = safeEval(attrs["language"]) - if not hasattr(self, "cmap"): - self.cmap = {} - cmap = self.cmap - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name != "map": - continue - cmap[safeEval(attrs["code"])] = attrs["name"] - - -subHeaderFormat = ">HHhH" -class SubHeader(object): - def __init__(self): - self.firstCode = None - self.entryCount = None - self.idDelta = None - self.idRangeOffset = None - self.glyphIndexArray = [] - -class cmap_format_2(CmapSubtable): - - def setIDDelta(self, subHeader): - subHeader.idDelta = 0 - # find the minGI which is not zero. 
- minGI = subHeader.glyphIndexArray[0] - for gid in subHeader.glyphIndexArray: - if (gid != 0) and (gid < minGI): - minGI = gid - # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1. - # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K. - # We would like to pick an idDelta such that the first glyphArray GID is 1, - # so that we are more likely to be able to combine glypharray GID subranges. - # This means that we have a problem when minGI is > 32K - # Since the final gi is reconstructed from the glyphArray GID by: - # (short)finalGID = (gid + idDelta) % 0x10000), - # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the - # negative number to an unsigned short. - - if (minGI > 1): - if minGI > 0x7FFF: - subHeader.idDelta = -(0x10000 - minGI) -1 - else: - subHeader.idDelta = minGI -1 - idDelta = subHeader.idDelta - for i in range(subHeader.entryCount): - gid = subHeader.glyphIndexArray[i] - if gid > 0: - subHeader.glyphIndexArray[i] = gid - idDelta - - def decompile(self, data, ttFont): - # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. - # If not, someone is calling the subtable decompile() directly, and must provide both args. - if data is not None and ttFont is not None: - self.decompileHeader(data, ttFont) - else: - assert (data is None and ttFont is None), "Need both data and ttFont arguments" - - data = self.data # decompileHeader assigns the data after the header to self.data - subHeaderKeys = [] - maxSubHeaderindex = 0 - # get the key array, and determine the number of subHeaders. 
- allKeys = array.array("H") - allKeys.fromstring(data[:512]) - data = data[512:] - if sys.byteorder != "big": - allKeys.byteswap() - subHeaderKeys = [ key//8 for key in allKeys] - maxSubHeaderindex = max(subHeaderKeys) - - #Load subHeaders - subHeaderList = [] - pos = 0 - for i in range(maxSubHeaderindex + 1): - subHeader = SubHeader() - (subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \ - subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8]) - pos += 8 - giDataPos = pos + subHeader.idRangeOffset-2 - giList = array.array("H") - giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2]) - if sys.byteorder != "big": - giList.byteswap() - subHeader.glyphIndexArray = giList - subHeaderList.append(subHeader) - # How this gets processed. - # Charcodes may be one or two bytes. - # The first byte of a charcode is mapped through the subHeaderKeys, to select - # a subHeader. For any subheader but 0, the next byte is then mapped through the - # selected subheader. If subheader Index 0 is selected, then the byte itself is - # mapped through the subheader, and there is no second byte. - # Then assume that the subsequent byte is the first byte of the next charcode,and repeat. - # - # Each subheader references a range in the glyphIndexArray whose length is entryCount. - # The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray - # referenced by another subheader. - # The only subheader that will be referenced by more than one first-byte value is the subheader - # that maps the entire range of glyphID values to glyphIndex 0, e.g notdef: - # {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx} - # A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex. - # A subheader specifies a subrange within (0...256) by the - # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero - # (e.g. 
glyph not in font). - # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar). - # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by - # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the - # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex. - # Example for Logocut-Medium - # first byte of charcode = 129; selects subheader 1. - # subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252} - # second byte of charCode = 66 - # the index offset = 66-64 = 2. - # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is: - # [glyphIndexArray index], [subrange array index] = glyphIndex - # [256], [0]=1 from charcode [129, 64] - # [257], [1]=2 from charcode [129, 65] - # [258], [2]=3 from charcode [129, 66] - # [259], [3]=4 from charcode [129, 67] - # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero, - # add it to the glyphID to get the final glyphIndex - # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew! - - self.data = b"" - self.cmap = cmap = {} - notdefGI = 0 - for firstByte in range(256): - subHeadindex = subHeaderKeys[firstByte] - subHeader = subHeaderList[subHeadindex] - if subHeadindex == 0: - if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount): - continue # gi is notdef. - else: - charCode = firstByte - offsetIndex = firstByte - subHeader.firstCode - gi = subHeader.glyphIndexArray[offsetIndex] - if gi != 0: - gi = (gi + subHeader.idDelta) % 0x10000 - else: - continue # gi is notdef. 
- cmap[charCode] = gi - else: - if subHeader.entryCount: - charCodeOffset = firstByte * 256 + subHeader.firstCode - for offsetIndex in range(subHeader.entryCount): - charCode = charCodeOffset + offsetIndex - gi = subHeader.glyphIndexArray[offsetIndex] - if gi != 0: - gi = (gi + subHeader.idDelta) % 0x10000 - else: - continue - cmap[charCode] = gi - # If not subHeader.entryCount, then all char codes with this first byte are - # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the - # same as mapping it to .notdef. - # cmap values are GID's. - glyphOrder = self.ttFont.getGlyphOrder() - gids = list(cmap.values()) - charCodes = list(cmap.keys()) - lenCmap = len(gids) - try: - names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, gids )) - list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) - - def compile(self, ttFont): - if self.data: - return struct.pack(">HHH", self.format, self.length, self.language) + self.data - kEmptyTwoCharCodeRange = -1 - notdefGI = 0 - - items = sorted(self.cmap.items()) - charCodes = [item[0] for item in items] - names = [item[1] for item in items] - nameMap = ttFont.getReverseGlyphMap() - lenCharCodes = len(charCodes) - try: - gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) - except KeyError: - nameMap = ttFont.getReverseGlyphMap(rebuild=True) - try: - gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) - except KeyError: - # allow virtual GIDs in format 2 tables - gids = [] - for name in names: - try: - gid = nameMap[name] - except KeyError: - try: - if (name[:3] == 'gid'): - gid = eval(name[3:]) - else: - gid = ttFont.getGlyphID(name) - except: - raise KeyError(name) - - gids.append(gid) - - # Process the (char code to gid) item list in char code order. - # By definition, all one byte char codes map to subheader 0. 
- # For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0, - # which defines all char codes in its range to map to notdef) unless proven otherwise. - # Note that since the char code items are processed in char code order, all the char codes with the - # same first byte are in sequential order. - - subHeaderKeys = [ kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList. - subHeaderList = [] - - # We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up - # with a cmap where all the one byte char codes map to notdef, - # with the result that the subhead 0 would not get created just by processing the item list. - charCode = charCodes[0] - if charCode > 255: - subHeader = SubHeader() - subHeader.firstCode = 0 - subHeader.entryCount = 0 - subHeader.idDelta = 0 - subHeader.idRangeOffset = 0 - subHeaderList.append(subHeader) - - lastFirstByte = -1 - items = zip(charCodes, gids) - for charCode, gid in items: - if gid == 0: - continue - firstbyte = charCode >> 8 - secondByte = charCode & 0x00FF - - if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one. - if lastFirstByte > -1: - # fix GI's and iDelta of current subheader. - self.setIDDelta(subHeader) - - # If it was sunheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero - # for the indices matching the char codes. - if lastFirstByte == 0: - for index in range(subHeader.entryCount): - charCode = subHeader.firstCode + index - subHeaderKeys[charCode] = 0 - - assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange." 
- # init new subheader - subHeader = SubHeader() - subHeader.firstCode = secondByte - subHeader.entryCount = 1 - subHeader.glyphIndexArray.append(gid) - subHeaderList.append(subHeader) - subHeaderKeys[firstbyte] = len(subHeaderList) -1 - lastFirstByte = firstbyte - else: - # need to fill in with notdefs all the code points between the last charCode and the current charCode. - codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount) - for i in range(codeDiff): - subHeader.glyphIndexArray.append(notdefGI) - subHeader.glyphIndexArray.append(gid) - subHeader.entryCount = subHeader.entryCount + codeDiff + 1 - - # fix GI's and iDelta of last subheader that we we added to the subheader array. - self.setIDDelta(subHeader) - - # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges. - subHeader = SubHeader() - subHeader.firstCode = 0 - subHeader.entryCount = 0 - subHeader.idDelta = 0 - subHeader.idRangeOffset = 2 - subHeaderList.append(subHeader) - emptySubheadIndex = len(subHeaderList) - 1 - for index in range(256): - if subHeaderKeys[index] == kEmptyTwoCharCodeRange: - subHeaderKeys[index] = emptySubheadIndex - # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the - # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray, - # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with - # charcode 0 and GID 0. - - idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset. - subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2. 
- for index in range(subheadRangeLen): - subHeader = subHeaderList[index] - subHeader.idRangeOffset = 0 - for j in range(index): - prevSubhead = subHeaderList[j] - if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray - subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8 - subHeader.glyphIndexArray = [] - break - if subHeader.idRangeOffset == 0: # didn't find one. - subHeader.idRangeOffset = idRangeOffset - idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray. - else: - idRangeOffset = idRangeOffset - 8 # one less subheader - - # Now we can write out the data! - length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array. - for subhead in subHeaderList[:-1]: - length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subhead may share subArrays. - dataList = [struct.pack(">HHH", 2, length, self.language)] - for index in subHeaderKeys: - dataList.append(struct.pack(">H", index*8)) - for subhead in subHeaderList: - dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset)) - for subhead in subHeaderList[:-1]: - for gi in subhead.glyphIndexArray: - dataList.append(struct.pack(">H", gi)) - data = bytesjoin(dataList) - assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length) - return data - - def fromXML(self, name, attrs, content, ttFont): - self.language = safeEval(attrs["language"]) - if not hasattr(self, "cmap"): - self.cmap = {} - cmap = self.cmap - - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name != "map": - continue - cmap[safeEval(attrs["code"])] = attrs["name"] - - -cmap_format_4_format = ">7H" - -#uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF. 
-#uint16 reservedPad # This value should be zero -#uint16 startCode[segCount] # Starting character code for each segment -#uint16 idDelta[segCount] # Delta for all character codes in segment -#uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0 -#uint16 glyphIndexArray[variable] # Glyph index array - -def splitRange(startCode, endCode, cmap): - # Try to split a range of character codes into subranges with consecutive - # glyph IDs in such a way that the cmap4 subtable can be stored "most" - # efficiently. I can't prove I've got the optimal solution, but it seems - # to do well with the fonts I tested: none became bigger, many became smaller. - if startCode == endCode: - return [], [endCode] - - lastID = cmap[startCode] - lastCode = startCode - inOrder = None - orderedBegin = None - subRanges = [] - - # Gather subranges in which the glyph IDs are consecutive. - for code in range(startCode + 1, endCode + 1): - glyphID = cmap[code] - - if glyphID - 1 == lastID: - if inOrder is None or not inOrder: - inOrder = 1 - orderedBegin = lastCode - else: - if inOrder: - inOrder = 0 - subRanges.append((orderedBegin, lastCode)) - orderedBegin = None - - lastID = glyphID - lastCode = code - - if inOrder: - subRanges.append((orderedBegin, lastCode)) - assert lastCode == endCode - - # Now filter out those new subranges that would only make the data bigger. - # A new segment cost 8 bytes, not using a new segment costs 2 bytes per - # character. 
- newRanges = [] - for b, e in subRanges: - if b == startCode and e == endCode: - break # the whole range, we're fine - if b == startCode or e == endCode: - threshold = 4 # split costs one more segment - else: - threshold = 8 # split costs two more segments - if (e - b + 1) > threshold: - newRanges.append((b, e)) - subRanges = newRanges - - if not subRanges: - return [], [endCode] - - if subRanges[0][0] != startCode: - subRanges.insert(0, (startCode, subRanges[0][0] - 1)) - if subRanges[-1][1] != endCode: - subRanges.append((subRanges[-1][1] + 1, endCode)) - - # Fill the "holes" in the segments list -- those are the segments in which - # the glyph IDs are _not_ consecutive. - i = 1 - while i < len(subRanges): - if subRanges[i-1][1] + 1 != subRanges[i][0]: - subRanges.insert(i, (subRanges[i-1][1] + 1, subRanges[i][0] - 1)) - i = i + 1 - i = i + 1 - - # Transform the ranges into startCode/endCode lists. - start = [] - end = [] - for b, e in subRanges: - start.append(b) - end.append(e) - start.pop(0) - - assert len(start) + 1 == len(end) - return start, end - - -class cmap_format_4(CmapSubtable): - - def decompile(self, data, ttFont): - # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. - # If not, someone is calling the subtable decompile() directly, and must provide both args. 
- if data is not None and ttFont is not None: - self.decompileHeader(data, ttFont) - else: - assert (data is None and ttFont is None), "Need both data and ttFont arguments" - - data = self.data # decompileHeader assigns the data after the header to self.data - (segCountX2, searchRange, entrySelector, rangeShift) = \ - struct.unpack(">4H", data[:8]) - data = data[8:] - segCount = segCountX2 // 2 - - allCodes = array.array("H") - allCodes.fromstring(data) - self.data = data = None - - if sys.byteorder != "big": - allCodes.byteswap() - - # divide the data - endCode = allCodes[:segCount] - allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field - startCode = allCodes[:segCount] - allCodes = allCodes[segCount:] - idDelta = allCodes[:segCount] - allCodes = allCodes[segCount:] - idRangeOffset = allCodes[:segCount] - glyphIndexArray = allCodes[segCount:] - lenGIArray = len(glyphIndexArray) - - # build 2-byte character mapping - charCodes = [] - gids = [] - for i in range(len(startCode) - 1): # don't do 0xffff! - start = startCode[i] - delta = idDelta[i] - rangeOffset = idRangeOffset[i] - # *someone* needs to get killed. - partial = rangeOffset // 2 - start + i - len(idRangeOffset) - - rangeCharCodes = list(range(startCode[i], endCode[i] + 1)) - charCodes.extend(rangeCharCodes) - if rangeOffset == 0: - gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes]) - else: - for charCode in rangeCharCodes: - index = charCode + partial - assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" 
% (i, index, lenGIArray) - if glyphIndexArray[index] != 0: # if not missing glyph - glyphID = glyphIndexArray[index] + delta - else: - glyphID = 0 # missing glyph - gids.append(glyphID & 0xFFFF) - - self.cmap = cmap = {} - lenCmap = len(gids) - glyphOrder = self.ttFont.getGlyphOrder() - try: - names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, gids )) - list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) - - def compile(self, ttFont): - if self.data: - return struct.pack(">HHH", self.format, self.length, self.language) + self.data - - charCodes = list(self.cmap.keys()) - lenCharCodes = len(charCodes) - if lenCharCodes == 0: - startCode = [0xffff] - endCode = [0xffff] - else: - charCodes.sort() - names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes)) - nameMap = ttFont.getReverseGlyphMap() - try: - gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) - except KeyError: - nameMap = ttFont.getReverseGlyphMap(rebuild=True) - try: - gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) - except KeyError: - # allow virtual GIDs in format 4 tables - gids = [] - for name in names: - try: - gid = nameMap[name] - except KeyError: - try: - if (name[:3] == 'gid'): - gid = eval(name[3:]) - else: - gid = ttFont.getGlyphID(name) - except: - raise KeyError(name) - - gids.append(gid) - cmap = {} # code:glyphID mapping - list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids)) - - # Build startCode and endCode lists. - # Split the char codes in ranges of consecutive char codes, then split - # each range in more ranges of consecutive/not consecutive glyph IDs. - # See splitRange(). 
- lastCode = charCodes[0] - endCode = [] - startCode = [lastCode] - for charCode in charCodes[1:]: # skip the first code, it's the first start code - if charCode == lastCode + 1: - lastCode = charCode - continue - start, end = splitRange(startCode[-1], lastCode, cmap) - startCode.extend(start) - endCode.extend(end) - startCode.append(charCode) - lastCode = charCode - start, end = splitRange(startCode[-1], lastCode, cmap) - startCode.extend(start) - endCode.extend(end) - startCode.append(0xffff) - endCode.append(0xffff) - - # build up rest of cruft - idDelta = [] - idRangeOffset = [] - glyphIndexArray = [] - for i in range(len(endCode)-1): # skip the closing codes (0xffff) - indices = [] - for charCode in range(startCode[i], endCode[i] + 1): - indices.append(cmap[charCode]) - if (indices == list(range(indices[0], indices[0] + len(indices)))): - idDelta.append((indices[0] - startCode[i]) % 0x10000) - idRangeOffset.append(0) - else: - # someone *definitely* needs to get killed. - idDelta.append(0) - idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i)) - glyphIndexArray.extend(indices) - idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef - idRangeOffset.append(0) - - # Insane. 
- segCount = len(endCode) - segCountX2 = segCount * 2 - searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2) - - charCodeArray = array.array("H", endCode + [0] + startCode) - idDeltaArray = array.array("H", idDelta) - restArray = array.array("H", idRangeOffset + glyphIndexArray) - if sys.byteorder != "big": - charCodeArray.byteswap() - idDeltaArray.byteswap() - restArray.byteswap() - data = charCodeArray.tostring() + idDeltaArray.tostring() + restArray.tostring() - - length = struct.calcsize(cmap_format_4_format) + len(data) - header = struct.pack(cmap_format_4_format, self.format, length, self.language, - segCountX2, searchRange, entrySelector, rangeShift) - return header + data - - def fromXML(self, name, attrs, content, ttFont): - self.language = safeEval(attrs["language"]) - if not hasattr(self, "cmap"): - self.cmap = {} - cmap = self.cmap - - for element in content: - if not isinstance(element, tuple): - continue - nameMap, attrsMap, dummyContent = element - if nameMap != "map": - assert 0, "Unrecognized keyword in cmap subtable" - cmap[safeEval(attrsMap["code"])] = attrsMap["name"] - - -class cmap_format_6(CmapSubtable): - - def decompile(self, data, ttFont): - # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. - # If not, someone is calling the subtable decompile() directly, and must provide both args. - if data is not None and ttFont is not None: - self.decompileHeader(data, ttFont) - else: - assert (data is None and ttFont is None), "Need both data and ttFont arguments" - - data = self.data # decompileHeader assigns the data after the header to self.data - firstCode, entryCount = struct.unpack(">HH", data[:4]) - firstCode = int(firstCode) - data = data[4:] - #assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!! 
- glyphIndexArray = array.array("H") - glyphIndexArray.fromstring(data[:2 * int(entryCount)]) - if sys.byteorder != "big": - glyphIndexArray.byteswap() - self.data = data = None - - self.cmap = cmap = {} - - lenArray = len(glyphIndexArray) - charCodes = list(range(firstCode, firstCode + lenArray)) - glyphOrder = self.ttFont.getGlyphOrder() - try: - names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, glyphIndexArray )) - list(map(operator.setitem, [cmap]*lenArray, charCodes, names)) - - def compile(self, ttFont): - if self.data: - return struct.pack(">HHH", self.format, self.length, self.language) + self.data - cmap = self.cmap - codes = sorted(cmap.keys()) - if codes: # yes, there are empty cmap tables. - codes = list(range(codes[0], codes[-1] + 1)) - firstCode = codes[0] - valueList = [cmap.get(code, ".notdef") for code in codes] - valueList = map(ttFont.getGlyphID, valueList) - glyphIndexArray = array.array("H", valueList) - if sys.byteorder != "big": - glyphIndexArray.byteswap() - data = glyphIndexArray.tostring() - else: - data = b"" - firstCode = 0 - header = struct.pack(">HHHHH", - 6, len(data) + 10, self.language, firstCode, len(codes)) - return header + data - - def fromXML(self, name, attrs, content, ttFont): - self.language = safeEval(attrs["language"]) - if not hasattr(self, "cmap"): - self.cmap = {} - cmap = self.cmap - - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name != "map": - continue - cmap[safeEval(attrs["code"])] = attrs["name"] - - -class cmap_format_12_or_13(CmapSubtable): - - def __init__(self, format): - self.format = format - self.reserved = 0 - self.data = None - self.ttFont = None - - def decompileHeader(self, data, ttFont): - format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16]) - assert len(data) == (16 + nGroups*12) == 
(length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length) - self.format = format - self.reserved = reserved - self.length = length - self.language = language - self.nGroups = nGroups - self.data = data[16:] - self.ttFont = ttFont - - def decompile(self, data, ttFont): - # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. - # If not, someone is calling the subtable decompile() directly, and must provide both args. - if data is not None and ttFont is not None: - self.decompileHeader(data, ttFont) - else: - assert (data is None and ttFont is None), "Need both data and ttFont arguments" - - data = self.data # decompileHeader assigns the data after the header to self.data - charCodes = [] - gids = [] - pos = 0 - for i in range(self.nGroups): - startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] ) - pos += 12 - lenGroup = 1 + endCharCode - startCharCode - charCodes.extend(list(range(startCharCode, endCharCode +1))) - gids.extend(self._computeGIDs(glyphID, lenGroup)) - self.data = data = None - self.cmap = cmap = {} - lenCmap = len(gids) - glyphOrder = self.ttFont.getGlyphOrder() - try: - names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids )) - except IndexError: - getGlyphName = self.ttFont.getGlyphName - names = list(map(getGlyphName, gids )) - list(map(operator.setitem, [cmap]*lenCmap, charCodes, names)) - - def compile(self, ttFont): - if self.data: - return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data - charCodes = list(self.cmap.keys()) - lenCharCodes = len(charCodes) - names = list(self.cmap.values()) - nameMap = ttFont.getReverseGlyphMap() - try: - gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) - except KeyError: - nameMap = ttFont.getReverseGlyphMap(rebuild=True) - try: - gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names)) - 
except KeyError: - # allow virtual GIDs in format 12 tables - gids = [] - for name in names: - try: - gid = nameMap[name] - except KeyError: - try: - if (name[:3] == 'gid'): - gid = eval(name[3:]) - else: - gid = ttFont.getGlyphID(name) - except: - raise KeyError(name) - - gids.append(gid) - - cmap = {} # code:glyphID mapping - list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids)) - - charCodes.sort() - index = 0 - startCharCode = charCodes[0] - startGlyphID = cmap[startCharCode] - lastGlyphID = startGlyphID - self._format_step - lastCharCode = startCharCode - 1 - nGroups = 0 - dataList = [] - maxIndex = len(charCodes) - for index in range(maxIndex): - charCode = charCodes[index] - glyphID = cmap[charCode] - if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode): - dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)) - startCharCode = charCode - startGlyphID = glyphID - nGroups = nGroups + 1 - lastGlyphID = glyphID - lastCharCode = charCode - dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)) - nGroups = nGroups + 1 - data = bytesjoin(dataList) - lengthSubtable = len(data) +16 - assert len(data) == (nGroups*12) == (lengthSubtable-16) - return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data - - def toXML(self, writer, ttFont): - writer.begintag(self.__class__.__name__, [ - ("platformID", self.platformID), - ("platEncID", self.platEncID), - ("format", self.format), - ("reserved", self.reserved), - ("length", self.length), - ("language", self.language), - ("nGroups", self.nGroups), - ]) - writer.newline() - codes = sorted(self.cmap.items()) - self._writeCodes(codes, writer) - writer.endtag(self.__class__.__name__) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.format = safeEval(attrs["format"]) - self.reserved = safeEval(attrs["reserved"]) - self.length = safeEval(attrs["length"]) - self.language = 
safeEval(attrs["language"]) - self.nGroups = safeEval(attrs["nGroups"]) - if not hasattr(self, "cmap"): - self.cmap = {} - cmap = self.cmap - - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name != "map": - continue - cmap[safeEval(attrs["code"])] = attrs["name"] - - -class cmap_format_12(cmap_format_12_or_13): - - _format_step = 1 - - def __init__(self, format=12): - cmap_format_12_or_13.__init__(self, format) - - def _computeGIDs(self, startingGlyph, numberOfGlyphs): - return list(range(startingGlyph, startingGlyph + numberOfGlyphs)) - - def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): - return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode) - - -class cmap_format_13(cmap_format_12_or_13): - - _format_step = 0 - - def __init__(self, format=13): - cmap_format_12_or_13.__init__(self, format) - - def _computeGIDs(self, startingGlyph, numberOfGlyphs): - return [startingGlyph] * numberOfGlyphs - - def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): - return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode) - - -def cvtToUVS(threeByteString): - data = b"\0" + threeByteString - val, = struct.unpack(">L", data) - return val - -def cvtFromUVS(val): - assert 0 <= val < 0x1000000 - fourByteString = struct.pack(">L", val) - return fourByteString[1:] - - -class cmap_format_14(CmapSubtable): - - def decompileHeader(self, data, ttFont): - format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10]) - self.data = data[10:] - self.length = length - self.numVarSelectorRecords = numVarSelectorRecords - self.ttFont = ttFont - self.language = 0xFF # has no language. 
- - def decompile(self, data, ttFont): - if data is not None and ttFont is not None: - self.decompileHeader(data, ttFont) - else: - assert (data is None and ttFont is None), "Need both data and ttFont arguments" - data = self.data - - self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail. - uvsDict = {} - recOffset = 0 - for n in range(self.numVarSelectorRecords): - uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11]) - recOffset += 11 - varUVS = cvtToUVS(uvs) - if defOVSOffset: - startOffset = defOVSOffset - 10 - numValues, = struct.unpack(">L", data[startOffset:startOffset+4]) - startOffset +=4 - for r in range(numValues): - uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4]) - startOffset += 4 - firstBaseUV = cvtToUVS(uv) - cnt = addtlCnt+1 - baseUVList = list(range(firstBaseUV, firstBaseUV+cnt)) - glyphList = [None]*cnt - localUVList = zip(baseUVList, glyphList) - try: - uvsDict[varUVS].extend(localUVList) - except KeyError: - uvsDict[varUVS] = list(localUVList) - - if nonDefUVSOffset: - startOffset = nonDefUVSOffset - 10 - numRecs, = struct.unpack(">L", data[startOffset:startOffset+4]) - startOffset +=4 - localUVList = [] - for r in range(numRecs): - uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5]) - startOffset += 5 - uv = cvtToUVS(uv) - glyphName = self.ttFont.getGlyphName(gid) - localUVList.append( [uv, glyphName] ) - try: - uvsDict[varUVS].extend(localUVList) - except KeyError: - uvsDict[varUVS] = localUVList - - self.uvsDict = uvsDict - - def toXML(self, writer, ttFont): - writer.begintag(self.__class__.__name__, [ - ("platformID", self.platformID), - ("platEncID", self.platEncID), - ("format", self.format), - ("length", self.length), - ("numVarSelectorRecords", self.numVarSelectorRecords), - ]) - writer.newline() - uvsDict = self.uvsDict - uvsList = sorted(uvsDict.keys()) - for uvs in uvsList: - uvList = uvsDict[uvs] - uvList.sort(key=lambda item: 
(item[1] is not None, item[0], item[1])) - for uv, gname in uvList: - if gname is None: - gname = "None" - # I use the arg rather than th keyword syntax in order to preserve the attribute order. - writer.simpletag("map", [ ("uvs",hex(uvs)), ("uv",hex(uv)), ("name", gname)] ) - writer.newline() - writer.endtag(self.__class__.__name__) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.format = safeEval(attrs["format"]) - self.length = safeEval(attrs["length"]) - self.numVarSelectorRecords = safeEval(attrs["numVarSelectorRecords"]) - self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail - if not hasattr(self, "cmap"): - self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail. - if not hasattr(self, "uvsDict"): - self.uvsDict = {} - uvsDict = self.uvsDict - - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name != "map": - continue - uvs = safeEval(attrs["uvs"]) - uv = safeEval(attrs["uv"]) - gname = attrs["name"] - if gname == "None": - gname = None - try: - uvsDict[uvs].append( [uv, gname]) - except KeyError: - uvsDict[uvs] = [ [uv, gname] ] - - def compile(self, ttFont): - if self.data: - return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data - - uvsDict = self.uvsDict - uvsList = sorted(uvsDict.keys()) - self.numVarSelectorRecords = len(uvsList) - offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block. 
- data = [] - varSelectorRecords =[] - for uvs in uvsList: - entryList = uvsDict[uvs] - - defList = [entry for entry in entryList if entry[1] is None] - if defList: - defList = [entry[0] for entry in defList] - defOVSOffset = offset - defList.sort() - - lastUV = defList[0] - cnt = -1 - defRecs = [] - for defEntry in defList: - cnt +=1 - if (lastUV+cnt) != defEntry: - rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1) - lastUV = defEntry - defRecs.append(rec) - cnt = 0 - - rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt) - defRecs.append(rec) - - numDefRecs = len(defRecs) - data.append(struct.pack(">L", numDefRecs)) - data.extend(defRecs) - offset += 4 + numDefRecs*4 - else: - defOVSOffset = 0 - - ndefList = [entry for entry in entryList if entry[1] is not None] - if ndefList: - nonDefUVSOffset = offset - ndefList.sort() - numNonDefRecs = len(ndefList) - data.append(struct.pack(">L", numNonDefRecs)) - offset += 4 + numNonDefRecs*5 - - for uv, gname in ndefList: - gid = ttFont.getGlyphID(gname) - ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid) - data.append(ndrec) - else: - nonDefUVSOffset = 0 - - vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset) - varSelectorRecords.append(vrec) - - data = bytesjoin(varSelectorRecords) + bytesjoin(data) - self.length = 10 + len(data) - headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) - self.data = headerdata + data - - return self.data - - -class cmap_format_unknown(CmapSubtable): - - def toXML(self, writer, ttFont): - cmapName = self.__class__.__name__[:12] + str(self.format) - writer.begintag(cmapName, [ - ("platformID", self.platformID), - ("platEncID", self.platEncID), - ]) - writer.newline() - writer.dumphex(self.data) - writer.endtag(cmapName) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.data = readHex(content) - self.cmap = {} - - def decompileHeader(self, data, ttFont): - self.language = 0 # dummy value - self.data = data - 
- def decompile(self, data, ttFont): - # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. - # If not, someone is calling the subtable decompile() directly, and must provide both args. - if data is not None and ttFont is not None: - self.decompileHeader(data, ttFont) - else: - assert (data is None and ttFont is None), "Need both data and ttFont arguments" - - def compile(self, ttFont): - if self.data: - return self.data - else: - return None - -cmap_classes = { - 0: cmap_format_0, - 2: cmap_format_2, - 4: cmap_format_4, - 6: cmap_format_6, - 12: cmap_format_12, - 13: cmap_format_13, - 14: cmap_format_14, -} diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_c_m_a_p_test.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_c_m_a_p_test.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_c_m_a_p_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_c_m_a_p_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools import ttLib -import unittest -from ._c_m_a_p import CmapSubtable - -class CmapSubtableTest(unittest.TestCase): - - def makeSubtable(self, platformID, platEncID, langID): - subtable = CmapSubtable(None) - subtable.platformID, subtable.platEncID, subtable.language = (platformID, platEncID, langID) - return subtable - - def test_toUnicode_utf16be(self): - subtable = self.makeSubtable(0, 2, 7) - self.assertEqual("utf_16_be", subtable.getEncoding()) - self.assertEqual(True, subtable.isUnicode()) - - def test_toUnicode_macroman(self): - subtable = self.makeSubtable(1, 0, 7) # MacRoman - self.assertEqual("mac_roman", subtable.getEncoding()) - self.assertEqual(False, subtable.isUnicode()) - - def test_toUnicode_macromanian(self): - subtable = self.makeSubtable(1, 0, 37) # Mac Romanian - self.assertNotEqual(None, 
subtable.getEncoding()) - self.assertEqual(False, subtable.isUnicode()) - - def test_extended_mac_encodings(self): - subtable = self.makeSubtable(1, 1, 0) # Mac Japanese - self.assertNotEqual(None, subtable.getEncoding()) - self.assertEqual(False, subtable.isUnicode()) - - def test_extended_unknown(self): - subtable = self.makeSubtable(10, 11, 12) - self.assertEqual(subtable.getEncoding(), None) - self.assertEqual(subtable.getEncoding("ascii"), "ascii") - self.assertEqual(subtable.getEncoding(default="xyz"), "xyz") - - def test_decompile_4(self): - subtable = CmapSubtable.newSubtable(4) - font = ttLib.TTFont() - font.setGlyphOrder([]) - subtable.decompile(b'\0' * 3 + b'\x10' + b'\0' * 12, font) - - def test_decompile_12(self): - subtable = CmapSubtable.newSubtable(12) - font = ttLib.TTFont() - font.setGlyphOrder([]) - subtable.decompile(b'\0' * 7 + b'\x10' + b'\0' * 8, font) - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/C_O_L_R_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/C_O_L_R_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/C_O_L_R_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/C_O_L_R_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,159 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Behdad Esfahbod - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from . import DefaultTable -import operator -import struct - - -class table_C_O_L_R_(DefaultTable.DefaultTable): - - """ This table is structured so that you can treat it like a dictionary keyed by glyph name. - ttFont['COLR'][] will return the color layers for any glyph - ttFont['COLR'][] = will set the color layers for any glyph. 
- """ - - def decompile(self, data, ttFont): - self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID - self.version, numBaseGlyphRecords, offsetBaseGlyphRecord, offsetLayerRecord, numLayerRecords = struct.unpack(">HHLLH", data[:14]) - assert (self.version == 0), "Version of COLR table is higher than I know how to handle" - glyphOrder = ttFont.getGlyphOrder() - gids = [] - layerLists = [] - glyphPos = offsetBaseGlyphRecord - for i in range(numBaseGlyphRecords): - gid, firstLayerIndex, numLayers = struct.unpack(">HHH", data[glyphPos:glyphPos+6]) - glyphPos += 6 - gids.append(gid) - assert (firstLayerIndex + numLayers <= numLayerRecords) - layerPos = offsetLayerRecord + firstLayerIndex * 4 - layers = [] - for j in range(numLayers): - layerGid, colorID = struct.unpack(">HH", data[layerPos:layerPos+4]) - try: - layerName = glyphOrder[layerGid] - except IndexError: - layerName = self.getGlyphName(layerGid) - layerPos += 4 - layers.append(LayerRecord(layerName, colorID)) - layerLists.append(layers) - - self.ColorLayers = colorLayerLists = {} - try: - names = list(map(operator.getitem, [glyphOrder]*numBaseGlyphRecords, gids)) - except IndexError: - getGlyphName = self.getGlyphName - names = list(map(getGlyphName, gids )) - - list(map(operator.setitem, [colorLayerLists]*numBaseGlyphRecords, names, layerLists)) - - def compile(self, ttFont): - ordered = [] - ttFont.getReverseGlyphMap(rebuild=True) - glyphNames = self.ColorLayers.keys() - for glyphName in glyphNames: - try: - gid = ttFont.getGlyphID(glyphName) - except: - assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) - ordered.append([gid, glyphName, self.ColorLayers[glyphName]]) - ordered.sort() - - glyphMap = [] - layerMap = [] - for (gid, glyphName, layers) in ordered: - glyphMap.append(struct.pack(">HHH", gid, len(layerMap), len(layers))) - for layer in layers: - layerMap.append(struct.pack(">HH", ttFont.getGlyphID(layer.name), 
layer.colorID)) - - dataList = [struct.pack(">HHLLH", self.version, len(glyphMap), 14, 14+6*len(glyphMap), len(layerMap))] - dataList.extend(glyphMap) - dataList.extend(layerMap) - data = bytesjoin(dataList) - return data - - def toXML(self, writer, ttFont): - writer.simpletag("version", value=self.version) - writer.newline() - ordered = [] - glyphNames = self.ColorLayers.keys() - for glyphName in glyphNames: - try: - gid = ttFont.getGlyphID(glyphName) - except: - assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) - ordered.append([gid, glyphName, self.ColorLayers[glyphName]]) - ordered.sort() - for entry in ordered: - writer.begintag("ColorGlyph", name=entry[1]) - writer.newline() - for layer in entry[2]: - layer.toXML(writer, ttFont) - writer.endtag("ColorGlyph") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "ColorLayers"): - self.ColorLayers = {} - self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID - if name == "ColorGlyph": - glyphName = attrs["name"] - for element in content: - if isinstance(element, basestring): - continue - layers = [] - for element in content: - if isinstance(element, basestring): - continue - layer = LayerRecord() - layer.fromXML(element[0], element[1], element[2], ttFont) - layers.append (layer) - operator.setitem(self, glyphName, layers) - elif "value" in attrs: - setattr(self, name, safeEval(attrs["value"])) - - def __getitem__(self, glyphSelector): - if isinstance(glyphSelector, int): - # its a gid, convert to glyph name - glyphSelector = self.getGlyphName(glyphSelector) - - if glyphSelector not in self.ColorLayers: - return None - - return self.ColorLayers[glyphSelector] - - def __setitem__(self, glyphSelector, value): - if isinstance(glyphSelector, int): - # its a gid, convert to glyph name - glyphSelector = self.getGlyphName(glyphSelector) - - if value: - self.ColorLayers[glyphSelector] = value - 
elif glyphSelector in self.ColorLayers: - del self.ColorLayers[glyphSelector] - - def __delitem__(self, glyphSelector): - del self.ColorLayers[glyphSelector] - -class LayerRecord(object): - - def __init__(self, name=None, colorID=None): - self.name = name - self.colorID = colorID - - def toXML(self, writer, ttFont): - writer.simpletag("layer", name=self.name, colorID=self.colorID) - writer.newline() - - def fromXML(self, eltname, attrs, content, ttFont): - for (name, value) in attrs.items(): - if name == "name": - if isinstance(value, int): - value = ttFont.getGlyphName(value) - setattr(self, name, value) - else: - setattr(self, name, safeEval(value)) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/C_P_A_L_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/C_P_A_L_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/C_P_A_L_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/C_P_A_L_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,100 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Behdad Esfahbod - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from . 
import DefaultTable -import struct - - -class table_C_P_A_L_(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12]) - assert (self.version == 0), "Version of COLR table is higher than I know how to handle" - self.palettes = [] - pos = 12 - for i in range(numPalettes): - startIndex = struct.unpack(">H", data[pos:pos+2])[0] - assert (startIndex + self.numPaletteEntries <= numColorRecords) - pos += 2 - palette = [] - ppos = goffsetFirstColorRecord + startIndex * 4 - for j in range(self.numPaletteEntries): - palette.append( Color(*struct.unpack(">BBBB", data[ppos:ppos+4])) ) - ppos += 4 - self.palettes.append(palette) - - def compile(self, ttFont): - dataList = [struct.pack(">HHHHL", self.version, self.numPaletteEntries, len(self.palettes), self.numPaletteEntries * len(self.palettes), 12+2*len(self.palettes))] - for i in range(len(self.palettes)): - dataList.append(struct.pack(">H", i*self.numPaletteEntries)) - for palette in self.palettes: - assert(len(palette) == self.numPaletteEntries) - for color in palette: - dataList.append(struct.pack(">BBBB", color.blue,color.green,color.red,color.alpha)) - data = bytesjoin(dataList) - return data - - def toXML(self, writer, ttFont): - writer.simpletag("version", value=self.version) - writer.newline() - writer.simpletag("numPaletteEntries", value=self.numPaletteEntries) - writer.newline() - for index, palette in enumerate(self.palettes): - writer.begintag("palette", index=index) - writer.newline() - assert(len(palette) == self.numPaletteEntries) - for cindex, color in enumerate(palette): - color.toXML(writer, ttFont, cindex) - writer.endtag("palette") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "palettes"): - self.palettes = [] - if name == "palette": - palette = [] - for element in content: - if isinstance(element, basestring): - 
continue - palette = [] - for element in content: - if isinstance(element, basestring): - continue - color = Color() - color.fromXML(element[0], element[1], element[2], ttFont) - palette.append (color) - self.palettes.append(palette) - elif "value" in attrs: - value = safeEval(attrs["value"]) - setattr(self, name, value) - -class Color(object): - - def __init__(self, blue=None, green=None, red=None, alpha=None): - self.blue = blue - self.green = green - self.red = red - self.alpha = alpha - - def hex(self): - return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha) - - def __repr__(self): - return self.hex() - - def toXML(self, writer, ttFont, index=None): - writer.simpletag("color", value=self.hex(), index=index) - writer.newline() - - def fromXML(self, eltname, attrs, content, ttFont): - value = attrs["value"] - if value[0] == '#': - value = value[1:] - self.red = int(value[0:2], 16) - self.green = int(value[2:4], 16) - self.blue = int(value[4:6], 16) - self.alpha = int(value[6:8], 16) if len (value) >= 8 else 0xFF diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_c_v_t.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_c_v_t.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_c_v_t.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_c_v_t.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from . 
import DefaultTable -import sys -import array - -class table__c_v_t(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - values = array.array("h") - values.fromstring(data) - if sys.byteorder != "big": - values.byteswap() - self.values = values - - def compile(self, ttFont): - values = self.values[:] - if sys.byteorder != "big": - values.byteswap() - return values.tostring() - - def toXML(self, writer, ttFont): - for i in range(len(self.values)): - value = self.values[i] - writer.simpletag("cv", value=value, index=i) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "values"): - self.values = array.array("h") - if name == "cv": - index = safeEval(attrs["index"]) - value = safeEval(attrs["value"]) - for i in range(1 + index - len(self.values)): - self.values.append(0) - self.values[index] = value - - def __len__(self): - return len(self.values) - - def __getitem__(self, index): - return self.values[index] - - def __setitem__(self, index, value): - self.values[index] = value - - def __delitem__(self, index): - del self.values[index] diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/DefaultTable.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/DefaultTable.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/DefaultTable.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/DefaultTable.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,47 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.ttLib import getClassTag - -class DefaultTable(object): - - dependencies = [] - - def __init__(self, tag=None): - if tag is None: - tag = getClassTag(self.__class__) - self.tableTag = Tag(tag) - - def decompile(self, data, ttFont): - self.data = data - - def compile(self, ttFont): - return self.data - - def toXML(self, writer, ttFont, progress=None): - if hasattr(self, "ERROR"): - writer.comment("An error occurred during 
the decompilation of this table") - writer.newline() - writer.comment(self.ERROR) - writer.newline() - writer.begintag("hexdata") - writer.newline() - writer.dumphex(self.compile(ttFont)) - writer.endtag("hexdata") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - from fontTools.misc.textTools import readHex - from fontTools import ttLib - if name != "hexdata": - raise ttLib.TTLibError("can't handle '%s' element" % name) - self.decompile(readHex(content), ttFont) - - def __repr__(self): - return "<'%s' table at %x>" % (self.tableTag, id(self)) - - def __ne__(self, other): - return not self.__eq__(other) - def __eq__(self, other): - if type(self) != type(other): - return NotImplemented - return self.__dict__ == other.__dict__ diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/D_S_I_G_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/D_S_I_G_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/D_S_I_G_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/D_S_I_G_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,131 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from fontTools.misc import sstruct -from . 
import DefaultTable -import base64 - -DSIG_HeaderFormat = """ - > # big endian - ulVersion: L - usNumSigs: H - usFlag: H -""" -# followed by an array of usNumSigs DSIG_Signature records -DSIG_SignatureFormat = """ - > # big endian - ulFormat: L - ulLength: L # length includes DSIG_SignatureBlock header - ulOffset: L -""" -# followed by an array of usNumSigs DSIG_SignatureBlock records, -# each followed immediately by the pkcs7 bytes -DSIG_SignatureBlockFormat = """ - > # big endian - usReserved1: H - usReserved2: H - cbSignature: l # length of following raw pkcs7 data -""" - -# -# NOTE -# the DSIG table format allows for SignatureBlocks residing -# anywhere in the table and possibly in a different order as -# listed in the array after the first table header -# -# this implementation does not keep track of any gaps and/or data -# before or after the actual signature blocks while decompiling, -# and puts them in the same physical order as listed in the header -# on compilation with no padding whatsoever. 
-# - -class table_D_S_I_G_(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self) - assert self.ulVersion == 1, "DSIG ulVersion must be 1" - assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0" - self.signatureRecords = sigrecs = [] - for n in range(self.usNumSigs): - sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord()) - assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n - sigrecs.append(sigrec) - for sigrec in sigrecs: - dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec) - assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserverd1 must be 0" % n - assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserverd2 must be 0" % n - sigrec.pkcs7 = newData[:sigrec.cbSignature] - - def compile(self, ttFont): - packed = sstruct.pack(DSIG_HeaderFormat, self) - headers = [packed] - offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat) - data = [] - for sigrec in self.signatureRecords: - # first pack signature block - sigrec.cbSignature = len(sigrec.pkcs7) - packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7 - data.append(packed) - # update redundant length field - sigrec.ulLength = len(packed) - # update running table offset - sigrec.ulOffset = offset - headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec)) - offset += sigrec.ulLength - if offset % 2: - # Pad to even bytes - data.append(b'\0') - return bytesjoin(headers+data) - - def toXML(self, xmlWriter, ttFont): - xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!") - xmlWriter.newline() - xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag) - for sigrec in self.signatureRecords: - xmlWriter.newline() - sigrec.toXML(xmlWriter, ttFont) - xmlWriter.newline() - - def 
fromXML(self, name, attrs, content, ttFont): - if name == "tableHeader": - self.signatureRecords = [] - self.ulVersion = safeEval(attrs["version"]) - self.usNumSigs = safeEval(attrs["numSigs"]) - self.usFlag = safeEval(attrs["flag"]) - return - if name == "SignatureRecord": - sigrec = SignatureRecord() - sigrec.fromXML(name, attrs, content, ttFont) - self.signatureRecords.append(sigrec) - -pem_spam = lambda l, spam = { - "-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True -}: not spam.get(l.strip()) - -def b64encode(b): - s = base64.b64encode(b) - # Line-break at 76 chars. - items = [] - while s: - items.append(tostr(s[:76])) - items.append('\n') - s = s[76:] - return strjoin(items) - -class SignatureRecord(object): - def __repr__(self): - return "<%s: %s>" % (self.__class__.__name__, self.__dict__) - - def toXML(self, writer, ttFont): - writer.begintag(self.__class__.__name__, format=self.ulFormat) - writer.newline() - writer.write_noindent("-----BEGIN PKCS7-----\n") - writer.write_noindent(b64encode(self.pkcs7)) - writer.write_noindent("-----END PKCS7-----\n") - writer.endtag(self.__class__.__name__) - - def fromXML(self, name, attrs, content, ttFont): - self.ulFormat = safeEval(attrs["format"]) - self.usReserved1 = safeEval(attrs.get("reserved1", "0")) - self.usReserved2 = safeEval(attrs.get("reserved2", "0")) - self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content)))) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/E_B_D_T_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/E_B_D_T_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/E_B_D_T_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/E_B_D_T_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,759 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval, readHex, hexStr, deHexStr -from .BitmapGlyphMetrics 
import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat -from . import DefaultTable -import itertools -import os -import struct - -ebdtTableVersionFormat = """ - > # big endian - version: 16.16F -""" - -ebdtComponentFormat = """ - > # big endian - glyphCode: H - xOffset: b - yOffset: b -""" - -class table_E_B_D_T_(DefaultTable.DefaultTable): - - # Keep a reference to the name of the data locator table. - locatorName = 'EBLC' - - # This method can be overridden in subclasses to support new formats - # without changing the other implementation. Also can be used as a - # convenience method for coverting a font file to an alternative format. - def getImageFormatClass(self, imageFormat): - return ebdt_bitmap_classes[imageFormat] - - def decompile(self, data, ttFont): - # Get the version but don't advance the slice. - # Most of the lookup for this table is done relative - # to the begining so slice by the offsets provided - # in the EBLC table. - sstruct.unpack2(ebdtTableVersionFormat, data, self) - - # Keep a dict of glyphs that have been seen so they aren't remade. - # This dict maps intervals of data to the BitmapGlyph. - glyphDict = {} - - # Pull out the EBLC table and loop through glyphs. - # A strike is a concept that spans both tables. - # The actual bitmap data is stored in the EBDT. - locator = ttFont[self.__class__.locatorName] - self.strikeData = [] - for curStrike in locator.strikes: - bitmapGlyphDict = {} - self.strikeData.append(bitmapGlyphDict) - for indexSubTable in curStrike.indexSubTables: - dataIter = zip(indexSubTable.names, indexSubTable.locations) - for curName, curLoc in dataIter: - # Don't create duplicate data entries for the same glyphs. - # Instead just use the structures that already exist if they exist. 
- if curLoc in glyphDict: - curGlyph = glyphDict[curLoc] - else: - curGlyphData = data[slice(*curLoc)] - imageFormatClass = self.getImageFormatClass(indexSubTable.imageFormat) - curGlyph = imageFormatClass(curGlyphData, ttFont) - glyphDict[curLoc] = curGlyph - bitmapGlyphDict[curName] = curGlyph - - def compile(self, ttFont): - - dataList = [] - dataList.append(sstruct.pack(ebdtTableVersionFormat, self)) - dataSize = len(dataList[0]) - - # Keep a dict of glyphs that have been seen so they aren't remade. - # This dict maps the id of the BitmapGlyph to the interval - # in the data. - glyphDict = {} - - # Go through the bitmap glyph data. Just in case the data for a glyph - # changed the size metrics should be recalculated. There are a variety - # of formats and they get stored in the EBLC table. That is why - # recalculation is defered to the EblcIndexSubTable class and just - # pass what is known about bitmap glyphs from this particular table. - locator = ttFont[self.__class__.locatorName] - for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): - for curIndexSubTable in curStrike.indexSubTables: - dataLocations = [] - for curName in curIndexSubTable.names: - # Handle the data placement based on seeing the glyph or not. - # Just save a reference to the location if the glyph has already - # been saved in compile. This code assumes that glyphs will only - # be referenced multiple times from indexFormat5. By luck the - # code may still work when referencing poorly ordered fonts with - # duplicate references. If there is a font that is unlucky the - # respective compile methods for the indexSubTables will fail - # their assertions. All fonts seem to follow this assumption. - # More complicated packing may be needed if a counter-font exists. 
- glyph = curGlyphDict[curName] - objectId = id(glyph) - if objectId not in glyphDict: - data = glyph.compile(ttFont) - data = curIndexSubTable.padBitmapData(data) - startByte = dataSize - dataSize += len(data) - endByte = dataSize - dataList.append(data) - dataLoc = (startByte, endByte) - glyphDict[objectId] = dataLoc - else: - dataLoc = glyphDict[objectId] - dataLocations.append(dataLoc) - # Just use the new data locations in the indexSubTable. - # The respective compile implementations will take care - # of any of the problems in the convertion that may arise. - curIndexSubTable.locations = dataLocations - - return bytesjoin(dataList) - - def toXML(self, writer, ttFont): - # When exporting to XML if one of the data export formats - # requires metrics then those metrics may be in the locator. - # In this case populate the bitmaps with "export metrics". - if ttFont.bitmapGlyphDataFormat in ('row', 'bitwise'): - locator = ttFont[self.__class__.locatorName] - for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): - for curIndexSubTable in curStrike.indexSubTables: - for curName in curIndexSubTable.names: - glyph = curGlyphDict[curName] - # I'm not sure which metrics have priority here. - # For now if both metrics exist go with glyph metrics. 
- if hasattr(glyph, 'metrics'): - glyph.exportMetrics = glyph.metrics - else: - glyph.exportMetrics = curIndexSubTable.metrics - glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth - - writer.simpletag("header", [('version', self.version)]) - writer.newline() - locator = ttFont[self.__class__.locatorName] - for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData): - writer.begintag('strikedata', [('index', strikeIndex)]) - writer.newline() - for curName, curBitmap in bitmapGlyphDict.items(): - curBitmap.toXML(strikeIndex, curName, writer, ttFont) - writer.endtag('strikedata') - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == 'header': - self.version = safeEval(attrs['version']) - elif name == 'strikedata': - if not hasattr(self, 'strikeData'): - self.strikeData = [] - strikeIndex = safeEval(attrs['index']) - - bitmapGlyphDict = {} - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]): - imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix):]) - glyphName = attrs['name'] - imageFormatClass = self.getImageFormatClass(imageFormat) - curGlyph = imageFormatClass(None, None) - curGlyph.fromXML(name, attrs, content, ttFont) - assert glyphName not in bitmapGlyphDict, "Duplicate glyphs with the same name '%s' in the same strike." % glyphName - bitmapGlyphDict[glyphName] = curGlyph - else: - print("Warning: %s being ignored by %s", name, self.__class__.__name__) - - # Grow the strike data array to the appropriate size. The XML - # format allows the strike index value to be out of order. - if strikeIndex >= len(self.strikeData): - self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData)) - assert self.strikeData[strikeIndex] is None, "Duplicate strike EBDT indices." 
- self.strikeData[strikeIndex] = bitmapGlyphDict - -class EbdtComponent(object): - - def toXML(self, writer, ttFont): - writer.begintag('ebdtComponent', [('name', self.name)]) - writer.newline() - for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]: - writer.simpletag(componentName, value=getattr(self, componentName)) - writer.newline() - writer.endtag('ebdtComponent') - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.name = attrs['name'] - componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:]) - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name in componentNames: - vars(self)[name] = safeEval(attrs['value']) - else: - print("Warning: unknown name '%s' being ignored by EbdtComponent." % name) - -# Helper functions for dealing with binary. - -def _data2binary(data, numBits): - binaryList = [] - for curByte in data: - value = byteord(curByte) - numBitsCut = min(8, numBits) - for i in range(numBitsCut): - if value & 0x1: - binaryList.append('1') - else: - binaryList.append('0') - value = value >> 1 - numBits -= numBitsCut - return strjoin(binaryList) - -def _binary2data(binary): - byteList = [] - for bitLoc in range(0, len(binary), 8): - byteString = binary[bitLoc:bitLoc+8] - curByte = 0 - for curBit in reversed(byteString): - curByte = curByte << 1 - if curBit == '1': - curByte |= 1 - byteList.append(bytechr(curByte)) - return bytesjoin(byteList) - -def _memoize(f): - class memodict(dict): - def __missing__(self, key): - ret = f(key) - if len(key) == 1: - self[key] = ret - return ret - return memodict().__getitem__ - -# 00100111 -> 11100100 per byte, not to be confused with little/big endian. -# Bitmap data per byte is in the order that binary is written on the page -# with the least significant bit as far right as possible. This is the -# opposite of what makes sense algorithmically and hence this function. 
-@_memoize -def _reverseBytes(data): - if len(data) != 1: - return bytesjoin(map(_reverseBytes, data)) - byte = byteord(data) - result = 0 - for i in range(8): - result = result << 1 - result |= byte & 1 - byte = byte >> 1 - return bytechr(result) - -# This section of code is for reading and writing image data to/from XML. - -def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): - writer.begintag('rawimagedata') - writer.newline() - writer.dumphex(bitmapObject.imageData) - writer.endtag('rawimagedata') - writer.newline() - -def _readRawImageData(bitmapObject, name, attrs, content, ttFont): - bitmapObject.imageData = readHex(content) - -def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): - metrics = bitmapObject.exportMetrics - del bitmapObject.exportMetrics - bitDepth = bitmapObject.exportBitDepth - del bitmapObject.exportBitDepth - - writer.begintag('rowimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height) - writer.newline() - for curRow in range(metrics.height): - rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics) - writer.simpletag('row', value=hexStr(rowData)) - writer.newline() - writer.endtag('rowimagedata') - writer.newline() - -def _readRowImageData(bitmapObject, name, attrs, content, ttFont): - bitDepth = safeEval(attrs['bitDepth']) - metrics = SmallGlyphMetrics() - metrics.width = safeEval(attrs['width']) - metrics.height = safeEval(attrs['height']) - - dataRows = [] - for element in content: - if not isinstance(element, tuple): - continue - name, attr, content = element - # Chop off 'imagedata' from the tag to get just the option. 
- if name == 'row': - dataRows.append(deHexStr(attr['value'])) - bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics) - -def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): - metrics = bitmapObject.exportMetrics - del bitmapObject.exportMetrics - bitDepth = bitmapObject.exportBitDepth - del bitmapObject.exportBitDepth - - # A dict for mapping binary to more readable/artistic ASCII characters. - binaryConv = {'0':'.', '1':'@'} - - writer.begintag('bitwiseimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height) - writer.newline() - for curRow in range(metrics.height): - rowData = bitmapObject.getRow(curRow, bitDepth=1, metrics=metrics, reverseBytes=True) - rowData = _data2binary(rowData, metrics.width) - # Make the output a readable ASCII art form. - rowData = strjoin(map(binaryConv.get, rowData)) - writer.simpletag('row', value=rowData) - writer.newline() - writer.endtag('bitwiseimagedata') - writer.newline() - -def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont): - bitDepth = safeEval(attrs['bitDepth']) - metrics = SmallGlyphMetrics() - metrics.width = safeEval(attrs['width']) - metrics.height = safeEval(attrs['height']) - - # A dict for mapping from ASCII to binary. All characters are considered - # a '1' except space, period and '0' which maps to '0'. 
- binaryConv = {' ':'0', '.':'0', '0':'0'} - - dataRows = [] - for element in content: - if not isinstance(element, tuple): - continue - name, attr, content = element - if name == 'row': - mapParams = zip(attr['value'], itertools.repeat('1')) - rowData = strjoin(itertools.starmap(binaryConv.get, mapParams)) - dataRows.append(_binary2data(rowData)) - - bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True) - -def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): - try: - folder = os.path.dirname(writer.file.name) - except AttributeError: - # fall back to current directory if output file's directory isn't found - folder = '.' - folder = os.path.join(folder, 'bitmaps') - filename = glyphName + bitmapObject.fileExtension - if not os.path.isdir(folder): - os.makedirs(folder) - folder = os.path.join(folder, 'strike%d' % strikeIndex) - if not os.path.isdir(folder): - os.makedirs(folder) - - fullPath = os.path.join(folder, filename) - writer.simpletag('extfileimagedata', value=fullPath) - writer.newline() - - with open(fullPath, "wb") as file: - file.write(bitmapObject.imageData) - -def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont): - fullPath = attrs['value'] - with open(fullPath, "rb") as file: - bitmapObject.imageData = file.read() - -# End of XML writing code. - -# Important information about the naming scheme. Used for identifying formats -# in XML. -_bitmapGlyphSubclassPrefix = 'ebdt_bitmap_format_' - -class BitmapGlyph(object): - - # For the external file format. This can be changed in subclasses. This way - # when the extfile option is turned on files have the form: glyphName.ext - # The default is just a flat binary file with no meaning. - fileExtension = '.bin' - - # Keep track of reading and writing of various forms. 
- xmlDataFunctions = { - 'raw': (_writeRawImageData, _readRawImageData), - 'row': (_writeRowImageData, _readRowImageData), - 'bitwise': (_writeBitwiseImageData, _readBitwiseImageData), - 'extfile': (_writeExtFileImageData, _readExtFileImageData), - } - - def __init__(self, data, ttFont): - self.data = data - self.ttFont = ttFont - # TODO Currently non-lazy decompilation is untested here... - #if not ttFont.lazy: - # self.decompile() - # del self.data - - def __getattr__(self, attr): - # Allow lazy decompile. - if attr[:2] == '__': - raise AttributeError(attr) - if not hasattr(self, "data"): - raise AttributeError(attr) - self.decompile() - del self.data - return getattr(self, attr) - - # Not a fan of this but it is needed for safer safety checking. - def getFormat(self): - return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix):]) - - def toXML(self, strikeIndex, glyphName, writer, ttFont): - writer.begintag(self.__class__.__name__, [('name', glyphName)]) - writer.newline() - - self.writeMetrics(writer, ttFont) - # Use the internal write method to write using the correct output format. - self.writeData(strikeIndex, glyphName, writer, ttFont) - - writer.endtag(self.__class__.__name__) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.readMetrics(name, attrs, content, ttFont) - for element in content: - if not isinstance(element, tuple): - continue - name, attr, content = element - if not name.endswith('imagedata'): - continue - # Chop off 'imagedata' from the tag to get just the option. - option = name[:-len('imagedata')] - assert option in self.__class__.xmlDataFunctions - self.readData(name, attr, content, ttFont) - - # Some of the glyphs have the metrics. This allows for metrics to be - # added if the glyph format has them. Default behavior is to do nothing. - def writeMetrics(self, writer, ttFont): - pass - - # The opposite of write metrics. 
- def readMetrics(self, name, attrs, content, ttFont): - pass - - def writeData(self, strikeIndex, glyphName, writer, ttFont): - try: - writeFunc, readFunc = self.__class__.xmlDataFunctions[ttFont.bitmapGlyphDataFormat] - except KeyError: - writeFunc = _writeRawImageData - writeFunc(strikeIndex, glyphName, self, writer, ttFont) - - def readData(self, name, attrs, content, ttFont): - # Chop off 'imagedata' from the tag to get just the option. - option = name[:-len('imagedata')] - writeFunc, readFunc = self.__class__.xmlDataFunctions[option] - readFunc(self, name, attrs, content, ttFont) - - -# A closure for creating a mixin for the two types of metrics handling. -# Most of the code is very similar so its easier to deal with here. -# Everything works just by passing the class that the mixin is for. -def _createBitmapPlusMetricsMixin(metricsClass): - # Both metrics names are listed here to make meaningful error messages. - metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__] - curMetricsName = metricsClass.__name__ - # Find which metrics this is for and determine the opposite name. - metricsId = metricStrings.index(curMetricsName) - oppositeMetricsName = metricStrings[1-metricsId] - - class BitmapPlusMetricsMixin(object): - - def writeMetrics(self, writer, ttFont): - self.metrics.toXML(writer, ttFont) - - def readMetrics(self, name, attrs, content, ttFont): - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == curMetricsName: - self.metrics = metricsClass() - self.metrics.fromXML(name, attrs, content, ttFont) - elif name == oppositeMetricsName: - print("Warning: %s being ignored in format %d." % oppositeMetricsName, self.getFormat()) - - return BitmapPlusMetricsMixin - -# Since there are only two types of mixin's just create them here. 
-BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics) -BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics) - -# Data that is bit aligned can be tricky to deal with. These classes implement -# helper functionality for dealing with the data and getting a particular row -# of bitwise data. Also helps implement fancy data export/import in XML. -class BitAlignedBitmapMixin(object): - - def _getBitRange(self, row, bitDepth, metrics): - rowBits = (bitDepth * metrics.width) - bitOffset = row * rowBits - return (bitOffset, bitOffset+rowBits) - - def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False): - if metrics is None: - metrics = self.metrics - assert 0 <= row and row < metrics.height, "Illegal row access in bitmap" - - # Loop through each byte. This can cover two bytes in the original data or - # a single byte if things happen to be aligned. The very last entry might - # not be aligned so take care to trim the binary data to size and pad with - # zeros in the row data. Bit aligned data is somewhat tricky. - # - # Example of data cut. Data cut represented in x's. - # '|' represents byte boundary. - # data = ...0XX|XXXXXX00|000... => XXXXXXXX - # or - # data = ...0XX|XXXX0000|000... => XXXXXX00 - # or - # data = ...000|XXXXXXXX|000... => XXXXXXXX - # or - # data = ...000|00XXXX00|000... 
=> XXXX0000 - # - dataList = [] - bitRange = self._getBitRange(row, bitDepth, metrics) - stepRange = bitRange + (8,) - for curBit in range(*stepRange): - endBit = min(curBit+8, bitRange[1]) - numBits = endBit - curBit - cutPoint = curBit % 8 - firstByteLoc = curBit // 8 - secondByteLoc = endBit // 8 - if firstByteLoc < secondByteLoc: - numBitsCut = 8 - cutPoint - else: - numBitsCut = endBit - curBit - curByte = _reverseBytes(self.imageData[firstByteLoc]) - firstHalf = byteord(curByte) >> cutPoint - firstHalf = ((1<> numBitsCut) & ((1<<8-numBitsCut)-1) - ordDataList[secondByteLoc] |= secondByte - - # Save the image data with the bits going the correct way. - self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList))) - -class ByteAlignedBitmapMixin(object): - - def _getByteRange(self, row, bitDepth, metrics): - rowBytes = (bitDepth * metrics.width + 7) // 8 - byteOffset = row * rowBytes - return (byteOffset, byteOffset+rowBytes) - - def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False): - if metrics is None: - metrics = self.metrics - assert 0 <= row and row < metrics.height, "Illegal row access in bitmap" - byteRange = self._getByteRange(row, bitDepth, metrics) - data = self.imageData[slice(*byteRange)] - if reverseBytes: - data = _reverseBytes(data) - return data - - def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False): - if metrics is None: - metrics = self.metrics - if reverseBytes: - dataRows = map(_reverseBytes, dataRows) - self.imageData = bytesjoin(dataRows) - -class ebdt_bitmap_format_1(ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph): - - def decompile(self): - self.metrics = SmallGlyphMetrics() - dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) - self.imageData = data - - def compile(self, ttFont): - data = sstruct.pack(smallGlyphMetricsFormat, self.metrics) - return data + self.imageData - - -class ebdt_bitmap_format_2(BitAlignedBitmapMixin, 
BitmapPlusSmallMetricsMixin, BitmapGlyph): - - def decompile(self): - self.metrics = SmallGlyphMetrics() - dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) - self.imageData = data - - def compile(self, ttFont): - data = sstruct.pack(smallGlyphMetricsFormat, self.metrics) - return data + self.imageData - - -class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph): - - def decompile(self): - self.imageData = self.data - - def compile(self, ttFont): - return self.imageData - -class ebdt_bitmap_format_6(ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph): - - def decompile(self): - self.metrics = BigGlyphMetrics() - dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) - self.imageData = data - - def compile(self, ttFont): - data = sstruct.pack(bigGlyphMetricsFormat, self.metrics) - return data + self.imageData - - -class ebdt_bitmap_format_7(BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph): - - def decompile(self): - self.metrics = BigGlyphMetrics() - dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) - self.imageData = data - - def compile(self, ttFont): - data = sstruct.pack(bigGlyphMetricsFormat, self.metrics) - return data + self.imageData - - -class ComponentBitmapGlyph(BitmapGlyph): - - def toXML(self, strikeIndex, glyphName, writer, ttFont): - writer.begintag(self.__class__.__name__, [('name', glyphName)]) - writer.newline() - - self.writeMetrics(writer, ttFont) - - writer.begintag('components') - writer.newline() - for curComponent in self.componentArray: - curComponent.toXML(writer, ttFont) - writer.endtag('components') - writer.newline() - - writer.endtag(self.__class__.__name__) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.readMetrics(name, attrs, content, ttFont) - for element in content: - if not isinstance(element, tuple): - continue - name, attr, content = element - if name == 'components': - 
self.componentArray = [] - for compElement in content: - if not isinstance(compElement, tuple): - continue - name, attrs, content = compElement - if name == 'ebdtComponent': - curComponent = EbdtComponent() - curComponent.fromXML(name, attrs, content, ttFont) - self.componentArray.append(curComponent) - else: - print("Warning: '%s' being ignored in component array." % name) - - -class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph): - - def decompile(self): - self.metrics = SmallGlyphMetrics() - dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) - data = data[1:] - - (numComponents,) = struct.unpack(">H", data[:2]) - data = data[2:] - self.componentArray = [] - for i in range(numComponents): - curComponent = EbdtComponent() - dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent) - curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode) - self.componentArray.append(curComponent) - - def compile(self, ttFont): - dataList = [] - dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) - dataList.append(b'\0') - dataList.append(struct.pack(">H", len(self.componentArray))) - for curComponent in self.componentArray: - curComponent.glyphCode = ttFont.getGlyphID(curComponent.name) - dataList.append(sstruct.pack(ebdtComponentFormat, curComponent)) - return bytesjoin(dataList) - - -class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph): - - def decompile(self): - self.metrics = BigGlyphMetrics() - dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) - (numComponents,) = struct.unpack(">H", data[:2]) - data = data[2:] - self.componentArray = [] - for i in range(numComponents): - curComponent = EbdtComponent() - dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent) - curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode) - self.componentArray.append(curComponent) - - def compile(self, ttFont): - dataList = 
[] - dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) - dataList.append(struct.pack(">H", len(self.componentArray))) - for curComponent in self.componentArray: - curComponent.glyphCode = ttFont.getGlyphID(curComponent.name) - dataList.append(sstruct.pack(ebdtComponentFormat, curComponent)) - return bytesjoin(dataList) - - -# Dictionary of bitmap formats to the class representing that format -# currently only the ones listed in this map are the ones supported. -ebdt_bitmap_classes = { - 1: ebdt_bitmap_format_1, - 2: ebdt_bitmap_format_2, - 5: ebdt_bitmap_format_5, - 6: ebdt_bitmap_format_6, - 7: ebdt_bitmap_format_7, - 8: ebdt_bitmap_format_8, - 9: ebdt_bitmap_format_9, - } diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/E_B_L_C_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/E_B_L_C_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/E_B_L_C_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/E_B_L_C_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,617 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from . import DefaultTable -from fontTools.misc.textTools import safeEval -from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat -import struct -import itertools -from collections import deque - -eblcHeaderFormat = """ - > # big endian - version: 16.16F - numSizes: I -""" -# The table format string is split to handle sbitLineMetrics simply. -bitmapSizeTableFormatPart1 = """ - > # big endian - indexSubTableArrayOffset: I - indexTablesSize: I - numberOfIndexSubTables: I - colorRef: I -""" -# The compound type for hori and vert. 
-sbitLineMetricsFormat = """ - > # big endian - ascender: b - descender: b - widthMax: B - caretSlopeNumerator: b - caretSlopeDenominator: b - caretOffset: b - minOriginSB: b - minAdvanceSB: b - maxBeforeBL: b - minAfterBL: b - pad1: b - pad2: b -""" -# hori and vert go between the two parts. -bitmapSizeTableFormatPart2 = """ - > # big endian - startGlyphIndex: H - endGlyphIndex: H - ppemX: B - ppemY: B - bitDepth: B - flags: b -""" - -indexSubTableArrayFormat = ">HHL" -indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat) - -indexSubHeaderFormat = ">HHL" -indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat) - -codeOffsetPairFormat = ">HH" -codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat) - -class table_E_B_L_C_(DefaultTable.DefaultTable): - - dependencies = ['EBDT'] - - # This method can be overridden in subclasses to support new formats - # without changing the other implementation. Also can be used as a - # convenience method for coverting a font file to an alternative format. - def getIndexFormatClass(self, indexFormat): - return eblc_sub_table_classes[indexFormat] - - def decompile(self, data, ttFont): - - # Save the original data because offsets are from the start of the table. 
- origData = data - - dummy, data = sstruct.unpack2(eblcHeaderFormat, data, self) - - self.strikes = [] - for curStrikeIndex in range(self.numSizes): - curStrike = Strike() - self.strikes.append(curStrike) - curTable = curStrike.bitmapSizeTable - dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart1, data, curTable) - for metric in ('hori', 'vert'): - metricObj = SbitLineMetrics() - vars(curTable)[metric] = metricObj - dummy, data = sstruct.unpack2(sbitLineMetricsFormat, data, metricObj) - dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart2, data, curTable) - - for curStrike in self.strikes: - curTable = curStrike.bitmapSizeTable - for subtableIndex in range(curTable.numberOfIndexSubTables): - lowerBound = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize - upperBound = lowerBound + indexSubTableArraySize - data = origData[lowerBound:upperBound] - - tup = struct.unpack(indexSubTableArrayFormat, data) - (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup - offsetToIndexSubTable = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable - data = origData[offsetToIndexSubTable:] - - tup = struct.unpack(indexSubHeaderFormat, data[:indexSubHeaderSize]) - (indexFormat, imageFormat, imageDataOffset) = tup - - indexFormatClass = self.getIndexFormatClass(indexFormat) - indexSubTable = indexFormatClass(data[indexSubHeaderSize:], ttFont) - indexSubTable.firstGlyphIndex = firstGlyphIndex - indexSubTable.lastGlyphIndex = lastGlyphIndex - indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable - indexSubTable.indexFormat = indexFormat - indexSubTable.imageFormat = imageFormat - indexSubTable.imageDataOffset = imageDataOffset - curStrike.indexSubTables.append(indexSubTable) - - def compile(self, ttFont): - - dataList = [] - self.numSizes = len(self.strikes) - dataList.append(sstruct.pack(eblcHeaderFormat, self)) - - # Data size of the header + bitmapSizeTable needs to be calculated - # in 
order to form offsets. This value will hold the size of the data - # in dataList after all the data is consolidated in dataList. - dataSize = len(dataList[0]) - - # The table will be structured in the following order: - # (0) header - # (1) Each bitmapSizeTable [1 ... self.numSizes] - # (2) Alternate between indexSubTableArray and indexSubTable - # for each bitmapSizeTable present. - # - # The issue is maintaining the proper offsets when table information - # gets moved around. All offsets and size information must be recalculated - # when building the table to allow editing within ttLib and also allow easy - # import/export to and from XML. All of this offset information is lost - # when exporting to XML so everything must be calculated fresh so importing - # from XML will work cleanly. Only byte offset and size information is - # calculated fresh. Count information like numberOfIndexSubTables is - # checked through assertions. If the information in this table was not - # touched or was changed properly then these types of values should match. - # - # The table will be rebuilt the following way: - # (0) Precompute the size of all the bitmapSizeTables. This is needed to - # compute the offsets properly. - # (1) For each bitmapSizeTable compute the indexSubTable and - # indexSubTableArray pair. The indexSubTable must be computed first - # so that the offset information in indexSubTableArray can be - # calculated. Update the data size after each pairing. - # (2) Build each bitmapSizeTable. - # (3) Consolidate all the data into the main dataList in the correct order. 
- - for curStrike in self.strikes: - dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1) - dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat) - dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2) - - indexSubTablePairDataList = [] - for curStrike in self.strikes: - curTable = curStrike.bitmapSizeTable - curTable.numberOfIndexSubTables = len(curStrike.indexSubTables) - curTable.indexSubTableArrayOffset = dataSize - - # Precompute the size of the indexSubTableArray. This information - # is important for correctly calculating the new value for - # additionalOffsetToIndexSubtable. - sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize - lowerBound = dataSize - dataSize += sizeOfSubTableArray - upperBound = dataSize - - indexSubTableDataList = [] - for indexSubTable in curStrike.indexSubTables: - indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset - glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names)) - indexSubTable.firstGlyphIndex = min(glyphIds) - indexSubTable.lastGlyphIndex = max(glyphIds) - data = indexSubTable.compile(ttFont) - indexSubTableDataList.append(data) - dataSize += len(data) - curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables) - curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables) - - for i in curStrike.indexSubTables: - data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable) - indexSubTablePairDataList.append(data) - indexSubTablePairDataList.extend(indexSubTableDataList) - curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset - - for curStrike in self.strikes: - curTable = curStrike.bitmapSizeTable - data = sstruct.pack(bitmapSizeTableFormatPart1, curTable) - dataList.append(data) - for metric in ('hori', 'vert'): - metricObj = vars(curTable)[metric] - data = sstruct.pack(sbitLineMetricsFormat, 
metricObj) - dataList.append(data) - data = sstruct.pack(bitmapSizeTableFormatPart2, curTable) - dataList.append(data) - dataList.extend(indexSubTablePairDataList) - - return bytesjoin(dataList) - - def toXML(self, writer, ttFont): - writer.simpletag('header', [('version', self.version)]) - writer.newline() - for curIndex, curStrike in enumerate(self.strikes): - curStrike.toXML(curIndex, writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name == 'header': - self.version = safeEval(attrs['version']) - elif name == 'strike': - if not hasattr(self, 'strikes'): - self.strikes = [] - strikeIndex = safeEval(attrs['index']) - curStrike = Strike() - curStrike.fromXML(name, attrs, content, ttFont, self) - - # Grow the strike array to the appropriate size. The XML format - # allows for the strike index value to be out of order. - if strikeIndex >= len(self.strikes): - self.strikes += [None] * (strikeIndex + 1 - len(self.strikes)) - assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices." - self.strikes[strikeIndex] = curStrike - -class Strike(object): - - def __init__(self): - self.bitmapSizeTable = BitmapSizeTable() - self.indexSubTables = [] - - def toXML(self, strikeIndex, writer, ttFont): - writer.begintag('strike', [('index', strikeIndex)]) - writer.newline() - self.bitmapSizeTable.toXML(writer, ttFont) - writer.comment('GlyphIds are written but not read. 
The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.') - writer.newline() - for indexSubTable in self.indexSubTables: - indexSubTable.toXML(writer, ttFont) - writer.endtag('strike') - writer.newline() - - def fromXML(self, name, attrs, content, ttFont, locator): - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == 'bitmapSizeTable': - self.bitmapSizeTable.fromXML(name, attrs, content, ttFont) - elif name.startswith(_indexSubTableSubclassPrefix): - indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):]) - indexFormatClass = locator.getIndexFormatClass(indexFormat) - indexSubTable = indexFormatClass(None, None) - indexSubTable.indexFormat = indexFormat - indexSubTable.fromXML(name, attrs, content, ttFont) - self.indexSubTables.append(indexSubTable) - - -class BitmapSizeTable(object): - - # Returns all the simple metric names that bitmap size table - # cares about in terms of XML creation. - def _getXMLMetricNames(self): - dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1] - dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1] - # Skip the first 3 data names because they are byte offsets and counts. - return dataNames[3:] - - def toXML(self, writer, ttFont): - writer.begintag('bitmapSizeTable') - writer.newline() - for metric in ('hori', 'vert'): - getattr(self, metric).toXML(metric, writer, ttFont) - for metricName in self._getXMLMetricNames(): - writer.simpletag(metricName, value=getattr(self, metricName)) - writer.newline() - writer.endtag('bitmapSizeTable') - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - # Create a lookup for all the simple names that make sense to - # bitmap size table. Only read the information from these names. 
- dataNames = set(self._getXMLMetricNames()) - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == 'sbitLineMetrics': - direction = attrs['direction'] - assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid." - metricObj = SbitLineMetrics() - metricObj.fromXML(name, attrs, content, ttFont) - vars(self)[direction] = metricObj - elif name in dataNames: - vars(self)[name] = safeEval(attrs['value']) - else: - print("Warning: unknown name '%s' being ignored in BitmapSizeTable." % name) - - -class SbitLineMetrics(object): - - def toXML(self, name, writer, ttFont): - writer.begintag('sbitLineMetrics', [('direction', name)]) - writer.newline() - for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]: - writer.simpletag(metricName, value=getattr(self, metricName)) - writer.newline() - writer.endtag('sbitLineMetrics') - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1]) - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name in metricNames: - vars(self)[name] = safeEval(attrs['value']) - -# Important information about the naming scheme. Used for identifying subtables. -_indexSubTableSubclassPrefix = 'eblc_index_sub_table_' - -class EblcIndexSubTable(object): - - def __init__(self, data, ttFont): - self.data = data - self.ttFont = ttFont - # TODO Currently non-lazy decompiling doesn't work for this class... - #if not ttFont.lazy: - # self.decompile() - # del self.data, self.ttFont - - def __getattr__(self, attr): - # Allow lazy decompile. - if attr[:2] == '__': - raise AttributeError(attr) - if not hasattr(self, "data"): - raise AttributeError(attr) - self.decompile() - del self.data, self.ttFont - return getattr(self, attr) - - # This method just takes care of the indexSubHeader. 
Implementing subclasses - # should call it to compile the indexSubHeader and then continue compiling - # the remainder of their unique format. - def compile(self, ttFont): - return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset) - - # Creates the XML for bitmap glyphs. Each index sub table basically makes - # the same XML except for specific metric information that is written - # out via a method call that a subclass implements optionally. - def toXML(self, writer, ttFont): - writer.begintag(self.__class__.__name__, [ - ('imageFormat', self.imageFormat), - ('firstGlyphIndex', self.firstGlyphIndex), - ('lastGlyphIndex', self.lastGlyphIndex), - ]) - writer.newline() - self.writeMetrics(writer, ttFont) - # Write out the names as thats all thats needed to rebuild etc. - # For font debugging of consecutive formats the ids are also written. - # The ids are not read when moving from the XML format. - glyphIds = map(ttFont.getGlyphID, self.names) - for glyphName, glyphId in zip(self.names, glyphIds): - writer.simpletag('glyphLoc', name=glyphName, id=glyphId) - writer.newline() - writer.endtag(self.__class__.__name__) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - # Read all the attributes. Even though the glyph indices are - # recalculated, they are still read in case there needs to - # be an immediate export of the data. - self.imageFormat = safeEval(attrs['imageFormat']) - self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex']) - self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex']) - - self.readMetrics(name, attrs, content, ttFont) - - self.names = [] - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == 'glyphLoc': - self.names.append(attrs['name']) - - # A helper method that writes the metrics for the index sub table. 
It also - # is responsible for writing the image size for fixed size data since fixed - # size is not recalculated on compile. Default behavior is to do nothing. - def writeMetrics(self, writer, ttFont): - pass - - # A helper method that is the inverse of writeMetrics. - def readMetrics(self, name, attrs, content, ttFont): - pass - - # This method is for fixed glyph data sizes. There are formats where - # the glyph data is fixed but are actually composite glyphs. To handle - # this the font spec in indexSubTable makes the data the size of the - # fixed size by padding the component arrays. This function abstracts - # out this padding process. Input is data unpadded. Output is data - # padded only in fixed formats. Default behavior is to return the data. - def padBitmapData(self, data): - return data - - # Remove any of the glyph locations and names that are flagged as skipped. - # This only occurs in formats {1,3}. - def removeSkipGlyphs(self): - # Determines if a name, location pair is a valid data location. - # Skip glyphs are marked when the size is equal to zero. - def isValidLocation(args): - (name, (startByte, endByte)) = args - return startByte < endByte - # Remove all skip glyphs. - dataPairs = list(filter(isValidLocation, zip(self.names, self.locations))) - self.names, self.locations = list(map(list, zip(*dataPairs))) - -# A closure for creating a custom mixin. This is done because formats 1 and 3 -# are very similar. The only difference between them is the size per offset -# value. Code put in here should handle both cases generally. -def _createOffsetArrayIndexSubTableMixin(formatStringForDataType): - - # Prep the data size for the offset array data format. 
- dataFormat = '>'+formatStringForDataType - offsetDataSize = struct.calcsize(dataFormat) - - class OffsetArrayIndexSubTableMixin(object): - - def decompile(self): - - numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1 - indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)] - indexingLocations = zip(indexingOffsets, indexingOffsets[1:]) - offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations] - - glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) - modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray] - self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:])) - - self.names = list(map(self.ttFont.getGlyphName, glyphIds)) - self.removeSkipGlyphs() - - def compile(self, ttFont): - # First make sure that all the data lines up properly. Formats 1 and 3 - # must have all its data lined up consecutively. If not this will fail. - for curLoc, nxtLoc in zip(self.locations, self.locations[1:]): - assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats" - - glyphIds = list(map(ttFont.getGlyphID, self.names)) - # Make sure that all ids are sorted strictly increasing. - assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1)) - - # Run a simple algorithm to add skip glyphs to the data locations at - # the places where an id is not present. - idQueue = deque(glyphIds) - locQueue = deque(self.locations) - allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) - allLocations = [] - for curId in allGlyphIds: - if curId != idQueue[0]: - allLocations.append((locQueue[0][0], locQueue[0][0])) - else: - idQueue.popleft() - allLocations.append(locQueue.popleft()) - - # Now that all the locations are collected, pack them appropriately into - # offsets. This is the form where offset[i] is the location and - # offset[i+1]-offset[i] is the size of the data location. 
- offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]] - # Image data offset must be less than or equal to the minimum of locations. - # This offset may change the value for round tripping but is safer and - # allows imageDataOffset to not be required to be in the XML version. - self.imageDataOffset = min(offsets) - offsetArray = [offset - self.imageDataOffset for offset in offsets] - - dataList = [EblcIndexSubTable.compile(self, ttFont)] - dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray] - # Take care of any padding issues. Only occurs in format 3. - if offsetDataSize * len(dataList) % 4 != 0: - dataList.append(struct.pack(dataFormat, 0)) - return bytesjoin(dataList) - - return OffsetArrayIndexSubTableMixin - -# A Mixin for functionality shared between the different kinds -# of fixed sized data handling. Both kinds have big metrics so -# that kind of special processing is also handled in this mixin. -class FixedSizeIndexSubTableMixin(object): - - def writeMetrics(self, writer, ttFont): - writer.simpletag('imageSize', value=self.imageSize) - writer.newline() - self.metrics.toXML(writer, ttFont) - - def readMetrics(self, name, attrs, content, ttFont): - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == 'imageSize': - self.imageSize = safeEval(attrs['value']) - elif name == BigGlyphMetrics.__name__: - self.metrics = BigGlyphMetrics() - self.metrics.fromXML(name, attrs, content, ttFont) - elif name == SmallGlyphMetrics.__name__: - print("Warning: SmallGlyphMetrics being ignored in format %d." % self.indexFormat) - - def padBitmapData(self, data): - # Make sure that the data isn't bigger than the fixed size. - assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat - # Pad the data so that it matches the fixed size. 
- pad = (self.imageSize - len(data)) * b'\0' - return data + pad - -class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable): - pass - -class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable): - - def decompile(self): - (self.imageSize,) = struct.unpack(">L", self.data[:4]) - self.metrics = BigGlyphMetrics() - sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics) - glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)) - offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] - self.locations = list(zip(offsets, offsets[1:])) - self.names = list(map(self.ttFont.getGlyphName, glyphIds)) - - def compile(self, ttFont): - glyphIds = list(map(ttFont.getGlyphID, self.names)) - # Make sure all the ids are consecutive. This is required by Format 2. - assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive." - self.imageDataOffset = min(zip(*self.locations)[0]) - - dataList = [EblcIndexSubTable.compile(self, ttFont)] - dataList.append(struct.pack(">L", self.imageSize)) - dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) - return bytesjoin(dataList) - -class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable): - pass - -class eblc_index_sub_table_4(EblcIndexSubTable): - - def decompile(self): - - (numGlyphs,) = struct.unpack(">L", self.data[:4]) - data = self.data[4:] - indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)] - indexingLocations = zip(indexingOffsets, indexingOffsets[1:]) - glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations] - glyphIds, offsets = list(map(list, zip(*glyphArray))) - # There are one too many glyph ids. Get rid of the last one. 
- glyphIds.pop() - - offsets = [offset + self.imageDataOffset for offset in offsets] - self.locations = list(zip(offsets, offsets[1:])) - self.names = list(map(self.ttFont.getGlyphName, glyphIds)) - - def compile(self, ttFont): - # First make sure that all the data lines up properly. Format 4 - # must have all its data lined up consecutively. If not this will fail. - for curLoc, nxtLoc in zip(self.locations, self.locations[1:]): - assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4" - - offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]] - # Image data offset must be less than or equal to the minimum of locations. - # Resetting this offset may change the value for round tripping but is safer - # and allows imageDataOffset to not be required to be in the XML version. - self.imageDataOffset = min(offsets) - offsets = [offset - self.imageDataOffset for offset in offsets] - glyphIds = list(map(ttFont.getGlyphID, self.names)) - # Create an iterator over the ids plus a padding value. 
- idsPlusPad = list(itertools.chain(glyphIds, [0])) - - dataList = [EblcIndexSubTable.compile(self, ttFont)] - dataList.append(struct.pack(">L", len(glyphIds))) - tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)] - dataList += tmp - data = bytesjoin(dataList) - return data - -class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable): - - def decompile(self): - self.origDataLen = 0 - (self.imageSize,) = struct.unpack(">L", self.data[:4]) - data = self.data[4:] - self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics()) - (numGlyphs,) = struct.unpack(">L", data[:4]) - data = data[4:] - glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)] - - offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)] - self.locations = list(zip(offsets, offsets[1:])) - self.names = list(map(self.ttFont.getGlyphName, glyphIds)) - - def compile(self, ttFont): - self.imageDataOffset = min(zip(*self.locations)[0]) - dataList = [EblcIndexSubTable.compile(self, ttFont)] - dataList.append(struct.pack(">L", self.imageSize)) - dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) - glyphIds = list(map(ttFont.getGlyphID, self.names)) - dataList.append(struct.pack(">L", len(glyphIds))) - dataList += [struct.pack(">H", curId) for curId in glyphIds] - if len(glyphIds) % 2 == 1: - dataList.append(struct.pack(">H", 0)) - return bytesjoin(dataList) - -# Dictionary of indexFormat to the class representing that format. 
-eblc_sub_table_classes = { - 1: eblc_index_sub_table_1, - 2: eblc_index_sub_table_2, - 3: eblc_index_sub_table_3, - 4: eblc_index_sub_table_4, - 5: eblc_index_sub_table_5, - } diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_f_e_a_t.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_f_e_a_t.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_f_e_a_t.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_f_e_a_t.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from .otBase import BaseTTXConverter - - -class table__f_e_a_t(BaseTTXConverter): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/F_F_T_M_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/F_F_T_M_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/F_F_T_M_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/F_F_T_M_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval -from fontTools.misc.timeTools import timestampFromString, timestampToString -from . 
import DefaultTable - -FFTMFormat = """ - > # big endian - version: I - FFTimeStamp: Q - sourceCreated: Q - sourceModified: Q -""" - -class table_F_F_T_M_(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - dummy, rest = sstruct.unpack2(FFTMFormat, data, self) - - def compile(self, ttFont): - data = sstruct.pack(FFTMFormat, self) - return data - - def toXML(self, writer, ttFont): - writer.comment("FontForge's timestamp, font source creation and modification dates") - writer.newline() - formatstring, names, fixes = sstruct.getformat(FFTMFormat) - for name in names: - value = getattr(self, name) - if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): - value = timestampToString(value) - writer.simpletag(name, value=value) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - value = attrs["value"] - if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): - value = timestampFromString(value) - else: - value = safeEval(value) - setattr(self, name, value) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_f_p_g_m.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_f_p_g_m.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_f_p_g_m.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_f_p_g_m.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . import DefaultTable -from . 
import ttProgram - -class table__f_p_g_m(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - program = ttProgram.Program() - program.fromBytecode(data) - self.program = program - - def compile(self, ttFont): - return self.program.getBytecode() - - def toXML(self, writer, ttFont): - self.program.toXML(writer, ttFont) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - program = ttProgram.Program() - program.fromXML(name, attrs, content, ttFont) - self.program = program - - def __bool__(self): - """ - >>> fpgm = table__f_p_g_m() - >>> bool(fpgm) - False - >>> p = ttProgram.Program() - >>> fpgm.program = p - >>> bool(fpgm) - False - >>> bc = bytearray([0]) - >>> p.fromBytecode(bc) - >>> bool(fpgm) - True - >>> p.bytecode.pop() - 0 - >>> bool(fpgm) - False - """ - return hasattr(self, 'program') and bool(self.program) - - __nonzero__ = __bool__ - - -if __name__ == "__main__": - import sys - import doctest - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_f_v_a_r.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_f_v_a_r.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_f_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_f_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,187 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.fixedTools import fixedToFloat, floatToFixed -from fontTools.misc.textTools import safeEval, num2binary, binary2num -from fontTools.ttLib import TTLibError -from . 
import DefaultTable -import struct - - -# Apple's documentation of 'fvar': -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html - -FVAR_HEADER_FORMAT = """ - > # big endian - version: L - offsetToData: H - countSizePairs: H - axisCount: H - axisSize: H - instanceCount: H - instanceSize: H -""" - -FVAR_AXIS_FORMAT = """ - > # big endian - axisTag: 4s - minValue: 16.16F - defaultValue: 16.16F - maxValue: 16.16F - flags: H - nameID: H -""" - -FVAR_INSTANCE_FORMAT = """ - > # big endian - nameID: H - flags: H -""" - -class table__f_v_a_r(DefaultTable.DefaultTable): - dependencies = ["name"] - - def __init__(self, tag="fvar"): - DefaultTable.DefaultTable.__init__(self, tag) - self.axes = [] - self.instances = [] - - def compile(self, ttFont): - header = { - "version": 0x00010000, - "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT), - "countSizePairs": 2, - "axisCount": len(self.axes), - "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT), - "instanceCount": len(self.instances), - "instanceSize": sstruct.calcsize(FVAR_INSTANCE_FORMAT) + len(self.axes) * 4 - } - result = [sstruct.pack(FVAR_HEADER_FORMAT, header)] - result.extend([axis.compile() for axis in self.axes]) - axisTags = [axis.axisTag for axis in self.axes] - result.extend([instance.compile(axisTags) for instance in self.instances]) - return bytesjoin(result) - - def decompile(self, data, ttFont): - header = {} - headerSize = sstruct.calcsize(FVAR_HEADER_FORMAT) - header = sstruct.unpack(FVAR_HEADER_FORMAT, data[0:headerSize]) - if header["version"] != 0x00010000: - raise TTLibError("unsupported 'fvar' version %04x" % header["version"]) - pos = header["offsetToData"] - axisSize = header["axisSize"] - for _ in range(header["axisCount"]): - axis = Axis() - axis.decompile(data[pos:pos+axisSize]) - self.axes.append(axis) - pos += axisSize - instanceSize = header["instanceSize"] - axisTags = [axis.axisTag for axis in self.axes] - for _ in range(header["instanceCount"]): - instance = 
NamedInstance() - instance.decompile(data[pos:pos+instanceSize], axisTags) - self.instances.append(instance) - pos += instanceSize - - def toXML(self, writer, ttFont, progress=None): - for axis in self.axes: - axis.toXML(writer, ttFont) - for instance in self.instances: - instance.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name == "Axis": - axis = Axis() - axis.fromXML(name, attrs, content, ttFont) - self.axes.append(axis) - elif name == "NamedInstance": - instance = NamedInstance() - instance.fromXML(name, attrs, content, ttFont) - self.instances.append(instance) - -class Axis(object): - def __init__(self): - self.axisTag = None - self.nameID = 0 - self.flags = 0 # not exposed in XML because spec defines no values - self.minValue = -1.0 - self.defaultValue = 0.0 - self.maxValue = 1.0 - - def compile(self): - return sstruct.pack(FVAR_AXIS_FORMAT, self) - - def decompile(self, data): - sstruct.unpack2(FVAR_AXIS_FORMAT, data, self) - - def toXML(self, writer, ttFont): - name = ttFont["name"].getDebugName(self.nameID) - if name is not None: - writer.newline() - writer.comment(name) - writer.newline() - writer.begintag("Axis") - writer.newline() - for tag, value in [("AxisTag", self.axisTag), - ("MinValue", str(self.minValue)), - ("DefaultValue", str(self.defaultValue)), - ("MaxValue", str(self.maxValue)), - ("NameID", str(self.nameID))]: - writer.begintag(tag) - writer.write(value) - writer.endtag(tag) - writer.newline() - writer.endtag("Axis") - writer.newline() - - def fromXML(self, name, _attrs, content, ttFont): - assert(name == "Axis") - for tag, _, value in filter(lambda t: type(t) is tuple, content): - value = ''.join(value) - if tag == "AxisTag": - self.axisTag = value - elif tag in ["MinValue", "DefaultValue", "MaxValue", "NameID"]: - setattr(self, tag[0].lower() + tag[1:], safeEval(value)) - -class NamedInstance(object): - def __init__(self): - self.nameID = 0 - self.flags = 0 # not exposed in XML because spec defines no 
values - self.coordinates = {} - - def compile(self, axisTags): - result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)] - for axis in axisTags: - fixedCoord = floatToFixed(self.coordinates[axis], 16) - result.append(struct.pack(">l", fixedCoord)) - return bytesjoin(result) - - def decompile(self, data, axisTags): - sstruct.unpack2(FVAR_INSTANCE_FORMAT, data, self) - pos = sstruct.calcsize(FVAR_INSTANCE_FORMAT) - for axis in axisTags: - value = struct.unpack(">l", data[pos : pos + 4])[0] - self.coordinates[axis] = fixedToFloat(value, 16) - pos += 4 - - def toXML(self, writer, ttFont): - name = ttFont["name"].getDebugName(self.nameID) - if name is not None: - writer.newline() - writer.comment(name) - writer.newline() - writer.begintag("NamedInstance", nameID=self.nameID) - writer.newline() - for axis in ttFont["fvar"].axes: - writer.simpletag("coord", axis=axis.axisTag, - value=self.coordinates[axis.axisTag]) - writer.newline() - writer.endtag("NamedInstance") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - assert(name == "NamedInstance") - self.nameID = safeEval(attrs["nameID"]) - for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content): - if tag == "coord": - self.coordinates[elementAttrs["axis"]] = safeEval(elementAttrs["value"]) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_f_v_a_r_test.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_f_v_a_r_test.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_f_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_f_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,190 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis, NamedInstance -from 
fontTools.ttLib.tables._n_a_m_e import table__n_a_m_e, NameRecord -import unittest - - - -FVAR_DATA = deHexStr( - "00 01 00 00 00 10 00 02 00 02 00 14 00 02 00 0C " - "77 67 68 74 00 64 00 00 01 90 00 00 03 84 00 00 00 00 01 01 " - "77 64 74 68 00 32 00 00 00 64 00 00 00 c8 00 00 00 00 01 02 " - "01 03 00 00 01 2c 00 00 00 64 00 00 " - "01 04 00 00 01 2c 00 00 00 4b 00 00") - -FVAR_AXIS_DATA = deHexStr( - "6F 70 73 7a ff ff 80 00 00 01 4c cd 00 01 80 00 00 00 01 59") - -FVAR_INSTANCE_DATA = deHexStr("01 59 00 00 00 00 b3 33 00 00 80 00") - - -def xml_lines(writer): - content = writer.file.getvalue().decode("utf-8") - return [line.strip() for line in content.splitlines()][1:] - - -def AddName(font, name): - nameTable = font.get("name") - if nameTable is None: - nameTable = font["name"] = table__n_a_m_e() - nameTable.names = [] - namerec = NameRecord() - namerec.nameID = 1 + max([n.nameID for n in nameTable.names] + [256]) - namerec.string = name.encode('mac_roman') - namerec.platformID, namerec.platEncID, namerec.langID = (1, 0, 0) - nameTable.names.append(namerec) - return namerec - - -def MakeFont(): - axes = [("wght", "Weight", 100, 400, 900), ("wdth", "Width", 50, 100, 200)] - instances = [("Light", 300, 100), ("Light Condensed", 300, 75)] - fvarTable = table__f_v_a_r() - font = {"fvar": fvarTable} - for tag, name, minValue, defaultValue, maxValue in axes: - axis = Axis() - axis.axisTag = tag - axis.defaultValue = defaultValue - axis.minValue, axis.maxValue = minValue, maxValue - axis.nameID = AddName(font, name).nameID - fvarTable.axes.append(axis) - for name, weight, width in instances: - inst = NamedInstance() - inst.nameID = AddName(font, name).nameID - inst.coordinates = {"wght": weight, "wdth": width} - fvarTable.instances.append(inst) - return font - - -class FontVariationTableTest(unittest.TestCase): - def test_compile(self): - font = MakeFont() - h = font["fvar"].compile(font) - self.assertEqual(FVAR_DATA, font["fvar"].compile(font)) - - def 
test_decompile(self): - fvar = table__f_v_a_r() - fvar.decompile(FVAR_DATA, ttFont={"fvar": fvar}) - self.assertEqual(["wght", "wdth"], [a.axisTag for a in fvar.axes]) - self.assertEqual([259, 260], [i.nameID for i in fvar.instances]) - - def test_toXML(self): - font = MakeFont() - writer = XMLWriter(BytesIO()) - font["fvar"].toXML(writer, font) - xml = writer.file.getvalue().decode("utf-8") - self.assertEqual(2, xml.count("")) - self.assertTrue("wght" in xml) - self.assertTrue("wdth" in xml) - self.assertEqual(2, xml.count("" in xml) - self.assertTrue("" in xml) - - def test_fromXML(self): - fvar = table__f_v_a_r() - fvar.fromXML("Axis", {}, [("AxisTag", {}, ["opsz"])], ttFont=None) - fvar.fromXML("Axis", {}, [("AxisTag", {}, ["slnt"])], ttFont=None) - fvar.fromXML("NamedInstance", {"nameID": "765"}, [], ttFont=None) - fvar.fromXML("NamedInstance", {"nameID": "234"}, [], ttFont=None) - self.assertEqual(["opsz", "slnt"], [a.axisTag for a in fvar.axes]) - self.assertEqual([765, 234], [i.nameID for i in fvar.instances]) - - -class AxisTest(unittest.TestCase): - def test_compile(self): - axis = Axis() - axis.axisTag, axis.nameID = ('opsz', 345) - axis.minValue, axis.defaultValue, axis.maxValue = (-0.5, 1.3, 1.5) - self.assertEqual(FVAR_AXIS_DATA, axis.compile()) - - def test_decompile(self): - axis = Axis() - axis.decompile(FVAR_AXIS_DATA) - self.assertEqual("opsz", axis.axisTag) - self.assertEqual(345, axis.nameID) - self.assertEqual(-0.5, axis.minValue) - self.assertEqual(1.3, axis.defaultValue) - self.assertEqual(1.5, axis.maxValue) - - def test_toXML(self): - font = MakeFont() - axis = Axis() - axis.decompile(FVAR_AXIS_DATA) - AddName(font, "Optical Size").nameID = 256 - axis.nameID = 256 - writer = XMLWriter(BytesIO()) - axis.toXML(writer, font) - self.assertEqual([ - '', - '', - '', - 'opsz', - '-0.5', - '1.3', - '1.5', - '256', - '' - ], xml_lines(writer)) - - def test_fromXML(self): - axis = Axis() - axis.fromXML("Axis", {}, [ - ("AxisTag", {}, ["wght"]), - 
("MinValue", {}, ["100"]), - ("DefaultValue", {}, ["400"]), - ("MaxValue", {}, ["900"]), - ("NameID", {}, ["256"]) - ], ttFont=None) - self.assertEqual("wght", axis.axisTag) - self.assertEqual(100, axis.minValue) - self.assertEqual(400, axis.defaultValue) - self.assertEqual(900, axis.maxValue) - self.assertEqual(256, axis.nameID) - - -class NamedInstanceTest(unittest.TestCase): - def test_compile(self): - inst = NamedInstance() - inst.nameID = 345 - inst.coordinates = {"wght": 0.7, "wdth": 0.5} - self.assertEqual(FVAR_INSTANCE_DATA, inst.compile(["wght", "wdth"])) - - def test_decompile(self): - inst = NamedInstance() - inst.decompile(FVAR_INSTANCE_DATA, ["wght", "wdth"]) - self.assertEqual(345, inst.nameID) - self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) - - def test_toXML(self): - font = MakeFont() - inst = NamedInstance() - inst.nameID = AddName(font, "Light Condensed").nameID - inst.coordinates = {"wght": 0.7, "wdth": 0.5} - writer = XMLWriter(BytesIO()) - inst.toXML(writer, font) - self.assertEqual([ - '', - '', - '' % inst.nameID, - '', - '', - '' - ], xml_lines(writer)) - - def test_fromXML(self): - inst = NamedInstance() - attrs = {"nameID": "345"} - inst.fromXML("NamedInstance", attrs, [ - ("coord", {"axis": "wght", "value": "0.7"}, []), - ("coord", {"axis": "wdth", "value": "0.5"}, []), - ], ttFont=MakeFont()) - self.assertEqual(345, inst.nameID) - self.assertEqual({"wght": 0.7, "wdth": 0.5}, inst.coordinates) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_g_a_s_p.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_g_a_s_p.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_g_a_s_p.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_g_a_s_p.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,51 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from . 
import DefaultTable -import struct - - -GASP_SYMMETRIC_GRIDFIT = 0x0004 -GASP_SYMMETRIC_SMOOTHING = 0x0008 -GASP_DOGRAY = 0x0002 -GASP_GRIDFIT = 0x0001 - -class table__g_a_s_p(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - self.version, numRanges = struct.unpack(">HH", data[:4]) - assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version - data = data[4:] - self.gaspRange = {} - for i in range(numRanges): - rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4]) - self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior) - data = data[4:] - assert not data, "too much data" - - def compile(self, ttFont): - version = 0 # ignore self.version - numRanges = len(self.gaspRange) - data = b"" - items = sorted(self.gaspRange.items()) - for rangeMaxPPEM, rangeGaspBehavior in items: - data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior) - if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY): - version = 1 - data = struct.pack(">HH", version, numRanges) + data - return data - - def toXML(self, writer, ttFont): - items = sorted(self.gaspRange.items()) - for rangeMaxPPEM, rangeGaspBehavior in items: - writer.simpletag("gaspRange", [ - ("rangeMaxPPEM", rangeMaxPPEM), - ("rangeGaspBehavior", rangeGaspBehavior)]) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name != "gaspRange": - return - if not hasattr(self, "gaspRange"): - self.gaspRange = {} - self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(attrs["rangeGaspBehavior"]) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/G_D_E_F_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/G_D_E_F_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/G_D_E_F_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/G_D_E_F_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from .otBase import 
BaseTTXConverter - - -class table_G_D_E_F_(BaseTTXConverter): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_g_l_y_f.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_g_l_y_f.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_g_l_y_f.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_g_l_y_f.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1246 +0,0 @@ -"""_g_l_y_f.py -- Converter classes for the 'glyf' table.""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools import ttLib -from fontTools.misc.textTools import safeEval, pad -from fontTools.misc.arrayTools import calcBounds, calcIntBounds, pointInRect -from fontTools.misc.bezierTools import calcQuadraticBounds -from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi -from . import DefaultTable -from . import ttProgram -import sys -import struct -import array -import warnings - -# -# The Apple and MS rasterizers behave differently for -# scaled composite components: one does scale first and then translate -# and the other does it vice versa. MS defined some flags to indicate -# the difference, but it seems nobody actually _sets_ those flags. -# -# Funny thing: Apple seems to _only_ do their thing in the -# WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE -# (eg. Charcoal)... 
-# -SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple - - -class table__g_l_y_f(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - loca = ttFont['loca'] - last = int(loca[0]) - noname = 0 - self.glyphs = {} - self.glyphOrder = glyphOrder = ttFont.getGlyphOrder() - for i in range(0, len(loca)-1): - try: - glyphName = glyphOrder[i] - except IndexError: - noname = noname + 1 - glyphName = 'ttxautoglyph%s' % i - next = int(loca[i+1]) - glyphdata = data[last:next] - if len(glyphdata) != (next - last): - raise ttLib.TTLibError("not enough 'glyf' table data") - glyph = Glyph(glyphdata) - self.glyphs[glyphName] = glyph - last = next - if len(data) - next >= 4: - warnings.warn("too much 'glyf' table data: expected %d, received %d bytes" % - (next, len(data))) - if noname: - warnings.warn('%s glyphs have no name' % noname) - if ttFont.lazy is False: # Be lazy for None and True - for glyph in self.glyphs.values(): - glyph.expand(self) - - def compile(self, ttFont): - if not hasattr(self, "glyphOrder"): - self.glyphOrder = ttFont.getGlyphOrder() - padding = self.padding if hasattr(self, 'padding') else None - locations = [] - currentLocation = 0 - dataList = [] - recalcBBoxes = ttFont.recalcBBoxes - for glyphName in self.glyphOrder: - glyph = self.glyphs[glyphName] - glyphData = glyph.compile(self, recalcBBoxes) - if padding: - glyphData = pad(glyphData, size=padding) - locations.append(currentLocation) - currentLocation = currentLocation + len(glyphData) - dataList.append(glyphData) - locations.append(currentLocation) - - if padding is None and currentLocation < 0x20000: - # See if we can pad any odd-lengthed glyphs to allow loca - # table to use the short offsets. - indices = [i for i,glyphData in enumerate(dataList) if len(glyphData) % 2 == 1] - if indices and currentLocation + len(indices) < 0x20000: - # It fits. Do it. 
- for i in indices: - dataList[i] += b'\0' - currentLocation = 0 - for i,glyphData in enumerate(dataList): - locations[i] = currentLocation - currentLocation += len(glyphData) - locations[len(dataList)] = currentLocation - - data = bytesjoin(dataList) - if 'loca' in ttFont: - ttFont['loca'].set(locations) - if 'maxp' in ttFont: - ttFont['maxp'].numGlyphs = len(self.glyphs) - return data - - def toXML(self, writer, ttFont, progress=None): - writer.newline() - glyphNames = ttFont.getGlyphNames() - writer.comment("The xMin, yMin, xMax and yMax values\nwill be recalculated by the compiler.") - writer.newline() - writer.newline() - counter = 0 - progressStep = 10 - numGlyphs = len(glyphNames) - for glyphName in glyphNames: - if not counter % progressStep and progress is not None: - progress.setLabel("Dumping 'glyf' table... (%s)" % glyphName) - progress.increment(progressStep / numGlyphs) - counter = counter + 1 - glyph = self[glyphName] - if glyph.numberOfContours: - writer.begintag('TTGlyph', [ - ("name", glyphName), - ("xMin", glyph.xMin), - ("yMin", glyph.yMin), - ("xMax", glyph.xMax), - ("yMax", glyph.yMax), - ]) - writer.newline() - glyph.toXML(writer, ttFont) - writer.endtag('TTGlyph') - writer.newline() - else: - writer.simpletag('TTGlyph', name=glyphName) - writer.comment("contains no outline data") - writer.newline() - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name != "TTGlyph": - return - if not hasattr(self, "glyphs"): - self.glyphs = {} - if not hasattr(self, "glyphOrder"): - self.glyphOrder = ttFont.getGlyphOrder() - glyphName = attrs["name"] - if ttFont.verbose: - ttLib.debugmsg("unpacking glyph '%s'" % glyphName) - glyph = Glyph() - for attr in ['xMin', 'yMin', 'xMax', 'yMax']: - setattr(glyph, attr, safeEval(attrs.get(attr, '0'))) - self.glyphs[glyphName] = glyph - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - glyph.fromXML(name, attrs, content, ttFont) - if 
not ttFont.recalcBBoxes: - glyph.compact(self, 0) - - def setGlyphOrder(self, glyphOrder): - self.glyphOrder = glyphOrder - - def getGlyphName(self, glyphID): - return self.glyphOrder[glyphID] - - def getGlyphID(self, glyphName): - # XXX optimize with reverse dict!!! - return self.glyphOrder.index(glyphName) - - def keys(self): - return self.glyphs.keys() - - def has_key(self, glyphName): - return glyphName in self.glyphs - - __contains__ = has_key - - def __getitem__(self, glyphName): - glyph = self.glyphs[glyphName] - glyph.expand(self) - return glyph - - def __setitem__(self, glyphName, glyph): - self.glyphs[glyphName] = glyph - if glyphName not in self.glyphOrder: - self.glyphOrder.append(glyphName) - - def __delitem__(self, glyphName): - del self.glyphs[glyphName] - self.glyphOrder.remove(glyphName) - - def __len__(self): - assert len(self.glyphOrder) == len(self.glyphs) - return len(self.glyphs) - - -glyphHeaderFormat = """ - > # big endian - numberOfContours: h - xMin: h - yMin: h - xMax: h - yMax: h -""" - -# flags -flagOnCurve = 0x01 -flagXShort = 0x02 -flagYShort = 0x04 -flagRepeat = 0x08 -flagXsame = 0x10 -flagYsame = 0x20 -flagReserved1 = 0x40 -flagReserved2 = 0x80 - -_flagSignBytes = { - 0: 2, - flagXsame: 0, - flagXShort|flagXsame: +1, - flagXShort: -1, - flagYsame: 0, - flagYShort|flagYsame: +1, - flagYShort: -1, -} - -def flagBest(x, y, onCurve): - """For a given x,y delta pair, returns the flag that packs this pair - most efficiently, as well as the number of byte cost of such flag.""" - - flag = flagOnCurve if onCurve else 0 - cost = 0 - # do x - if x == 0: - flag = flag | flagXsame - elif -255 <= x <= 255: - flag = flag | flagXShort - if x > 0: - flag = flag | flagXsame - cost += 1 - else: - cost += 2 - # do y - if y == 0: - flag = flag | flagYsame - elif -255 <= y <= 255: - flag = flag | flagYShort - if y > 0: - flag = flag | flagYsame - cost += 1 - else: - cost += 2 - return flag, cost - -def flagFits(newFlag, oldFlag, mask): - newBytes = 
_flagSignBytes[newFlag & mask] - oldBytes = _flagSignBytes[oldFlag & mask] - return newBytes == oldBytes or abs(newBytes) > abs(oldBytes) - -def flagSupports(newFlag, oldFlag): - return ((oldFlag & flagOnCurve) == (newFlag & flagOnCurve) and - flagFits(newFlag, oldFlag, flagXsame|flagXShort) and - flagFits(newFlag, oldFlag, flagYsame|flagYShort)) - -def flagEncodeCoord(flag, mask, coord, coordBytes): - byteCount = _flagSignBytes[flag & mask] - if byteCount == 1: - coordBytes.append(coord) - elif byteCount == -1: - coordBytes.append(-coord) - elif byteCount == 2: - coordBytes.append((coord >> 8) & 0xFF) - coordBytes.append(coord & 0xFF) - -def flagEncodeCoords(flag, x, y, xBytes, yBytes): - flagEncodeCoord(flag, flagXsame|flagXShort, x, xBytes) - flagEncodeCoord(flag, flagYsame|flagYShort, y, yBytes) - - -ARG_1_AND_2_ARE_WORDS = 0x0001 # if set args are words otherwise they are bytes -ARGS_ARE_XY_VALUES = 0x0002 # if set args are xy values, otherwise they are points -ROUND_XY_TO_GRID = 0x0004 # for the xy values if above is true -WE_HAVE_A_SCALE = 0x0008 # Sx = Sy, otherwise scale == 1.0 -NON_OVERLAPPING = 0x0010 # set to same value for all components (obsolete!) 
-MORE_COMPONENTS = 0x0020 # indicates at least one more glyph after this one -WE_HAVE_AN_X_AND_Y_SCALE = 0x0040 # Sx, Sy -WE_HAVE_A_TWO_BY_TWO = 0x0080 # t00, t01, t10, t11 -WE_HAVE_INSTRUCTIONS = 0x0100 # instructions follow -USE_MY_METRICS = 0x0200 # apply these metrics to parent glyph -OVERLAP_COMPOUND = 0x0400 # used by Apple in GX fonts -SCALED_COMPONENT_OFFSET = 0x0800 # composite designed to have the component offset scaled (designed for Apple) -UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS) - - -class Glyph(object): - - def __init__(self, data=""): - if not data: - # empty char - self.numberOfContours = 0 - return - self.data = data - - def compact(self, glyfTable, recalcBBoxes=True): - data = self.compile(glyfTable, recalcBBoxes) - self.__dict__.clear() - self.data = data - - def expand(self, glyfTable): - if not hasattr(self, "data"): - # already unpacked - return - if not self.data: - # empty char - self.numberOfContours = 0 - return - dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self) - del self.data - if self.isComposite(): - self.decompileComponents(data, glyfTable) - else: - self.decompileCoordinates(data) - - def compile(self, glyfTable, recalcBBoxes=True): - if hasattr(self, "data"): - return self.data - if self.numberOfContours == 0: - return "" - if recalcBBoxes: - self.recalcBounds(glyfTable) - data = sstruct.pack(glyphHeaderFormat, self) - if self.isComposite(): - data = data + self.compileComponents(glyfTable) - else: - data = data + self.compileCoordinates() - return data - - def toXML(self, writer, ttFont): - if self.isComposite(): - for compo in self.components: - compo.toXML(writer, ttFont) - if hasattr(self, "program"): - writer.begintag("instructions") - self.program.toXML(writer, ttFont) - writer.endtag("instructions") - writer.newline() - else: - last = 0 - for i in range(self.numberOfContours): - writer.begintag("contour") - writer.newline() - for j in 
range(last, self.endPtsOfContours[i] + 1): - writer.simpletag("pt", [ - ("x", self.coordinates[j][0]), - ("y", self.coordinates[j][1]), - ("on", self.flags[j] & flagOnCurve)]) - writer.newline() - last = self.endPtsOfContours[i] + 1 - writer.endtag("contour") - writer.newline() - if self.numberOfContours: - writer.begintag("instructions") - self.program.toXML(writer, ttFont) - writer.endtag("instructions") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "contour": - if self.numberOfContours < 0: - raise ttLib.TTLibError("can't mix composites and contours in glyph") - self.numberOfContours = self.numberOfContours + 1 - coordinates = GlyphCoordinates() - flags = [] - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name != "pt": - continue # ignore anything but "pt" - coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"]))) - flags.append(not not safeEval(attrs["on"])) - flags = array.array("B", flags) - if not hasattr(self, "coordinates"): - self.coordinates = coordinates - self.flags = flags - self.endPtsOfContours = [len(coordinates)-1] - else: - self.coordinates.extend (coordinates) - self.flags.extend(flags) - self.endPtsOfContours.append(len(self.coordinates)-1) - elif name == "component": - if self.numberOfContours > 0: - raise ttLib.TTLibError("can't mix composites and contours in glyph") - self.numberOfContours = -1 - if not hasattr(self, "components"): - self.components = [] - component = GlyphComponent() - self.components.append(component) - component.fromXML(name, attrs, content, ttFont) - elif name == "instructions": - self.program = ttProgram.Program() - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - self.program.fromXML(name, attrs, content, ttFont) - - def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1): - assert self.isComposite() - nContours = 0 - nPoints = 0 - for compo in 
self.components: - baseGlyph = glyfTable[compo.glyphName] - if baseGlyph.numberOfContours == 0: - continue - elif baseGlyph.numberOfContours > 0: - nP, nC = baseGlyph.getMaxpValues() - else: - nP, nC, maxComponentDepth = baseGlyph.getCompositeMaxpValues( - glyfTable, maxComponentDepth + 1) - nPoints = nPoints + nP - nContours = nContours + nC - return nPoints, nContours, maxComponentDepth - - def getMaxpValues(self): - assert self.numberOfContours > 0 - return len(self.coordinates), len(self.endPtsOfContours) - - def decompileComponents(self, data, glyfTable): - self.components = [] - more = 1 - haveInstructions = 0 - while more: - component = GlyphComponent() - more, haveInstr, data = component.decompile(data, glyfTable) - haveInstructions = haveInstructions | haveInstr - self.components.append(component) - if haveInstructions: - numInstructions, = struct.unpack(">h", data[:2]) - data = data[2:] - self.program = ttProgram.Program() - self.program.fromBytecode(data[:numInstructions]) - data = data[numInstructions:] - if len(data) >= 4: - warnings.warn("too much glyph data at the end of composite glyph: %d excess bytes" % len(data)) - - def decompileCoordinates(self, data): - endPtsOfContours = array.array("h") - endPtsOfContours.fromstring(data[:2*self.numberOfContours]) - if sys.byteorder != "big": - endPtsOfContours.byteswap() - self.endPtsOfContours = endPtsOfContours.tolist() - - data = data[2*self.numberOfContours:] - - instructionLength, = struct.unpack(">h", data[:2]) - data = data[2:] - self.program = ttProgram.Program() - self.program.fromBytecode(data[:instructionLength]) - data = data[instructionLength:] - nCoordinates = self.endPtsOfContours[-1] + 1 - flags, xCoordinates, yCoordinates = \ - self.decompileCoordinatesRaw(nCoordinates, data) - - # fill in repetitions and apply signs - self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates) - xIndex = 0 - yIndex = 0 - for i in range(nCoordinates): - flag = flags[i] - # x coordinate - if flag & 
flagXShort: - if flag & flagXsame: - x = xCoordinates[xIndex] - else: - x = -xCoordinates[xIndex] - xIndex = xIndex + 1 - elif flag & flagXsame: - x = 0 - else: - x = xCoordinates[xIndex] - xIndex = xIndex + 1 - # y coordinate - if flag & flagYShort: - if flag & flagYsame: - y = yCoordinates[yIndex] - else: - y = -yCoordinates[yIndex] - yIndex = yIndex + 1 - elif flag & flagYsame: - y = 0 - else: - y = yCoordinates[yIndex] - yIndex = yIndex + 1 - coordinates[i] = (x, y) - assert xIndex == len(xCoordinates) - assert yIndex == len(yCoordinates) - coordinates.relativeToAbsolute() - # discard all flags but for "flagOnCurve" - self.flags = array.array("B", (f & flagOnCurve for f in flags)) - - def decompileCoordinatesRaw(self, nCoordinates, data): - # unpack flags and prepare unpacking of coordinates - flags = array.array("B", [0] * nCoordinates) - # Warning: deep Python trickery going on. We use the struct module to unpack - # the coordinates. We build a format string based on the flags, so we can - # unpack the coordinates in one struct.unpack() call. - xFormat = ">" # big endian - yFormat = ">" # big endian - i = j = 0 - while True: - flag = byteord(data[i]) - i = i + 1 - repeat = 1 - if flag & flagRepeat: - repeat = byteord(data[i]) + 1 - i = i + 1 - for k in range(repeat): - if flag & flagXShort: - xFormat = xFormat + 'B' - elif not (flag & flagXsame): - xFormat = xFormat + 'h' - if flag & flagYShort: - yFormat = yFormat + 'B' - elif not (flag & flagYsame): - yFormat = yFormat + 'h' - flags[j] = flag - j = j + 1 - if j >= nCoordinates: - break - assert j == nCoordinates, "bad glyph flags" - data = data[i:] - # unpack raw coordinates, krrrrrr-tching! 
- xDataLen = struct.calcsize(xFormat) - yDataLen = struct.calcsize(yFormat) - if len(data) - (xDataLen + yDataLen) >= 4: - warnings.warn("too much glyph data: %d excess bytes" % (len(data) - (xDataLen + yDataLen))) - xCoordinates = struct.unpack(xFormat, data[:xDataLen]) - yCoordinates = struct.unpack(yFormat, data[xDataLen:xDataLen+yDataLen]) - return flags, xCoordinates, yCoordinates - - def compileComponents(self, glyfTable): - data = b"" - lastcomponent = len(self.components) - 1 - more = 1 - haveInstructions = 0 - for i in range(len(self.components)): - if i == lastcomponent: - haveInstructions = hasattr(self, "program") - more = 0 - compo = self.components[i] - data = data + compo.compile(more, haveInstructions, glyfTable) - if haveInstructions: - instructions = self.program.getBytecode() - data = data + struct.pack(">h", len(instructions)) + instructions - return data - - def compileCoordinates(self): - assert len(self.coordinates) == len(self.flags) - data = [] - endPtsOfContours = array.array("h", self.endPtsOfContours) - if sys.byteorder != "big": - endPtsOfContours.byteswap() - data.append(endPtsOfContours.tostring()) - instructions = self.program.getBytecode() - data.append(struct.pack(">h", len(instructions))) - data.append(instructions) - - deltas = self.coordinates.copy() - if deltas.isFloat(): - # Warn? - xPoints = [int(round(x)) for x in xPoints] - yPoints = [int(round(y)) for y in xPoints] - deltas.absoluteToRelative() - - # TODO(behdad): Add a configuration option for this? - deltas = self.compileDeltasGreedy(self.flags, deltas) - #deltas = self.compileDeltasOptimal(self.flags, deltas) - - data.extend(deltas) - return bytesjoin(data) - - def compileDeltasGreedy(self, flags, deltas): - # Implements greedy algorithm for packing coordinate deltas: - # uses shortest representation one coordinate at a time. 
- compressedflags = [] - xPoints = [] - yPoints = [] - lastflag = None - repeat = 0 - for flag,(x,y) in zip(flags, deltas): - # Oh, the horrors of TrueType - # do x - if x == 0: - flag = flag | flagXsame - elif -255 <= x <= 255: - flag = flag | flagXShort - if x > 0: - flag = flag | flagXsame - else: - x = -x - xPoints.append(bytechr(x)) - else: - xPoints.append(struct.pack(">h", x)) - # do y - if y == 0: - flag = flag | flagYsame - elif -255 <= y <= 255: - flag = flag | flagYShort - if y > 0: - flag = flag | flagYsame - else: - y = -y - yPoints.append(bytechr(y)) - else: - yPoints.append(struct.pack(">h", y)) - # handle repeating flags - if flag == lastflag and repeat != 255: - repeat = repeat + 1 - if repeat == 1: - compressedflags.append(flag) - else: - compressedflags[-2] = flag | flagRepeat - compressedflags[-1] = repeat - else: - repeat = 0 - compressedflags.append(flag) - lastflag = flag - compressedFlags = array.array("B", compressedflags).tostring() - compressedXs = bytesjoin(xPoints) - compressedYs = bytesjoin(yPoints) - return (compressedFlags, compressedXs, compressedYs) - - def compileDeltasOptimal(self, flags, deltas): - # Implements optimal, dynaic-programming, algorithm for packing coordinate - # deltas. The savings are negligible :(. 
- candidates = [] - bestTuple = None - bestCost = 0 - repeat = 0 - for flag,(x,y) in zip(flags, deltas): - # Oh, the horrors of TrueType - flag, coordBytes = flagBest(x, y, flag) - bestCost += 1 + coordBytes - newCandidates = [(bestCost, bestTuple, flag, coordBytes), - (bestCost+1, bestTuple, (flag|flagRepeat), coordBytes)] - for lastCost,lastTuple,lastFlag,coordBytes in candidates: - if lastCost + coordBytes <= bestCost + 1 and (lastFlag & flagRepeat) and (lastFlag < 0xff00) and flagSupports(lastFlag, flag): - if (lastFlag & 0xFF) == (flag|flagRepeat) and lastCost == bestCost + 1: - continue - newCandidates.append((lastCost + coordBytes, lastTuple, lastFlag+256, coordBytes)) - candidates = newCandidates - bestTuple = min(candidates, key=lambda t:t[0]) - bestCost = bestTuple[0] - - flags = [] - while bestTuple: - cost, bestTuple, flag, coordBytes = bestTuple - flags.append(flag) - flags.reverse() - - compressedFlags = array.array("B") - compressedXs = array.array("B") - compressedYs = array.array("B") - coords = iter(deltas) - ff = [] - for flag in flags: - repeatCount, flag = flag >> 8, flag & 0xFF - compressedFlags.append(flag) - if flag & flagRepeat: - assert(repeatCount > 0) - compressedFlags.append(repeatCount) - else: - assert(repeatCount == 0) - for i in range(1 + repeatCount): - x,y = next(coords) - flagEncodeCoords(flag, x, y, compressedXs, compressedYs) - ff.append(flag) - try: - next(coords) - raise Exception("internal error") - except StopIteration: - pass - compressedFlags = compressedFlags.tostring() - compressedXs = compressedXs.tostring() - compressedYs = compressedYs.tostring() - - return (compressedFlags, compressedXs, compressedYs) - - def recalcBounds(self, glyfTable): - coords, endPts, flags = self.getCoordinates(glyfTable) - if len(coords) > 0: - if 0: - # This branch calculates exact glyph outline bounds - # analytically, handling cases without on-curve - # extremas, etc. 
However, the glyf table header - # simply says that the bounds should be min/max x/y - # "for coordinate data", so I suppose that means no - # fancy thing here, just get extremas of all coord - # points (on and off). As such, this branch is - # disabled. - - # Collect on-curve points - onCurveCoords = [coords[j] for j in range(len(coords)) - if flags[j] & flagOnCurve] - # Add implicit on-curve points - start = 0 - for end in endPts: - last = end - for j in range(start, end + 1): - if not ((flags[j] | flags[last]) & flagOnCurve): - x = (coords[last][0] + coords[j][0]) / 2 - y = (coords[last][1] + coords[j][1]) / 2 - onCurveCoords.append((x,y)) - last = j - start = end + 1 - # Add bounds for curves without an explicit extrema - start = 0 - for end in endPts: - last = end - for j in range(start, end + 1): - if not (flags[j] & flagOnCurve): - next = j + 1 if j < end else start - bbox = calcBounds([coords[last], coords[next]]) - if not pointInRect(coords[j], bbox): - # Ouch! - warnings.warn("Outline has curve with implicit extrema.") - # Ouch! Find analytical curve bounds. 
- pthis = coords[j] - plast = coords[last] - if not (flags[last] & flagOnCurve): - plast = ((pthis[0]+plast[0])/2, (pthis[1]+plast[1])/2) - pnext = coords[next] - if not (flags[next] & flagOnCurve): - pnext = ((pthis[0]+pnext[0])/2, (pthis[1]+pnext[1])/2) - bbox = calcQuadraticBounds(plast, pthis, pnext) - onCurveCoords.append((bbox[0],bbox[1])) - onCurveCoords.append((bbox[2],bbox[3])) - last = j - start = end + 1 - - self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(onCurveCoords) - else: - self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(coords) - else: - self.xMin, self.yMin, self.xMax, self.yMax = (0, 0, 0, 0) - - def isComposite(self): - """Can be called on compact or expanded glyph.""" - if hasattr(self, "data") and self.data: - return struct.unpack(">h", self.data[:2])[0] == -1 - else: - return self.numberOfContours == -1 - - def __getitem__(self, componentIndex): - if not self.isComposite(): - raise ttLib.TTLibError("can't use glyph as sequence") - return self.components[componentIndex] - - def getCoordinates(self, glyfTable): - if self.numberOfContours > 0: - return self.coordinates, self.endPtsOfContours, self.flags - elif self.isComposite(): - # it's a composite - allCoords = GlyphCoordinates() - allFlags = array.array("B") - allEndPts = [] - for compo in self.components: - g = glyfTable[compo.glyphName] - coordinates, endPts, flags = g.getCoordinates(glyfTable) - if hasattr(compo, "firstPt"): - # move according to two reference points - x1,y1 = allCoords[compo.firstPt] - x2,y2 = coordinates[compo.secondPt] - move = x1-x2, y1-y2 - else: - move = compo.x, compo.y - - coordinates = GlyphCoordinates(coordinates) - if not hasattr(compo, "transform"): - coordinates.translate(move) - else: - apple_way = compo.flags & SCALED_COMPONENT_OFFSET - ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET - assert not (apple_way and ms_way) - if not (apple_way or ms_way): - scale_component_offset = SCALE_COMPONENT_OFFSET_DEFAULT # see top of this file - 
else: - scale_component_offset = apple_way - if scale_component_offset: - # the Apple way: first move, then scale (ie. scale the component offset) - coordinates.translate(move) - coordinates.transform(compo.transform) - else: - # the MS way: first scale, then move - coordinates.transform(compo.transform) - coordinates.translate(move) - offset = len(allCoords) - allEndPts.extend(e + offset for e in endPts) - allCoords.extend(coordinates) - allFlags.extend(flags) - return allCoords, allEndPts, allFlags - else: - return GlyphCoordinates(), [], array.array("B") - - def getComponentNames(self, glyfTable): - if not hasattr(self, "data"): - if self.isComposite(): - return [c.glyphName for c in self.components] - else: - return [] - - # Extract components without expanding glyph - - if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0: - return [] # Not composite - - data = self.data - i = 10 - components = [] - more = 1 - while more: - flags, glyphID = struct.unpack(">HH", data[i:i+4]) - i += 4 - flags = int(flags) - components.append(glyfTable.getGlyphName(int(glyphID))) - - if flags & ARG_1_AND_2_ARE_WORDS: i += 4 - else: i += 2 - if flags & WE_HAVE_A_SCALE: i += 2 - elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4 - elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8 - more = flags & MORE_COMPONENTS - - return components - - def trim(self, remove_hinting=False): - """ Remove padding and, if requested, hinting, from a glyph. - This works on both expanded and compacted glyphs, without - expanding it.""" - if not hasattr(self, "data"): - if remove_hinting: - self.program = ttProgram.Program() - self.program.fromBytecode([]) - # No padding to trim. 
- return - if not self.data: - return - numContours = struct.unpack(">h", self.data[:2])[0] - data = array.array("B", self.data) - i = 10 - if numContours >= 0: - i += 2 * numContours # endPtsOfContours - nCoordinates = ((data[i-2] << 8) | data[i-1]) + 1 - instructionLen = (data[i] << 8) | data[i+1] - if remove_hinting: - # Zero instruction length - data[i] = data [i+1] = 0 - i += 2 - if instructionLen: - # Splice it out - data = data[:i] + data[i+instructionLen:] - instructionLen = 0 - else: - i += 2 + instructionLen - - coordBytes = 0 - j = 0 - while True: - flag = data[i] - i = i + 1 - repeat = 1 - if flag & flagRepeat: - repeat = data[i] + 1 - i = i + 1 - xBytes = yBytes = 0 - if flag & flagXShort: - xBytes = 1 - elif not (flag & flagXsame): - xBytes = 2 - if flag & flagYShort: - yBytes = 1 - elif not (flag & flagYsame): - yBytes = 2 - coordBytes += (xBytes + yBytes) * repeat - j += repeat - if j >= nCoordinates: - break - assert j == nCoordinates, "bad glyph flags" - i += coordBytes - # Remove padding - data = data[:i] - else: - more = 1 - we_have_instructions = False - while more: - flags =(data[i] << 8) | data[i+1] - if remove_hinting: - flags &= ~WE_HAVE_INSTRUCTIONS - if flags & WE_HAVE_INSTRUCTIONS: - we_have_instructions = True - data[i+0] = flags >> 8 - data[i+1] = flags & 0xFF - i += 4 - flags = int(flags) - - if flags & ARG_1_AND_2_ARE_WORDS: i += 4 - else: i += 2 - if flags & WE_HAVE_A_SCALE: i += 2 - elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4 - elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8 - more = flags & MORE_COMPONENTS - if we_have_instructions: - instructionLen = (data[i] << 8) | data[i+1] - i += 2 + instructionLen - # Remove padding - data = data[:i] - - self.data = data.tostring() - - def removeHinting(self): - self.trim (remove_hinting=True) - - def draw(self, pen, glyfTable, offset=0): - - if self.isComposite(): - for component in self.components: - glyphName, transform = component.getComponentInfo() - pen.addComponent(glyphName, transform) - 
return - - coordinates, endPts, flags = self.getCoordinates(glyfTable) - if offset: - coordinates = coordinates.copy() - coordinates.translate((offset, 0)) - start = 0 - for end in endPts: - end = end + 1 - contour = coordinates[start:end] - cFlags = flags[start:end] - start = end - if 1 not in cFlags: - # There is not a single on-curve point on the curve, - # use pen.qCurveTo's special case by specifying None - # as the on-curve point. - contour.append(None) - pen.qCurveTo(*contour) - else: - # Shuffle the points so that contour the is guaranteed - # to *end* in an on-curve point, which we'll use for - # the moveTo. - firstOnCurve = cFlags.index(1) + 1 - contour = contour[firstOnCurve:] + contour[:firstOnCurve] - cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve] - pen.moveTo(contour[-1]) - while contour: - nextOnCurve = cFlags.index(1) + 1 - if nextOnCurve == 1: - pen.lineTo(contour[0]) - else: - pen.qCurveTo(*contour[:nextOnCurve]) - contour = contour[nextOnCurve:] - cFlags = cFlags[nextOnCurve:] - pen.closePath() - - def __ne__(self, other): - return not self.__eq__(other) - def __eq__(self, other): - if type(self) != type(other): - return NotImplemented - return self.__dict__ == other.__dict__ - - -class GlyphComponent(object): - - def __init__(self): - pass - - def getComponentInfo(self): - """Return the base glyph name and a transform.""" - # XXX Ignoring self.firstPt & self.lastpt for now: I need to implement - # something equivalent in fontTools.objects.glyph (I'd rather not - # convert it to an absolute offset, since it is valuable information). - # This method will now raise "AttributeError: x" on glyphs that use - # this TT feature. 
- if hasattr(self, "transform"): - [[xx, xy], [yx, yy]] = self.transform - trans = (xx, xy, yx, yy, self.x, self.y) - else: - trans = (1, 0, 0, 1, self.x, self.y) - return self.glyphName, trans - - def decompile(self, data, glyfTable): - flags, glyphID = struct.unpack(">HH", data[:4]) - self.flags = int(flags) - glyphID = int(glyphID) - self.glyphName = glyfTable.getGlyphName(int(glyphID)) - #print ">>", reprflag(self.flags) - data = data[4:] - - if self.flags & ARG_1_AND_2_ARE_WORDS: - if self.flags & ARGS_ARE_XY_VALUES: - self.x, self.y = struct.unpack(">hh", data[:4]) - else: - x, y = struct.unpack(">HH", data[:4]) - self.firstPt, self.secondPt = int(x), int(y) - data = data[4:] - else: - if self.flags & ARGS_ARE_XY_VALUES: - self.x, self.y = struct.unpack(">bb", data[:2]) - else: - x, y = struct.unpack(">BB", data[:2]) - self.firstPt, self.secondPt = int(x), int(y) - data = data[2:] - - if self.flags & WE_HAVE_A_SCALE: - scale, = struct.unpack(">h", data[:2]) - self.transform = [[fi2fl(scale,14), 0], [0, fi2fl(scale,14)]] # fixed 2.14 - data = data[2:] - elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE: - xscale, yscale = struct.unpack(">hh", data[:4]) - self.transform = [[fi2fl(xscale,14), 0], [0, fi2fl(yscale,14)]] # fixed 2.14 - data = data[4:] - elif self.flags & WE_HAVE_A_TWO_BY_TWO: - (xscale, scale01, - scale10, yscale) = struct.unpack(">hhhh", data[:8]) - self.transform = [[fi2fl(xscale,14), fi2fl(scale01,14)], - [fi2fl(scale10,14), fi2fl(yscale,14)]] # fixed 2.14 - data = data[8:] - more = self.flags & MORE_COMPONENTS - haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS - self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | - SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET | - NON_OVERLAPPING) - return more, haveInstructions, data - - def compile(self, more, haveInstructions, glyfTable): - data = b"" - - # reset all flags we will calculate ourselves - flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | - SCALED_COMPONENT_OFFSET | 
UNSCALED_COMPONENT_OFFSET | - NON_OVERLAPPING) - if more: - flags = flags | MORE_COMPONENTS - if haveInstructions: - flags = flags | WE_HAVE_INSTRUCTIONS - - if hasattr(self, "firstPt"): - if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255): - data = data + struct.pack(">BB", self.firstPt, self.secondPt) - else: - data = data + struct.pack(">HH", self.firstPt, self.secondPt) - flags = flags | ARG_1_AND_2_ARE_WORDS - else: - flags = flags | ARGS_ARE_XY_VALUES - if (-128 <= self.x <= 127) and (-128 <= self.y <= 127): - data = data + struct.pack(">bb", self.x, self.y) - else: - data = data + struct.pack(">hh", self.x, self.y) - flags = flags | ARG_1_AND_2_ARE_WORDS - - if hasattr(self, "transform"): - transform = [[fl2fi(x,14) for x in row] for row in self.transform] - if transform[0][1] or transform[1][0]: - flags = flags | WE_HAVE_A_TWO_BY_TWO - data = data + struct.pack(">hhhh", - transform[0][0], transform[0][1], - transform[1][0], transform[1][1]) - elif transform[0][0] != transform[1][1]: - flags = flags | WE_HAVE_AN_X_AND_Y_SCALE - data = data + struct.pack(">hh", - transform[0][0], transform[1][1]) - else: - flags = flags | WE_HAVE_A_SCALE - data = data + struct.pack(">h", - transform[0][0]) - - glyphID = glyfTable.getGlyphID(self.glyphName) - return struct.pack(">HH", flags, glyphID) + data - - def toXML(self, writer, ttFont): - attrs = [("glyphName", self.glyphName)] - if not hasattr(self, "firstPt"): - attrs = attrs + [("x", self.x), ("y", self.y)] - else: - attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)] - - if hasattr(self, "transform"): - transform = self.transform - if transform[0][1] or transform[1][0]: - attrs = attrs + [ - ("scalex", transform[0][0]), ("scale01", transform[0][1]), - ("scale10", transform[1][0]), ("scaley", transform[1][1]), - ] - elif transform[0][0] != transform[1][1]: - attrs = attrs + [ - ("scalex", transform[0][0]), ("scaley", transform[1][1]), - ] - else: - attrs = attrs + [("scale", 
transform[0][0])] - attrs = attrs + [("flags", hex(self.flags))] - writer.simpletag("component", attrs) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.glyphName = attrs["glyphName"] - if "firstPt" in attrs: - self.firstPt = safeEval(attrs["firstPt"]) - self.secondPt = safeEval(attrs["secondPt"]) - else: - self.x = safeEval(attrs["x"]) - self.y = safeEval(attrs["y"]) - if "scale01" in attrs: - scalex = safeEval(attrs["scalex"]) - scale01 = safeEval(attrs["scale01"]) - scale10 = safeEval(attrs["scale10"]) - scaley = safeEval(attrs["scaley"]) - self.transform = [[scalex, scale01], [scale10, scaley]] - elif "scalex" in attrs: - scalex = safeEval(attrs["scalex"]) - scaley = safeEval(attrs["scaley"]) - self.transform = [[scalex, 0], [0, scaley]] - elif "scale" in attrs: - scale = safeEval(attrs["scale"]) - self.transform = [[scale, 0], [0, scale]] - self.flags = safeEval(attrs["flags"]) - - def __ne__(self, other): - return not self.__eq__(other) - def __eq__(self, other): - if type(self) != type(other): - return NotImplemented - return self.__dict__ == other.__dict__ - -class GlyphCoordinates(object): - - def __init__(self, iterable=[]): - self._a = array.array("h") - self.extend(iterable) - - def isFloat(self): - return self._a.typecode == 'f' - - def _ensureFloat(self): - if self.isFloat(): - return - # The conversion to list() is to work around Jython bug - self._a = array.array("f", list(self._a)) - - def _checkFloat(self, p): - if any(isinstance(v, float) for v in p): - p = [int(v) if int(v) == v else v for v in p] - if any(isinstance(v, float) for v in p): - self._ensureFloat() - return p - - @staticmethod - def zeros(count): - return GlyphCoordinates([(0,0)] * count) - - def copy(self): - c = GlyphCoordinates() - c._a.extend(self._a) - return c - - def __len__(self): - return len(self._a) // 2 - - def __getitem__(self, k): - if isinstance(k, slice): - indices = range(*k.indices(len(self))) - return [self[i] for i in indices] - 
return self._a[2*k],self._a[2*k+1] - - def __setitem__(self, k, v): - if isinstance(k, slice): - indices = range(*k.indices(len(self))) - # XXX This only works if len(v) == len(indices) - # TODO Implement __delitem__ - for j,i in enumerate(indices): - self[i] = v[j] - return - v = self._checkFloat(v) - self._a[2*k],self._a[2*k+1] = v - - def __repr__(self): - return 'GlyphCoordinates(['+','.join(str(c) for c in self)+'])' - - def append(self, p): - p = self._checkFloat(p) - self._a.extend(tuple(p)) - - def extend(self, iterable): - for p in iterable: - p = self._checkFloat(p) - self._a.extend(p) - - def relativeToAbsolute(self): - a = self._a - x,y = 0,0 - for i in range(len(a) // 2): - a[2*i ] = x = a[2*i ] + x - a[2*i+1] = y = a[2*i+1] + y - - def absoluteToRelative(self): - a = self._a - x,y = 0,0 - for i in range(len(a) // 2): - dx = a[2*i ] - x - dy = a[2*i+1] - y - x = a[2*i ] - y = a[2*i+1] - a[2*i ] = dx - a[2*i+1] = dy - - def translate(self, p): - (x,y) = p - a = self._a - for i in range(len(a) // 2): - a[2*i ] += x - a[2*i+1] += y - - def transform(self, t): - a = self._a - for i in range(len(a) // 2): - x = a[2*i ] - y = a[2*i+1] - px = x * t[0][0] + y * t[1][0] - py = x * t[0][1] + y * t[1][1] - self[i] = (px, py) - - def __ne__(self, other): - return not self.__eq__(other) - def __eq__(self, other): - if type(self) != type(other): - return NotImplemented - return self._a == other._a - - -def reprflag(flag): - bin = "" - if isinstance(flag, str): - flag = byteord(flag) - while flag: - if flag & 0x01: - bin = "1" + bin - else: - bin = "0" + bin - flag = flag >> 1 - bin = (14 - len(bin)) * "0" + bin - return bin diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/G_M_A_P_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/G_M_A_P_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/G_M_A_P_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/G_M_A_P_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,128 +0,0 @@ -from 
__future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval -from . import DefaultTable - -GMAPFormat = """ - > # big endian - tableVersionMajor: H - tableVersionMinor: H - flags: H - recordsCount: H - recordsOffset: H - fontNameLength: H -""" -# psFontName is a byte string which follows the record above. This is zero padded -# to the beginning of the records array. The recordsOffsst is 32 bit aligned. - -GMAPRecordFormat1 = """ - > # big endian - UV: L - cid: H - gid: H - ggid: H - name: 32s -""" - - -class GMAPRecord(object): - def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""): - self.UV = uv - self.cid = cid - self.gid = gid - self.ggid = ggid - self.name = name - - def toXML(self, writer, ttFont): - writer.begintag("GMAPRecord") - writer.newline() - writer.simpletag("UV", value=self.UV) - writer.newline() - writer.simpletag("cid", value=self.cid) - writer.newline() - writer.simpletag("gid", value=self.gid) - writer.newline() - writer.simpletag("glyphletGid", value=self.gid) - writer.newline() - writer.simpletag("GlyphletName", value=self.name) - writer.newline() - writer.endtag("GMAPRecord") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - value = attrs["value"] - if name == "GlyphletName": - self.name = value - else: - setattr(self, name, safeEval(value)) - - def compile(self, ttFont): - if self.UV is None: - self.UV = 0 - nameLen = len(self.name) - if nameLen < 32: - self.name = self.name + "\0"*(32 - nameLen) - data = sstruct.pack(GMAPRecordFormat1, self) - return data - - def __repr__(self): - return "GMAPRecord[ UV: " + str(self.UV) + ", cid: " + str(self.cid) + ", gid: " + str(self.gid) + ", ggid: " + str(self.ggid) + ", Glyphlet Name: " + str(self.name) + " ]" - - -class table_G_M_A_P_(DefaultTable.DefaultTable): - - dependencies = [] - - def decompile(self, data, ttFont): - dummy, newData = 
sstruct.unpack2(GMAPFormat, data, self) - self.psFontName = tostr(newData[:self.fontNameLength]) - assert (self.recordsOffset % 4) == 0, "GMAP error: recordsOffset is not 32 bit aligned." - newData = data[self.recordsOffset:] - self.gmapRecords = [] - for i in range (self.recordsCount): - gmapRecord, newData = sstruct.unpack2(GMAPRecordFormat1, newData, GMAPRecord()) - gmapRecord.name = gmapRecord.name.strip('\0') - self.gmapRecords.append(gmapRecord) - - def compile(self, ttFont): - self.recordsCount = len(self.gmapRecords) - self.fontNameLength = len(self.psFontName) - self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4) - data = sstruct.pack(GMAPFormat, self) - data = data + tobytes(self.psFontName) - data = data + b"\0" * (self.recordsOffset - len(data)) - for record in self.gmapRecords: - data = data + record.compile(ttFont) - return data - - def toXML(self, writer, ttFont): - writer.comment("Most of this table will be recalculated by the compiler") - writer.newline() - formatstring, names, fixes = sstruct.getformat(GMAPFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - writer.newline() - writer.simpletag("PSFontName", value=self.psFontName) - writer.newline() - for gmapRecord in self.gmapRecords: - gmapRecord.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name == "GMAPRecord": - if not hasattr(self, "gmapRecords"): - self.gmapRecords = [] - gmapRecord = GMAPRecord() - self.gmapRecords.append(gmapRecord) - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - gmapRecord.fromXML(name, attrs, content, ttFont) - else: - value = attrs["value"] - if name == "PSFontName": - self.psFontName = value - else: - setattr(self, name, safeEval(value)) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/G_P_K_G_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/G_P_K_G_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/G_P_K_G_.py 
2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/G_P_K_G_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,129 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval, readHex -from . import DefaultTable -import sys -import array - -GPKGFormat = """ - > # big endian - version: H - flags: H - numGMAPs: H - numGlyplets: H -""" -# psFontName is a byte string which follows the record above. This is zero padded -# to the beginning of the records array. The recordsOffsst is 32 bit aligned. - - -class table_G_P_K_G_(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - dummy, newData = sstruct.unpack2(GPKGFormat, data, self) - - GMAPoffsets = array.array("I") - endPos = (self.numGMAPs+1) * 4 - GMAPoffsets.fromstring(newData[:endPos]) - if sys.byteorder != "big": - GMAPoffsets.byteswap() - self.GMAPs = [] - for i in range(self.numGMAPs): - start = GMAPoffsets[i] - end = GMAPoffsets[i+1] - self.GMAPs.append(data[start:end]) - pos = endPos - endPos = pos + (self.numGlyplets + 1)*4 - glyphletOffsets = array.array("I") - glyphletOffsets.fromstring(newData[pos:endPos]) - if sys.byteorder != "big": - glyphletOffsets.byteswap() - self.glyphlets = [] - for i in range(self.numGlyplets): - start = glyphletOffsets[i] - end = glyphletOffsets[i+1] - self.glyphlets.append(data[start:end]) - - def compile(self, ttFont): - self.numGMAPs = len(self.GMAPs) - self.numGlyplets = len(self.glyphlets) - GMAPoffsets = [0]*(self.numGMAPs + 1) - glyphletOffsets = [0]*(self.numGlyplets + 1) - - dataList =[ sstruct.pack(GPKGFormat, self)] - - pos = len(dataList[0]) + (self.numGMAPs + 1)*4 + (self.numGlyplets + 1)*4 - GMAPoffsets[0] = pos - for i in range(1, self.numGMAPs +1): - pos += len(self.GMAPs[i-1]) - GMAPoffsets[i] = pos - gmapArray = array.array("I", GMAPoffsets) - if sys.byteorder != "big": - 
gmapArray.byteswap() - dataList.append(gmapArray.tostring()) - - glyphletOffsets[0] = pos - for i in range(1, self.numGlyplets +1): - pos += len(self.glyphlets[i-1]) - glyphletOffsets[i] = pos - glyphletArray = array.array("I", glyphletOffsets) - if sys.byteorder != "big": - glyphletArray.byteswap() - dataList.append(glyphletArray.tostring()) - dataList += self.GMAPs - dataList += self.glyphlets - data = bytesjoin(dataList) - return data - - def toXML(self, writer, ttFont): - writer.comment("Most of this table will be recalculated by the compiler") - writer.newline() - formatstring, names, fixes = sstruct.getformat(GPKGFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - writer.newline() - - writer.begintag("GMAPs") - writer.newline() - for gmapData in self.GMAPs: - writer.begintag("hexdata") - writer.newline() - writer.dumphex(gmapData) - writer.endtag("hexdata") - writer.newline() - writer.endtag("GMAPs") - writer.newline() - - writer.begintag("glyphlets") - writer.newline() - for glyphletData in self.glyphlets: - writer.begintag("hexdata") - writer.newline() - writer.dumphex(glyphletData) - writer.endtag("hexdata") - writer.newline() - writer.endtag("glyphlets") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "GMAPs": - if not hasattr(self, "GMAPs"): - self.GMAPs = [] - for element in content: - if isinstance(element, basestring): - continue - itemName, itemAttrs, itemContent = element - if itemName == "hexdata": - self.GMAPs.append(readHex(itemContent)) - elif name == "glyphlets": - if not hasattr(self, "glyphlets"): - self.glyphlets = [] - for element in content: - if isinstance(element, basestring): - continue - itemName, itemAttrs, itemContent = element - if itemName == "hexdata": - self.glyphlets.append(readHex(itemContent)) - else: - setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/G_P_O_S_.py 
fonttools-3.21.2/Tools/fontTools/ttLib/tables/G_P_O_S_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/G_P_O_S_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/G_P_O_S_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from .otBase import BaseTTXConverter - - -class table_G_P_O_S_(BaseTTXConverter): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/G_S_U_B_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/G_S_U_B_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/G_S_U_B_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/G_S_U_B_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from .otBase import BaseTTXConverter - - -class table_G_S_U_B_(BaseTTXConverter): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_g_v_a_r.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_g_v_a_r.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_g_v_a_r.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_g_v_a_r.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,717 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib -from fontTools.misc import sstruct -from fontTools.misc.fixedTools import fixedToFloat, floatToFixed -from fontTools.misc.textTools import safeEval -from fontTools.ttLib import TTLibError -from . 
import DefaultTable -import array -import io -import sys -import struct - -# Apple's documentation of 'gvar': -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html -# -# FreeType2 source code for parsing 'gvar': -# http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/src/truetype/ttgxvar.c - -GVAR_HEADER_FORMAT = """ - > # big endian - version: H - reserved: H - axisCount: H - sharedCoordCount: H - offsetToCoord: I - glyphCount: H - flags: H - offsetToData: I -""" - -GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT) - -TUPLES_SHARE_POINT_NUMBERS = 0x8000 -TUPLE_COUNT_MASK = 0x0fff - -EMBEDDED_TUPLE_COORD = 0x8000 -INTERMEDIATE_TUPLE = 0x4000 -PRIVATE_POINT_NUMBERS = 0x2000 -TUPLE_INDEX_MASK = 0x0fff - -DELTAS_ARE_ZERO = 0x80 -DELTAS_ARE_WORDS = 0x40 -DELTA_RUN_COUNT_MASK = 0x3f - -POINTS_ARE_WORDS = 0x80 -POINT_RUN_COUNT_MASK = 0x7f - - -class table__g_v_a_r(DefaultTable.DefaultTable): - - dependencies = ["fvar", "glyf"] - - def compile(self, ttFont): - axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - - sharedCoords = self.compileSharedCoords_(axisTags) - sharedCoordIndices = {coord:i for i, coord in enumerate(sharedCoords)} - sharedCoordSize = sum([len(c) for c in sharedCoords]) - - compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedCoordIndices) - offset = 0 - offsets = [] - for glyph in compiledGlyphs: - offsets.append(offset) - offset += len(glyph) - offsets.append(offset) - compiledOffsets, tableFormat = self.compileOffsets_(offsets) - - header = {} - header["version"] = self.version - header["reserved"] = self.reserved - header["axisCount"] = len(axisTags) - header["sharedCoordCount"] = len(sharedCoords) - header["offsetToCoord"] = GVAR_HEADER_SIZE + len(compiledOffsets) - header["glyphCount"] = len(compiledGlyphs) - header["flags"] = tableFormat - header["offsetToData"] = header["offsetToCoord"] + sharedCoordSize - compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header) - - result = 
[compiledHeader, compiledOffsets] - result.extend(sharedCoords) - result.extend(compiledGlyphs) - return bytesjoin(result) - - def compileSharedCoords_(self, axisTags): - coordCount = {} - for variations in self.variations.values(): - for gvar in variations: - coord = gvar.compileCoord(axisTags) - coordCount[coord] = coordCount.get(coord, 0) + 1 - sharedCoords = [(count, coord) for (coord, count) in coordCount.items() if count > 1] - sharedCoords.sort(reverse=True) - MAX_NUM_SHARED_COORDS = TUPLE_INDEX_MASK + 1 - sharedCoords = sharedCoords[:MAX_NUM_SHARED_COORDS] - return [c[1] for c in sharedCoords] # Strip off counts. - - def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices): - result = [] - for glyphName in ttFont.getGlyphOrder(): - glyph = ttFont["glyf"][glyphName] - numPointsInGlyph = self.getNumPoints_(glyph) - result.append(self.compileGlyph_(glyphName, numPointsInGlyph, axisTags, sharedCoordIndices)) - return result - - def compileGlyph_(self, glyphName, numPointsInGlyph, axisTags, sharedCoordIndices): - variations = self.variations.get(glyphName, []) - variations = [v for v in variations if v.hasImpact()] - if len(variations) == 0: - return b"" - - # Each glyph variation tuples modifies a set of control points. To indicate - # which exact points are getting modified, a single tuple can either refer - # to a shared set of points, or the tuple can supply its private point numbers. - # Because the impact of sharing can be positive (no need for a private point list) - # or negative (need to supply 0,0 deltas for unused points), it is not obvious - # how to determine which tuples should take their points from the shared - # pool versus have their own. Perhaps we should resort to brute force, - # and try all combinations? However, if a glyph has n variation tuples, - # we would need to try 2^n combinations (because each tuple may or may not - # be part of the shared set). How many variations tuples do glyphs have? 
- # - # Skia.ttf: {3: 1, 5: 11, 6: 41, 7: 62, 8: 387, 13: 1, 14: 3} - # JamRegular.ttf: {3: 13, 4: 122, 5: 1, 7: 4, 8: 1, 9: 1, 10: 1} - # BuffaloGalRegular.ttf: {1: 16, 2: 13, 4: 2, 5: 4, 6: 19, 7: 1, 8: 3, 9: 18} - # (Reading example: In Skia.ttf, 41 glyphs have 6 variation tuples). - # - # Is this even worth optimizing? If we never use a shared point list, - # the private lists will consume 112K for Skia, 5K for BuffaloGalRegular, - # and 15K for JamRegular. If we always use a shared point list, - # the shared lists will consume 16K for Skia, 3K for BuffaloGalRegular, - # and 10K for JamRegular. However, in the latter case the delta arrays - # will become larger, but I haven't yet measured by how much. From - # gut feeling (which may be wrong), the optimum is to share some but - # not all points; however, then we would need to try all combinations. - # - # For the time being, we try two variants and then pick the better one: - # (a) each tuple supplies its own private set of points; - # (b) all tuples refer to a shared set of points, which consists of - # "every control point in the glyph". - allPoints = set(range(numPointsInGlyph)) - tuples = [] - data = [] - someTuplesSharePoints = False - for gvar in variations: - privateTuple, privateData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) - sharedTuple, sharedData = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=allPoints) - # TODO: If we use shared points, Apple MacOS X 10.9.5 cannot display our fonts. - # This is probably a problem with our code; find the problem and fix it. 
- #if (len(sharedTuple) + len(sharedData)) < (len(privateTuple) + len(privateData)): - if False: - tuples.append(sharedTuple) - data.append(sharedData) - someTuplesSharePoints = True - else: - tuples.append(privateTuple) - data.append(privateData) - if someTuplesSharePoints: - data = bytechr(0) + bytesjoin(data) # 0x00 = "all points in glyph" - tupleCount = TUPLES_SHARE_POINT_NUMBERS | len(tuples) - else: - data = bytesjoin(data) - tupleCount = len(tuples) - tuples = bytesjoin(tuples) - result = struct.pack(">HH", tupleCount, 4 + len(tuples)) + tuples + data - if len(result) % 2 != 0: - result = result + b"\0" # padding - return result - - def decompile(self, data, ttFont): - axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - glyphs = ttFont.getGlyphOrder() - sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self) - assert len(glyphs) == self.glyphCount - assert len(axisTags) == self.axisCount - offsets = self.decompileOffsets_(data[GVAR_HEADER_SIZE:], tableFormat=(self.flags & 1), glyphCount=self.glyphCount) - sharedCoords = self.decompileSharedCoords_(axisTags, data) - self.variations = {} - for i in range(self.glyphCount): - glyphName = glyphs[i] - glyph = ttFont["glyf"][glyphName] - numPointsInGlyph = self.getNumPoints_(glyph) - gvarData = data[self.offsetToData + offsets[i] : self.offsetToData + offsets[i + 1]] - self.variations[glyphName] = \ - self.decompileGlyph_(numPointsInGlyph, sharedCoords, axisTags, gvarData) - - def decompileSharedCoords_(self, axisTags, data): - result, _pos = GlyphVariation.decompileCoords_(axisTags, self.sharedCoordCount, data, self.offsetToCoord) - return result - - @staticmethod - def decompileOffsets_(data, tableFormat, glyphCount): - if tableFormat == 0: - # Short format: array of UInt16 - offsets = array.array("H") - offsetsSize = (glyphCount + 1) * 2 - else: - # Long format: array of UInt32 - offsets = array.array("I") - offsetsSize = (glyphCount + 1) * 4 - offsets.fromstring(data[0 : offsetsSize]) - if 
sys.byteorder != "big": - offsets.byteswap() - - # In the short format, offsets need to be multiplied by 2. - # This is not documented in Apple's TrueType specification, - # but can be inferred from the FreeType implementation, and - # we could verify it with two sample GX fonts. - if tableFormat == 0: - offsets = [off * 2 for off in offsets] - - return offsets - - @staticmethod - def compileOffsets_(offsets): - """Packs a list of offsets into a 'gvar' offset table. - - Returns a pair (bytestring, tableFormat). Bytestring is the - packed offset table. Format indicates whether the table - uses short (tableFormat=0) or long (tableFormat=1) integers. - The returned tableFormat should get packed into the flags field - of the 'gvar' header. - """ - assert len(offsets) >= 2 - for i in range(1, len(offsets)): - assert offsets[i - 1] <= offsets[i] - if max(offsets) <= 0xffff * 2: - packed = array.array("H", [n >> 1 for n in offsets]) - tableFormat = 0 - else: - packed = array.array("I", offsets) - tableFormat = 1 - if sys.byteorder != "big": - packed.byteswap() - return (packed.tostring(), tableFormat) - - def decompileGlyph_(self, numPointsInGlyph, sharedCoords, axisTags, data): - if len(data) < 4: - return [] - numAxes = len(axisTags) - tuples = [] - flags, offsetToData = struct.unpack(">HH", data[:4]) - pos = 4 - dataPos = offsetToData - if (flags & TUPLES_SHARE_POINT_NUMBERS) != 0: - sharedPoints, dataPos = GlyphVariation.decompilePoints_(numPointsInGlyph, data, dataPos) - else: - sharedPoints = [] - for _ in range(flags & TUPLE_COUNT_MASK): - dataSize, flags = struct.unpack(">HH", data[pos:pos+4]) - tupleSize = GlyphVariation.getTupleSize_(flags, numAxes) - tupleData = data[pos : pos + tupleSize] - pointDeltaData = data[dataPos : dataPos + dataSize] - tuples.append(self.decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, tupleData, pointDeltaData)) - pos += tupleSize - dataPos += dataSize - return tuples - - @staticmethod - def 
decompileTuple_(numPointsInGlyph, sharedCoords, sharedPoints, axisTags, data, tupleData): - flags = struct.unpack(">H", data[2:4])[0] - - pos = 4 - if (flags & EMBEDDED_TUPLE_COORD) == 0: - coord = sharedCoords[flags & TUPLE_INDEX_MASK] - else: - coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - if (flags & INTERMEDIATE_TUPLE) != 0: - minCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - maxCoord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - else: - minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) - axes = {} - for axis in axisTags: - coords = minCoord[axis], coord[axis], maxCoord[axis] - if coords != (0.0, 0.0, 0.0): - axes[axis] = coords - pos = 0 - if (flags & PRIVATE_POINT_NUMBERS) != 0: - points, pos = GlyphVariation.decompilePoints_(numPointsInGlyph, tupleData, pos) - else: - points = sharedPoints - deltas_x, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) - deltas_y, pos = GlyphVariation.decompileDeltas_(len(points), tupleData, pos) - deltas = [None] * numPointsInGlyph - for p, x, y in zip(points, deltas_x, deltas_y): - deltas[p] = (x, y) - return GlyphVariation(axes, deltas) - - @staticmethod - def computeMinMaxCoord_(coord): - minCoord = {} - maxCoord = {} - for (axis, value) in coord.items(): - minCoord[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - maxCoord[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - return (minCoord, maxCoord) - - def toXML(self, writer, ttFont, progress=None): - writer.simpletag("version", value=self.version) - writer.newline() - writer.simpletag("reserved", value=self.reserved) - writer.newline() - axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] - for glyphName in ttFont.getGlyphOrder(): - variations = self.variations.get(glyphName) - if not variations: - continue - writer.begintag("glyphVariations", glyph=glyphName) - writer.newline() - for gvar in variations: - gvar.toXML(writer, axisTags) - writer.endtag("glyphVariations") - 
writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.version = safeEval(attrs["value"]) - elif name == "reserved": - self.reserved = safeEval(attrs["value"]) - elif name == "glyphVariations": - if not hasattr(self, "variations"): - self.variations = {} - glyphName = attrs["glyph"] - glyph = ttFont["glyf"][glyphName] - numPointsInGlyph = self.getNumPoints_(glyph) - glyphVariations = [] - for element in content: - if isinstance(element, tuple): - name, attrs, content = element - if name == "tuple": - gvar = GlyphVariation({}, [None] * numPointsInGlyph) - glyphVariations.append(gvar) - for tupleElement in content: - if isinstance(tupleElement, tuple): - tupleName, tupleAttrs, tupleContent = tupleElement - gvar.fromXML(tupleName, tupleAttrs, tupleContent) - self.variations[glyphName] = glyphVariations - - @staticmethod - def getNumPoints_(glyph): - NUM_PHANTOM_POINTS = 4 - if glyph.isComposite(): - return len(glyph.components) + NUM_PHANTOM_POINTS - else: - # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute. - return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS - - -class GlyphVariation(object): - def __init__(self, axes, coordinates): - self.axes = axes - self.coordinates = coordinates - - def __repr__(self): - axes = ",".join(sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])) - return "" % (axes, self.coordinates) - - def __eq__(self, other): - return self.coordinates == other.coordinates and self.axes == other.axes - - def getUsedPoints(self): - result = set() - for i, point in enumerate(self.coordinates): - if point is not None: - result.add(i) - return result - - def hasImpact(self): - """Returns True if this GlyphVariation has any visible impact. - - If the result is False, the GlyphVariation can be omitted from the font - without making any visible difference. 
- """ - for c in self.coordinates: - if c is not None: - return True - return False - - def toXML(self, writer, axisTags): - writer.begintag("tuple") - writer.newline() - for axis in axisTags: - value = self.axes.get(axis) - if value is not None: - minValue, value, maxValue = value - defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - if minValue == defaultMinValue and maxValue == defaultMaxValue: - writer.simpletag("coord", axis=axis, value=value) - else: - writer.simpletag("coord", axis=axis, value=value, min=minValue, max=maxValue) - writer.newline() - wrote_any_points = False - for i, point in enumerate(self.coordinates): - if point is not None: - writer.simpletag("delta", pt=i, x=point[0], y=point[1]) - writer.newline() - wrote_any_points = True - if not wrote_any_points: - writer.comment("no deltas") - writer.newline() - writer.endtag("tuple") - writer.newline() - - def fromXML(self, name, attrs, _content): - if name == "coord": - axis = attrs["axis"] - value = float(attrs["value"]) - defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - minValue = float(attrs.get("min", defaultMinValue)) - maxValue = float(attrs.get("max", defaultMaxValue)) - self.axes[axis] = (minValue, value, maxValue) - elif name == "delta": - point = safeEval(attrs["pt"]) - x = safeEval(attrs["x"]) - y = safeEval(attrs["y"]) - self.coordinates[point] = (x, y) - - def compile(self, axisTags, sharedCoordIndices, sharedPoints): - tupleData = [] - - coord = self.compileCoord(axisTags) - if coord in sharedCoordIndices: - flags = sharedCoordIndices[coord] - else: - flags = EMBEDDED_TUPLE_COORD - tupleData.append(coord) - - intermediateCoord = self.compileIntermediateCoord(axisTags) - if intermediateCoord is not None: - flags |= INTERMEDIATE_TUPLE - tupleData.append(intermediateCoord) - - if sharedPoints is not None: - auxData = 
self.compileDeltas(sharedPoints) - else: - flags |= PRIVATE_POINT_NUMBERS - points = self.getUsedPoints() - numPointsInGlyph = len(self.coordinates) - auxData = self.compilePoints(points, numPointsInGlyph) + self.compileDeltas(points) - - tupleData = struct.pack('>HH', len(auxData), flags) + bytesjoin(tupleData) - return (tupleData, auxData) - - def compileCoord(self, axisTags): - result = [] - for axis in axisTags: - _minValue, value, _maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) - result.append(struct.pack(">h", floatToFixed(value, 14))) - return bytesjoin(result) - - def compileIntermediateCoord(self, axisTags): - needed = False - for axis in axisTags: - minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) - defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0 - defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7 - if (minValue != defaultMinValue) or (maxValue != defaultMaxValue): - needed = True - break - if not needed: - return None - minCoords = [] - maxCoords = [] - for axis in axisTags: - minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0)) - minCoords.append(struct.pack(">h", floatToFixed(minValue, 14))) - maxCoords.append(struct.pack(">h", floatToFixed(maxValue, 14))) - return bytesjoin(minCoords + maxCoords) - - @staticmethod - def decompileCoord_(axisTags, data, offset): - coord = {} - pos = offset - for axis in axisTags: - coord[axis] = fixedToFloat(struct.unpack(">h", data[pos:pos+2])[0], 14) - pos += 2 - return coord, pos - - @staticmethod - def decompileCoords_(axisTags, numCoords, data, offset): - result = [] - pos = offset - for _ in range(numCoords): - coord, pos = GlyphVariation.decompileCoord_(axisTags, data, pos) - result.append(coord) - return result, pos - - @staticmethod - def compilePoints(points, numPointsInGlyph): - # If the set consists of all points in the glyph, it gets encoded with - # a special encoding: a single zero byte. 
- if len(points) == numPointsInGlyph: - return b"\0" - - # In the 'gvar' table, the packing of point numbers is a little surprising. - # It consists of multiple runs, each being a delta-encoded list of integers. - # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as - # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1. - # There are two types of runs, with values being either 8 or 16 bit unsigned - # integers. - points = list(points) - points.sort() - numPoints = len(points) - - # The binary representation starts with the total number of points in the set, - # encoded into one or two bytes depending on the value. - if numPoints < 0x80: - result = [bytechr(numPoints)] - else: - result = [bytechr((numPoints >> 8) | 0x80) + bytechr(numPoints & 0xff)] - - MAX_RUN_LENGTH = 127 - pos = 0 - while pos < numPoints: - run = io.BytesIO() - runLength = 0 - lastValue = 0 - useByteEncoding = (points[pos] <= 0xff) - while pos < numPoints and runLength <= MAX_RUN_LENGTH: - curValue = points[pos] - delta = curValue - lastValue - if useByteEncoding and delta > 0xff: - # we need to start a new run (which will not use byte encoding) - break - if useByteEncoding: - run.write(bytechr(delta)) - else: - run.write(bytechr(delta >> 8)) - run.write(bytechr(delta & 0xff)) - lastValue = curValue - pos += 1 - runLength += 1 - if useByteEncoding: - runHeader = bytechr(runLength - 1) - else: - runHeader = bytechr((runLength - 1) | POINTS_ARE_WORDS) - result.append(runHeader) - result.append(run.getvalue()) - - return bytesjoin(result) - - @staticmethod - def decompilePoints_(numPointsInGlyph, data, offset): - """(numPointsInGlyph, data, offset) --> ([point1, point2, ...], newOffset)""" - pos = offset - numPointsInData = byteord(data[pos]) - pos += 1 - if (numPointsInData & POINTS_ARE_WORDS) != 0: - numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | byteord(data[pos]) - pos += 1 - if numPointsInData == 0: - return 
(range(numPointsInGlyph), pos) - result = [] - while len(result) < numPointsInData: - runHeader = byteord(data[pos]) - pos += 1 - numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1 - point = 0 - if (runHeader & POINTS_ARE_WORDS) == 0: - for _ in range(numPointsInRun): - point += byteord(data[pos]) - pos += 1 - result.append(point) - else: - for _ in range(numPointsInRun): - point += struct.unpack(">H", data[pos:pos+2])[0] - pos += 2 - result.append(point) - if max(result) >= numPointsInGlyph: - raise TTLibError("malformed 'gvar' table") - return (result, pos) - - def compileDeltas(self, points): - deltaX = [] - deltaY = [] - for p in sorted(list(points)): - c = self.coordinates[p] - if c is not None: - deltaX.append(c[0]) - deltaY.append(c[1]) - return self.compileDeltaValues_(deltaX) + self.compileDeltaValues_(deltaY) - - @staticmethod - def compileDeltaValues_(deltas): - """[value1, value2, value3, ...] --> bytestring - - Emits a sequence of runs. Each run starts with a - byte-sized header whose 6 least significant bits - (header & 0x3F) indicate how many values are encoded - in this run. The stored length is the actual length - minus one; run lengths are thus in the range [1..64]. - If the header byte has its most significant bit (0x80) - set, all values in this run are zero, and no data - follows. Otherwise, the header byte is followed by - ((header & 0x3F) + 1) signed values. If (header & - 0x40) is clear, the delta values are stored as signed - bytes; if (header & 0x40) is set, the delta values are - signed 16-bit integers. - """ # Explaining the format because the 'gvar' spec is hard to understand. 
- stream = io.BytesIO() - pos = 0 - while pos < len(deltas): - value = deltas[pos] - if value == 0: - pos = GlyphVariation.encodeDeltaRunAsZeroes_(deltas, pos, stream) - elif value >= -128 and value <= 127: - pos = GlyphVariation.encodeDeltaRunAsBytes_(deltas, pos, stream) - else: - pos = GlyphVariation.encodeDeltaRunAsWords_(deltas, pos, stream) - return stream.getvalue() - - @staticmethod - def encodeDeltaRunAsZeroes_(deltas, offset, stream): - runLength = 0 - pos = offset - numDeltas = len(deltas) - while pos < numDeltas and runLength < 64 and deltas[pos] == 0: - pos += 1 - runLength += 1 - assert runLength >= 1 and runLength <= 64 - stream.write(bytechr(DELTAS_ARE_ZERO | (runLength - 1))) - return pos - - @staticmethod - def encodeDeltaRunAsBytes_(deltas, offset, stream): - runLength = 0 - pos = offset - numDeltas = len(deltas) - while pos < numDeltas and runLength < 64: - value = deltas[pos] - if value < -128 or value > 127: - break - # Within a byte-encoded run of deltas, a single zero - # is best stored literally as 0x00 value. However, - # if are two or more zeroes in a sequence, it is - # better to start a new run. For example, the sequence - # of deltas [15, 15, 0, 15, 15] becomes 6 bytes - # (04 0F 0F 00 0F 0F) when storing the zero value - # literally, but 7 bytes (01 0F 0F 80 01 0F 0F) - # when starting a new run. - if value == 0 and pos+1 < numDeltas and deltas[pos+1] == 0: - break - pos += 1 - runLength += 1 - assert runLength >= 1 and runLength <= 64 - stream.write(bytechr(runLength - 1)) - for i in range(offset, pos): - stream.write(struct.pack('b', deltas[i])) - return pos - - @staticmethod - def encodeDeltaRunAsWords_(deltas, offset, stream): - runLength = 0 - pos = offset - numDeltas = len(deltas) - while pos < numDeltas and runLength < 64: - value = deltas[pos] - # Within a word-encoded run of deltas, it is easiest - # to start a new run (with a different encoding) - # whenever we encounter a zero value. 
For example, - # the sequence [0x6666, 0, 0x7777] needs 7 bytes when - # storing the zero literally (42 66 66 00 00 77 77), - # and equally 7 bytes when starting a new run - # (40 66 66 80 40 77 77). - if value == 0: - break - - # Within a word-encoded run of deltas, a single value - # in the range (-128..127) should be encoded literally - # because it is more compact. For example, the sequence - # [0x6666, 2, 0x7777] becomes 7 bytes when storing - # the value literally (42 66 66 00 02 77 77), but 8 bytes - # when starting a new run (40 66 66 00 02 40 77 77). - isByteEncodable = lambda value: value >= -128 and value <= 127 - if isByteEncodable(value) and pos+1 < numDeltas and isByteEncodable(deltas[pos+1]): - break - pos += 1 - runLength += 1 - assert runLength >= 1 and runLength <= 64 - stream.write(bytechr(DELTAS_ARE_WORDS | (runLength - 1))) - for i in range(offset, pos): - stream.write(struct.pack('>h', deltas[i])) - return pos - - @staticmethod - def decompileDeltas_(numDeltas, data, offset): - """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)""" - result = [] - pos = offset - while len(result) < numDeltas: - runHeader = byteord(data[pos]) - pos += 1 - numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1 - if (runHeader & DELTAS_ARE_ZERO) != 0: - result.extend([0] * numDeltasInRun) - elif (runHeader & DELTAS_ARE_WORDS) != 0: - for _ in range(numDeltasInRun): - result.append(struct.unpack(">h", data[pos:pos+2])[0]) - pos += 2 - else: - for _ in range(numDeltasInRun): - result.append(struct.unpack(">b", data[pos:pos+1])[0]) - pos += 1 - assert len(result) == numDeltas - return (result, pos) - - @staticmethod - def getTupleSize_(flags, axisCount): - size = 4 - if (flags & EMBEDDED_TUPLE_COORD) != 0: - size += axisCount * 2 - if (flags & INTERMEDIATE_TUPLE) != 0: - size += axisCount * 4 - return size diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_g_v_a_r_test.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_g_v_a_r_test.py --- 
fonttools-3.0/Tools/fontTools/ttLib/tables/_g_v_a_r_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_g_v_a_r_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,539 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr, hexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r, GlyphVariation -import random -import unittest - -def hexencode(s): - h = hexStr(s).upper() - return ' '.join([h[i:i+2] for i in range(0, len(h), 2)]) - -# Glyph variation table of uppercase I in the Skia font, as printed in Apple's -# TrueType spec. The actual Skia font uses a different table for uppercase I. -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html -SKIA_GVAR_I = deHexStr( - "00 08 00 24 00 33 20 00 00 15 20 01 00 1B 20 02 " - "00 24 20 03 00 15 20 04 00 26 20 07 00 0D 20 06 " - "00 1A 20 05 00 40 01 01 01 81 80 43 FF 7E FF 7E " - "FF 7E FF 7E 00 81 45 01 01 01 03 01 04 01 04 01 " - "04 01 02 80 40 00 82 81 81 04 3A 5A 3E 43 20 81 " - "04 0E 40 15 45 7C 83 00 0D 9E F3 F2 F0 F0 F0 F0 " - "F3 9E A0 A1 A1 A1 9F 80 00 91 81 91 00 0D 0A 0A " - "09 0A 0A 0A 0A 0A 0A 0A 0A 0A 0A 0B 80 00 15 81 " - "81 00 C4 89 00 C4 83 00 0D 80 99 98 96 96 96 96 " - "99 80 82 83 83 83 81 80 40 FF 18 81 81 04 E6 F9 " - "10 21 02 81 04 E8 E5 EB 4D DA 83 00 0D CE D3 D4 " - "D3 D3 D3 D5 D2 CE CC CD CD CD CD 80 00 A1 81 91 " - "00 0D 07 03 04 02 02 02 03 03 07 07 08 08 08 07 " - "80 00 09 81 81 00 28 40 00 A4 02 24 24 66 81 04 " - "08 FA FA FA 28 83 00 82 02 FF FF FF 83 02 01 01 " - "01 84 91 00 80 06 07 08 08 08 08 0A 07 80 03 FE " - "FF FF FF 81 00 08 81 82 02 EE EE EE 8B 6D 00") - -# Shared coordinates in the Skia font, as printed in Apple's TrueType spec. 
-SKIA_SHARED_COORDS = deHexStr( - "40 00 00 00 C0 00 00 00 00 00 40 00 00 00 C0 00 " - "C0 00 C0 00 40 00 C0 00 40 00 40 00 C0 00 40 00") - - -class GlyphVariationTableTest(unittest.TestCase): - def test_compileOffsets_shortFormat(self): - self.assertEqual((deHexStr("00 00 00 02 FF C0"), 0), - table__g_v_a_r.compileOffsets_([0, 4, 0x1ff80])) - - def test_compileOffsets_longFormat(self): - self.assertEqual((deHexStr("00 00 00 00 00 00 00 04 CA FE BE EF"), 1), - table__g_v_a_r.compileOffsets_([0, 4, 0xCAFEBEEF])) - - def test_decompileOffsets_shortFormat(self): - decompileOffsets = table__g_v_a_r.decompileOffsets_ - data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") - self.assertEqual([2*0x0011, 2*0x2233, 2*0x4455, 2*0x6677, 2*0x8899, 2*0xaabb], - list(decompileOffsets(data, tableFormat=0, glyphCount=5))) - - def test_decompileOffsets_longFormat(self): - decompileOffsets = table__g_v_a_r.decompileOffsets_ - data = deHexStr("00 11 22 33 44 55 66 77 88 99 aa bb") - self.assertEqual([0x00112233, 0x44556677, 0x8899aabb], - list(decompileOffsets(data, tableFormat=1, glyphCount=2))) - - def test_compileGlyph_noVariations(self): - table = table__g_v_a_r() - table.variations = {} - self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) - - def test_compileGlyph_emptyVariations(self): - table = table__g_v_a_r() - table.variations = {"glyphname": []} - self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) - - def test_compileGlyph_onlyRedundantVariations(self): - table = table__g_v_a_r() - axes = {"wght": (0.3, 0.4, 0.5), "opsz": (0.7, 0.8, 0.9)} - table.variations = {"glyphname": [ - GlyphVariation(axes, [None] * 4), - GlyphVariation(axes, [None] * 4), - GlyphVariation(axes, [None] * 4) - ]} - self.assertEqual(b"", table.compileGlyph_("glyphname", 8, ["wght", "opsz"], {})) - - def test_compileGlyph_roundTrip(self): - table = table__g_v_a_r() - axisTags = ["wght", "wdth"] - numPointsInGlyph = 4 - glyphCoords = [(1,1), 
(2,2), (3,3), (4,4)] - gvar1 = GlyphVariation({"wght": (0.5, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) - gvar2 = GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (1.0, 1.0, 1.0)}, glyphCoords) - table.variations = {"oslash": [gvar1, gvar2]} - data = table.compileGlyph_("oslash", numPointsInGlyph, axisTags, {}) - self.assertEqual([gvar1, gvar2], table.decompileGlyph_(numPointsInGlyph, {}, axisTags, data)) - - def test_compileSharedCoords(self): - table = table__g_v_a_r() - table.variations = {} - deltas = [None] * 4 - table.variations["A"] = [ - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.5, 0.7, 1.0)}, deltas) - ] - table.variations["B"] = [ - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.7, 1.0)}, deltas), - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.2, 0.8, 1.0)}, deltas) - ] - table.variations["C"] = [ - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.7, 1.0)}, deltas), - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.8, 1.0)}, deltas), - GlyphVariation({"wght": (1.0, 1.0, 1.0), "wdth": (0.3, 0.9, 1.0)}, deltas) - ] - # {"wght":1.0, "wdth":0.7} is shared 3 times; {"wght":1.0, "wdth":0.8} is shared twice. - # Min and max values are not part of the shared coordinate pool and should get ignored. 
- result = table.compileSharedCoords_(["wght", "wdth"]) - self.assertEqual(["40 00 2C CD", "40 00 33 33"], [hexencode(c) for c in result]) - - def test_decompileSharedCoords_Skia(self): - table = table__g_v_a_r() - table.offsetToCoord = 0 - table.sharedCoordCount = 8 - sharedCoords = table.decompileSharedCoords_(["wght", "wdth"], SKIA_SHARED_COORDS) - self.assertEqual([ - {"wght": 1.0, "wdth": 0.0}, - {"wght": -1.0, "wdth": 0.0}, - {"wght": 0.0, "wdth": 1.0}, - {"wght": 0.0, "wdth": -1.0}, - {"wght": -1.0, "wdth": -1.0}, - {"wght": 1.0, "wdth": -1.0}, - {"wght": 1.0, "wdth": 1.0}, - {"wght": -1.0, "wdth": 1.0} - ], sharedCoords) - - def test_decompileSharedCoords_empty(self): - table = table__g_v_a_r() - table.offsetToCoord = 0 - table.sharedCoordCount = 0 - self.assertEqual([], table.decompileSharedCoords_(["wght"], b"")) - - def test_decompileGlyph_Skia_I(self): - axes = ["wght", "wdth"] - table = table__g_v_a_r() - table.offsetToCoord = 0 - table.sharedCoordCount = 8 - table.axisCount = len(axes) - sharedCoords = table.decompileSharedCoords_(axes, SKIA_SHARED_COORDS) - tuples = table.decompileGlyph_(18, sharedCoords, axes, SKIA_GVAR_I) - self.assertEqual(8, len(tuples)) - self.assertEqual({"wght": (0.0, 1.0, 1.0)}, tuples[0].axes) - self.assertEqual("257,0 -127,0 -128,58 -130,90 -130,62 -130,67 -130,32 -127,0 257,0 " - "259,14 260,64 260,21 260,69 258,124 0,0 130,0 0,0 0,0", - " ".join(["%d,%d" % c for c in tuples[0].coordinates])) - - def test_decompileGlyph_empty(self): - table = table__g_v_a_r() - self.assertEqual([], table.decompileGlyph_(numPointsInGlyph=5, sharedCoords=[], axisTags=[], data=b"")) - - def test_computeMinMaxCord(self): - coord = {"wght": -0.3, "wdth": 0.7} - minCoord, maxCoord = table__g_v_a_r.computeMinMaxCoord_(coord) - self.assertEqual({"wght": -0.3, "wdth": 0.0}, minCoord) - self.assertEqual({"wght": 0.0, "wdth": 0.7}, maxCoord) - -class GlyphVariationTest(unittest.TestCase): - def test_equal(self): - gvar1 = GlyphVariation({"wght":(0.0, 
1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - self.assertEqual(gvar1, gvar2) - - def test_equal_differentAxes(self): - gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - gvar2 = GlyphVariation({"wght":(0.7, 0.8, 0.9)}, [(0,0), (9,8), (7,6)]) - self.assertNotEqual(gvar1, gvar2) - - def test_equal_differentCoordinates(self): - gvar1 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8), (7,6)]) - gvar2 = GlyphVariation({"wght":(0.0, 1.0, 1.0)}, [(0,0), (9,8)]) - self.assertNotEqual(gvar1, gvar2) - - def test_hasImpact_someDeltasNotZero(self): - axes = {"wght":(0.0, 1.0, 1.0)} - gvar = GlyphVariation(axes, [(0,0), (9,8), (7,6)]) - self.assertTrue(gvar.hasImpact()) - - def test_hasImpact_allDeltasZero(self): - axes = {"wght":(0.0, 1.0, 1.0)} - gvar = GlyphVariation(axes, [(0,0), (0,0), (0,0)]) - self.assertTrue(gvar.hasImpact()) - - def test_hasImpact_allDeltasNone(self): - axes = {"wght":(0.0, 1.0, 1.0)} - gvar = GlyphVariation(axes, [None, None, None]) - self.assertFalse(gvar.hasImpact()) - - def test_toXML(self): - writer = XMLWriter(BytesIO()) - axes = {"wdth":(0.3, 0.4, 0.5), "wght":(0.0, 1.0, 1.0), "opsz":(-0.7, -0.7, 0.0)} - g = GlyphVariation(axes, [(9,8), None, (7,6), (0,0), (-1,-2), None]) - g.toXML(writer, ["wdth", "wght", "opsz"]) - self.assertEqual([ - '', - '', - '', - '', - '', - '', - '', - '', - '' - ], GlyphVariationTest.xml_lines(writer)) - - def test_toXML_allDeltasNone(self): - writer = XMLWriter(BytesIO()) - axes = {"wght":(0.0, 1.0, 1.0)} - g = GlyphVariation(axes, [None] * 5) - g.toXML(writer, ["wght", "wdth"]) - self.assertEqual([ - '', - '', - '', - '' - ], GlyphVariationTest.xml_lines(writer)) - - def test_fromXML(self): - g = GlyphVariation({}, [None] * 4) - g.fromXML("coord", {"axis":"wdth", "min":"0.3", "value":"0.4", "max":"0.5"}, []) - g.fromXML("coord", {"axis":"wght", "value":"1.0"}, []) - g.fromXML("coord", {"axis":"opsz", "value":"-0.5"}, []) - 
g.fromXML("delta", {"pt":"1", "x":"33", "y":"44"}, []) - g.fromXML("delta", {"pt":"2", "x":"-2", "y":"170"}, []) - self.assertEqual({ - "wdth":( 0.3, 0.4, 0.5), - "wght":( 0.0, 1.0, 1.0), - "opsz":(-0.5, -0.5, 0.0) - }, g.axes) - self.assertEqual([None, (33, 44), (-2, 170), None], g.coordinates) - - def test_compile_sharedCoords_nonIntermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) - # len(data)=8; flags=None; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[] - self.assertEqual("00 08 00 77", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_sharedCoords_intermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.3, 0.5, 0.7), "wdth": (0.1, 0.8, 0.9)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints={0,1,2}) - # len(data)=8; flags=INTERMEDIATE_TUPLE; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[(0.3, 0.1), (0.7, 0.9)] - self.assertEqual("00 08 40 77 13 33 06 66 2C CD 39 9A", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_sharedCoords_nonIntermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[] - 
self.assertEqual("00 09 20 77", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_sharedCoords_intermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 1.0)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - sharedCoordIndices = { gvar.compileCoord(axisTags): 0x77 } - tuple, data = gvar.compile(axisTags, sharedCoordIndices, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS; tupleIndex=0x77 - # embeddedCoord=[]; intermediateCoord=[(0.0, 0.0), (1.0, 1.0)] - self.assertEqual("00 09 60 77 00 00 00 00 40 00 40 00", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_nonIntermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) - # len(data)=8; flags=EMBEDDED_TUPLE_COORD - # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] - self.assertEqual("00 08 80 00 20 00 33 33", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_intermediate_sharedPoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 1.0), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints={0,1,2}) - # len(data)=8; flags=EMBEDDED_TUPLE_COORD - # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[(0.0, 0.0), (1.0, 0.8)] - self.assertEqual("00 08 C0 00 20 00 33 33 00 00 00 00 40 00 33 33", hexencode(tuple)) - self.assertEqual("02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # 
deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_nonIntermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.0, 0.5, 0.5), "wdth": (0.0, 0.8, 0.8)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS|EMBEDDED_TUPLE_COORD - # embeddedCoord=[(0.5, 0.8)]; intermediateCoord=[] - self.assertEqual("00 09 A0 00 20 00 33 33", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compile_embeddedCoords_intermediate_privatePoints(self): - gvar = GlyphVariation({"wght": (0.4, 0.5, 0.6), "wdth": (0.7, 0.8, 0.9)}, - [(7,4), (8,5), (9,6)]) - axisTags = ["wght", "wdth"] - tuple, data = gvar.compile(axisTags, sharedCoordIndices={}, sharedPoints=None) - # len(data)=13; flags=PRIVATE_POINT_NUMBERS|INTERMEDIATE_TUPLE|EMBEDDED_TUPLE_COORD - # embeddedCoord=(0.5, 0.8); intermediateCoord=[(0.4, 0.7), (0.6, 0.9)] - self.assertEqual("00 09 E0 00 20 00 33 33 19 9A 2C CD 26 66 39 9A", hexencode(tuple)) - self.assertEqual("00 " # all points in glyph - "02 07 08 09 " # deltaX: [7, 8, 9] - "02 04 05 06", # deltaY: [4, 5, 6] - hexencode(data)) - - def test_compileCoord(self): - gvar = GlyphVariation({"wght": (-1.0, -1.0, -1.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) - self.assertEqual("C0 00 20 00", hexencode(gvar.compileCoord(["wght", "wdth"]))) - self.assertEqual("20 00 C0 00", hexencode(gvar.compileCoord(["wdth", "wght"]))) - self.assertEqual("C0 00", hexencode(gvar.compileCoord(["wght"]))) - - def test_compileIntermediateCoord(self): - gvar = GlyphVariation({"wght": (-1.0, -1.0, 0.0), "wdth": (0.4, 0.5, 0.6)}, [None] * 4) - self.assertEqual("C0 00 19 9A 00 00 26 66", hexencode(gvar.compileIntermediateCoord(["wght", "wdth"]))) - self.assertEqual("19 9A C0 00 26 66 00 00", 
hexencode(gvar.compileIntermediateCoord(["wdth", "wght"]))) - self.assertEqual(None, gvar.compileIntermediateCoord(["wght"])) - self.assertEqual("19 9A 26 66", hexencode(gvar.compileIntermediateCoord(["wdth"]))) - - def test_decompileCoord(self): - decompileCoord = GlyphVariation.decompileCoord_ - data = deHexStr("DE AD C0 00 20 00 DE AD") - self.assertEqual(({"wght": -1.0, "wdth": 0.5}, 6), decompileCoord(["wght", "wdth"], data, 2)) - - def test_decompileCoord_roundTrip(self): - # Make sure we are not affected by https://github.com/behdad/fonttools/issues/286 - data = deHexStr("7F B9 80 35") - values, _ = GlyphVariation.decompileCoord_(["wght", "wdth"], data, 0) - axisValues = {axis:(val, val, val) for axis, val in values.items()} - gvar = GlyphVariation(axisValues, [None] * 4) - self.assertEqual("7F B9 80 35", hexencode(gvar.compileCoord(["wght", "wdth"]))) - - def test_decompileCoords(self): - decompileCoords = GlyphVariation.decompileCoords_ - axes = ["wght", "wdth", "opsz"] - coords = [ - {"wght": 1.0, "wdth": 0.0, "opsz": 0.5}, - {"wght": -1.0, "wdth": 0.0, "opsz": 0.25}, - {"wght": 0.0, "wdth": -1.0, "opsz": 1.0} - ] - data = deHexStr("DE AD 40 00 00 00 20 00 C0 00 00 00 10 00 00 00 C0 00 40 00") - self.assertEqual((coords, 20), decompileCoords(axes, numCoords=3, data=data, offset=2)) - - def test_compilePoints(self): - compilePoints = lambda p: GlyphVariation.compilePoints(set(p), numPointsInGlyph=999) - self.assertEqual("00", hexencode(compilePoints(range(999)))) # all points in glyph - self.assertEqual("01 00 07", hexencode(compilePoints([7]))) - self.assertEqual("01 80 FF FF", hexencode(compilePoints([65535]))) - self.assertEqual("02 01 09 06", hexencode(compilePoints([9, 15]))) - self.assertEqual("06 05 07 01 F7 02 01 F2", hexencode(compilePoints([7, 8, 255, 257, 258, 500]))) - self.assertEqual("03 01 07 01 80 01 F4", hexencode(compilePoints([7, 8, 500]))) - self.assertEqual("04 01 07 01 81 BE EF 0C 0F", hexencode(compilePoints([7, 8, 0xBEEF, 0xCAFE]))) 
- self.assertEqual("81 2C" + # 300 points (0x12c) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] - hexencode(compilePoints(range(300)))) - self.assertEqual("81 8F" + # 399 points (0x18f) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] - " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] - hexencode(compilePoints(range(399)))) - - def test_decompilePoints(self): - numPointsInGlyph = 65536 - allPoints = list(range(numPointsInGlyph)) - def decompilePoints(data, offset): - points, offset = GlyphVariation.decompilePoints_(numPointsInGlyph, deHexStr(data), offset) - # Conversion to list needed for Python 3. - return (list(points), offset) - # all points in glyph - self.assertEqual((allPoints, 1), decompilePoints("00", 0)) - # all points in glyph (in overly verbose encoding, not explicitly prohibited by spec) - self.assertEqual((allPoints, 2), decompilePoints("80 00", 0)) - # 2 points; first run: [9, 9+6] - self.assertEqual(([9, 15], 4), decompilePoints("02 01 09 06", 0)) - # 2 points; first run: [0xBEEF, 0xCAFE]. (0x0C0F = 0xCAFE - 0xBEEF) - self.assertEqual(([0xBEEF, 0xCAFE], 6), decompilePoints("02 81 BE EF 0C 0F", 0)) - # 1 point; first run: [7] - self.assertEqual(([7], 3), decompilePoints("01 00 07", 0)) - # 1 point; first run: [7] in overly verbose encoding - self.assertEqual(([7], 4), decompilePoints("01 80 00 07", 0)) - # 1 point; first run: [65535]; requires words to be treated as unsigned numbers - self.assertEqual(([65535], 4), decompilePoints("01 80 FF FF", 0)) - # 4 points; first run: [7, 8]; second run: [255, 257]. 
257 is stored in delta-encoded bytes (0xFF + 2). - self.assertEqual(([7, 8, 255, 257], 7), decompilePoints("04 01 07 01 01 FF 02", 0)) - # combination of all encodings, preceded and followed by 4 bytes of unused data - data = "DE AD DE AD 04 01 07 01 81 BE EF 0C 0F DE AD DE AD" - self.assertEqual(([7, 8, 0xBEEF, 0xCAFE], 13), decompilePoints(data, 4)) - self.assertSetEqual(set(range(300)), set(decompilePoints( - "81 2C" + # 300 points (0x12c) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " AB 01 00" + (43 * " 00 01"), # third run, contains 44 points: [256 .. 299] - 0)[0])) - self.assertSetEqual(set(range(399)), set(decompilePoints( - "81 8F" + # 399 points (0x18f) in total - " 7F 00" + (127 * " 01") + # first run, contains 128 points: [0 .. 127] - " 7F 80" + (127 * " 01") + # second run, contains 128 points: [128 .. 255] - " FF 01 00" + (127 * " 00 01") + # third run, contains 128 points: [256 .. 383] - " 8E 01 80" + (14 * " 00 01"), # fourth run, contains 15 points: [384 .. 398] - 0)[0])) - - def test_decompilePoints_shouldGuardAgainstBadPointNumbers(self): - decompilePoints = GlyphVariation.decompilePoints_ - # 2 points; first run: [3, 9]. 
- numPointsInGlyph = 8 - self.assertRaises(TTLibError, decompilePoints, numPointsInGlyph, deHexStr("02 01 03 06"), 0) - - def test_decompilePoints_roundTrip(self): - numPointsInGlyph = 500 # greater than 255, so we also exercise code path for 16-bit encoding - compile = lambda points: GlyphVariation.compilePoints(points, numPointsInGlyph) - decompile = lambda data: set(GlyphVariation.decompilePoints_(numPointsInGlyph, data, 0)[0]) - for i in range(50): - points = set(random.sample(range(numPointsInGlyph), 30)) - self.assertSetEqual(points, decompile(compile(points)), - "failed round-trip decompile/compilePoints; points=%s" % points) - allPoints = set(range(numPointsInGlyph)) - self.assertSetEqual(allPoints, decompile(compile(allPoints))) - - def test_compileDeltas(self): - gvar = GlyphVariation({}, [(0,0), (1, 0), (2, 0), (3, 3)]) - points = {1, 2} - # deltaX for points: [1, 2]; deltaY for points: [0, 0] - self.assertEqual("01 01 02 81", hexencode(gvar.compileDeltas(points))) - - def test_compileDeltaValues(self): - compileDeltaValues = lambda values: hexencode(GlyphVariation.compileDeltaValues_(values)) - # zeroes - self.assertEqual("80", compileDeltaValues([0])) - self.assertEqual("BF", compileDeltaValues([0] * 64)) - self.assertEqual("BF 80", compileDeltaValues([0] * 65)) - self.assertEqual("BF A3", compileDeltaValues([0] * 100)) - self.assertEqual("BF BF BF BF", compileDeltaValues([0] * 256)) - # bytes - self.assertEqual("00 01", compileDeltaValues([1])) - self.assertEqual("06 01 02 03 7F 80 FF FE", compileDeltaValues([1, 2, 3, 127, -128, -1, -2])) - self.assertEqual("3F" + (64 * " 7F"), compileDeltaValues([127] * 64)) - self.assertEqual("3F" + (64 * " 7F") + " 00 7F", compileDeltaValues([127] * 65)) - # words - self.assertEqual("40 66 66", compileDeltaValues([0x6666])) - self.assertEqual("43 66 66 7F FF FF FF 80 00", compileDeltaValues([0x6666, 32767, -1, -32768])) - self.assertEqual("7F" + (64 * " 11 22"), compileDeltaValues([0x1122] * 64)) - 
self.assertEqual("7F" + (64 * " 11 22") + " 40 11 22", compileDeltaValues([0x1122] * 65)) - # bytes, zeroes, bytes: a single zero is more compact when encoded as part of the bytes run - self.assertEqual("04 7F 7F 00 7F 7F", compileDeltaValues([127, 127, 0, 127, 127])) - self.assertEqual("01 7F 7F 81 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 127, 127])) - self.assertEqual("01 7F 7F 82 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 127, 127])) - self.assertEqual("01 7F 7F 83 01 7F 7F", compileDeltaValues([127, 127, 0, 0, 0, 0, 127, 127])) - # bytes, zeroes - self.assertEqual("01 01 00", compileDeltaValues([1, 0])) - self.assertEqual("00 01 81", compileDeltaValues([1, 0, 0])) - # words, bytes, words: a single byte is more compact when encoded as part of the words run - self.assertEqual("42 66 66 00 02 77 77", compileDeltaValues([0x6666, 2, 0x7777])) - self.assertEqual("40 66 66 01 02 02 40 77 77", compileDeltaValues([0x6666, 2, 2, 0x7777])) - # words, zeroes, words - self.assertEqual("40 66 66 80 40 77 77", compileDeltaValues([0x6666, 0, 0x7777])) - self.assertEqual("40 66 66 81 40 77 77", compileDeltaValues([0x6666, 0, 0, 0x7777])) - self.assertEqual("40 66 66 82 40 77 77", compileDeltaValues([0x6666, 0, 0, 0, 0x7777])) - # words, zeroes, bytes - self.assertEqual("40 66 66 80 02 01 02 03", compileDeltaValues([0x6666, 0, 1, 2, 3])) - self.assertEqual("40 66 66 81 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 1, 2, 3])) - self.assertEqual("40 66 66 82 02 01 02 03", compileDeltaValues([0x6666, 0, 0, 0, 1, 2, 3])) - # words, zeroes - self.assertEqual("40 66 66 80", compileDeltaValues([0x6666, 0])) - self.assertEqual("40 66 66 81", compileDeltaValues([0x6666, 0, 0])) - - def test_decompileDeltas(self): - decompileDeltas = GlyphVariation.decompileDeltas_ - # 83 = zero values (0x80), count = 4 (1 + 0x83 & 0x3F) - self.assertEqual(([0, 0, 0, 0], 1), decompileDeltas(4, deHexStr("83"), 0)) - # 41 01 02 FF FF = signed 16-bit values (0x40), count = 2 (1 + 0x41 & 0x3F) 
- self.assertEqual(([258, -1], 5), decompileDeltas(2, deHexStr("41 01 02 FF FF"), 0)) - # 01 81 07 = signed 8-bit values, count = 2 (1 + 0x01 & 0x3F) - self.assertEqual(([-127, 7], 3), decompileDeltas(2, deHexStr("01 81 07"), 0)) - # combination of all three encodings, preceded and followed by 4 bytes of unused data - data = deHexStr("DE AD BE EF 83 40 01 02 01 81 80 DE AD BE EF") - self.assertEqual(([0, 0, 0, 0, 258, -127, -128], 11), decompileDeltas(7, data, 4)) - - def test_decompileDeltas_roundTrip(self): - numDeltas = 30 - compile = GlyphVariation.compileDeltaValues_ - decompile = lambda data: GlyphVariation.decompileDeltas_(numDeltas, data, 0)[0] - for i in range(50): - deltas = random.sample(range(-128, 127), 10) - deltas.extend(random.sample(range(-32768, 32767), 10)) - deltas.extend([0] * 10) - random.shuffle(deltas) - self.assertListEqual(deltas, decompile(compile(deltas))) - - def test_getTupleSize(self): - getTupleSize = GlyphVariation.getTupleSize_ - numAxes = 3 - self.assertEqual(4 + numAxes * 2, getTupleSize(0x8042, numAxes)) - self.assertEqual(4 + numAxes * 4, getTupleSize(0x4077, numAxes)) - self.assertEqual(4, getTupleSize(0x2077, numAxes)) - self.assertEqual(4, getTupleSize(11, numAxes)) - - @staticmethod - def xml_lines(writer): - content = writer.file.getvalue().decode("utf-8") - return [line.strip() for line in content.splitlines()][1:] - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_h_d_m_x.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_h_d_m_x.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_h_d_m_x.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_h_d_m_x.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,121 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from . import DefaultTable -import array - -hdmxHeaderFormat = """ - > # big endian! 
- version: H - numRecords: H - recordSize: l -""" - -try: - from collections.abc import Mapping -except: - from UserDict import DictMixin as Mapping - -class _GlyphnamedList(Mapping): - - def __init__(self, reverseGlyphOrder, data): - self._array = data - self._map = dict(reverseGlyphOrder) - - def __getitem__(self, k): - return self._array[self._map[k]] - - def __len__(self): - return len(self._map) - - def __iter__(self): - return iter(self._map) - - def keys(self): - return self._map.keys() - -class table__h_d_m_x(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - numGlyphs = ttFont['maxp'].numGlyphs - glyphOrder = ttFont.getGlyphOrder() - dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self) - self.hdmx = {} - for i in range(self.numRecords): - ppem = byteord(data[0]) - maxSize = byteord(data[1]) - widths = _GlyphnamedList(ttFont.getReverseGlyphMap(), array.array("B", data[2:2+numGlyphs])) - self.hdmx[ppem] = widths - data = data[self.recordSize:] - assert len(data) == 0, "too much hdmx data" - - def compile(self, ttFont): - self.version = 0 - numGlyphs = ttFont['maxp'].numGlyphs - glyphOrder = ttFont.getGlyphOrder() - self.recordSize = 4 * ((2 + numGlyphs + 3) // 4) - pad = (self.recordSize - 2 - numGlyphs) * b"\0" - self.numRecords = len(self.hdmx) - data = sstruct.pack(hdmxHeaderFormat, self) - items = sorted(self.hdmx.items()) - for ppem, widths in items: - data = data + bytechr(ppem) + bytechr(max(widths.values())) - for glyphID in range(len(glyphOrder)): - width = widths[glyphOrder[glyphID]] - data = data + bytechr(width) - data = data + pad - return data - - def toXML(self, writer, ttFont): - writer.begintag("hdmxData") - writer.newline() - ppems = sorted(self.hdmx.keys()) - records = [] - format = "" - for ppem in ppems: - widths = self.hdmx[ppem] - records.append(widths) - format = format + "%4d" - glyphNames = ttFont.getGlyphOrder()[:] - glyphNames.sort() - maxNameLen = max(map(len, glyphNames)) - format = "%" + 
repr(maxNameLen) + 's:' + format + ' ;' - writer.write(format % (("ppem",) + tuple(ppems))) - writer.newline() - writer.newline() - for glyphName in glyphNames: - row = [] - for ppem in ppems: - widths = self.hdmx[ppem] - row.append(widths[glyphName]) - if ";" in glyphName: - glyphName = "\\x3b".join(glyphName.split(";")) - writer.write(format % ((glyphName,) + tuple(row))) - writer.newline() - writer.endtag("hdmxData") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name != "hdmxData": - return - content = strjoin(content) - lines = content.split(";") - topRow = lines[0].split() - assert topRow[0] == "ppem:", "illegal hdmx format" - ppems = list(map(int, topRow[1:])) - self.hdmx = hdmx = {} - for ppem in ppems: - hdmx[ppem] = {} - lines = (line.split() for line in lines[1:]) - for line in lines: - if not line: - continue - assert line[0][-1] == ":", "illegal hdmx format" - glyphName = line[0][:-1] - if "\\" in glyphName: - from fontTools.misc.textTools import safeEval - glyphName = safeEval('"""' + glyphName + '"""') - line = list(map(int, line[1:])) - assert len(line) == len(ppems), "illegal hdmx format" - for i in range(len(ppems)): - hdmx[ppems[i]][glyphName] = line[i] diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_h_e_a_d.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_h_e_a_d.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_h_e_a_d.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_h_e_a_d.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,92 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval, num2binary, binary2num -from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow -from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat -from . 
import DefaultTable -import warnings - - -headFormat = """ - > # big endian - tableVersion: 16.16F - fontRevision: 16.16F - checkSumAdjustment: I - magicNumber: I - flags: H - unitsPerEm: H - created: Q - modified: Q - xMin: h - yMin: h - xMax: h - yMax: h - macStyle: H - lowestRecPPEM: H - fontDirectionHint: h - indexToLocFormat: h - glyphDataFormat: h -""" - -class table__h_e_a_d(DefaultTable.DefaultTable): - - dependencies = ['maxp', 'loca'] - - def decompile(self, data, ttFont): - dummy, rest = sstruct.unpack2(headFormat, data, self) - if rest: - # this is quite illegal, but there seem to be fonts out there that do this - warnings.warn("extra bytes at the end of 'head' table") - assert rest == "\0\0" - - # For timestamp fields, ignore the top four bytes. Some fonts have - # bogus values there. Since till 2038 those bytes only can be zero, - # ignore them. - # - # https://github.com/behdad/fonttools/issues/99#issuecomment-66776810 - for stamp in 'created', 'modified': - value = getattr(self, stamp) - if value > 0xFFFFFFFF: - warnings.warn("'%s' timestamp out of range; ignoring top bytes" % stamp) - value &= 0xFFFFFFFF - setattr(self, stamp, value) - if value < 0x7C259DC0: # January 1, 1970 00:00:00 - warnings.warn("'%s' timestamp seems very low; regarding as unix timestamp" % stamp) - value += 0x7C259DC0 - setattr(self, stamp, value) - - def compile(self, ttFont): - if ttFont.recalcTimestamp: - self.modified = timestampNow() - data = sstruct.pack(headFormat, self) - return data - - def toXML(self, writer, ttFont): - writer.comment("Most of this table will be recalculated by the compiler") - writer.newline() - formatstring, names, fixes = sstruct.getformat(headFormat) - for name in names: - value = getattr(self, name) - if name in ("created", "modified"): - value = timestampToString(value) - if name in ("magicNumber", "checkSumAdjustment"): - if value < 0: - value = value + 0x100000000 - value = hex(value) - if value[-1:] == "L": - value = value[:-1] - elif name 
in ("macStyle", "flags"): - value = num2binary(value, 16) - writer.simpletag(name, value=value) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - value = attrs["value"] - if name in ("created", "modified"): - value = timestampFromString(value) - elif name in ("macStyle", "flags"): - value = binary2num(value) - else: - value = safeEval(value) - setattr(self, name, value) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_h_h_e_a.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_h_h_e_a.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_h_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_h_h_e_a.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval -from . import DefaultTable - -hheaFormat = """ - > # big endian - tableVersion: 16.16F - ascent: h - descent: h - lineGap: h - advanceWidthMax: H - minLeftSideBearing: h - minRightSideBearing: h - xMaxExtent: h - caretSlopeRise: h - caretSlopeRun: h - caretOffset: h - reserved0: h - reserved1: h - reserved2: h - reserved3: h - metricDataFormat: h - numberOfHMetrics: H -""" - - -class table__h_h_e_a(DefaultTable.DefaultTable): - - # Note: Keep in sync with table__v_h_e_a - - dependencies = ['hmtx', 'glyf'] - - def decompile(self, data, ttFont): - sstruct.unpack(hheaFormat, data, self) - - def compile(self, ttFont): - if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: - self.recalc(ttFont) - return sstruct.pack(hheaFormat, self) - - def recalc(self, ttFont): - hmtxTable = ttFont['hmtx'] - if 'glyf' in ttFont: - glyfTable = ttFont['glyf'] - INFINITY = 100000 - advanceWidthMax = 0 - minLeftSideBearing = +INFINITY # arbitrary big number - minRightSideBearing = +INFINITY # arbitrary big number - xMaxExtent = -INFINITY # arbitrary big negative number - - for name in 
ttFont.getGlyphOrder(): - width, lsb = hmtxTable[name] - advanceWidthMax = max(advanceWidthMax, width) - g = glyfTable[name] - if g.numberOfContours == 0: - continue - if g.numberOfContours < 0 and not hasattr(g, "xMax"): - # Composite glyph without extents set. - # Calculate those. - g.recalcBounds(glyfTable) - minLeftSideBearing = min(minLeftSideBearing, lsb) - rsb = width - lsb - (g.xMax - g.xMin) - minRightSideBearing = min(minRightSideBearing, rsb) - extent = lsb + (g.xMax - g.xMin) - xMaxExtent = max(xMaxExtent, extent) - - if xMaxExtent == -INFINITY: - # No glyph has outlines. - minLeftSideBearing = 0 - minRightSideBearing = 0 - xMaxExtent = 0 - - self.advanceWidthMax = advanceWidthMax - self.minLeftSideBearing = minLeftSideBearing - self.minRightSideBearing = minRightSideBearing - self.xMaxExtent = xMaxExtent - else: - # XXX CFF recalc... - pass - - def toXML(self, writer, ttFont): - formatstring, names, fixes = sstruct.getformat(hheaFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_h_m_t_x.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_h_m_t_x.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_h_m_t_x.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_h_m_t_x.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,101 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from . 
import DefaultTable -import sys -import array -import warnings - - -class table__h_m_t_x(DefaultTable.DefaultTable): - - headerTag = 'hhea' - advanceName = 'width' - sideBearingName = 'lsb' - numberOfMetricsName = 'numberOfHMetrics' - - def decompile(self, data, ttFont): - numGlyphs = ttFont['maxp'].numGlyphs - numberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName)) - if numberOfMetrics > numGlyphs: - numberOfMetrics = numGlyphs # We warn later. - # Note: advanceWidth is unsigned, but we read/write as signed. - metrics = array.array("h", data[:4 * numberOfMetrics]) - if sys.byteorder != "big": - metrics.byteswap() - data = data[4 * numberOfMetrics:] - numberOfSideBearings = numGlyphs - numberOfMetrics - sideBearings = array.array("h", data[:2 * numberOfSideBearings]) - data = data[2 * numberOfSideBearings:] - - if sys.byteorder != "big": - sideBearings.byteswap() - if data: - warnings.warn("too much 'hmtx'/'vmtx' table data") - self.metrics = {} - glyphOrder = ttFont.getGlyphOrder() - for i in range(numberOfMetrics): - glyphName = glyphOrder[i] - self.metrics[glyphName] = list(metrics[i*2:i*2+2]) - lastAdvance = metrics[-2] - for i in range(numberOfSideBearings): - glyphName = glyphOrder[i + numberOfMetrics] - self.metrics[glyphName] = [lastAdvance, sideBearings[i]] - - def compile(self, ttFont): - metrics = [] - for glyphName in ttFont.getGlyphOrder(): - metrics.append(self.metrics[glyphName]) - lastAdvance = metrics[-1][0] - lastIndex = len(metrics) - while metrics[lastIndex-2][0] == lastAdvance: - lastIndex -= 1 - if lastIndex <= 1: - # all advances are equal - lastIndex = 1 - break - additionalMetrics = metrics[lastIndex:] - additionalMetrics = [sb for advance, sb in additionalMetrics] - metrics = metrics[:lastIndex] - setattr(ttFont[self.headerTag], self.numberOfMetricsName, len(metrics)) - - allMetrics = [] - for item in metrics: - allMetrics.extend(item) - allMetrics = array.array("h", allMetrics) - if sys.byteorder != "big": - 
allMetrics.byteswap() - data = allMetrics.tostring() - - additionalMetrics = array.array("h", additionalMetrics) - if sys.byteorder != "big": - additionalMetrics.byteswap() - data = data + additionalMetrics.tostring() - return data - - def toXML(self, writer, ttFont): - names = sorted(self.metrics.keys()) - for glyphName in names: - advance, sb = self.metrics[glyphName] - writer.simpletag("mtx", [ - ("name", glyphName), - (self.advanceName, advance), - (self.sideBearingName, sb), - ]) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "metrics"): - self.metrics = {} - if name == "mtx": - self.metrics[attrs["name"]] = [safeEval(attrs[self.advanceName]), - safeEval(attrs[self.sideBearingName])] - - def __delitem__(self, glyphName): - del self.metrics[glyphName] - - def __getitem__(self, glyphName): - return self.metrics[glyphName] - - def __setitem__(self, glyphName, advance_sb_pair): - self.metrics[glyphName] = tuple(advance_sb_pair) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/__init__.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/__init__.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/__init__.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,74 +0,0 @@ - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -# DON'T EDIT! This file is generated by MetaTools/buildTableList.py. -def _moduleFinderHint(): - """Dummy function to let modulefinder know what tables may be - dynamically imported. Generated by MetaTools/buildTableList.py. - - >>> _moduleFinderHint() - """ - from . import B_A_S_E_ - from . import C_B_D_T_ - from . import C_B_L_C_ - from . import C_F_F_ - from . import C_O_L_R_ - from . import C_P_A_L_ - from . import D_S_I_G_ - from . import E_B_D_T_ - from . import E_B_L_C_ - from . import F_F_T_M_ - from . import G_D_E_F_ - from . import G_M_A_P_ - from . 
import G_P_K_G_ - from . import G_P_O_S_ - from . import G_S_U_B_ - from . import J_S_T_F_ - from . import L_T_S_H_ - from . import M_A_T_H_ - from . import M_E_T_A_ - from . import O_S_2f_2 - from . import S_I_N_G_ - from . import S_V_G_ - from . import T_S_I_B_ - from . import T_S_I_D_ - from . import T_S_I_J_ - from . import T_S_I_P_ - from . import T_S_I_S_ - from . import T_S_I_V_ - from . import T_S_I__0 - from . import T_S_I__1 - from . import T_S_I__2 - from . import T_S_I__3 - from . import T_S_I__5 - from . import V_D_M_X_ - from . import V_O_R_G_ - from . import _a_v_a_r - from . import _c_m_a_p - from . import _c_v_t - from . import _f_e_a_t - from . import _f_p_g_m - from . import _f_v_a_r - from . import _g_a_s_p - from . import _g_l_y_f - from . import _g_v_a_r - from . import _h_d_m_x - from . import _h_e_a_d - from . import _h_h_e_a - from . import _h_m_t_x - from . import _k_e_r_n - from . import _l_o_c_a - from . import _l_t_a_g - from . import _m_a_x_p - from . import _m_e_t_a - from . import _n_a_m_e - from . import _p_o_s_t - from . import _p_r_e_p - from . import _s_b_i_x - from . import _v_h_e_a - from . 
import _v_m_t_x - -if __name__ == "__main__": - import doctest, sys - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/J_S_T_F_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/J_S_T_F_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/J_S_T_F_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/J_S_T_F_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from .otBase import BaseTTXConverter - - -class table_J_S_T_F_(BaseTTXConverter): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_k_e_r_n.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_k_e_r_n.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_k_e_r_n.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_k_e_r_n.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,200 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.ttLib import getSearchRange -from fontTools.misc.textTools import safeEval, readHex -from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi -from . import DefaultTable -import struct -import array -import warnings - - -class table__k_e_r_n(DefaultTable.DefaultTable): - - def getkern(self, format): - for subtable in self.kernTables: - if subtable.version == format: - return subtable - return None # not found - - def decompile(self, data, ttFont): - version, nTables = struct.unpack(">HH", data[:4]) - apple = False - if (len(data) >= 8) and (version == 1): - # AAT Apple's "new" format. Hm. 
- version, nTables = struct.unpack(">LL", data[:8]) - self.version = fi2fl(version, 16) - data = data[8:] - apple = True - else: - self.version = version - data = data[4:] - tablesIndex = [] - self.kernTables = [] - for i in range(nTables): - if self.version == 1.0: - # Apple - length, coverage, tupleIndex = struct.unpack(">lHH", data[:8]) - version = coverage & 0xff - else: - version, length = struct.unpack(">HH", data[:4]) - length = int(length) - if version not in kern_classes: - subtable = KernTable_format_unkown(version) - else: - subtable = kern_classes[version]() - subtable.apple = apple - subtable.decompile(data[:length], ttFont) - self.kernTables.append(subtable) - data = data[length:] - - def compile(self, ttFont): - if hasattr(self, "kernTables"): - nTables = len(self.kernTables) - else: - nTables = 0 - if self.version == 1.0: - # AAT Apple's "new" format. - data = struct.pack(">ll", fl2fi(self.version, 16), nTables) - else: - data = struct.pack(">HH", self.version, nTables) - if hasattr(self, "kernTables"): - for subtable in self.kernTables: - data = data + subtable.compile(ttFont) - return data - - def toXML(self, writer, ttFont): - writer.simpletag("version", value=self.version) - writer.newline() - for subtable in self.kernTables: - subtable.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.version = safeEval(attrs["value"]) - return - if name != "kernsubtable": - return - if not hasattr(self, "kernTables"): - self.kernTables = [] - format = safeEval(attrs["format"]) - if format not in kern_classes: - subtable = KernTable_format_unkown(format) - else: - subtable = kern_classes[format]() - self.kernTables.append(subtable) - subtable.fromXML(name, attrs, content, ttFont) - - -class KernTable_format_0(object): - - def decompile(self, data, ttFont): - version, length, coverage = (0,0,0) - if not self.apple: - version, length, coverage = struct.unpack(">HHH", data[:6]) - data = data[6:] - else: - 
version, length, coverage = struct.unpack(">LHH", data[:8]) - data = data[8:] - self.version, self.coverage = int(version), int(coverage) - - self.kernTable = kernTable = {} - - nPairs, searchRange, entrySelector, rangeShift = struct.unpack(">HHHH", data[:8]) - data = data[8:] - - nPairs = min(nPairs, len(data) // 6) - datas = array.array("H", data[:6 * nPairs]) - if sys.byteorder != "big": - datas.byteswap() - it = iter(datas) - glyphOrder = ttFont.getGlyphOrder() - for k in range(nPairs): - left, right, value = next(it), next(it), next(it) - if value >= 32768: value -= 65536 - try: - kernTable[(glyphOrder[left], glyphOrder[right])] = value - except IndexError: - # Slower, but will not throw an IndexError on an invalid glyph id. - kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = value - if len(data) > 6 * nPairs: - warnings.warn("excess data in 'kern' subtable: %d bytes" % len(data)) - - def compile(self, ttFont): - nPairs = len(self.kernTable) - searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6) - data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift) - - # yeehee! (I mean, turn names into indices) - try: - reverseOrder = ttFont.getReverseGlyphMap() - kernTable = sorted((reverseOrder[left], reverseOrder[right], value) for ((left,right),value) in self.kernTable.items()) - except KeyError: - # Slower, but will not throw KeyError on invalid glyph id. 
- getGlyphID = ttFont.getGlyphID - kernTable = sorted((getGlyphID(left), getGlyphID(right), value) for ((left,right),value) in self.kernTable.items()) - - for left, right, value in kernTable: - data = data + struct.pack(">HHh", left, right, value) - return struct.pack(">HHH", self.version, len(data) + 6, self.coverage) + data - - def toXML(self, writer, ttFont): - writer.begintag("kernsubtable", coverage=self.coverage, format=0) - writer.newline() - items = sorted(self.kernTable.items()) - for (left, right), value in items: - writer.simpletag("pair", [ - ("l", left), - ("r", right), - ("v", value) - ]) - writer.newline() - writer.endtag("kernsubtable") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.coverage = safeEval(attrs["coverage"]) - self.version = safeEval(attrs["format"]) - if not hasattr(self, "kernTable"): - self.kernTable = {} - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"]) - - def __getitem__(self, pair): - return self.kernTable[pair] - - def __setitem__(self, pair, value): - self.kernTable[pair] = value - - def __delitem__(self, pair): - del self.kernTable[pair] - - -class KernTable_format_unkown(object): - - def __init__(self, format): - self.format = format - - def decompile(self, data, ttFont): - self.data = data - - def compile(self, ttFont): - return self.data - - def toXML(self, writer, ttFont): - writer.begintag("kernsubtable", format=self.format) - writer.newline() - writer.comment("unknown 'kern' subtable format") - writer.newline() - writer.dumphex(self.data) - writer.endtag("kernsubtable") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.decompile(readHex(content), ttFont) - - -kern_classes = {0: KernTable_format_0} diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_k_e_r_n_test.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_k_e_r_n_test.py --- 
fonttools-3.0/Tools/fontTools/ttLib/tables/_k_e_r_n_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_k_e_r_n_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,29 +0,0 @@ -from __future__ import print_function, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib -import unittest -from ._k_e_r_n import KernTable_format_0 - -class MockFont(object): - - def getGlyphOrder(self): - return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"] - - def getGlyphName(self, glyphID): - return "glyph%.5d" % glyphID - -class KernTable_format_0_Test(unittest.TestCase): - - def test_decompileBadGlyphId(self): - subtable = KernTable_format_0() - subtable.apple = False - subtable.decompile( b'\x00' * 6 - + b'\x00' + b'\x02' + b'\x00' * 6 - + b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01' - + b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02', - MockFont()) - self.assertEqual(subtable[("glyph00001", "glyph00003")], 1) - self.assertEqual(subtable[("glyph00001", "glyph65535")], 2) - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_l_o_c_a.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_l_o_c_a.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_l_o_c_a.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_l_o_c_a.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,60 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . 
import DefaultTable -import sys -import array -import warnings - -class table__l_o_c_a(DefaultTable.DefaultTable): - - dependencies = ['glyf'] - - def decompile(self, data, ttFont): - longFormat = ttFont['head'].indexToLocFormat - if longFormat: - format = "I" - else: - format = "H" - locations = array.array(format) - locations.fromstring(data) - if sys.byteorder != "big": - locations.byteswap() - if not longFormat: - l = array.array("I") - for i in range(len(locations)): - l.append(locations[i] * 2) - locations = l - if len(locations) < (ttFont['maxp'].numGlyphs + 1): - warnings.warn("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d" % (len(locations) - 1, ttFont['maxp'].numGlyphs)) - self.locations = locations - - def compile(self, ttFont): - try: - max_location = max(self.locations) - except AttributeError: - self.set([]) - max_location = 0 - if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations): - locations = array.array("H") - for i in range(len(self.locations)): - locations.append(self.locations[i] // 2) - ttFont['head'].indexToLocFormat = 0 - else: - locations = array.array("I", self.locations) - ttFont['head'].indexToLocFormat = 1 - if sys.byteorder != "big": - locations.byteswap() - return locations.tostring() - - def set(self, locations): - self.locations = array.array("I", locations) - - def toXML(self, writer, ttFont): - writer.comment("The 'loca' table will be calculated by the compiler") - writer.newline() - - def __getitem__(self, index): - return self.locations[index] - - def __len__(self): - return len(self.locations) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_l_t_a_g.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_l_t_a_g.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_l_t_a_g.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_l_t_a_g.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -from __future__ import print_function, division, absolute_import -from 
fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from . import DefaultTable -import struct - -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html - -class table__l_t_a_g(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - self.version, self.flags, numTags = struct.unpack(">LLL", data[:12]) - assert self.version == 1 - self.tags = [] - for i in range(numTags): - pos = 12 + i * 4 - offset, length = struct.unpack(">HH", data[pos:pos+4]) - tag = data[offset:offset+length].decode("ascii") - self.tags.append(tag) - - def compile(self, ttFont): - dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))] - stringPool = "" - for tag in self.tags: - offset = stringPool.find(tag) - if offset < 0: - offset = len(stringPool) - stringPool = stringPool + tag - offset = offset + 12 + len(self.tags) * 4 - dataList.append(struct.pack(">HH", offset, len(tag))) - dataList.append(stringPool) - return bytesjoin(dataList) - - def toXML(self, writer, ttFont): - writer.simpletag("version", value=self.version) - writer.newline() - writer.simpletag("flags", value=self.flags) - writer.newline() - for tag in self.tags: - writer.simpletag("LanguageTag", tag=tag) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "tags"): - self.tags = [] - if name == "LanguageTag": - self.tags.append(attrs["tag"]) - elif "value" in attrs: - value = safeEval(attrs["value"]) - setattr(self, name, value) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_l_t_a_g_test.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_l_t_a_g_test.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_l_t_a_g_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_l_t_a_g_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.xmlWriter 
import XMLWriter -import os -import struct -import unittest -from ._l_t_a_g import table__l_t_a_g - -class Test_l_t_a_g(unittest.TestCase): - - DATA_ = struct.pack(b">LLLHHHHHH", 1, 0, 3, 24 + 0, 2, 24 + 2, 7, 24 + 2, 2) + b"enzh-Hant" - TAGS_ = ["en", "zh-Hant", "zh"] - - def test_decompile_compile(self): - table = table__l_t_a_g() - table.decompile(self.DATA_, ttFont=None) - self.assertEqual(1, table.version) - self.assertEqual(0, table.flags) - self.assertEqual(self.TAGS_, table.tags) - self.assertEqual(self.DATA_, table.compile(ttFont=None)) - - def test_fromXML(self): - table = table__l_t_a_g() - table.fromXML("version", {"value": "1"}, content=None, ttFont=None) - table.fromXML("flags", {"value": "777"}, content=None, ttFont=None) - table.fromXML("LanguageTag", {"tag": "sr-Latn"}, content=None, ttFont=None) - table.fromXML("LanguageTag", {"tag": "fa"}, content=None, ttFont=None) - self.assertEqual(1, table.version) - self.assertEqual(777, table.flags) - self.assertEqual(["sr-Latn", "fa"], table.tags) - - def test_toXML(self): - writer = XMLWriter(BytesIO()) - table = table__l_t_a_g() - table.decompile(self.DATA_, ttFont=None) - table.toXML(writer, ttFont=None) - expected = os.linesep.join([ - '', - '', - '', - '', - '', - '' - ]) + os.linesep - self.assertEqual(expected.encode("utf_8"), writer.file.getvalue()) - - -if __name__ == '__main__': - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/L_T_S_H_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/L_T_S_H_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/L_T_S_H_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/L_T_S_H_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from . 
import DefaultTable -import struct -import array - -# XXX I've lowered the strictness, to make sure Apple's own Chicago -# XXX gets through. They're looking into it, I hope to raise the standards -# XXX back to normal eventually. - -class table_L_T_S_H_(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - version, numGlyphs = struct.unpack(">HH", data[:4]) - data = data[4:] - assert version == 0, "unknown version: %s" % version - assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length" - # ouch: the assertion is not true in Chicago! - #assert numGlyphs == ttFont['maxp'].numGlyphs - yPels = array.array("B") - yPels.fromstring(data) - self.yPels = {} - for i in range(numGlyphs): - self.yPels[ttFont.getGlyphName(i)] = yPels[i] - - def compile(self, ttFont): - version = 0 - names = list(self.yPels.keys()) - numGlyphs = len(names) - yPels = [0] * numGlyphs - # ouch: the assertion is not true in Chicago! - #assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs - for name in names: - yPels[ttFont.getGlyphID(name)] = self.yPels[name] - yPels = array.array("B", yPels) - return struct.pack(">HH", version, numGlyphs) + yPels.tostring() - - def toXML(self, writer, ttFont): - names = sorted(self.yPels.keys()) - for name in names: - writer.simpletag("yPel", name=name, value=self.yPels[name]) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "yPels"): - self.yPels = {} - if name != "yPel": - return # ignore unknown tags - self.yPels[attrs["name"]] = safeEval(attrs["value"]) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/M_A_T_H_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/M_A_T_H_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/M_A_T_H_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/M_A_T_H_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,7 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 
import * -from .otBase import BaseTTXConverter - - -class table_M_A_T_H_(BaseTTXConverter): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_m_a_x_p.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_m_a_x_p.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_m_a_x_p.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_m_a_x_p.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,139 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval -from . import DefaultTable - -maxpFormat_0_5 = """ - > # big endian - tableVersion: i - numGlyphs: H -""" - -maxpFormat_1_0_add = """ - > # big endian - maxPoints: H - maxContours: H - maxCompositePoints: H - maxCompositeContours: H - maxZones: H - maxTwilightPoints: H - maxStorage: H - maxFunctionDefs: H - maxInstructionDefs: H - maxStackElements: H - maxSizeOfInstructions: H - maxComponentElements: H - maxComponentDepth: H -""" - - -class table__m_a_x_p(DefaultTable.DefaultTable): - - dependencies = ['glyf'] - - def decompile(self, data, ttFont): - dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self) - self.numGlyphs = int(self.numGlyphs) - if self.tableVersion != 0x00005000: - dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self) - assert len(data) == 0 - - def compile(self, ttFont): - if 'glyf' in ttFont: - if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: - self.recalc(ttFont) - else: - pass # CFF - self.numGlyphs = len(ttFont.getGlyphOrder()) - if self.tableVersion != 0x00005000: - self.tableVersion = 0x00010000 - data = sstruct.pack(maxpFormat_0_5, self) - if self.tableVersion == 0x00010000: - data = data + sstruct.pack(maxpFormat_1_0_add, self) - return data - - def recalc(self, ttFont): - """Recalculate the font bounding box, and most other maxp values except - for the TT instructions values. 
Also recalculate the value of bit 1 - of the flags field and the font bounding box of the 'head' table. - """ - glyfTable = ttFont['glyf'] - hmtxTable = ttFont['hmtx'] - headTable = ttFont['head'] - self.numGlyphs = len(glyfTable) - INFINITY = 100000 - xMin = +INFINITY - yMin = +INFINITY - xMax = -INFINITY - yMax = -INFINITY - maxPoints = 0 - maxContours = 0 - maxCompositePoints = 0 - maxCompositeContours = 0 - maxComponentElements = 0 - maxComponentDepth = 0 - allXMaxIsLsb = 1 - for glyphName in ttFont.getGlyphOrder(): - g = glyfTable[glyphName] - if g.numberOfContours: - if hmtxTable[glyphName][1] != g.xMin: - allXMaxIsLsb = 0 - xMin = min(xMin, g.xMin) - yMin = min(yMin, g.yMin) - xMax = max(xMax, g.xMax) - yMax = max(yMax, g.yMax) - if g.numberOfContours > 0: - nPoints, nContours = g.getMaxpValues() - maxPoints = max(maxPoints, nPoints) - maxContours = max(maxContours, nContours) - else: - nPoints, nContours, componentDepth = g.getCompositeMaxpValues(glyfTable) - maxCompositePoints = max(maxCompositePoints, nPoints) - maxCompositeContours = max(maxCompositeContours, nContours) - maxComponentElements = max(maxComponentElements, len(g.components)) - maxComponentDepth = max(maxComponentDepth, componentDepth) - if xMin == +INFINITY: - headTable.xMin = 0 - headTable.yMin = 0 - headTable.xMax = 0 - headTable.yMax = 0 - else: - headTable.xMin = xMin - headTable.yMin = yMin - headTable.xMax = xMax - headTable.yMax = yMax - self.maxPoints = maxPoints - self.maxContours = maxContours - self.maxCompositePoints = maxCompositePoints - self.maxCompositeContours = maxCompositeContours - self.maxComponentDepth = maxComponentDepth - if allXMaxIsLsb: - headTable.flags = headTable.flags | 0x2 - else: - headTable.flags = headTable.flags & ~0x2 - - def testrepr(self): - items = sorted(self.__dict__.items()) - print(". . . . . . . . .") - for combo in items: - print(" %s: %s" % combo) - print(". . . . . . . . 
.") - - def toXML(self, writer, ttFont): - if self.tableVersion != 0x00005000: - writer.comment("Most of this table will be recalculated by the compiler") - writer.newline() - formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5) - if self.tableVersion != 0x00005000: - formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add) - names = names + names_1_0 - for name in names: - value = getattr(self, name) - if name == "tableVersion": - value = hex(value) - writer.simpletag(name, value=value) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_m_e_t_a.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_m_e_t_a.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_m_e_t_a.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_m_e_t_a.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,93 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import readHex -from fontTools.ttLib import TTLibError -from . import DefaultTable - -# Apple's documentation of 'meta': -# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6meta.html - -META_HEADER_FORMAT = """ - > # big endian - version: L - flags: L - dataOffset: L - numDataMaps: L -""" - -# According to Apple's spec, the dataMaps entries contain a dataOffset -# that is documented as "Offset from the beginning of the data section -# to the data for this tag". However, this is *not* the case with -# the fonts that Apple ships pre-installed on MacOS X Yosemite 10.10.4, -# and it also does not reflect how Apple's ftxdumperfuser tool is parsing -# the 'meta' table (tested ftxdumperfuser build 330, FontToolbox.framework -# build 187). 
Instead of what is claimed in the spec, the data maps contain -# a dataOffset relative to the very beginning of the 'meta' table. -# The dataOffset field of the 'meta' header apparently gets ignored. - -DATA_MAP_FORMAT = """ - > # big endian - tag: 4s - dataOffset: L - dataLength: L -""" - - -class table__m_e_t_a(DefaultTable.DefaultTable): - def __init__(self, tag="meta"): - DefaultTable.DefaultTable.__init__(self, tag) - self.data = {} - - def decompile(self, data, ttFont): - headerSize = sstruct.calcsize(META_HEADER_FORMAT) - header = sstruct.unpack(META_HEADER_FORMAT, data[0 : headerSize]) - if header["version"] != 1: - raise TTLibError("unsupported 'meta' version %d" % - header["version"]) - dataMapSize = sstruct.calcsize(DATA_MAP_FORMAT) - for i in range(header["numDataMaps"]): - dataMapOffset = headerSize + i * dataMapSize - dataMap = sstruct.unpack( - DATA_MAP_FORMAT, - data[dataMapOffset : dataMapOffset + dataMapSize]) - tag = dataMap["tag"] - offset = dataMap["dataOffset"] - self.data[tag] = data[offset : offset + dataMap["dataLength"]] - - def compile(self, ttFont): - keys = sorted(self.data.keys()) - headerSize = sstruct.calcsize(META_HEADER_FORMAT) - dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT) - header = sstruct.pack(META_HEADER_FORMAT, { - "version": 1, - "flags": 0, - "dataOffset": dataOffset, - "numDataMaps": len(keys) - }) - dataMaps = [] - dataBlocks = [] - for tag in keys: - data = self.data[tag] - dataMaps.append(sstruct.pack(DATA_MAP_FORMAT, { - "tag": tag, - "dataOffset": dataOffset, - "dataLength": len(data) - })) - dataBlocks.append(data) - dataOffset += len(data) - return bytesjoin([header] + dataMaps + dataBlocks) - - def toXML(self, writer, ttFont, progress=None): - for tag in sorted(self.data.keys()): - writer.begintag("hexdata", tag=tag) - writer.newline() - writer.dumphex(self.data[tag]) - writer.endtag("hexdata") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "hexdata": 
- self.data[attrs["tag"]] = readHex(content) - else: - raise TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/M_E_T_A_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/M_E_T_A_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/M_E_T_A_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/M_E_T_A_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,305 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval -from . import DefaultTable -import pdb -import struct - - -METAHeaderFormat = """ - > # big endian - tableVersionMajor: H - tableVersionMinor: H - metaEntriesVersionMajor: H - metaEntriesVersionMinor: H - unicodeVersion: L - metaFlags: H - nMetaRecs: H -""" -# This record is followed by nMetaRecs of METAGlyphRecordFormat. -# This in turn is followd by as many METAStringRecordFormat entries -# as specified by the METAGlyphRecordFormat entries -# this is followed by the strings specifried in the METAStringRecordFormat -METAGlyphRecordFormat = """ - > # big endian - glyphID: H - nMetaEntry: H -""" -# This record is followd by a variable data length field: -# USHORT or ULONG hdrOffset -# Offset from start of META table to the beginning -# of this glyphs array of ns Metadata string entries. -# Size determined by metaFlags field -# METAGlyphRecordFormat entries must be sorted by glyph ID - -METAStringRecordFormat = """ - > # big endian - labelID: H - stringLen: H -""" -# This record is followd by a variable data length field: -# USHORT or ULONG stringOffset -# METAStringRecordFormat entries must be sorted in order of labelID -# There may be more than one entry with the same labelID -# There may be more than one strign with the same content. - -# Strings shall be Unicode UTF-8 encoded, and null-terminated. 
- -METALabelDict = { - 0: "MojikumiX4051", # An integer in the range 1-20 - 1: "UNIUnifiedBaseChars", - 2: "BaseFontName", - 3: "Language", - 4: "CreationDate", - 5: "FoundryName", - 6: "FoundryCopyright", - 7: "OwnerURI", - 8: "WritingScript", - 10: "StrokeCount", - 11: "IndexingRadical", -} - - -def getLabelString(labelID): - try: - label = METALabelDict[labelID] - except KeyError: - label = "Unknown label" - return str(label) - - -class table_M_E_T_A_(DefaultTable.DefaultTable): - - dependencies = [] - - def decompile(self, data, ttFont): - dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self) - self.glyphRecords = [] - for i in range(self.nMetaRecs): - glyphRecord, newData = sstruct.unpack2(METAGlyphRecordFormat, newData, GlyphRecord()) - if self.metaFlags == 0: - [glyphRecord.offset] = struct.unpack(">H", newData[:2]) - newData = newData[2:] - elif self.metaFlags == 1: - [glyphRecord.offset] = struct.unpack(">H", newData[:4]) - newData = newData[4:] - else: - assert 0, "The metaFlags field in the META table header has a value other than 0 or 1 :" + str(self.metaFlags) - glyphRecord.stringRecs = [] - newData = data[glyphRecord.offset:] - for j in range(glyphRecord.nMetaEntry): - stringRec, newData = sstruct.unpack2(METAStringRecordFormat, newData, StringRecord()) - if self.metaFlags == 0: - [stringRec.offset] = struct.unpack(">H", newData[:2]) - newData = newData[2:] - else: - [stringRec.offset] = struct.unpack(">H", newData[:4]) - newData = newData[4:] - stringRec.string = data[stringRec.offset:stringRec.offset + stringRec.stringLen] - glyphRecord.stringRecs.append(stringRec) - self.glyphRecords.append(glyphRecord) - - def compile(self, ttFont): - offsetOK = 0 - self.nMetaRecs = len(self.glyphRecords) - count = 0 - while (offsetOK != 1): - count = count + 1 - if count > 4: - pdb.set_trace() - metaData = sstruct.pack(METAHeaderFormat, self) - stringRecsOffset = len(metaData) + self.nMetaRecs * (6 + 2*(self.metaFlags & 1)) - stringRecSize = (6 + 
2*(self.metaFlags & 1)) - for glyphRec in self.glyphRecords: - glyphRec.offset = stringRecsOffset - if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0): - self.metaFlags = self.metaFlags + 1 - offsetOK = -1 - break - metaData = metaData + glyphRec.compile(self) - stringRecsOffset = stringRecsOffset + (glyphRec.nMetaEntry * stringRecSize) - # this will be the String Record offset for the next GlyphRecord. - if offsetOK == -1: - offsetOK = 0 - continue - - # metaData now contains the header and all of the GlyphRecords. Its length should bw - # the offset to the first StringRecord. - stringOffset = stringRecsOffset - for glyphRec in self.glyphRecords: - assert (glyphRec.offset == len(metaData)), "Glyph record offset did not compile correctly! for rec:" + str(glyphRec) - for stringRec in glyphRec.stringRecs: - stringRec.offset = stringOffset - if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0): - self.metaFlags = self.metaFlags + 1 - offsetOK = -1 - break - metaData = metaData + stringRec.compile(self) - stringOffset = stringOffset + stringRec.stringLen - if offsetOK == -1: - offsetOK = 0 - continue - - if ((self.metaFlags & 1) == 1) and (stringOffset < 65536): - self.metaFlags = self.metaFlags - 1 - continue - else: - offsetOK = 1 - - # metaData now contains the header and all of the GlyphRecords and all of the String Records. - # Its length should be the offset to the first string datum. - for glyphRec in self.glyphRecords: - for stringRec in glyphRec.stringRecs: - assert (stringRec.offset == len(metaData)), "String offset did not compile correctly! 
for string:" + str(stringRec.string) - metaData = metaData + stringRec.string - - return metaData - - def toXML(self, writer, ttFont): - writer.comment("Lengths and number of entries in this table will be recalculated by the compiler") - writer.newline() - formatstring, names, fixes = sstruct.getformat(METAHeaderFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - writer.newline() - for glyphRec in self.glyphRecords: - glyphRec.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name == "GlyphRecord": - if not hasattr(self, "glyphRecords"): - self.glyphRecords = [] - glyphRec = GlyphRecord() - self.glyphRecords.append(glyphRec) - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - glyphRec.fromXML(name, attrs, content, ttFont) - glyphRec.offset = -1 - glyphRec.nMetaEntry = len(glyphRec.stringRecs) - else: - setattr(self, name, safeEval(attrs["value"])) - - -class GlyphRecord(object): - def __init__(self): - self.glyphID = -1 - self.nMetaEntry = -1 - self.offset = -1 - self.stringRecs = [] - - def toXML(self, writer, ttFont): - writer.begintag("GlyphRecord") - writer.newline() - writer.simpletag("glyphID", value=self.glyphID) - writer.newline() - writer.simpletag("nMetaEntry", value=self.nMetaEntry) - writer.newline() - for stringRec in self.stringRecs: - stringRec.toXML(writer, ttFont) - writer.endtag("GlyphRecord") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "StringRecord": - stringRec = StringRecord() - self.stringRecs.append(stringRec) - for element in content: - if isinstance(element, basestring): - continue - stringRec.fromXML(name, attrs, content, ttFont) - stringRec.stringLen = len(stringRec.string) - else: - setattr(self, name, safeEval(attrs["value"])) - - def compile(self, parentTable): - data = sstruct.pack(METAGlyphRecordFormat, self) - if parentTable.metaFlags == 0: - datum = 
struct.pack(">H", self.offset) - elif parentTable.metaFlags == 1: - datum = struct.pack(">L", self.offset) - data = data + datum - return data - - def __repr__(self): - return "GlyphRecord[ glyphID: " + str(self.glyphID) + ", nMetaEntry: " + str(self.nMetaEntry) + ", offset: " + str(self.offset) + " ]" - -# XXX The following two functions are really broken around UTF-8 vs Unicode - -def mapXMLToUTF8(string): - uString = unicode() - strLen = len(string) - i = 0 - while i < strLen: - prefixLen = 0 - if (string[i:i+3] == "&#x"): - prefixLen = 3 - elif (string[i:i+7] == "&#x"): - prefixLen = 7 - if prefixLen: - i = i+prefixLen - j= i - while string[i] != ";": - i = i+1 - valStr = string[j:i] - - uString = uString + unichr(eval('0x' + valStr)) - else: - uString = uString + unichr(byteord(string[i])) - i = i +1 - - return uString.encode('utf_8') - - -def mapUTF8toXML(string): - uString = string.decode('utf_8') - string = "" - for uChar in uString: - i = ord(uChar) - if (i < 0x80) and (i > 0x1F): - string = string + uChar - else: - string = string + "&#x" + hex(i)[2:] + ";" - return string - - -class StringRecord(object): - - def toXML(self, writer, ttFont): - writer.begintag("StringRecord") - writer.newline() - writer.simpletag("labelID", value=self.labelID) - writer.comment(getLabelString(self.labelID)) - writer.newline() - writer.newline() - writer.simpletag("string", value=mapUTF8toXML(self.string)) - writer.newline() - writer.endtag("StringRecord") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - value = attrs["value"] - if name == "string": - self.string = mapXMLToUTF8(value) - else: - setattr(self, name, safeEval(value)) - - def compile(self, parentTable): - data = sstruct.pack(METAStringRecordFormat, self) - if parentTable.metaFlags == 0: - datum = struct.pack(">H", self.offset) - elif parentTable.metaFlags == 1: - datum = 
struct.pack(">L", self.offset) - data = data + datum - return data - - def __repr__(self): - return "StringRecord [ labelID: " + str(self.labelID) + " aka " + getLabelString(self.labelID) \ - + ", offset: " + str(self.offset) + ", length: " + str(self.stringLen) + ", string: " +self.string + " ]" diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_m_e_t_a_test.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_m_e_t_a_test.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_m_e_t_a_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_m_e_t_a_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,54 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.textTools import deHexStr -from fontTools.misc.xmlWriter import XMLWriter -from fontTools.ttLib import TTLibError -from fontTools.ttLib.tables._m_e_t_a import table__m_e_t_a -import unittest - - -# From a real font on MacOS X, but substituted 'bild' tag by 'TEST', -# and shortened the payload. Note that from the 'meta' spec, one would -# expect that header.dataOffset is 0x0000001C (pointing to the beginning -# of the data section) and that dataMap[0].dataOffset should be 0 (relative -# to the beginning of the data section). However, in the fonts that Apple -# ships on MacOS X 10.10.4, dataMap[0].dataOffset is actually relative -# to the beginning of the 'meta' table, i.e. 0x0000001C again. While the -# following test data is invalid according to the 'meta' specification, -# it is reflecting the 'meta' table structure in all Apple-supplied fonts. 
-META_DATA = deHexStr( - "00 00 00 01 00 00 00 00 00 00 00 1C 00 00 00 01 " - "54 45 53 54 00 00 00 1C 00 00 00 04 CA FE BE EF") - - -class MetaTableTest(unittest.TestCase): - def test_decompile(self): - table = table__m_e_t_a() - table.decompile(META_DATA, ttFont={"meta": table}) - self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) - - def test_compile(self): - table = table__m_e_t_a() - table.data["TEST"] = b"\xCA\xFE\xBE\xEF" - self.assertEqual(META_DATA, table.compile(ttFont={"meta": table})) - - def test_toXML(self): - table = table__m_e_t_a() - table.data["TEST"] = b"\xCA\xFE\xBE\xEF" - writer = XMLWriter(BytesIO()) - table.toXML(writer, {"meta": table}) - xml = writer.file.getvalue().decode("utf-8") - self.assertEqual([ - '', - 'cafebeef', - '' - ], [line.strip() for line in xml.splitlines()][1:]) - - def test_fromXML(self): - table = table__m_e_t_a() - table.fromXML("hexdata", {"tag": "TEST"}, ['cafebeef'], ttFont=None) - self.assertEqual({"TEST": b"\xCA\xFE\xBE\xEF"}, table.data) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_n_a_m_e.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_n_a_m_e.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_n_a_m_e.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_n_a_m_e.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,262 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval -from fontTools.misc.encodingTools import getEncoding -from . 
import DefaultTable -import struct - -nameRecordFormat = """ - > # big endian - platformID: H - platEncID: H - langID: H - nameID: H - length: H - offset: H -""" - -nameRecordSize = sstruct.calcsize(nameRecordFormat) - - -class table__n_a_m_e(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - format, n, stringOffset = struct.unpack(">HHH", data[:6]) - expectedStringOffset = 6 + n * nameRecordSize - if stringOffset != expectedStringOffset: - # XXX we need a warn function - print("Warning: 'name' table stringOffset incorrect. Expected: %s; Actual: %s" % (expectedStringOffset, stringOffset)) - stringData = data[stringOffset:] - data = data[6:] - self.names = [] - for i in range(n): - if len(data) < 12: - # compensate for buggy font - break - name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord()) - name.string = stringData[name.offset:name.offset+name.length] - assert len(name.string) == name.length - #if (name.platEncID, name.platformID) in ((0, 0), (1, 3)): - # if len(name.string) % 2: - # print "2-byte string doesn't have even length!" 
- # print name.__dict__ - del name.offset, name.length - self.names.append(name) - - def compile(self, ttFont): - if not hasattr(self, "names"): - # only happens when there are NO name table entries read - # from the TTX file - self.names = [] - names = self.names - names.sort() # sort according to the spec; see NameRecord.__lt__() - stringData = b"" - format = 0 - n = len(names) - stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat) - data = struct.pack(">HHH", format, n, stringOffset) - lastoffset = 0 - done = {} # remember the data so we can reuse the "pointers" - for name in names: - string = name.toBytes() - if string in done: - name.offset, name.length = done[string] - else: - name.offset, name.length = done[string] = len(stringData), len(string) - stringData = bytesjoin([stringData, string]) - data = data + sstruct.pack(nameRecordFormat, name) - return data + stringData - - def toXML(self, writer, ttFont): - for name in self.names: - name.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name != "namerecord": - return # ignore unknown tags - if not hasattr(self, "names"): - self.names = [] - name = NameRecord() - self.names.append(name) - name.fromXML(name, attrs, content, ttFont) - - def getName(self, nameID, platformID, platEncID, langID=None): - for namerecord in self.names: - if ( namerecord.nameID == nameID and - namerecord.platformID == platformID and - namerecord.platEncID == platEncID): - if langID is None or namerecord.langID == langID: - return namerecord - return None # not found - - def getDebugName(self, nameID): - englishName = someName = None - for name in self.names: - if name.nameID != nameID: - continue - try: - unistr = name.toUnicode() - except UnicodeDecodeError: - continue - - someName = unistr - if (name.platformID, name.langID) in ((1, 0), (3, 0x409)): - englishName = unistr - break - if englishName: - return englishName - elif someName: - return someName - else: - return None - -class 
NameRecord(object): - - def getEncoding(self, default='ascii'): - """Returns the Python encoding name for this name entry based on its platformID, - platEncID, and langID. If encoding for these values is not known, by default - 'ascii' is returned. That can be overriden by passing a value to the default - argument. - """ - return getEncoding(self.platformID, self.platEncID, self.langID, default) - - def encodingIsUnicodeCompatible(self): - return self.getEncoding(None) in ['utf_16_be', 'ucs2be', 'ascii', 'latin1'] - - def __str__(self): - try: - return self.toUnicode() - except UnicodeDecodeError: - return str(self.string) - - def isUnicode(self): - return (self.platformID == 0 or - (self.platformID == 3 and self.platEncID in [0, 1, 10])) - - def toUnicode(self, errors='strict'): - """ - If self.string is a Unicode string, return it; otherwise try decoding the - bytes in self.string to a Unicode string using the encoding of this - entry as returned by self.getEncoding(); Note that self.getEncoding() - returns 'ascii' if the encoding is unknown to the library. - - Certain heuristics are performed to recover data from bytes that are - ill-formed in the chosen encoding, or that otherwise look misencoded - (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE - but marked otherwise). If the bytes are ill-formed and the heuristics fail, - the error is handled according to the errors parameter to this function, which is - passed to the underlying decode() function; by default it throws a - UnicodeDecodeError exception. - - Note: The mentioned heuristics mean that roundtripping a font to XML and back - to binary might recover some misencoded data whereas just loading the font - and saving it back will not change them. 
- """ - def isascii(b): - return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D] - encoding = self.getEncoding() - string = self.string - - if encoding == 'utf_16_be' and len(string) % 2 == 1: - # Recover badly encoded UTF-16 strings that have an odd number of bytes: - # - If the last byte is zero, drop it. Otherwise, - # - If all the odd bytes are zero and all the even bytes are ASCII, - # prepend one zero byte. Otherwise, - # - If first byte is zero and all other bytes are ASCII, insert zero - # bytes between consecutive ASCII bytes. - # - # (Yes, I've seen all of these in the wild... sigh) - if byteord(string[-1]) == 0: - string = string[:-1] - elif all(byteord(b) == 0 if i % 2 else isascii(byteord(b)) for i,b in enumerate(string)): - string = b'\0' + string - elif byteord(string[0]) == 0 and all(isascii(byteord(b)) for b in string[1:]): - string = bytesjoin(b'\0'+bytechr(byteord(b)) for b in string[1:]) - - string = tounicode(string, encoding=encoding, errors=errors) - - # If decoded strings still looks like UTF-16BE, it suggests a double-encoding. - # Fix it up. - if all(ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i,c in enumerate(string)): - # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text, - # narrow it down. - string = ''.join(c for c in string[1::2]) - - return string - - def toBytes(self, errors='strict'): - """ If self.string is a bytes object, return it; otherwise try encoding - the Unicode string in self.string to bytes using the encoding of this - entry as returned by self.getEncoding(); Note that self.getEncoding() - returns 'ascii' if the encoding is unknown to the library. - - If the Unicode string cannot be encoded to bytes in the chosen encoding, - the error is handled according to the errors parameter to this function, - which is passed to the underlying encode() function; by default it throws a - UnicodeEncodeError exception. 
- """ - return tobytes(self.string, encoding=self.getEncoding(), errors=errors) - - def toXML(self, writer, ttFont): - try: - unistr = self.toUnicode() - except UnicodeDecodeError: - unistr = None - attrs = [ - ("nameID", self.nameID), - ("platformID", self.platformID), - ("platEncID", self.platEncID), - ("langID", hex(self.langID)), - ] - - if unistr is None or not self.encodingIsUnicodeCompatible(): - attrs.append(("unicode", unistr is not None)) - - writer.begintag("namerecord", attrs) - writer.newline() - if unistr is not None: - writer.write(unistr) - else: - writer.write8bit(self.string) - writer.newline() - writer.endtag("namerecord") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.nameID = safeEval(attrs["nameID"]) - self.platformID = safeEval(attrs["platformID"]) - self.platEncID = safeEval(attrs["platEncID"]) - self.langID = safeEval(attrs["langID"]) - s = strjoin(content).strip() - encoding = self.getEncoding() - if self.encodingIsUnicodeCompatible() or safeEval(attrs.get("unicode", "False")): - self.string = s.encode(encoding) - else: - # This is the inverse of write8bit... - self.string = s.encode("latin1") - - def __lt__(self, other): - if type(self) != type(other): - return NotImplemented - - # implemented so that list.sort() sorts according to the spec. 
- selfTuple = ( - getattr(self, "platformID", None), - getattr(self, "platEncID", None), - getattr(self, "langID", None), - getattr(self, "nameID", None), - getattr(self, "string", None), - ) - otherTuple = ( - getattr(other, "platformID", None), - getattr(other, "platEncID", None), - getattr(other, "langID", None), - getattr(other, "nameID", None), - getattr(other, "string", None), - ) - return selfTuple < otherTuple - - def __repr__(self): - return "" % ( - self.nameID, self.platformID, self.langID) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_n_a_m_e_test.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_n_a_m_e_test.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_n_a_m_e_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_n_a_m_e_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,143 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools.misc.xmlWriter import XMLWriter -import unittest -from ._n_a_m_e import table__n_a_m_e, NameRecord - - -def makeName(text, nameID, platformID, platEncID, langID): - name = NameRecord() - name.nameID, name.platformID, name.platEncID, name.langID = ( - nameID, platformID, platEncID, langID) - name.string = tobytes(text, encoding=name.getEncoding()) - return name - - -class NameTableTest(unittest.TestCase): - - def test_getDebugName(self): - table = table__n_a_m_e() - table.names = [ - makeName("Bold", 258, 1, 0, 0), # Mac, MacRoman, English - makeName("Gras", 258, 1, 0, 1), # Mac, MacRoman, French - makeName("Fett", 258, 1, 0, 2), # Mac, MacRoman, German - makeName("Sem Fracções", 292, 1, 0, 8) # Mac, MacRoman, Portuguese - ] - self.assertEqual("Bold", table.getDebugName(258)) - self.assertEqual("Sem Fracções", table.getDebugName(292)) - self.assertEqual(None, table.getDebugName(999)) - - -class NameRecordTest(unittest.TestCase): - - def 
test_toUnicode_utf16be(self): - name = makeName("Foo Bold", 111, 0, 2, 7) - self.assertEqual("utf_16_be", name.getEncoding()) - self.assertEqual("Foo Bold", name.toUnicode()) - - def test_toUnicode_macroman(self): - name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman - self.assertEqual("mac_roman", name.getEncoding()) - self.assertEqual("Foo Italic", name.toUnicode()) - - def test_toUnicode_macromanian(self): - name = makeName(b"Foo Italic\xfb", 222, 1, 0, 37) # Mac Romanian - self.assertEqual("mac_romanian", name.getEncoding()) - self.assertEqual("Foo Italic"+unichr(0x02DA), name.toUnicode()) - - def test_toUnicode_UnicodeDecodeError(self): - name = makeName(b"\1", 111, 0, 2, 7) - self.assertEqual("utf_16_be", name.getEncoding()) - self.assertRaises(UnicodeDecodeError, name.toUnicode) - - def toXML(self, name): - writer = XMLWriter(BytesIO()) - name.toXML(writer, ttFont=None) - xml = writer.file.getvalue().decode("utf_8").strip() - return xml.split(writer.newlinestr.decode("utf_8"))[1:] - - def test_toXML_utf16be(self): - name = makeName("Foo Bold", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Foo Bold', - '' - ], self.toXML(name)) - - def test_toXML_utf16be_odd_length1(self): - name = makeName(b"\0F\0o\0o\0", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Foo', - '' - ], self.toXML(name)) - - def test_toXML_utf16be_odd_length2(self): - name = makeName(b"\0Fooz", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Fooz', - '' - ], self.toXML(name)) - - def test_toXML_utf16be_double_encoded(self): - name = makeName(b"\0\0\0F\0\0\0o", 111, 0, 2, 7) - self.assertEqual([ - '', - ' Fo', - '' - ], self.toXML(name)) - - def test_toXML_macroman(self): - name = makeName("Foo Italic", 222, 1, 0, 7) # MacRoman - self.assertEqual([ - '', - ' Foo Italic', - '' - ], self.toXML(name)) - - def test_toXML_macroman_actual_utf16be(self): - name = makeName("\0F\0o\0o", 222, 1, 0, 7) - self.assertEqual([ - '', - ' Foo', - '' - ], self.toXML(name)) - - def 
test_toXML_unknownPlatEncID_nonASCII(self): - name = makeName(b"B\x8arli", 333, 1, 9876, 7) # Unknown Mac encodingID - self.assertEqual([ - '', - ' BŠrli', - '' - ], self.toXML(name)) - - def test_toXML_unknownPlatEncID_ASCII(self): - name = makeName(b"Barli", 333, 1, 9876, 7) # Unknown Mac encodingID - self.assertEqual([ - '', - ' Barli', - '' - ], self.toXML(name)) - - def test_encoding_macroman_misc(self): - name = makeName('', 123, 1, 0, 17) # Mac Turkish - self.assertEqual(name.getEncoding(), "mac_turkish") - name.langID = 37 - self.assertEqual(name.getEncoding(), "mac_romanian") - name.langID = 45 # Other - self.assertEqual(name.getEncoding(), "mac_roman") - - def test_extended_mac_encodings(self): - name = makeName(b'\xfe', 123, 1, 1, 0) # Mac Japanese - self.assertEqual(name.toUnicode(), unichr(0x2122)) - - def test_extended_unknown(self): - name = makeName(b'\xfe', 123, 10, 11, 12) - self.assertEqual(name.getEncoding(), "ascii") - self.assertEqual(name.getEncoding(None), None) - self.assertEqual(name.getEncoding(default=None), None) - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/O_S_2f_2.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/O_S_2f_2.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/O_S_2f_2.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/O_S_2f_2.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,230 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval, num2binary, binary2num -from . 
import DefaultTable -import warnings - - -# panose classification - -panoseFormat = """ - bFamilyType: B - bSerifStyle: B - bWeight: B - bProportion: B - bContrast: B - bStrokeVariation: B - bArmStyle: B - bLetterForm: B - bMidline: B - bXHeight: B -""" - -class Panose(object): - - def toXML(self, writer, ttFont): - formatstring, names, fixes = sstruct.getformat(panoseFormat) - for name in names: - writer.simpletag(name, value=getattr(self, name)) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - setattr(self, name, safeEval(attrs["value"])) - - -# 'sfnt' OS/2 and Windows Metrics table - 'OS/2' - -OS2_format_0 = """ - > # big endian - version: H # version - xAvgCharWidth: h # average character width - usWeightClass: H # degree of thickness of strokes - usWidthClass: H # aspect ratio - fsType: h # type flags - ySubscriptXSize: h # subscript horizontal font size - ySubscriptYSize: h # subscript vertical font size - ySubscriptXOffset: h # subscript x offset - ySubscriptYOffset: h # subscript y offset - ySuperscriptXSize: h # superscript horizontal font size - ySuperscriptYSize: h # superscript vertical font size - ySuperscriptXOffset: h # superscript x offset - ySuperscriptYOffset: h # superscript y offset - yStrikeoutSize: h # strikeout size - yStrikeoutPosition: h # strikeout position - sFamilyClass: h # font family class and subclass - panose: 10s # panose classification number - ulUnicodeRange1: L # character range - ulUnicodeRange2: L # character range - ulUnicodeRange3: L # character range - ulUnicodeRange4: L # character range - achVendID: 4s # font vendor identification - fsSelection: H # font selection flags - usFirstCharIndex: H # first unicode character index - usLastCharIndex: H # last unicode character index - sTypoAscender: h # typographic ascender - sTypoDescender: h # typographic descender - sTypoLineGap: h # typographic line gap - usWinAscent: H # Windows ascender - usWinDescent: H # Windows descender -""" - 
-OS2_format_1_addition = """ - ulCodePageRange1: L - ulCodePageRange2: L -""" - -OS2_format_2_addition = OS2_format_1_addition + """ - sxHeight: h - sCapHeight: h - usDefaultChar: H - usBreakChar: H - usMaxContext: H -""" - -OS2_format_5_addition = OS2_format_2_addition + """ - usLowerOpticalPointSize: H - usUpperOpticalPointSize: H -""" - -bigendian = " > # big endian\n" - -OS2_format_1 = OS2_format_0 + OS2_format_1_addition -OS2_format_2 = OS2_format_0 + OS2_format_2_addition -OS2_format_5 = OS2_format_0 + OS2_format_5_addition -OS2_format_1_addition = bigendian + OS2_format_1_addition -OS2_format_2_addition = bigendian + OS2_format_2_addition -OS2_format_5_addition = bigendian + OS2_format_5_addition - - -class table_O_S_2f_2(DefaultTable.DefaultTable): - - """the OS/2 table""" - - def decompile(self, data, ttFont): - dummy, data = sstruct.unpack2(OS2_format_0, data, self) - - if self.version == 1: - dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self) - elif self.version in (2, 3, 4): - dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self) - elif self.version == 5: - dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self) - self.usLowerOpticalPointSize /= 20 - self.usUpperOpticalPointSize /= 20 - elif self.version != 0: - from fontTools import ttLib - raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version) - if len(data): - warnings.warn("too much 'OS/2' table data") - - self.panose = sstruct.unpack(panoseFormat, self.panose, Panose()) - - def compile(self, ttFont): - self.updateFirstAndLastCharIndex(ttFont) - panose = self.panose - self.panose = sstruct.pack(panoseFormat, self.panose) - if self.version == 0: - data = sstruct.pack(OS2_format_0, self) - elif self.version == 1: - data = sstruct.pack(OS2_format_1, self) - elif self.version in (2, 3, 4): - data = sstruct.pack(OS2_format_2, self) - elif self.version == 5: - d = self.__dict__.copy() - d['usLowerOpticalPointSize'] = 
int(round(self.usLowerOpticalPointSize * 20)) - d['usUpperOpticalPointSize'] = int(round(self.usUpperOpticalPointSize * 20)) - data = sstruct.pack(OS2_format_5, d) - else: - from fontTools import ttLib - raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version) - self.panose = panose - return data - - def toXML(self, writer, ttFont): - writer.comment( - "The fields 'usFirstCharIndex' and 'usLastCharIndex'\n" - "will be recalculated by the compiler") - writer.newline() - if self.version == 1: - format = OS2_format_1 - elif self.version in (2, 3, 4): - format = OS2_format_2 - elif self.version == 5: - format = OS2_format_5 - else: - format = OS2_format_0 - formatstring, names, fixes = sstruct.getformat(format) - for name in names: - value = getattr(self, name) - if name=="panose": - writer.begintag("panose") - writer.newline() - value.toXML(writer, ttFont) - writer.endtag("panose") - elif name in ("ulUnicodeRange1", "ulUnicodeRange2", - "ulUnicodeRange3", "ulUnicodeRange4", - "ulCodePageRange1", "ulCodePageRange2"): - writer.simpletag(name, value=num2binary(value)) - elif name in ("fsType", "fsSelection"): - writer.simpletag(name, value=num2binary(value, 16)) - elif name == "achVendID": - writer.simpletag(name, value=repr(value)[1:-1]) - else: - writer.simpletag(name, value=value) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "panose": - self.panose = panose = Panose() - for element in content: - if isinstance(element, tuple): - name, attrs, content = element - panose.fromXML(name, attrs, content, ttFont) - elif name in ("ulUnicodeRange1", "ulUnicodeRange2", - "ulUnicodeRange3", "ulUnicodeRange4", - "ulCodePageRange1", "ulCodePageRange2", - "fsType", "fsSelection"): - setattr(self, name, binary2num(attrs["value"])) - elif name == "achVendID": - setattr(self, name, safeEval("'''" + attrs["value"] + "'''")) - else: - setattr(self, name, safeEval(attrs["value"])) - - def updateFirstAndLastCharIndex(self, 
ttFont): - codes = set() - for table in ttFont['cmap'].tables: - if table.isUnicode(): - codes.update(table.cmap.keys()) - if codes: - minCode = min(codes) - maxCode = max(codes) - # USHORT cannot hold codepoints greater than 0xFFFF - self.usFirstCharIndex = 0xFFFF if minCode > 0xFFFF else minCode - self.usLastCharIndex = 0xFFFF if maxCode > 0xFFFF else maxCode - - # misspelled attributes kept for legacy reasons - - @property - def usMaxContex(self): - return self.usMaxContext - - @usMaxContex.setter - def usMaxContex(self, value): - self.usMaxContext = value - - @property - def fsFirstCharIndex(self): - return self.usFirstCharIndex - - @fsFirstCharIndex.setter - def fsFirstCharIndex(self, value): - self.usFirstCharIndex = value - - @property - def fsLastCharIndex(self): - return self.usLastCharIndex - - @fsLastCharIndex.setter - def fsLastCharIndex(self, value): - self.usLastCharIndex = value diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/otBase.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/otBase.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/otBase.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/otBase.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,901 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from .DefaultTable import DefaultTable -import struct - -class OverflowErrorRecord(object): - def __init__(self, overflowTuple): - self.tableType = overflowTuple[0] - self.LookupListIndex = overflowTuple[1] - self.SubTableIndex = overflowTuple[2] - self.itemName = overflowTuple[3] - self.itemIndex = overflowTuple[4] - - def __repr__(self): - return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex)) - -class OTLOffsetOverflowError(Exception): - def __init__(self, overflowErrorRecord): - self.value = overflowErrorRecord - - def __str__(self): - return 
repr(self.value) - - -class BaseTTXConverter(DefaultTable): - - """Generic base class for TTX table converters. It functions as an - adapter between the TTX (ttLib actually) table model and the model - we use for OpenType tables, which is necessarily subtly different. - """ - - def decompile(self, data, font): - from . import otTables - cachingStats = None if True else {} - class GlobalState(object): - def __init__(self, tableType, cachingStats): - self.tableType = tableType - self.cachingStats = cachingStats - globalState = GlobalState(tableType=self.tableTag, - cachingStats=cachingStats) - reader = OTTableReader(data, globalState) - tableClass = getattr(otTables, self.tableTag) - self.table = tableClass() - self.table.decompile(reader, font) - if cachingStats: - stats = sorted([(v, k) for k, v in cachingStats.items()]) - stats.reverse() - print("cachingsstats for ", self.tableTag) - for v, k in stats: - if v < 2: - break - print(v, k) - print("---", len(stats)) - - def compile(self, font): - """ Create a top-level OTFWriter for the GPOS/GSUB table. - Call the compile method for the the table - for each 'converter' record in the table converter list - call converter's write method for each item in the value. - - For simple items, the write method adds a string to the - writer's self.items list. - - For Struct/Table/Subtable items, it add first adds new writer to the - to the writer's self.items, then calls the item's compile method. - This creates a tree of writers, rooted at the GUSB/GPOS writer, with - each writer representing a table, and the writer.items list containing - the child data strings and writers. - call the getAllData method - call _doneWriting, which removes duplicates - call _gatherTables. 
This traverses the tables, adding unique occurences to a flat list of tables - Traverse the flat list of tables, calling getDataLength on each to update their position - Traverse the flat list of tables again, calling getData each get the data in the table, now that - pos's and offset are known. - - If a lookup subtable overflows an offset, we have to start all over. - """ - class GlobalState(object): - def __init__(self, tableType): - self.tableType = tableType - globalState = GlobalState(tableType=self.tableTag) - overflowRecord = None - - while True: - try: - writer = OTTableWriter(globalState) - self.table.compile(writer, font) - return writer.getAllData() - - except OTLOffsetOverflowError as e: - - if overflowRecord == e.value: - raise # Oh well... - - overflowRecord = e.value - print("Attempting to fix OTLOffsetOverflowError", e) - lastItem = overflowRecord - - ok = 0 - if overflowRecord.itemName is None: - from .otTables import fixLookupOverFlows - ok = fixLookupOverFlows(font, overflowRecord) - else: - from .otTables import fixSubTableOverFlows - ok = fixSubTableOverFlows(font, overflowRecord) - if not ok: - raise - - def toXML(self, writer, font): - self.table.toXML2(writer, font) - - def fromXML(self, name, attrs, content, font): - from . 
import otTables - if not hasattr(self, "table"): - tableClass = getattr(otTables, self.tableTag) - self.table = tableClass() - self.table.fromXML(name, attrs, content, font) - - -class OTTableReader(object): - - """Helper class to retrieve data from an OpenType table.""" - - __slots__ = ('data', 'offset', 'pos', 'globalState', 'localState') - - def __init__(self, data, globalState={}, localState=None, offset=0): - self.data = data - self.offset = offset - self.pos = offset - self.globalState = globalState - self.localState = localState - - def advance(self, count): - self.pos += count - def seek(self, pos): - self.pos = pos - - def copy(self): - other = self.__class__(self.data, self.globalState, self.localState, self.offset) - other.pos = self.pos - return other - - def getSubReader(self, offset): - offset = self.offset + offset - cachingStats = self.globalState.cachingStats - if cachingStats is not None: - cachingStats[offset] = cachingStats.get(offset, 0) + 1 - return self.__class__(self.data, self.globalState, self.localState, offset) - - def readUShort(self): - pos = self.pos - newpos = pos + 2 - value, = struct.unpack(">H", self.data[pos:newpos]) - self.pos = newpos - return value - - def readShort(self): - pos = self.pos - newpos = pos + 2 - value, = struct.unpack(">h", self.data[pos:newpos]) - self.pos = newpos - return value - - def readLong(self): - pos = self.pos - newpos = pos + 4 - value, = struct.unpack(">l", self.data[pos:newpos]) - self.pos = newpos - return value - - def readUInt24(self): - pos = self.pos - newpos = pos + 3 - value, = struct.unpack(">l", b'\0'+self.data[pos:newpos]) - self.pos = newpos - return value - - def readULong(self): - pos = self.pos - newpos = pos + 4 - value, = struct.unpack(">L", self.data[pos:newpos]) - self.pos = newpos - return value - - def readTag(self): - pos = self.pos - newpos = pos + 4 - value = Tag(self.data[pos:newpos]) - assert len(value) == 4 - self.pos = newpos - return value - - def readData(self, count): 
- pos = self.pos - newpos = pos + count - value = self.data[pos:newpos] - self.pos = newpos - return value - - def __setitem__(self, name, value): - state = self.localState.copy() if self.localState else dict() - state[name] = value - self.localState = state - - def __getitem__(self, name): - return self.localState and self.localState[name] - - def __contains__(self, name): - return self.localState and name in self.localState - - -class OTTableWriter(object): - - """Helper class to gather and assemble data for OpenType tables.""" - - def __init__(self, globalState, localState=None): - self.items = [] - self.pos = None - self.globalState = globalState - self.localState = localState - self.longOffset = False - self.parent = None - - def __setitem__(self, name, value): - state = self.localState.copy() if self.localState else dict() - state[name] = value - self.localState = state - - def __getitem__(self, name): - return self.localState[name] - - # assembler interface - - def getAllData(self): - """Assemble all data, including all subtables.""" - self._doneWriting() - tables, extTables = self._gatherTables() - tables.reverse() - extTables.reverse() - # Gather all data in two passes: the absolute positions of all - # subtable are needed before the actual data can be assembled. 
- pos = 0 - for table in tables: - table.pos = pos - pos = pos + table.getDataLength() - - for table in extTables: - table.pos = pos - pos = pos + table.getDataLength() - - data = [] - for table in tables: - tableData = table.getData() - data.append(tableData) - - for table in extTables: - tableData = table.getData() - data.append(tableData) - - return bytesjoin(data) - - def getDataLength(self): - """Return the length of this table in bytes, without subtables.""" - l = 0 - for item in self.items: - if hasattr(item, "getData") or hasattr(item, "getCountData"): - if item.longOffset: - l = l + 4 # sizeof(ULong) - else: - l = l + 2 # sizeof(UShort) - else: - l = l + len(item) - return l - - def getData(self): - """Assemble the data for this writer/table, without subtables.""" - items = list(self.items) # make a shallow copy - pos = self.pos - numItems = len(items) - for i in range(numItems): - item = items[i] - - if hasattr(item, "getData"): - if item.longOffset: - items[i] = packULong(item.pos - pos) - else: - try: - items[i] = packUShort(item.pos - pos) - except struct.error: - # provide data to fix overflow problem. - # If the overflow is to a lookup, or from a lookup to a subtable, - # just report the current item. Otherwise... - if self.name not in [ 'LookupList', 'Lookup']: - # overflow is within a subTable. Life is more complicated. - # If we split the sub-table just before the current item, we may still suffer overflow. - # This is because duplicate table merging is done only within an Extension subTable tree; - # when we split the subtable in two, some items may no longer be duplicates. - # Get worst case by adding up all the item lengths, depth first traversal. - # and then report the first item that overflows a short. 
- def getDeepItemLength(table): - if hasattr(table, "getDataLength"): - length = 0 - for item in table.items: - length = length + getDeepItemLength(item) - else: - length = len(table) - return length - - length = self.getDataLength() - if hasattr(self, "sortCoverageLast") and item.name == "Coverage": - # Coverage is first in the item list, but last in the table list, - # The original overflow is really in the item list. Skip the Coverage - # table in the following test. - items = items[i+1:] - - for j in range(len(items)): - item = items[j] - length = length + getDeepItemLength(item) - if length > 65535: - break - overflowErrorRecord = self.getOverflowErrorRecord(item) - - raise OTLOffsetOverflowError(overflowErrorRecord) - - return bytesjoin(items) - - def __hash__(self): - # only works after self._doneWriting() has been called - return hash(self.items) - - def __ne__(self, other): - return not self.__eq__(other) - def __eq__(self, other): - if type(self) != type(other): - return NotImplemented - return self.items == other.items - - def _doneWriting(self, internedTables=None): - # Convert CountData references to data string items - # collapse duplicate table references to a unique entry - # "tables" are OTTableWriter objects. - - # For Extension Lookup types, we can - # eliminate duplicates only within the tree under the Extension Lookup, - # as offsets may exceed 64K even between Extension LookupTable subtables. 
- if internedTables is None: - internedTables = {} - items = self.items - iRange = list(range(len(items))) - - if hasattr(self, "Extension"): - newTree = 1 - else: - newTree = 0 - for i in iRange: - item = items[i] - if hasattr(item, "getCountData"): - items[i] = item.getCountData() - elif hasattr(item, "getData"): - if newTree: - item._doneWriting() - else: - item._doneWriting(internedTables) - internedItem = internedTables.get(item) - if internedItem: - items[i] = item = internedItem - else: - internedTables[item] = item - self.items = tuple(items) - - def _gatherTables(self, tables=None, extTables=None, done=None): - # Convert table references in self.items tree to a flat - # list of tables in depth-first traversal order. - # "tables" are OTTableWriter objects. - # We do the traversal in reverse order at each level, in order to - # resolve duplicate references to be the last reference in the list of tables. - # For extension lookups, duplicate references can be merged only within the - # writer tree under the extension lookup. - if tables is None: # init call for first time. - tables = [] - extTables = [] - done = {} - - done[self] = 1 - - numItems = len(self.items) - iRange = list(range(numItems)) - iRange.reverse() - - if hasattr(self, "Extension"): - appendExtensions = 1 - else: - appendExtensions = 0 - - # add Coverage table if it is sorted last. 
- sortCoverageLast = 0 - if hasattr(self, "sortCoverageLast"): - # Find coverage table - for i in range(numItems): - item = self.items[i] - if hasattr(item, "name") and (item.name == "Coverage"): - sortCoverageLast = 1 - break - if item not in done: - item._gatherTables(tables, extTables, done) - else: - # We're a new parent of item - pass - - for i in iRange: - item = self.items[i] - if not hasattr(item, "getData"): - continue - - if sortCoverageLast and (i==1) and item.name == 'Coverage': - # we've already 'gathered' it above - continue - - if appendExtensions: - assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables" - newDone = {} - item._gatherTables(extTables, None, newDone) - - elif item not in done: - item._gatherTables(tables, extTables, done) - else: - # We're a new parent of item - pass - - tables.append(self) - return tables, extTables - - # interface for gathering data, as used by table.compile() - - def getSubWriter(self): - subwriter = self.__class__(self.globalState, self.localState) - subwriter.parent = self # because some subtables have idential values, we discard - # the duplicates under the getAllData method. Hence some - # subtable writers can have more than one parent writer. - # But we just care about first one right now. 
- return subwriter - - def writeUShort(self, value): - assert 0 <= value < 0x10000 - self.items.append(struct.pack(">H", value)) - - def writeShort(self, value): - self.items.append(struct.pack(">h", value)) - - def writeUInt24(self, value): - assert 0 <= value < 0x1000000 - b = struct.pack(">L", value) - self.items.append(b[1:]) - - def writeLong(self, value): - self.items.append(struct.pack(">l", value)) - - def writeULong(self, value): - self.items.append(struct.pack(">L", value)) - - def writeTag(self, tag): - tag = Tag(tag).tobytes() - assert len(tag) == 4 - self.items.append(tag) - - def writeSubTable(self, subWriter): - self.items.append(subWriter) - - def writeCountReference(self, table, name): - ref = CountReference(table, name) - self.items.append(ref) - return ref - - def writeStruct(self, format, values): - data = struct.pack(*(format,) + values) - self.items.append(data) - - def writeData(self, data): - self.items.append(data) - - def getOverflowErrorRecord(self, item): - LookupListIndex = SubTableIndex = itemName = itemIndex = None - if self.name == 'LookupList': - LookupListIndex = item.repeatIndex - elif self.name == 'Lookup': - LookupListIndex = self.repeatIndex - SubTableIndex = item.repeatIndex - else: - itemName = item.name - if hasattr(item, 'repeatIndex'): - itemIndex = item.repeatIndex - if self.name == 'SubTable': - LookupListIndex = self.parent.repeatIndex - SubTableIndex = self.repeatIndex - elif self.name == 'ExtSubTable': - LookupListIndex = self.parent.parent.repeatIndex - SubTableIndex = self.parent.repeatIndex - else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable. 
- itemName = ".".join([self.name, item.name]) - p1 = self.parent - while p1 and p1.name not in ['ExtSubTable', 'SubTable']: - itemName = ".".join([p1.name, item.name]) - p1 = p1.parent - if p1: - if p1.name == 'ExtSubTable': - LookupListIndex = p1.parent.parent.repeatIndex - SubTableIndex = p1.parent.repeatIndex - else: - LookupListIndex = p1.parent.repeatIndex - SubTableIndex = p1.repeatIndex - - return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) ) - - -class CountReference(object): - """A reference to a Count value, not a count of references.""" - def __init__(self, table, name): - self.table = table - self.name = name - def setValue(self, value): - table = self.table - name = self.name - if table[name] is None: - table[name] = value - else: - assert table[name] == value, (name, table[name], value) - def getCountData(self): - return packUShort(self.table[self.name]) - - -def packUShort(value): - return struct.pack(">H", value) - - -def packULong(value): - assert 0 <= value < 0x100000000, value - return struct.pack(">L", value) - - -class BaseTable(object): - - """Generic base class for all OpenType (sub)tables.""" - - def __getattr__(self, attr): - reader = self.__dict__.get("reader") - if reader: - del self.reader - font = self.font - del self.font - self.decompile(reader, font) - return getattr(self, attr) - - raise AttributeError(attr) - - def ensureDecompiled(self): - reader = self.__dict__.get("reader") - if reader: - del self.reader - font = self.font - del self.font - self.decompile(reader, font) - - @classmethod - def getRecordSize(cls, reader): - totalSize = 0 - for conv in cls.converters: - size = conv.getRecordSize(reader) - if size is NotImplemented: return NotImplemented - countValue = 1 - if conv.repeat: - if conv.repeat in reader: - countValue = reader[conv.repeat] - else: - return NotImplemented - totalSize += size * countValue - return totalSize - - def getConverters(self): - return 
self.converters - - def getConverterByName(self, name): - return self.convertersByName[name] - - def decompile(self, reader, font): - self.readFormat(reader) - table = {} - self.__rawTable = table # for debugging - converters = self.getConverters() - for conv in converters: - if conv.name == "SubTable": - conv = conv.getConverter(reader.globalState.tableType, - table["LookupType"]) - if conv.name == "ExtSubTable": - conv = conv.getConverter(reader.globalState.tableType, - table["ExtensionLookupType"]) - if conv.name == "FeatureParams": - conv = conv.getConverter(reader["FeatureTag"]) - if conv.repeat: - if conv.repeat in table: - countValue = table[conv.repeat] - else: - # conv.repeat is a propagated count - countValue = reader[conv.repeat] - countValue += conv.aux - table[conv.name] = conv.readArray(reader, font, table, countValue) - else: - if conv.aux and not eval(conv.aux, None, table): - continue - table[conv.name] = conv.read(reader, font, table) - if conv.isPropagated: - reader[conv.name] = table[conv.name] - - self.postRead(table, font) - - del self.__rawTable # succeeded, get rid of debugging info - - def compile(self, writer, font): - self.ensureDecompiled() - table = self.preWrite(font) - - if hasattr(self, 'sortCoverageLast'): - writer.sortCoverageLast = 1 - - if hasattr(self.__class__, 'LookupType'): - writer['LookupType'].setValue(self.__class__.LookupType) - - self.writeFormat(writer) - for conv in self.getConverters(): - value = table.get(conv.name) - if conv.repeat: - if value is None: - value = [] - countValue = len(value) - conv.aux - if conv.repeat in table: - CountReference(table, conv.repeat).setValue(countValue) - else: - # conv.repeat is a propagated count - writer[conv.repeat].setValue(countValue) - conv.writeArray(writer, font, table, value) - elif conv.isCount: - # Special-case Count values. - # Assumption: a Count field will *always* precede - # the actual array(s). 
- # We need a default value, as it may be set later by a nested - # table. We will later store it here. - # We add a reference: by the time the data is assembled - # the Count value will be filled in. - ref = writer.writeCountReference(table, conv.name) - table[conv.name] = None - if conv.isPropagated: - writer[conv.name] = ref - elif conv.isLookupType: - ref = writer.writeCountReference(table, conv.name) - table[conv.name] = None - writer['LookupType'] = ref - else: - if conv.aux and not eval(conv.aux, None, table): - continue - conv.write(writer, font, table, value) - if conv.isPropagated: - writer[conv.name] = value - - def readFormat(self, reader): - pass - - def writeFormat(self, writer): - pass - - def postRead(self, table, font): - self.__dict__.update(table) - - def preWrite(self, font): - return self.__dict__.copy() - - def toXML(self, xmlWriter, font, attrs=None, name=None): - tableName = name if name else self.__class__.__name__ - if attrs is None: - attrs = [] - if hasattr(self, "Format"): - attrs = attrs + [("Format", self.Format)] - xmlWriter.begintag(tableName, attrs) - xmlWriter.newline() - self.toXML2(xmlWriter, font) - xmlWriter.endtag(tableName) - xmlWriter.newline() - - def toXML2(self, xmlWriter, font): - # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB). - # This is because in TTX our parent writes our main tag, and in otBase.py we - # do it ourselves. I think I'm getting schizophrenic... 
- for conv in self.getConverters(): - if conv.repeat: - value = getattr(self, conv.name) - for i in range(len(value)): - item = value[i] - conv.xmlWrite(xmlWriter, font, item, conv.name, - [("index", i)]) - else: - if conv.aux and not eval(conv.aux, None, vars(self)): - continue - value = getattr(self, conv.name) - conv.xmlWrite(xmlWriter, font, value, conv.name, []) - - def fromXML(self, name, attrs, content, font): - try: - conv = self.getConverterByName(name) - except KeyError: - raise # XXX on KeyError, raise nice error - value = conv.xmlRead(attrs, content, font) - if conv.repeat: - seq = getattr(self, conv.name, None) - if seq is None: - seq = [] - setattr(self, conv.name, seq) - seq.append(value) - else: - setattr(self, conv.name, value) - - def __ne__(self, other): - return not self.__eq__(other) - def __eq__(self, other): - if type(self) != type(other): - return NotImplemented - - self.ensureDecompiled() - other.ensureDecompiled() - - return self.__dict__ == other.__dict__ - - -class FormatSwitchingBaseTable(BaseTable): - - """Minor specialization of BaseTable, for tables that have multiple - formats, eg. CoverageFormat1 vs. CoverageFormat2.""" - - @classmethod - def getRecordSize(cls, reader): - return NotImplemented - - def getConverters(self): - return self.converters[self.Format] - - def getConverterByName(self, name): - return self.convertersByName[self.Format][name] - - def readFormat(self, reader): - self.Format = reader.readUShort() - assert self.Format != 0, (self, reader.pos, len(reader.data)) - - def writeFormat(self, writer): - writer.writeUShort(self.Format) - - def toXML(self, xmlWriter, font, attrs=None, name=None): - BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) - - -# -# Support for ValueRecords -# -# This data type is so different from all other OpenType data types that -# it requires quite a bit of code for itself. It even has special support -# in OTTableReader and OTTableWriter... 
-# - -valueRecordFormat = [ -# Mask Name isDevice signed - (0x0001, "XPlacement", 0, 1), - (0x0002, "YPlacement", 0, 1), - (0x0004, "XAdvance", 0, 1), - (0x0008, "YAdvance", 0, 1), - (0x0010, "XPlaDevice", 1, 0), - (0x0020, "YPlaDevice", 1, 0), - (0x0040, "XAdvDevice", 1, 0), - (0x0080, "YAdvDevice", 1, 0), -# reserved: - (0x0100, "Reserved1", 0, 0), - (0x0200, "Reserved2", 0, 0), - (0x0400, "Reserved3", 0, 0), - (0x0800, "Reserved4", 0, 0), - (0x1000, "Reserved5", 0, 0), - (0x2000, "Reserved6", 0, 0), - (0x4000, "Reserved7", 0, 0), - (0x8000, "Reserved8", 0, 0), -] - -def _buildDict(): - d = {} - for mask, name, isDevice, signed in valueRecordFormat: - d[name] = mask, isDevice, signed - return d - -valueRecordFormatDict = _buildDict() - - -class ValueRecordFactory(object): - - """Given a format code, this object convert ValueRecords.""" - - def __init__(self, valueFormat): - format = [] - for mask, name, isDevice, signed in valueRecordFormat: - if valueFormat & mask: - format.append((name, isDevice, signed)) - self.format = format - - def __len__(self): - return len(self.format) - - def readValueRecord(self, reader, font): - format = self.format - if not format: - return None - valueRecord = ValueRecord() - for name, isDevice, signed in format: - if signed: - value = reader.readShort() - else: - value = reader.readUShort() - if isDevice: - if value: - from . 
import otTables - subReader = reader.getSubReader(value) - value = getattr(otTables, name)() - value.decompile(subReader, font) - else: - value = None - setattr(valueRecord, name, value) - return valueRecord - - def writeValueRecord(self, writer, font, valueRecord): - for name, isDevice, signed in self.format: - value = getattr(valueRecord, name, 0) - if isDevice: - if value: - subWriter = writer.getSubWriter() - writer.writeSubTable(subWriter) - value.compile(subWriter, font) - else: - writer.writeUShort(0) - elif signed: - writer.writeShort(value) - else: - writer.writeUShort(value) - - -class ValueRecord(object): - - # see ValueRecordFactory - - def getFormat(self): - format = 0 - for name in self.__dict__.keys(): - format = format | valueRecordFormatDict[name][0] - return format - - def toXML(self, xmlWriter, font, valueName, attrs=None): - if attrs is None: - simpleItems = [] - else: - simpleItems = list(attrs) - for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values - if hasattr(self, name): - simpleItems.append((name, getattr(self, name))) - deviceItems = [] - for mask, name, isDevice, format in valueRecordFormat[4:8]: # device records - if hasattr(self, name): - device = getattr(self, name) - if device is not None: - deviceItems.append((name, device)) - if deviceItems: - xmlWriter.begintag(valueName, simpleItems) - xmlWriter.newline() - for name, deviceRecord in deviceItems: - if deviceRecord is not None: - deviceRecord.toXML(xmlWriter, font) - xmlWriter.endtag(valueName) - xmlWriter.newline() - else: - xmlWriter.simpletag(valueName, simpleItems) - xmlWriter.newline() - - def fromXML(self, name, attrs, content, font): - from . 
import otTables - for k, v in attrs.items(): - setattr(self, k, int(v)) - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - value = getattr(otTables, name)() - for elem2 in content: - if not isinstance(elem2, tuple): - continue - name2, attrs2, content2 = elem2 - value.fromXML(name2, attrs2, content2, font) - setattr(self, name, value) - - def __ne__(self, other): - return not self.__eq__(other) - def __eq__(self, other): - if type(self) != type(other): - return NotImplemented - return self.__dict__ == other.__dict__ diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/otConverters.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/otConverters.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/otConverters.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/otConverters.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,481 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi -from .otBase import ValueRecordFactory -import array - - -def buildConverters(tableSpec, tableNamespace): - """Given a table spec from otData.py, build a converter object for each - field of the table. 
This is called for each table in otData.py, and - the results are assigned to the corresponding class in otTables.py.""" - converters = [] - convertersByName = {} - for tp, name, repeat, aux, descr in tableSpec: - tableName = name - if name.startswith("ValueFormat"): - assert tp == "uint16" - converterClass = ValueFormat - elif name.endswith("Count") or name.endswith("LookupType"): - assert tp == "uint16" - converterClass = ComputedUShort - elif name == "SubTable": - converterClass = SubTable - elif name == "ExtSubTable": - converterClass = ExtSubTable - elif name == "FeatureParams": - converterClass = FeatureParams - else: - if not tp in converterMapping: - tableName = tp - converterClass = Struct - else: - converterClass = converterMapping[tp] - tableClass = tableNamespace.get(tableName) - conv = converterClass(name, repeat, aux, tableClass) - if name in ["SubTable", "ExtSubTable"]: - conv.lookupTypes = tableNamespace['lookupTypes'] - # also create reverse mapping - for t in conv.lookupTypes.values(): - for cls in t.values(): - convertersByName[cls.__name__] = Table(name, repeat, aux, cls) - if name == "FeatureParams": - conv.featureParamTypes = tableNamespace['featureParamTypes'] - conv.defaultFeatureParams = tableNamespace['FeatureParams'] - for cls in conv.featureParamTypes.values(): - convertersByName[cls.__name__] = Table(name, repeat, aux, cls) - converters.append(conv) - assert name not in convertersByName, name - convertersByName[name] = conv - return converters, convertersByName - - -class _MissingItem(tuple): - __slots__ = () - -try: - from collections import UserList -except: - from UserList import UserList - -class _LazyList(UserList): - - def __getslice__(self, i, j): - return self.__getitem__(slice(i, j)) - def __getitem__(self, k): - if isinstance(k, slice): - indices = range(*k.indices(len(self))) - return [self[i] for i in indices] - item = self.data[k] - if isinstance(item, _MissingItem): - self.reader.seek(self.pos + item[0] * self.recordSize) 
- item = self.conv.read(self.reader, self.font, {}) - self.data[k] = item - return item - -class BaseConverter(object): - - """Base class for converter objects. Apart from the constructor, this - is an abstract class.""" - - def __init__(self, name, repeat, aux, tableClass): - self.name = name - self.repeat = repeat - self.aux = aux - self.tableClass = tableClass - self.isCount = name.endswith("Count") - self.isLookupType = name.endswith("LookupType") - self.isPropagated = name in ["ClassCount", "Class2Count", "FeatureTag", "SettingsCount", "AxisCount"] - - def readArray(self, reader, font, tableDict, count): - """Read an array of values from the reader.""" - lazy = font.lazy and count > 8 - if lazy: - recordSize = self.getRecordSize(reader) - if recordSize is NotImplemented: - lazy = False - if not lazy: - l = [] - for i in range(count): - l.append(self.read(reader, font, tableDict)) - return l - else: - l = _LazyList() - l.reader = reader.copy() - l.pos = l.reader.pos - l.font = font - l.conv = self - l.recordSize = recordSize - l.extend(_MissingItem([i]) for i in range(count)) - reader.advance(count * recordSize) - return l - - def getRecordSize(self, reader): - if hasattr(self, 'staticSize'): return self.staticSize - return NotImplemented - - def read(self, reader, font, tableDict): - """Read a value from the reader.""" - raise NotImplementedError(self) - - def writeArray(self, writer, font, tableDict, values): - for i in range(len(values)): - self.write(writer, font, tableDict, values[i], i) - - def write(self, writer, font, tableDict, value, repeatIndex=None): - """Write a value to the writer.""" - raise NotImplementedError(self) - - def xmlRead(self, attrs, content, font): - """Read a value from XML.""" - raise NotImplementedError(self) - - def xmlWrite(self, xmlWriter, font, value, name, attrs): - """Write a value to XML.""" - raise NotImplementedError(self) - - -class SimpleValue(BaseConverter): - def xmlWrite(self, xmlWriter, font, value, name, attrs): - 
xmlWriter.simpletag(name, attrs + [("value", value)]) - xmlWriter.newline() - def xmlRead(self, attrs, content, font): - return attrs["value"] - -class IntValue(SimpleValue): - def xmlRead(self, attrs, content, font): - return int(attrs["value"], 0) - -class Long(IntValue): - staticSize = 4 - def read(self, reader, font, tableDict): - return reader.readLong() - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.writeLong(value) - -class ULong(IntValue): - staticSize = 4 - def read(self, reader, font, tableDict): - return reader.readULong() - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.writeULong(value) - -class Short(IntValue): - staticSize = 2 - def read(self, reader, font, tableDict): - return reader.readShort() - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.writeShort(value) - -class UShort(IntValue): - staticSize = 2 - def read(self, reader, font, tableDict): - return reader.readUShort() - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.writeUShort(value) - -class UInt24(IntValue): - staticSize = 3 - def read(self, reader, font, tableDict): - return reader.readUInt24() - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.writeUInt24(value) - -class ComputedUShort(UShort): - def xmlWrite(self, xmlWriter, font, value, name, attrs): - xmlWriter.comment("%s=%s" % (name, value)) - xmlWriter.newline() - -class Tag(SimpleValue): - staticSize = 4 - def read(self, reader, font, tableDict): - return reader.readTag() - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.writeTag(value) - -class GlyphID(SimpleValue): - staticSize = 2 - def readArray(self, reader, font, tableDict, count): - glyphOrder = font.getGlyphOrder() - gids = array.array("H", reader.readData(2 * count)) - if sys.byteorder != "big": - gids.byteswap() - try: - l = [glyphOrder[gid] for gid in gids] - except IndexError: - # Slower, but will 
not throw an IndexError on an invalid glyph id. - l = [font.getGlyphName(gid) for gid in gids] - return l - def read(self, reader, font, tableDict): - return font.getGlyphName(reader.readUShort()) - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.writeUShort(font.getGlyphID(value)) - -class FloatValue(SimpleValue): - def xmlRead(self, attrs, content, font): - return float(attrs["value"]) - -class DeciPoints(FloatValue): - staticSize = 2 - def read(self, reader, font, tableDict): - return reader.readUShort() / 10 - - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.writeUShort(int(round(value * 10))) - -class Fixed(FloatValue): - staticSize = 4 - def read(self, reader, font, tableDict): - return fi2fl(reader.readLong(), 16) - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.writeLong(fl2fi(value, 16)) - -class Version(BaseConverter): - staticSize = 4 - def read(self, reader, font, tableDict): - value = reader.readLong() - assert (value >> 16) == 1, "Unsupported version 0x%08x" % value - return fi2fl(value, 16) - def write(self, writer, font, tableDict, value, repeatIndex=None): - if value < 0x10000: - value = fl2fi(value, 16) - value = int(round(value)) - assert (value >> 16) == 1, "Unsupported version 0x%08x" % value - writer.writeLong(value) - def xmlRead(self, attrs, content, font): - value = attrs["value"] - value = float(int(value, 0)) if value.startswith("0") else float(value) - if value >= 0x10000: - value = fi2fl(value, 16) - return value - def xmlWrite(self, xmlWriter, font, value, name, attrs): - if value >= 0x10000: - value = fi2fl(value, 16) - if value % 1 != 0: - # Write as hex - value = "0x%08x" % fl2fi(value, 16) - xmlWriter.simpletag(name, attrs + [("value", value)]) - xmlWriter.newline() - - -class Struct(BaseConverter): - - def getRecordSize(self, reader): - return self.tableClass and self.tableClass.getRecordSize(reader) - - def read(self, reader, font, tableDict): - 
table = self.tableClass() - table.decompile(reader, font) - return table - - def write(self, writer, font, tableDict, value, repeatIndex=None): - value.compile(writer, font) - - def xmlWrite(self, xmlWriter, font, value, name, attrs): - if value is None: - if attrs: - # If there are attributes (probably index), then - # don't drop this even if it's NULL. It will mess - # up the array indices of the containing element. - xmlWriter.simpletag(name, attrs + [("empty", 1)]) - xmlWriter.newline() - else: - pass # NULL table, ignore - else: - value.toXML(xmlWriter, font, attrs, name=name) - - def xmlRead(self, attrs, content, font): - if "empty" in attrs and safeEval(attrs["empty"]): - return None - table = self.tableClass() - Format = attrs.get("Format") - if Format is not None: - table.Format = int(Format) - for element in content: - if isinstance(element, tuple): - name, attrs, content = element - table.fromXML(name, attrs, content, font) - else: - pass - return table - - def __repr__(self): - return "Struct of " + repr(self.tableClass) - - -class Table(Struct): - - longOffset = False - staticSize = 2 - - def readOffset(self, reader): - return reader.readUShort() - - def writeNullOffset(self, writer): - if self.longOffset: - writer.writeULong(0) - else: - writer.writeUShort(0) - - def read(self, reader, font, tableDict): - offset = self.readOffset(reader) - if offset == 0: - return None - if offset <= 3: - # XXX hack to work around buggy pala.ttf - print("*** Warning: offset is not 0, yet suspiciously low (%s). 
table: %s" \ - % (offset, self.tableClass.__name__)) - return None - table = self.tableClass() - reader = reader.getSubReader(offset) - if font.lazy: - table.reader = reader - table.font = font - else: - table.decompile(reader, font) - return table - - def write(self, writer, font, tableDict, value, repeatIndex=None): - if value is None: - self.writeNullOffset(writer) - else: - subWriter = writer.getSubWriter() - subWriter.longOffset = self.longOffset - subWriter.name = self.name - if repeatIndex is not None: - subWriter.repeatIndex = repeatIndex - writer.writeSubTable(subWriter) - value.compile(subWriter, font) - -class LTable(Table): - - longOffset = True - staticSize = 4 - - def readOffset(self, reader): - return reader.readULong() - - -class SubTable(Table): - def getConverter(self, tableType, lookupType): - tableClass = self.lookupTypes[tableType][lookupType] - return self.__class__(self.name, self.repeat, self.aux, tableClass) - - -class ExtSubTable(LTable, SubTable): - - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer.Extension = 1 # actually, mere presence of the field flags it as an Ext Subtable writer. 
- Table.write(self, writer, font, tableDict, value, repeatIndex) - -class FeatureParams(Table): - def getConverter(self, featureTag): - tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams) - return self.__class__(self.name, self.repeat, self.aux, tableClass) - - -class ValueFormat(IntValue): - staticSize = 2 - def __init__(self, name, repeat, aux, tableClass): - BaseConverter.__init__(self, name, repeat, aux, tableClass) - self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1") - def read(self, reader, font, tableDict): - format = reader.readUShort() - reader[self.which] = ValueRecordFactory(format) - return format - def write(self, writer, font, tableDict, format, repeatIndex=None): - writer.writeUShort(format) - writer[self.which] = ValueRecordFactory(format) - - -class ValueRecord(ValueFormat): - def getRecordSize(self, reader): - return 2 * len(reader[self.which]) - def read(self, reader, font, tableDict): - return reader[self.which].readValueRecord(reader, font) - def write(self, writer, font, tableDict, value, repeatIndex=None): - writer[self.which].writeValueRecord(writer, font, value) - def xmlWrite(self, xmlWriter, font, value, name, attrs): - if value is None: - pass # NULL table, ignore - else: - value.toXML(xmlWriter, font, self.name, attrs) - def xmlRead(self, attrs, content, font): - from .otBase import ValueRecord - value = ValueRecord() - value.fromXML(None, attrs, content, font) - return value - - -class DeltaValue(BaseConverter): - - def read(self, reader, font, tableDict): - StartSize = tableDict["StartSize"] - EndSize = tableDict["EndSize"] - DeltaFormat = tableDict["DeltaFormat"] - assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" - nItems = EndSize - StartSize + 1 - nBits = 1 << DeltaFormat - minusOffset = 1 << nBits - mask = (1 << nBits) - 1 - signMask = 1 << (nBits - 1) - - DeltaValue = [] - tmp, shift = 0, 0 - for i in range(nItems): - if shift == 0: - tmp, shift = reader.readUShort(), 16 - shift = 
shift - nBits - value = (tmp >> shift) & mask - if value & signMask: - value = value - minusOffset - DeltaValue.append(value) - return DeltaValue - - def write(self, writer, font, tableDict, value, repeatIndex=None): - StartSize = tableDict["StartSize"] - EndSize = tableDict["EndSize"] - DeltaFormat = tableDict["DeltaFormat"] - DeltaValue = value - assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" - nItems = EndSize - StartSize + 1 - nBits = 1 << DeltaFormat - assert len(DeltaValue) == nItems - mask = (1 << nBits) - 1 - - tmp, shift = 0, 16 - for value in DeltaValue: - shift = shift - nBits - tmp = tmp | ((value & mask) << shift) - if shift == 0: - writer.writeUShort(tmp) - tmp, shift = 0, 16 - if shift != 16: - writer.writeUShort(tmp) - - def xmlWrite(self, xmlWriter, font, value, name, attrs): - xmlWriter.simpletag(name, attrs + [("value", value)]) - xmlWriter.newline() - - def xmlRead(self, attrs, content, font): - return safeEval(attrs["value"]) - - -converterMapping = { - # type class - "int16": Short, - "uint16": UShort, - "uint24": UInt24, - "uint32": ULong, - "Version": Version, - "Tag": Tag, - "GlyphID": GlyphID, - "DeciPoints": DeciPoints, - "Fixed": Fixed, - "struct": Struct, - "Offset": Table, - "LOffset": LTable, - "ValueRecord": ValueRecord, - "DeltaValue": DeltaValue, -} diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/otData.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/otData.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/otData.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/otData.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1025 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -otData = [ - - # - # common - # - - ('LookupOrder', []), - - ('ScriptList', [ - ('uint16', 'ScriptCount', None, None, 'Number of ScriptRecords'), - ('struct', 'ScriptRecord', 'ScriptCount', 0, 'Array of ScriptRecords -listed alphabetically by ScriptTag'), 
- ]), - - ('ScriptRecord', [ - ('Tag', 'ScriptTag', None, None, '4-byte ScriptTag identifier'), - ('Offset', 'Script', None, None, 'Offset to Script table-from beginning of ScriptList'), - ]), - - ('Script', [ - ('Offset', 'DefaultLangSys', None, None, 'Offset to DefaultLangSys table-from beginning of Script table-may be NULL'), - ('uint16', 'LangSysCount', None, None, 'Number of LangSysRecords for this script-excluding the DefaultLangSys'), - ('struct', 'LangSysRecord', 'LangSysCount', 0, 'Array of LangSysRecords-listed alphabetically by LangSysTag'), - ]), - - ('LangSysRecord', [ - ('Tag', 'LangSysTag', None, None, '4-byte LangSysTag identifier'), - ('Offset', 'LangSys', None, None, 'Offset to LangSys table-from beginning of Script table'), - ]), - - ('LangSys', [ - ('Offset', 'LookupOrder', None, None, '= NULL (reserved for an offset to a reordering table)'), - ('uint16', 'ReqFeatureIndex', None, None, 'Index of a feature required for this language system- if no required features = 0xFFFF'), - ('uint16', 'FeatureCount', None, None, 'Number of FeatureIndex values for this language system-excludes the required feature'), - ('uint16', 'FeatureIndex', 'FeatureCount', 0, 'Array of indices into the FeatureList-in arbitrary order'), - ]), - - ('FeatureList', [ - ('uint16', 'FeatureCount', None, None, 'Number of FeatureRecords in this table'), - ('struct', 'FeatureRecord', 'FeatureCount', 0, 'Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag'), - ]), - - ('FeatureRecord', [ - ('Tag', 'FeatureTag', None, None, '4-byte feature identification tag'), - ('Offset', 'Feature', None, None, 'Offset to Feature table-from beginning of FeatureList'), - ]), - - ('Feature', [ - ('Offset', 'FeatureParams', None, None, '= NULL (reserved for offset to FeatureParams)'), - ('uint16', 'LookupCount', None, None, 'Number of LookupList indices for this feature'), - ('uint16', 'LookupListIndex', 'LookupCount', 0, 'Array of LookupList 
indices for this feature -zero-based (first lookup is LookupListIndex = 0)'), - ]), - - ('FeatureParams', [ - ]), - - ('FeatureParamsSize', [ - ('DeciPoints', 'DesignSize', None, None, 'The design size in 720/inch units (decipoints).'), - ('uint16', 'SubfamilyID', None, None, 'Serves as an identifier that associates fonts in a subfamily.'), - ('uint16', 'SubfamilyNameID', None, None, 'Subfamily NameID.'), - ('DeciPoints', 'RangeStart', None, None, 'Small end of recommended usage range (exclusive) in 720/inch units.'), - ('DeciPoints', 'RangeEnd', None, None, 'Large end of recommended usage range (inclusive) in 720/inch units.'), - ]), - - ('FeatureParamsStylisticSet', [ - ('uint16', 'Version', None, None, 'Set to 0.'), - ('uint16', 'UINameID', None, None, 'UI NameID.'), - ]), - - ('FeatureParamsCharacterVariants', [ - ('uint16', 'Format', None, None, 'Set to 0.'), - ('uint16', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'), - ('uint16', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'), - ('uint16', 'SampleTextNameID', None, None, 'Sample text NameID.'), - ('uint16', 'NumNamedParameters', None, None, 'Number of named parameters.'), - ('uint16', 'FirstParamUILabelNameID', None, None, 'First NameID of UI feature parameters.'), - ('uint16', 'CharCount', None, None, 'Count of characters this feature provides glyph variants for.'), - ('uint24', 'Character', 'CharCount', 0, 'Unicode characters for which this feature provides glyph variants.'), - ]), - - ('LookupList', [ - ('uint16', 'LookupCount', None, None, 'Number of lookups in this table'), - ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'), - ]), - - ('Lookup', [ - ('uint16', 'LookupType', None, None, 'Different enumerations for GSUB and GPOS'), - ('uint16', 'LookupFlag', None, None, 'Lookup qualifiers'), - ('uint16', 'SubTableCount', None, None, 'Number of SubTables for this 
lookup'), - ('Offset', 'SubTable', 'SubTableCount', 0, 'Array of offsets to SubTables-from beginning of Lookup table'), - ('uint16', 'MarkFilteringSet', None, 'LookupFlag & 0x0010', 'If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.'), - ]), - - ('CoverageFormat1', [ - ('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 1'), - ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the GlyphArray'), - ('GlyphID', 'GlyphArray', 'GlyphCount', 0, 'Array of GlyphIDs-in numerical order'), - ]), - - ('CoverageFormat2', [ - ('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 2'), - ('uint16', 'RangeCount', None, None, 'Number of RangeRecords'), - ('struct', 'RangeRecord', 'RangeCount', 0, 'Array of glyph ranges-ordered by Start GlyphID'), - ]), - - ('RangeRecord', [ - ('GlyphID', 'Start', None, None, 'First GlyphID in the range'), - ('GlyphID', 'End', None, None, 'Last GlyphID in the range'), - ('uint16', 'StartCoverageIndex', None, None, 'Coverage Index of first GlyphID in range'), - ]), - - ('ClassDefFormat1', [ - ('uint16', 'ClassFormat', None, None, 'Format identifier-format = 1'), - ('GlyphID', 'StartGlyph', None, None, 'First GlyphID of the ClassValueArray'), - ('uint16', 'GlyphCount', None, None, 'Size of the ClassValueArray'), - ('uint16', 'ClassValueArray', 'GlyphCount', 0, 'Array of Class Values-one per GlyphID'), - ]), - - ('ClassDefFormat2', [ - ('uint16', 'ClassFormat', None, None, 'Format identifier-format = 2'), - ('uint16', 'ClassRangeCount', None, None, 'Number of ClassRangeRecords'), - ('struct', 'ClassRangeRecord', 'ClassRangeCount', 0, 'Array of ClassRangeRecords-ordered by Start GlyphID'), - ]), - - ('ClassRangeRecord', [ - ('GlyphID', 'Start', None, None, 'First GlyphID in the range'), - ('GlyphID', 'End', None, None, 'Last GlyphID in the range'), - ('uint16', 'Class', None, None, 'Applied 
to all glyphs in the range'), - ]), - - ('Device', [ - ('uint16', 'StartSize', None, None, 'Smallest size to correct-in ppem'), - ('uint16', 'EndSize', None, None, 'Largest size to correct-in ppem'), - ('uint16', 'DeltaFormat', None, None, 'Format of DeltaValue array data: 1, 2, or 3'), - ('DeltaValue', 'DeltaValue', '', 0, 'Array of compressed data'), - ]), - - - # - # gpos - # - - ('GPOS', [ - ('Version', 'Version', None, None, 'Version of the GPOS table-initially = 0x00010000'), - ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GPOS table'), - ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GPOS table'), - ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GPOS table'), - ]), - - ('SinglePosFormat1', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'), - ('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'), - ('ValueRecord', 'Value', None, None, 'Defines positioning value(s)-applied to all glyphs in the Coverage table'), - ]), - - ('SinglePosFormat2', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'), - ('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'), - ('uint16', 'ValueCount', None, None, 'Number of ValueRecords'), - ('ValueRecord', 'Value', 'ValueCount', 0, 'Array of ValueRecords-positioning values applied to glyphs'), - ]), - - ('PairPosFormat1', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair'), - ('uint16', 'ValueFormat1', None, None, 'Defines the types of data in ValueRecord1-for the 
first glyph in the pair -may be zero (0)'), - ('uint16', 'ValueFormat2', None, None, 'Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)'), - ('uint16', 'PairSetCount', None, None, 'Number of PairSet tables'), - ('Offset', 'PairSet', 'PairSetCount', 0, 'Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index'), - ]), - - ('PairSet', [ - ('uint16', 'PairValueCount', None, None, 'Number of PairValueRecords'), - ('struct', 'PairValueRecord', 'PairValueCount', 0, 'Array of PairValueRecords-ordered by GlyphID of the second glyph'), - ]), - - ('PairValueRecord', [ - ('GlyphID', 'SecondGlyph', None, None, 'GlyphID of second glyph in the pair-first glyph is listed in the Coverage table'), - ('ValueRecord', 'Value1', None, None, 'Positioning data for the first glyph in the pair'), - ('ValueRecord', 'Value2', None, None, 'Positioning data for the second glyph in the pair'), - ]), - - ('PairPosFormat2', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair'), - ('uint16', 'ValueFormat1', None, None, 'ValueRecord definition-for the first glyph of the pair-may be zero (0)'), - ('uint16', 'ValueFormat2', None, None, 'ValueRecord definition-for the second glyph of the pair-may be zero (0)'), - ('Offset', 'ClassDef1', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair'), - ('Offset', 'ClassDef2', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair'), - ('uint16', 'Class1Count', None, None, 'Number of classes in ClassDef1 table-includes Class0'), - ('uint16', 'Class2Count', None, None, 'Number of classes in ClassDef2 table-includes Class0'), - ('struct', 'Class1Record', 'Class1Count', 0, 'Array of Class1 records-ordered by Class1'), - ]), - - 
('Class1Record', [ - ('struct', 'Class2Record', 'Class2Count', 0, 'Array of Class2 records-ordered by Class2'), - ]), - - ('Class2Record', [ - ('ValueRecord', 'Value1', None, None, 'Positioning for first glyph-empty if ValueFormat1 = 0'), - ('ValueRecord', 'Value2', None, None, 'Positioning for second glyph-empty if ValueFormat2 = 0'), - ]), - - ('CursivePosFormat1', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of CursivePos subtable'), - ('uint16', 'EntryExitCount', None, None, 'Number of EntryExit records'), - ('struct', 'EntryExitRecord', 'EntryExitCount', 0, 'Array of EntryExit records-in Coverage Index order'), - ]), - - ('EntryExitRecord', [ - ('Offset', 'EntryAnchor', None, None, 'Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL'), - ('Offset', 'ExitAnchor', None, None, 'Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL'), - ]), - - ('MarkBasePosFormat1', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'MarkCoverage', None, None, 'Offset to MarkCoverage table-from beginning of MarkBasePos subtable'), - ('Offset', 'BaseCoverage', None, None, 'Offset to BaseCoverage table-from beginning of MarkBasePos subtable'), - ('uint16', 'ClassCount', None, None, 'Number of classes defined for marks'), - ('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkBasePos subtable'), - ('Offset', 'BaseArray', None, None, 'Offset to BaseArray table-from beginning of MarkBasePos subtable'), - ]), - - ('BaseArray', [ - ('uint16', 'BaseCount', None, None, 'Number of BaseRecords'), - ('struct', 'BaseRecord', 'BaseCount', 0, 'Array of BaseRecords-in order of BaseCoverage Index'), - ]), - - ('BaseRecord', [ - ('Offset', 'BaseAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of BaseArray table-ordered by 
class-zero-based'), - ]), - - ('MarkLigPosFormat1', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'MarkCoverage', None, None, 'Offset to Mark Coverage table-from beginning of MarkLigPos subtable'), - ('Offset', 'LigatureCoverage', None, None, 'Offset to Ligature Coverage table-from beginning of MarkLigPos subtable'), - ('uint16', 'ClassCount', None, None, 'Number of defined mark classes'), - ('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkLigPos subtable'), - ('Offset', 'LigatureArray', None, None, 'Offset to LigatureArray table-from beginning of MarkLigPos subtable'), - ]), - - ('LigatureArray', [ - ('uint16', 'LigatureCount', None, None, 'Number of LigatureAttach table offsets'), - ('Offset', 'LigatureAttach', 'LigatureCount', 0, 'Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index'), - ]), - - ('LigatureAttach', [ - ('uint16', 'ComponentCount', None, None, 'Number of ComponentRecords in this ligature'), - ('struct', 'ComponentRecord', 'ComponentCount', 0, 'Array of Component records-ordered in writing direction'), - ]), - - ('ComponentRecord', [ - ('Offset', 'LigatureAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array'), - ]), - - ('MarkMarkPosFormat1', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Mark1Coverage', None, None, 'Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable'), - ('Offset', 'Mark2Coverage', None, None, 'Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable'), - ('uint16', 'ClassCount', None, None, 'Number of Combining Mark classes defined'), - ('Offset', 'Mark1Array', None, None, 'Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable'), - 
('Offset', 'Mark2Array', None, None, 'Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable'), - ]), - - ('Mark2Array', [ - ('uint16', 'Mark2Count', None, None, 'Number of Mark2 records'), - ('struct', 'Mark2Record', 'Mark2Count', 0, 'Array of Mark2 records-in Coverage order'), - ]), - - ('Mark2Record', [ - ('Offset', 'Mark2Anchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array'), - ]), - - ('PosLookupRecord', [ - ('uint16', 'SequenceIndex', None, None, 'Index to input glyph sequence-first glyph = 0'), - ('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'), - ]), - - ('ContextPosFormat1', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'), - ('uint16', 'PosRuleSetCount', None, None, 'Number of PosRuleSet tables'), - ('Offset', 'PosRuleSet', 'PosRuleSetCount', 0, 'Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'), - ]), - - ('PosRuleSet', [ - ('uint16', 'PosRuleCount', None, None, 'Number of PosRule tables'), - ('Offset', 'PosRule', 'PosRuleCount', 0, 'Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference'), - ]), - - ('PosRule', [ - ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the Input glyph sequence'), - ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), - ('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-starting with the second glyph'), - ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'), - ]), - - ('ContextPosFormat2', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'), - ('Offset', 'ClassDef', None, None, 'Offset to 
ClassDef table-from beginning of ContextPos subtable'), - ('uint16', 'PosClassSetCount', None, None, 'Number of PosClassSet tables'), - ('Offset', 'PosClassSet', 'PosClassSetCount', 0, 'Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL'), - ]), - - ('PosClassSet', [ - ('uint16', 'PosClassRuleCount', None, None, 'Number of PosClassRule tables'), - ('Offset', 'PosClassRule', 'PosClassRuleCount', 0, 'Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference'), - ]), - - ('PosClassRule', [ - ('uint16', 'GlyphCount', None, None, 'Number of glyphs to be matched'), - ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), - ('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph sequence'), - ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'), - ]), - - ('ContextPosFormat3', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'), - ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input sequence'), - ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), - ('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage tables-from beginning of ContextPos subtable'), - ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'), - ]), - - ('ChainContextPosFormat1', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'), - ('uint16', 'ChainPosRuleSetCount', None, None, 'Number of ChainPosRuleSet tables'), - ('Offset', 'ChainPosRuleSet', 'ChainPosRuleSetCount', 0, 'Array of offsets to ChainPosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'), - ]), - - ('ChainPosRuleSet', [ - ('uint16', 'ChainPosRuleCount', None, None, 'Number of ChainPosRule 
tables'), - ('Offset', 'ChainPosRule', 'ChainPosRuleCount', 0, 'Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference'), - ]), - - ('ChainPosRule', [ - ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), - ('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, "Array of backtracking GlyphID's (to be matched before the input sequence)"), - ('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'), - ('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'), - ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'), - ('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, "Array of lookahead GlyphID's (to be matched after the input sequence)"), - ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), - ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'), - ]), - - ('ChainContextPosFormat2', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ChainContextPos subtable'), - ('Offset', 'BacktrackClassDef', None, None, 'Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable'), - ('Offset', 'InputClassDef', None, None, 'Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable'), - ('Offset', 'LookAheadClassDef', None, None, 'Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable'), - ('uint16', 'ChainPosClassSetCount', None, None, 'Number of ChainPosClassSet tables'), - ('Offset', 'ChainPosClassSet', 'ChainPosClassSetCount', 0, 'Array of offsets to 
ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL'), - ]), - - ('ChainPosClassSet', [ - ('uint16', 'ChainPosClassRuleCount', None, None, 'Number of ChainPosClassRule tables'), - ('Offset', 'ChainPosClassRule', 'ChainPosClassRuleCount', 0, 'Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference'), - ]), - - ('ChainPosClassRule', [ - ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), - ('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes(to be matched before the input sequence)'), - ('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'), - ('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes(start with second class; to be matched with the input glyph sequence)'), - ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'), - ('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes(to be matched after the input sequence)'), - ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), - ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'), - ]), - - ('ChainContextPosFormat3', [ - ('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'), - ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'), - ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'), - ('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'), - ('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence 
order'), - ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'), - ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'), - ('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'), - ('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords,in design order'), - ]), - - ('ExtensionPosFormat1', [ - ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'), - ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'), - ('LOffset', 'ExtSubTable', None, None, 'Offset to SubTable'), - ]), - - ('ValueRecord', [ - ('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'), - ('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'), - ('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'), - ('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'), - ('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'), - ('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'), - ('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'), - ('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'), - ]), - - ('AnchorFormat1', [ - ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 1'), - ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'), - ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'), - ]), - - 
('AnchorFormat2', [ - ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 2'), - ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'), - ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'), - ('uint16', 'AnchorPoint', None, None, 'Index to glyph contour point'), - ]), - - ('AnchorFormat3', [ - ('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 3'), - ('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'), - ('int16', 'YCoordinate', None, None, 'Vertical value-in design units'), - ('Offset', 'XDeviceTable', None, None, 'Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)'), - ('Offset', 'YDeviceTable', None, None, 'Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)'), - ]), - - ('MarkArray', [ - ('uint16', 'MarkCount', None, None, 'Number of MarkRecords'), - ('struct', 'MarkRecord', 'MarkCount', 0, 'Array of MarkRecords-in Coverage order'), - ]), - - ('MarkRecord', [ - ('uint16', 'Class', None, None, 'Class defined for this mark'), - ('Offset', 'MarkAnchor', None, None, 'Offset to Anchor table-from beginning of MarkArray table'), - ]), - - - # - # gsub - # - - ('GSUB', [ - ('Version', 'Version', None, None, 'Version of the GSUB table-initially set to 0x00010000'), - ('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GSUB table'), - ('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GSUB table'), - ('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GSUB table'), - ]), - - ('SingleSubstFormat1', [ - ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), - ('uint16', 'DeltaGlyphID', None, None, 'Add to original GlyphID modulo 65536 to get substitute GlyphID'), - ]), - - ('SingleSubstFormat2', [ - 
('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), - ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'), - ('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage Index'), - ]), - - ('MultipleSubstFormat1', [ - ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), - ('uint16', 'SequenceCount', None, None, 'Number of Sequence table offsets in the Sequence array'), - ('Offset', 'Sequence', 'SequenceCount', 0, 'Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index'), - ]), - - ('Sequence', [ - ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array. This should always be greater than 0.'), - ('GlyphID', 'Substitute', 'GlyphCount', 0, 'String of GlyphIDs to substitute'), - ]), - - ('AlternateSubstFormat1', [ - ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), - ('uint16', 'AlternateSetCount', None, None, 'Number of AlternateSet tables'), - ('Offset', 'AlternateSet', 'AlternateSetCount', 0, 'Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index'), - ]), - - ('AlternateSet', [ - ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Alternate array'), - ('GlyphID', 'Alternate', 'GlyphCount', 0, 'Array of alternate GlyphIDs-in arbitrary order'), - ]), - - ('LigatureSubstFormat1', [ - ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), - ('uint16', 'LigSetCount', None, None, 'Number of LigatureSet 
tables'), - ('Offset', 'LigatureSet', 'LigSetCount', 0, 'Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index'), - ]), - - ('LigatureSet', [ - ('uint16', 'LigatureCount', None, None, 'Number of Ligature tables'), - ('Offset', 'Ligature', 'LigatureCount', 0, 'Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference'), - ]), - - ('Ligature', [ - ('GlyphID', 'LigGlyph', None, None, 'GlyphID of ligature to substitute'), - ('uint16', 'CompCount', None, None, 'Number of components in the ligature'), - ('GlyphID', 'Component', 'CompCount', -1, 'Array of component GlyphIDs-start with the second component-ordered in writing direction'), - ]), - - ('SubstLookupRecord', [ - ('uint16', 'SequenceIndex', None, None, 'Index into current glyph sequence-first glyph = 0'), - ('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'), - ]), - - ('ContextSubstFormat1', [ - ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), - ('uint16', 'SubRuleSetCount', None, None, 'Number of SubRuleSet tables-must equal GlyphCount in Coverage table'), - ('Offset', 'SubRuleSet', 'SubRuleSetCount', 0, 'Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'), - ]), - - ('SubRuleSet', [ - ('uint16', 'SubRuleCount', None, None, 'Number of SubRule tables'), - ('Offset', 'SubRule', 'SubRuleCount', 0, 'Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference'), - ]), - - ('SubRule', [ - ('uint16', 'GlyphCount', None, None, 'Total number of glyphs in input glyph sequence-includes the first glyph'), - ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), - ('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-start with second glyph'), - ('struct', 'SubstLookupRecord', 
'SubstCount', 0, 'Array of SubstLookupRecords-in design order'), - ]), - - ('ContextSubstFormat2', [ - ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), - ('Offset', 'ClassDef', None, None, 'Offset to glyph ClassDef table-from beginning of Substitution table'), - ('uint16', 'SubClassSetCount', None, None, 'Number of SubClassSet tables'), - ('Offset', 'SubClassSet', 'SubClassSetCount', 0, 'Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL'), - ]), - - ('SubClassSet', [ - ('uint16', 'SubClassRuleCount', None, None, 'Number of SubClassRule tables'), - ('Offset', 'SubClassRule', 'SubClassRuleCount', 0, 'Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference'), - ]), - - ('SubClassRule', [ - ('uint16', 'GlyphCount', None, None, 'Total number of classes specified for the context in the rule-includes the first class'), - ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), - ('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph class sequence'), - ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of Substitution lookups-in design order'), - ]), - - ('ContextSubstFormat3', [ - ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 3'), - ('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input glyph sequence'), - ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), - ('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order'), - ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords-in design order'), - ]), - - ('ChainContextSubstFormat1', [ - ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 
'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), - ('uint16', 'ChainSubRuleSetCount', None, None, 'Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table'), - ('Offset', 'ChainSubRuleSet', 'ChainSubRuleSetCount', 0, 'Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'), - ]), - - ('ChainSubRuleSet', [ - ('uint16', 'ChainSubRuleCount', None, None, 'Number of ChainSubRule tables'), - ('Offset', 'ChainSubRule', 'ChainSubRuleCount', 0, 'Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference'), - ]), - - ('ChainSubRule', [ - ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), - ('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, "Array of backtracking GlyphID's (to be matched before the input sequence)"), - ('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'), - ('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'), - ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'), - ('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, "Array of lookahead GlyphID's (to be matched after the input sequence)"), - ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), - ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'), - ]), - - ('ChainContextSubstFormat2', [ - ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'), - ('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'), - ('Offset', 'BacktrackClassDef', None, None, 'Offset to glyph ClassDef table containing backtrack sequence data-from beginning 
of Substitution table'), - ('Offset', 'InputClassDef', None, None, 'Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table'), - ('Offset', 'LookAheadClassDef', None, None, 'Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table'), - ('uint16', 'ChainSubClassSetCount', None, None, 'Number of ChainSubClassSet tables'), - ('Offset', 'ChainSubClassSet', 'ChainSubClassSetCount', 0, 'Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL'), - ]), - - ('ChainSubClassSet', [ - ('uint16', 'ChainSubClassRuleCount', None, None, 'Number of ChainSubClassRule tables'), - ('Offset', 'ChainSubClassRule', 'ChainSubClassRuleCount', 0, 'Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference'), - ]), - - ('ChainSubClassRule', [ - ('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'), - ('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes(to be matched before the input sequence)'), - ('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'), - ('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes(start with second class; to be matched with the input glyph sequence)'), - ('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'), - ('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes(to be matched after the input sequence)'), - ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), - ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'), - ]), - - ('ChainContextSubstFormat3', [ - ('uint16', 'SubstFormat', None, 
None, 'Format identifier-format = 3'), - ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'), - ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'), - ('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'), - ('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence order'), - ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'), - ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'), - ('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'), - ('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords, in design order'), - ]), - - ('ExtensionSubstFormat1', [ - ('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'), - ('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. 
the extension subtable).'), - ('LOffset', 'ExtSubTable', None, None, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'), - ]), - - ('ReverseChainSingleSubstFormat1', [ - ('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'), - ('Offset', 'Coverage', None, 0, 'Offset to Coverage table - from beginning of Substitution table'), - ('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'), - ('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'), - ('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'), - ('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'), - ('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'), - ('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage index'), - ]), - - # - # gdef - # - - ('GDEF', [ - ('Version', 'Version', None, None, 'Version of the GDEF table-initially 0x00010000'), - ('Offset', 'GlyphClassDef', None, None, 'Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)'), - ('Offset', 'AttachList', None, None, 'Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)'), - ('Offset', 'LigCaretList', None, None, 'Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)'), - ('Offset', 'MarkAttachClassDef', None, None, 'Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)'), - ('Offset', 'MarkGlyphSetsDef', None, 'int(round(Version*0x10000)) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), - ]), - - ('AttachList', [ - ('Offset', 
'Coverage', None, None, 'Offset to Coverage table - from beginning of AttachList table'), - ('uint16', 'GlyphCount', None, None, 'Number of glyphs with attachment points'), - ('Offset', 'AttachPoint', 'GlyphCount', 0, 'Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order'), - ]), - - ('AttachPoint', [ - ('uint16', 'PointCount', None, None, 'Number of attachment points on this glyph'), - ('uint16', 'PointIndex', 'PointCount', 0, 'Array of contour point indices -in increasing numerical order'), - ]), - - ('LigCaretList', [ - ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from beginning of LigCaretList table'), - ('uint16', 'LigGlyphCount', None, None, 'Number of ligature glyphs'), - ('Offset', 'LigGlyph', 'LigGlyphCount', 0, 'Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order'), - ]), - - ('LigGlyph', [ - ('uint16', 'CaretCount', None, None, 'Number of CaretValues for this ligature (components - 1)'), - ('Offset', 'CaretValue', 'CaretCount', 0, 'Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order'), - ]), - - ('CaretValueFormat1', [ - ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 1'), - ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), - ]), - - ('CaretValueFormat2', [ - ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 2'), - ('uint16', 'CaretValuePoint', None, None, 'Contour point index on glyph'), - ]), - - ('CaretValueFormat3', [ - ('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 3'), - ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), - ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value-from beginning of CaretValue table'), - ]), - - ('MarkGlyphSetsDef', [ - ('uint16', 'MarkSetTableFormat', None, None, 'Format identifier == 1'), - ('uint16', 'MarkSetCount', None, None, 
'Number of mark sets defined'), - ('LOffset', 'Coverage', 'MarkSetCount', 0, 'Array of offsets to mark set coverage tables.'), - ]), - - # - # base - # - - ('BASE', [ - ('Version', 'Version', None, None, 'Version of the BASE table-initially 0x00010000'), - ('Offset', 'HorizAxis', None, None, 'Offset to horizontal Axis table-from beginning of BASE table-may be NULL'), - ('Offset', 'VertAxis', None, None, 'Offset to vertical Axis table-from beginning of BASE table-may be NULL'), - ]), - - ('Axis', [ - ('Offset', 'BaseTagList', None, None, 'Offset to BaseTagList table-from beginning of Axis table-may be NULL'), - ('Offset', 'BaseScriptList', None, None, 'Offset to BaseScriptList table-from beginning of Axis table'), - ]), - - ('BaseTagList', [ - ('uint16', 'BaseTagCount', None, None, 'Number of baseline identification tags in this text direction-may be zero (0)'), - ('Tag', 'BaselineTag', 'BaseTagCount', 0, 'Array of 4-byte baseline identification tags-must be in alphabetical order'), - ]), - - ('BaseScriptList', [ - ('uint16', 'BaseScriptCount', None, None, 'Number of BaseScriptRecords defined'), - ('struct', 'BaseScriptRecord', 'BaseScriptCount', 0, 'Array of BaseScriptRecords-in alphabetical order by BaseScriptTag'), - ]), - - ('BaseScriptRecord', [ - ('Tag', 'BaseScriptTag', None, None, '4-byte script identification tag'), - ('Offset', 'BaseScript', None, None, 'Offset to BaseScript table-from beginning of BaseScriptList'), - ]), - - ('BaseScript', [ - ('Offset', 'BaseValues', None, None, 'Offset to BaseValues table-from beginning of BaseScript table-may be NULL'), - ('Offset', 'DefaultMinMax', None, None, 'Offset to MinMax table- from beginning of BaseScript table-may be NULL'), - ('uint16', 'BaseLangSysCount', None, None, 'Number of BaseLangSysRecords defined-may be zero (0)'), - ('struct', 'BaseLangSysRecord', 'BaseLangSysCount', 0, 'Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag'), - ]), - - ('BaseLangSysRecord', [ - ('Tag', 
'BaseLangSysTag', None, None, '4-byte language system identification tag'), - ('Offset', 'MinMax', None, None, 'Offset to MinMax table-from beginning of BaseScript table'), - ]), - - ('BaseValues', [ - ('uint16', 'DefaultIndex', None, None, 'Index number of default baseline for this script-equals index position of baseline tag in BaselineArray of the BaseTagList'), - ('uint16', 'BaseCoordCount', None, None, 'Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList'), - ('Offset', 'BaseCoord', 'BaseCoordCount', 0, 'Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList'), - ]), - - ('MinMax', [ - ('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL'), - ('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL'), - ('uint16', 'FeatMinMaxCount', None, None, 'Number of FeatMinMaxRecords-may be zero (0)'), - ('struct', 'FeatMinMaxRecord', 'FeatMinMaxCount', 0, 'Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag'), - ]), - - ('FeatMinMaxRecord', [ - ('Tag', 'FeatureTableTag', None, None, '4-byte feature identification tag-must match FeatureTag in FeatureList'), - ('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL'), - ('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL'), - ]), - - ('BaseCoordFormat1', [ - ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 1'), - ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), - ]), - - ('BaseCoordFormat2', [ - ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 2'), - ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), - ('GlyphID', 
'ReferenceGlyph', None, None, 'GlyphID of control glyph'), - ('uint16', 'BaseCoordPoint', None, None, 'Index of contour point on the ReferenceGlyph'), - ]), - - ('BaseCoordFormat3', [ - ('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 3'), - ('int16', 'Coordinate', None, None, 'X or Y value, in design units'), - ('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value'), - ]), - - - # - # jstf - # - - ('JSTF', [ - ('Version', 'Version', None, None, 'Version of the JSTF table-initially set to 0x00010000'), - ('uint16', 'JstfScriptCount', None, None, 'Number of JstfScriptRecords in this table'), - ('struct', 'JstfScriptRecord', 'JstfScriptCount', 0, 'Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag'), - ]), - - ('JstfScriptRecord', [ - ('Tag', 'JstfScriptTag', None, None, '4-byte JstfScript identification'), - ('Offset', 'JstfScript', None, None, 'Offset to JstfScript table-from beginning of JSTF Header'), - ]), - - ('JstfScript', [ - ('Offset', 'ExtenderGlyph', None, None, 'Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL'), - ('Offset', 'DefJstfLangSys', None, None, 'Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL'), - ('uint16', 'JstfLangSysCount', None, None, 'Number of JstfLangSysRecords in this table- may be zero (0)'), - ('struct', 'JstfLangSysRecord', 'JstfLangSysCount', 0, 'Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag'), - ]), - - ('JstfLangSysRecord', [ - ('Tag', 'JstfLangSysTag', None, None, '4-byte JstfLangSys identifier'), - ('Offset', 'JstfLangSys', None, None, 'Offset to JstfLangSys table-from beginning of JstfScript table'), - ]), - - ('ExtenderGlyph', [ - ('uint16', 'GlyphCount', None, None, 'Number of Extender Glyphs in this script'), - ('GlyphID', 'ExtenderGlyph', 'GlyphCount', 0, 'GlyphIDs-in increasing numerical order'), - ]), - - ('JstfLangSys', [ - ('uint16', 'JstfPriorityCount', None, None, 'Number 
of JstfPriority tables'), - ('Offset', 'JstfPriority', 'JstfPriorityCount', 0, 'Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order'), - ]), - - ('JstfPriority', [ - ('Offset', 'ShrinkageEnableGSUB', None, None, 'Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), - ('Offset', 'ShrinkageDisableGSUB', None, None, 'Offset to Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), - ('Offset', 'ShrinkageEnableGPOS', None, None, 'Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'), - ('Offset', 'ShrinkageDisableGPOS', None, None, 'Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'), - ('Offset', 'ShrinkageJstfMax', None, None, 'Offset to Shrinkage JstfMax table-from beginning of JstfPriority table -may be NULL'), - ('Offset', 'ExtensionEnableGSUB', None, None, 'Offset to Extension Enable JstfGSUBModList table-may be NULL'), - ('Offset', 'ExtensionDisableGSUB', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), - ('Offset', 'ExtensionEnableGPOS', None, None, 'Offset to Extension Enable JstfGSUBModList table-may be NULL'), - ('Offset', 'ExtensionDisableGPOS', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'), - ('Offset', 'ExtensionJstfMax', None, None, 'Offset to Extension JstfMax table-from beginning of JstfPriority table -may be NULL'), - ]), - - ('JstfGSUBModList', [ - ('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'), - ('uint16', 'GSUBLookupIndex', 'LookupCount', 0, 'Array of LookupIndex identifiers in GSUB-in increasing numerical order'), - ]), - - ('JstfGPOSModList', [ - ('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'), - ('uint16', 'GPOSLookupIndex', 
'LookupCount', 0, 'Array of LookupIndex identifiers in GPOS-in increasing numerical order'), - ]), - - ('JstfMax', [ - ('uint16', 'LookupCount', None, None, 'Number of lookup Indices for this modification'), - ('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order'), - ]), - - # - # math - # - - ('MATH', [ - ('Version', 'Version', None, None, 'Version of the MATH table-initially set to 0x00010000.'), - ('Offset', 'MathConstants', None, None, 'Offset to MathConstants table - from the beginning of MATH table.'), - ('Offset', 'MathGlyphInfo', None, None, 'Offset to MathGlyphInfo table - from the beginning of MATH table.'), - ('Offset', 'MathVariants', None, None, 'Offset to MathVariants table - from the beginning of MATH table.'), - ]), - - ('MathValueRecord', [ - ('int16', 'Value', None, None, 'The X or Y value in design units.'), - ('Offset', 'DeviceTable', None, None, 'Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.'), - ]), - - ('MathConstants', [ - ('int16', 'ScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 1. Suggested value: 80%.'), - ('int16', 'ScriptScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.'), - ('uint16', 'DelimitedSubFormulaMinHeight', None, None, 'Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.'), - ('uint16', 'DisplayOperatorMinHeight', None, None, 'Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.'), - ('MathValueRecord', 'MathLeading', None, None, 'White space to be left between math formulas to ensure proper line spacing. 
For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.'), - ('MathValueRecord', 'AxisHeight', None, None, 'Axis height of the font.'), - ('MathValueRecord', 'AccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.'), - ('MathValueRecord', 'FlattenedAccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).'), - ('MathValueRecord', 'SubscriptShiftDown', None, None, 'The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.'), - ('MathValueRecord', 'SubscriptTopMax', None, None, 'Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.'), - ('MathValueRecord', 'SubscriptBaselineDropMin', None, None, 'Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.'), - ('MathValueRecord', 'SuperscriptShiftUp', None, None, 'Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.'), - ('MathValueRecord', 'SuperscriptShiftUpCramped', None, None, 'Standard shift of superscripts relative to the base, in cramped style.'), - ('MathValueRecord', 'SuperscriptBottomMin', None, None, 'Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. 
Suggested: 1/4 x-height.'), - ('MathValueRecord', 'SuperscriptBaselineDropMax', None, None, 'Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.'), - ('MathValueRecord', 'SubSuperscriptGapMin', None, None, 'Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.'), - ('MathValueRecord', 'SuperscriptBottomMaxWithSubscript', None, None, 'The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.'), - ('MathValueRecord', 'SpaceAfterScript', None, None, 'Extra white space to be added after each subscript and superscript. Suggested: 0.5pt for a 12 pt font.'), - ('MathValueRecord', 'UpperLimitGapMin', None, None, 'Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.'), - ('MathValueRecord', 'UpperLimitBaselineRiseMin', None, None, 'Minimum distance between baseline of upper limit and (ink) top of the base operator.'), - ('MathValueRecord', 'LowerLimitGapMin', None, None, 'Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.'), - ('MathValueRecord', 'LowerLimitBaselineDropMin', None, None, 'Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.'), - ('MathValueRecord', 'StackTopShiftUp', None, None, 'Standard shift up applied to the top element of a stack.'), - ('MathValueRecord', 'StackTopDisplayStyleShiftUp', None, None, 'Standard shift up applied to the top element of a stack in display style.'), - ('MathValueRecord', 'StackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack. 
Positive for moving in the downward direction.'), - ('MathValueRecord', 'StackBottomDisplayStyleShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.'), - ('MathValueRecord', 'StackGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.'), - ('MathValueRecord', 'StackDisplayStyleGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.'), - ('MathValueRecord', 'StretchStackTopShiftUp', None, None, 'Standard shift up applied to the top element of the stretch stack.'), - ('MathValueRecord', 'StretchStackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.'), - ('MathValueRecord', 'StretchStackGapAboveMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin'), - ('MathValueRecord', 'StretchStackGapBelowMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.'), - ('MathValueRecord', 'FractionNumeratorShiftUp', None, None, 'Standard shift up applied to the numerator.'), - ('MathValueRecord', 'FractionNumeratorDisplayStyleShiftUp', None, None, 'Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.'), - ('MathValueRecord', 'FractionDenominatorShiftDown', None, None, 'Standard shift down applied to the denominator. Positive for moving in the downward direction.'), - ('MathValueRecord', 'FractionDenominatorDisplayStyleShiftDown', None, None, 'Standard shift down applied to the denominator in display style. 
Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.'), - ('MathValueRecord', 'FractionNumeratorGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness'), - ('MathValueRecord', 'FractionNumDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'), - ('MathValueRecord', 'FractionRuleThickness', None, None, 'Thickness of the fraction bar. Suggested: default rule thickness.'), - ('MathValueRecord', 'FractionDenominatorGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness'), - ('MathValueRecord', 'FractionDenomDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'), - ('MathValueRecord', 'SkewedFractionHorizontalGap', None, None, 'Horizontal distance between the top and bottom elements of a skewed fraction.'), - ('MathValueRecord', 'SkewedFractionVerticalGap', None, None, 'Vertical distance between the ink of the top and bottom elements of a skewed fraction.'), - ('MathValueRecord', 'OverbarVerticalGap', None, None, 'Distance between the overbar and the (ink) top of he base. Suggested: 3x default rule thickness.'), - ('MathValueRecord', 'OverbarRuleThickness', None, None, 'Thickness of overbar. Suggested: default rule thickness.'), - ('MathValueRecord', 'OverbarExtraAscender', None, None, 'Extra white space reserved above the overbar. Suggested: default rule thickness.'), - ('MathValueRecord', 'UnderbarVerticalGap', None, None, 'Distance between underbar and (ink) bottom of the base. 
Suggested: 3x default rule thickness.'), - ('MathValueRecord', 'UnderbarRuleThickness', None, None, 'Thickness of underbar. Suggested: default rule thickness.'), - ('MathValueRecord', 'UnderbarExtraDescender', None, None, 'Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.'), - ('MathValueRecord', 'RadicalVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.'), - ('MathValueRecord', 'RadicalDisplayStyleVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.'), - ('MathValueRecord', 'RadicalRuleThickness', None, None, 'Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.'), - ('MathValueRecord', 'RadicalExtraAscender', None, None, 'Extra white space reserved above the radical. Suggested: RadicalRuleThickness.'), - ('MathValueRecord', 'RadicalKernBeforeDegree', None, None, 'Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.'), - ('MathValueRecord', 'RadicalKernAfterDegree', None, None, 'Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.'), - ('uint16', 'RadicalDegreeBottomRaisePercent', None, None, 'Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.'), - ]), - - ('MathGlyphInfo', [ - ('Offset', 'MathItalicsCorrectionInfo', None, None, 'Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.'), - ('Offset', 'MathTopAccentAttachment', None, None, 'Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.'), - ('Offset', 'ExtendedShapeCoverage', None, None, 'Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table. 
When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.'), - ('Offset', 'MathKernInfo', None, None, 'Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.'), - ]), - - ('MathItalicsCorrectionInfo', [ - ('Offset', 'Coverage', None, None, 'Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.'), - ('uint16', 'ItalicsCorrectionCount', None, None, 'Number of italics correction values. Should coincide with the number of covered glyphs.'), - ('MathValueRecord', 'ItalicsCorrection', 'ItalicsCorrectionCount', 0, 'Array of MathValueRecords defining italics correction values for each covered glyph.'), - ]), - - ('MathTopAccentAttachment', [ - ('Offset', 'TopAccentCoverage', None, None, 'Offset to Coverage table - from the beginning of MathTopAccentAttachment table.'), - ('uint16', 'TopAccentAttachmentCount', None, None, 'Number of top accent attachment point values. Should coincide with the number of covered glyphs'), - ('MathValueRecord', 'TopAccentAttachment', 'TopAccentAttachmentCount', 0, 'Array of MathValueRecords defining top accent attachment points for each covered glyph'), - ]), - - ('MathKernInfo', [ - ('Offset', 'MathKernCoverage', None, None, 'Offset to Coverage table - from the beginning of the MathKernInfo table.'), - ('uint16', 'MathKernCount', None, None, 'Number of MathKernInfoRecords.'), - ('MathKernInfoRecord', 'MathKernInfoRecords', 'MathKernCount', 0, 'Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.'), - ]), - - ('MathKernInfoRecord', [ - ('Offset', 'TopRightMathKern', None, None, 'Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. 
May be NULL.'), - ('Offset', 'TopLeftMathKern', None, None, 'Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.'), - ('Offset', 'BottomRightMathKern', None, None, 'Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.'), - ('Offset', 'BottomLeftMathKern', None, None, 'Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.'), - ]), - - ('MathKern', [ - ('uint16', 'HeightCount', None, None, 'Number of heights on which the kern value changes.'), - ('MathValueRecord', 'CorrectionHeight', 'HeightCount', 0, 'Array of correction heights at which the kern value changes. Sorted by the height value in design units.'), - ('MathValueRecord', 'KernValue', 'HeightCount', 1, 'Array of kern values corresponding to heights. First value is the kern value for all heights less or equal than the first height in this table.Last value is the value to be applied for all heights greater than the last height in this table. 
Negative values are interpreted as move glyphs closer to each other.'), - ]), - - ('MathVariants', [ - ('uint16', 'MinConnectorOverlap', None, None, 'Minimum overlap of connecting glyphs during glyph construction, in design units.'), - ('Offset', 'VertGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'), - ('Offset', 'HorizGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'), - ('uint16', 'VertGlyphCount', None, None, 'Number of glyphs for which information is provided for vertically growing variants.'), - ('uint16', 'HorizGlyphCount', None, None, 'Number of glyphs for which information is provided for horizontally growing variants.'), - ('Offset', 'VertGlyphConstruction', 'VertGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.'), - ('Offset', 'HorizGlyphConstruction', 'HorizGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.'), - ]), - - ('MathGlyphConstruction', [ - ('Offset', 'GlyphAssembly', None, None, 'Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. May be NULL'), - ('uint16', 'VariantCount', None, None, 'Count of glyph growing variants for this glyph.'), - ('MathGlyphVariantRecord', 'MathGlyphVariantRecord', 'VariantCount', 0, 'MathGlyphVariantRecords for alternative variants of the glyphs.'), - ]), - - ('MathGlyphVariantRecord', [ - ('GlyphID', 'VariantGlyph', None, None, 'Glyph ID for the variant.'), - ('uint16', 'AdvanceMeasurement', None, None, 'Advance width/height, in design units, of the variant, in the direction of requested glyph extension.'), - ]), - - ('GlyphAssembly', [ - ('MathValueRecord', 'ItalicsCorrection', None, None, 'Italics correction of this GlyphAssembly. 
Should not depend on the assembly size.'), - ('uint16', 'PartCount', None, None, 'Number of parts in this assembly.'), - ('GlyphPartRecord', 'PartRecords', 'PartCount', 0, 'Array of part records, from left to right and bottom to top.'), - ]), - - ('GlyphPartRecord', [ - ('GlyphID', 'glyph', None, None, 'Glyph ID for the part.'), - ('uint16', 'StartConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the beginning of the glyph, in the direction of the extension.'), - ('uint16', 'EndConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the end of the glyph, in the direction of the extension.'), - ('uint16', 'FullAdvance', None, None, 'Full advance width/height for this part, in the direction of the extension. In design units.'), - ('uint16', 'PartFlags', None, None, 'Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 
0xFFFE Reserved'), - ]), - - - ## - ## Apple Advanced Typography (AAT) tables - ## - - # - # feat - # - - ('feat', [ - ('Version', 'Version', None, None, 'Version of the feat table-initially set to 0x00010000.'), - ('FeatureNames', 'FeatureNames', None, None, 'The feature names.'), - ]), - - ('FeatureNames', [ - ('uint16', 'FeatureNameCount', None, None, 'Number of entries in the feature name array.'), - ('uint16', 'Reserved1', None, None, 'Reserved (set to zero).'), - ('uint32', 'Reserved2', None, None, 'Reserved (set to zero).'), - ('FeatureName', 'FeatureName', 'FeatureNameCount', 0, 'The feature name array.'), - ]), - - ('FeatureName', [ - ('uint16', 'FeatureType', None, None, 'Feature type.'), - ('uint16', 'SettingsCount', None, None, 'The number of records in the setting name array.'), - ('LOffset', 'Settings', None, None, 'Offset to setting table for this feature.'), - ('uint16', 'FeatureFlags', None, None, 'Single-bit flags associated with the feature type.'), - ('uint16', 'FeatureNameID', None, None, 'The name table index for the feature name.'), - ]), - - ('Settings', [ - ('Setting', 'Setting', 'SettingsCount', 0, 'The setting array.'), - ]), - - ('Setting', [ - ('uint16', 'SettingValue', None, None, 'The setting.'), - ('uint16', 'SettingNameID', None, None, 'The name table index for the setting name.'), - ]), - -] diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/otTables.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/otTables.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/otTables.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/otTables.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,774 +0,0 @@ -"""fontTools.ttLib.tables.otTables -- A collection of classes representing the various -OpenType subtables. - -Most are constructed upon import from data in otData.py, all are populated with -converter objects from otConverters.py. 
-""" -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from .otBase import BaseTable, FormatSwitchingBaseTable -import operator -import warnings - - -class FeatureParams(BaseTable): - - def compile(self, writer, font): - assert featureParamTypes.get(writer['FeatureTag']) == self.__class__, "Wrong FeatureParams type for feature '%s': %s" % (writer['FeatureTag'], self.__class__.__name__) - BaseTable.compile(self, writer, font) - - def toXML(self, xmlWriter, font, attrs=None, name=None): - BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__) - -class FeatureParamsSize(FeatureParams): - pass - -class FeatureParamsStylisticSet(FeatureParams): - pass - -class FeatureParamsCharacterVariants(FeatureParams): - pass - -class Coverage(FormatSwitchingBaseTable): - - # manual implementation to get rid of glyphID dependencies - - def postRead(self, rawTable, font): - if self.Format == 1: - # TODO only allow glyphs that are valid? - self.glyphs = rawTable["GlyphArray"] - elif self.Format == 2: - glyphs = self.glyphs = [] - ranges = rawTable["RangeRecord"] - glyphOrder = font.getGlyphOrder() - # Some SIL fonts have coverage entries that don't have sorted - # StartCoverageIndex. If it is so, fixup and warn. We undo - # this when writing font out. - sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex) - if ranges != sorted_ranges: - warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") - ranges = sorted_ranges - del sorted_ranges - for r in ranges: - assert r.StartCoverageIndex == len(glyphs), \ - (r.StartCoverageIndex, len(glyphs)) - start = r.Start - end = r.End - try: - startID = font.getGlyphID(start, requireReal=True) - except KeyError: - warnings.warn("Coverage table has start glyph ID out of range: %s." 
% start) - continue - try: - endID = font.getGlyphID(end, requireReal=True) + 1 - except KeyError: - # Apparently some tools use 65535 to "match all" the range - if end != 'glyph65535': - warnings.warn("Coverage table has end glyph ID out of range: %s." % end) - # NOTE: We clobber out-of-range things here. There are legit uses for those, - # but none that we have seen in the wild. - endID = len(glyphOrder) - glyphs.extend(glyphOrder[glyphID] for glyphID in range(startID, endID)) - else: - assert 0, "unknown format: %s" % self.Format - del self.Format # Don't need this anymore - - def preWrite(self, font): - glyphs = getattr(self, "glyphs", None) - if glyphs is None: - glyphs = self.glyphs = [] - format = 1 - rawTable = {"GlyphArray": glyphs} - getGlyphID = font.getGlyphID - if glyphs: - # find out whether Format 2 is more compact or not - glyphIDs = [getGlyphID(glyphName) for glyphName in glyphs ] - brokenOrder = sorted(glyphIDs) != glyphIDs - - last = glyphIDs[0] - ranges = [[last]] - for glyphID in glyphIDs[1:]: - if glyphID != last + 1: - ranges[-1].append(last) - ranges.append([glyphID]) - last = glyphID - ranges[-1].append(last) - - if brokenOrder or len(ranges) * 3 < len(glyphs): # 3 words vs. 
1 word - # Format 2 is more compact - index = 0 - for i in range(len(ranges)): - start, end = ranges[i] - r = RangeRecord() - r.StartID = start - r.Start = font.getGlyphName(start) - r.End = font.getGlyphName(end) - r.StartCoverageIndex = index - ranges[i] = r - index = index + end - start + 1 - if brokenOrder: - warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.") - ranges.sort(key=lambda a: a.StartID) - for r in ranges: - del r.StartID - format = 2 - rawTable = {"RangeRecord": ranges} - #else: - # fallthrough; Format 1 is more compact - self.Format = format - return rawTable - - def toXML2(self, xmlWriter, font): - for glyphName in getattr(self, "glyphs", []): - xmlWriter.simpletag("Glyph", value=glyphName) - xmlWriter.newline() - - def fromXML(self, name, attrs, content, font): - glyphs = getattr(self, "glyphs", None) - if glyphs is None: - glyphs = [] - self.glyphs = glyphs - glyphs.append(attrs["value"]) - - -class SingleSubst(FormatSwitchingBaseTable): - - def postRead(self, rawTable, font): - mapping = {} - input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) - lenMapping = len(input) - if self.Format == 1: - delta = rawTable["DeltaGlyphID"] - inputGIDS = [ font.getGlyphID(name) for name in input ] - outGIDS = [ (glyphID + delta) % 65536 for glyphID in inputGIDS ] - outNames = [ font.getGlyphName(glyphID) for glyphID in outGIDS ] - list(map(operator.setitem, [mapping]*lenMapping, input, outNames)) - elif self.Format == 2: - assert len(input) == rawTable["GlyphCount"], \ - "invalid SingleSubstFormat2 table" - subst = rawTable["Substitute"] - list(map(operator.setitem, [mapping]*lenMapping, input, subst)) - else: - assert 0, "unknown format: %s" % self.Format - self.mapping = mapping - del self.Format # Don't need this anymore - - def preWrite(self, font): - mapping = getattr(self, "mapping", None) - if mapping is None: - mapping = self.mapping = {} - items = list(mapping.items()) - getGlyphID = font.getGlyphID - gidItems = [(getGlyphID(a), 
getGlyphID(b)) for a,b in items] - sortableItems = sorted(zip(gidItems, items)) - - # figure out format - format = 2 - delta = None - for inID, outID in gidItems: - if delta is None: - delta = (outID - inID) % 65536 - - if (inID + delta) % 65536 != outID: - break - else: - format = 1 - - rawTable = {} - self.Format = format - cov = Coverage() - input = [ item [1][0] for item in sortableItems] - subst = [ item [1][1] for item in sortableItems] - cov.glyphs = input - rawTable["Coverage"] = cov - if format == 1: - assert delta is not None - rawTable["DeltaGlyphID"] = delta - else: - rawTable["Substitute"] = subst - return rawTable - - def toXML2(self, xmlWriter, font): - items = sorted(self.mapping.items()) - for inGlyph, outGlyph in items: - xmlWriter.simpletag("Substitution", - [("in", inGlyph), ("out", outGlyph)]) - xmlWriter.newline() - - def fromXML(self, name, attrs, content, font): - mapping = getattr(self, "mapping", None) - if mapping is None: - mapping = {} - self.mapping = mapping - mapping[attrs["in"]] = attrs["out"] - - -class ClassDef(FormatSwitchingBaseTable): - - def postRead(self, rawTable, font): - classDefs = {} - glyphOrder = font.getGlyphOrder() - - if self.Format == 1: - start = rawTable["StartGlyph"] - classList = rawTable["ClassValueArray"] - try: - startID = font.getGlyphID(start, requireReal=True) - except KeyError: - warnings.warn("ClassDef table has start glyph ID out of range: %s." % start) - startID = len(glyphOrder) - endID = startID + len(classList) - if endID > len(glyphOrder): - warnings.warn("ClassDef table has entries for out of range glyph IDs: %s,%s." % (start, len(classList))) - # NOTE: We clobber out-of-range things here. There are legit uses for those, - # but none that we have seen in the wild. 
- endID = len(glyphOrder) - - for glyphID, cls in zip(range(startID, endID), classList): - classDefs[glyphOrder[glyphID]] = cls - - elif self.Format == 2: - records = rawTable["ClassRangeRecord"] - for rec in records: - start = rec.Start - end = rec.End - cls = rec.Class - try: - startID = font.getGlyphID(start, requireReal=True) - except KeyError: - warnings.warn("ClassDef table has start glyph ID out of range: %s." % start) - continue - try: - endID = font.getGlyphID(end, requireReal=True) + 1 - except KeyError: - # Apparently some tools use 65535 to "match all" the range - if end != 'glyph65535': - warnings.warn("ClassDef table has end glyph ID out of range: %s." % end) - # NOTE: We clobber out-of-range things here. There are legit uses for those, - # but none that we have seen in the wild. - endID = len(glyphOrder) - for glyphID in range(startID, endID): - classDefs[glyphOrder[glyphID]] = cls - else: - assert 0, "unknown format: %s" % self.Format - self.classDefs = classDefs - del self.Format # Don't need this anymore - - def preWrite(self, font): - classDefs = getattr(self, "classDefs", None) - if classDefs is None: - classDefs = self.classDefs = {} - items = list(classDefs.items()) - format = 2 - rawTable = {"ClassRangeRecord": []} - getGlyphID = font.getGlyphID - for i in range(len(items)): - glyphName, cls = items[i] - items[i] = getGlyphID(glyphName), glyphName, cls - items.sort() - if items: - last, lastName, lastCls = items[0] - ranges = [[lastCls, last, lastName]] - for glyphID, glyphName, cls in items[1:]: - if glyphID != last + 1 or cls != lastCls: - ranges[-1].extend([last, lastName]) - ranges.append([cls, glyphID, glyphName]) - last = glyphID - lastName = glyphName - lastCls = cls - ranges[-1].extend([last, lastName]) - - startGlyph = ranges[0][1] - endGlyph = ranges[-1][3] - glyphCount = endGlyph - startGlyph + 1 - if len(ranges) * 3 < glyphCount + 1: - # Format 2 is more compact - for i in range(len(ranges)): - cls, start, startName, end, endName 
= ranges[i] - rec = ClassRangeRecord() - rec.Start = startName - rec.End = endName - rec.Class = cls - ranges[i] = rec - format = 2 - rawTable = {"ClassRangeRecord": ranges} - else: - # Format 1 is more compact - startGlyphName = ranges[0][2] - classes = [0] * glyphCount - for cls, start, startName, end, endName in ranges: - for g in range(start - startGlyph, end - startGlyph + 1): - classes[g] = cls - format = 1 - rawTable = {"StartGlyph": startGlyphName, "ClassValueArray": classes} - self.Format = format - return rawTable - - def toXML2(self, xmlWriter, font): - items = sorted(self.classDefs.items()) - for glyphName, cls in items: - xmlWriter.simpletag("ClassDef", [("glyph", glyphName), ("class", cls)]) - xmlWriter.newline() - - def fromXML(self, name, attrs, content, font): - classDefs = getattr(self, "classDefs", None) - if classDefs is None: - classDefs = {} - self.classDefs = classDefs - classDefs[attrs["glyph"]] = int(attrs["class"]) - - -class AlternateSubst(FormatSwitchingBaseTable): - - def postRead(self, rawTable, font): - alternates = {} - if self.Format == 1: - input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) - alts = rawTable["AlternateSet"] - if len(input) != len(alts): - assert len(input) == len(alts) - for i in range(len(input)): - alternates[input[i]] = alts[i].Alternate - else: - assert 0, "unknown format: %s" % self.Format - self.alternates = alternates - del self.Format # Don't need this anymore - - def preWrite(self, font): - self.Format = 1 - alternates = getattr(self, "alternates", None) - if alternates is None: - alternates = self.alternates = {} - items = list(alternates.items()) - for i in range(len(items)): - glyphName, set = items[i] - items[i] = font.getGlyphID(glyphName), glyphName, set - items.sort() - cov = Coverage() - cov.glyphs = [ item[1] for item in items] - alternates = [] - setList = [ item[-1] for item in items] - for set in setList: - alts = AlternateSet() - alts.Alternate = set - alternates.append(alts) - # a 
special case to deal with the fact that several hundred Adobe Japan1-5 - # CJK fonts will overflow an offset if the coverage table isn't pushed to the end. - # Also useful in that when splitting a sub-table because of an offset overflow - # I don't need to calculate the change in the subtable offset due to the change in the coverage table size. - # Allows packing more rules in subtable. - self.sortCoverageLast = 1 - return {"Coverage": cov, "AlternateSet": alternates} - - def toXML2(self, xmlWriter, font): - items = sorted(self.alternates.items()) - for glyphName, alternates in items: - xmlWriter.begintag("AlternateSet", glyph=glyphName) - xmlWriter.newline() - for alt in alternates: - xmlWriter.simpletag("Alternate", glyph=alt) - xmlWriter.newline() - xmlWriter.endtag("AlternateSet") - xmlWriter.newline() - - def fromXML(self, name, attrs, content, font): - alternates = getattr(self, "alternates", None) - if alternates is None: - alternates = {} - self.alternates = alternates - glyphName = attrs["glyph"] - set = [] - alternates[glyphName] = set - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - set.append(attrs["glyph"]) - - -class LigatureSubst(FormatSwitchingBaseTable): - - def postRead(self, rawTable, font): - ligatures = {} - if self.Format == 1: - input = _getGlyphsFromCoverageTable(rawTable["Coverage"]) - ligSets = rawTable["LigatureSet"] - assert len(input) == len(ligSets) - for i in range(len(input)): - ligatures[input[i]] = ligSets[i].Ligature - else: - assert 0, "unknown format: %s" % self.Format - self.ligatures = ligatures - del self.Format # Don't need this anymore - - def preWrite(self, font): - self.Format = 1 - ligatures = getattr(self, "ligatures", None) - if ligatures is None: - ligatures = self.ligatures = {} - items = list(ligatures.items()) - for i in range(len(items)): - glyphName, set = items[i] - items[i] = font.getGlyphID(glyphName), glyphName, set - items.sort() - cov = Coverage() 
- cov.glyphs = [ item[1] for item in items] - - ligSets = [] - setList = [ item[-1] for item in items ] - for set in setList: - ligSet = LigatureSet() - ligs = ligSet.Ligature = [] - for lig in set: - ligs.append(lig) - ligSets.append(ligSet) - # Useful in that when splitting a sub-table because of an offset overflow - # I don't need to calculate the change in subtabl offset due to the coverage table size. - # Allows packing more rules in subtable. - self.sortCoverageLast = 1 - return {"Coverage": cov, "LigatureSet": ligSets} - - def toXML2(self, xmlWriter, font): - items = sorted(self.ligatures.items()) - for glyphName, ligSets in items: - xmlWriter.begintag("LigatureSet", glyph=glyphName) - xmlWriter.newline() - for lig in ligSets: - xmlWriter.simpletag("Ligature", glyph=lig.LigGlyph, - components=",".join(lig.Component)) - xmlWriter.newline() - xmlWriter.endtag("LigatureSet") - xmlWriter.newline() - - def fromXML(self, name, attrs, content, font): - ligatures = getattr(self, "ligatures", None) - if ligatures is None: - ligatures = {} - self.ligatures = ligatures - glyphName = attrs["glyph"] - ligs = [] - ligatures[glyphName] = ligs - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - lig = Ligature() - lig.LigGlyph = attrs["glyph"] - components = attrs["components"] - lig.Component = components.split(",") if components else [] - ligs.append(lig) - - -# -# For each subtable format there is a class. However, we don't really distinguish -# between "field name" and "format name": often these are the same. Yet there's -# a whole bunch of fields with different names. The following dict is a mapping -# from "format name" to "field name". _buildClasses() uses this to create a -# subclass for each alternate field name. 
-# -_equivalents = { - 'MarkArray': ("Mark1Array",), - 'LangSys': ('DefaultLangSys',), - 'Coverage': ('MarkCoverage', 'BaseCoverage', 'LigatureCoverage', 'Mark1Coverage', - 'Mark2Coverage', 'BacktrackCoverage', 'InputCoverage', - 'LookAheadCoverage', 'VertGlyphCoverage', 'HorizGlyphCoverage', - 'TopAccentCoverage', 'ExtendedShapeCoverage', 'MathKernCoverage'), - 'ClassDef': ('ClassDef1', 'ClassDef2', 'BacktrackClassDef', 'InputClassDef', - 'LookAheadClassDef', 'GlyphClassDef', 'MarkAttachClassDef'), - 'Anchor': ('EntryAnchor', 'ExitAnchor', 'BaseAnchor', 'LigatureAnchor', - 'Mark2Anchor', 'MarkAnchor'), - 'Device': ('XPlaDevice', 'YPlaDevice', 'XAdvDevice', 'YAdvDevice', - 'XDeviceTable', 'YDeviceTable', 'DeviceTable'), - 'Axis': ('HorizAxis', 'VertAxis',), - 'MinMax': ('DefaultMinMax',), - 'BaseCoord': ('MinCoord', 'MaxCoord',), - 'JstfLangSys': ('DefJstfLangSys',), - 'JstfGSUBModList': ('ShrinkageEnableGSUB', 'ShrinkageDisableGSUB', 'ExtensionEnableGSUB', - 'ExtensionDisableGSUB',), - 'JstfGPOSModList': ('ShrinkageEnableGPOS', 'ShrinkageDisableGPOS', 'ExtensionEnableGPOS', - 'ExtensionDisableGPOS',), - 'JstfMax': ('ShrinkageJstfMax', 'ExtensionJstfMax',), - 'MathKern': ('TopRightMathKern', 'TopLeftMathKern', 'BottomRightMathKern', - 'BottomLeftMathKern'), - 'MathGlyphConstruction': ('VertGlyphConstruction', 'HorizGlyphConstruction'), -} - -# -# OverFlow logic, to automatically create ExtensionLookups -# XXX This should probably move to otBase.py -# - -def fixLookupOverFlows(ttf, overflowRecord): - """ Either the offset from the LookupList to a lookup overflowed, or - an offset from a lookup to a subtable overflowed. - The table layout is: - GPSO/GUSB - Script List - Feature List - LookUpList - Lookup[0] and contents - SubTable offset list - SubTable[0] and contents - ... - SubTable[n] and contents - ... - Lookup[n] and contents - SubTable offset list - SubTable[0] and contents - ... 
- SubTable[n] and contents - If the offset to a lookup overflowed (SubTableIndex is None) - we must promote the *previous* lookup to an Extension type. - If the offset from a lookup to subtable overflowed, then we must promote it - to an Extension Lookup type. - """ - ok = 0 - lookupIndex = overflowRecord.LookupListIndex - if (overflowRecord.SubTableIndex is None): - lookupIndex = lookupIndex - 1 - if lookupIndex < 0: - return ok - if overflowRecord.tableType == 'GSUB': - extType = 7 - elif overflowRecord.tableType == 'GPOS': - extType = 9 - - lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup - lookup = lookups[lookupIndex] - # If the previous lookup is an extType, look further back. Very unlikely, but possible. - while lookup.SubTable[0].__class__.LookupType == extType: - lookupIndex = lookupIndex -1 - if lookupIndex < 0: - return ok - lookup = lookups[lookupIndex] - - for si in range(len(lookup.SubTable)): - subTable = lookup.SubTable[si] - extSubTableClass = lookupTypes[overflowRecord.tableType][extType] - extSubTable = extSubTableClass() - extSubTable.Format = 1 - extSubTable.ExtSubTable = subTable - lookup.SubTable[si] = extSubTable - ok = 1 - return ok - -def splitAlternateSubst(oldSubTable, newSubTable, overflowRecord): - ok = 1 - newSubTable.Format = oldSubTable.Format - if hasattr(oldSubTable, 'sortCoverageLast'): - newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast - - oldAlts = sorted(oldSubTable.alternates.items()) - oldLen = len(oldAlts) - - if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']: - # Coverage table is written last. overflow is to or within the - # the coverage table. We will just cut the subtable in half. - newLen = oldLen//2 - - elif overflowRecord.itemName == 'AlternateSet': - # We just need to back up by two items - # from the overflowed AlternateSet index to make sure the offset - # to the Coverage table doesn't overflow. 
- newLen = overflowRecord.itemIndex - 1 - - newSubTable.alternates = {} - for i in range(newLen, oldLen): - item = oldAlts[i] - key = item[0] - newSubTable.alternates[key] = item[1] - del oldSubTable.alternates[key] - - return ok - - -def splitLigatureSubst(oldSubTable, newSubTable, overflowRecord): - ok = 1 - newSubTable.Format = oldSubTable.Format - oldLigs = sorted(oldSubTable.ligatures.items()) - oldLen = len(oldLigs) - - if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']: - # Coverage table is written last. overflow is to or within the - # the coverage table. We will just cut the subtable in half. - newLen = oldLen//2 - - elif overflowRecord.itemName == 'LigatureSet': - # We just need to back up by two items - # from the overflowed AlternateSet index to make sure the offset - # to the Coverage table doesn't overflow. - newLen = overflowRecord.itemIndex - 1 - - newSubTable.ligatures = {} - for i in range(newLen, oldLen): - item = oldLigs[i] - key = item[0] - newSubTable.ligatures[key] = item[1] - del oldSubTable.ligatures[key] - - return ok - - -splitTable = { 'GSUB': { -# 1: splitSingleSubst, -# 2: splitMultipleSubst, - 3: splitAlternateSubst, - 4: splitLigatureSubst, -# 5: splitContextSubst, -# 6: splitChainContextSubst, -# 7: splitExtensionSubst, -# 8: splitReverseChainSingleSubst, - }, - 'GPOS': { -# 1: splitSinglePos, -# 2: splitPairPos, -# 3: splitCursivePos, -# 4: splitMarkBasePos, -# 5: splitMarkLigPos, -# 6: splitMarkMarkPos, -# 7: splitContextPos, -# 8: splitChainContextPos, -# 9: splitExtensionPos, - } - - } - -def fixSubTableOverFlows(ttf, overflowRecord): - """ - An offset has overflowed within a sub-table. We need to divide this subtable into smaller parts. 
- """ - ok = 0 - table = ttf[overflowRecord.tableType].table - lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex] - subIndex = overflowRecord.SubTableIndex - subtable = lookup.SubTable[subIndex] - - if hasattr(subtable, 'ExtSubTable'): - # We split the subtable of the Extension table, and add a new Extension table - # to contain the new subtable. - - subTableType = subtable.ExtSubTable.__class__.LookupType - extSubTable = subtable - subtable = extSubTable.ExtSubTable - newExtSubTableClass = lookupTypes[overflowRecord.tableType][subtable.__class__.LookupType] - newExtSubTable = newExtSubTableClass() - newExtSubTable.Format = extSubTable.Format - lookup.SubTable.insert(subIndex + 1, newExtSubTable) - - newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType] - newSubTable = newSubTableClass() - newExtSubTable.ExtSubTable = newSubTable - else: - subTableType = subtable.__class__.LookupType - newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType] - newSubTable = newSubTableClass() - lookup.SubTable.insert(subIndex + 1, newSubTable) - - if hasattr(lookup, 'SubTableCount'): # may not be defined yet. - lookup.SubTableCount = lookup.SubTableCount + 1 - - try: - splitFunc = splitTable[overflowRecord.tableType][subTableType] - except KeyError: - return ok - - ok = splitFunc(subtable, newSubTable, overflowRecord) - return ok - -# End of OverFlow logic - - -def _buildClasses(): - import re - from .otData import otData - - formatPat = re.compile("([A-Za-z0-9]+)Format(\d+)$") - namespace = globals() - - # populate module with classes - for name, table in otData: - baseClass = BaseTable - m = formatPat.match(name) - if m: - # XxxFormatN subtable, we only add the "base" table - name = m.group(1) - baseClass = FormatSwitchingBaseTable - if name not in namespace: - # the class doesn't exist yet, so the base implementation is used. 
- cls = type(name, (baseClass,), {}) - namespace[name] = cls - - for base, alts in _equivalents.items(): - base = namespace[base] - for alt in alts: - namespace[alt] = type(alt, (base,), {}) - - global lookupTypes - lookupTypes = { - 'GSUB': { - 1: SingleSubst, - 2: MultipleSubst, - 3: AlternateSubst, - 4: LigatureSubst, - 5: ContextSubst, - 6: ChainContextSubst, - 7: ExtensionSubst, - 8: ReverseChainSingleSubst, - }, - 'GPOS': { - 1: SinglePos, - 2: PairPos, - 3: CursivePos, - 4: MarkBasePos, - 5: MarkLigPos, - 6: MarkMarkPos, - 7: ContextPos, - 8: ChainContextPos, - 9: ExtensionPos, - }, - } - lookupTypes['JSTF'] = lookupTypes['GPOS'] # JSTF contains GPOS - for lookupEnum in lookupTypes.values(): - for enum, cls in lookupEnum.items(): - cls.LookupType = enum - - global featureParamTypes - featureParamTypes = { - 'size': FeatureParamsSize, - } - for i in range(1, 20+1): - featureParamTypes['ss%02d' % i] = FeatureParamsStylisticSet - for i in range(1, 99+1): - featureParamTypes['cv%02d' % i] = FeatureParamsCharacterVariants - - # add converters to classes - from .otConverters import buildConverters - for name, table in otData: - m = formatPat.match(name) - if m: - # XxxFormatN subtable, add converter to "base" table - name, format = m.groups() - format = int(format) - cls = namespace[name] - if not hasattr(cls, "converters"): - cls.converters = {} - cls.convertersByName = {} - converters, convertersByName = buildConverters(table[1:], namespace) - cls.converters[format] = converters - cls.convertersByName[format] = convertersByName - # XXX Add staticSize? - else: - cls = namespace[name] - cls.converters, cls.convertersByName = buildConverters(table, namespace) - # XXX Add staticSize? 
- - -_buildClasses() - - -def _getGlyphsFromCoverageTable(coverage): - if coverage is None: - # empty coverage table - return [] - else: - return coverage.glyphs diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_p_o_s_t.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_p_o_s_t.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_p_o_s_t.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_p_o_s_t.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,277 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib -from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval, readHex -from . import DefaultTable -import sys -import struct -import array - - -postFormat = """ - > - formatType: 16.16F - italicAngle: 16.16F # italic angle in degrees - underlinePosition: h - underlineThickness: h - isFixedPitch: L - minMemType42: L # minimum memory if TrueType font is downloaded - maxMemType42: L # maximum memory if TrueType font is downloaded - minMemType1: L # minimum memory if Type1 font is downloaded - maxMemType1: L # maximum memory if Type1 font is downloaded -""" - -postFormatSize = sstruct.calcsize(postFormat) - - -class table__p_o_s_t(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - sstruct.unpack(postFormat, data[:postFormatSize], self) - data = data[postFormatSize:] - if self.formatType == 1.0: - self.decode_format_1_0(data, ttFont) - elif self.formatType == 2.0: - self.decode_format_2_0(data, ttFont) - elif self.formatType == 3.0: - self.decode_format_3_0(data, ttFont) - elif self.formatType == 4.0: - self.decode_format_4_0(data, ttFont) - else: - # supported format - raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType) - - def compile(self, ttFont): - data = sstruct.pack(postFormat, self) - if self.formatType == 1.0: - 
pass # we're done - elif self.formatType == 2.0: - data = data + self.encode_format_2_0(ttFont) - elif self.formatType == 3.0: - pass # we're done - elif self.formatType == 4.0: - data = data + self.encode_format_4_0(ttFont) - else: - # supported format - raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType) - return data - - def getGlyphOrder(self): - """This function will get called by a ttLib.TTFont instance. - Do not call this function yourself, use TTFont().getGlyphOrder() - or its relatives instead! - """ - if not hasattr(self, "glyphOrder"): - raise ttLib.TTLibError("illegal use of getGlyphOrder()") - glyphOrder = self.glyphOrder - del self.glyphOrder - return glyphOrder - - def decode_format_1_0(self, data, ttFont): - self.glyphOrder = standardGlyphOrder[:ttFont["maxp"].numGlyphs] - - def decode_format_2_0(self, data, ttFont): - numGlyphs, = struct.unpack(">H", data[:2]) - numGlyphs = int(numGlyphs) - if numGlyphs > ttFont['maxp'].numGlyphs: - # Assume the numGlyphs field is bogus, so sync with maxp. - # I've seen this in one font, and if the assumption is - # wrong elsewhere, well, so be it: it's hard enough to - # work around _one_ non-conforming post format... 
- numGlyphs = ttFont['maxp'].numGlyphs - data = data[2:] - indices = array.array("H") - indices.fromstring(data[:2*numGlyphs]) - if sys.byteorder != "big": - indices.byteswap() - data = data[2*numGlyphs:] - self.extraNames = extraNames = unpackPStrings(data) - self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs) - for glyphID in range(numGlyphs): - index = indices[glyphID] - if index > 32767: # reserved for future use; ignore - name = "" - elif index > 257: - try: - name = extraNames[index-258] - except IndexError: - name = "" - else: - # fetch names from standard list - name = standardGlyphOrder[index] - glyphOrder[glyphID] = name - self.build_psNameMapping(ttFont) - - def build_psNameMapping(self, ttFont): - mapping = {} - allNames = {} - for i in range(ttFont['maxp'].numGlyphs): - glyphName = psName = self.glyphOrder[i] - if glyphName == "": - glyphName = "glyph%.5d" % i - if glyphName in allNames: - # make up a new glyphName that's unique - n = allNames[glyphName] - while (glyphName + "#" + str(n)) in allNames: - n += 1 - allNames[glyphName] = n + 1 - glyphName = glyphName + "#" + str(n) - - self.glyphOrder[i] = glyphName - allNames[glyphName] = 1 - if glyphName != psName: - mapping[glyphName] = psName - - self.mapping = mapping - - def decode_format_3_0(self, data, ttFont): - # Setting self.glyphOrder to None will cause the TTFont object - # try and construct glyph names from a Unicode cmap table. - self.glyphOrder = None - - def decode_format_4_0(self, data, ttFont): - from fontTools import agl - numGlyphs = ttFont['maxp'].numGlyphs - indices = array.array("H") - indices.fromstring(data) - if sys.byteorder != "big": - indices.byteswap() - # In some older fonts, the size of the post table doesn't match - # the number of glyphs. Sometimes it's bigger, sometimes smaller. 
- self.glyphOrder = glyphOrder = [''] * int(numGlyphs) - for i in range(min(len(indices),numGlyphs)): - if indices[i] == 0xFFFF: - self.glyphOrder[i] = '' - elif indices[i] in agl.UV2AGL: - self.glyphOrder[i] = agl.UV2AGL[indices[i]] - else: - self.glyphOrder[i] = "uni%04X" % indices[i] - self.build_psNameMapping(ttFont) - - def encode_format_2_0(self, ttFont): - numGlyphs = ttFont['maxp'].numGlyphs - glyphOrder = ttFont.getGlyphOrder() - assert len(glyphOrder) == numGlyphs - indices = array.array("H") - extraDict = {} - extraNames = self.extraNames - for i in range(len(extraNames)): - extraDict[extraNames[i]] = i - for glyphID in range(numGlyphs): - glyphName = glyphOrder[glyphID] - if glyphName in self.mapping: - psName = self.mapping[glyphName] - else: - psName = glyphName - if psName in extraDict: - index = 258 + extraDict[psName] - elif psName in standardGlyphOrder: - index = standardGlyphOrder.index(psName) - else: - index = 258 + len(extraNames) - assert index < 32768, "Too many glyph names for 'post' table format 2" - extraDict[psName] = len(extraNames) - extraNames.append(psName) - indices.append(index) - if sys.byteorder != "big": - indices.byteswap() - return struct.pack(">H", numGlyphs) + indices.tostring() + packPStrings(extraNames) - - def encode_format_4_0(self, ttFont): - from fontTools import agl - numGlyphs = ttFont['maxp'].numGlyphs - glyphOrder = ttFont.getGlyphOrder() - assert len(glyphOrder) == numGlyphs - indices = array.array("H") - for glyphID in glyphOrder: - glyphID = glyphID.split('#')[0] - if glyphID in agl.AGL2UV: - indices.append(agl.AGL2UV[glyphID]) - elif len(glyphID) == 7 and glyphID[:3] == 'uni': - indices.append(int(glyphID[3:],16)) - else: - indices.append(0xFFFF) - if sys.byteorder != "big": - indices.byteswap() - return indices.tostring() - - def toXML(self, writer, ttFont): - formatstring, names, fixes = sstruct.getformat(postFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - 
writer.newline() - if hasattr(self, "mapping"): - writer.begintag("psNames") - writer.newline() - writer.comment("This file uses unique glyph names based on the information\n" - "found in the 'post' table. Since these names might not be unique,\n" - "we have to invent artificial names in case of clashes. In order to\n" - "be able to retain the original information, we need a name to\n" - "ps name mapping for those cases where they differ. That's what\n" - "you see below.\n") - writer.newline() - items = sorted(self.mapping.items()) - for name, psName in items: - writer.simpletag("psName", name=name, psName=psName) - writer.newline() - writer.endtag("psNames") - writer.newline() - if hasattr(self, "extraNames"): - writer.begintag("extraNames") - writer.newline() - writer.comment("following are the name that are not taken from the standard Mac glyph order") - writer.newline() - for name in self.extraNames: - writer.simpletag("psName", name=name) - writer.newline() - writer.endtag("extraNames") - writer.newline() - if hasattr(self, "data"): - writer.begintag("hexdata") - writer.newline() - writer.dumphex(self.data) - writer.endtag("hexdata") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name not in ("psNames", "extraNames", "hexdata"): - setattr(self, name, safeEval(attrs["value"])) - elif name == "psNames": - self.mapping = {} - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == "psName": - self.mapping[attrs["name"]] = attrs["psName"] - elif name == "extraNames": - self.extraNames = [] - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == "psName": - self.extraNames.append(attrs["name"]) - else: - self.data = readHex(content) - - -def unpackPStrings(data): - strings = [] - index = 0 - dataLen = len(data) - while index < dataLen: - length = byteord(data[index]) - 
strings.append(tostr(data[index+1:index+1+length], encoding="latin1")) - index = index + 1 + length - return strings - - -def packPStrings(strings): - data = b"" - for s in strings: - data = data + bytechr(len(s)) + tobytes(s, encoding="latin1") - return data diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_p_r_e_p.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_p_r_e_p.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_p_r_e_p.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_p_r_e_p.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,8 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib - -superclass = ttLib.getTableClass("fpgm") - -class table__p_r_e_p(superclass): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/sbixGlyph.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/sbixGlyph.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/sbixGlyph.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/sbixGlyph.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,119 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import readHex, safeEval -import struct - - -sbixGlyphHeaderFormat = """ - > - originOffsetX: h # The x-value of the point in the glyph relative to its - # lower-left corner which corresponds to the origin of - # the glyph on the screen, that is the point on the - # baseline at the left edge of the glyph. - originOffsetY: h # The y-value of the point in the glyph relative to its - # lower-left corner which corresponds to the origin of - # the glyph on the screen, that is the point on the - # baseline at the left edge of the glyph. - graphicType: 4s # e.g. 
"png " -""" - -sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat) - - -class Glyph(object): - def __init__(self, glyphName=None, referenceGlyphName=None, originOffsetX=0, originOffsetY=0, graphicType=None, imageData=None, rawdata=None, gid=0): - self.gid = gid - self.glyphName = glyphName - self.referenceGlyphName = referenceGlyphName - self.originOffsetX = originOffsetX - self.originOffsetY = originOffsetY - self.rawdata = rawdata - self.graphicType = graphicType - self.imageData = imageData - - # fix self.graphicType if it is null terminated or too short - if self.graphicType is not None: - if self.graphicType[-1] == "\0": - self.graphicType = self.graphicType[:-1] - if len(self.graphicType) > 4: - from fontTools import ttLib - raise ttLib.TTLibError("Glyph.graphicType must not be longer than 4 characters.") - elif len(self.graphicType) < 4: - # pad with spaces - self.graphicType += " "[:(4 - len(self.graphicType))] - - def decompile(self, ttFont): - self.glyphName = ttFont.getGlyphName(self.gid) - if self.rawdata is None: - from fontTools import ttLib - raise ttLib.TTLibError("No table data to decompile") - if len(self.rawdata) > 0: - if len(self.rawdata) < sbixGlyphHeaderFormatSize: - from fontTools import ttLib - #print "Glyph %i header too short: Expected %x, got %x." 
% (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata)) - raise ttLib.TTLibError("Glyph header too short.") - - sstruct.unpack(sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self) - - if self.graphicType == "dupe": - # this glyph is a reference to another glyph's image data - gid, = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:]) - self.referenceGlyphName = ttFont.getGlyphName(gid) - else: - self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:] - self.referenceGlyphName = None - # clean up - del self.rawdata - del self.gid - - def compile(self, ttFont): - if self.glyphName is None: - from fontTools import ttLib - raise ttLib.TTLibError("Can't compile Glyph without glyph name") - # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index? - # (needed if you just want to compile the sbix table on its own) - self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName)) - if self.graphicType is None: - self.rawdata = "" - else: - self.rawdata = sstruct.pack(sbixGlyphHeaderFormat, self) + self.imageData - - def toXML(self, xmlWriter, ttFont): - if self.graphicType == None: - # TODO: ignore empty glyphs? - # a glyph data entry is required for each glyph, - # but empty ones can be calculated at compile time - xmlWriter.simpletag("glyph", name=self.glyphName) - xmlWriter.newline() - return - xmlWriter.begintag("glyph", - graphicType=self.graphicType, - name=self.glyphName, - originOffsetX=self.originOffsetX, - originOffsetY=self.originOffsetY, - ) - xmlWriter.newline() - if self.graphicType == "dupe": - # graphicType == "dupe" is a reference to another glyph id. 
- xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName) - else: - xmlWriter.begintag("hexdata") - xmlWriter.newline() - xmlWriter.dumphex(self.imageData) - xmlWriter.endtag("hexdata") - xmlWriter.newline() - xmlWriter.endtag("glyph") - xmlWriter.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "ref": - # glyph is a "dupe", i.e. a reference to another glyph's image data. - # in this case imageData contains the glyph id of the reference glyph - # get glyph id from glyphname - self.imageData = struct.pack(">H", ttFont.getGlyphID(safeEval("'''" + attrs["glyphname"] + "'''"))) - elif name == "hexdata": - self.imageData = readHex(content) - else: - from fontTools import ttLib - raise ttLib.TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_s_b_i_x.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_s_b_i_x.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_s_b_i_x.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_s_b_i_x.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,117 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval, num2binary, binary2num -from . import DefaultTable -from .sbixGlyph import * -from .sbixStrike import * - - -sbixHeaderFormat = """ - > - version: H # Version number (set to 1) - flags: H # The only two bits used in the flags field are bits 0 - # and 1. For historical reasons, bit 0 must always be 1. 
- # Bit 1 is a sbixDrawOutlines flag and is interpreted as - # follows: - # 0: Draw only 'sbix' bitmaps - # 1: Draw both 'sbix' bitmaps and outlines, in that - # order - numStrikes: L # Number of bitmap strikes to follow -""" -sbixHeaderFormatSize = sstruct.calcsize(sbixHeaderFormat) - - -sbixStrikeOffsetFormat = """ - > - strikeOffset: L # Offset from begining of table to data for the - # individual strike -""" -sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat) - - -class table__s_b_i_x(DefaultTable.DefaultTable): - def __init__(self, tag): - self.tableTag = tag - self.version = 1 - self.flags = 1 - self.numStrikes = 0 - self.strikes = {} - self.strikeOffsets = [] - - def decompile(self, data, ttFont): - # read table header - sstruct.unpack(sbixHeaderFormat, data[ : sbixHeaderFormatSize], self) - # collect offsets to individual strikes in self.strikeOffsets - for i in range(self.numStrikes): - current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize - offset_entry = sbixStrikeOffset() - sstruct.unpack(sbixStrikeOffsetFormat, \ - data[current_offset:current_offset+sbixStrikeOffsetFormatSize], \ - offset_entry) - self.strikeOffsets.append(offset_entry.strikeOffset) - - # decompile Strikes - for i in range(self.numStrikes-1, -1, -1): - current_strike = Strike(rawdata=data[self.strikeOffsets[i]:]) - data = data[:self.strikeOffsets[i]] - current_strike.decompile(ttFont) - #print " Strike length: %xh" % len(bitmapSetData) - #print "Number of Glyph entries:", len(current_strike.glyphs) - if current_strike.ppem in self.strikes: - from fontTools import ttLib - raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike") - self.strikes[current_strike.ppem] = current_strike - - # after the glyph data records have been extracted, we don't need the offsets anymore - del self.strikeOffsets - del self.numStrikes - - def compile(self, ttFont): - sbixData = "" - self.numStrikes = len(self.strikes) - sbixHeader = 
sstruct.pack(sbixHeaderFormat, self) - - # calculate offset to start of first strike - setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes - - for si in sorted(self.strikes.keys()): - current_strike = self.strikes[si] - current_strike.compile(ttFont) - # append offset to this strike to table header - current_strike.strikeOffset = setOffset - sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike) - setOffset += len(current_strike.data) - sbixData += current_strike.data - - return sbixHeader + sbixData - - def toXML(self, xmlWriter, ttFont): - xmlWriter.simpletag("version", value=self.version) - xmlWriter.newline() - xmlWriter.simpletag("flags", value=num2binary(self.flags, 16)) - xmlWriter.newline() - for i in sorted(self.strikes.keys()): - self.strikes[i].toXML(xmlWriter, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name =="version": - setattr(self, name, safeEval(attrs["value"])) - elif name == "flags": - setattr(self, name, binary2num(attrs["value"])) - elif name == "strike": - current_strike = Strike() - for element in content: - if isinstance(element, tuple): - name, attrs, content = element - current_strike.fromXML(name, attrs, content, ttFont) - self.strikes[current_strike.ppem] = current_strike - else: - from fontTools import ttLib - raise ttLib.TTLibError("can't handle '%s' element" % name) - - -# Helper classes - -class sbixStrikeOffset(object): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/sbixStrike.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/sbixStrike.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/sbixStrike.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/sbixStrike.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,150 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import readHex -from .sbixGlyph import * 
-import struct - -sbixStrikeHeaderFormat = """ - > - ppem: H # The PPEM for which this strike was designed (e.g., 9, - # 12, 24) - resolution: H # The screen resolution (in dpi) for which this strike - # was designed (e.g., 72) -""" - -sbixGlyphDataOffsetFormat = """ - > - glyphDataOffset: L # Offset from the beginning of the strike data record - # to data for the individual glyph -""" - -sbixStrikeHeaderFormatSize = sstruct.calcsize(sbixStrikeHeaderFormat) -sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat) - - -class Strike(object): - def __init__(self, rawdata=None, ppem=0, resolution=72): - self.data = rawdata - self.ppem = ppem - self.resolution = resolution - self.glyphs = {} - - def decompile(self, ttFont): - if self.data is None: - from fontTools import ttLib - raise ttLib.TTLibError - if len(self.data) < sbixStrikeHeaderFormatSize: - from fontTools import ttLib - raise(ttLib.TTLibError, "Strike header too short: Expected %x, got %x.") \ - % (sbixStrikeHeaderFormatSize, len(self.data)) - - # read Strike header from raw data - sstruct.unpack(sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self) - - # calculate number of glyphs - firstGlyphDataOffset, = struct.unpack(">L", \ - self.data[sbixStrikeHeaderFormatSize:sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize]) - self.numGlyphs = (firstGlyphDataOffset - sbixStrikeHeaderFormatSize) // sbixGlyphDataOffsetFormatSize - 1 - # ^ -1 because there's one more offset than glyphs - - # build offset list for single glyph data offsets - self.glyphDataOffsets = [] - for i in range(self.numGlyphs + 1): # + 1 because there's one more offset than glyphs - start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize - current_offset, = struct.unpack(">L", self.data[start:start + sbixGlyphDataOffsetFormatSize]) - self.glyphDataOffsets.append(current_offset) - - # iterate through offset list and slice raw data into glyph data records - for i in range(self.numGlyphs): 
- current_glyph = Glyph(rawdata=self.data[self.glyphDataOffsets[i]:self.glyphDataOffsets[i+1]], gid=i) - current_glyph.decompile(ttFont) - self.glyphs[current_glyph.glyphName] = current_glyph - del self.glyphDataOffsets - del self.numGlyphs - del self.data - - def compile(self, ttFont): - self.glyphDataOffsets = "" - self.bitmapData = "" - - glyphOrder = ttFont.getGlyphOrder() - - # first glyph starts right after the header - currentGlyphDataOffset = sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1) - for glyphName in glyphOrder: - if glyphName in self.glyphs: - # we have glyph data for this glyph - current_glyph = self.glyphs[glyphName] - else: - # must add empty glyph data record for this glyph - current_glyph = Glyph(glyphName=glyphName) - current_glyph.compile(ttFont) - current_glyph.glyphDataOffset = currentGlyphDataOffset - self.bitmapData += current_glyph.rawdata - currentGlyphDataOffset += len(current_glyph.rawdata) - self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, current_glyph) - - # add last "offset", really the end address of the last glyph data record - dummy = Glyph() - dummy.glyphDataOffset = currentGlyphDataOffset - self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy) - - # pack header - self.data = sstruct.pack(sbixStrikeHeaderFormat, self) - # add offsets and image data after header - self.data += self.glyphDataOffsets + self.bitmapData - - def toXML(self, xmlWriter, ttFont): - xmlWriter.begintag("strike") - xmlWriter.newline() - xmlWriter.simpletag("ppem", value=self.ppem) - xmlWriter.newline() - xmlWriter.simpletag("resolution", value=self.resolution) - xmlWriter.newline() - glyphOrder = ttFont.getGlyphOrder() - for i in range(len(glyphOrder)): - if glyphOrder[i] in self.glyphs: - self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont) - # TODO: what if there are more glyph data records than (glyf table) glyphs? 
- xmlWriter.endtag("strike") - xmlWriter.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name in ["ppem", "resolution"]: - setattr(self, name, safeEval(attrs["value"])) - elif name == "glyph": - if "graphicType" in attrs: - myFormat = safeEval("'''" + attrs["graphicType"] + "'''") - else: - myFormat = None - if "glyphname" in attrs: - myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''") - elif "name" in attrs: - myGlyphName = safeEval("'''" + attrs["name"] + "'''") - else: - from fontTools import ttLib - raise ttLib.TTLibError("Glyph must have a glyph name.") - if "originOffsetX" in attrs: - myOffsetX = safeEval(attrs["originOffsetX"]) - else: - myOffsetX = 0 - if "originOffsetY" in attrs: - myOffsetY = safeEval(attrs["originOffsetY"]) - else: - myOffsetY = 0 - current_glyph = Glyph( - glyphName=myGlyphName, - graphicType=myFormat, - originOffsetX=myOffsetX, - originOffsetY=myOffsetY, - ) - for element in content: - if isinstance(element, tuple): - name, attrs, content = element - current_glyph.fromXML(name, attrs, content, ttFont) - current_glyph.compile(ttFont) - self.glyphs[current_glyph.glyphName] = current_glyph - else: - from fontTools import ttLib - raise ttLib.TTLibError("can't handle '%s' element" % name) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/S_I_N_G_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/S_I_N_G_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/S_I_N_G_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/S_I_N_G_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,95 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval -from . 
import DefaultTable - -SINGFormat = """ - > # big endian - tableVersionMajor: H - tableVersionMinor: H - glyphletVersion: H - permissions: h - mainGID: H - unitsPerEm: H - vertAdvance: h - vertOrigin: h - uniqueName: 28s - METAMD5: 16s - nameLength: 1s -""" -# baseGlyphName is a byte string which follows the record above. - - -class table_S_I_N_G_(DefaultTable.DefaultTable): - - dependencies = [] - - def decompile(self, data, ttFont): - dummy, rest = sstruct.unpack2(SINGFormat, data, self) - self.uniqueName = self.decompileUniqueName(self.uniqueName) - self.nameLength = byteord(self.nameLength) - assert len(rest) == self.nameLength - self.baseGlyphName = tostr(rest) - - rawMETAMD5 = self.METAMD5 - self.METAMD5 = "[" + hex(byteord(self.METAMD5[0])) - for char in rawMETAMD5[1:]: - self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char)) - self.METAMD5 = self.METAMD5 + "]" - - def decompileUniqueName(self, data): - name = "" - for char in data: - val = byteord(char) - if val == 0: - break - if (val > 31) or (val < 128): - name += chr(val) - else: - octString = oct(val) - if len(octString) > 3: - octString = octString[1:] # chop off that leading zero. 
- elif len(octString) < 3: - octString.zfill(3) - name += "\\" + octString - return name - - def compile(self, ttFont): - d = self.__dict__.copy() - d["nameLength"] = bytechr(len(self.baseGlyphName)) - d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28) - METAMD5List = eval(self.METAMD5) - d["METAMD5"] = b"" - for val in METAMD5List: - d["METAMD5"] += bytechr(val) - assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table" - data = sstruct.pack(SINGFormat, d) - data = data + tobytes(self.baseGlyphName) - return data - - def compilecompileUniqueName(self, name, length): - nameLen = len(name) - if length <= nameLen: - name = name[:length-1] + "\000" - else: - name += (nameLen - length) * "\000" - return name - - def toXML(self, writer, ttFont): - writer.comment("Most of this table will be recalculated by the compiler") - writer.newline() - formatstring, names, fixes = sstruct.getformat(SINGFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - writer.newline() - writer.simpletag("baseGlyphName", value=self.baseGlyphName) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - value = attrs["value"] - if name in ["uniqueName", "METAMD5", "baseGlyphName"]: - setattr(self, name, value) - else: - setattr(self, name, safeEval(value)) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/S_V_G_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/S_V_G_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/S_V_G_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/S_V_G_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,379 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from . 
import DefaultTable -try: - import xml.etree.cElementTree as ET -except ImportError: - import xml.etree.ElementTree as ET -import struct -import re - -__doc__=""" -Compiles/decompiles version 0 and 1 SVG tables from/to XML. - -Version 1 is the first SVG definition, implemented in Mozilla before Aug 2013, now deprecated. -This module will decompile this correctly, but will compile a version 1 table -only if you add the secret element "" to the SVG element in the TTF file. - -Version 0 is the joint Adobe-Mozilla proposal, which supports color palettes. - -The XML format is: - - - <complete SVG doc> ]] - </svgDoc> -... - <svgDoc endGlyphID="n" startGlyphID="m"> - <![CDATA[ <complete SVG doc> ]] - </svgDoc> - - <colorPalettes> - <colorParamUINameID>n</colorParamUINameID> - ... - <colorParamUINameID>m</colorParamUINameID> - <colorPalette uiNameID="n"> - <colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" /> - ... - <colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" /> - </colorPalette> - ... - <colorPalette uiNameID="m"> - <colorRecord red="<int> green="<int>" blue="<int>" alpha="<int>" /> - ... - <colorRecord red=<int>" green="<int>" blue="<int>" alpha="<int>" /> - </colorPalette> - </colorPalettes> -</SVG> - -Color values must be less than 256. - -The number of color records in each </colorPalette> must be the same as -the number of <colorParamUINameID> elements. 
- -""" - -XML = ET.XML -XMLElement = ET.Element -xmlToString = ET.tostring - -SVG_format_0 = """ - > # big endian - version: H - offsetToSVGDocIndex: L - offsetToColorPalettes: L -""" - -SVG_format_0Size = sstruct.calcsize(SVG_format_0) - -SVG_format_1 = """ - > # big endian - version: H - numIndicies: H -""" - -SVG_format_1Size = sstruct.calcsize(SVG_format_1) - -doc_index_entry_format_0 = """ - > # big endian - startGlyphID: H - endGlyphID: H - svgDocOffset: L - svgDocLength: L -""" - -doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0) - -colorRecord_format_0 = """ - red: B - green: B - blue: B - alpha: B -""" - - -class table_S_V_G_(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - self.docList = None - self.colorPalettes = None - pos = 0 - self.version = struct.unpack(">H", data[pos:pos+2])[0] - - if self.version == 1: - self.decompile_format_1(data, ttFont) - else: - if self.version != 0: - print("Unknown SVG table version '%s'. Decompiling as version 0." % (self.version)) - self.decompile_format_0(data, ttFont) - - def decompile_format_0(self, data, ttFont): - dummy, data2 = sstruct.unpack2(SVG_format_0, data, self) - # read in SVG Documents Index - self.decompileEntryList(data) - - # read in colorPalettes table. 
- self.colorPalettes = colorPalettes = ColorPalettes() - pos = self.offsetToColorPalettes - if pos > 0: - colorPalettes.numColorParams = numColorParams = struct.unpack(">H", data[pos:pos+2])[0] - if numColorParams > 0: - colorPalettes.colorParamUINameIDs = colorParamUINameIDs = [] - pos = pos + 2 - for i in range(numColorParams): - nameID = struct.unpack(">H", data[pos:pos+2])[0] - colorParamUINameIDs.append(nameID) - pos = pos + 2 - - colorPalettes.numColorPalettes = numColorPalettes = struct.unpack(">H", data[pos:pos+2])[0] - pos = pos + 2 - if numColorPalettes > 0: - colorPalettes.colorPaletteList = colorPaletteList = [] - for i in range(numColorPalettes): - colorPalette = ColorPalette() - colorPaletteList.append(colorPalette) - colorPalette.uiNameID = struct.unpack(">H", data[pos:pos+2])[0] - pos = pos + 2 - colorPalette.paletteColors = paletteColors = [] - for j in range(numColorParams): - colorRecord, colorPaletteData = sstruct.unpack2(colorRecord_format_0, data[pos:], ColorRecord()) - paletteColors.append(colorRecord) - pos += 4 - - def decompile_format_1(self, data, ttFont): - pos = 2 - self.numEntries = struct.unpack(">H", data[pos:pos+2])[0] - pos += 2 - self.decompileEntryList(data, pos) - - def decompileEntryList(self, data): - # data starts with the first entry of the entry list. 
- pos = subTableStart = self.offsetToSVGDocIndex - self.numEntries = numEntries = struct.unpack(">H", data[pos:pos+2])[0] - pos += 2 - if self.numEntries > 0: - data2 = data[pos:] - self.docList = [] - self.entries = entries = [] - for i in range(self.numEntries): - docIndexEntry, data2 = sstruct.unpack2(doc_index_entry_format_0, data2, DocumentIndexEntry()) - entries.append(docIndexEntry) - - for entry in entries: - start = entry.svgDocOffset + subTableStart - end = start + entry.svgDocLength - doc = data[start:end] - if doc.startswith(b"\x1f\x8b"): - import gzip - bytesIO = BytesIO(doc) - with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper: - doc = gunzipper.read() - self.compressed = True - del bytesIO - doc = tostr(doc, "utf_8") - self.docList.append( [doc, entry.startGlyphID, entry.endGlyphID] ) - - def compile(self, ttFont): - if hasattr(self, "version1"): - data = self.compileFormat1(ttFont) - else: - data = self.compileFormat0(ttFont) - return data - - def compileFormat0(self, ttFont): - version = 0 - offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header. - # get SGVDoc info. 
- docList = [] - entryList = [] - numEntries = len(self.docList) - datum = struct.pack(">H",numEntries) - entryList.append(datum) - curOffset = len(datum) + doc_index_entry_format_0Size*numEntries - for doc, startGlyphID, endGlyphID in self.docList: - docOffset = curOffset - docBytes = tobytes(doc, encoding="utf_8") - if getattr(self, "compressed", False) and not docBytes.startswith(b"\x1f\x8b"): - import gzip - bytesIO = BytesIO() - with gzip.GzipFile(None, "w", fileobj=bytesIO) as gzipper: - gzipper.write(docBytes) - gzipped = bytesIO.getvalue() - if len(gzipped) < len(docBytes): - docBytes = gzipped - del gzipped, bytesIO - docLength = len(docBytes) - curOffset += docLength - entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength) - entryList.append(entry) - docList.append(docBytes) - entryList.extend(docList) - svgDocData = bytesjoin(entryList) - - # get colorpalette info. - if self.colorPalettes is None: - offsetToColorPalettes = 0 - palettesData = "" - else: - offsetToColorPalettes = SVG_format_0Size + len(svgDocData) - dataList = [] - numColorParams = len(self.colorPalettes.colorParamUINameIDs) - datum = struct.pack(">H", numColorParams) - dataList.append(datum) - for uiNameId in self.colorPalettes.colorParamUINameIDs: - datum = struct.pack(">H", uiNameId) - dataList.append(datum) - numColorPalettes = len(self.colorPalettes.colorPaletteList) - datum = struct.pack(">H", numColorPalettes) - dataList.append(datum) - for colorPalette in self.colorPalettes.colorPaletteList: - datum = struct.pack(">H", colorPalette.uiNameID) - dataList.append(datum) - for colorRecord in colorPalette.paletteColors: - data = struct.pack(">BBBB", colorRecord.red, colorRecord.green, colorRecord.blue, colorRecord.alpha) - dataList.append(data) - palettesData = bytesjoin(dataList) - - header = struct.pack(">HLL", version, offsetToSVGDocIndex, offsetToColorPalettes) - data = [header, svgDocData, palettesData] - data = bytesjoin(data) - return data - - def 
compileFormat1(self, ttFont): - version = 1 - numEntries = len(self.docList) - header = struct.pack(">HH", version, numEntries) - dataList = [header] - docList = [] - curOffset = SVG_format_1Size + doc_index_entry_format_0Size*numEntries - for doc, startGlyphID, endGlyphID in self.docList: - docOffset = curOffset - docBytes = tobytes(doc, encoding="utf_8") - docLength = len(docBytes) - curOffset += docLength - entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength) - dataList.append(entry) - docList.append(docBytes) - dataList.extend(docList) - data = bytesjoin(dataList) - return data - - def toXML(self, writer, ttFont): - writer.newline() - for doc, startGID, endGID in self.docList: - writer.begintag("svgDoc", startGlyphID=startGID, endGlyphID=endGID) - writer.newline() - writer.writecdata(doc) - writer.newline() - writer.endtag("svgDoc") - writer.newline() - - if (self.colorPalettes is not None) and (self.colorPalettes.numColorParams is not None): - writer.begintag("colorPalettes") - writer.newline() - for uiNameID in self.colorPalettes.colorParamUINameIDs: - writer.begintag("colorParamUINameID") - writer.writeraw(str(uiNameID)) - writer.endtag("colorParamUINameID") - writer.newline() - for colorPalette in self.colorPalettes.colorPaletteList: - writer.begintag("colorPalette", [("uiNameID", str(colorPalette.uiNameID))]) - writer.newline() - for colorRecord in colorPalette.paletteColors: - colorAttributes = [ - ("red", hex(colorRecord.red)), - ("green", hex(colorRecord.green)), - ("blue", hex(colorRecord.blue)), - ("alpha", hex(colorRecord.alpha)), - ] - writer.begintag("colorRecord", colorAttributes) - writer.endtag("colorRecord") - writer.newline() - writer.endtag("colorPalette") - writer.newline() - - writer.endtag("colorPalettes") - writer.newline() - else: - writer.begintag("colorPalettes") - writer.endtag("colorPalettes") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "svgDoc": - if not hasattr(self, 
"docList"): - self.docList = [] - doc = strjoin(content) - doc = doc.strip() - startGID = int(attrs["startGlyphID"]) - endGID = int(attrs["endGlyphID"]) - self.docList.append( [doc, startGID, endGID] ) - elif name == "colorPalettes": - self.colorPalettes = ColorPalettes() - self.colorPalettes.fromXML(name, attrs, content, ttFont) - if self.colorPalettes.numColorParams == 0: - self.colorPalettes = None - else: - print("Unknown", name, content) - -class DocumentIndexEntry(object): - def __init__(self): - self.startGlyphID = None # USHORT - self.endGlyphID = None # USHORT - self.svgDocOffset = None # ULONG - self.svgDocLength = None # ULONG - - def __repr__(self): - return "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength) - -class ColorPalettes(object): - def __init__(self): - self.numColorParams = None # USHORT - self.colorParamUINameIDs = [] # list of name table name ID values that provide UI description of each color palette. - self.numColorPalettes = None # USHORT - self.colorPaletteList = [] # list of ColorPalette records - - def fromXML(self, name, attrs, content, ttFont): - for element in content: - if isinstance(element, type("")): - continue - name, attrib, content = element - if name == "colorParamUINameID": - uiNameID = int(content[0]) - self.colorParamUINameIDs.append(uiNameID) - elif name == "colorPalette": - colorPalette = ColorPalette() - self.colorPaletteList.append(colorPalette) - colorPalette.fromXML((name, attrib, content), ttFont) - - self.numColorParams = len(self.colorParamUINameIDs) - self.numColorPalettes = len(self.colorPaletteList) - for colorPalette in self.colorPaletteList: - if len(colorPalette.paletteColors) != self.numColorParams: - raise ValueError("Number of color records in a colorPalette ('%s') does not match the number of colorParamUINameIDs elements ('%s')." 
% (len(colorPalette.paletteColors), self.numColorParams)) - -class ColorPalette(object): - def __init__(self): - self.uiNameID = None # USHORT. name table ID that describes user interface strings associated with this color palette. - self.paletteColors = [] # list of ColorRecords - - def fromXML(self, name, attrs, content, ttFont): - self.uiNameID = int(attrs["uiNameID"]) - for element in content: - if isinstance(element, type("")): - continue - name, attrib, content = element - if name == "colorRecord": - colorRecord = ColorRecord() - self.paletteColors.append(colorRecord) - colorRecord.red = eval(attrib["red"]) - colorRecord.green = eval(attrib["green"]) - colorRecord.blue = eval(attrib["blue"]) - colorRecord.alpha = eval(attrib["alpha"]) - -class ColorRecord(object): - def __init__(self): - self.red = 255 # all are one byte values. - self.green = 255 - self.blue = 255 - self.alpha = 255 diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/table_API_readme.txt fonttools-3.21.2/Tools/fontTools/ttLib/tables/table_API_readme.txt --- fonttools-3.0/Tools/fontTools/ttLib/tables/table_API_readme.txt 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/table_API_readme.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,91 +0,0 @@ -This folder is a subpackage of ttLib. Each module here is a -specialized TT/OT table converter: they can convert raw data -to Python objects and vice versa. Usually you don't need to -use the modules directly: they are imported and used -automatically when needed by ttLib. - -If you are writing you own table converter the following is -important. - -The modules here have pretty strange names: this is due to the -fact that we need to map TT table tags (which are case sensitive) -to filenames (which on Mac and Win aren't case sensitive) as well -as to Python identifiers. The latter means it can only contain -[A-Za-z0-9_] and cannot start with a number. 
- -ttLib provides functions to expand a tag into the format used here: - ->>> from fontTools import ttLib ->>> ttLib.tagToIdentifier("FOO ") -'F_O_O_' ->>> ttLib.tagToIdentifier("cvt ") -'_c_v_t' ->>> ttLib.tagToIdentifier("OS/2") -'O_S_2f_2' ->>> ttLib.tagToIdentifier("glyf") -'_g_l_y_f' ->>> - -And vice versa: - ->>> ttLib.identifierToTag("F_O_O_") -'FOO ' ->>> ttLib.identifierToTag("_c_v_t") -'cvt ' ->>> ttLib.identifierToTag("O_S_2f_2") -'OS/2' ->>> ttLib.identifierToTag("_g_l_y_f") -'glyf' ->>> - -Eg. the 'glyf' table converter lives in a Python file called: - - _g_l_y_f.py - -The converter itself is a class, named "table_" + expandedtag. Eg: - - class table__g_l_y_f: - etc. - -Note that if you _do_ need to use such modules or classes manually, -there are two convenient API functions that let you find them by tag: - ->>> ttLib.getTableModule('glyf') -<module 'ttLib.tables._g_l_y_f'> ->>> ttLib.getTableClass('glyf') -<class ttLib.tables._g_l_y_f.table__g_l_y_f at 645f400> ->>> - -You must subclass from DefaultTable.DefaultTable. It provides some default -behavior, as well as a constructor method (__init__) that you don't need to -override. - -Your converter should minimally provide two methods: - -class table_F_O_O_(DefaultTable.DefaultTable): # converter for table 'FOO ' - - def decompile(self, data, ttFont): - # 'data' is the raw table data. Unpack it into a - # Python data structure. - # 'ttFont' is a ttLib.TTfile instance, enabling you to - # refer to other tables. Do ***not*** keep a reference to - # it: it will cause a circular reference (ttFont saves - # a reference to us), and that means we'll be leaking - # memory. If you need to use it in other methods, just - # pass it around as a method argument. - - def compile(self, ttFont): - # Return the raw data, as converted from the Python - # data structure. - # Again, 'ttFont' is there so you can access other tables. - # Same warning applies. 
- -If you want to support TTX import/export as well, you need to provide two -additional methods: - - def toXML(self, writer, ttFont): - # XXX - - def fromXML(self, (name, attrs, content), ttFont): - # XXX - diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__0.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I__0.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__0.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I__0.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,49 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . import DefaultTable -import struct - -tsi0Format = '>HHl' - -def fixlongs(glyphID, textLength, textOffset): - return int(glyphID), int(textLength), textOffset - - -class table_T_S_I__0(DefaultTable.DefaultTable): - - dependencies = ["TSI1"] - - def decompile(self, data, ttFont): - numGlyphs = ttFont['maxp'].numGlyphs - indices = [] - size = struct.calcsize(tsi0Format) - for i in range(numGlyphs + 5): - glyphID, textLength, textOffset = fixlongs(*struct.unpack(tsi0Format, data[:size])) - indices.append((glyphID, textLength, textOffset)) - data = data[size:] - assert len(data) == 0 - assert indices[-5] == (0XFFFE, 0, -1409540300), "bad magic number" # 0xABFC1F34 - self.indices = indices[:-5] - self.extra_indices = indices[-4:] - - def compile(self, ttFont): - if not hasattr(self, "indices"): - # We have no corresponding table (TSI1 or TSI3); let's return - # no data, which effectively means "ignore us". 
- return "" - data = b"" - for index, textLength, textOffset in self.indices: - data = data + struct.pack(tsi0Format, index, textLength, textOffset) - data = data + struct.pack(tsi0Format, 0XFFFE, 0, -1409540300) # 0xABFC1F34 - for index, textLength, textOffset in self.extra_indices: - data = data + struct.pack(tsi0Format, index, textLength, textOffset) - return data - - def set(self, indices, extra_indices): - # gets called by 'TSI1' or 'TSI3' - self.indices = indices - self.extra_indices = extra_indices - - def toXML(self, writer, ttFont): - writer.comment("This table will be calculated by the compiler") - writer.newline() diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__1.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I__1.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__1.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I__1.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,116 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . import DefaultTable - -class table_T_S_I__1(DefaultTable.DefaultTable): - - extras = {0xfffa: "ppgm", 0xfffb: "cvt", 0xfffc: "reserved", 0xfffd: "fpgm"} - - indextable = "TSI0" - - def decompile(self, data, ttFont): - indextable = ttFont[self.indextable] - self.glyphPrograms = {} - for i in range(len(indextable.indices)): - glyphID, textLength, textOffset = indextable.indices[i] - if textLength == 0x8000: - # Ugh. Hi Beat! - textLength = indextable.indices[i+1][1] - if textLength > 0x8000: - pass # XXX Hmmm. 
- text = data[textOffset:textOffset+textLength] - assert len(text) == textLength - if text: - self.glyphPrograms[ttFont.getGlyphName(glyphID)] = text - - self.extraPrograms = {} - for i in range(len(indextable.extra_indices)): - extraCode, textLength, textOffset = indextable.extra_indices[i] - if textLength == 0x8000: - if self.extras[extraCode] == "fpgm": # this is the last one - textLength = len(data) - textOffset - else: - textLength = indextable.extra_indices[i+1][1] - text = data[textOffset:textOffset+textLength] - assert len(text) == textLength - if text: - self.extraPrograms[self.extras[extraCode]] = text - - def compile(self, ttFont): - if not hasattr(self, "glyphPrograms"): - self.glyphPrograms = {} - self.extraPrograms = {} - data = b'' - indextable = ttFont[self.indextable] - glyphNames = ttFont.getGlyphOrder() - - indices = [] - for i in range(len(glyphNames)): - if len(data) % 2: - data = data + b"\015" # align on 2-byte boundaries, fill with return chars. Yum. - name = glyphNames[i] - if name in self.glyphPrograms: - text = tobytes(self.glyphPrograms[name]) - else: - text = b"" - textLength = len(text) - if textLength >= 0x8000: - textLength = 0x8000 # XXX ??? - indices.append((i, textLength, len(data))) - data = data + text - - extra_indices = [] - codes = sorted(self.extras.items()) - for i in range(len(codes)): - if len(data) % 2: - data = data + b"\015" # align on 2-byte boundaries, fill with return chars. - code, name = codes[i] - if name in self.extraPrograms: - text = tobytes(self.extraPrograms[name]) - else: - text = b"" - textLength = len(text) - if textLength >= 0x8000: - textLength = 0x8000 # XXX ??? 
- extra_indices.append((code, textLength, len(data))) - data = data + text - indextable.set(indices, extra_indices) - return data - - def toXML(self, writer, ttFont): - names = sorted(self.glyphPrograms.keys()) - writer.newline() - for name in names: - text = self.glyphPrograms[name] - if not text: - continue - writer.begintag("glyphProgram", name=name) - writer.newline() - writer.write_noindent(text.replace(b"\r", b"\n")) - writer.newline() - writer.endtag("glyphProgram") - writer.newline() - writer.newline() - extra_names = sorted(self.extraPrograms.keys()) - for name in extra_names: - text = self.extraPrograms[name] - if not text: - continue - writer.begintag("extraProgram", name=name) - writer.newline() - writer.write_noindent(text.replace(b"\r", b"\n")) - writer.newline() - writer.endtag("extraProgram") - writer.newline() - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "glyphPrograms"): - self.glyphPrograms = {} - self.extraPrograms = {} - lines = strjoin(content).replace("\r", "\n").split("\n") - text = '\r'.join(lines[1:-1]) - if name == "glyphProgram": - self.glyphPrograms[attrs["name"]] = text - elif name == "extraProgram": - self.extraPrograms[attrs["name"]] = text diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__2.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I__2.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__2.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I__2.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib - -superclass = ttLib.getTableClass("TSI0") - -class table_T_S_I__2(superclass): - - dependencies = ["TSI3"] diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__3.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I__3.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__3.py 
2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I__3.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,11 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib - -superclass = ttLib.getTableClass("TSI1") - -class table_T_S_I__3(superclass): - - extras = {0xfffa: "reserved0", 0xfffb: "reserved1", 0xfffc: "reserved2", 0xfffd: "reserved3"} - - indextable = "TSI2" diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__5.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I__5.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I__5.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I__5.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,42 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import safeEval -from . import DefaultTable -import sys -import array - - -class table_T_S_I__5(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - numGlyphs = ttFont['maxp'].numGlyphs - assert len(data) == 2 * numGlyphs - a = array.array("H") - a.fromstring(data) - if sys.byteorder != "big": - a.byteswap() - self.glyphGrouping = {} - for i in range(numGlyphs): - self.glyphGrouping[ttFont.getGlyphName(i)] = a[i] - - def compile(self, ttFont): - glyphNames = ttFont.getGlyphOrder() - a = array.array("H") - for i in range(len(glyphNames)): - a.append(self.glyphGrouping[glyphNames[i]]) - if sys.byteorder != "big": - a.byteswap() - return a.tostring() - - def toXML(self, writer, ttFont): - names = sorted(self.glyphGrouping.keys()) - for glyphName in names: - writer.simpletag("glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName]) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "glyphGrouping"): - self.glyphGrouping = {} - if name != "glyphgroup": - return - 
self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"]) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_B_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_B_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_B_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_B_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . import asciiTable - -class table_T_S_I_B_(asciiTable.asciiTable): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_D_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_D_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_D_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_D_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . import asciiTable - -class table_T_S_I_D_(asciiTable.asciiTable): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_J_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_J_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_J_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_J_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . 
import asciiTable - -class table_T_S_I_J_(asciiTable.asciiTable): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_P_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_P_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_P_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_P_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . import asciiTable - -class table_T_S_I_P_(asciiTable.asciiTable): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_S_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_S_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_S_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_S_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . import asciiTable - -class table_T_S_I_S_(asciiTable.asciiTable): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_V_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_V_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/T_S_I_V_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/T_S_I_V_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . 
import asciiTable - -class table_T_S_I_V_(asciiTable.asciiTable): - pass diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/ttProgram.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/ttProgram.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/ttProgram.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/ttProgram.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,498 +0,0 @@ -"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs.""" - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc.textTools import num2binary, binary2num, readHex -import array -import re - -# first, the list of instructions that eat bytes or words from the instruction stream - -streamInstructions = [ -# -# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes -# - (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn - (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn - (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn - (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn -] - - -# next, the list of "normal" instructions - -instructions = [ -# -#, opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes -# - (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p - - (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n| - (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2) - (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 - - (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... 
, ploopvalue - - (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b - (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f - - (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n) - (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek - (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack - - (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n - - (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - - (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - - (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - - (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - - (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - - (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - - (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n - (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2 - (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e - (0x59, 'EIF', 0, 'EndIf', 0, 0), # - - - (0x1b, 'ELSE', 0, 'Else', 0, 0), # - - - (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - - - (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b - (0x57, 'EVEN', 0, 'Even', 1, 1), # e b - (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f - - (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - - - (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - - - (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue - - (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l - - (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l - - (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n) - (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c - (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result - (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py - (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py - (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b - (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b - (0x89, 
'IDEF', 0, 'InstructionDefinition', 1, 0), # f - - (0x58, 'IF', 0, 'If', 1, 0), # e - - (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v - - (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue - - (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p - - (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - - - (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset - - (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset - - (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset - - (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count - - (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b - (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b - (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2) - (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d - (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p - - (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p - - (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p - - (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2) - (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek - (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p - - (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem - (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize - (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p - - (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64 - (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n - (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b - (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e ) - (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2 - (0x56, 'ODD', 0, 'Odd', 1, 1), # e b - (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b - (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e - - (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value - (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - - - (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - - - (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c - (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2 - (0x43, 'RS', 0, 'ReadStore', 1, 1), # 
n v - (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - - - (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - - - (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - - - (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - - - (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n - - (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight - - (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n - - (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n - - (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p - - (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n - - (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n - - (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 - - (0x5f, 'SDS', 0, 'SetDeltaShiftInGState',1, 0), # n - - (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x - - (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - - - (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 - - (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - - - (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c - - (0x32, 'SHP', 1, 'ShiftPointByLastPoint',-1, 0), # p1, p2, ..., ploopvalue - - (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue - - (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e - - (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n - - (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance - - (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x - - (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - - - (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 - - (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n - - (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p - - (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p - - (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p - - (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n - - (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n - - (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2) - (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - - - (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2 - (0x13, 
'SZP0', 0, 'SetZonePointer0', 1, 0), # n - - (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n - - (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n - - (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n - - (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p - - (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l - - (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l - - (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l - -] - - -def bitRepr(value, bits): - s = "" - for i in range(bits): - s = "01"[value & 0x1] + s - value = value >> 1 - return s - - -_mnemonicPat = re.compile("[A-Z][A-Z0-9]*$") - -def _makeDict(instructionList): - opcodeDict = {} - mnemonicDict = {} - for op, mnemonic, argBits, name, pops, pushes in instructionList: - assert _mnemonicPat.match(mnemonic) - mnemonicDict[mnemonic] = op, argBits, name - if argBits: - argoffset = op - for i in range(1 << argBits): - opcodeDict[op+i] = mnemonic, argBits, argoffset, name - else: - opcodeDict[op] = mnemonic, 0, 0, name - return opcodeDict, mnemonicDict - -streamOpcodeDict, streamMnemonicDict = _makeDict(streamInstructions) -opcodeDict, mnemonicDict = _makeDict(instructions) - -class tt_instructions_error(Exception): - def __init__(self, error): - self.error = error - def __str__(self): - return "TT instructions error: %s" % repr(self.error) - - -_comment = r"/\*.*?\*/" -_instruction = r"([A-Z][A-Z0-9]*)\s*\[(.*?)\]" -_number = r"-?[0-9]+" -_token = "(%s)|(%s)|(%s)" % (_instruction, _number, _comment) - -_tokenRE = re.compile(_token) -_whiteRE = re.compile(r"\s*") - -_pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/") - - -def _skipWhite(data, pos): - m = _whiteRE.match(data, pos) - newPos = m.regs[0][1] - assert newPos >= pos - return newPos - - -class Program(object): - - def __init__(self): - pass - - def fromBytecode(self, bytecode): - self.bytecode = array.array("B", bytecode) - if hasattr(self, "assembly"): - del self.assembly - - def fromAssembly(self, assembly): - self.assembly = assembly - if 
hasattr(self, "bytecode"): - del self.bytecode - - def getBytecode(self): - if not hasattr(self, "bytecode"): - self._assemble() - return self.bytecode.tostring() - - def getAssembly(self, preserve=False): - if not hasattr(self, "assembly"): - self._disassemble(preserve=preserve) - return self.assembly - - def toXML(self, writer, ttFont): - if not hasattr (ttFont, "disassembleInstructions") or ttFont.disassembleInstructions: - assembly = self.getAssembly() - writer.begintag("assembly") - writer.newline() - i = 0 - nInstr = len(assembly) - while i < nInstr: - instr = assembly[i] - writer.write(instr) - writer.newline() - m = _pushCountPat.match(instr) - i = i + 1 - if m: - nValues = int(m.group(1)) - line = [] - j = 0 - for j in range(nValues): - if j and not (j % 25): - writer.write(' '.join(line)) - writer.newline() - line = [] - line.append(assembly[i+j]) - writer.write(' '.join(line)) - writer.newline() - i = i + j + 1 - writer.endtag("assembly") - else: - writer.begintag("bytecode") - writer.newline() - writer.dumphex(self.getBytecode()) - writer.endtag("bytecode") - - def fromXML(self, name, attrs, content, ttFont): - if name == "assembly": - self.fromAssembly(strjoin(content)) - self._assemble() - del self.assembly - else: - assert name == "bytecode" - self.fromBytecode(readHex(content)) - - def _assemble(self): - assembly = self.assembly - if isinstance(assembly, type([])): - assembly = ' '.join(assembly) - bytecode = [] - push = bytecode.append - lenAssembly = len(assembly) - pos = _skipWhite(assembly, 0) - while pos < lenAssembly: - m = _tokenRE.match(assembly, pos) - if m is None: - raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos-5:pos+15]) - dummy, mnemonic, arg, number, comment = m.groups() - pos = m.regs[0][1] - if comment: - pos = _skipWhite(assembly, pos) - continue - - arg = arg.strip() - if mnemonic.startswith("INSTR"): - # Unknown instruction - op = int(mnemonic[5:]) - push(op) - elif mnemonic not in ("PUSH", "NPUSHB", 
"NPUSHW", "PUSHB", "PUSHW"): - op, argBits, name = mnemonicDict[mnemonic] - if len(arg) != argBits: - raise tt_instructions_error("Incorrect number of argument bits (%s[%s])" % (mnemonic, arg)) - if arg: - arg = binary2num(arg) - push(op + arg) - else: - push(op) - else: - args = [] - pos = _skipWhite(assembly, pos) - while pos < lenAssembly: - m = _tokenRE.match(assembly, pos) - if m is None: - raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos:pos+15]) - dummy, _mnemonic, arg, number, comment = m.groups() - if number is None and comment is None: - break - pos = m.regs[0][1] - pos = _skipWhite(assembly, pos) - if comment is not None: - continue - args.append(int(number)) - nArgs = len(args) - if mnemonic == "PUSH": - # Automatically choose the most compact representation - nWords = 0 - while nArgs: - while nWords < nArgs and nWords < 255 and not (0 <= args[nWords] <= 255): - nWords += 1 - nBytes = 0 - while nWords+nBytes < nArgs and nBytes < 255 and 0 <= args[nWords+nBytes] <= 255: - nBytes += 1 - if nBytes < 2 and nWords + nBytes < 255 and nWords + nBytes != nArgs: - # Will write bytes as words - nWords += nBytes - continue - - # Write words - if nWords: - if nWords <= 8: - op, argBits, name = streamMnemonicDict["PUSHW"] - op = op + nWords - 1 - push(op) - else: - op, argBits, name = streamMnemonicDict["NPUSHW"] - push(op) - push(nWords) - for value in args[:nWords]: - assert -32768 <= value < 32768, "PUSH value out of range %d" % value - push((value >> 8) & 0xff) - push(value & 0xff) - - # Write bytes - if nBytes: - pass - if nBytes <= 8: - op, argBits, name = streamMnemonicDict["PUSHB"] - op = op + nBytes - 1 - push(op) - else: - op, argBits, name = streamMnemonicDict["NPUSHB"] - push(op) - push(nBytes) - for value in args[nWords:nWords+nBytes]: - push(value) - - nTotal = nWords + nBytes - args = args[nTotal:] - nArgs -= nTotal - nWords = 0 - else: - # Write exactly what we've been asked to - words = mnemonic[-1] == "W" - op, argBits, 
name = streamMnemonicDict[mnemonic] - if mnemonic[0] != "N": - assert nArgs <= 8, nArgs - op = op + nArgs - 1 - push(op) - else: - assert nArgs < 256 - push(op) - push(nArgs) - if words: - for value in args: - assert -32768 <= value < 32768, "PUSHW value out of range %d" % value - push((value >> 8) & 0xff) - push(value & 0xff) - else: - for value in args: - assert 0 <= value < 256, "PUSHB value out of range %d" % value - push(value) - - pos = _skipWhite(assembly, pos) - - if bytecode: - assert max(bytecode) < 256 and min(bytecode) >= 0 - self.bytecode = array.array("B", bytecode) - - def _disassemble(self, preserve=False): - assembly = [] - i = 0 - bytecode = self.bytecode - numBytecode = len(bytecode) - while i < numBytecode: - op = bytecode[i] - try: - mnemonic, argBits, argoffset, name = opcodeDict[op] - except KeyError: - if op in streamOpcodeDict: - values = [] - - # Merge consecutive PUSH operations - while bytecode[i] in streamOpcodeDict: - op = bytecode[i] - mnemonic, argBits, argoffset, name = streamOpcodeDict[op] - words = mnemonic[-1] == "W" - if argBits: - nValues = op - argoffset + 1 - else: - i = i + 1 - nValues = bytecode[i] - i = i + 1 - assert nValues > 0 - if not words: - for j in range(nValues): - value = bytecode[i] - values.append(repr(value)) - i = i + 1 - else: - for j in range(nValues): - # cast to signed int16 - value = (bytecode[i] << 8) | bytecode[i+1] - if value >= 0x8000: - value = value - 0x10000 - values.append(repr(value)) - i = i + 2 - if preserve: - break - - if not preserve: - mnemonic = "PUSH" - nValues = len(values) - if nValues == 1: - assembly.append("%s[ ] /* 1 value pushed */" % mnemonic) - else: - assembly.append("%s[ ] /* %s values pushed */" % (mnemonic, nValues)) - assembly.extend(values) - else: - assembly.append("INSTR%d[ ]" % op) - i = i + 1 - else: - if argBits: - assembly.append(mnemonic + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name)) - else: - assembly.append(mnemonic + "[ ] /* %s */" % name) - i = 
i + 1 - self.assembly = assembly - - def __bool__(self): - """ - >>> p = Program() - >>> bool(p) - False - >>> bc = array.array("B", [0]) - >>> p.fromBytecode(bc) - >>> bool(p) - True - >>> p.bytecode.pop() - 0 - >>> bool(p) - False - - >>> p = Program() - >>> asm = ['SVTCA[0]'] - >>> p.fromAssembly(asm) - >>> bool(p) - True - >>> p.assembly.pop() - 'SVTCA[0]' - >>> bool(p) - False - """ - return ((hasattr(self, 'assembly') and len(self.assembly) > 0) or - (hasattr(self, 'bytecode') and len(self.bytecode) > 0)) - - __nonzero__ = __bool__ - - -def _test(): - """ - >>> _test() - True - """ - - bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
\212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 
F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-""" - - p = Program() - p.fromBytecode(bc) - asm = p.getAssembly(preserve=True) - p.fromAssembly(asm) - print(bc == p.getBytecode()) - -if __name__ == "__main__": - import sys - import doctest - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/V_D_M_X_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/V_D_M_X_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/V_D_M_X_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/V_D_M_X_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,234 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from . import DefaultTable -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval -import struct - -VDMX_HeaderFmt = """ - > # big endian - version: H # Version number (0 or 1) - numRecs: H # Number of VDMX groups present - numRatios: H # Number of aspect ratio groupings -""" -# the VMDX header is followed by an array of RatRange[numRatios] (i.e. 
aspect -# ratio ranges); -VDMX_RatRangeFmt = """ - > # big endian - bCharSet: B # Character set - xRatio: B # Value to use for x-Ratio - yStartRatio: B # Starting y-Ratio value - yEndRatio: B # Ending y-Ratio value -""" -# followed by an array of offset[numRatios] from start of VDMX table to the -# VDMX Group for this ratio range (offsets will be re-calculated on compile); -# followed by an array of Group[numRecs] records; -VDMX_GroupFmt = """ - > # big endian - recs: H # Number of height records in this group - startsz: B # Starting yPelHeight - endsz: B # Ending yPelHeight -""" -# followed by an array of vTable[recs] records. -VDMX_vTableFmt = """ - > # big endian - yPelHeight: H # yPelHeight to which values apply - yMax: h # Maximum value (in pels) for this yPelHeight - yMin: h # Minimum value (in pels) for this yPelHeight -""" - - -class table_V_D_M_X_(DefaultTable.DefaultTable): - - def decompile(self, data, ttFont): - pos = 0 # track current position from to start of VDMX table - dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self) - pos += sstruct.calcsize(VDMX_HeaderFmt) - self.ratRanges = [] - for i in range(self.numRatios): - ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data) - pos += sstruct.calcsize(VDMX_RatRangeFmt) - # the mapping between a ratio and a group is defined further below - ratio['groupIndex'] = None - self.ratRanges.append(ratio) - lenOffset = struct.calcsize('>H') - _offsets = [] # temporarily store offsets to groups - for i in range(self.numRatios): - offset = struct.unpack('>H', data[0:lenOffset])[0] - data = data[lenOffset:] - pos += lenOffset - _offsets.append(offset) - self.groups = [] - for groupIndex in range(self.numRecs): - # the offset to this group from beginning of the VDMX table - currOffset = pos - group, data = sstruct.unpack2(VDMX_GroupFmt, data) - # the group lenght and bounding sizes are re-calculated on compile - recs = group.pop('recs') - startsz = group.pop('startsz') - endsz = group.pop('endsz') - pos += 
sstruct.calcsize(VDMX_GroupFmt) - for j in range(recs): - vTable, data = sstruct.unpack2(VDMX_vTableFmt, data) - vTableLength = sstruct.calcsize(VDMX_vTableFmt) - pos += vTableLength - # group is a dict of (yMax, yMin) tuples keyed by yPelHeight - group[vTable['yPelHeight']] = (vTable['yMax'], vTable['yMin']) - # make sure startsz and endsz match the calculated values - minSize = min(group.keys()) - maxSize = max(group.keys()) - assert startsz == minSize, \ - "startsz (%s) must equal min yPelHeight (%s): group %d" % \ - (group.startsz, minSize, groupIndex) - assert endsz == maxSize, \ - "endsz (%s) must equal max yPelHeight (%s): group %d" % \ - (group.endsz, maxSize, groupIndex) - self.groups.append(group) - # match the defined offsets with the current group's offset - for offsetIndex, offsetValue in enumerate(_offsets): - # when numRecs < numRatios there can more than one ratio range - # sharing the same VDMX group - if currOffset == offsetValue: - # map the group with the ratio range thas has the same - # index as the offset to that group (it took me a while..) - self.ratRanges[offsetIndex]['groupIndex'] = groupIndex - # check that all ratio ranges have a group - for i in range(self.numRatios): - ratio = self.ratRanges[i] - if ratio['groupIndex'] is None: - from fontTools import ttLib - raise ttLib.TTLibError( - "no group defined for ratRange %d" % i) - - def _getOffsets(self): - """ - Calculate offsets to VDMX_Group records. - For each ratRange return a list of offset values from the beginning of - the VDMX table to a VDMX_Group. 
- """ - lenHeader = sstruct.calcsize(VDMX_HeaderFmt) - lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt) - lenOffset = struct.calcsize('>H') - lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt) - lenVTable = sstruct.calcsize(VDMX_vTableFmt) - # offset to the first group - pos = lenHeader + self.numRatios*lenRatRange + self.numRatios*lenOffset - groupOffsets = [] - for group in self.groups: - groupOffsets.append(pos) - lenGroup = lenGroupHeader + len(group) * lenVTable - pos += lenGroup # offset to next group - offsets = [] - for ratio in self.ratRanges: - groupIndex = ratio['groupIndex'] - offsets.append(groupOffsets[groupIndex]) - return offsets - - def compile(self, ttFont): - if not(self.version == 0 or self.version == 1): - from fontTools import ttLib - raise ttLib.TTLibError( - "unknown format for VDMX table: version %s" % self.version) - data = sstruct.pack(VDMX_HeaderFmt, self) - for ratio in self.ratRanges: - data += sstruct.pack(VDMX_RatRangeFmt, ratio) - # recalculate offsets to VDMX groups - for offset in self._getOffsets(): - data += struct.pack('>H', offset) - for group in self.groups: - recs = len(group) - startsz = min(group.keys()) - endsz = max(group.keys()) - gHeader = {'recs': recs, 'startsz': startsz, 'endsz': endsz} - data += sstruct.pack(VDMX_GroupFmt, gHeader) - for yPelHeight, (yMax, yMin) in sorted(group.items()): - vTable = {'yPelHeight': yPelHeight, 'yMax': yMax, 'yMin': yMin} - data += sstruct.pack(VDMX_vTableFmt, vTable) - return data - - def toXML(self, writer, ttFont): - writer.simpletag("version", value=self.version) - writer.newline() - writer.begintag("ratRanges") - writer.newline() - for ratio in self.ratRanges: - groupIndex = ratio['groupIndex'] - writer.simpletag( - "ratRange", - bCharSet=ratio['bCharSet'], - xRatio=ratio['xRatio'], - yStartRatio=ratio['yStartRatio'], - yEndRatio=ratio['yEndRatio'], - groupIndex=groupIndex - ) - writer.newline() - writer.endtag("ratRanges") - writer.newline() - writer.begintag("groups") - 
writer.newline() - for groupIndex in range(self.numRecs): - group = self.groups[groupIndex] - recs = len(group) - startsz = min(group.keys()) - endsz = max(group.keys()) - writer.begintag("group", index=groupIndex) - writer.newline() - writer.comment("recs=%d, startsz=%d, endsz=%d" % - (recs, startsz, endsz)) - writer.newline() - for yPelHeight in group.keys(): - yMax, yMin = group[yPelHeight] - writer.simpletag( - "record", yPelHeight=yPelHeight, yMax=yMax, yMin=yMin) - writer.newline() - writer.endtag("group") - writer.newline() - writer.endtag("groups") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.version = safeEval(attrs["value"]) - elif name == "ratRanges": - if not hasattr(self, "ratRanges"): - self.ratRanges = [] - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == "ratRange": - if not hasattr(self, "numRatios"): - self.numRatios = 1 - else: - self.numRatios += 1 - ratio = { - "bCharSet": safeEval(attrs["bCharSet"]), - "xRatio": safeEval(attrs["xRatio"]), - "yStartRatio": safeEval(attrs["yStartRatio"]), - "yEndRatio": safeEval(attrs["yEndRatio"]), - "groupIndex": safeEval(attrs["groupIndex"]) - } - self.ratRanges.append(ratio) - elif name == "groups": - if not hasattr(self, "groups"): - self.groups = [] - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == "group": - if not hasattr(self, "numRecs"): - self.numRecs = 1 - else: - self.numRecs += 1 - group = {} - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - if name == "record": - yPelHeight = safeEval(attrs["yPelHeight"]) - yMax = safeEval(attrs["yMax"]) - yMin = safeEval(attrs["yMin"]) - group[yPelHeight] = (yMax, yMin) - self.groups.append(group) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_v_h_e_a.py 
fonttools-3.21.2/Tools/fontTools/ttLib/tables/_v_h_e_a.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_v_h_e_a.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_v_h_e_a.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,90 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.misc import sstruct -from fontTools.misc.textTools import safeEval -from . import DefaultTable - -vheaFormat = """ - > # big endian - tableVersion: 16.16F - ascent: h - descent: h - lineGap: h - advanceHeightMax: H - minTopSideBearing: h - minBottomSideBearing: h - yMaxExtent: h - caretSlopeRise: h - caretSlopeRun: h - reserved0: h - reserved1: h - reserved2: h - reserved3: h - reserved4: h - metricDataFormat: h - numberOfVMetrics: H -""" - -class table__v_h_e_a(DefaultTable.DefaultTable): - - # Note: Keep in sync with table__h_h_e_a - - dependencies = ['vmtx', 'glyf'] - - def decompile(self, data, ttFont): - sstruct.unpack(vheaFormat, data, self) - - def compile(self, ttFont): - if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: - self.recalc(ttFont) - return sstruct.pack(vheaFormat, self) - - def recalc(self, ttFont): - vtmxTable = ttFont['vmtx'] - if 'glyf' in ttFont: - glyfTable = ttFont['glyf'] - INFINITY = 100000 - advanceHeightMax = 0 - minTopSideBearing = +INFINITY # arbitrary big number - minBottomSideBearing = +INFINITY # arbitrary big number - yMaxExtent = -INFINITY # arbitrary big negative number - - for name in ttFont.getGlyphOrder(): - height, tsb = vtmxTable[name] - advanceHeightMax = max(advanceHeightMax, height) - g = glyfTable[name] - if g.numberOfContours == 0: - continue - if g.numberOfContours < 0 and not hasattr(g, "yMax"): - # Composite glyph without extents set. - # Calculate those. 
- g.recalcBounds(glyfTable) - minTopSideBearing = min(minTopSideBearing, tsb) - bsb = height - tsb - (g.yMax - g.yMin) - minBottomSideBearing = min(minBottomSideBearing, bsb) - extent = tsb + (g.yMax - g.yMin) - yMaxExtent = max(yMaxExtent, extent) - - if yMaxExtent == -INFINITY: - # No glyph has outlines. - minTopSideBearing = 0 - minBottomSideBearing = 0 - yMaxExtent = 0 - - self.advanceHeightMax = advanceHeightMax - self.minTopSideBearing = minTopSideBearing - self.minBottomSideBearing = minBottomSideBearing - self.yMaxExtent = yMaxExtent - else: - # XXX CFF recalc... - pass - - def toXML(self, writer, ttFont): - formatstring, names, fixes = sstruct.getformat(vheaFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - setattr(self, name, safeEval(attrs["value"])) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/_v_m_t_x.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/_v_m_t_x.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/_v_m_t_x.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/_v_m_t_x.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools import ttLib - -superclass = ttLib.getTableClass("hmtx") - -class table__v_m_t_x(superclass): - - headerTag = 'vhea' - advanceName = 'height' - sideBearingName = 'tsb' - numberOfMetricsName = 'numberOfVMetrics' diff -Nru fonttools-3.0/Tools/fontTools/ttLib/tables/V_O_R_G_.py fonttools-3.21.2/Tools/fontTools/ttLib/tables/V_O_R_G_.py --- fonttools-3.0/Tools/fontTools/ttLib/tables/V_O_R_G_.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/tables/V_O_R_G_.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,140 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from 
fontTools.misc.textTools import safeEval -from . import DefaultTable -import operator -import struct - - -class table_V_O_R_G_(DefaultTable.DefaultTable): - - """ This table is structured so that you can treat it like a dictionary keyed by glyph name. - ttFont['VORG'][<glyphName>] will return the vertical origin for any glyph - ttFont['VORG'][<glyphName>] = <value> will set the vertical origin for any glyph. - """ - - def decompile(self, data, ttFont): - self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID - self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics = struct.unpack(">HHhH", data[:8]) - assert (self.majorVersion <= 1), "Major version of VORG table is higher than I know how to handle" - data = data[8:] - vids = [] - gids = [] - pos = 0 - for i in range(self.numVertOriginYMetrics): - gid, vOrigin = struct.unpack(">Hh", data[pos:pos+4]) - pos += 4 - gids.append(gid) - vids.append(vOrigin) - - self.VOriginRecords = vOrig = {} - glyphOrder = ttFont.getGlyphOrder() - try: - names = map(operator.getitem, [glyphOrder]*self.numVertOriginYMetrics, gids) - except IndexError: - getGlyphName = self.getGlyphName - names = map(getGlyphName, gids ) - - list(map(operator.setitem, [vOrig]*self.numVertOriginYMetrics, names, vids)) - - def compile(self, ttFont): - vorgs = list(self.VOriginRecords.values()) - names = list(self.VOriginRecords.keys()) - nameMap = ttFont.getReverseGlyphMap() - lenRecords = len(vorgs) - try: - gids = map(operator.getitem, [nameMap]*lenRecords, names) - except KeyError: - nameMap = ttFont.getReverseGlyphMap(rebuild=True) - gids = map(operator.getitem, [nameMap]*lenRecords, names) - vOriginTable = list(zip(gids, vorgs)) - self.numVertOriginYMetrics = lenRecords - vOriginTable.sort() # must be in ascending GID order - dataList = [ struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable] - header = struct.pack(">HHhH", self.majorVersion, self.minorVersion, 
self.defaultVertOriginY, self.numVertOriginYMetrics) - dataList.insert(0, header) - data = bytesjoin(dataList) - return data - - def toXML(self, writer, ttFont): - writer.simpletag("majorVersion", value=self.majorVersion) - writer.newline() - writer.simpletag("minorVersion", value=self.minorVersion) - writer.newline() - writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY) - writer.newline() - writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics) - writer.newline() - vOriginTable = [] - glyphNames = self.VOriginRecords.keys() - for glyphName in glyphNames: - try: - gid = ttFont.getGlyphID(glyphName) - except: - assert 0, "VORG table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName) - vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]]) - vOriginTable.sort() - for entry in vOriginTable: - vOriginRec = VOriginRecord(entry[1], entry[2]) - vOriginRec.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "VOriginRecords"): - self.VOriginRecords = {} - self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID - if name == "VOriginRecord": - vOriginRec = VOriginRecord() - for element in content: - if isinstance(element, basestring): - continue - name, attrs, content = element - vOriginRec.fromXML(name, attrs, content, ttFont) - self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin - elif "value" in attrs: - setattr(self, name, safeEval(attrs["value"])) - - def __getitem__(self, glyphSelector): - if isinstance(glyphSelector, int): - # its a gid, convert to glyph name - glyphSelector = self.getGlyphName(glyphSelector) - - if glyphSelector not in self.VOriginRecords: - return self.defaultVertOriginY - - return self.VOriginRecords[glyphSelector] - - def __setitem__(self, glyphSelector, value): - if isinstance(glyphSelector, int): - # its a gid, convert to glyph name - glyphSelector = 
self.getGlyphName(glyphSelector) - - if value != self.defaultVertOriginY: - self.VOriginRecords[glyphSelector] = value - elif glyphSelector in self.VOriginRecords: - del self.VOriginRecords[glyphSelector] - - def __delitem__(self, glyphSelector): - del self.VOriginRecords[glyphSelector] - -class VOriginRecord(object): - - def __init__(self, name=None, vOrigin=None): - self.glyphName = name - self.vOrigin = vOrigin - - def toXML(self, writer, ttFont): - writer.begintag("VOriginRecord") - writer.newline() - writer.simpletag("glyphName", value=self.glyphName) - writer.newline() - writer.simpletag("vOrigin", value=self.vOrigin) - writer.newline() - writer.endtag("VOriginRecord") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - value = attrs["value"] - if name == "glyphName": - setattr(self, name, value) - else: - setattr(self, name, safeEval(value)) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/testdata/TestOTF-Regular.otx fonttools-3.21.2/Tools/fontTools/ttLib/testdata/TestOTF-Regular.otx --- fonttools-3.0/Tools/fontTools/ttLib/testdata/TestOTF-Regular.otx 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/testdata/TestOTF-Regular.otx 1970-01-01 00:00:00.000000000 +0000 @@ -1,519 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<ttFont sfntVersion="OTTO" ttLibVersion="2.5"> - - <GlyphOrder> - <!-- The 'id' attribute is only for humans; it is ignored when parsed. 
--> - <GlyphID id="0" name=".notdef"/> - <GlyphID id="1" name=".null"/> - <GlyphID id="2" name="CR"/> - <GlyphID id="3" name="space"/> - <GlyphID id="4" name="period"/> - <GlyphID id="5" name="ellipsis"/> - </GlyphOrder> - - <head> - <!-- Most of this table will be recalculated by the compiler --> - <tableVersion value="1.0"/> - <fontRevision value="1.0"/> - <checkSumAdjustment value="0x34034793"/> - <magicNumber value="0x5f0f3cf5"/> - <flags value="00000000 00000011"/> - <unitsPerEm value="1000"/> - <created value="Thu Jun 4 14:29:11 2015"/> - <modified value="Sat Aug 1 10:07:17 2015"/> - <xMin value="50"/> - <yMin value="0"/> - <xMax value="668"/> - <yMax value="750"/> - <macStyle value="00000000 00000000"/> - <lowestRecPPEM value="9"/> - <fontDirectionHint value="2"/> - <indexToLocFormat value="0"/> - <glyphDataFormat value="0"/> - </head> - - <hhea> - <tableVersion value="1.0"/> - <ascent value="900"/> - <descent value="-300"/> - <lineGap value="0"/> - <advanceWidthMax value="723"/> - <minLeftSideBearing value="50"/> - <minRightSideBearing value="50"/> - <xMaxExtent value="668"/> - <caretSlopeRise value="1"/> - <caretSlopeRun value="0"/> - <caretOffset value="0"/> - <reserved0 value="0"/> - <reserved1 value="0"/> - <reserved2 value="0"/> - <reserved3 value="0"/> - <metricDataFormat value="0"/> - <numberOfHMetrics value="6"/> - </hhea> - - <maxp> - <tableVersion value="0x5000"/> - <numGlyphs value="6"/> - </maxp> - - <OS_2> - <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex' - will be recalculated by the compiler --> - <version value="4"/> - <xAvgCharWidth value="392"/> - <usWeightClass value="400"/> - <usWidthClass value="5"/> - <fsType value="00000000 00000000"/> - <ySubscriptXSize value="700"/> - <ySubscriptYSize value="650"/> - <ySubscriptXOffset value="0"/> - <ySubscriptYOffset value="140"/> - <ySuperscriptXSize value="700"/> - <ySuperscriptYSize value="650"/> - <ySuperscriptXOffset value="0"/> - <ySuperscriptYOffset value="477"/> - <yStrikeoutSize 
value="50"/> - <yStrikeoutPosition value="250"/> - <sFamilyClass value="2050"/> - <panose> - <bFamilyType value="2"/> - <bSerifStyle value="11"/> - <bWeight value="6"/> - <bProportion value="4"/> - <bContrast value="4"/> - <bStrokeVariation value="2"/> - <bArmStyle value="7"/> - <bLetterForm value="8"/> - <bMidline value="1"/> - <bXHeight value="4"/> - </panose> - <ulUnicodeRange1 value="10000000 00000000 00000000 00000001"/> - <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/> - <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/> - <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/> - <achVendID value="NONE"/> - <fsSelection value="00000000 11000000"/> - <usFirstCharIndex value="0"/> - <usLastCharIndex value="8230"/> - <sTypoAscender value="750"/> - <sTypoDescender value="-250"/> - <sTypoLineGap value="200"/> - <usWinAscent value="900"/> - <usWinDescent value="300"/> - <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/> - <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/> - <sxHeight value="500"/> - <sCapHeight value="700"/> - <usDefaultChar value="0"/> - <usBreakChar value="32"/> - <usMaxContext value="0"/> - </OS_2> - - <name> - <namerecord nameID="0" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Copyright (c) 2015 by FontTools. No rights reserved. 
- </namerecord> - <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Test OTF - </namerecord> - <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Regular - </namerecord> - <namerecord nameID="3" platformID="1" platEncID="0" langID="0x0" unicode="True"> - FontTools: Test OTF: 2015 - </namerecord> - <namerecord nameID="4" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Test OTF - </namerecord> - <namerecord nameID="5" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Version 1.000 - </namerecord> - <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True"> - TestOTF-Regular - </namerecord> - <namerecord nameID="7" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Test OTF is not a trademark of FontTools. - </namerecord> - <namerecord nameID="8" platformID="1" platEncID="0" langID="0x0" unicode="True"> - FontTools - </namerecord> - <namerecord nameID="9" platformID="1" platEncID="0" langID="0x0" unicode="True"> - FontTools - </namerecord> - <namerecord nameID="11" platformID="1" platEncID="0" langID="0x0" unicode="True"> - https://github.com/behdad/fonttools - </namerecord> - <namerecord nameID="12" platformID="1" platEncID="0" langID="0x0" unicode="True"> - https://github.com/behdad/fonttools - </namerecord> - <namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True"> - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - </namerecord> - <namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Test TTF - </namerecord> - <namerecord nameID="0" platformID="3" platEncID="1" langID="0x409"> - Copyright (c) 2015 by FontTools. No rights reserved. 
- </namerecord> - <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409"> - Test OTF - </namerecord> - <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409"> - Regular - </namerecord> - <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409"> - FontTools: Test OTF: 2015 - </namerecord> - <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409"> - Test OTF - </namerecord> - <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409"> - Version 1.000 - </namerecord> - <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409"> - TestOTF-Regular - </namerecord> - <namerecord nameID="7" platformID="3" platEncID="1" langID="0x409"> - Test OTF is not a trademark of FontTools. - </namerecord> - <namerecord nameID="8" platformID="3" platEncID="1" langID="0x409"> - FontTools - </namerecord> - <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409"> - FontTools - </namerecord> - <namerecord nameID="11" platformID="3" platEncID="1" langID="0x409"> - https://github.com/behdad/fonttools - </namerecord> - <namerecord nameID="12" platformID="3" platEncID="1" langID="0x409"> - https://github.com/behdad/fonttools - </namerecord> - <namerecord nameID="14" platformID="3" platEncID="1" langID="0x409"> - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - </namerecord> - </name> - - <cmap> - <tableVersion version="0"/> - <cmap_format_4 platformID="0" platEncID="3" language="0"> - <map code="0x0" name=".null"/><!-- ???? --> - <map code="0xd" name="CR"/><!-- ???? 
--> - <map code="0x20" name="space"/><!-- SPACE --> - <map code="0x2e" name="period"/><!-- FULL STOP --> - <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> - </cmap_format_4> - <cmap_format_6 platformID="1" platEncID="0" language="0"> - <map code="0x0" name=".null"/> - <map code="0x1" name=".notdef"/> - <map code="0x2" name=".notdef"/> - <map code="0x3" name=".notdef"/> - <map code="0x4" name=".notdef"/> - <map code="0x5" name=".notdef"/> - <map code="0x6" name=".notdef"/> - <map code="0x7" name=".notdef"/> - <map code="0x8" name=".notdef"/> - <map code="0x9" name=".notdef"/> - <map code="0xa" name=".notdef"/> - <map code="0xb" name=".notdef"/> - <map code="0xc" name=".notdef"/> - <map code="0xd" name="CR"/> - <map code="0xe" name=".notdef"/> - <map code="0xf" name=".notdef"/> - <map code="0x10" name=".notdef"/> - <map code="0x11" name=".notdef"/> - <map code="0x12" name=".notdef"/> - <map code="0x13" name=".notdef"/> - <map code="0x14" name=".notdef"/> - <map code="0x15" name=".notdef"/> - <map code="0x16" name=".notdef"/> - <map code="0x17" name=".notdef"/> - <map code="0x18" name=".notdef"/> - <map code="0x19" name=".notdef"/> - <map code="0x1a" name=".notdef"/> - <map code="0x1b" name=".notdef"/> - <map code="0x1c" name=".notdef"/> - <map code="0x1d" name=".notdef"/> - <map code="0x1e" name=".notdef"/> - <map code="0x1f" name=".notdef"/> - <map code="0x20" name="space"/> - <map code="0x21" name=".notdef"/> - <map code="0x22" name=".notdef"/> - <map code="0x23" name=".notdef"/> - <map code="0x24" name=".notdef"/> - <map code="0x25" name=".notdef"/> - <map code="0x26" name=".notdef"/> - <map code="0x27" name=".notdef"/> - <map code="0x28" name=".notdef"/> - <map code="0x29" name=".notdef"/> - <map code="0x2a" name=".notdef"/> - <map code="0x2b" name=".notdef"/> - <map code="0x2c" name=".notdef"/> - <map code="0x2d" name=".notdef"/> - <map code="0x2e" name="period"/> - <map code="0x2f" name=".notdef"/> - <map code="0x30" name=".notdef"/> - <map 
code="0x31" name=".notdef"/> - <map code="0x32" name=".notdef"/> - <map code="0x33" name=".notdef"/> - <map code="0x34" name=".notdef"/> - <map code="0x35" name=".notdef"/> - <map code="0x36" name=".notdef"/> - <map code="0x37" name=".notdef"/> - <map code="0x38" name=".notdef"/> - <map code="0x39" name=".notdef"/> - <map code="0x3a" name=".notdef"/> - <map code="0x3b" name=".notdef"/> - <map code="0x3c" name=".notdef"/> - <map code="0x3d" name=".notdef"/> - <map code="0x3e" name=".notdef"/> - <map code="0x3f" name=".notdef"/> - <map code="0x40" name=".notdef"/> - <map code="0x41" name=".notdef"/> - <map code="0x42" name=".notdef"/> - <map code="0x43" name=".notdef"/> - <map code="0x44" name=".notdef"/> - <map code="0x45" name=".notdef"/> - <map code="0x46" name=".notdef"/> - <map code="0x47" name=".notdef"/> - <map code="0x48" name=".notdef"/> - <map code="0x49" name=".notdef"/> - <map code="0x4a" name=".notdef"/> - <map code="0x4b" name=".notdef"/> - <map code="0x4c" name=".notdef"/> - <map code="0x4d" name=".notdef"/> - <map code="0x4e" name=".notdef"/> - <map code="0x4f" name=".notdef"/> - <map code="0x50" name=".notdef"/> - <map code="0x51" name=".notdef"/> - <map code="0x52" name=".notdef"/> - <map code="0x53" name=".notdef"/> - <map code="0x54" name=".notdef"/> - <map code="0x55" name=".notdef"/> - <map code="0x56" name=".notdef"/> - <map code="0x57" name=".notdef"/> - <map code="0x58" name=".notdef"/> - <map code="0x59" name=".notdef"/> - <map code="0x5a" name=".notdef"/> - <map code="0x5b" name=".notdef"/> - <map code="0x5c" name=".notdef"/> - <map code="0x5d" name=".notdef"/> - <map code="0x5e" name=".notdef"/> - <map code="0x5f" name=".notdef"/> - <map code="0x60" name=".notdef"/> - <map code="0x61" name=".notdef"/> - <map code="0x62" name=".notdef"/> - <map code="0x63" name=".notdef"/> - <map code="0x64" name=".notdef"/> - <map code="0x65" name=".notdef"/> - <map code="0x66" name=".notdef"/> - <map code="0x67" name=".notdef"/> - <map code="0x68" 
name=".notdef"/> - <map code="0x69" name=".notdef"/> - <map code="0x6a" name=".notdef"/> - <map code="0x6b" name=".notdef"/> - <map code="0x6c" name=".notdef"/> - <map code="0x6d" name=".notdef"/> - <map code="0x6e" name=".notdef"/> - <map code="0x6f" name=".notdef"/> - <map code="0x70" name=".notdef"/> - <map code="0x71" name=".notdef"/> - <map code="0x72" name=".notdef"/> - <map code="0x73" name=".notdef"/> - <map code="0x74" name=".notdef"/> - <map code="0x75" name=".notdef"/> - <map code="0x76" name=".notdef"/> - <map code="0x77" name=".notdef"/> - <map code="0x78" name=".notdef"/> - <map code="0x79" name=".notdef"/> - <map code="0x7a" name=".notdef"/> - <map code="0x7b" name=".notdef"/> - <map code="0x7c" name=".notdef"/> - <map code="0x7d" name=".notdef"/> - <map code="0x7e" name=".notdef"/> - <map code="0x7f" name=".notdef"/> - <map code="0x80" name=".notdef"/> - <map code="0x81" name=".notdef"/> - <map code="0x82" name=".notdef"/> - <map code="0x83" name=".notdef"/> - <map code="0x84" name=".notdef"/> - <map code="0x85" name=".notdef"/> - <map code="0x86" name=".notdef"/> - <map code="0x87" name=".notdef"/> - <map code="0x88" name=".notdef"/> - <map code="0x89" name=".notdef"/> - <map code="0x8a" name=".notdef"/> - <map code="0x8b" name=".notdef"/> - <map code="0x8c" name=".notdef"/> - <map code="0x8d" name=".notdef"/> - <map code="0x8e" name=".notdef"/> - <map code="0x8f" name=".notdef"/> - <map code="0x90" name=".notdef"/> - <map code="0x91" name=".notdef"/> - <map code="0x92" name=".notdef"/> - <map code="0x93" name=".notdef"/> - <map code="0x94" name=".notdef"/> - <map code="0x95" name=".notdef"/> - <map code="0x96" name=".notdef"/> - <map code="0x97" name=".notdef"/> - <map code="0x98" name=".notdef"/> - <map code="0x99" name=".notdef"/> - <map code="0x9a" name=".notdef"/> - <map code="0x9b" name=".notdef"/> - <map code="0x9c" name=".notdef"/> - <map code="0x9d" name=".notdef"/> - <map code="0x9e" name=".notdef"/> - <map code="0x9f" name=".notdef"/> - 
<map code="0xa0" name=".notdef"/> - <map code="0xa1" name=".notdef"/> - <map code="0xa2" name=".notdef"/> - <map code="0xa3" name=".notdef"/> - <map code="0xa4" name=".notdef"/> - <map code="0xa5" name=".notdef"/> - <map code="0xa6" name=".notdef"/> - <map code="0xa7" name=".notdef"/> - <map code="0xa8" name=".notdef"/> - <map code="0xa9" name=".notdef"/> - <map code="0xaa" name=".notdef"/> - <map code="0xab" name=".notdef"/> - <map code="0xac" name=".notdef"/> - <map code="0xad" name=".notdef"/> - <map code="0xae" name=".notdef"/> - <map code="0xaf" name=".notdef"/> - <map code="0xb0" name=".notdef"/> - <map code="0xb1" name=".notdef"/> - <map code="0xb2" name=".notdef"/> - <map code="0xb3" name=".notdef"/> - <map code="0xb4" name=".notdef"/> - <map code="0xb5" name=".notdef"/> - <map code="0xb6" name=".notdef"/> - <map code="0xb7" name=".notdef"/> - <map code="0xb8" name=".notdef"/> - <map code="0xb9" name=".notdef"/> - <map code="0xba" name=".notdef"/> - <map code="0xbb" name=".notdef"/> - <map code="0xbc" name=".notdef"/> - <map code="0xbd" name=".notdef"/> - <map code="0xbe" name=".notdef"/> - <map code="0xbf" name=".notdef"/> - <map code="0xc0" name=".notdef"/> - <map code="0xc1" name=".notdef"/> - <map code="0xc2" name=".notdef"/> - <map code="0xc3" name=".notdef"/> - <map code="0xc4" name=".notdef"/> - <map code="0xc5" name=".notdef"/> - <map code="0xc6" name=".notdef"/> - <map code="0xc7" name=".notdef"/> - <map code="0xc8" name=".notdef"/> - <map code="0xc9" name="ellipsis"/> - </cmap_format_6> - <cmap_format_4 platformID="3" platEncID="1" language="0"> - <map code="0x0" name=".null"/><!-- ???? --> - <map code="0xd" name="CR"/><!-- ???? 
--> - <map code="0x20" name="space"/><!-- SPACE --> - <map code="0x2e" name="period"/><!-- FULL STOP --> - <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> - </cmap_format_4> - </cmap> - - <post> - <formatType value="3.0"/> - <italicAngle value="0.0"/> - <underlinePosition value="-75"/> - <underlineThickness value="50"/> - <isFixedPitch value="0"/> - <minMemType42 value="0"/> - <maxMemType42 value="0"/> - <minMemType1 value="0"/> - <maxMemType1 value="0"/> - </post> - - <CFF> - <CFFFont name="TestOTF-Regular"> - <version value="001.001"/> - <Notice value="Copyright \(c\) 2015 by FontTools. No rights reserved."/> - <FullName value="Test OTF"/> - <FamilyName value="Test OTF"/> - <Weight value="Regular"/> - <isFixedPitch value="0"/> - <ItalicAngle value="0"/> - <UnderlineThickness value="50"/> - <PaintType value="0"/> - <CharstringType value="2"/> - <FontMatrix value="0.001 0 0 0.001 0 0"/> - <FontBBox value="50 0 668 750"/> - <StrokeWidth value="0"/> - <!-- charset is dumped separately as the 'GlyphOrder' element --> - <Encoding name="StandardEncoding"/> - <Private> - <BlueScale value="0.039625"/> - <BlueShift value="7"/> - <BlueFuzz value="1"/> - <ForceBold value="0"/> - <LanguageGroup value="0"/> - <ExpansionFactor value="0.06"/> - <initialRandomSeed value="0"/> - <defaultWidthX value="0"/> - <nominalWidthX value="0"/> - <Subrs> - <!-- The 'index' attribute is only for humans; it is ignored when parsed. 
--> - <CharString index="0"> - 131 122 -131 hlineto - return - </CharString> - </Subrs> - </Private> - <CharStrings> - <CharString name=".notdef"> - 500 450 hmoveto - 750 -400 -750 vlineto - 50 50 rmoveto - 650 300 -650 vlineto - endchar - </CharString> - <CharString name=".null"> - 0 endchar - </CharString> - <CharString name="CR"> - 250 endchar - </CharString> - <CharString name="ellipsis"> - 723 55 hmoveto - -107 callsubr - 241 -122 rmoveto - -107 callsubr - 241 -122 rmoveto - -107 callsubr - endchar - </CharString> - <CharString name="period"> - 241 55 hmoveto - -107 callsubr - endchar - </CharString> - <CharString name="space"> - 250 endchar - </CharString> - </CharStrings> - </CFFFont> - - <GlobalSubrs> - <!-- The 'index' attribute is only for humans; it is ignored when parsed. --> - </GlobalSubrs> - </CFF> - - <hmtx> - <mtx name=".notdef" width="500" lsb="50"/> - <mtx name=".null" width="0" lsb="0"/> - <mtx name="CR" width="250" lsb="0"/> - <mtx name="ellipsis" width="723" lsb="55"/> - <mtx name="period" width="241" lsb="55"/> - <mtx name="space" width="250" lsb="0"/> - </hmtx> - - <DSIG> - <!-- note that the Digital Signature will be invalid after recompilation! --> - <tableHeader flag="0x0" numSigs="0" version="1"/> - </DSIG> - -</ttFont> diff -Nru fonttools-3.0/Tools/fontTools/ttLib/testdata/TestTTF-Regular.ttx fonttools-3.21.2/Tools/fontTools/ttLib/testdata/TestTTF-Regular.ttx --- fonttools-3.0/Tools/fontTools/ttLib/testdata/TestTTF-Regular.ttx 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/testdata/TestTTF-Regular.ttx 1970-01-01 00:00:00.000000000 +0000 @@ -1,553 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<ttFont sfntVersion="\x00\x01\x00\x00" ttLibVersion="2.5"> - - <GlyphOrder> - <!-- The 'id' attribute is only for humans; it is ignored when parsed. 
--> - <GlyphID id="0" name=".notdef"/> - <GlyphID id="1" name=".null"/> - <GlyphID id="2" name="CR"/> - <GlyphID id="3" name="space"/> - <GlyphID id="4" name="period"/> - <GlyphID id="5" name="ellipsis"/> - </GlyphOrder> - - <head> - <!-- Most of this table will be recalculated by the compiler --> - <tableVersion value="1.0"/> - <fontRevision value="1.0"/> - <checkSumAdjustment value="0x2ee689e2"/> - <magicNumber value="0x5f0f3cf5"/> - <flags value="00000000 00000011"/> - <unitsPerEm value="1000"/> - <created value="Thu Jun 4 14:29:11 2015"/> - <modified value="Mon Aug 3 13:04:43 2015"/> - <xMin value="50"/> - <yMin value="0"/> - <xMax value="668"/> - <yMax value="750"/> - <macStyle value="00000000 00000000"/> - <lowestRecPPEM value="9"/> - <fontDirectionHint value="2"/> - <indexToLocFormat value="0"/> - <glyphDataFormat value="0"/> - </head> - - <hhea> - <tableVersion value="1.0"/> - <ascent value="900"/> - <descent value="-300"/> - <lineGap value="0"/> - <advanceWidthMax value="723"/> - <minLeftSideBearing value="50"/> - <minRightSideBearing value="50"/> - <xMaxExtent value="668"/> - <caretSlopeRise value="1"/> - <caretSlopeRun value="0"/> - <caretOffset value="0"/> - <reserved0 value="0"/> - <reserved1 value="0"/> - <reserved2 value="0"/> - <reserved3 value="0"/> - <metricDataFormat value="0"/> - <numberOfHMetrics value="6"/> - </hhea> - - <maxp> - <!-- Most of this table will be recalculated by the compiler --> - <tableVersion value="0x10000"/> - <numGlyphs value="6"/> - <maxPoints value="8"/> - <maxContours value="2"/> - <maxCompositePoints value="12"/> - <maxCompositeContours value="3"/> - <maxZones value="1"/> - <maxTwilightPoints value="0"/> - <maxStorage value="0"/> - <maxFunctionDefs value="0"/> - <maxInstructionDefs value="0"/> - <maxStackElements value="0"/> - <maxSizeOfInstructions value="0"/> - <maxComponentElements value="3"/> - <maxComponentDepth value="1"/> - </maxp> - - <OS_2> - <!-- The fields 'usFirstCharIndex' and 'usLastCharIndex' - will be 
recalculated by the compiler --> - <version value="4"/> - <xAvgCharWidth value="392"/> - <usWeightClass value="400"/> - <usWidthClass value="5"/> - <fsType value="00000000 00000000"/> - <ySubscriptXSize value="700"/> - <ySubscriptYSize value="650"/> - <ySubscriptXOffset value="0"/> - <ySubscriptYOffset value="140"/> - <ySuperscriptXSize value="700"/> - <ySuperscriptYSize value="650"/> - <ySuperscriptXOffset value="0"/> - <ySuperscriptYOffset value="477"/> - <yStrikeoutSize value="50"/> - <yStrikeoutPosition value="250"/> - <sFamilyClass value="2050"/> - <panose> - <bFamilyType value="2"/> - <bSerifStyle value="11"/> - <bWeight value="6"/> - <bProportion value="4"/> - <bContrast value="4"/> - <bStrokeVariation value="2"/> - <bArmStyle value="7"/> - <bLetterForm value="8"/> - <bMidline value="1"/> - <bXHeight value="4"/> - </panose> - <ulUnicodeRange1 value="10000000 00000000 00000000 00000001"/> - <ulUnicodeRange2 value="00000000 00000000 00000000 00000000"/> - <ulUnicodeRange3 value="00000000 00000000 00000000 00000000"/> - <ulUnicodeRange4 value="00000000 00000000 00000000 00000000"/> - <achVendID value="NONE"/> - <fsSelection value="00000000 11000000"/> - <usFirstCharIndex value="0"/> - <usLastCharIndex value="8230"/> - <sTypoAscender value="750"/> - <sTypoDescender value="-250"/> - <sTypoLineGap value="200"/> - <usWinAscent value="900"/> - <usWinDescent value="300"/> - <ulCodePageRange1 value="00000000 00000000 00000000 00000001"/> - <ulCodePageRange2 value="00000000 00000000 00000000 00000000"/> - <sxHeight value="500"/> - <sCapHeight value="700"/> - <usDefaultChar value="0"/> - <usBreakChar value="32"/> - <usMaxContext value="0"/> - </OS_2> - - <hmtx> - <mtx name=".notdef" width="500" lsb="50"/> - <mtx name=".null" width="0" lsb="0"/> - <mtx name="CR" width="250" lsb="0"/> - <mtx name="ellipsis" width="723" lsb="55"/> - <mtx name="period" width="241" lsb="55"/> - <mtx name="space" width="250" lsb="0"/> - </hmtx> - - <cmap> - <tableVersion version="0"/> - 
<cmap_format_4 platformID="0" platEncID="3" language="0"> - <map code="0x0" name=".null"/><!-- ???? --> - <map code="0xd" name="CR"/><!-- ???? --> - <map code="0x20" name="space"/><!-- SPACE --> - <map code="0x2e" name="period"/><!-- FULL STOP --> - <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> - </cmap_format_4> - <cmap_format_6 platformID="1" platEncID="0" language="0"> - <map code="0x0" name=".null"/> - <map code="0x1" name=".notdef"/> - <map code="0x2" name=".notdef"/> - <map code="0x3" name=".notdef"/> - <map code="0x4" name=".notdef"/> - <map code="0x5" name=".notdef"/> - <map code="0x6" name=".notdef"/> - <map code="0x7" name=".notdef"/> - <map code="0x8" name=".notdef"/> - <map code="0x9" name=".notdef"/> - <map code="0xa" name=".notdef"/> - <map code="0xb" name=".notdef"/> - <map code="0xc" name=".notdef"/> - <map code="0xd" name="CR"/> - <map code="0xe" name=".notdef"/> - <map code="0xf" name=".notdef"/> - <map code="0x10" name=".notdef"/> - <map code="0x11" name=".notdef"/> - <map code="0x12" name=".notdef"/> - <map code="0x13" name=".notdef"/> - <map code="0x14" name=".notdef"/> - <map code="0x15" name=".notdef"/> - <map code="0x16" name=".notdef"/> - <map code="0x17" name=".notdef"/> - <map code="0x18" name=".notdef"/> - <map code="0x19" name=".notdef"/> - <map code="0x1a" name=".notdef"/> - <map code="0x1b" name=".notdef"/> - <map code="0x1c" name=".notdef"/> - <map code="0x1d" name=".notdef"/> - <map code="0x1e" name=".notdef"/> - <map code="0x1f" name=".notdef"/> - <map code="0x20" name="space"/> - <map code="0x21" name=".notdef"/> - <map code="0x22" name=".notdef"/> - <map code="0x23" name=".notdef"/> - <map code="0x24" name=".notdef"/> - <map code="0x25" name=".notdef"/> - <map code="0x26" name=".notdef"/> - <map code="0x27" name=".notdef"/> - <map code="0x28" name=".notdef"/> - <map code="0x29" name=".notdef"/> - <map code="0x2a" name=".notdef"/> - <map code="0x2b" name=".notdef"/> - <map code="0x2c" name=".notdef"/> - <map 
code="0x2d" name=".notdef"/> - <map code="0x2e" name="period"/> - <map code="0x2f" name=".notdef"/> - <map code="0x30" name=".notdef"/> - <map code="0x31" name=".notdef"/> - <map code="0x32" name=".notdef"/> - <map code="0x33" name=".notdef"/> - <map code="0x34" name=".notdef"/> - <map code="0x35" name=".notdef"/> - <map code="0x36" name=".notdef"/> - <map code="0x37" name=".notdef"/> - <map code="0x38" name=".notdef"/> - <map code="0x39" name=".notdef"/> - <map code="0x3a" name=".notdef"/> - <map code="0x3b" name=".notdef"/> - <map code="0x3c" name=".notdef"/> - <map code="0x3d" name=".notdef"/> - <map code="0x3e" name=".notdef"/> - <map code="0x3f" name=".notdef"/> - <map code="0x40" name=".notdef"/> - <map code="0x41" name=".notdef"/> - <map code="0x42" name=".notdef"/> - <map code="0x43" name=".notdef"/> - <map code="0x44" name=".notdef"/> - <map code="0x45" name=".notdef"/> - <map code="0x46" name=".notdef"/> - <map code="0x47" name=".notdef"/> - <map code="0x48" name=".notdef"/> - <map code="0x49" name=".notdef"/> - <map code="0x4a" name=".notdef"/> - <map code="0x4b" name=".notdef"/> - <map code="0x4c" name=".notdef"/> - <map code="0x4d" name=".notdef"/> - <map code="0x4e" name=".notdef"/> - <map code="0x4f" name=".notdef"/> - <map code="0x50" name=".notdef"/> - <map code="0x51" name=".notdef"/> - <map code="0x52" name=".notdef"/> - <map code="0x53" name=".notdef"/> - <map code="0x54" name=".notdef"/> - <map code="0x55" name=".notdef"/> - <map code="0x56" name=".notdef"/> - <map code="0x57" name=".notdef"/> - <map code="0x58" name=".notdef"/> - <map code="0x59" name=".notdef"/> - <map code="0x5a" name=".notdef"/> - <map code="0x5b" name=".notdef"/> - <map code="0x5c" name=".notdef"/> - <map code="0x5d" name=".notdef"/> - <map code="0x5e" name=".notdef"/> - <map code="0x5f" name=".notdef"/> - <map code="0x60" name=".notdef"/> - <map code="0x61" name=".notdef"/> - <map code="0x62" name=".notdef"/> - <map code="0x63" name=".notdef"/> - <map code="0x64" 
name=".notdef"/> - <map code="0x65" name=".notdef"/> - <map code="0x66" name=".notdef"/> - <map code="0x67" name=".notdef"/> - <map code="0x68" name=".notdef"/> - <map code="0x69" name=".notdef"/> - <map code="0x6a" name=".notdef"/> - <map code="0x6b" name=".notdef"/> - <map code="0x6c" name=".notdef"/> - <map code="0x6d" name=".notdef"/> - <map code="0x6e" name=".notdef"/> - <map code="0x6f" name=".notdef"/> - <map code="0x70" name=".notdef"/> - <map code="0x71" name=".notdef"/> - <map code="0x72" name=".notdef"/> - <map code="0x73" name=".notdef"/> - <map code="0x74" name=".notdef"/> - <map code="0x75" name=".notdef"/> - <map code="0x76" name=".notdef"/> - <map code="0x77" name=".notdef"/> - <map code="0x78" name=".notdef"/> - <map code="0x79" name=".notdef"/> - <map code="0x7a" name=".notdef"/> - <map code="0x7b" name=".notdef"/> - <map code="0x7c" name=".notdef"/> - <map code="0x7d" name=".notdef"/> - <map code="0x7e" name=".notdef"/> - <map code="0x7f" name=".notdef"/> - <map code="0x80" name=".notdef"/> - <map code="0x81" name=".notdef"/> - <map code="0x82" name=".notdef"/> - <map code="0x83" name=".notdef"/> - <map code="0x84" name=".notdef"/> - <map code="0x85" name=".notdef"/> - <map code="0x86" name=".notdef"/> - <map code="0x87" name=".notdef"/> - <map code="0x88" name=".notdef"/> - <map code="0x89" name=".notdef"/> - <map code="0x8a" name=".notdef"/> - <map code="0x8b" name=".notdef"/> - <map code="0x8c" name=".notdef"/> - <map code="0x8d" name=".notdef"/> - <map code="0x8e" name=".notdef"/> - <map code="0x8f" name=".notdef"/> - <map code="0x90" name=".notdef"/> - <map code="0x91" name=".notdef"/> - <map code="0x92" name=".notdef"/> - <map code="0x93" name=".notdef"/> - <map code="0x94" name=".notdef"/> - <map code="0x95" name=".notdef"/> - <map code="0x96" name=".notdef"/> - <map code="0x97" name=".notdef"/> - <map code="0x98" name=".notdef"/> - <map code="0x99" name=".notdef"/> - <map code="0x9a" name=".notdef"/> - <map code="0x9b" name=".notdef"/> - 
<map code="0x9c" name=".notdef"/> - <map code="0x9d" name=".notdef"/> - <map code="0x9e" name=".notdef"/> - <map code="0x9f" name=".notdef"/> - <map code="0xa0" name=".notdef"/> - <map code="0xa1" name=".notdef"/> - <map code="0xa2" name=".notdef"/> - <map code="0xa3" name=".notdef"/> - <map code="0xa4" name=".notdef"/> - <map code="0xa5" name=".notdef"/> - <map code="0xa6" name=".notdef"/> - <map code="0xa7" name=".notdef"/> - <map code="0xa8" name=".notdef"/> - <map code="0xa9" name=".notdef"/> - <map code="0xaa" name=".notdef"/> - <map code="0xab" name=".notdef"/> - <map code="0xac" name=".notdef"/> - <map code="0xad" name=".notdef"/> - <map code="0xae" name=".notdef"/> - <map code="0xaf" name=".notdef"/> - <map code="0xb0" name=".notdef"/> - <map code="0xb1" name=".notdef"/> - <map code="0xb2" name=".notdef"/> - <map code="0xb3" name=".notdef"/> - <map code="0xb4" name=".notdef"/> - <map code="0xb5" name=".notdef"/> - <map code="0xb6" name=".notdef"/> - <map code="0xb7" name=".notdef"/> - <map code="0xb8" name=".notdef"/> - <map code="0xb9" name=".notdef"/> - <map code="0xba" name=".notdef"/> - <map code="0xbb" name=".notdef"/> - <map code="0xbc" name=".notdef"/> - <map code="0xbd" name=".notdef"/> - <map code="0xbe" name=".notdef"/> - <map code="0xbf" name=".notdef"/> - <map code="0xc0" name=".notdef"/> - <map code="0xc1" name=".notdef"/> - <map code="0xc2" name=".notdef"/> - <map code="0xc3" name=".notdef"/> - <map code="0xc4" name=".notdef"/> - <map code="0xc5" name=".notdef"/> - <map code="0xc6" name=".notdef"/> - <map code="0xc7" name=".notdef"/> - <map code="0xc8" name=".notdef"/> - <map code="0xc9" name="ellipsis"/> - </cmap_format_6> - <cmap_format_4 platformID="3" platEncID="1" language="0"> - <map code="0x0" name=".null"/><!-- ???? --> - <map code="0xd" name="CR"/><!-- ???? 
--> - <map code="0x20" name="space"/><!-- SPACE --> - <map code="0x2e" name="period"/><!-- FULL STOP --> - <map code="0x2026" name="ellipsis"/><!-- HORIZONTAL ELLIPSIS --> - </cmap_format_4> - </cmap> - - <fpgm> - <assembly> - SVTCA[0] /* SetFPVectorToAxis */ - </assembly> - </fpgm> - - <prep> - <assembly> - SVTCA[0] /* SetFPVectorToAxis */ - </assembly> - </prep> - - <cvt> - <cv index="0" value="0"/> - </cvt> - - <loca> - <!-- The 'loca' table will be calculated by the compiler --> - </loca> - - <glyf> - - <!-- The xMin, yMin, xMax and yMax values - will be recalculated by the compiler. --> - - <TTGlyph name=".notdef" xMin="50" yMin="0" xMax="450" yMax="750"> - <contour> - <pt x="50" y="0" on="1"/> - <pt x="50" y="750" on="1"/> - <pt x="450" y="750" on="1"/> - <pt x="450" y="0" on="1"/> - </contour> - <contour> - <pt x="400" y="50" on="1"/> - <pt x="400" y="700" on="1"/> - <pt x="100" y="700" on="1"/> - <pt x="100" y="50" on="1"/> - </contour> - <instructions><assembly> - SVTCA[0] /* SetFPVectorToAxis */ - SVTCA[1] /* SetFPVectorToAxis */ - </assembly></instructions> - </TTGlyph> - - <TTGlyph name=".null"/><!-- contains no outline data --> - - <TTGlyph name="CR"/><!-- contains no outline data --> - - <TTGlyph name="ellipsis" xMin="55" yMin="0" xMax="668" yMax="122"> - <component glyphName="period" x="0" y="0" flags="0x4"/> - <component glyphName="period" x="241" y="0" flags="0x4"/> - <component glyphName="period" x="482" y="0" flags="0x4"/> - <instructions><assembly> - SVTCA[0] /* SetFPVectorToAxis */ - SVTCA[1] /* SetFPVectorToAxis */ - </assembly></instructions> - </TTGlyph> - - <TTGlyph name="period" xMin="55" yMin="0" xMax="186" yMax="122"> - <contour> - <pt x="55" y="122" on="1"/> - <pt x="186" y="122" on="1"/> - <pt x="186" y="0" on="1"/> - <pt x="55" y="0" on="1"/> - </contour> - <instructions><assembly> - SVTCA[0] /* SetFPVectorToAxis */ - SVTCA[1] /* SetFPVectorToAxis */ - </assembly></instructions> - </TTGlyph> - - <TTGlyph name="space"/><!-- contains no 
outline data --> - - </glyf> - - <name> - <namerecord nameID="0" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Copyright (c) 2015 by FontTools. No rights reserved. - </namerecord> - <namerecord nameID="1" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Test TTF - </namerecord> - <namerecord nameID="2" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Regular - </namerecord> - <namerecord nameID="3" platformID="1" platEncID="0" langID="0x0" unicode="True"> - FontTools: Test TTF: 2015 - </namerecord> - <namerecord nameID="4" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Test TTF - </namerecord> - <namerecord nameID="5" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Version 1.000 - </namerecord> - <namerecord nameID="6" platformID="1" platEncID="0" langID="0x0" unicode="True"> - TestTTF-Regular - </namerecord> - <namerecord nameID="7" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Test TTF is not a trademark of FontTools. - </namerecord> - <namerecord nameID="8" platformID="1" platEncID="0" langID="0x0" unicode="True"> - FontTools - </namerecord> - <namerecord nameID="9" platformID="1" platEncID="0" langID="0x0" unicode="True"> - FontTools - </namerecord> - <namerecord nameID="11" platformID="1" platEncID="0" langID="0x0" unicode="True"> - https://github.com/behdad/fonttools - </namerecord> - <namerecord nameID="12" platformID="1" platEncID="0" langID="0x0" unicode="True"> - https://github.com/behdad/fonttools - </namerecord> - <namerecord nameID="14" platformID="1" platEncID="0" langID="0x0" unicode="True"> - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - </namerecord> - <namerecord nameID="18" platformID="1" platEncID="0" langID="0x0" unicode="True"> - Test TTF - </namerecord> - <namerecord nameID="0" platformID="3" platEncID="1" langID="0x409"> - Copyright (c) 2015 by FontTools. No rights reserved. 
- </namerecord> - <namerecord nameID="1" platformID="3" platEncID="1" langID="0x409"> - Test TTF - </namerecord> - <namerecord nameID="2" platformID="3" platEncID="1" langID="0x409"> - Regular - </namerecord> - <namerecord nameID="3" platformID="3" platEncID="1" langID="0x409"> - FontTools: Test TTF: 2015 - </namerecord> - <namerecord nameID="4" platformID="3" platEncID="1" langID="0x409"> - Test TTF - </namerecord> - <namerecord nameID="5" platformID="3" platEncID="1" langID="0x409"> - Version 1.000 - </namerecord> - <namerecord nameID="6" platformID="3" platEncID="1" langID="0x409"> - TestTTF-Regular - </namerecord> - <namerecord nameID="7" platformID="3" platEncID="1" langID="0x409"> - Test TTF is not a trademark of FontTools. - </namerecord> - <namerecord nameID="8" platformID="3" platEncID="1" langID="0x409"> - FontTools - </namerecord> - <namerecord nameID="9" platformID="3" platEncID="1" langID="0x409"> - FontTools - </namerecord> - <namerecord nameID="11" platformID="3" platEncID="1" langID="0x409"> - https://github.com/behdad/fonttools - </namerecord> - <namerecord nameID="12" platformID="3" platEncID="1" langID="0x409"> - https://github.com/behdad/fonttools - </namerecord> - <namerecord nameID="14" platformID="3" platEncID="1" langID="0x409"> - https://github.com/behdad/fonttools/blob/master/LICENSE.txt - </namerecord> - </name> - - <post> - <formatType value="2.0"/> - <italicAngle value="0.0"/> - <underlinePosition value="-75"/> - <underlineThickness value="50"/> - <isFixedPitch value="0"/> - <minMemType42 value="0"/> - <maxMemType42 value="0"/> - <minMemType1 value="0"/> - <maxMemType1 value="0"/> - <psNames> - <!-- This file uses unique glyph names based on the information - found in the 'post' table. Since these names might not be unique, - we have to invent artificial names in case of clashes. In order to - be able to retain the original information, we need a name to - ps name mapping for those cases where they differ. That's what - you see below. 
- --> - </psNames> - <extraNames> - <!-- following are the name that are not taken from the standard Mac glyph order --> - <psName name=".null"/> - <psName name="CR"/> - </extraNames> - </post> - - <gasp> - <gaspRange rangeMaxPPEM="8" rangeGaspBehavior="10"/> - <gaspRange rangeMaxPPEM="65535" rangeGaspBehavior="15"/> - </gasp> - - <DSIG> - <!-- note that the Digital Signature will be invalid after recompilation! --> - <tableHeader flag="0x0" numSigs="0" version="1"/> - </DSIG> - -</ttFont> diff -Nru fonttools-3.0/Tools/fontTools/ttLib/testdata/test_woff2_metadata.xml fonttools-3.21.2/Tools/fontTools/ttLib/testdata/test_woff2_metadata.xml --- fonttools-3.0/Tools/fontTools/ttLib/testdata/test_woff2_metadata.xml 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/testdata/test_woff2_metadata.xml 1970-01-01 00:00:00.000000000 +0000 @@ -1,103 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<metadata version="1.0"> - <uniqueid id="org.w3.webfonts.wofftest" /> - <vendor name="Test Vendor" url="http://w3c.org/Fonts" /> - <credits> - <credit name="Credit 1" role="Role 1" url="http://w3c.org/Fonts" /> - <credit name="Credit 2" role="Role 2" url="http://w3c.org/Fonts" /> - </credits> - <description url="http://w3c.org/Fonts"> - <text> - Description without language. - </text> - <text lang="en"> - Description with "en" language. - </text> - <text lang="fr"> - Description with "fr" language. - </text> - </description> - <license url="http://w3c.org/Fonts" id="License ID"> - <text> - License without language. - </text> - <text lang="en"> - License with "en" language. - </text> - <text lang="fr"> - License with "fr" language. - </text> - </license> - <copyright> - <text> - Copyright without language. - </text> - <text lang="en"> - Copyright with "en" language. - </text> - <text lang="fr"> - Copyright with "fr" language. - </text> - </copyright> - <trademark> - <text> - Trademark without language. 
- </text> - <text lang="en"> - Trademark with "en" language. - </text> - <text lang="fr"> - Trademark with "fr" language. - </text> - </trademark> - <licensee name="Licensee Name" /> - <extension id="Extension 1"> - <name>Extension 1 - Name Without Language</name> - <name lang="en">Extension 1 - Name With "en" Language</name> - <name lang="fr">Extension 1 - Name With "fr" Language</name> - <item id="Extension 1 - Item 1 ID"> - <name>Extension 1 - Item 1 - Name Without Language</name> - <name lang="en">Extension 1 - Item 1 - Name With "en" Language</name> - <name lang="fr">Extension 1 - Item 1 - Name With "fr" Language</name> - <value>Extension 1 - Item 1 - Value Without Language</value> - <value lang="en">Extension 1 - Item 1 - Value With "en" Language</value> - <value lang="fr">Extension 1 - Item 1 - Value With "fr" Language</value> - </item> - <item id="Extension 1 - Item 2 ID"> - <name>Extension 1 - Item 2 - Name Without Language</name> - <name lang="en">Extension 1 - Item 2 - Name With "en" Language</name> - <name lang="fr">Extension 1 - Item 2 - Name With "fr" Language</name> - <value>Extension 1 - Item 2 - Value Without Language</value> - <value lang="en">Extension 1 - Item 2 - Value With "en" Language</value> - <value lang="fr">Extension 1 - Item 2 - Value With "fr" Language</value> - </item> - </extension> - <extension id="Extension 2"> - <name>Extension 2 - Name Without Language</name> - <name lang="en">Extension 2 - Name With "en" Language</name> - <name lang="fr">Extension 2 - Name With "fr" Language</name> - <item id="Extension 2 - Item 1 ID"> - <name>Extension 2 - Item 1 - Name Without Language</name> - <name lang="en">Extension 2 - Item 1 - Name With "en" Language</name> - <name lang="fr">Extension 2 - Item 1 - Name With "fr" Language</name> - <value>Extension 2 - Item 1 - Value Without Language</value> - <value lang="en">Extension 2 - Item 1 - Value With "en" Language</value> - <value lang="fr">Extension 2 - Item 1 - Value With "fr" Language</value> 
- </item> - <item id="Extension 2 - Item 2 ID"> - <name>Extension 2 - Item 2 - Name Without Language</name> - <name lang="en">Extension 2 - Item 2 - Name With "en" Language</name> - <name lang="fr">Extension 2 - Item 2 - Name With "fr" Language</name> - <value>Extension 2 - Item 2 - Value Without Language</value> - <value lang="en">Extension 2 - Item 2 - Value With "en" Language</value> - <value lang="fr">Extension 2 - Item 2 - Value With "fr" Language</value> - </item> - <item id="Extension 2 - Item 3 ID"> - <name>Extension 2 - Item 3 - Name Without Language</name> - <name lang="en">Extension 2 - Item 3 - Name With "en" Language</name> - <name lang="fr">Extension 2 - Item 3 - Name With "fr" Language</name> - <value>Extension 2 - Item 3 - Value Without Language</value> - <value lang="en">Extension 2 - Item 3 - Value With "en" Language</value> - </item> - </extension> -</metadata> diff -Nru fonttools-3.0/Tools/fontTools/ttLib/woff2.py fonttools-3.21.2/Tools/fontTools/ttLib/woff2.py --- fonttools-3.0/Tools/fontTools/ttLib/woff2.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/woff2.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,1084 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -import sys -import array -import struct -from collections import OrderedDict -from fontTools.misc import sstruct -from fontTools.misc.arrayTools import calcIntBounds -from fontTools.misc.textTools import pad -from fontTools.ttLib import (TTFont, TTLibError, getTableModule, getTableClass, - getSearchRange) -from fontTools.ttLib.sfnt import (SFNTReader, SFNTWriter, DirectoryEntry, - WOFFFlavorData, sfntDirectoryFormat, sfntDirectorySize, SFNTDirectoryEntry, - sfntDirectoryEntrySize, calcChecksum) -from fontTools.ttLib.tables import ttProgram - -haveBrotli = False -try: - import brotli - haveBrotli = True -except ImportError: - pass - - -class WOFF2Reader(SFNTReader): - - flavor = "woff2" - - def 
__init__(self, file, checkChecksums=1, fontNumber=-1): - if not haveBrotli: - print('The WOFF2 decoder requires the Brotli Python extension, available at:\n' - 'https://github.com/google/brotli', file=sys.stderr) - raise ImportError("No module named brotli") - - self.file = file - - signature = Tag(self.file.read(4)) - if signature != b"wOF2": - raise TTLibError("Not a WOFF2 font (bad signature)") - - self.file.seek(0) - self.DirectoryEntry = WOFF2DirectoryEntry - data = self.file.read(woff2DirectorySize) - if len(data) != woff2DirectorySize: - raise TTLibError('Not a WOFF2 font (not enough data)') - sstruct.unpack(woff2DirectoryFormat, data, self) - - self.tables = OrderedDict() - offset = 0 - for i in range(self.numTables): - entry = self.DirectoryEntry() - entry.fromFile(self.file) - tag = Tag(entry.tag) - self.tables[tag] = entry - entry.offset = offset - offset += entry.length - - totalUncompressedSize = offset - compressedData = self.file.read(self.totalCompressedSize) - decompressedData = brotli.decompress(compressedData) - if len(decompressedData) != totalUncompressedSize: - raise TTLibError( - 'unexpected size for decompressed font data: expected %d, found %d' - % (totalUncompressedSize, len(decompressedData))) - self.transformBuffer = BytesIO(decompressedData) - - self.file.seek(0, 2) - if self.length != self.file.tell(): - raise TTLibError("reported 'length' doesn't match the actual file size") - - self.flavorData = WOFF2FlavorData(self) - - # make empty TTFont to store data while reconstructing tables - self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) - - def __getitem__(self, tag): - """Fetch the raw table data. 
Reconstruct transformed tables.""" - entry = self.tables[Tag(tag)] - if not hasattr(entry, 'data'): - if tag in woff2TransformedTableTags: - entry.data = self.reconstructTable(tag) - else: - entry.data = entry.loadData(self.transformBuffer) - return entry.data - - def reconstructTable(self, tag): - """Reconstruct table named 'tag' from transformed data.""" - if tag not in woff2TransformedTableTags: - raise TTLibError("transform for table '%s' is unknown" % tag) - entry = self.tables[Tag(tag)] - rawData = entry.loadData(self.transformBuffer) - if tag == 'glyf': - # no need to pad glyph data when reconstructing - padding = self.padding if hasattr(self, 'padding') else None - data = self._reconstructGlyf(rawData, padding) - elif tag == 'loca': - data = self._reconstructLoca() - else: - raise NotImplementedError - return data - - def _reconstructGlyf(self, data, padding=None): - """ Return recostructed glyf table data, and set the corresponding loca's - locations. Optionally pad glyph offsets to the specified number of bytes. - """ - self.ttFont['loca'] = WOFF2LocaTable() - glyfTable = self.ttFont['glyf'] = WOFF2GlyfTable() - glyfTable.reconstruct(data, self.ttFont) - glyfTable.padding = padding - data = glyfTable.compile(self.ttFont) - return data - - def _reconstructLoca(self): - """ Return reconstructed loca table data. 
""" - if 'loca' not in self.ttFont: - # make sure glyf is reconstructed first - self.tables['glyf'].data = self.reconstructTable('glyf') - locaTable = self.ttFont['loca'] - data = locaTable.compile(self.ttFont) - if len(data) != self.tables['loca'].origLength: - raise TTLibError( - "reconstructed 'loca' table doesn't match original size: " - "expected %d, found %d" - % (self.tables['loca'].origLength, len(data))) - return data - - -class WOFF2Writer(SFNTWriter): - - flavor = "woff2" - - def __init__(self, file, numTables, sfntVersion="\000\001\000\000", - flavor=None, flavorData=None): - if not haveBrotli: - print('The WOFF2 encoder requires the Brotli Python extension, available at:\n' - 'https://github.com/google/brotli', file=sys.stderr) - raise ImportError("No module named brotli") - - self.file = file - self.numTables = numTables - self.sfntVersion = Tag(sfntVersion) - self.flavorData = flavorData or WOFF2FlavorData() - - self.directoryFormat = woff2DirectoryFormat - self.directorySize = woff2DirectorySize - self.DirectoryEntry = WOFF2DirectoryEntry - - self.signature = Tag("wOF2") - - self.nextTableOffset = 0 - self.transformBuffer = BytesIO() - - self.tables = OrderedDict() - - # make empty TTFont to store data while normalising and transforming tables - self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) - - def __setitem__(self, tag, data): - """Associate new entry named 'tag' with raw table data.""" - if tag in self.tables: - raise TTLibError("cannot rewrite '%s' table" % tag) - if tag == 'DSIG': - # always drop DSIG table, since the encoding process can invalidate it - self.numTables -= 1 - return - - entry = self.DirectoryEntry() - entry.tag = Tag(tag) - entry.flags = getKnownTagIndex(entry.tag) - # WOFF2 table data are written to disk only on close(), after all tags - # have been specified - entry.data = data - - self.tables[tag] = entry - - def close(self): - """ All tags must have been specified. Now write the table data and directory. 
- """ - if len(self.tables) != self.numTables: - raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables))) - - if self.sfntVersion in ("\x00\x01\x00\x00", "true"): - isTrueType = True - elif self.sfntVersion == "OTTO": - isTrueType = False - else: - raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") - - # The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned. - # However, the reference WOFF2 implementation still fails to reconstruct - # 'unpadded' glyf tables, therefore we need to 'normalise' them. - # See: - # https://github.com/khaledhosny/ots/issues/60 - # https://github.com/google/woff2/issues/15 - if isTrueType: - self._normaliseGlyfAndLoca(padding=4) - self._setHeadTransformFlag() - - # To pass the legacy OpenType Sanitiser currently included in browsers, - # we must sort the table directory and data alphabetically by tag. - # See: - # https://github.com/google/woff2/pull/3 - # https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html - # TODO(user): remove to match spec once browsers are on newer OTS - self.tables = OrderedDict(sorted(self.tables.items())) - - self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets() - - fontData = self._transformTables() - compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT) - - self.totalCompressedSize = len(compressedFont) - self.length = self._calcTotalSize() - self.majorVersion, self.minorVersion = self._getVersion() - self.reserved = 0 - - directory = self._packTableDirectory() - self.file.seek(0) - self.file.write(pad(directory + compressedFont, size=4)) - self._writeFlavorData() - - def _normaliseGlyfAndLoca(self, padding=4): - """ Recompile glyf and loca tables, aligning glyph offsets to multiples of - 'padding' size. Update the head table's 'indexToLocFormat' accordingly while - compiling loca. 
- """ - if self.sfntVersion == "OTTO": - return - for tag in ('maxp', 'head', 'loca', 'glyf'): - self._decompileTable(tag) - self.ttFont['glyf'].padding = padding - for tag in ('glyf', 'loca'): - self._compileTable(tag) - - def _setHeadTransformFlag(self): - """ Set bit 11 of 'head' table flags to indicate that the font has undergone - a lossless modifying transform. Re-compile head table data.""" - self._decompileTable('head') - self.ttFont['head'].flags |= (1 << 11) - self._compileTable('head') - - def _decompileTable(self, tag): - """ Fetch table data, decompile it, and store it inside self.ttFont. """ - tag = Tag(tag) - if tag not in self.tables: - raise TTLibError("missing required table: %s" % tag) - if self.ttFont.isLoaded(tag): - return - data = self.tables[tag].data - if tag == 'loca': - tableClass = WOFF2LocaTable - elif tag == 'glyf': - tableClass = WOFF2GlyfTable - else: - tableClass = getTableClass(tag) - table = tableClass(tag) - self.ttFont.tables[tag] = table - table.decompile(data, self.ttFont) - - def _compileTable(self, tag): - """ Compile table and store it in its 'data' attribute. """ - self.tables[tag].data = self.ttFont[tag].compile(self.ttFont) - - def _calcSFNTChecksumsLengthsAndOffsets(self): - """ Compute the 'original' SFNT checksums, lengths and offsets for checksum - adjustment calculation. Return the total size of the uncompressed font. 
- """ - offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables) - for tag, entry in self.tables.items(): - data = entry.data - entry.origOffset = offset - entry.origLength = len(data) - if tag == 'head': - entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:]) - else: - entry.checkSum = calcChecksum(data) - offset += (entry.origLength + 3) & ~3 - return offset - - def _transformTables(self): - """Return transformed font data.""" - for tag, entry in self.tables.items(): - if tag in woff2TransformedTableTags: - data = self.transformTable(tag) - else: - data = entry.data - entry.offset = self.nextTableOffset - entry.saveData(self.transformBuffer, data) - self.nextTableOffset += entry.length - self.writeMasterChecksum() - fontData = self.transformBuffer.getvalue() - return fontData - - def transformTable(self, tag): - """Return transformed table data.""" - if tag not in woff2TransformedTableTags: - raise TTLibError("Transform for table '%s' is unknown" % tag) - if tag == "loca": - data = b"" - elif tag == "glyf": - for tag in ('maxp', 'head', 'loca', 'glyf'): - self._decompileTable(tag) - glyfTable = self.ttFont['glyf'] - data = glyfTable.transform(self.ttFont) - else: - raise NotImplementedError - return data - - def _calcMasterChecksum(self): - """Calculate checkSumAdjustment.""" - tags = list(self.tables.keys()) - checksums = [] - for i in range(len(tags)): - checksums.append(self.tables[tags[i]].checkSum) - - # Create a SFNT directory for checksum calculation purposes - self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16) - directory = sstruct.pack(sfntDirectoryFormat, self) - tables = sorted(self.tables.items()) - for tag, entry in tables: - sfntEntry = SFNTDirectoryEntry() - sfntEntry.tag = entry.tag - sfntEntry.checkSum = entry.checkSum - sfntEntry.offset = entry.origOffset - sfntEntry.length = entry.origLength - directory = directory + sfntEntry.toString() - - directory_end = sfntDirectorySize + 
len(self.tables) * sfntDirectoryEntrySize - assert directory_end == len(directory) - - checksums.append(calcChecksum(directory)) - checksum = sum(checksums) & 0xffffffff - # BiboAfba! - checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff - return checksumadjustment - - def writeMasterChecksum(self): - """Write checkSumAdjustment to the transformBuffer.""" - checksumadjustment = self._calcMasterChecksum() - self.transformBuffer.seek(self.tables['head'].offset + 8) - self.transformBuffer.write(struct.pack(">L", checksumadjustment)) - - def _calcTotalSize(self): - """Calculate total size of WOFF2 font, including any meta- and/or private data.""" - offset = self.directorySize - for entry in self.tables.values(): - offset += len(entry.toString()) - offset += self.totalCompressedSize - offset = (offset + 3) & ~3 - offset = self._calcFlavorDataOffsetsAndSize(offset) - return offset - - def _calcFlavorDataOffsetsAndSize(self, start): - """Calculate offsets and lengths for any meta- and/or private data.""" - offset = start - data = self.flavorData - if data.metaData: - self.metaOrigLength = len(data.metaData) - self.metaOffset = offset - self.compressedMetaData = brotli.compress( - data.metaData, mode=brotli.MODE_TEXT) - self.metaLength = len(self.compressedMetaData) - offset += self.metaLength - else: - self.metaOffset = self.metaLength = self.metaOrigLength = 0 - self.compressedMetaData = b"" - if data.privData: - # make sure private data is padded to 4-byte boundary - offset = (offset + 3) & ~3 - self.privOffset = offset - self.privLength = len(data.privData) - offset += self.privLength - else: - self.privOffset = self.privLength = 0 - return offset - - def _getVersion(self): - """Return the WOFF2 font's (majorVersion, minorVersion) tuple.""" - data = self.flavorData - if data.majorVersion is not None and data.minorVersion is not None: - return data.majorVersion, data.minorVersion - else: - # if None, return 'fontRevision' from 'head' table - if 'head' in 
self.tables: - return struct.unpack(">HH", self.tables['head'].data[4:8]) - else: - return 0, 0 - - def _packTableDirectory(self): - """Return WOFF2 table directory data.""" - directory = sstruct.pack(self.directoryFormat, self) - for entry in self.tables.values(): - directory = directory + entry.toString() - return directory - - def _writeFlavorData(self): - """Write metadata and/or private data using appropiate padding.""" - compressedMetaData = self.compressedMetaData - privData = self.flavorData.privData - if compressedMetaData and privData: - compressedMetaData = pad(compressedMetaData, size=4) - if compressedMetaData: - self.file.seek(self.metaOffset) - assert self.file.tell() == self.metaOffset - self.file.write(compressedMetaData) - if privData: - self.file.seek(self.privOffset) - assert self.file.tell() == self.privOffset - self.file.write(privData) - - def reordersTables(self): - return True - - -# -- woff2 directory helpers and cruft - -woff2DirectoryFormat = """ - > # big endian - signature: 4s # "wOF2" - sfntVersion: 4s - length: L # total woff2 file size - numTables: H # number of tables - reserved: H # set to 0 - totalSfntSize: L # uncompressed size - totalCompressedSize: L # compressed size - majorVersion: H # major version of WOFF file - minorVersion: H # minor version of WOFF file - metaOffset: L # offset to metadata block - metaLength: L # length of compressed metadata - metaOrigLength: L # length of uncompressed metadata - privOffset: L # offset to private data block - privLength: L # length of private data block -""" - -woff2DirectorySize = sstruct.calcsize(woff2DirectoryFormat) - -woff2KnownTags = ( - "cmap", "head", "hhea", "hmtx", "maxp", "name", "OS/2", "post", "cvt ", - "fpgm", "glyf", "loca", "prep", "CFF ", "VORG", "EBDT", "EBLC", "gasp", - "hdmx", "kern", "LTSH", "PCLT", "VDMX", "vhea", "vmtx", "BASE", "GDEF", - "GPOS", "GSUB", "EBSC", "JSTF", "MATH", "CBDT", "CBLC", "COLR", "CPAL", - "SVG ", "sbix", "acnt", "avar", "bdat", "bloc", 
"bsln", "cvar", "fdsc", - "feat", "fmtx", "fvar", "gvar", "hsty", "just", "lcar", "mort", "morx", - "opbd", "prop", "trak", "Zapf", "Silf", "Glat", "Gloc", "Feat", "Sill") - -woff2FlagsFormat = """ - > # big endian - flags: B # table type and flags -""" - -woff2FlagsSize = sstruct.calcsize(woff2FlagsFormat) - -woff2UnknownTagFormat = """ - > # big endian - tag: 4s # 4-byte tag (optional) -""" - -woff2UnknownTagSize = sstruct.calcsize(woff2UnknownTagFormat) - -woff2UnknownTagIndex = 0x3F - -woff2Base128MaxSize = 5 -woff2DirectoryEntryMaxSize = woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize - -woff2TransformedTableTags = ('glyf', 'loca') - -woff2GlyfTableFormat = """ - > # big endian - version: L # = 0x00000000 - numGlyphs: H # Number of glyphs - indexFormat: H # Offset format for loca table - nContourStreamSize: L # Size of nContour stream - nPointsStreamSize: L # Size of nPoints stream - flagStreamSize: L # Size of flag stream - glyphStreamSize: L # Size of glyph stream - compositeStreamSize: L # Size of composite stream - bboxStreamSize: L # Comnined size of bboxBitmap and bboxStream - instructionStreamSize: L # Size of instruction stream -""" - -woff2GlyfTableFormatSize = sstruct.calcsize(woff2GlyfTableFormat) - -bboxFormat = """ - > # big endian - xMin: h - yMin: h - xMax: h - yMax: h -""" - - -def getKnownTagIndex(tag): - """Return index of 'tag' in woff2KnownTags list. 
Return 63 if not found.""" - for i in range(len(woff2KnownTags)): - if tag == woff2KnownTags[i]: - return i - return woff2UnknownTagIndex - - -class WOFF2DirectoryEntry(DirectoryEntry): - - def fromFile(self, file): - pos = file.tell() - data = file.read(woff2DirectoryEntryMaxSize) - left = self.fromString(data) - consumed = len(data) - len(left) - file.seek(pos + consumed) - - def fromString(self, data): - if len(data) < 1: - raise TTLibError("can't read table 'flags': not enough data") - dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self) - if self.flags & 0x3F == 0x3F: - # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value - if len(data) < woff2UnknownTagSize: - raise TTLibError("can't read table 'tag': not enough data") - dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self) - else: - # otherwise, tag is derived from a fixed 'Known Tags' table - self.tag = woff2KnownTags[self.flags & 0x3F] - self.tag = Tag(self.tag) - if self.flags & 0xC0 != 0: - raise TTLibError('bits 6-7 are reserved and must be 0') - self.origLength, data = unpackBase128(data) - self.length = self.origLength - if self.tag in woff2TransformedTableTags: - self.length, data = unpackBase128(data) - if self.tag == 'loca' and self.length != 0: - raise TTLibError( - "the transformLength of the 'loca' table must be 0") - # return left over data - return data - - def toString(self): - data = bytechr(self.flags) - if (self.flags & 0x3F) == 0x3F: - data += struct.pack('>4s', self.tag.tobytes()) - data += packBase128(self.origLength) - if self.tag in woff2TransformedTableTags: - data += packBase128(self.length) - return data - - -class WOFF2LocaTable(getTableClass('loca')): - """Same as parent class. The only difference is that it attempts to preserve - the 'indexFormat' as encoded in the WOFF2 glyf table. 
- """ - - def __init__(self, tag=None): - self.tableTag = Tag(tag or 'loca') - - def compile(self, ttFont): - try: - max_location = max(self.locations) - except AttributeError: - self.set([]) - max_location = 0 - if 'glyf' in ttFont and hasattr(ttFont['glyf'], 'indexFormat'): - # copile loca using the indexFormat specified in the WOFF2 glyf table - indexFormat = ttFont['glyf'].indexFormat - if indexFormat == 0: - if max_location >= 0x20000: - raise TTLibError("indexFormat is 0 but local offsets > 0x20000") - if not all(l % 2 == 0 for l in self.locations): - raise TTLibError("indexFormat is 0 but local offsets not multiples of 2") - locations = array.array("H") - for i in range(len(self.locations)): - locations.append(self.locations[i] // 2) - else: - locations = array.array("I", self.locations) - if sys.byteorder != "big": - locations.byteswap() - data = locations.tostring() - else: - # use the most compact indexFormat given the current glyph offsets - data = super(WOFF2LocaTable, self).compile(ttFont) - return data - - -class WOFF2GlyfTable(getTableClass('glyf')): - """Decoder/Encoder for WOFF2 'glyf' table transform.""" - - subStreams = ( - 'nContourStream', 'nPointsStream', 'flagStream', 'glyphStream', - 'compositeStream', 'bboxStream', 'instructionStream') - - def __init__(self, tag=None): - self.tableTag = Tag(tag or 'glyf') - - def reconstruct(self, data, ttFont): - """ Decompile transformed 'glyf' data. 
""" - inputDataSize = len(data) - - if inputDataSize < woff2GlyfTableFormatSize: - raise TTLibError("not enough 'glyf' data") - dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self) - offset = woff2GlyfTableFormatSize - - for stream in self.subStreams: - size = getattr(self, stream + 'Size') - setattr(self, stream, data[:size]) - data = data[size:] - offset += size - - if offset != inputDataSize: - raise TTLibError( - "incorrect size of transformed 'glyf' table: expected %d, received %d bytes" - % (offset, inputDataSize)) - - bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 - bboxBitmap = self.bboxStream[:bboxBitmapSize] - self.bboxBitmap = array.array('B', bboxBitmap) - self.bboxStream = self.bboxStream[bboxBitmapSize:] - - self.nContourStream = array.array("h", self.nContourStream) - if sys.byteorder != "big": - self.nContourStream.byteswap() - assert len(self.nContourStream) == self.numGlyphs - - if 'head' in ttFont: - ttFont['head'].indexToLocFormat = self.indexFormat - try: - self.glyphOrder = ttFont.getGlyphOrder() - except: - self.glyphOrder = None - if self.glyphOrder is None: - self.glyphOrder = [".notdef"] - self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)]) - else: - if len(self.glyphOrder) != self.numGlyphs: - raise TTLibError( - "incorrect glyphOrder: expected %d glyphs, found %d" % - (len(self.glyphOrder), self.numGlyphs)) - - glyphs = self.glyphs = {} - for glyphID, glyphName in enumerate(self.glyphOrder): - glyph = self._decodeGlyph(glyphID) - glyphs[glyphName] = glyph - - def transform(self, ttFont): - """ Return transformed 'glyf' data """ - self.numGlyphs = len(self.glyphs) - if not hasattr(self, "glyphOrder"): - try: - self.glyphOrder = ttFont.getGlyphOrder() - except: - self.glyphOrder = None - if self.glyphOrder is None: - self.glyphOrder = [".notdef"] - self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)]) - if len(self.glyphOrder) != self.numGlyphs: - raise TTLibError( - "incorrect 
glyphOrder: expected %d glyphs, found %d" % - (len(self.glyphOrder), self.numGlyphs)) - - if 'maxp' in ttFont: - ttFont['maxp'].numGlyphs = self.numGlyphs - self.indexFormat = ttFont['head'].indexToLocFormat - - for stream in self.subStreams: - setattr(self, stream, b"") - bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 - self.bboxBitmap = array.array('B', [0]*bboxBitmapSize) - - for glyphID in range(self.numGlyphs): - self._encodeGlyph(glyphID) - - self.bboxStream = self.bboxBitmap.tostring() + self.bboxStream - for stream in self.subStreams: - setattr(self, stream + 'Size', len(getattr(self, stream))) - self.version = 0 - data = sstruct.pack(woff2GlyfTableFormat, self) - data += bytesjoin([getattr(self, s) for s in self.subStreams]) - return data - - def _decodeGlyph(self, glyphID): - glyph = getTableModule('glyf').Glyph() - glyph.numberOfContours = self.nContourStream[glyphID] - if glyph.numberOfContours == 0: - return glyph - elif glyph.isComposite(): - self._decodeComponents(glyph) - else: - self._decodeCoordinates(glyph) - self._decodeBBox(glyphID, glyph) - return glyph - - def _decodeComponents(self, glyph): - data = self.compositeStream - glyph.components = [] - more = 1 - haveInstructions = 0 - while more: - component = getTableModule('glyf').GlyphComponent() - more, haveInstr, data = component.decompile(data, self) - haveInstructions = haveInstructions | haveInstr - glyph.components.append(component) - self.compositeStream = data - if haveInstructions: - self._decodeInstructions(glyph) - - def _decodeCoordinates(self, glyph): - data = self.nPointsStream - endPtsOfContours = [] - endPoint = -1 - for i in range(glyph.numberOfContours): - ptsOfContour, data = unpack255UShort(data) - endPoint += ptsOfContour - endPtsOfContours.append(endPoint) - glyph.endPtsOfContours = endPtsOfContours - self.nPointsStream = data - self._decodeTriplets(glyph) - self._decodeInstructions(glyph) - - def _decodeInstructions(self, glyph): - glyphStream = self.glyphStream - 
instructionStream = self.instructionStream - instructionLength, glyphStream = unpack255UShort(glyphStream) - glyph.program = ttProgram.Program() - glyph.program.fromBytecode(instructionStream[:instructionLength]) - self.glyphStream = glyphStream - self.instructionStream = instructionStream[instructionLength:] - - def _decodeBBox(self, glyphID, glyph): - haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7))) - if glyph.isComposite() and not haveBBox: - raise TTLibError('no bbox values for composite glyph %d' % glyphID) - if haveBBox: - dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph) - else: - glyph.recalcBounds(self) - - def _decodeTriplets(self, glyph): - - def withSign(flag, baseval): - assert 0 <= baseval and baseval < 65536, 'integer overflow' - return baseval if flag & 1 else -baseval - - nPoints = glyph.endPtsOfContours[-1] + 1 - flagSize = nPoints - if flagSize > len(self.flagStream): - raise TTLibError("not enough 'flagStream' data") - flagsData = self.flagStream[:flagSize] - self.flagStream = self.flagStream[flagSize:] - flags = array.array('B', flagsData) - - triplets = array.array('B', self.glyphStream) - nTriplets = len(triplets) - assert nPoints <= nTriplets - - x = 0 - y = 0 - glyph.coordinates = getTableModule('glyf').GlyphCoordinates.zeros(nPoints) - glyph.flags = array.array("B") - tripletIndex = 0 - for i in range(nPoints): - flag = flags[i] - onCurve = not bool(flag >> 7) - flag &= 0x7f - if flag < 84: - nBytes = 1 - elif flag < 120: - nBytes = 2 - elif flag < 124: - nBytes = 3 - else: - nBytes = 4 - assert ((tripletIndex + nBytes) <= nTriplets) - if flag < 10: - dx = 0 - dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex]) - elif flag < 20: - dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex]) - dy = 0 - elif flag < 84: - b0 = flag - 20 - b1 = triplets[tripletIndex] - dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4)) - dy = withSign(flag >> 1, 1 + ((b0 & 0x0c) << 2) + 
(b1 & 0x0f)) - elif flag < 120: - b0 = flag - 84 - dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex]) - dy = withSign(flag >> 1, - 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1]) - elif flag < 124: - b2 = triplets[tripletIndex + 1] - dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4)) - dy = withSign(flag >> 1, - ((b2 & 0x0f) << 8) + triplets[tripletIndex + 2]) - else: - dx = withSign(flag, - (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1]) - dy = withSign(flag >> 1, - (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3]) - tripletIndex += nBytes - x += dx - y += dy - glyph.coordinates[i] = (x, y) - glyph.flags.append(int(onCurve)) - bytesConsumed = tripletIndex - self.glyphStream = self.glyphStream[bytesConsumed:] - - def _encodeGlyph(self, glyphID): - glyphName = self.getGlyphName(glyphID) - glyph = self[glyphName] - self.nContourStream += struct.pack(">h", glyph.numberOfContours) - if glyph.numberOfContours == 0: - return - elif glyph.isComposite(): - self._encodeComponents(glyph) - else: - self._encodeCoordinates(glyph) - self._encodeBBox(glyphID, glyph) - - def _encodeComponents(self, glyph): - lastcomponent = len(glyph.components) - 1 - more = 1 - haveInstructions = 0 - for i in range(len(glyph.components)): - if i == lastcomponent: - haveInstructions = hasattr(glyph, "program") - more = 0 - component = glyph.components[i] - self.compositeStream += component.compile(more, haveInstructions, self) - if haveInstructions: - self._encodeInstructions(glyph) - - def _encodeCoordinates(self, glyph): - lastEndPoint = -1 - for endPoint in glyph.endPtsOfContours: - ptsOfContour = endPoint - lastEndPoint - self.nPointsStream += pack255UShort(ptsOfContour) - lastEndPoint = endPoint - self._encodeTriplets(glyph) - self._encodeInstructions(glyph) - - def _encodeInstructions(self, glyph): - instructions = glyph.program.getBytecode() - self.glyphStream += pack255UShort(len(instructions)) - self.instructionStream += 
instructions - - def _encodeBBox(self, glyphID, glyph): - assert glyph.numberOfContours != 0, "empty glyph has no bbox" - if not glyph.isComposite(): - # for simple glyphs, compare the encoded bounding box info with the calculated - # values, and if they match omit the bounding box info - currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax - calculatedBBox = calcIntBounds(glyph.coordinates) - if currentBBox == calculatedBBox: - return - self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7) - self.bboxStream += sstruct.pack(bboxFormat, glyph) - - def _encodeTriplets(self, glyph): - assert len(glyph.coordinates) == len(glyph.flags) - coordinates = glyph.coordinates.copy() - coordinates.absoluteToRelative() - - flags = array.array('B') - triplets = array.array('B') - for i in range(len(coordinates)): - onCurve = glyph.flags[i] - x, y = coordinates[i] - absX = abs(x) - absY = abs(y) - onCurveBit = 0 if onCurve else 128 - xSignBit = 0 if (x < 0) else 1 - ySignBit = 0 if (y < 0) else 1 - xySignBits = xSignBit + 2 * ySignBit - - if x == 0 and absY < 1280: - flags.append(onCurveBit + ((absY & 0xf00) >> 7) + ySignBit) - triplets.append(absY & 0xff) - elif y == 0 and absX < 1280: - flags.append(onCurveBit + 10 + ((absX & 0xf00) >> 7) + xSignBit) - triplets.append(absX & 0xff) - elif absX < 65 and absY < 65: - flags.append(onCurveBit + 20 + ((absX - 1) & 0x30) + (((absY - 1) & 0x30) >> 2) + xySignBits) - triplets.append((((absX - 1) & 0xf) << 4) | ((absY - 1) & 0xf)) - elif absX < 769 and absY < 769: - flags.append(onCurveBit + 84 + 12 * (((absX - 1) & 0x300) >> 8) + (((absY - 1) & 0x300) >> 6) + xySignBits) - triplets.append((absX - 1) & 0xff) - triplets.append((absY - 1) & 0xff) - elif absX < 4096 and absY < 4096: - flags.append(onCurveBit + 120 + xySignBits) - triplets.append(absX >> 4) - triplets.append(((absX & 0xf) << 4) | (absY >> 8)) - triplets.append(absY & 0xff) - else: - flags.append(onCurveBit + 124 + xySignBits) - triplets.append(absX >> 8) - 
triplets.append(absX & 0xff) - triplets.append(absY >> 8) - triplets.append(absY & 0xff) - - self.flagStream += flags.tostring() - self.glyphStream += triplets.tostring() - - -class WOFF2FlavorData(WOFFFlavorData): - - Flavor = 'woff2' - - def __init__(self, reader=None): - if not haveBrotli: - raise ImportError("No module named brotli") - self.majorVersion = None - self.minorVersion = None - self.metaData = None - self.privData = None - if reader: - self.majorVersion = reader.majorVersion - self.minorVersion = reader.minorVersion - if reader.metaLength: - reader.file.seek(reader.metaOffset) - rawData = reader.file.read(reader.metaLength) - assert len(rawData) == reader.metaLength - data = brotli.decompress(rawData) - assert len(data) == reader.metaOrigLength - self.metaData = data - if reader.privLength: - reader.file.seek(reader.privOffset) - data = reader.file.read(reader.privLength) - assert len(data) == reader.privLength - self.privData = data - - -def unpackBase128(data): - r""" Read one to five bytes from UIntBase128-encoded input string, and return - a tuple containing the decoded integer plus any leftover data. - - >>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00") - True - >>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295 - True - >>> unpackBase128(b'\x80\x80\x3f') # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - File "<stdin>", line 1, in ? - TTLibError: UIntBase128 value must not start with leading zeros - >>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0] # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - File "<stdin>", line 1, in ? - TTLibError: UIntBase128-encoded sequence is longer than 5 bytes - >>> unpackBase128(b'\x90\x80\x80\x80\x00')[0] # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - File "<stdin>", line 1, in ? 
- TTLibError: UIntBase128 value exceeds 2**32-1 - """ - if len(data) == 0: - raise TTLibError('not enough data to unpack UIntBase128') - result = 0 - if byteord(data[0]) == 0x80: - # font must be rejected if UIntBase128 value starts with 0x80 - raise TTLibError('UIntBase128 value must not start with leading zeros') - for i in range(woff2Base128MaxSize): - if len(data) == 0: - raise TTLibError('not enough data to unpack UIntBase128') - code = byteord(data[0]) - data = data[1:] - # if any of the top seven bits are set then we're about to overflow - if result & 0xFE000000: - raise TTLibError('UIntBase128 value exceeds 2**32-1') - # set current value = old value times 128 bitwise-or (byte bitwise-and 127) - result = (result << 7) | (code & 0x7f) - # repeat until the most significant bit of byte is false - if (code & 0x80) == 0: - # return result plus left over data - return result, data - # make sure not to exceed the size bound - raise TTLibError('UIntBase128-encoded sequence is longer than 5 bytes') - - -def base128Size(n): - """ Return the length in bytes of a UIntBase128-encoded sequence with value n. - - >>> base128Size(0) - 1 - >>> base128Size(24567) - 3 - >>> base128Size(2**32-1) - 5 - """ - assert n >= 0 - size = 1 - while n >= 128: - size += 1 - n >>= 7 - return size - - -def packBase128(n): - r""" Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of - bytes using UIntBase128 variable-length encoding. Produce the shortest possible - encoding. 
- - >>> packBase128(63) == b"\x3f" - True - >>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f' - True - """ - if n < 0 or n >= 2**32: - raise TTLibError( - "UIntBase128 format requires 0 <= integer <= 2**32-1") - data = b'' - size = base128Size(n) - for i in range(size): - b = (n >> (7 * (size - i - 1))) & 0x7f - if i < size - 1: - b |= 0x80 - data += struct.pack('B', b) - return data - - -def unpack255UShort(data): - """ Read one to three bytes from 255UInt16-encoded input string, and return a - tuple containing the decoded integer plus any leftover data. - - >>> unpack255UShort(bytechr(252))[0] - 252 - - Note that some numbers (e.g. 506) can have multiple encodings: - >>> unpack255UShort(struct.pack("BB", 254, 0))[0] - 506 - >>> unpack255UShort(struct.pack("BB", 255, 253))[0] - 506 - >>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0] - 506 - """ - code = byteord(data[:1]) - data = data[1:] - if code == 253: - # read two more bytes as an unsigned short - if len(data) < 2: - raise TTLibError('not enough data to unpack 255UInt16') - result, = struct.unpack(">H", data[:2]) - data = data[2:] - elif code == 254: - # read another byte, plus 253 * 2 - if len(data) == 0: - raise TTLibError('not enough data to unpack 255UInt16') - result = byteord(data[:1]) - result += 506 - data = data[1:] - elif code == 255: - # read another byte, plus 253 - if len(data) == 0: - raise TTLibError('not enough data to unpack 255UInt16') - result = byteord(data[:1]) - result += 253 - data = data[1:] - else: - # leave as is if lower than 253 - result = code - # return result plus left over data - return result, data - - -def pack255UShort(value): - r""" Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring - using 255UInt16 variable-length encoding. 
- - >>> pack255UShort(252) == b'\xfc' - True - >>> pack255UShort(506) == b'\xfe\x00' - True - >>> pack255UShort(762) == b'\xfd\x02\xfa' - True - """ - if value < 0 or value > 0xFFFF: - raise TTLibError( - "255UInt16 format requires 0 <= integer <= 65535") - if value < 253: - return struct.pack(">B", value) - elif value < 506: - return struct.pack(">BB", 255, value - 253) - elif value < 762: - return struct.pack(">BB", 254, value - 506) - else: - return struct.pack(">BH", 253, value) - - -if __name__ == "__main__": - import doctest - sys.exit(doctest.testmod().failed) diff -Nru fonttools-3.0/Tools/fontTools/ttLib/woff2_test.py fonttools-3.21.2/Tools/fontTools/ttLib/woff2_test.py --- fonttools-3.0/Tools/fontTools/ttLib/woff2_test.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttLib/woff2_test.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,747 +0,0 @@ -from __future__ import print_function, division, absolute_import, unicode_literals -from fontTools.misc.py23 import * -from fontTools import ttLib -from .woff2 import (WOFF2Reader, woff2DirectorySize, woff2DirectoryFormat, - woff2FlagsSize, woff2UnknownTagSize, woff2Base128MaxSize, WOFF2DirectoryEntry, - getKnownTagIndex, packBase128, base128Size, woff2UnknownTagIndex, - WOFF2FlavorData, woff2TransformedTableTags, WOFF2GlyfTable, WOFF2LocaTable, - WOFF2Writer) -import unittest -import sstruct -import os -import random -import copy -from collections import OrderedDict - -haveBrotli = False -try: - import brotli - haveBrotli = True -except ImportError: - pass - - -# Python 3 renamed 'assertRaisesRegexp' to 'assertRaisesRegex', and fires -# deprecation warnings if a program uses the old name. 
-if not hasattr(unittest.TestCase, 'assertRaisesRegex'): - unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp - - -current_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) -data_dir = os.path.join(current_dir, 'testdata') -TTX = os.path.join(data_dir, 'TestTTF-Regular.ttx') -OTX = os.path.join(data_dir, 'TestOTF-Regular.otx') -METADATA = os.path.join(data_dir, 'test_woff2_metadata.xml') - -TT_WOFF2 = BytesIO() -CFF_WOFF2 = BytesIO() - - -def setUpModule(): - if not haveBrotli: - raise unittest.SkipTest("No module named brotli") - assert os.path.exists(TTX) - assert os.path.exists(OTX) - # import TT-flavoured test font and save it as WOFF2 - ttf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - ttf.importXML(TTX, quiet=True) - ttf.flavor = "woff2" - ttf.save(TT_WOFF2, reorderTables=None) - # import CFF-flavoured test font and save it as WOFF2 - otf = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - otf.importXML(OTX, quiet=True) - otf.flavor = "woff2" - otf.save(CFF_WOFF2, reorderTables=None) - - -class WOFF2ReaderTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.file = BytesIO(CFF_WOFF2.getvalue()) - cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - cls.font.importXML(OTX, quiet=True) - - def setUp(self): - self.file.seek(0) - - def test_bad_signature(self): - with self.assertRaisesRegex(ttLib.TTLibError, 'bad signature'): - WOFF2Reader(BytesIO(b"wOFF")) - - def test_not_enough_data_header(self): - incomplete_header = self.file.read(woff2DirectorySize - 1) - with self.assertRaisesRegex(ttLib.TTLibError, 'not enough data'): - WOFF2Reader(BytesIO(incomplete_header)) - - def test_incorrect_compressed_size(self): - data = self.file.read(woff2DirectorySize) - header = sstruct.unpack(woff2DirectoryFormat, data) - header['totalCompressedSize'] = 0 - data = sstruct.pack(woff2DirectoryFormat, header) - with self.assertRaises(brotli.error): - WOFF2Reader(BytesIO(data + 
self.file.read())) - - def test_incorrect_uncompressed_size(self): - decompress_backup = brotli.decompress - brotli.decompress = lambda data: b"" # return empty byte string - with self.assertRaisesRegex(ttLib.TTLibError, 'unexpected size for decompressed'): - WOFF2Reader(self.file) - brotli.decompress = decompress_backup - - def test_incorrect_file_size(self): - data = self.file.read(woff2DirectorySize) - header = sstruct.unpack(woff2DirectoryFormat, data) - header['length'] -= 1 - data = sstruct.pack(woff2DirectoryFormat, header) - with self.assertRaisesRegex( - ttLib.TTLibError, "doesn't match the actual file size"): - WOFF2Reader(BytesIO(data + self.file.read())) - - def test_num_tables(self): - tags = [t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')] - data = self.file.read(woff2DirectorySize) - header = sstruct.unpack(woff2DirectoryFormat, data) - self.assertEqual(header['numTables'], len(tags)) - - def test_table_tags(self): - tags = set([t for t in self.font.keys() if t not in ('GlyphOrder', 'DSIG')]) - reader = WOFF2Reader(self.file) - self.assertEqual(set(reader.keys()), tags) - - def test_get_normal_tables(self): - woff2Reader = WOFF2Reader(self.file) - specialTags = woff2TransformedTableTags + ('head', 'GlyphOrder', 'DSIG') - for tag in [t for t in self.font.keys() if t not in specialTags]: - origData = self.font.getTableData(tag) - decompressedData = woff2Reader[tag] - self.assertEqual(origData, decompressedData) - - def test_reconstruct_unknown(self): - reader = WOFF2Reader(self.file) - with self.assertRaisesRegex(ttLib.TTLibError, 'transform for table .* unknown'): - reader.reconstructTable('ZZZZ') - - -class WOFF2ReaderTTFTest(WOFF2ReaderTest): - """ Tests specific to TT-flavored fonts. 
""" - - @classmethod - def setUpClass(cls): - cls.file = BytesIO(TT_WOFF2.getvalue()) - cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - cls.font.importXML(TTX, quiet=True) - - def setUp(self): - self.file.seek(0) - - def test_reconstruct_glyf(self): - woff2Reader = WOFF2Reader(self.file) - reconstructedData = woff2Reader['glyf'] - self.assertEqual(self.font.getTableData('glyf'), reconstructedData) - - def test_reconstruct_loca(self): - woff2Reader = WOFF2Reader(self.file) - reconstructedData = woff2Reader['loca'] - self.assertEqual(self.font.getTableData('loca'), reconstructedData) - self.assertTrue(hasattr(woff2Reader.tables['glyf'], 'data')) - - def test_reconstruct_loca_not_match_orig_size(self): - reader = WOFF2Reader(self.file) - reader.tables['loca'].origLength -= 1 - with self.assertRaisesRegex( - ttLib.TTLibError, "'loca' table doesn't match original size"): - reader.reconstructTable('loca') - - -def normalise_table(font, tag, padding=4): - """ Return normalised table data. Keep 'font' instance unmodified. 
""" - assert tag in ('glyf', 'loca', 'head') - assert tag in font - if tag == 'head': - origHeadFlags = font['head'].flags - font['head'].flags |= (1 << 11) - tableData = font['head'].compile(font) - if font.sfntVersion in ("\x00\x01\x00\x00", "true"): - assert {'glyf', 'loca', 'head'}.issubset(font.keys()) - origIndexFormat = font['head'].indexToLocFormat - if hasattr(font['loca'], 'locations'): - origLocations = font['loca'].locations[:] - else: - origLocations = [] - glyfTable = ttLib.getTableClass('glyf')() - glyfTable.decompile(font.getTableData('glyf'), font) - glyfTable.padding = padding - if tag == 'glyf': - tableData = glyfTable.compile(font) - elif tag == 'loca': - glyfTable.compile(font) - tableData = font['loca'].compile(font) - if tag == 'head': - glyfTable.compile(font) - font['loca'].compile(font) - tableData = font['head'].compile(font) - font['head'].indexToLocFormat = origIndexFormat - font['loca'].set(origLocations) - if tag == 'head': - font['head'].flags = origHeadFlags - return tableData - - -def normalise_font(font, padding=4): - """ Return normalised font data. Keep 'font' instance unmodified. 
""" - # drop DSIG but keep a copy - DSIG_copy = copy.deepcopy(font['DSIG']) - del font['DSIG'] - # ovverride TTFont attributes - origFlavor = font.flavor - origRecalcBBoxes = font.recalcBBoxes - origRecalcTimestamp = font.recalcTimestamp - origLazy = font.lazy - font.flavor = None - font.recalcBBoxes = False - font.recalcTimestamp = False - font.lazy = True - # save font to temporary stream - infile = BytesIO() - font.save(infile) - infile.seek(0) - # reorder tables alphabetically - outfile = BytesIO() - reader = ttLib.sfnt.SFNTReader(infile) - writer = ttLib.sfnt.SFNTWriter( - outfile, len(reader.tables), reader.sfntVersion, reader.flavor, reader.flavorData) - for tag in sorted(reader.keys()): - if tag in woff2TransformedTableTags + ('head',): - writer[tag] = normalise_table(font, tag, padding) - else: - writer[tag] = reader[tag] - writer.close() - # restore font attributes - font['DSIG'] = DSIG_copy - font.flavor = origFlavor - font.recalcBBoxes = origRecalcBBoxes - font.recalcTimestamp = origRecalcTimestamp - font.lazy = origLazy - return outfile.getvalue() - - -class WOFF2DirectoryEntryTest(unittest.TestCase): - - def setUp(self): - self.entry = WOFF2DirectoryEntry() - - def test_not_enough_data_table_flags(self): - with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'flags'"): - self.entry.fromString(b"") - - def test_not_enough_data_table_tag(self): - incompleteData = bytearray([0x3F, 0, 0, 0]) - with self.assertRaisesRegex(ttLib.TTLibError, "can't read table 'tag'"): - self.entry.fromString(bytes(incompleteData)) - - def test_table_reserved_flags(self): - with self.assertRaisesRegex(ttLib.TTLibError, "bits 6-7 are reserved"): - self.entry.fromString(bytechr(0xC0)) - - def test_loca_zero_transformLength(self): - data = bytechr(getKnownTagIndex('loca')) # flags - data += packBase128(random.randint(1, 100)) # origLength - data += packBase128(1) # non-zero transformLength - with self.assertRaisesRegex( - ttLib.TTLibError, "transformLength of the 
'loca' table must be 0"): - self.entry.fromString(data) - - def test_fromFile(self): - unknownTag = Tag('ZZZZ') - data = bytechr(getKnownTagIndex(unknownTag)) - data += unknownTag.tobytes() - data += packBase128(random.randint(1, 100)) - expectedPos = len(data) - f = BytesIO(data + b'\0'*100) - self.entry.fromFile(f) - self.assertEqual(f.tell(), expectedPos) - - def test_transformed_toString(self): - self.entry.tag = Tag('glyf') - self.entry.flags = getKnownTagIndex(self.entry.tag) - self.entry.origLength = random.randint(101, 200) - self.entry.length = random.randint(1, 100) - expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength) + - base128Size(self.entry.length)) - data = self.entry.toString() - self.assertEqual(len(data), expectedSize) - - def test_known_toString(self): - self.entry.tag = Tag('head') - self.entry.flags = getKnownTagIndex(self.entry.tag) - self.entry.origLength = 54 - expectedSize = (woff2FlagsSize + base128Size(self.entry.origLength)) - data = self.entry.toString() - self.assertEqual(len(data), expectedSize) - - def test_unknown_toString(self): - self.entry.tag = Tag('ZZZZ') - self.entry.flags = woff2UnknownTagIndex - self.entry.origLength = random.randint(1, 100) - expectedSize = (woff2FlagsSize + woff2UnknownTagSize + - base128Size(self.entry.origLength)) - data = self.entry.toString() - self.assertEqual(len(data), expectedSize) - - -class DummyReader(WOFF2Reader): - - def __init__(self, file, checkChecksums=1, fontNumber=-1): - self.file = file - for attr in ('majorVersion', 'minorVersion', 'metaOffset', 'metaLength', - 'metaOrigLength', 'privLength', 'privOffset'): - setattr(self, attr, 0) - - -class WOFF2FlavorDataTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - assert os.path.exists(METADATA) - with open(METADATA, 'rb') as f: - cls.xml_metadata = f.read() - cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) - # make random byte strings; font data must be 4-byte aligned - 
cls.fontdata = bytes(bytearray(random.sample(range(0, 256), 80))) - cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) - - def setUp(self): - self.file = BytesIO(self.fontdata) - self.file.seek(0, 2) - - def test_get_metaData_no_privData(self): - self.file.write(self.compressed_metadata) - reader = DummyReader(self.file) - reader.metaOffset = len(self.fontdata) - reader.metaLength = len(self.compressed_metadata) - reader.metaOrigLength = len(self.xml_metadata) - flavorData = WOFF2FlavorData(reader) - self.assertEqual(self.xml_metadata, flavorData.metaData) - - def test_get_privData_no_metaData(self): - self.file.write(self.privData) - reader = DummyReader(self.file) - reader.privOffset = len(self.fontdata) - reader.privLength = len(self.privData) - flavorData = WOFF2FlavorData(reader) - self.assertEqual(self.privData, flavorData.privData) - - def test_get_metaData_and_privData(self): - self.file.write(self.compressed_metadata + self.privData) - reader = DummyReader(self.file) - reader.metaOffset = len(self.fontdata) - reader.metaLength = len(self.compressed_metadata) - reader.metaOrigLength = len(self.xml_metadata) - reader.privOffset = reader.metaOffset + reader.metaLength - reader.privLength = len(self.privData) - flavorData = WOFF2FlavorData(reader) - self.assertEqual(self.xml_metadata, flavorData.metaData) - self.assertEqual(self.privData, flavorData.privData) - - def test_get_major_minorVersion(self): - reader = DummyReader(self.file) - reader.majorVersion = reader.minorVersion = 1 - flavorData = WOFF2FlavorData(reader) - self.assertEqual(flavorData.majorVersion, 1) - self.assertEqual(flavorData.minorVersion, 1) - - -class WOFF2WriterTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") - cls.font.importXML(OTX, quiet=True) - cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] - cls.numTables = len(cls.tags) - cls.file = 
BytesIO(CFF_WOFF2.getvalue()) - cls.file.seek(0, 2) - cls.length = (cls.file.tell() + 3) & ~3 - cls.setUpFlavorData() - - @classmethod - def setUpFlavorData(cls): - assert os.path.exists(METADATA) - with open(METADATA, 'rb') as f: - cls.xml_metadata = f.read() - cls.compressed_metadata = brotli.compress(cls.xml_metadata, mode=brotli.MODE_TEXT) - cls.privData = bytes(bytearray(random.sample(range(0, 256), 20))) - - def setUp(self): - self.file.seek(0) - self.writer = WOFF2Writer(BytesIO(), self.numTables, self.font.sfntVersion) - - def test_DSIG_dropped(self): - self.writer['DSIG'] = b"\0" - self.assertEqual(len(self.writer.tables), 0) - self.assertEqual(self.writer.numTables, self.numTables-1) - - def test_no_rewrite_table(self): - self.writer['ZZZZ'] = b"\0" - with self.assertRaisesRegex(ttLib.TTLibError, "cannot rewrite"): - self.writer['ZZZZ'] = b"\0" - - def test_num_tables(self): - self.writer['ABCD'] = b"\0" - with self.assertRaisesRegex(ttLib.TTLibError, "wrong number of tables"): - self.writer.close() - - def test_required_tables(self): - font = ttLib.TTFont(flavor="woff2") - with self.assertRaisesRegex(ttLib.TTLibError, "missing required table"): - font.save(BytesIO()) - - def test_head_transform_flag(self): - headData = self.font.getTableData('head') - origFlags = byteord(headData[16]) - woff2font = ttLib.TTFont(self.file) - newHeadData = woff2font.getTableData('head') - modifiedFlags = byteord(newHeadData[16]) - self.assertNotEqual(origFlags, modifiedFlags) - restoredFlags = modifiedFlags & ~0x08 # turn off bit 11 - self.assertEqual(origFlags, restoredFlags) - - def test_tables_sorted_alphabetically(self): - expected = sorted([t for t in self.tags if t != 'DSIG']) - woff2font = ttLib.TTFont(self.file) - self.assertEqual(expected, list(woff2font.reader.keys())) - - def test_checksums(self): - normFile = BytesIO(normalise_font(self.font, padding=4)) - normFile.seek(0) - normFont = ttLib.TTFont(normFile, checkChecksums=2) - w2font = ttLib.TTFont(self.file) 
- # force reconstructing glyf table using 4-byte padding - w2font.reader.padding = 4 - for tag in [t for t in self.tags if t != 'DSIG']: - w2data = w2font.reader[tag] - normData = normFont.reader[tag] - if tag == "head": - w2data = w2data[:8] + b'\0\0\0\0' + w2data[12:] - normData = normData[:8] + b'\0\0\0\0' + normData[12:] - w2CheckSum = ttLib.sfnt.calcChecksum(w2data) - normCheckSum = ttLib.sfnt.calcChecksum(normData) - self.assertEqual(w2CheckSum, normCheckSum) - normCheckSumAdjustment = normFont['head'].checkSumAdjustment - self.assertEqual(normCheckSumAdjustment, w2font['head'].checkSumAdjustment) - - def test_calcSFNTChecksumsLengthsAndOffsets(self): - normFont = ttLib.TTFont(BytesIO(normalise_font(self.font, padding=4))) - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer._normaliseGlyfAndLoca(padding=4) - self.writer._setHeadTransformFlag() - self.writer.tables = OrderedDict(sorted(self.writer.tables.items())) - self.writer._calcSFNTChecksumsLengthsAndOffsets() - for tag, entry in normFont.reader.tables.items(): - self.assertEqual(entry.offset, self.writer.tables[tag].origOffset) - self.assertEqual(entry.length, self.writer.tables[tag].origLength) - self.assertEqual(entry.checkSum, self.writer.tables[tag].checkSum) - - def test_bad_sfntVersion(self): - for i in range(self.numTables): - self.writer[bytechr(65 + i)*4] = b"\0" - self.writer.sfntVersion = 'ZZZZ' - with self.assertRaisesRegex(ttLib.TTLibError, "bad sfntVersion"): - self.writer.close() - - def test_calcTotalSize_no_flavorData(self): - expected = self.length - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_calcTotalSize_with_metaData(self): - expected = self.length + len(self.compressed_metadata) - flavorData = self.writer.flavorData = WOFF2FlavorData() - 
flavorData.metaData = self.xml_metadata - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_calcTotalSize_with_privData(self): - expected = self.length + len(self.privData) - flavorData = self.writer.flavorData = WOFF2FlavorData() - flavorData.privData = self.privData - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_calcTotalSize_with_metaData_and_privData(self): - metaDataLength = (len(self.compressed_metadata) + 3) & ~3 - expected = self.length + metaDataLength + len(self.privData) - flavorData = self.writer.flavorData = WOFF2FlavorData() - flavorData.metaData = self.xml_metadata - flavorData.privData = self.privData - self.writer.file = BytesIO() - for tag in self.tags: - self.writer[tag] = self.font.getTableData(tag) - self.writer.close() - self.assertEqual(expected, self.writer.length) - self.assertEqual(expected, self.writer.file.tell()) - - def test_getVersion(self): - # no version - self.assertEqual((0, 0), self.writer._getVersion()) - # version from head.fontRevision - fontRevision = self.font['head'].fontRevision - versionTuple = tuple(int(i) for i in str(fontRevision).split(".")) - entry = self.writer.tables['head'] = ttLib.getTableClass('head')() - entry.data = self.font.getTableData('head') - self.assertEqual(versionTuple, self.writer._getVersion()) - # version from writer.flavorData - flavorData = self.writer.flavorData = WOFF2FlavorData() - flavorData.majorVersion, flavorData.minorVersion = (10, 11) - self.assertEqual((10, 11), self.writer._getVersion()) - - -class WOFF2WriterTTFTest(WOFF2WriterTest): - - @classmethod - def setUpClass(cls): - cls.font = 
ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False, flavor="woff2") - cls.font.importXML(TTX, quiet=True) - cls.tags = [t for t in cls.font.keys() if t != 'GlyphOrder'] - cls.numTables = len(cls.tags) - cls.file = BytesIO(TT_WOFF2.getvalue()) - cls.file.seek(0, 2) - cls.length = (cls.file.tell() + 3) & ~3 - cls.setUpFlavorData() - - def test_normaliseGlyfAndLoca(self): - normTables = {} - for tag in ('head', 'loca', 'glyf'): - normTables[tag] = normalise_table(self.font, tag, padding=4) - for tag in self.tags: - tableData = self.font.getTableData(tag) - self.writer[tag] = tableData - if tag in normTables: - self.assertNotEqual(tableData, normTables[tag]) - self.writer._normaliseGlyfAndLoca(padding=4) - self.writer._setHeadTransformFlag() - for tag in normTables: - self.assertEqual(self.writer.tables[tag].data, normTables[tag]) - - -class WOFF2LocaTableTest(unittest.TestCase): - - def setUp(self): - self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - font['head'] = ttLib.getTableClass('head') - font['loca'] = WOFF2LocaTable() - font['glyf'] = WOFF2GlyfTable() - - def test_compile_short_loca(self): - locaTable = self.font['loca'] - locaTable.set(list(range(0, 0x20000, 2))) - self.font['glyf'].indexFormat = 0 - locaData = locaTable.compile(self.font) - self.assertEqual(len(locaData), 0x20000) - - def test_compile_short_loca_overflow(self): - locaTable = self.font['loca'] - locaTable.set(list(range(0x20000 + 1))) - self.font['glyf'].indexFormat = 0 - with self.assertRaisesRegex( - ttLib.TTLibError, "indexFormat is 0 but local offsets > 0x20000"): - locaTable.compile(self.font) - - def test_compile_short_loca_not_multiples_of_2(self): - locaTable = self.font['loca'] - locaTable.set([1, 3, 5, 7]) - self.font['glyf'].indexFormat = 0 - with self.assertRaisesRegex(ttLib.TTLibError, "offsets not multiples of 2"): - locaTable.compile(self.font) - - def test_compile_long_loca(self): - locaTable = self.font['loca'] - 
locaTable.set(list(range(0x20001))) - self.font['glyf'].indexFormat = 1 - locaData = locaTable.compile(self.font) - self.assertEqual(len(locaData), 0x20001 * 4) - - def test_compile_set_indexToLocFormat_0(self): - locaTable = self.font['loca'] - # offsets are all multiples of 2 and max length is < 0x10000 - locaTable.set(list(range(0, 0x20000, 2))) - locaTable.compile(self.font) - newIndexFormat = self.font['head'].indexToLocFormat - self.assertEqual(0, newIndexFormat) - - def test_compile_set_indexToLocFormat_1(self): - locaTable = self.font['loca'] - # offsets are not multiples of 2 - locaTable.set(list(range(10))) - locaTable.compile(self.font) - newIndexFormat = self.font['head'].indexToLocFormat - self.assertEqual(1, newIndexFormat) - # max length is >= 0x10000 - locaTable.set(list(range(0, 0x20000 + 1, 2))) - locaTable.compile(self.font) - newIndexFormat = self.font['head'].indexToLocFormat - self.assertEqual(1, newIndexFormat) - - -class WOFF2GlyfTableTest(unittest.TestCase): - - @classmethod - def setUpClass(cls): - font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - font.importXML(TTX, quiet=True) - cls.tables = {} - cls.transformedTags = ('maxp', 'head', 'loca', 'glyf') - for tag in reversed(cls.transformedTags): # compile in inverse order - cls.tables[tag] = font.getTableData(tag) - infile = BytesIO(TT_WOFF2.getvalue()) - reader = WOFF2Reader(infile) - cls.transformedGlyfData = reader.tables['glyf'].loadData( - reader.transformBuffer) - - def setUp(self): - self.font = font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) - font['head'] = ttLib.getTableClass('head')() - font['maxp'] = ttLib.getTableClass('maxp')() - font['loca'] = WOFF2LocaTable() - font['glyf'] = WOFF2GlyfTable() - for tag in self.transformedTags: - font[tag].decompile(self.tables[tag], font) - - def test_reconstruct_glyf_padded_4(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.padding = 4 - data = 
glyfTable.compile(self.font) - normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) - self.assertEqual(normGlyfData, data) - - def test_reconstruct_glyf_padded_2(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.padding = 2 - data = glyfTable.compile(self.font) - normGlyfData = normalise_table(self.font, 'glyf', glyfTable.padding) - self.assertEqual(normGlyfData, data) - - def test_reconstruct_glyf_unpadded(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - data = glyfTable.compile(self.font) - self.assertEqual(self.tables['glyf'], data) - - def test_reconstruct_glyf_incorrect_glyphOrder(self): - glyfTable = WOFF2GlyfTable() - badGlyphOrder = self.font.getGlyphOrder()[:-1] - self.font.setGlyphOrder(badGlyphOrder) - with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): - glyfTable.reconstruct(self.transformedGlyfData, self.font) - - def test_reconstruct_glyf_missing_glyphOrder(self): - glyfTable = WOFF2GlyfTable() - del self.font.glyphOrder - numGlyphs = self.font['maxp'].numGlyphs - del self.font['maxp'] - glyfTable.reconstruct(self.transformedGlyfData, self.font) - expected = [".notdef"] - expected.extend(["glyph%.5d" % i for i in range(1, numGlyphs)]) - self.assertEqual(expected, glyfTable.glyphOrder) - - def test_reconstruct_loca_padded_4(self): - locaTable = self.font['loca'] = WOFF2LocaTable() - glyfTable = self.font['glyf'] = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.padding = 4 - glyfTable.compile(self.font) - data = locaTable.compile(self.font) - normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) - self.assertEqual(normLocaData, data) - - def test_reconstruct_loca_padded_2(self): - locaTable = self.font['loca'] = WOFF2LocaTable() - glyfTable = self.font['glyf'] = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - 
glyfTable.padding = 2 - glyfTable.compile(self.font) - data = locaTable.compile(self.font) - normLocaData = normalise_table(self.font, 'loca', glyfTable.padding) - self.assertEqual(normLocaData, data) - - def test_reconstruct_loca_unpadded(self): - locaTable = self.font['loca'] = WOFF2LocaTable() - glyfTable = self.font['glyf'] = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - glyfTable.compile(self.font) - data = locaTable.compile(self.font) - self.assertEqual(self.tables['loca'], data) - - def test_reconstruct_glyf_header_not_enough_data(self): - with self.assertRaisesRegex(ttLib.TTLibError, "not enough 'glyf' data"): - WOFF2GlyfTable().reconstruct(b"", self.font) - - def test_reconstruct_glyf_table_incorrect_size(self): - msg = "incorrect size of transformed 'glyf'" - with self.assertRaisesRegex(ttLib.TTLibError, msg): - WOFF2GlyfTable().reconstruct(self.transformedGlyfData + b"\x00", self.font) - with self.assertRaisesRegex(ttLib.TTLibError, msg): - WOFF2GlyfTable().reconstruct(self.transformedGlyfData[:-1], self.font) - - def test_transform_glyf(self): - glyfTable = self.font['glyf'] - data = glyfTable.transform(self.font) - self.assertEqual(self.transformedGlyfData, data) - - def test_transform_glyf_incorrect_glyphOrder(self): - glyfTable = self.font['glyf'] - badGlyphOrder = self.font.getGlyphOrder()[:-1] - del glyfTable.glyphOrder - self.font.setGlyphOrder(badGlyphOrder) - with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): - glyfTable.transform(self.font) - glyfTable.glyphOrder = badGlyphOrder - with self.assertRaisesRegex(ttLib.TTLibError, "incorrect glyphOrder"): - glyfTable.transform(self.font) - - def test_transform_glyf_missing_glyphOrder(self): - glyfTable = self.font['glyf'] - del glyfTable.glyphOrder - del self.font.glyphOrder - numGlyphs = self.font['maxp'].numGlyphs - del self.font['maxp'] - glyfTable.transform(self.font) - expected = [".notdef"] - expected.extend(["glyph%.5d" % i for i in 
range(1, numGlyphs)]) - self.assertEqual(expected, glyfTable.glyphOrder) - - def test_roundtrip_glyf_reconstruct_and_transform(self): - glyfTable = WOFF2GlyfTable() - glyfTable.reconstruct(self.transformedGlyfData, self.font) - data = glyfTable.transform(self.font) - self.assertEqual(self.transformedGlyfData, data) - - def test_roundtrip_glyf_transform_and_reconstruct(self): - glyfTable = self.font['glyf'] - transformedData = glyfTable.transform(self.font) - newGlyfTable = WOFF2GlyfTable() - newGlyfTable.reconstruct(transformedData, self.font) - newGlyfTable.padding = 4 - reconstructedData = newGlyfTable.compile(self.font) - normGlyfData = normalise_table(self.font, 'glyf', newGlyfTable.padding) - self.assertEqual(normGlyfData, reconstructedData) - - -if __name__ == "__main__": - unittest.main() diff -Nru fonttools-3.0/Tools/fontTools/ttx.py fonttools-3.21.2/Tools/fontTools/ttx.py --- fonttools-3.0/Tools/fontTools/ttx.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/ttx.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,370 +0,0 @@ -"""\ -usage: ttx [options] inputfile1 [... inputfileN] - - TTX %s -- From OpenType To XML And Back - - If an input file is a TrueType or OpenType font file, it will be - dumped to an TTX file (an XML-based text format). - If an input file is a TTX file, it will be compiled to a TrueType - or OpenType font file. - - Output files are created so they are unique: an existing file is - never overwritten. - - General options: - -h Help: print this message - -d <outputfolder> Specify a directory where the output files are - to be created. - -o <outputfile> Specify a file to write the output to. A special - value of of - would use the standard output. - -f Overwrite existing output file(s), ie. don't append numbers. - -v Verbose: more messages will be written to stdout about what - is being done. - -q Quiet: No messages will be written to stdout about what - is being done. 
- -a allow virtual glyphs ID's on compile or decompile. - - Dump options: - -l List table info: instead of dumping to a TTX file, list some - minimal info about each table. - -t <table> Specify a table to dump. Multiple -t options - are allowed. When no -t option is specified, all tables - will be dumped. - -x <table> Specify a table to exclude from the dump. Multiple - -x options are allowed. -t and -x are mutually exclusive. - -s Split tables: save the TTX data into separate TTX files per - table and write one small TTX file that contains references - to the individual table dumps. This file can be used as - input to ttx, as long as the table files are in the - same directory. - -i Do NOT disassemble TT instructions: when this option is given, - all TrueType programs (glyph programs, the font program and the - pre-program) will be written to the TTX file as hex data - instead of assembly. This saves some time and makes the TTX - file smaller. - -z <format> Specify a bitmap data export option for EBDT: - {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT: - {'raw', 'extfile'} Each option does one of the following: - -z raw - * export the bitmap data as a hex dump - -z row - * export each row as hex data - -z bitwise - * export each row as binary in an ASCII art style - -z extfile - * export the data as external files with XML references - If no export format is specified 'raw' format is used. - -e Don't ignore decompilation errors, but show a full traceback - and abort. - -y <number> Select font number for TrueType Collection, - starting from 0. - --unicodedata <UnicodeData.txt> Use custom database file to write - character names in the comments of the cmap TTX output. - - Compile options: - -m Merge with TrueType-input-file: specify a TrueType or OpenType - font file to be merged with the TTX file. This option is only - valid when at most one TTX file is specified. - -b Don't recalc glyph bounding boxes: use the values in the TTX - file as-is. 
- --recalc-timestamp Set font 'modified' timestamp to current time. - By default, the modification time of the TTX file will be used. -""" - - -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * -from fontTools.ttLib import TTFont, TTLibError -from fontTools.misc.macCreatorType import getMacCreatorAndType -from fontTools.unicode import setUnicodeData -from fontTools.misc.timeTools import timestampSinceEpoch -import os -import sys -import getopt -import re - -def usage(): - from fontTools import version - print(__doc__ % version) - sys.exit(2) - - -numberAddedRE = re.compile("#\d+$") -opentypeheaderRE = re.compile('''sfntVersion=['"]OTTO["']''') - -def makeOutputFileName(input, outputDir, extension, overWrite=False): - dirName, fileName = os.path.split(input) - fileName, ext = os.path.splitext(fileName) - if outputDir: - dirName = outputDir - fileName = numberAddedRE.split(fileName)[0] - output = os.path.join(dirName, fileName + extension) - n = 1 - if not overWrite: - while os.path.exists(output): - output = os.path.join(dirName, fileName + "#" + repr(n) + extension) - n = n + 1 - return output - - -class Options(object): - - listTables = False - outputDir = None - outputFile = None - overWrite = False - verbose = False - quiet = False - splitTables = False - disassembleInstructions = True - mergeFile = None - recalcBBoxes = True - allowVID = False - ignoreDecompileErrors = True - bitmapGlyphDataFormat = 'raw' - unicodedata = None - recalcTimestamp = False - - def __init__(self, rawOptions, numFiles): - self.onlyTables = [] - self.skipTables = [] - self.fontNumber = -1 - for option, value in rawOptions: - # general options - if option == "-h": - from fontTools import version - print(__doc__ % version) - sys.exit(0) - elif option == "-d": - if not os.path.isdir(value): - print("The -d option value must be an existing directory") - sys.exit(2) - self.outputDir = value - elif option == "-o": - self.outputFile = value - 
elif option == "-f": - self.overWrite = True - elif option == "-v": - self.verbose = True - elif option == "-q": - self.quiet = True - # dump options - elif option == "-l": - self.listTables = True - elif option == "-t": - self.onlyTables.append(value) - elif option == "-x": - self.skipTables.append(value) - elif option == "-s": - self.splitTables = True - elif option == "-i": - self.disassembleInstructions = False - elif option == "-z": - validOptions = ('raw', 'row', 'bitwise', 'extfile') - if value not in validOptions: - print("-z does not allow %s as a format. Use %s" % (option, validOptions)) - sys.exit(2) - self.bitmapGlyphDataFormat = value - elif option == "-y": - self.fontNumber = int(value) - # compile options - elif option == "-m": - self.mergeFile = value - elif option == "-b": - self.recalcBBoxes = False - elif option == "-a": - self.allowVID = True - elif option == "-e": - self.ignoreDecompileErrors = False - elif option == "--unicodedata": - self.unicodedata = value - elif option == "--recalc-timestamp": - self.recalcTimestamp = True - if self.onlyTables and self.skipTables: - print("-t and -x options are mutually exclusive") - sys.exit(2) - if self.mergeFile and numFiles > 1: - print("Must specify exactly one TTX source file when using -m") - sys.exit(2) - - -def ttList(input, output, options): - ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True) - reader = ttf.reader - tags = sorted(reader.keys()) - print('Listing table info for "%s":' % input) - format = " %4s %10s %7s %7s" - print(format % ("tag ", " checksum", " length", " offset")) - print(format % ("----", "----------", "-------", "-------")) - for tag in tags: - entry = reader.tables[tag] - if ttf.flavor == "woff2": - # WOFF2 doesn't store table checksums, so they must be calculated - from fontTools.ttLib.sfnt import calcChecksum - data = entry.loadData(reader.transformBuffer) - checkSum = calcChecksum(data) - else: - checkSum = int(entry.checkSum) - if checkSum < 0: - checkSum = 
checkSum + 0x100000000 - checksum = "0x%08X" % checkSum - print(format % (tag, checksum, entry.length, entry.offset)) - print() - ttf.close() - - -def ttDump(input, output, options): - if not options.quiet: - print('Dumping "%s" to "%s"...' % (input, output)) - if options.unicodedata: - setUnicodeData(options.unicodedata) - ttf = TTFont(input, 0, verbose=options.verbose, allowVID=options.allowVID, - quiet=options.quiet, - ignoreDecompileErrors=options.ignoreDecompileErrors, - fontNumber=options.fontNumber) - ttf.saveXML(output, - quiet=options.quiet, - tables=options.onlyTables, - skipTables=options.skipTables, - splitTables=options.splitTables, - disassembleInstructions=options.disassembleInstructions, - bitmapGlyphDataFormat=options.bitmapGlyphDataFormat) - ttf.close() - - -def ttCompile(input, output, options): - if not options.quiet: - print('Compiling "%s" to "%s"...' % (input, output)) - ttf = TTFont(options.mergeFile, - recalcBBoxes=options.recalcBBoxes, - recalcTimestamp=options.recalcTimestamp, - verbose=options.verbose, allowVID=options.allowVID) - ttf.importXML(input, quiet=options.quiet) - - if not options.recalcTimestamp: - # use TTX file modification time for head "modified" timestamp - mtime = os.path.getmtime(input) - ttf['head'].modified = timestampSinceEpoch(mtime) - - ttf.save(output) - - if options.verbose: - import time - print("finished at", time.strftime("%H:%M:%S", time.localtime(time.time()))) - - -def guessFileType(fileName): - base, ext = os.path.splitext(fileName) - try: - f = open(fileName, "rb") - except IOError: - return None - cr, tp = getMacCreatorAndType(fileName) - if tp in ("sfnt", "FFIL"): - return "TTF" - if ext == ".dfont": - return "TTF" - header = f.read(256) - head = Tag(header[:4]) - if head == "OTTO": - return "OTF" - elif head == "ttcf": - return "TTC" - elif head in ("\0\1\0\0", "true"): - return "TTF" - elif head == "wOFF": - return "WOFF" - elif head == "wOF2": - return "WOFF2" - elif head.lower() == "<?xm": - # Use 
'latin1' because that can't fail. - header = tostr(header, 'latin1') - if opentypeheaderRE.search(header): - return "OTX" - else: - return "TTX" - return None - - -def parseOptions(args): - try: - rawOptions, files = getopt.getopt(args, "ld:o:fvqht:x:sim:z:baey:", - ['unicodedata=', "recalc-timestamp"]) - except getopt.GetoptError: - usage() - - if not files: - usage() - - options = Options(rawOptions, len(files)) - jobs = [] - - for input in files: - tp = guessFileType(input) - if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"): - extension = ".ttx" - if options.listTables: - action = ttList - else: - action = ttDump - elif tp == "TTX": - extension = ".ttf" - action = ttCompile - elif tp == "OTX": - extension = ".otf" - action = ttCompile - else: - print('Unknown file type: "%s"' % input) - continue - - if options.outputFile: - output = options.outputFile - else: - output = makeOutputFileName(input, options.outputDir, extension, options.overWrite) - # 'touch' output file to avoid race condition in choosing file names - if action != ttList: - open(output, 'a').close() - jobs.append((action, input, output)) - return jobs, options - - -def process(jobs, options): - for action, input, output in jobs: - action(input, output, options) - - -def waitForKeyPress(): - """Force the DOS Prompt window to stay open so the user gets - a chance to see what's wrong.""" - import msvcrt - print('(Hit any key to exit)') - while not msvcrt.kbhit(): - pass - - -def main(args=None): - if args is None: - args = sys.argv[1:] - jobs, options = parseOptions(args) - try: - process(jobs, options) - except KeyboardInterrupt: - print("(Cancelled.)") - except SystemExit: - if sys.platform == "win32": - waitForKeyPress() - else: - raise - except TTLibError as e: - print("Error:",e) - except: - if sys.platform == "win32": - import traceback - traceback.print_exc() - waitForKeyPress() - else: - raise - - -if __name__ == "__main__": - main() diff -Nru fonttools-3.0/Tools/fontTools/unicode.py 
fonttools-3.21.2/Tools/fontTools/unicode.py --- fonttools-3.0/Tools/fontTools/unicode.py 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/fontTools/unicode.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -from __future__ import print_function, division, absolute_import -from fontTools.misc.py23 import * - -def _makeunicodes(f): - import re - lines = iter(f.readlines()) - unicodes = {} - for line in lines: - if not line: continue - num, name = line.split(';')[:2] - if name[0] == '<': continue # "<control>", etc. - num = int(num, 16) - unicodes[num] = name - return unicodes - - -class _UnicodeCustom(object): - - def __init__(self, f): - if isinstance(f, basestring): - f = open(f) - self.codes = _makeunicodes(f) - - def __getitem__(self, charCode): - try: - return self.codes[charCode] - except KeyError: - return "????" - -class _UnicodeBuiltin(object): - - def __getitem__(self, charCode): - import unicodedata - try: - return unicodedata.name(unichr(charCode)) - except ValueError: - return "????" - -Unicode = _UnicodeBuiltin() - -def setUnicodeData(f): - global Unicode - Unicode = _UnicodeCustom(f) diff -Nru fonttools-3.0/Tools/pyftinspect fonttools-3.21.2/Tools/pyftinspect --- fonttools-3.0/Tools/pyftinspect 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/pyftinspect 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -#! /usr/bin/env python - -import sys -from fontTools import inspect - -inspect.main(sys.argv[1:]) diff -Nru fonttools-3.0/Tools/pyftmerge fonttools-3.21.2/Tools/pyftmerge --- fonttools-3.0/Tools/pyftmerge 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/pyftmerge 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -#! 
/usr/bin/env python - -import sys -from fontTools import merge - -merge.main(sys.argv[1:]) diff -Nru fonttools-3.0/Tools/pyftsubset fonttools-3.21.2/Tools/pyftsubset --- fonttools-3.0/Tools/pyftsubset 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/pyftsubset 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -#! /usr/bin/env python - -import sys -from fontTools import subset - -subset.main(sys.argv[1:]) diff -Nru fonttools-3.0/Tools/ttx fonttools-3.21.2/Tools/ttx --- fonttools-3.0/Tools/ttx 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Tools/ttx 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -#! /usr/bin/env python - -import sys -from fontTools import ttx - -ttx.main(sys.argv[1:]) diff -Nru fonttools-3.0/tox.ini fonttools-3.21.2/tox.ini --- fonttools-3.0/tox.ini 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/tox.ini 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,68 @@ +[tox] +envlist = py{27,36}-cov, htmlcov + +[testenv] +basepython = + py27: {env:TOXPYTHON:python2.7} + pypy: {env:TOXPYTHON:pypy} + py34: {env:TOXPYTHON:python3.4} + py35: {env:TOXPYTHON:python3.5} + py36: {env:TOXPYTHON:python3.6} +deps = + cov: coverage>=4.3 + pytest + -rrequirements.txt +install_command = + pip install -v {opts} {packages} +commands = + # run the test suite against the package installed inside tox env. + # We use parallel mode and then combine later so that coverage.py will take + # paths like .tox/py36/lib/python3.6/site-packages/fontTools and collapse + # them into Lib/fontTools. 
+ cov: coverage run --parallel-mode -m pytest {posargs} + nocov: pytest {posargs} + +[testenv:htmlcov] +basepython = {env:TOXPYTHON:python3.6} +deps = + coverage>=4.3 +skip_install = true +commands = + coverage combine + coverage html + +[testenv:codecov] +passenv = * +basepython = {env:TOXPYTHON:python} +deps = + coverage>=4.3 + codecov +skip_install = true +ignore_outcome = true +commands = + coverage combine + codecov --env TOXENV + +[testenv:bdist] +basepython = {env:TOXPYTHON:python3.6} +deps = + pygments + docutils + setuptools + wheel +skip_install = true +install_command = + # make sure we use the latest setuptools and wheel + pip install --upgrade {opts} {packages} +whitelist_externals = + rm +commands = + # check metadata and rst long_description + python setup.py check --restructuredtext --strict + # clean up build/ and dist/ folders + rm -rf {toxinidir}/dist + python setup.py clean --all + # build sdist + python setup.py sdist --dist-dir {toxinidir}/dist + # build wheel from sdist + pip wheel -v --no-deps --no-index --wheel-dir {toxinidir}/dist --find-links {toxinidir}/dist fonttools diff -Nru fonttools-3.0/.travis/after_success.sh fonttools-3.21.2/.travis/after_success.sh --- fonttools-3.0/.travis/after_success.sh 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/.travis/after_success.sh 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,11 @@ +#!/bin/bash + +set -e +set -x + +if [ "$TRAVIS_OS_NAME" == "osx" ]; then + source .venv/bin/activate +fi + +# upload coverage data to Codecov.io +[[ ${TOXENV} == *"-cov"* ]] && tox -e codecov diff -Nru fonttools-3.0/.travis/before_deploy.sh fonttools-3.21.2/.travis/before_deploy.sh --- fonttools-3.0/.travis/before_deploy.sh 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/.travis/before_deploy.sh 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,15 @@ +#!/bin/bash + +set -e +set -x + +# build sdist and wheel distribution packages in ./dist folder. 
+# Travis runs the `before_deploy` stage before each deployment, but +# we only want to build them once, as we want to use the same +# files for both Github and PyPI +if $(ls ./dist/fonttools*.zip > /dev/null 2>&1) && \ + $(ls ./dist/fonttools*.whl > /dev/null 2>&1); then + echo "Distribution packages already exists; skipping" +else + tox -e bdist +fi diff -Nru fonttools-3.0/.travis/before_install.sh fonttools-3.21.2/.travis/before_install.sh --- fonttools-3.0/.travis/before_install.sh 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/.travis/before_install.sh 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,6 @@ +#!/bin/bash + +if [[ -n "$PYENV_VERSION" ]]; then + wget https://github.com/praekeltfoundation/travis-pyenv/releases/download/${TRAVIS_PYENV_VERSION}/setup-pyenv.sh + source setup-pyenv.sh +fi diff -Nru fonttools-3.0/.travis/install.sh fonttools-3.21.2/.travis/install.sh --- fonttools-3.0/.travis/install.sh 1970-01-01 00:00:00.000000000 +0000 +++ fonttools-3.21.2/.travis/install.sh 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e +set -x + +ci_requirements="pip setuptools tox" + +if [ "$TRAVIS_OS_NAME" == "osx" ]; then + if [[ ${TOXENV} == *"py27"* ]]; then + # install pip on the system python + curl -O https://bootstrap.pypa.io/get-pip.py + python get-pip.py --user + # install virtualenv and create virtual environment + python -m pip install --user virtualenv + python -m virtualenv .venv/ + elif [[ ${TOXENV} == *"py3"* ]]; then + # install/upgrade current python3 with homebrew + if brew list --versions python3 > /dev/null; then + brew upgrade python3 + else + brew install python3 + fi + # create virtual environment + python3 -m venv .venv/ + else + echo "unsupported $TOXENV: "${TOXENV} + exit 1 + fi + # activate virtual environment + source .venv/bin/activate +fi + +python -m pip install $ci_requirements diff -Nru fonttools-3.0/.travis/run.sh fonttools-3.21.2/.travis/run.sh --- fonttools-3.0/.travis/run.sh 1970-01-01 
00:00:00.000000000 +0000 +++ fonttools-3.21.2/.travis/run.sh 2018-01-08 12:40:40.000000000 +0000 @@ -0,0 +1,10 @@ +#!/bin/bash + +set -e +set -x + +if [ "$TRAVIS_OS_NAME" == "osx" ]; then + source .venv/bin/activate +fi + +tox diff -Nru fonttools-3.0/.travis.yml fonttools-3.21.2/.travis.yml --- fonttools-3.0/.travis.yml 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/.travis.yml 2018-01-08 12:40:40.000000000 +0000 @@ -1,25 +1,85 @@ +sudo: false + language: python -python: - - "2.7" - - "3.3" - - "3.4" - - "pypy" -# - "pypy3" # Disable pypy3 until Travis updates it to >= 3.3 + +matrix: + fast_finish: true + include: + - python: 2.7 + env: TOXENV=py27-cov + - python: 3.4 + env: TOXENV=py34-cov + - python: 3.5 + env: TOXENV=py35-cov + - python: 3.6 + env: + - TOXENV=py36-cov + - BUILD_DIST=true + - python: pypy2.7-5.8.0 + # disable coverage.py on pypy because of performance problems + env: TOXENV=pypy-nocov + - language: generic + os: osx + env: TOXENV=py27-cov + - language: generic + os: osx + env: + - TOXENV=py36-cov + - HOMEBREW_NO_AUTO_UPDATE=1 + - env: + - TOXENV=py27-nocov + - PYENV_VERSION='2.7.6' + - PYENV_VERSION_STRING='Python 2.7.6' + - PYENV_ROOT=$HOME/.travis-pyenv + - TRAVIS_PYENV_VERSION='0.4.0' + +cache: + - pip + - directories: + - $HOME/.pyenv_cache + before_install: -# install GCC v4.8 with better C++11 support, required to build Brotli extension -# See: https://github.com/travis-ci/travis-ci/issues/1379 - - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test - - sudo apt-get -qq update - - sudo apt-get install -qq gcc-4.8 - - sudo apt-get install -qq g++-4.8 - - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 90 - - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 90 - - g++ --version + - source ./.travis/before_install.sh + install: - - pip install -vr requirements.txt - - make install + - ./.travis/install.sh + script: - - make check + - ./.travis/run.sh + +after_success: + - 
./.travis/after_success.sh + +before_deploy: + - ./.travis/before_deploy.sh + notifications: irc: "irc.freenode.org##fonts" - email: fonttools@googlegroups.com + email: fonttools-dev@googlegroups.com + +deploy: + # deploy to Github Releases on tags + - provider: releases + api_key: + secure: KEcWhJxMcnKay7wmWJCpg2W5GWHTQ+LaRbqGM11IKGcQuEOFxWuG7W1xjGpVdKPj/MQ+cG0b9hGUFpls1hwseOA1HANMv4xjCgYkuvT1OdpX/KOcZ7gfe/qaovzVxHyP9xwohnHSJMb790t37fmDfFUSROx3iEexIX09LLoDjO8= + skip_cleanup: true + file_glob: true + file: "dist/*" + on: + tags: true + repo: fonttools/fonttools + all_branches: true + condition: "$BUILD_DIST == true" + # deploy to PyPI on tags + - provider: pypi + server: https://upload.pypi.org/legacy/ + user: anthrotype + password: + secure: Dz3x8kh4ergBV6qZUgcGVDOEzjoCEFzzQiO5WVw4Zfi04DD8+d1ghmMz2BY4UvoVKSsFrfKDuEB3MCWyqewJsf/zoZQczk/vnWVFjERROieyO1Ckzpz/WkCvbjtniIE0lxzB7zorSV+kGI9VigGAaRlXJyU7mCFojeAFqD6cjS4= + skip_cleanup: true + distributions: pass + on: + tags: true + repo: fonttools/fonttools + all_branches: true + condition: "$BUILD_DIST == true" diff -Nru fonttools-3.0/Windows/fonttools-win-setup.iss fonttools-3.21.2/Windows/fonttools-win-setup.iss --- fonttools-3.0/Windows/fonttools-win-setup.iss 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Windows/fonttools-win-setup.iss 1970-01-01 00:00:00.000000000 +0000 @@ -1,355 +0,0 @@ -;This file has been created by Adam Twardoch <adam@twardoch.com> -;See README.TXT in this folder for instructions on building the setup - -[Setup] -AppName=TTX -AppVerName=TTX 2.0 r040926 for Windows -AppPublisher=Just van Rossum -AppPublisherURL=http://www.letterror.com/code/ttx/ -AppSupportURL=http://www.font.org/software/ttx/ -AppUpdatesURL=http://www.font.org/software/ttx/ -DefaultDirName={pf}\TTX -DefaultGroupName=TTX -AllowNoIcons=false -LicenseFile=..\LICENSE.txt -InfoBeforeFile=fonttools-win-setup.txt -InfoAfterFile=..\Doc\changes.txt -OutputBaseFilename=WinTTX2.0r040926 -AppCopyright=Copyright 1999-2004 by Just 
van Rossum, Letterror, The Netherlands. -UninstallDisplayIcon={app}\ttx.ico - -[Tasks] -Name: desktopicon; Description: Create a &desktop icon; GroupDescription: Additional icons: - -[Files] -Source: ..\dist\ttx\*.*; DestDir: {app}; Flags: ignoreversion promptifolder -Source: ..\LICENSE.txt; DestDir: {app}; Flags: ignoreversion promptifolder -Source: ..\Doc\documentation.html; DestDir: {app}; Flags: ignoreversion promptifolder -Source: ..\Doc\changes.txt; DestDir: {app}; Flags: ignoreversion promptifolder -Source: ..\Doc\bugs.txt; DestDir: {app}; Flags: ignoreversion promptifolder -Source: fonttools-win-setup.txt; DestDir: {app}; Flags: ignoreversion promptifolder -Source: ttx.ico; DestDir: {app}; Flags: ignoreversion promptifolder; AfterInstall: AddFolderToPathVariable - -[Icons] -Name: {userdesktop}\ttx.exe; Filename: {app}\ttx.exe; Tasks: desktopicon; IconFilename: {app}\ttx.ico; IconIndex: 0 -Name: {group}\TTX; Filename: {app}\ttx.exe; Tasks: desktopicon; IconFilename: {app}\ttx.ico; IconIndex: 0 -Name: {group}\TTX documentation; Filename: {app}\documentation.html; IconIndex: 0 -Name: {group}\Changes; Filename: {app}\changes.txt; IconIndex: 0 -Name: {group}\Bugs; Filename: {app}\bugs.txt; IconIndex: 0 -Name: {group}\License; Filename: {app}\LICENSE.txt; IconIndex: 0 -Name: {group}\Uninstall TTX; Filename: {uninstallexe}; IconIndex: 0 -Name: {reg:HKCU\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders,SendTo}\TTX; Filename: {app}\ttx.exe; WorkingDir: {reg:HKCU\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders,SendTo}; IconFilename: {app}\ttx.ico; IconIndex: 0; MinVersion: 0,5.00.2195 - -[_ISTool] -EnableISX=true - -[Registry] -Root: HKCR; Subkey: .ttx; ValueType: string; ValueData: {reg:HKCR\.xml,}; Flags: createvalueifdoesntexist uninsdeletekey - -[Code] - -// -// InnoSetup Extensions Knowledge Base -// Article 44 - Native ISX procedures for PATH modification -// http://www13.brinkster.com/vincenzog/isxart.asp?idart=44 -// Author: 
Thomas Vedel -// - -// Version log: -// 03/31/2003: Initial release (thv@lr.dk) - -const - // Modification method - pmAddToBeginning = $1; // Add dir to beginning of Path - pmAddToEnd = $2; // Add dir to end of Path - pmAddAllways = $4; // Add also if specified dir is already included in existing path - pmAddOnlyIfDirExists = $8; // Add only if specified dir actually exists - pmRemove = $10; // Remove dir from path - pmRemoveSubdirsAlso = $20; // Remove dir and all subdirs from path - - // Scope - psCurrentUser = 1; // Modify path for current user - psAllUsers = 2; // Modify path for all users - - // Error results - mpOK = 0; // No errors - mpMissingRights = -1; // User has insufficient rights - mpAutoexecNoWriteacc = -2; // Autoexec can not be written (may be readonly) - mpBothAddAndRemove = -3; // User has specified that dir should both be removed from and added to path - - -{ Helper procedure: Split a path environment variable into individual dirnames } -procedure SplitPath(Path: string; var Dirs: TStringList); -var - pos: integer; - s: string; -begin - Dirs.Clear; - s := ''; - pos := 1; - while (pos<=Length(Path)) do - begin - if (Path[pos]<>';') then - s := s + Path[pos]; - if ((Path[pos]=';') or (pos=Length(Path))) then - begin - s := Trim(s); - s := RemoveQuotes(s); - s := Trim(s); - if (s <> '') then - Dirs.Add(s); - s := ''; - end; - Pos := Pos + 1; - end; -end; // procedure SplitPath - - -{ Helper procedure: Concatenate individual dirnames into a path environment variable } -procedure ConcatPath(Dirs: TStringList; Quotes: boolean; var Path: string); -var - Index, MaxIndex: integer; - s: string; -begin - MaxIndex := Dirs.Count-1; - Path := ''; - for Index := 0 to MaxIndex do - begin - s := Dirs.Strings[Index]; - if ((Quotes) and (pos(' ',s) > 0)) then - s := AddQuotes(s); - Path := Path + s; - if (Index < MaxIndex) then - Path := Path + ';' - end; -end; // procedure ConcatPath - - -{ Helper function: Modifies path environment string } -procedure 
ModifyPathString(OldPath, DirName: string; Method: integer; Quotes: boolean; var ResultPath: string); -var - Dirs: TStringList; - DirNotInPath: Boolean; - i: integer; -begin - // Create Dirs variable - Dirs := TStringList.Create; - - // Remove quotes form DirName - DirName := Trim(DirName); - DirName := RemoveQuotes(DirName); - DirName := Trim(DirName); - - // Split old path in individual directory names - SplitPath(OldPath, Dirs); - - // Check if dir is allready in path - DirNotInPath := True; - for i:=Dirs.Count-1 downto 0 do - begin - if (uppercase(Dirs.Strings[i]) = uppercase(DirName)) then - DirNotInPath := False; - end; - - // Should dir be removed from existing Path? - if ((Method and (pmRemove or pmRemoveSubdirsAlso)) > 0) then - begin - for i:=Dirs.Count-1 downto 0 do - begin - if (((Method and pmRemoveSubdirsAlso) > 0) and (pos(uppercase(DirName)+'\', uppercase(Dirs.Strings[i])) = 1)) or - (((Method and (pmRemove) or (pmRemoveSubdirsAlso)) > 0) and (uppercase(DirName) = uppercase(Dirs.Strings[i]))) - then - Dirs.Delete(i); - end; - end; - - // Should dir be added to existing Path? 
- if ((Method and (pmAddToBeginning or pmAddToEnd)) > 0) then - begin - // Add dir to path - if (((Method and pmAddAllways) > 0) or DirNotInPath) then - begin - // Dir is not in path allready or should be added anyway - if (((Method and pmAddOnlyIfDirExists) = 0) or (DirExists(DirName))) then - begin - // Dir actually exsists or should be added anyway - if ((Method and pmAddToBeginning) > 0) then - Dirs.Insert(0, DirName) - else - Dirs.Append(DirName); - end; - end; - end; - - // Concatenate directory names into one single path variable - ConcatPath(Dirs, Quotes, ResultPath); - // Finally free Dirs object - Dirs.Free; -end; // ModifyPathString - - -{ Helper function: Modify path on Windows 9x } -function ModifyPath9x(DirName: string; Method: integer): integer; -var - AutoexecLines: TStringList; - ActualLine: String; - PathLineNos: TStringList; - FirstPathLineNo: Integer; - OldPath, ResultPath: String; - LineNo, CharNo, Index: integer; - - TempString: String; -begin - // Expect everything to be OK - result := mpOK; - - // Create stringslists - AutoexecLines := TStringList.Create; - PathLineNos := TStringList.Create; - - // Read existing path - OldPath := ''; - LoadStringFromFile('c:\Autoexec.bat', TempString); - AutoexecLines.Text := TempString; - PathLineNos.Clear; - // Read Autoexec line by line - for LineNo := 0 to AutoexecLines.Count - 1 do begin - ActualLine := AutoexecLines.Strings[LineNo]; - // Check if line starts with "PATH=" after first stripping spaces and other "fill-chars" - if Pos('=', ActualLine) > 0 then - begin - for CharNo := Pos('=', ActualLine)-1 downto 1 do - if (ActualLine[CharNo]=' ') or (ActualLine[CharNo]=#9) then - Delete(ActualLine, CharNo, 1); - if Pos('@', ActualLine) = 1 then - Delete(ActualLine, 1, 1); - if (Pos('PATH=', uppercase(ActualLine))=1) or (Pos('SETPATH=', uppercase(ActualLine))=1) then - begin - // Remove 'PATH=' and add path to "OldPath" variable - Delete(ActualLine, 1, pos('=', ActualLine)); - // Check if an earlier PATH 
variable is referenced, but there has been no previous PATH defined in Autoexec - if (pos('%PATH%',uppercase(ActualLine))>0) and (PathLineNos.Count=0) then - OldPath := ExpandConstant('{win}') + ';' + ExpandConstant('{win}')+'\COMMAND'; - if (pos('%PATH%',uppercase(ActualLine))>0) then - begin - ActualLine := Copy(ActualLine, 1, pos('%PATH%',uppercase(ActualLine))-1) + - OldPath + - Copy(ActualLine, pos('%PATH%',uppercase(ActualLine))+6, Length(ActualLine)); - end; - OldPath := ActualLine; - - // Update list of line numbers holding path variables - PathLineNos.Add(IntToStr(LineNo)); - end; - end; - end; - - // Save first line number in Autoexec.bat which modifies path environment variable - if PathLineNos.Count > 0 then - FirstPathLineNo := StrToInt(PathLineNos.Strings[0]) - else - FirstPathLineNo := 0; - - // Modify path - ModifyPathString(OldPath, DirName, Method, True, ResultPath); - - // Write Modified path back to Autoexec.bat - // First delete all existing path references from Autoexec.bat - Index := PathLineNos.Count-1; - while (Index>=0) do - begin - AutoexecLines.Delete(StrToInt(PathLineNos.Strings[Index])); - Index := Index-1; - end; - // Then insert new path variable into Autoexec.bat - AutoexecLines.Insert(FirstPathLineNo, '@PATH='+ResultPath); - // Delete old Autoexec.bat from disk - if not DeleteFile('c:\Autoexec.bat') then - result := mpAutoexecNoWriteAcc; - Sleep(500); - // And finally write Autoexec.bat back to disk - if not (result=mpAutoexecNoWriteAcc) then - SaveStringToFile('c:\Autoexec.bat', AutoexecLines.Text, false); - - // Free stringlists - PathLineNos.Free; - AutoexecLines.Free; -end; // ModifyPath9x - - -{ Helper function: Modify path on Windows NT, 2000 and XP } -function ModifyPathNT(DirName: string; Method, Scope: integer): integer; -var - RegRootKey: integer; - RegSubKeyName: string; - RegValueName: string; - OldPath, ResultPath: string; - OK: boolean; -begin - // Expect everything to be OK - result := mpOK; - - // Initialize 
registry key and value names to reflect if changes should be global or local to current user only - case Scope of - psCurrentUser: - begin - RegRootKey := HKEY_CURRENT_USER; - RegSubKeyName := 'Environment'; - RegValueName := 'Path'; - end; - psAllUsers: - begin - RegRootKey := HKEY_LOCAL_MACHINE; - RegSubKeyName := 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment'; - RegValueName := 'Path'; - end; - end; - - // Read current path value from registry - OK := RegQueryStringValue(RegRootKey, RegSubKeyName, RegValueName, OldPath); - if not OK then - begin - result := mpMissingRights; - Exit; - end; - - // Modify path - ModifyPathString(OldPath, DirName, Method, False, ResultPath); - - // Write new path value to registry - if not RegWriteStringValue(RegRootKey, RegSubKeyName, RegValueName, ResultPath) then - begin - result := mpMissingRights; - Exit; - - end; -end; // ModifyPathNT - - -{ Main function: Modify path } -function ModifyPath(Path: string; Method, Scope: integer): integer; -begin - // Check if both add and remove has been specified (= error!) 
- if (Method and (pmAddToBeginning or pmAddToEnd) and (pmRemove or pmRemoveSubdirsAlso)) > 0 then - begin - result := mpBothAddAndRemove; - Exit; - end; - - // Perform directory constant expansion - Path := ExpandConstantEx(Path, ' ', ' '); - - // Test if Win9x - if InstallOnThisVersion('4,0','0,0') = irInstall then - ModifyPath9x(Path, Method); - - // Test if WinNT, 2000 or XP - if InstallOnThisVersion('0,4','0,0') = irInstall then - ModifyPathNT(Path, Method, Scope); -end; // ModifyPath - -procedure AddFolderToPathVariable(); -begin - ModifyPath('{app}', pmAddToBeginning, psAllUsers); - ModifyPath('{app}', pmAddToBeginning, psCurrentUser); -end; diff -Nru fonttools-3.0/Windows/fonttools-win-setup.txt fonttools-3.21.2/Windows/fonttools-win-setup.txt --- fonttools-3.0/Windows/fonttools-win-setup.txt 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Windows/fonttools-win-setup.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,12 +0,0 @@ -TTX is an application to convert OpenType and TrueType files to and from an -XML-based text format, also called TTX. - -The TTX setup application can create an icon for TTX on your desktop. You will -then be able to drop .TTF or .OTF files onto the ttx.exe icon to dump the font -to a .TTX file. Dropping a .TTX file onto it builds a TTF or OTF font. - -Also, the setup puts a shortcut to TTX in your Send To context menu in Windows -Explorer. Click on any OTF, TTF or TTX file with the right mouse button, -choose Send To and then TTX. 
- -For more information, see documentation.html diff -Nru fonttools-3.0/Windows/mcmillan.bat fonttools-3.21.2/Windows/mcmillan.bat --- fonttools-3.0/Windows/mcmillan.bat 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Windows/mcmillan.bat 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -@echo off -mkdir Build -mkdir ..\dist -mkdir ..\dist\ttx -C:\Python23\Installer\Configure.py -C:\Python23\Installer\Makespec.py --upx --onefile --paths "C:\Python23\Lib\encodings;C:\Python23\Lib\site-packages\FontTools\fontTools\encodings;C:\Python23\Lib\site-packages\FontTools\fontTools\misc;C:\Python23\Lib\site-packages\FontTools\fontTools\pens;C:\Python23\Lib\site-packages\FontTools\fontTools\ttLib;" --icon ttx.ico --out Build C:\Python23\Lib\site-packages\FontTools\fontTools\ttx.py -C:\Python23\Installer\Build.py Build\ttx.spec -move Build\ttx.exe ..\dist\ttx - diff -Nru fonttools-3.0/Windows/README.TXT fonttools-3.21.2/Windows/README.TXT --- fonttools-3.0/Windows/README.TXT 2015-08-31 17:57:15.000000000 +0000 +++ fonttools-3.21.2/Windows/README.TXT 1970-01-01 00:00:00.000000000 +0000 @@ -1,53 +0,0 @@ - -TTX 2.0 for Windows -------------------------- - -Creating a Windows (9x/ME/NT/2000/XP) setup executable for TTX -This file has been created by Adam Twardoch <list.adam@twardoch.com> -December 14, 2004 - -Pre-compiled versions are hosted at http://www.font.org/software/ttx/ - -APPROACH I: Using py2exe and InnoSetup - -1. Install Python 2.3 for Windows: http://www.python.org/ -2. Install py2exe: http://starship.python.net/crew/theller/py2exe/ -3. Install InnoSetup 4: http://www.jrsoftware.org/ -4. Download the latest released source code of TTX/FontTools at - http://sourceforge.net/projects/fonttools/ - Or alternatively grab the sources from the VCS: - http://fonttools.sourceforge.net/ -5. Unzip the source code of TTX/FontTools into a folder. -6. 
In the folder where you unzipped TTX/FontTools, type: - python setup.py py2exe --icon Windows\ttx.ico --packages encodings -7. Run Inno Setup and open Windows\fonttools-win-setup.iss -8. In Inno Setup, select File/Compile, then Run/Run. - -APPROACH II: Using McMillan Installer and InnoSetup - -1. Install Python 2.3 for Windows: http://www.python.org/ -2. Download and unpack McMillan installer: - http://py.vaults.ca/apyllo2.py/22208368 - and put the Installer folder into your Python folder, - e.g. C:\Python23\Installer -3. Install InnoSetup 4: http://www.jrsoftware.org/ -4. Install Microsoft Visual C++ Toolkit 2003: - http://msdn.microsoft.com/visualc/vctoolkit2003/ -5. Put UPX somewhere within your PATH: http://upx.sourceforge.net/ -6. Download the latest released source code of TTX/FontTools at - http://sourceforge.net/projects/fonttools/ - Or alternatively grab the sources from the VCS: - http://fonttools.sourceforge.net/ -7. Unzip the source code of TTX/FontTools into a folder. -8. In the folder where you unzipped TTX/FontTools, type: - python setup.py install -f -9. Edit mcmillan.bat so the paths in the file correspond to the paths in your system, - and run it. -10.Run Inno Setup and open Windows\fonttools-win-setup.iss -11.In Inno Setup, select File/Compile, then Run/Run. - -The distributable TTX Windows setup executable has been saved -in the Output subfolder of the FontTools\Windows folder. - -For information on running TTX on Windows, see fonttools-win-setup.txt in this folder. - Binary files /tmp/tmpvmP72g/PWksCLQD7X/fonttools-3.0/Windows/ttx.ico and /tmp/tmpvmP72g/2cQZg1G5ms/fonttools-3.21.2/Windows/ttx.ico differ