diff --git a/Lib/ufo2ft/featureWriters/kernFeatureWriter2.py b/Lib/ufo2ft/featureWriters/kernFeatureWriter2.py index de4eb663..f491c40e 100644 --- a/Lib/ufo2ft/featureWriters/kernFeatureWriter2.py +++ b/Lib/ufo2ft/featureWriters/kernFeatureWriter2.py @@ -1,116 +1,179 @@ -"""Old implementation of KernFeatureWriter as of ufo2ft v2.30.0 for backward compat.""" +"""Alternative implementation of KernFeatureWriter. + +This behaves like the primary kern feature writer, with the important difference +of grouping kerning data into lookups by kerning direction, not script, like the +feature writer in ufo2ft v2.30 and older did. + +The original idea for the primary splitter was to generate smaller, easier to +pack lookups for each script exclusively, as cross-script kerning does not work +in browsers. However, other applications may allow it, e.g. Adobe's InDesign. +Subsequently, it was modified to clump together lookups that cross-reference +each other's scripts, negating the size advantages if you design fonts with +cross-script kerning for designer ease. + +As a special edge case, InDesign's default text shaper does not properly itemize +text runs, meaning it may group different scripts into the same run unless the +user specifically marks some text as being a specific script or language. To +make all kerning reachable in that case, it must be put into a single broad LTR, +RTL or neutral direction lookup instead of finer script clusters. That will make +it work in all cases, including when there is no cross-script kerning to fuse +different lookups together. + +Testing showed that size benefits are clawed back with the use of the HarfBuzz +repacker (during compilation) and GPOS compression (after compilation) at +acceptable speed. +""" from __future__ import annotations +import enum +import itertools +import logging +import sys +from collections import OrderedDict from types import SimpleNamespace -from typing import Mapping +from typing import Any, Iterator, Mapping, cast +import fontTools.feaLib.ast as fea_ast from fontTools import unicodedata from fontTools.designspaceLib import DesignSpaceDocument +from fontTools.feaLib.variableScalar import Location as VariableScalarLocation +from fontTools.feaLib.variableScalar import VariableScalar +from fontTools.ufoLib.kerning import lookupKerningValue +from fontTools.unicodedata import script_horizontal_direction -from ufo2ft.constants import INDIC_SCRIPTS, USE_SCRIPTS from ufo2ft.featureWriters import BaseFeatureWriter, ast -from ufo2ft.featureWriters.kernFeatureWriter import ( - KernFeatureWriter as NewKernFeatureWriter, +from ufo2ft.util import ( + DFLT_SCRIPTS, + classifyGlyphs, + collapse_varscalar, + describe_ufo, + get_userspace_location, + quantize, ) -from ufo2ft.util import classifyGlyphs, quantize, unicodeScriptDirection - -SIDE1_PREFIX = "public.kern1." -SIDE2_PREFIX = "public.kern2."
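A minimal usage sketch, assuming ufoLib2 and the public ufo2ft compile API (the UFO path is a placeholder), showing how a build opts into this direction-grouped writer:

```python
# Sketch: opting a build into the direction-grouped kern writer.
# Assumes ufoLib2 and the public ufo2ft compile API; the path is a placeholder.
import ufoLib2
import ufo2ft
from ufo2ft.featureWriters.kernFeatureWriter2 import KernFeatureWriter

ufo = ufoLib2.Font.open("MyFont-Regular.ufo")
# Kerning ends up in broad kern_dflt/kern_ltr/kern_rtl lookups instead of
# per-script clusters, so untagged runs (e.g. InDesign's default composer)
# still reach it.
ttf = ufo2ft.compileTTF(ufo, featureWriters=[KernFeatureWriter()])
ttf.save("MyFont-Regular.ttf")
```

The snapshot changes further down in this diff show the kern_dflt/kern_ltr/kern_rtl lookups and per-script feature registrations this produces.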
- -# In HarfBuzz the 'dist' feature is automatically enabled for these shapers: -# src/hb-ot-shape-complex-myanmar.cc -# src/hb-ot-shape-complex-use.cc -# src/hb-ot-shape-complex-indic.cc -# src/hb-ot-shape-complex-khmer.cc -# We derived the list of scripts associated to each dist-enabled shaper from -# `hb_ot_shape_complex_categorize` in src/hb-ot-shape-complex-private.hh -DIST_ENABLED_SCRIPTS = set(INDIC_SCRIPTS) | set(["Khmr", "Mymr"]) | set(USE_SCRIPTS) - -RTL_BIDI_TYPES = {"R", "AL"} -LTR_BIDI_TYPES = {"L", "AN", "EN"} +from .kernFeatureWriter import ( + AMBIGUOUS_BIDIS, + DIST_ENABLED_SCRIPTS, + LTR_BIDI_TYPES, + RTL_BIDI_TYPES, + SIDE1_PREFIX, + SIDE2_PREFIX, + KerningPair, + addClassDefinition, + log_redefined_group, + log_regrouped_glyph, +) -def unicodeBidiType(uv): - """Return "R" for characters with RTL direction, or "L" for LTR (whether - 'strong' or 'weak'), or None for neutral direction. - """ - char = chr(uv) - bidiType = unicodedata.bidirectional(char) - if bidiType in RTL_BIDI_TYPES: - return "R" - elif bidiType in LTR_BIDI_TYPES: - return "L" - else: - return None +if sys.version_info < (3, 10): + from typing_extensions import TypeAlias +else: + from typing import TypeAlias +LOGGER = logging.getLogger(__name__) -class KerningPair: - __slots__ = ("side1", "side2", "value", "directions", "bidiTypes") +KerningGroup: TypeAlias = "Mapping[str, tuple[str, ...]]" - def __init__(self, side1, side2, value, directions=None, bidiTypes=None): - if isinstance(side1, str): - self.side1 = ast.GlyphName(side1) - elif isinstance(side1, ast.GlyphClassDefinition): - self.side1 = ast.GlyphClassName(side1) - else: - raise AssertionError(side1) - if isinstance(side2, str): - self.side2 = ast.GlyphName(side2) - elif isinstance(side2, ast.GlyphClassDefinition): - self.side2 = ast.GlyphClassName(side2) - else: - raise AssertionError(side2) +class Direction(enum.Enum): + Neutral = "dflt" + LeftToRight = "ltr" + RightToLeft = "rtl" - self.value = value - self.directions = directions or set() - self.bidiTypes = bidiTypes or set() + def __lt__(self, other: Direction) -> bool: + if not isinstance(other, Direction): + return NotImplemented - @property - def firstIsClass(self): - return isinstance(self.side1, ast.GlyphClassName) + return self.name < other.name - @property - def secondIsClass(self): - return isinstance(self.side2, ast.GlyphClassName) - @property - def glyphs(self): - if self.firstIsClass: - classDef1 = self.side1.glyphclass - glyphs1 = {g.asFea() for g in classDef1.glyphSet()} - else: - glyphs1 = {self.side1.asFea()} - if self.secondIsClass: - classDef2 = self.side2.glyphclass - glyphs2 = {g.asFea() for g in classDef2.glyphSet()} - else: - glyphs2 = {self.side2.asFea()} - return glyphs1 | glyphs2 - - def __repr__(self): - return "<{} {} {} {}{}{}>".format( - self.__class__.__name__, - self.side1, - self.side2, - self.value, - " %r" % self.directions if self.directions else "", - " %r" % self.bidiTypes if self.bidiTypes else "", - ) +class KernContext(SimpleNamespace): + bidiGlyphs: dict[Direction, set[str]] + compiler: Any + default_source: Any + existingFeatures: Any + feaFile: Any + feaLanguagesByTag: dict[str, list[str]] + font: Any + gdefClasses: Any + glyphBidi: dict[str, set[Direction]] + glyphDirection: dict[str, set[Direction]] + glyphSet: OrderedDict[str, Any] + insertComments: Any + isVariable: bool + kerning: Any + knownScripts: set[str] + side1Membership: dict[str, str] + side2Membership: dict[str, str] + todo: Any class KernFeatureWriter(BaseFeatureWriter): """Generates a 
kerning feature based on groups and rules contained in an UFO's kerning data. - There are currently two possible writing modes: - 2) "skip" (default) will not write anything if the features are already present; - 1) "append" will add additional lookups to an existing feature, if present, - or it will add a new one at the end of all features. - If the `quantization` argument is given in the filter options, the resulting anchors are rounded to the nearest multiple of the quantization value. + + ## Implementation Notes + + The algorithm works like this: + + * Parse GDEF GlyphClassDefinition from UFO features.fea to get the set of + "Mark" glyphs (this will be used later to decide whether to add + ignoreMarks flag to kern lookups containing pairs between base and mark + glyphs). + * Get the ordered glyphset for the font, for filtering kerning groups and + kernings that reference unknown glyphs. + * Determine which scripts the kerning affects (read: "the font most probably + supports"), to know which lookups to generate later: + * First, determine the unambiguous script associations for each + (Unicoded) glyph in the glyphset, as in, glyphs that have a single + entry for their Unicode script extensions property; + * then, parse the `languagesystem` statements in the provided feature + file to add on top. + * Compile a Unicode cmap from the UFO and a GSUB table from the features so + far, so we can determine the bidirectionality class, so we can later + filter out kerning pairs that would mix RTL and LTR glyphs, which will not + occur in applications, and put the pairs into their correct lookup. + Unicode BiDi classes R and AL are considered R. Common characters and + numbers are considered neutral even when their BiDi class says otherwise, + so they'll end up in the common lookup available to all scripts. + * Get the kerning groups from the UFO and filter out glyphs not in the + glyphset and empty groups. Remember which group a glyph is a member of, + for kern1 and kern2, so we can later reconstruct per-direction groups. + * Get the bare kerning pairs from the UFO, filtering out pairs with unknown + groups or glyphs not in the glyphset and (redundant) zero class-to-class + kernings and optionally quantizing kerning values. + * Optionally, split kerning pairs into base (only base against base) and + mark (mixed mark and base) pairs, according to the glyphs' GDEF category, + so that kerning against marks can be accounted for correctly later. + * Go through all kerning pairs and split them up by direction, to put them + in different lookups. In pairs with common glyphs, assume the direction of + the dominant script; in pairs of only common glyphs, assume no direction. Pairs + with clashing script directions are dropped. + * Partition the first and second side of a pair by BiDi direction (as + above) and emit only those with the same direction or a strong + direction and neutral one. + * Discard pairs that mix RTL and LTR BiDi types, because they won't show + up in applications due to how Unicode text is split into runs. + * Glyphs will have only one direction assigned to them. * Preserve the + type of the kerning pair, so class-to-class kerning stays that way, + even when there's only one glyph on each side. + * Reconstruct kerning group names for the newly split classes. This is done + for debuggability; it makes no difference for the final font binary. + * This first looks at the neutral lookups and then all others, assigning + new group names as it goes.
A class like `@kern1.something = [foo bar + baz]` may be split up into `@kern1.dflt.something = [foo]` and + `@kern1.ltr.something = [bar baz]`. Note: If there is no dedicated + dflt lookup, common glyph classes like `[foo]` might carry the name + `@kern1.ltr.foo` if the class was first encountered while going over + the ltr lookup. + * Make a `kern` (and potentially `dist`) feature block and register the + lookups for each script. Some scripts need to be registered in the `dist` + feature for some shapers to discover them, e.g. Yezi. + * Write the new glyph class definitions and then the lookups and feature + blocks to the feature file. """ tableTag = "GPOS" @@ -118,38 +181,105 @@ class KernFeatureWriter(BaseFeatureWriter): options = dict(ignoreMarks=True, quantization=1) def setContext(self, font, feaFile, compiler=None): - ctx = super().setContext(font, feaFile, compiler=compiler) + ctx: KernContext = cast( + KernContext, super().setContext(font, feaFile, compiler=compiler) + ) + + if hasattr(font, "findDefault"): + ctx.default_source = font.findDefault().font + else: + ctx.default_source = font + + # Unless we use the legacy append mode (which ignores insertion + # markers), if the font (Designspace: default source) contains kerning + # and the feaFile contains `kern` or `dist` feature blocks, but we have + # no insertion markers (or they were misspelt and ignored), warn the + # user that the kerning blocks in the feaFile take precedence and other + # kerning is dropped. + if ( + self.mode == "skip" + and ctx.default_source.kerning + and ctx.existingFeatures & self.features + and not ctx.insertComments + ): + LOGGER.warning( + "%s: font has kerning, but also manually written kerning features " + "without an insertion comment. Dropping the former.", + describe_ufo(ctx.default_source), + ) + + # Remember which languages are defined for which OT tag, as all + # generated kerning needs to be registered for the script's `dflt` + # language, but also all those the designer defined manually. Otherwise, + # setting any language for a script would deactivate kerning. + feaLanguagesByScript = ast.getScriptLanguageSystems(feaFile, excludeDflt=False) + ctx.feaLanguagesByTag = { + otTag: languages + for _, languageSystems in feaLanguagesByScript.items() + for otTag, languages in languageSystems + } + + ctx.glyphSet = self.getOrderedGlyphSet() ctx.gdefClasses = self.getGDEFGlyphClasses() - ctx.kerning = self.getKerningData( - font, self.options, feaFile, self.getOrderedGlyphSet() + ctx.knownScripts = self.guessFontScripts() + + # We need the direction of a glyph (with common characters considered + # neutral or "dflt") to know in which of the three lookups to put the + # pair. + cmap = self.makeUnicodeToGlyphNameMapping() + gsub = self.compileGSUB() + extras = self.extraSubstitutions() + dirGlyphs = classifyGlyphs(unicodeScriptDirection, cmap, gsub, extras) + neutral_glyphs = ( + ctx.glyphSet.keys() + - dirGlyphs.get(Direction.LeftToRight, set()) + - dirGlyphs.get(Direction.RightToLeft, set()) + ) + dirGlyphs[Direction.Neutral] = neutral_glyphs + glyphDirection = {} + for direction, glyphs in dirGlyphs.items(): + for name in glyphs: + glyphDirection.setdefault(name, set()).add(direction) + ctx.glyphDirection = glyphDirection + + # We need the BiDi class of a glyph to reject kerning of RTL glyphs + # against LTR glyphs. 
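The per-character classification that feeds `classifyGlyphs` here can be illustrated standalone; a small sketch, assuming `DFLT_SCRIPTS` corresponds to the common/inherited/unknown script codes and using only fontTools:

```python
# Sketch of the script-direction classification used for the lookup split.
# Assumes DFLT_SCRIPTS covers the common/inherited/unknown script codes.
from __future__ import annotations

from fontTools import unicodedata


def direction_of(uv: int) -> str | None:
    script = unicodedata.script(chr(uv))
    if script in {"Zyyy", "Zinh", "Zzzz"}:  # neutral, like DFLT_SCRIPTS
        return None
    return unicodedata.script_horizontal_direction(script, "LTR")


assert direction_of(ord("a")) == "LTR"  # Latn
assert direction_of(0x0627) == "RTL"  # Arab, alef-ar
assert direction_of(ord(".")) is None  # Zyyy, goes into the neutral bucket
```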
+ ctx.bidiGlyphs = classifyGlyphs(unicodeBidiType, cmap, gsub, extras) + neutral_glyphs = ( + ctx.glyphSet.keys() + - ctx.bidiGlyphs.get(Direction.LeftToRight, set()) + - ctx.bidiGlyphs.get(Direction.RightToLeft, set()) ) + ctx.bidiGlyphs[Direction.Neutral] = neutral_glyphs + glyphBidi = {} + for direction, glyphs in ctx.bidiGlyphs.items(): + for name in glyphs: + glyphBidi.setdefault(name, set()).add(direction) + ctx.glyphBidi = glyphBidi - feaScripts = ast.getScriptLanguageSystems(feaFile) - ctx.scriptGroups = self._groupScriptsByTagAndDirection(feaScripts) + ctx.kerning = extract_kerning_data(ctx, cast(SimpleNamespace, self.options)) return ctx def shouldContinue(self): - if not self.context.kerning.pairs: + if ( + not self.context.kerning.base_pairs_by_direction + and not self.context.kerning.mark_pairs_by_direction + ): self.log.debug("No kerning data; skipped") return False - if "dist" in self.context.todo and "dist" not in self.context.scriptGroups: - self.log.debug( - "No dist-enabled scripts defined in languagesystem " - "statements; dist feature will not be generated" - ) - self.context.todo.remove("dist") - return super().shouldContinue() def _write(self): - lookups = self._makeKerningLookups() + self.context: KernContext + self.options: SimpleNamespace + lookups = make_kerning_lookups(self.context, self.options) if not lookups: self.log.debug("kerning lookups empty; skipped") return False - features = self._makeFeatureBlocks(lookups) + features = make_feature_blocks(self.context, lookups) if not features: self.log.debug("kerning features empty; skipped") return False @@ -158,15 +288,14 @@ def _write(self): feaFile = self.context.feaFile # first add the glyph class definitions - side1Classes = self.context.kerning.side1Classes - side2Classes = self.context.kerning.side2Classes - newClassDefs = [] - for classes in (side1Classes, side2Classes): - newClassDefs.extend([c for _, c in sorted(classes.items())]) + classDefs = self.context.kerning.classDefs + newClassDefs = [c for _, c in sorted(classDefs.items())] lookupGroups = [] - for _, lookupGroup in sorted(lookups.items()): - lookupGroups.extend(lookupGroup) + for _, lookupGroup in sorted(lookups.items(), key=lambda x: x[0].value): + lookupGroups.extend( + lkp for lkp in lookupGroup.values() if lkp not in lookupGroups + ) self._insert( feaFile=feaFile, @@ -176,410 +305,734 @@ def _write(self): ) return True - @classmethod - def getKerningData(cls, font, options, feaFile=None, glyphSet=None): - side1Classes, side2Classes = cls.getKerningClasses(font, feaFile, glyphSet) - pairs = cls.getKerningPairs(font, side1Classes, side2Classes, glyphSet, options) - return SimpleNamespace( - side1Classes=side1Classes, side2Classes=side2Classes, pairs=pairs - ) - @staticmethod - def getKerningGroups(font, glyphSet=None): - if glyphSet: - allGlyphs = set(glyphSet.keys()) - else: - allGlyphs = set(font.keys()) - side1Groups = {} - side2Groups = {} +def unicodeBidiType(uv: int) -> Direction | None: + """Return Direction.RightToLeft for characters with strong RTL + direction, or Direction.LeftToRight for strong LTR and European and Arabic + numbers, or None for neutral direction. 
+ """ + bidiType = unicodedata.bidirectional(chr(uv)) + if bidiType in RTL_BIDI_TYPES: + return Direction.RightToLeft + elif bidiType in LTR_BIDI_TYPES: + return Direction.LeftToRight + return None + + +def unicodeScriptDirection(uv: int) -> Direction | None: + script = unicodedata.script(chr(uv)) + if script in DFLT_SCRIPTS: + return None + direction = unicodedata.script_horizontal_direction(script, "LTR") + if direction == "LTR": + return Direction.LeftToRight + elif direction == "RTL": + return Direction.RightToLeft + raise ValueError(f"Unknown direction {direction}") + + +def extract_kerning_data(context: KernContext, options: SimpleNamespace) -> Any: + side1Groups, side2Groups = get_kerning_groups(context) + if context.isVariable: + pairs = get_variable_kerning_pairs(context, options, side1Groups, side2Groups) + else: + pairs = get_kerning_pairs(context, options, side1Groups, side2Groups) + + if options.ignoreMarks: + marks = context.gdefClasses.mark + base_pairs, mark_pairs = split_base_and_mark_pairs(pairs, marks) + else: + base_pairs = pairs + mark_pairs = [] + + base_pairs_by_direction = split_kerning(context, base_pairs) + mark_pairs_by_direction = split_kerning(context, mark_pairs) - if isinstance(font, DesignSpaceDocument): - default_font = font.findDefault() - assert default_font is not None - font = default_font.font - assert font is not None + return SimpleNamespace( + base_pairs_by_direction=base_pairs_by_direction, + mark_pairs_by_direction=mark_pairs_by_direction, + side1Classes={}, + side2Classes={}, + classDefs={}, + ) + +def get_kerning_groups(context: KernContext) -> tuple[KerningGroup, KerningGroup]: + allGlyphs = context.glyphSet + + side1Groups: dict[str, tuple[str, ...]] = {} + side1Membership: dict[str, str] = {} + side2Groups: dict[str, tuple[str, ...]] = {} + side2Membership: dict[str, str] = {} + + if isinstance(context.font, DesignSpaceDocument): + fonts = [source.font for source in context.font.sources] + else: + fonts = [context.font] + + for font in fonts: + assert font is not None for name, members in font.groups.items(): # prune non-existent or skipped glyphs - members = [g for g in members if g in allGlyphs] + members = {g for g in members if g in allGlyphs} + # skip empty groups if not members: - # skip empty groups continue # skip groups without UFO3 public.kern{1,2} prefix if name.startswith(SIDE1_PREFIX): - side1Groups[name] = members + name_truncated = name[len(SIDE1_PREFIX) :] + known_members = members.intersection(side1Membership.keys()) + if known_members: + for glyph_name in known_members: + original_name_truncated = side1Membership[glyph_name] + if name_truncated != original_name_truncated: + log_regrouped_glyph( + "first", + name, + original_name_truncated, + font, + glyph_name, + ) + # Skip the whole group definition if there is any + # overlap problem. + continue + group = side1Groups.get(name) + if group is None: + side1Groups[name] = tuple(sorted(members)) + for member in members: + side1Membership[member] = name_truncated + elif set(group) != members: + log_redefined_group("left", name, group, font, members) elif name.startswith(SIDE2_PREFIX): - side2Groups[name] = members - return side1Groups, side2Groups - - @classmethod - def getKerningClasses(cls, font, feaFile=None, glyphSet=None): - side1Groups, side2Groups = cls.getKerningGroups(font, glyphSet) - side1Classes = ast.makeGlyphClassDefinitions( - side1Groups, feaFile, stripPrefix="public." 
- ) - side2Classes = ast.makeGlyphClassDefinitions( - side2Groups, feaFile, stripPrefix="public." + name_truncated = name[len(SIDE2_PREFIX) :] + known_members = members.intersection(side2Membership.keys()) + if known_members: + for glyph_name in known_members: + original_name_truncated = side2Membership[glyph_name] + if name_truncated != original_name_truncated: + log_regrouped_glyph( + "second", + name, + original_name_truncated, + font, + glyph_name, + ) + # Skip the whole group definition if there is any + # overlap problem. + continue + group = side2Groups.get(name) + if group is None: + side2Groups[name] = tuple(sorted(members)) + for member in members: + side2Membership[member] = name_truncated + elif set(group) != members: + log_redefined_group("right", name, group, font, members) + context.side1Membership = side1Membership + context.side2Membership = side2Membership + return side1Groups, side2Groups + + +def get_kerning_pairs( + context: KernContext, + options: SimpleNamespace, + side1Classes: KerningGroup, + side2Classes: KerningGroup, +) -> list[KerningPair]: + glyphSet = context.glyphSet + font = context.font + kerning: Mapping[tuple[str, str], float] = font.kerning + quantization = options.quantization + + result = [] + for (side1, side2), value in kerning.items(): + firstIsClass, secondIsClass = (side1 in side1Classes, side2 in side2Classes) + # Filter out pairs that reference missing groups or glyphs. + if not firstIsClass and side1 not in glyphSet: + continue + if not secondIsClass and side2 not in glyphSet: + continue + # Ignore zero-valued class kern pairs. They are the most general + # kerns, so they don't override anything else like glyph kerns would + # and zero is the default. + if firstIsClass and secondIsClass and value == 0: + continue + if firstIsClass: + side1 = side1Classes[side1] + if secondIsClass: + side2 = side2Classes[side2] + value = quantize(value, quantization) + result.append(KerningPair(side1, side2, value)) + + return result + + +def get_variable_kerning_pairs( + context: KernContext, + options: SimpleNamespace, + side1Classes: KerningGroup, + side2Classes: KerningGroup, +) -> list[KerningPair]: + designspace: DesignSpaceDocument = context.font + glyphSet = context.glyphSet + quantization = options.quantization + + # Gather utility variables for faster kerning lookups. + # TODO: Do we construct these in code elsewhere? + assert not (set(side1Classes) & set(side2Classes)) + unified_groups = {**side1Classes, **side2Classes} + + glyphToFirstGroup = { + glyph_name: group_name # TODO: Is this overwrite safe? User input is adversarial + for group_name, glyphs in side1Classes.items() + for glyph_name in glyphs + } + glyphToSecondGroup = { + glyph_name: group_name + for group_name, glyphs in side2Classes.items() + for glyph_name in glyphs + } + + # Collate every kerning pair in the designspace, as even UFOs that + # provide no entry for the pair must contribute a value at their + # source's location in the VariableScalar. + # NOTE: This is required as the DS+UFO kerning model and the OpenType + # variation model handle the absence of a kerning value at a + # given location differently: + # - DS+UFO: + # If the missing pair excepts another pair, take its value; + # Otherwise, take a value of 0. + # - OpenType: + # Always interpolate from other locations, ignoring more + # general pairs that this one excepts. 
+ # See discussion: https://github.com/googlefonts/ufo2ft/pull/635 + all_pairs: set[tuple[str, str]] = set() + for source in designspace.sources: + if source.layerName is not None: + continue + assert source.font is not None + all_pairs |= set(source.font.kerning) + + kerning_pairs_in_progress: dict[ + tuple[str | tuple[str, ...], str | tuple[str, ...]], VariableScalar + ] = {} + for source in designspace.sources: + # Skip sparse sources, because they can have no kerning. + if source.layerName is not None: + continue + assert source.font is not None + + location = VariableScalarLocation( + get_userspace_location(designspace, source.location) ) - return side1Classes, side2Classes - - @staticmethod - def getKerningPairs(font, side1Classes, side2Classes, glyphSet=None, options=None): - if isinstance(font, DesignSpaceDocument): - # Reuse the newer kern writers variable kerning extractor. Repack - # some arguments and the return type for this. - side1ClassesRaw: Mapping[str, tuple[str, ...]] = { - group_name: tuple( - glyph - for glyphs in glyph_defs.glyphSet() - for glyph in glyphs.glyphSet() - ) - for group_name, glyph_defs in side1Classes.items() - } - side2ClassesRaw: Mapping[str, tuple[str, ...]] = { - group_name: tuple( - glyph - for glyphs in glyph_defs.glyphSet() - for glyph in glyphs.glyphSet() - ) - for group_name, glyph_defs in side2Classes.items() - } - pairs = NewKernFeatureWriter.getVariableKerningPairs( - font, - side1ClassesRaw, - side2ClassesRaw, - glyphSet or {}, - options or SimpleNamespace(**KernFeatureWriter.options), - ) - pairs.sort() - side1toClass: Mapping[tuple[str, ...], ast.GlyphClassDefinition] = { - tuple( - glyph - for glyphs in glyph_defs.glyphSet() - for glyph in glyphs.glyphSet() - ): glyph_defs - for glyph_defs in side1Classes.values() - } - side2toClass: Mapping[tuple[str, ...], ast.GlyphClassDefinition] = { - tuple( - glyph - for glyphs in glyph_defs.glyphSet() - for glyph in glyphs.glyphSet() - ): glyph_defs - for glyph_defs in side2Classes.values() - } - return [ - KerningPair( - ( - side1toClass[pair.side1] - if isinstance(pair.side1, tuple) - else pair.side1 - ), - ( - side2toClass[pair.side2] - if isinstance(pair.side2, tuple) - else pair.side2 - ), - pair.value, - ) - for pair in pairs - ] - if glyphSet: - allGlyphs = set(glyphSet.keys()) - else: - allGlyphs = set(font.keys()) - kerning = font.kerning + kerning: Mapping[tuple[str, str], float] = source.font.kerning + for pair in all_pairs: + side1, side2 = pair + firstIsClass = side1 in side1Classes + secondIsClass = side2 in side2Classes - pairsByFlags = {} - for side1, side2 in kerning: - # filter out pairs that reference missing groups or glyphs - if side1 not in side1Classes and side1 not in allGlyphs: + # Filter out pairs that reference missing groups or glyphs. + # TODO: Can we do this outside of the loop? We know the pairs already. 
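The DS+UFO fallback semantics described in the NOTE above are what `lookupKerningValue` resolves per source; a small sketch with hypothetical groups and values:

```python
# Sketch of the DS+UFO exception fallback that lookupKerningValue applies
# per source. Group names and values are hypothetical.
from fontTools.ufoLib.kerning import lookupKerningValue

groups = {"public.kern1.O": ["O", "D", "Q"], "public.kern2.A": ["A", "Aacute"]}
kerning = {("public.kern1.O", "public.kern2.A"): -40}
# No explicit ("D", "A") entry in this source: fall back to the class kern.
assert lookupKerningValue(("D", "A"), kerning, groups) == -40
# Nothing more general applies: fall back to zero.
assert lookupKerningValue(("T", "T"), kerning, groups) == 0
```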
+ if not firstIsClass and side1 not in glyphSet: continue - if side2 not in side2Classes and side2 not in allGlyphs: + if not secondIsClass and side2 not in glyphSet: continue - flags = (side1 in side1Classes, side2 in side2Classes) - pairsByFlags.setdefault(flags, set()).add((side1, side2)) - - result = [] - for flags, pairs in sorted(pairsByFlags.items()): - for side1, side2 in sorted(pairs): - value = kerning[side1, side2] - if all(flags) and value == 0: - # ignore zero-valued class kern pairs - continue - firstIsClass, secondIsClass = flags - if firstIsClass: - side1 = side1Classes[side1] - if secondIsClass: - side2 = side2Classes[side2] - result.append(KerningPair(side1, side2, value)) - return result - - def _intersectPairs(self, attribute, glyphSets): - allKeys = set() - for pair in self.context.kerning.pairs: - for key, glyphs in glyphSets.items(): - if not pair.glyphs.isdisjoint(glyphs): - getattr(pair, attribute).add(key) - allKeys.add(key) - return allKeys - - @staticmethod - def _groupScriptsByTagAndDirection(feaScripts): - # Read scripts/languages defined in feaFile's 'languagesystem' - # statements and group them by the feature tag (kern or dist) - # they are associated with, and the global script's horizontal - # direction (DFLT is excluded) - scriptGroups = {} - for scriptCode, scriptLangSys in feaScripts.items(): - if scriptCode: - direction = unicodedata.script_horizontal_direction(scriptCode, "LTR") - else: - direction = "LTR" - if scriptCode in DIST_ENABLED_SCRIPTS: - tag = "dist" - else: - tag = "kern" - scriptGroups.setdefault(tag, {}).setdefault(direction, []).extend( - scriptLangSys + + # Get the kerning value for this source and quantize, following + # the DS+UFO semantics described above. + value = quantize( + lookupKerningValue( + pair, + kerning, + unified_groups, + glyphToFirstGroup=glyphToFirstGroup, + glyphToSecondGroup=glyphToSecondGroup, + ), + quantization, ) - return scriptGroups - - @staticmethod - def _makePairPosRule(pair, rtl=False, quantization=1): - enumerated = pair.firstIsClass ^ pair.secondIsClass - value = quantize(pair.value, quantization) - if rtl and "L" in pair.bidiTypes: - # numbers are always shaped LTR even in RTL scripts - rtl = False - valuerecord = ast.ValueRecord( - xPlacement=value if rtl else None, - yPlacement=0 if rtl else None, - xAdvance=value, - yAdvance=0 if rtl else None, + + if firstIsClass: + side1 = side1Classes[side1] + if secondIsClass: + side2 = side2Classes[side2] + + # TODO: Can we instantiate these outside of the loop? We know the pairs already. + var_scalar = kerning_pairs_in_progress.setdefault( + (side1, side2), VariableScalar() + ) + # NOTE: Avoid using .add_value because it instantiates a new + # VariableScalarLocation on each call. + var_scalar.values[location] = value + + # We may need to provide a default location value to the variation + # model, find out where that is. + default_source = context.font.findDefault() + default_location = VariableScalarLocation( + get_userspace_location(designspace, default_source.location) + ) + + result = [] + for (side1, side2), value in kerning_pairs_in_progress.items(): + # TODO: Should we interpolate a default value if it's not in the + # sources, rather than inserting a zero? What would varLib do? + if default_location not in value.values: + value.values[default_location] = 0 + value = collapse_varscalar(value) + pair = KerningPair(side1, side2, value) + # Ignore zero-valued class kern pairs. 
They are the most general + # kerns, so they don't override anything else like glyph kerns would + # and zero is the default. + if pair.firstIsClass and pair.secondIsClass and pair.value == 0: + continue + result.append(pair) + + return result + + +def split_base_and_mark_pairs( + pairs: list[KerningPair], marks: set[str] +) -> tuple[list[KerningPair], list[KerningPair]]: + if not marks: + return list(pairs), [] + + basePairs: list[KerningPair] = [] + markPairs: list[KerningPair] = [] + for pair in pairs: + # Disentangle kerning between bases and marks by splitting a pair + # into a list of base-to-base pairs (basePairs) and a list of + # base-to-mark, mark-to-base and mark-to-mark pairs (markPairs). + # This ensures that "kerning exceptions" (a kerning pair modifying + # the effect of another) work as intended because these related + # pairs end up in the same list together. + side1Bases: tuple[str, ...] | str | None = None + side1Marks: tuple[str, ...] | str | None = None + if pair.firstIsClass: + side1Bases = tuple(glyph for glyph in pair.side1 if glyph not in marks) + side1Marks = tuple(glyph for glyph in pair.side1 if glyph in marks) + elif pair.side1 in marks: + side1Marks = pair.side1 + else: + side1Bases = pair.side1 + + side2Bases: tuple[str, ...] | str | None = None + side2Marks: tuple[str, ...] | str | None = None + if pair.secondIsClass: + side2Bases = tuple(glyph for glyph in pair.side2 if glyph not in marks) + side2Marks = tuple(glyph for glyph in pair.side2 if glyph in marks) + elif pair.side2 in marks: + side2Marks = pair.side2 + else: + side2Bases = pair.side2 + + if side1Bases and side2Bases: # base-to-base + basePairs.append(KerningPair(side1Bases, side2Bases, value=pair.value)) + + if side1Bases and side2Marks: # base-to-mark + markPairs.append(KerningPair(side1Bases, side2Marks, value=pair.value)) + if side1Marks and side2Bases: # mark-to-base + markPairs.append(KerningPair(side1Marks, side2Bases, value=pair.value)) + if side1Marks and side2Marks: # mark-to-mark + markPairs.append(KerningPair(side1Marks, side2Marks, value=pair.value)) + + return basePairs, markPairs + + +def split_kerning( + context: KernContext, + pairs: list[KerningPair], +) -> dict[Direction, list[KerningPair]]: + # Split kerning into per-direction buckets, so we can drop them into their + # own lookups. 
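To make the base/mark disentangling above concrete, a small sketch (hypothetical glyph names; the imports assume this patch is applied):

```python
# Sketch: disentangling a class pair that mixes a base and a mark.
# Hypothetical glyph names; imports assume this patch is applied.
from ufo2ft.featureWriters.kernFeatureWriter import KerningPair
from ufo2ft.featureWriters.kernFeatureWriter2 import split_base_and_mark_pairs

pairs = [KerningPair(("A", "acutecomb"), "V", -30)]
base_pairs, mark_pairs = split_base_and_mark_pairs(pairs, marks={"acutecomb"})
# base_pairs -> [KerningPair(("A",), "V", -30)], kerned with IgnoreMarks
# mark_pairs -> [KerningPair(("acutecomb",), "V", -30)], kerned without it
```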
+ glyph_bidi = context.glyphBidi + glyph_direction = context.glyphDirection + kerning_per_direction: dict[Direction, list[KerningPair]] = {} + for pair in pairs: + for direction, split_pair in partition_by_direction( + pair, glyph_bidi, glyph_direction + ): + kerning_per_direction.setdefault(direction, []).append(split_pair) + + for pairs in kerning_per_direction.values(): + pairs.sort() + + return kerning_per_direction + + +def partition_by_direction( + pair: KerningPair, + glyph_bidi: Mapping[str, set[Direction]], + glyph_direction: Mapping[str, set[Direction]], +) -> Iterator[tuple[Direction, KerningPair]]: + """Split a potentially mixed-direction pair into pairs of the same + or compatible direction.""" + + side1Bidis: dict[Direction, set[str]] = {} + side2Bidis: dict[Direction, set[str]] = {} + side1Directions: dict[Direction, set[str]] = {} + side2Directions: dict[Direction, set[str]] = {} + for glyph in pair.firstGlyphs: + bidis = glyph_bidi[glyph] + directions = glyph_direction[glyph] + for bidi in bidis: + side1Bidis.setdefault(bidi, set()).add(glyph) + for direction in directions: + side1Directions.setdefault(direction, set()).add(glyph) + for glyph in pair.secondGlyphs: + bidis = glyph_bidi[glyph] + directions = glyph_direction[glyph] + for bidi in bidis: + side2Bidis.setdefault(bidi, set()).add(glyph) + for direction in directions: + side2Directions.setdefault(direction, set()).add(glyph) + + for side1Direction, side2Direction in itertools.product( + sorted(side1Directions), sorted(side2Directions) + ): + localSide1: str | tuple[str, ...] + localSide2: str | tuple[str, ...] + if pair.firstIsClass: + localSide1 = tuple(sorted(side1Directions[side1Direction])) + else: + assert len(side1Directions[side1Direction]) == 1 + (localSide1,) = side1Directions[side1Direction] + if pair.secondIsClass: + localSide2 = tuple(sorted(side2Directions[side2Direction])) + else: + assert len(side2Directions[side2Direction]) == 1 + (localSide2,) = side2Directions[side2Direction] + + # Skip pairs with clashing directions (e.g. "a" to "alef-ar"). + if side1Direction != side2Direction and not any( + side is Direction.Neutral for side in (side1Direction, side2Direction) + ): + LOGGER.info( + "Skipping part of a kerning pair <%s %s %s> with mixed direction (%s, %s)", + localSide1, + localSide2, + pair.value, + side1Direction.name, + side2Direction.name, + ) + continue + + # Skip pairs with clashing BiDi classes (e.g. "alef-ar" to "one-ar"). 
+ localSide1Bidis = { + bidi + for glyph in side1Directions[side1Direction] + for bidi in glyph_bidi[glyph] + } + localSide2Bidis = { + bidi + for glyph in side2Directions[side2Direction] + for bidi in glyph_bidi[glyph] + } + if localSide1Bidis != localSide2Bidis and not any( + Direction.Neutral in side for side in (localSide1Bidis, localSide2Bidis) + ): + LOGGER.info( + "Skipping part of a kerning pair <%s %s %s> with conflicting BiDi classes", + localSide1, + localSide2, + pair.value, + ) + continue + + dominant_direction = ( + side1Direction if side2Direction is Direction.Neutral else side2Direction + ) + yield (dominant_direction, KerningPair(localSide1, localSide2, pair.value)) + + +def make_kerning_lookups( + context: KernContext, options: SimpleNamespace +) -> dict[Direction, dict[str, fea_ast.LookupBlock]]: + lookups: dict[Direction, dict[str, fea_ast.LookupBlock]] = {} + if context.kerning.base_pairs_by_direction: + make_split_kerning_lookups( + context, options, lookups, context.kerning.base_pairs_by_direction ) - return ast.PairPosStatement( - glyphs1=pair.side1, - valuerecord1=valuerecord, - glyphs2=pair.side2, - valuerecord2=None, - enumerated=enumerated, + if context.kerning.mark_pairs_by_direction: + make_split_kerning_lookups( + context, + options, + lookups, + context.kerning.mark_pairs_by_direction, + ignoreMarks=False, + suffix="_marks", ) + return lookups - def _makeKerningLookup( - self, name, pairs, exclude=None, rtl=False, ignoreMarks=True - ): - assert pairs - rules = [] + +def make_split_kerning_lookups( + context: KernContext, + options: SimpleNamespace, + lookups: dict[Direction, dict[str, fea_ast.LookupBlock]], + kerning_per_direction: dict[Direction, list[KerningPair]], + ignoreMarks: bool = True, + suffix: str = "", +) -> None: + bidiGlyphs = context.bidiGlyphs + side1Classes = context.kerning.side1Classes + side2Classes = context.kerning.side2Classes + + newClassDefs, newSide1Classes, newSide2Classes = make_all_glyph_class_definitions( + kerning_per_direction, context, context.feaFile + ) + # NOTE: Consider duplicate names a bug, even if the classes would carry + # the same glyphs. 
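Stepping back to `partition_by_direction` defined just above, a sketch of how one mixed class pair fans out into the three direction buckets (hypothetical glyph names; the imports assume this patch is applied):

```python
# Sketch: one mixed class pair fanning out into the three direction buckets.
# Hypothetical glyph names; imports assume this patch is applied.
from ufo2ft.featureWriters.kernFeatureWriter import KerningPair
from ufo2ft.featureWriters.kernFeatureWriter2 import Direction, partition_by_direction

bidi = {
    "A": {Direction.LeftToRight},
    "alef-ar": {Direction.RightToLeft},
    "period": {Direction.Neutral},
}
# Script direction and BiDi coincide for these glyphs, so reuse one mapping.
pair = KerningPair(("A", "alef-ar", "period"), "period", -10)
for direction, split in partition_by_direction(pair, bidi, bidi):
    print(direction.name, split.side1, split.side2)
# LeftToRight ('A',) period        -> kern_ltr
# Neutral ('period',) period       -> kern_dflt
# RightToLeft ('alef-ar',) period  -> kern_rtl
```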
+ assert not context.kerning.classDefs.keys() & newClassDefs.keys() + context.kerning.classDefs.update(newClassDefs) + assert not side1Classes.keys() & newSide1Classes.keys() + side1Classes.update(newSide1Classes) + assert not side2Classes.keys() & newSide2Classes.keys() + side2Classes.update(newSide2Classes) + + for direction, pairs in kerning_per_direction.items(): + lookupName = f"kern_{direction.value}{suffix}" + lookup = make_kerning_lookup( + context, options, lookupName, ignoreMarks=ignoreMarks + ) for pair in pairs: - if exclude is not None and exclude(pair): - self.log.debug("pair excluded from '%s' lookup: %r", name, pair) - continue - rules.append( - self._makePairPosRule( - pair, rtl=rtl, quantization=self.options.quantization - ) + bidiTypes = { + direction + for direction, glyphs in bidiGlyphs.items() + if not set(pair.glyphs).isdisjoint(glyphs) + } + if bidiTypes.issuperset(AMBIGUOUS_BIDIS): + assert None, "this should have been caught by the splitter" + # European and Arabic Numbers are always shaped LTR even in RTL scripts: + pairIsRtl = ( + direction == Direction.RightToLeft + and Direction.LeftToRight not in bidiTypes ) + rule = make_pairpos_rule(pair, side1Classes, side2Classes, pairIsRtl) + lookup.statements.append(rule) + lookups.setdefault(direction, {})[lookupName] = lookup - if rules: - lookup = ast.LookupBlock(name) - if ignoreMarks and self.options.ignoreMarks: - lookup.statements.append(ast.makeLookupFlag("IgnoreMarks")) - lookup.statements.extend(rules) - return lookup - def _makeKerningLookups(self): - cmap = self.makeUnicodeToGlyphNameMapping() - if any(unicodeScriptDirection(uv) == "RTL" for uv in cmap): - # If there are any characters from globally RTL scripts in the - # cmap, we compile a temporary GSUB table to resolve substitutions - # and group glyphs by script horizontal direction and bidirectional - # type. We then mark each kerning pair with these properties when - # any of the glyphs involved in a pair intersects these groups. - gsub = self.compileGSUB() - extras = self.extraSubstitutions() - dirGlyphs = classifyGlyphs(unicodeScriptDirection, cmap, gsub, extras) - directions = self._intersectPairs("directions", dirGlyphs) - shouldSplit = "RTL" in directions - if shouldSplit: - bidiGlyphs = classifyGlyphs(unicodeBidiType, cmap, gsub, extras) - self._intersectPairs("bidiTypes", bidiGlyphs) - else: - shouldSplit = False - - marks = self.context.gdefClasses.mark - lookups = {} - if shouldSplit: - # make one DFLT lookup with script-agnostic characters, and two - # LTR/RTL lookups excluding pairs from the opposite group. - # We drop kerning pairs with ambiguous direction: i.e. those containing - # glyphs from scripts with different overall horizontal direction, or - # glyphs with incompatible bidirectional type (e.g. arabic letters vs - # arabic numerals). 
- pairs = [] - for pair in self.context.kerning.pairs: - if ("RTL" in pair.directions and "LTR" in pair.directions) or ( - "R" in pair.bidiTypes and "L" in pair.bidiTypes - ): - self.log.warning( - "skipped kern pair with ambiguous direction: %r", pair - ) - continue - pairs.append(pair) - if not pairs: - return lookups - - if self.options.ignoreMarks: - # If there are pairs with a mix of mark/base then the IgnoreMarks - # flag is unnecessary and should not be set - basePairs, markPairs = self._splitBaseAndMarkPairs(pairs, marks) - if basePairs: - self._makeSplitDirectionKernLookups(lookups, basePairs) - if markPairs: - self._makeSplitDirectionKernLookups( - lookups, markPairs, ignoreMarks=False, suffix="_marks" - ) - else: - self._makeSplitDirectionKernLookups(lookups, pairs) - else: - # only make a single (implicitly LTR) lookup including all base/base pairs - # and a single lookup including all base/mark pairs (if any) - pairs = self.context.kerning.pairs - if self.options.ignoreMarks: - basePairs, markPairs = self._splitBaseAndMarkPairs(pairs, marks) - lookups["LTR"] = [] - if basePairs: - lookups["LTR"].append( - self._makeKerningLookup("kern_ltr", basePairs) - ) - if markPairs: - lookups["LTR"].append( - self._makeKerningLookup( - "kern_ltr_marks", markPairs, ignoreMarks=False - ) - ) - else: - lookups["LTR"] = [self._makeKerningLookup("kern_ltr", pairs)] - return lookups - - def _splitBaseAndMarkPairs(self, pairs, marks): - basePairs, markPairs = [], [] +def make_all_glyph_class_definitions( + kerning_per_direction: dict[Direction, list[KerningPair]], + context: KernContext, + feaFile: fea_ast.FeatureFile | None = None, +): + # Note: Refer to the context for existing classDefs and mappings of glyph + # class tuples to feaLib AST to avoid overwriting existing class names, + # because base and mark kerning pairs might be separate passes. + newClassDefs = {} + existingSide1Classes = context.kerning.side1Classes + existingSide2Classes = context.kerning.side2Classes + newSide1Classes = {} + newSide2Classes = {} + side1Membership = context.side1Membership + side2Membership = context.side2Membership + + if feaFile is not None: + classNames = {cdef.name for cdef in ast.iterClassDefinitions(feaFile)} + else: + classNames = set() + classNames.update(context.kerning.classDefs.keys()) + + # Generate common class names first so that common classes are correctly + # named in other lookups. 
+ for direction in ( + Direction.Neutral, + Direction.LeftToRight, + Direction.RightToLeft, + ): + for pair in kerning_per_direction.get(direction, []): + if ( + pair.firstIsClass + and pair.side1 not in existingSide1Classes + and pair.side1 not in newSide1Classes + ): + addClassDefinition( + "kern1", + pair.side1, + newSide1Classes, + side1Membership, + newClassDefs, + classNames, + direction.value, + ) + if ( + pair.secondIsClass + and pair.side2 not in existingSide2Classes + and pair.side2 not in newSide2Classes + ): + addClassDefinition( + "kern2", + pair.side2, + newSide2Classes, + side2Membership, + newClassDefs, + classNames, + direction.value, + ) + + return newClassDefs, newSide1Classes, newSide2Classes + + +def make_kerning_lookup( + context: KernContext, options: SimpleNamespace, name: str, ignoreMarks: bool = True +) -> fea_ast.LookupBlock: + lookup = fea_ast.LookupBlock(name) + if ignoreMarks and options.ignoreMarks: + # We only want to filter the spacing marks + marks = set(context.gdefClasses.mark or []) & set(context.glyphSet.keys()) + + spacing = [] if marks: - for pair in pairs: - if any(glyph in marks for glyph in pair.glyphs): - markPairs.append(pair) - else: - basePairs.append(pair) + spacing = filter_spacing_marks(context, marks) + if not spacing: + # Simple case, there are no spacing ("Spacing Combining") marks, + # do what we've always done. + lookup.statements.append(ast.makeLookupFlag("IgnoreMarks")) else: - basePairs[:] = pairs - return basePairs, markPairs + # We want spacing marks to block kerns. + className = f"MFS_{name}" + filteringClass = ast.makeGlyphClassDefinitions( + {className: spacing}, feaFile=context.feaFile + )[className] + lookup.statements.append(filteringClass) + lookup.statements.append( + ast.makeLookupFlag(markFilteringSet=filteringClass) + ) + return lookup - def _makeSplitDirectionKernLookups( - self, lookups, pairs, ignoreMarks=True, suffix="" - ): - dfltKern = self._makeKerningLookup( - "kern_dflt" + suffix, - pairs, - exclude=(lambda pair: {"LTR", "RTL"}.intersection(pair.directions)), - rtl=False, - ignoreMarks=ignoreMarks, + +def filter_spacing_marks(context: KernContext, marks: set[str]) -> list[str]: + if context.isVariable: + spacing = [] + for mark in marks: + if all( + source.font[mark].width != 0 + for source in context.font.sources + if mark in source.font + ): + spacing.append(mark) + return spacing + + return [mark for mark in marks if context.font[mark].width != 0] + + +def make_pairpos_rule( + pair: KerningPair, side1Classes, side2Classes, rtl: bool = False +) -> fea_ast.PairPosStatement: + enumerated = pair.firstIsClass ^ pair.secondIsClass + valuerecord = fea_ast.ValueRecord( + xPlacement=pair.value if rtl else None, + yPlacement=0 if rtl else None, + xAdvance=pair.value, + yAdvance=0 if rtl else None, + ) + + if pair.firstIsClass: + glyphs1 = fea_ast.GlyphClassName(side1Classes[pair.side1]) + else: + glyphs1 = fea_ast.GlyphName(pair.side1) + if pair.secondIsClass: + glyphs2 = fea_ast.GlyphClassName(side2Classes[pair.side2]) + else: + glyphs2 = fea_ast.GlyphName(pair.side2) + + return fea_ast.PairPosStatement( + glyphs1=glyphs1, + valuerecord1=valuerecord, + glyphs2=glyphs2, + valuerecord2=None, + enumerated=enumerated, + ) + + +def make_feature_blocks( + context: KernContext, lookups: dict[Direction, dict[str, Any]] +) -> Any: + features = {} + if "kern" in context.todo: + kern = fea_ast.FeatureBlock("kern") + register_lookups(context, kern, lookups) + if kern.statements: + features["kern"] = kern + if "dist" in 
context.todo: + dist = fea_ast.FeatureBlock("dist") + register_lookups(context, dist, lookups) + if dist.statements: + features["dist"] = dist + return features + + +def register_lookups( + context: KernContext, + feature: fea_ast.FeatureBlock, + lookups: dict[Direction, dict[str, fea_ast.LookupBlock]], +) -> None: + # Ensure we have kerning for pure common script runs (e.g. ">1") + isKernBlock = feature.name == "kern" + lookupsNeutral: list[fea_ast.LookupBlock] = [] + if isKernBlock and Direction.Neutral in lookups: + lookupsNeutral.extend( + lkp + for lkp in lookups[Direction.Neutral].values() + if lkp not in lookupsNeutral ) - if dfltKern: - lookups.setdefault("DFLT", []).append(dfltKern) - - ltrKern = self._makeKerningLookup( - "kern_ltr" + suffix, - pairs, - exclude=(lambda pair: not pair.directions or "RTL" in pair.directions), - rtl=False, - ignoreMarks=ignoreMarks, + + # InDesign bugfix: register kerning lookups for all LTR scripts under DFLT + # so that the basic composer, without a language selected, will still kern. + # Register LTR lookups if any, otherwise RTL lookups. + if isKernBlock: + lookupsLTR: list[fea_ast.LookupBlock] = ( + list(lookups[Direction.LeftToRight].values()) + if Direction.LeftToRight in lookups + else [] ) - if ltrKern: - lookups.setdefault("LTR", []).append(ltrKern) - - rtlKern = self._makeKerningLookup( - "kern_rtl" + suffix, - pairs, - exclude=(lambda pair: not pair.directions or "LTR" in pair.directions), - rtl=True, - ignoreMarks=ignoreMarks, + lookupsRTL: list[fea_ast.LookupBlock] = ( + list(lookups[Direction.RightToLeft].values()) + if Direction.RightToLeft in lookups + else [] ) - if rtlKern: - lookups.setdefault("RTL", []).append(rtlKern) - - def _makeFeatureBlocks(self, lookups): - features = {} - if "kern" in self.context.todo: - kern = ast.FeatureBlock("kern") - self._registerKernLookups(kern, lookups) - if kern.statements: - features["kern"] = kern - if "dist" in self.context.todo: - dist = ast.FeatureBlock("dist") - self._registerDistLookups(dist, lookups) - if dist.statements: - features["dist"] = dist - return features - - def _registerKernLookups(self, feature, lookups): - if "DFLT" in lookups: - ast.addLookupReferences(feature, lookups["DFLT"]) - - scriptGroups = self.context.scriptGroups - if "dist" in self.context.todo: - distScripts = scriptGroups["dist"] - else: - distScripts = {} - kernScripts = scriptGroups.get("kern", {}) - ltrScripts = kernScripts.get("LTR", []) - rtlScripts = kernScripts.get("RTL", []) - - ltrLookups = lookups.get("LTR") - rtlLookups = lookups.get("RTL") - if ltrLookups and rtlLookups: - if ltrScripts and rtlScripts: - for script, langs in ltrScripts: - ast.addLookupReferences(feature, ltrLookups, script, langs) - for script, langs in rtlScripts: - ast.addLookupReferences(feature, rtlLookups, script, langs) - elif ltrScripts: - ast.addLookupReferences(feature, rtlLookups, script="DFLT") - for script, langs in ltrScripts: - ast.addLookupReferences(feature, ltrLookups, script, langs) - elif rtlScripts: - ast.addLookupReferences(feature, ltrLookups, script="DFLT") - for script, langs in rtlScripts: - ast.addLookupReferences(feature, rtlLookups, script, langs) - else: - if not (distScripts.get("LTR") and distScripts.get("RTL")): - raise ValueError( - "cannot use DFLT script for both LTR and RTL kern " - "lookups; add 'languagesystems' to features for at " - "least one LTR or RTL script using the kern feature" - ) - elif ltrLookups: - if not (rtlScripts or distScripts): - ast.addLookupReferences(feature, ltrLookups) - 
else: - ast.addLookupReferences(feature, ltrLookups, script="DFLT") - for script, langs in ltrScripts: - ast.addLookupReferences(feature, ltrLookups, script, langs) - elif rtlLookups: - if not (ltrScripts or distScripts): - ast.addLookupReferences(feature, rtlLookups) - else: - ast.addLookupReferences(feature, rtlLookups, script="DFLT") - for script, langs in rtlScripts: - ast.addLookupReferences(feature, rtlLookups, script, langs) - - def _registerDistLookups(self, feature, lookups): - scripts = self.context.scriptGroups["dist"] - ltrLookups = lookups.get("LTR") - if ltrLookups: - for script, langs in scripts.get("LTR", []): - ast.addLookupReferences(feature, ltrLookups, script, langs) - rtlLookups = lookups.get("RTL") - if rtlLookups: - for script, langs in scripts.get("RTL", []): - ast.addLookupReferences(feature, rtlLookups, script, langs) + lookupsNeutral.extend( + lkp for lkp in (lookupsLTR or lookupsRTL) if lkp not in lookupsNeutral + ) + + if lookupsNeutral: + languages = context.feaLanguagesByTag.get("DFLT", ["dflt"]) + ast.addLookupReferences(feature, lookupsNeutral, "DFLT", languages) + + # Feature blocks use script tags to distinguish what to run for a + # Unicode script. + # + # "Script tags generally correspond to a Unicode script. However, the + # associations between them may not always be one-to-one, and the + # OpenType script tags are not guaranteed to be the same as Unicode + # Script property-value aliases or ISO 15924 script IDs." + # + # E.g. {"latn": "Latn", "telu": "Telu", "tel2": "Telu"} + # + # Skip DFLT script because we always take care of it above for `kern`. + # It never occurs in `dist`. + if isKernBlock: + scriptsToReference: set[str] = context.knownScripts - DIST_ENABLED_SCRIPTS + else: + scriptsToReference = DIST_ENABLED_SCRIPTS.intersection(context.knownScripts) + scriptsToReference -= DFLT_SCRIPTS + for script in sorted(scriptsToReference): + script_direction = script_horizontal_direction(script, "LTR") + for tag in unicodedata.ot_tags_from_script(script): + lookupsForThisScript = {} + if Direction.Neutral in lookups: + lookupsForThisScript.update(lookups[Direction.Neutral]) + if script_direction == "LTR" and Direction.LeftToRight in lookups: + lookupsForThisScript.update(lookups[Direction.LeftToRight]) + if script_direction == "RTL" and Direction.RightToLeft in lookups: + lookupsForThisScript.update(lookups[Direction.RightToLeft]) + if not lookupsForThisScript: + continue + if feature.statements: + feature.statements.append(fea_ast.Comment("")) + # Register the lookups for all languages defined in the feature + # file for the script, otherwise kerning is not applied if any + # language is set at all. 
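The script-to-OpenType-tag fan-out used in this registration loop can be checked directly with fontTools; a small sketch (Kannada carries both a legacy and a v2 tag, hence the knd2/knda pairs in the snapshots below):

```python
# Sketch: the Unicode-script to OpenType-tag fan-out used when registering
# lookups; Kannada has a legacy and a v2 tag, hence knd2/knda in the snapshots.
from fontTools import unicodedata

print(unicodedata.ot_tags_from_script("Knda"))  # ['knd2', 'knda']
print(unicodedata.ot_tags_from_script("Latn"))  # ['latn']
print(unicodedata.script_horizontal_direction("Arab", "LTR"))  # 'RTL'
```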
+ languages = context.feaLanguagesByTag.get(tag, ["dflt"]) + ast.addLookupReferences( + feature, lookupsForThisScript.values(), tag, languages + ) \ No newline at end of file diff --git a/tests/featureWriters/__snapshots__/kernFeatureWriter2_test.ambr b/tests/featureWriters/__snapshots__/kernFeatureWriter2_test.ambr index 73fc632f..def5d3d7 100644 --- a/tests/featureWriters/__snapshots__/kernFeatureWriter2_test.ambr +++ b/tests/featureWriters/__snapshots__/kernFeatureWriter2_test.ambr @@ -1,4 +1,36 @@ # serializer version: 1 +# name: test_ambiguous_direction_pair + ''' + lookup kern_ltr { + lookupflag IgnoreMarks; + pos bar bar 1; + } kern_ltr; + + lookup kern_rtl { + lookupflag IgnoreMarks; + pos bar bar 1; + } kern_rtl; + + feature kern { + script DFLT; + language dflt; + lookup kern_ltr; + + script arab; + language dflt; + lookup kern_rtl; + + script hebr; + language dflt; + lookup kern_rtl; + + script latn; + language dflt; + lookup kern_ltr; + } kern; + + ''' +# --- # name: test_arabic_numerals ''' lookup kern_rtl { @@ -7,6 +39,8 @@ } kern_rtl; feature kern { + script DFLT; + language dflt; lookup kern_rtl; } kern; @@ -20,6 +54,12 @@ } kern_rtl; feature kern { + script DFLT; + language dflt; + lookup kern_rtl; + + script arab; + language dflt; lookup kern_rtl; } kern; @@ -33,6 +73,16 @@ } kern_rtl; feature kern { + script DFLT; + language dflt; + lookup kern_rtl; + + script arab; + language dflt; + lookup kern_rtl; + + script thaa; + language dflt; lookup kern_rtl; } kern; @@ -46,6 +96,12 @@ } kern_rtl; feature kern { + script DFLT; + language dflt; + lookup kern_rtl; + + script thaa; + language dflt; lookup kern_rtl; } kern; @@ -53,67 +109,108 @@ # --- # name: test_defining_classdefs ''' - @kern1.shatelugu.below = [sha-telugu.below]; - @kern1.ssatelugu.alt = [ssa-telugu.alt ss-telugu.alt]; - @kern2.katelugu.below = [ka-telugu.below]; - @kern2.rVocalicMatratelugu = [rVocalicMatra-telugu]; + @kern1.dflt.ssatelugu.alt = [ss-telugu.alt]; + @kern1.ltr.shatelugu.below = [sha-telugu.below]; + @kern1.ltr.ssatelugu.alt = [ssa-telugu.alt]; + @kern2.ltr.katelugu.below = [ka-telugu.below]; + @kern2.ltr.rVocalicMatratelugu = [rVocalicMatra-telugu]; + + lookup kern_dflt { + lookupflag IgnoreMarks; + enum pos @kern1.dflt.ssatelugu.alt sha-telugu.below 150; + } kern_dflt; lookup kern_ltr { lookupflag IgnoreMarks; - enum pos @kern1.ssatelugu.alt sha-telugu.below 150; - pos @kern1.shatelugu.below @kern2.katelugu.below 20; - pos @kern1.ssatelugu.alt @kern2.katelugu.below 60; + enum pos @kern1.ltr.ssatelugu.alt sha-telugu.below 150; + pos @kern1.ltr.shatelugu.below @kern2.ltr.katelugu.below 20; + pos @kern1.dflt.ssatelugu.alt @kern2.ltr.katelugu.below 60; + pos @kern1.ltr.ssatelugu.alt @kern2.ltr.katelugu.below 60; } kern_ltr; lookup kern_ltr_marks { - pos @kern1.ssatelugu.alt @kern2.rVocalicMatratelugu 180; + pos @kern1.dflt.ssatelugu.alt @kern2.ltr.rVocalicMatratelugu 180; + pos @kern1.ltr.ssatelugu.alt @kern2.ltr.rVocalicMatratelugu 180; } kern_ltr_marks; feature kern { + script DFLT; + language dflt; + lookup kern_dflt; lookup kern_ltr; lookup kern_ltr_marks; } kern; + feature dist { + script tel2; + language dflt; + lookup kern_dflt; + lookup kern_ltr; + lookup kern_ltr_marks; + + script telu; + language dflt; + lookup kern_dflt; + lookup kern_ltr; + lookup kern_ltr_marks; + } dist; + ''' # --- # name: test_dflt_language ''' + lookup kern_dflt { + lookupflag IgnoreMarks; + pos comma comma 2; + } kern_dflt; + lookup kern_ltr { lookupflag IgnoreMarks; pos a a 1; - pos comma comma 2; } kern_ltr; 
feature kern { + script DFLT; + language dflt; + lookup kern_dflt; lookup kern_ltr; + language ZND; + + script latn; + language dflt; + lookup kern_dflt; + lookup kern_ltr; + language ANG; } kern; ''' # --- # name: test_dist_LTR ''' - @kern1.KND_aaMatra_R = [aaMatra_kannada]; - @kern2.KND_ailength_L = [aaMatra_kannada]; + @kern1.ltr.KND_aaMatra_R = [aaMatra_kannada]; + @kern2.ltr.KND_ailength_L = [aaMatra_kannada]; lookup kern_ltr { lookupflag IgnoreMarks; - pos @kern1.KND_aaMatra_R @kern2.KND_ailength_L 34; + pos @kern1.ltr.KND_aaMatra_R @kern2.ltr.KND_ailength_L 34; } kern_ltr; feature kern { script DFLT; language dflt; lookup kern_ltr; + script latn; language dflt; lookup kern_ltr; } kern; feature dist { - script knda; + script knd2; language dflt; lookup kern_ltr; - script knd2; + + script knda; language dflt; lookup kern_ltr; } dist; @@ -122,12 +219,12 @@ # --- # name: test_dist_LTR_and_RTL ''' - @kern1.KND_aaMatra_R = [aaMatra_kannada]; - @kern2.KND_ailength_L = [aaMatra_kannada]; + @kern1.ltr.KND_aaMatra_R = [aaMatra_kannada]; + @kern2.ltr.KND_ailength_L = [aaMatra_kannada]; lookup kern_ltr { lookupflag IgnoreMarks; - pos @kern1.KND_aaMatra_R @kern2.KND_ailength_L 34; + pos @kern1.ltr.KND_aaMatra_R @kern2.ltr.KND_ailength_L 34; } kern_ltr; lookup kern_rtl { @@ -135,16 +232,24 @@ pos u10A1E u10A06 <117 0 117 0>; } kern_rtl; - feature dist { - script knda; + feature kern { + script DFLT; language dflt; lookup kern_ltr; + } kern; + + feature dist { + script khar; + language dflt; + lookup kern_rtl; + script knd2; language dflt; lookup kern_ltr; - script khar; + + script knda; language dflt; - lookup kern_rtl; + lookup kern_ltr; } dist; ''' @@ -160,6 +265,7 @@ script DFLT; language dflt; lookup kern_rtl; + script arab; language dflt; lookup kern_rtl; @@ -175,44 +281,50 @@ # --- # name: test_hyphenated_duplicates ''' - @kern1.hyphen = [comma]; - @kern1.hyphen_1 = [period]; + @kern1.dflt.hyphen = [comma]; + @kern1.dflt.hyphen_1 = [period]; - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; - enum pos @kern1.hyphen comma 1; - enum pos @kern1.hyphen_1 period 2; - } kern_ltr; + enum pos @kern1.dflt.hyphen comma 1; + enum pos @kern1.dflt.hyphen_1 period 2; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; } kern; ''' # --- # name: test_ignoreMarks ''' - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; pos four six -55; pos one six -30; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; } kern; ''' # --- # name: test_ignoreMarks.1 ''' - lookup kern_ltr { + lookup kern_dflt { pos four six -55; pos one six -30; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; } kern; ''' @@ -225,39 +337,45 @@ # } kern; - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; pos seven six 25; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; } kern; ''' # --- # name: test_insert_comment_after.1 ''' - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; pos seven six 25; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; } kern; ''' # --- # name: test_insert_comment_before ''' - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; pos seven six 25; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup 
kern_dflt; } kern; feature kern { @@ -270,26 +388,30 @@ # --- # name: test_insert_comment_before.1 ''' - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; pos seven six 25; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; } kern; ''' # --- # name: test_insert_comment_before_extended ''' - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; pos seven six 25; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; } kern; feature kern { @@ -302,56 +424,65 @@ # --- # name: test_insert_comment_middle ''' - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; pos seven six 25; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; } kern; ''' # --- # name: test_kern_LTR_and_RTL ''' - @kern1.A = [A Aacute]; - @kern1.reh = [reh-ar zain-ar reh-ar.fina]; - @kern2.alef = [alef-ar alef-ar.isol]; + @kern1.ltr.A = [A Aacute]; + @kern1.rtl.reh = [reh-ar reh-ar.fina zain-ar]; + @kern2.rtl.alef = [alef-ar alef-ar.isol]; lookup kern_dflt { pos seven four -25; } kern_dflt; lookup kern_ltr { - enum pos @kern1.A V -40; + enum pos @kern1.ltr.A V -40; } kern_ltr; lookup kern_rtl { pos four-ar seven-ar -30; pos reh-ar.fina lam-ar.init <-80 0 -80 0>; - pos @kern1.reh @kern2.alef <-100 0 -100 0>; + pos @kern1.rtl.reh @kern2.rtl.alef <-100 0 -100 0>; } kern_rtl; feature kern { - lookup kern_dflt; - script latn; + script DFLT; language dflt; + lookup kern_dflt; lookup kern_ltr; - language TRK; + script arab; language dflt; + lookup kern_dflt; lookup kern_rtl; language URD; + + script latn; + language dflt; + lookup kern_dflt; + lookup kern_ltr; + language TRK; } kern; ''' # --- # name: test_kern_LTR_and_RTL_with_marks ''' - @kern1.A = [A Aacute]; - @kern1.reh = [reh-ar zain-ar reh-ar.fina]; - @kern2.alef = [alef-ar alef-ar.isol]; + @kern1.ltr.A = [A Aacute]; + @kern1.rtl.reh = [reh-ar reh-ar.fina zain-ar]; + @kern2.rtl.alef = [alef-ar alef-ar.isol]; lookup kern_dflt { lookupflag IgnoreMarks; @@ -360,7 +491,7 @@ lookup kern_ltr { lookupflag IgnoreMarks; - enum pos @kern1.A V -40; + enum pos @kern1.ltr.A V -40; } kern_ltr; lookup kern_ltr_marks { @@ -371,7 +502,7 @@ lookupflag IgnoreMarks; pos four-ar seven-ar -30; pos reh-ar.fina lam-ar.init <-80 0 -80 0>; - pos @kern1.reh @kern2.alef <-100 0 -100 0>; + pos @kern1.rtl.reh @kern2.rtl.alef <-100 0 -100 0>; } kern_rtl; lookup kern_rtl_marks { @@ -379,17 +510,25 @@ } kern_rtl_marks; feature kern { - lookup kern_dflt; - script latn; + script DFLT; language dflt; + lookup kern_dflt; lookup kern_ltr; lookup kern_ltr_marks; - language TRK; + script arab; language dflt; + lookup kern_dflt; lookup kern_rtl; lookup kern_rtl_marks; language URD; + + script latn; + language dflt; + lookup kern_dflt; + lookup kern_ltr; + lookup kern_ltr_marks; + language TRK; } kern; ''' @@ -407,6 +546,13 @@ } kern_rtl; feature kern { + script DFLT; + language dflt; + lookup kern_dflt; + lookup kern_rtl; + + script hebr; + language dflt; lookup kern_dflt; lookup kern_rtl; } kern; @@ -415,13 +561,13 @@ # --- # name: test_kern_RTL_with_marks ''' - @kern1.reh = [reh-ar zain-ar reh-ar.fina]; - @kern2.alef = [alef-ar alef-ar.isol]; + @kern1.rtl.reh = [reh-ar reh-ar.fina zain-ar]; + @kern2.rtl.alef = [alef-ar alef-ar.isol]; lookup kern_rtl { lookupflag IgnoreMarks; pos reh-ar.fina lam-ar.init <-80 0 -80 0>; - pos @kern1.reh @kern2.alef <-100 0 -100 0>; + pos @kern1.rtl.reh @kern2.rtl.alef 
<-100 0 -100 0>; } kern_rtl; lookup kern_rtl_marks { @@ -429,14 +575,27 @@ } kern_rtl_marks; feature kern { + script DFLT; + language dflt; + lookup kern_rtl; + lookup kern_rtl_marks; + + script arab; + language dflt; lookup kern_rtl; lookup kern_rtl_marks; + language ARA; } kern; ''' # --- # name: test_kern_hira_kana_hrkt ''' + lookup kern_dflt { + lookupflag IgnoreMarks; + pos period period 5; + } kern_dflt; + lookup kern_ltr { lookupflag IgnoreMarks; pos a-hira a-hira 1; @@ -447,10 +606,165 @@ pos a-kana period 8; pos period a-hira 7; pos period a-kana 9; - pos period period 5; } kern_ltr; feature kern { + script DFLT; + language dflt; + lookup kern_dflt; + lookup kern_ltr; + + script kana; + language dflt; + lookup kern_dflt; + lookup kern_ltr; + } kern; + + ''' +# --- +# name: test_kern_independent_of_languagesystem[same] + ''' + lookup kern_ltr { + lookupflag IgnoreMarks; + pos A V -40; + } kern_ltr; + + lookup kern_rtl { + lookupflag IgnoreMarks; + pos reh-ar alef-ar <-100 0 -100 0>; + } kern_rtl; + + feature kern { + script DFLT; + language dflt; + lookup kern_ltr; + + script arab; + language dflt; + lookup kern_rtl; + + script latn; + language dflt; + lookup kern_ltr; + } kern; + + ''' +# --- +# name: test_kern_mixed_bidis + ''' + lookup kern_dflt { + lookupflag IgnoreMarks; + pos comma comma -1; + } kern_dflt; + + lookup kern_ltr { + lookupflag IgnoreMarks; + pos a a 1; + pos a comma 2; + pos comma a 3; + } kern_ltr; + + lookup kern_rtl { + lookupflag IgnoreMarks; + pos alef-ar alef-ar <4 0 4 0>; + pos alef-ar comma-ar <5 0 5 0>; + pos comma-ar alef-ar <6 0 6 0>; + pos comma-ar one-adlam <12 0 12 0>; + pos one-adlam comma-ar <11 0 11 0>; + pos one-adlam one-adlam <10 0 10 0>; + pos one-ar one-ar 9; + } kern_rtl; + + feature kern { + script DFLT; + language dflt; + lookup kern_dflt; + lookup kern_ltr; + + script arab; + language dflt; + lookup kern_dflt; + lookup kern_rtl; + + script latn; + language dflt; + lookup kern_dflt; + lookup kern_ltr; + } kern; + + feature dist { + script adlm; + language dflt; + lookup kern_dflt; + lookup kern_rtl; + } dist; + + ''' +# --- +# name: test_kern_split_and_drop + ''' + @kern1.ltr.bar = [a-cy]; + @kern1.ltr.bar_1 = [period]; + @kern1.ltr.foo = [a a-orya alpha]; + @kern2.ltr.bar = [a-cy]; + @kern2.ltr.bar_1 = [period]; + @kern2.ltr.foo = [a a-orya alpha]; + + lookup kern_ltr { + lookupflag IgnoreMarks; + pos @kern1.ltr.foo @kern2.ltr.bar 20; + pos @kern1.ltr.foo @kern2.ltr.bar_1 20; + pos @kern1.ltr.bar @kern2.ltr.foo 20; + pos @kern1.ltr.bar_1 @kern2.ltr.foo 20; + } kern_ltr; + + feature kern { + script DFLT; + language dflt; + lookup kern_ltr; + + script cyrl; + language dflt; + lookup kern_ltr; + + script grek; + language dflt; + lookup kern_ltr; + + script latn; + language dflt; + lookup kern_ltr; + } kern; + + feature dist { + script ory2; + language dflt; + lookup kern_ltr; + + script orya; + language dflt; + lookup kern_ltr; + } dist; + + ''' +# --- +# name: test_kern_split_and_drop_mixed + ''' + @kern1.ltr.foo = [V W]; + @kern2.ltr.foo = [W]; + + lookup kern_ltr { + lookupflag IgnoreMarks; + pos @kern1.ltr.foo @kern2.ltr.foo -20; + } kern_ltr; + + feature kern { + script DFLT; + language dflt; + lookup kern_ltr; + + script latn; + language dflt; lookup kern_ltr; } kern; @@ -458,8 +772,18 @@ # --- # name: test_kern_split_multi_glyph_class[same] ''' - @kern1.foo = [a period]; - @kern2.foo = [b period]; + @kern1.dflt.foo = [period]; + @kern1.ltr.foo = [a]; + @kern2.dflt.foo = [period]; + @kern2.ltr.foo = [b]; + + lookup kern_dflt { + 
lookupflag IgnoreMarks; + pos period period 9; + enum pos period @kern2.dflt.foo 13; + enum pos @kern1.dflt.foo period 11; + pos @kern1.dflt.foo @kern2.dflt.foo 14; + } kern_dflt; lookup kern_ltr { lookupflag IgnoreMarks; @@ -471,15 +795,26 @@ pos b period 6; pos period a 7; pos period b 8; - pos period period 9; - enum pos a @kern2.foo 12; - enum pos period @kern2.foo 13; - enum pos @kern1.foo b 10; - enum pos @kern1.foo period 11; - pos @kern1.foo @kern2.foo 14; + enum pos a @kern2.ltr.foo 12; + enum pos a @kern2.dflt.foo 12; + enum pos period @kern2.ltr.foo 13; + enum pos @kern1.ltr.foo b 10; + enum pos @kern1.ltr.foo period 11; + enum pos @kern1.dflt.foo b 10; + pos @kern1.ltr.foo @kern2.ltr.foo 14; + pos @kern1.ltr.foo @kern2.dflt.foo 14; + pos @kern1.dflt.foo @kern2.ltr.foo 14; } kern_ltr; feature kern { + script DFLT; + language dflt; + lookup kern_dflt; + lookup kern_ltr; + + script latn; + language dflt; + lookup kern_dflt; lookup kern_ltr; } kern; @@ -487,18 +822,24 @@ # --- # name: test_kern_uniqueness ''' - @kern1.questiondown = [questiondown]; - @kern2.y = [y]; + @kern1.ltr.questiondown = [questiondown]; + @kern2.ltr.y = [y]; lookup kern_ltr { lookupflag IgnoreMarks; pos questiondown y 35; - enum pos questiondown @kern2.y -35; - enum pos @kern1.questiondown y 35; - pos @kern1.questiondown @kern2.y 15; + enum pos questiondown @kern2.ltr.y -35; + enum pos @kern1.ltr.questiondown y 35; + pos @kern1.ltr.questiondown @kern2.ltr.y 15; } kern_ltr; feature kern { + script DFLT; + language dflt; + lookup kern_ltr; + + script latn; + language dflt; lookup kern_ltr; } kern; @@ -506,7 +847,7 @@ # --- # name: test_kern_zyyy_zinh ''' - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; pos uni0640 uni0640 0; pos uni0650 uni0650 1; @@ -548,37 +889,86 @@ pos uniA700 uniA700 27; pos uniA830 uniA830 28; pos uniFF70 uniFF70 29; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; + + script grek; + language dflt; + lookup kern_dflt; + + script hani; + language dflt; + lookup kern_dflt; + + script kana; + language dflt; + lookup kern_dflt; } kern; + feature dist { + script dev2; + language dflt; + lookup kern_dflt; + + script deva; + language dflt; + lookup kern_dflt; + + script dupl; + language dflt; + lookup kern_dflt; + } dist; + ''' # --- # name: test_mark_base_kerning ''' - @kern1.etamil = [aulengthmark-tamil va-tamil]; - @kern2.etamil = [aulengthmark-tamil va-tamil]; + @kern1.ltr.etamil = [va-tamil]; + @kern1.ltr.etamil_1 = [aulengthmark-tamil]; + @kern2.ltr.etamil = [va-tamil]; + @kern2.ltr.etamil_1 = [aulengthmark-tamil]; lookup kern_ltr { lookupflag IgnoreMarks; pos aa-tamil va-tamil -20; pos va-tamil aa-tamil -20; + enum pos aa-tamil @kern2.ltr.etamil -35; + enum pos @kern1.ltr.etamil aa-tamil -35; + pos @kern1.ltr.etamil @kern2.ltr.etamil -100; } kern_ltr; lookup kern_ltr_marks { pos aulengthmark-tamil aulengthmark-tamil -200; - enum pos aa-tamil @kern2.etamil -35; - enum pos @kern1.etamil aa-tamil -35; - pos @kern1.etamil @kern2.etamil -100; + enum pos aa-tamil @kern2.ltr.etamil_1 -35; + enum pos @kern1.ltr.etamil_1 aa-tamil -35; + pos @kern1.ltr.etamil_1 @kern2.ltr.etamil_1 -100; + pos @kern1.ltr.etamil_1 @kern2.ltr.etamil -100; + pos @kern1.ltr.etamil @kern2.ltr.etamil_1 -100; } kern_ltr_marks; feature kern { + script DFLT; + language dflt; lookup kern_ltr; lookup kern_ltr_marks; } kern; + feature dist { + script tml2; + language dflt; + lookup kern_ltr; + lookup kern_ltr_marks; + + script taml; + language 
dflt; + lookup kern_ltr; + lookup kern_ltr_marks; + } dist; + ''' # --- # name: test_mark_to_base_kern @@ -593,6 +983,13 @@ } kern_ltr_marks; feature kern { + script DFLT; + language dflt; + lookup kern_ltr; + lookup kern_ltr_marks; + + script latn; + language dflt; lookup kern_ltr; lookup kern_ltr_marks; } kern; @@ -607,6 +1004,12 @@ } kern_ltr; feature kern { + script DFLT; + language dflt; + lookup kern_ltr; + + script latn; + language dflt; lookup kern_ltr; } kern; @@ -614,12 +1017,14 @@ # --- # name: test_mark_to_base_only ''' - lookup kern_ltr_marks { + lookup kern_dflt_marks { pos A acutecomb -55; - } kern_ltr_marks; + } kern_dflt_marks; feature kern { - lookup kern_ltr_marks; + script DFLT; + language dflt; + lookup kern_dflt_marks; } kern; ''' @@ -630,13 +1035,15 @@ pos one four' -50 six; } kern; - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; pos seven six 25; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; } kern; ''' @@ -651,14 +1058,16 @@ # --- # name: test_quantize ''' - lookup kern_ltr { + lookup kern_dflt { lookupflag IgnoreMarks; pos four six -55; pos one six -25; - } kern_ltr; + } kern_dflt; feature kern { - lookup kern_ltr; + script DFLT; + language dflt; + lookup kern_dflt; } kern; ''' @@ -666,7 +1075,8 @@ # name: test_skip_spacing_marks ''' lookup kern_ltr { - lookupflag IgnoreMarks; + @MFS_kern_ltr = [highspacingdot-deva]; + lookupflag UseMarkFilteringSet @MFS_kern_ltr; pos ka-deva ra-deva -250; pos ra-deva ka-deva -250; } kern_ltr; @@ -677,29 +1087,48 @@ } kern_ltr_marks; feature kern { + script DFLT; + language dflt; lookup kern_ltr; lookup kern_ltr_marks; } kern; + feature dist { + script dev2; + language dflt; + lookup kern_ltr; + lookup kern_ltr_marks; + + script deva; + language dflt; + lookup kern_ltr; + lookup kern_ltr_marks; + } dist; + ''' # --- # name: test_skip_zero_class_kerns ''' - @kern1.baz = [E F]; - @kern1.foo = [A B]; - @kern2.bar = [C D]; - @kern2.nul = [G H]; + @kern1.ltr.baz = [E F]; + @kern1.ltr.foo = [A B]; + @kern2.ltr.bar = [C D]; lookup kern_ltr { lookupflag IgnoreMarks; pos G H -5; - enum pos A @kern2.bar 5; - enum pos @kern1.foo D 15; - pos @kern1.baz @kern2.bar -10; - pos @kern1.foo @kern2.bar 10; + enum pos A @kern2.ltr.bar 5; + enum pos @kern1.ltr.foo D 15; + pos @kern1.ltr.foo @kern2.ltr.bar 10; + pos @kern1.ltr.baz @kern2.ltr.bar -10; } kern_ltr; feature kern { + script DFLT; + language dflt; + lookup kern_ltr; + + script latn; + language dflt; lookup kern_ltr; } kern; diff --git a/tests/featureWriters/variableFeatureWriter_test.py b/tests/featureWriters/variableFeatureWriter_test.py index e81a90e7..508d1803 100644 --- a/tests/featureWriters/variableFeatureWriter_test.py +++ b/tests/featureWriters/variableFeatureWriter_test.py @@ -103,18 +103,22 @@ def test_variable_features_old_kern_writer(FontClass): markClass dotabove-ar @MC_top; markClass gravecmb @MC_top; - @kern1.a = [a]; - @kern1.alef = [alef-ar.fina]; - @kern2.a = [a]; - @kern2.alef = [alef-ar.fina]; + @kern1.rtl.alef = [alef-ar.fina]; + @kern2.rtl.alef = [alef-ar.fina]; lookup kern_rtl { lookupflag IgnoreMarks; pos alef-ar.fina alef-ar.fina <(wght=100:15 wght=1000:35) 0 (wght=100:15 wght=1000:35) 0>; - pos @kern1.alef @kern2.alef <(wght=100:0 wght=1000:1) 0 (wght=100:0 wght=1000:1) 0>; + pos @kern1.rtl.alef @kern2.rtl.alef <(wght=100:0 wght=1000:1) 0 (wght=100:0 wght=1000:1) 0>; } kern_rtl; feature kern { + script DFLT; + language dflt; + lookup kern_rtl; + + script arab; + language dflt; 
lookup kern_rtl; } kern; @@ -142,4 +146,4 @@ def test_variable_features_old_kern_writer(FontClass): } curs; """ # noqa: B950 - ) + ) \ No newline at end of file