initial commit, 4.5 stable
2025-09-16 20:46:46 -04:00
commit 9d30169a8d
13378 changed files with 7050105 additions and 0 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,137 @@
/*
* Copyright © 2018 Ebrahim Byagowi
* Copyright © 2020 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
*/
#ifndef OT_COLOR_COLR_COLRV1_CLOSURE_HH
#define OT_COLOR_COLR_COLRV1_CLOSURE_HH
#include "../../../hb-open-type.hh"
#include "COLR.hh"
/*
* COLR -- Color
* https://docs.microsoft.com/en-us/typography/opentype/spec/colr
*/
namespace OT {
HB_INTERNAL void PaintColrLayers::closurev1 (hb_colrv1_closure_context_t* c) const
{
c->add_layer_indices (firstLayerIndex, numLayers);
const LayerList &paint_offset_lists = c->get_colr_table ()->get_layerList ();
for (unsigned i = firstLayerIndex; i < firstLayerIndex + numLayers; i++)
{
const Paint &paint = std::addressof (paint_offset_lists) + paint_offset_lists[i];
paint.dispatch (c);
}
}
HB_INTERNAL void PaintGlyph::closurev1 (hb_colrv1_closure_context_t* c) const
{
c->add_glyph (gid);
(this+paint).dispatch (c);
}
HB_INTERNAL void PaintColrGlyph::closurev1 (hb_colrv1_closure_context_t* c) const
{
const COLR *colr_table = c->get_colr_table ();
const BaseGlyphPaintRecord* baseglyph_paintrecord = colr_table->get_base_glyph_paintrecord (gid);
if (!baseglyph_paintrecord) return;
c->add_glyph (gid);
const BaseGlyphList &baseglyph_list = colr_table->get_baseglyphList ();
(&baseglyph_list+baseglyph_paintrecord->paint).dispatch (c);
}
template <template<typename> class Var>
HB_INTERNAL void PaintTransform<Var>::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
(this+transform).closurev1 (c);
}
HB_INTERNAL void PaintTranslate::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
c->num_var_idxes = 2;
}
HB_INTERNAL void PaintScale::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
c->num_var_idxes = 2;
}
HB_INTERNAL void PaintScaleAroundCenter::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
c->num_var_idxes = 4;
}
HB_INTERNAL void PaintScaleUniform::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
c->num_var_idxes = 1;
}
HB_INTERNAL void PaintScaleUniformAroundCenter::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
c->num_var_idxes = 3;
}
HB_INTERNAL void PaintRotate::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
c->num_var_idxes = 1;
}
HB_INTERNAL void PaintRotateAroundCenter::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
c->num_var_idxes = 3;
}
HB_INTERNAL void PaintSkew::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
c->num_var_idxes = 2;
}
HB_INTERNAL void PaintSkewAroundCenter::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
c->num_var_idxes = 4;
}
HB_INTERNAL void PaintComposite::closurev1 (hb_colrv1_closure_context_t* c) const
{
(this+src).dispatch (c);
(this+backdrop).dispatch (c);
}
} /* namespace OT */
#endif /* OT_COLOR_COLR_COLRV1_CLOSURE_HH */
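Aside: taken together, the closurev1 methods above compute a transitive closure over the COLRv1 paint graph; each paint adds any glyph it references, then dispatches into its children. A toy restatement over plain containers (ToyPaint and closure are illustrative names, not HarfBuzz types; the real closure context also tracks visited base glyphs so cyclic graphs terminate):

#include <cstdint>
#include <set>
#include <vector>

// Illustrative stand-in for a COLRv1 paint node: PaintGlyph references a
// glyph and one child; PaintComposite references two children; etc.
struct ToyPaint
{
  uint32_t glyph_id;                 // 0 = no glyph referenced by this node
  std::vector<ToyPaint *> children;  // child paints this node dispatches into
};

// Collect every glyph reachable from a root paint, mirroring how
// PaintGlyph::closurev1 adds its gid and then dispatches into (this+paint).
static void closure (const ToyPaint *p, std::set<uint32_t> &glyphs)
{
  if (!p) return;
  if (p->glyph_id) glyphs.insert (p->glyph_id);
  for (const ToyPaint *child : p->children)
    closure (child, glyphs);
}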


@@ -0,0 +1,366 @@
/*
* Copyright © 2016 Google, Inc.
* Copyright © 2018 Ebrahim Byagowi
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Sascha Brawer
*/
#ifndef OT_COLOR_CPAL_CPAL_HH
#define OT_COLOR_CPAL_CPAL_HH
#include "../../../hb-open-type.hh"
#include "../../../hb-ot-color.h"
#include "../../../hb-ot-name.h"
/*
* CPAL -- Color Palette
* https://docs.microsoft.com/en-us/typography/opentype/spec/cpal
*/
#define HB_OT_TAG_CPAL HB_TAG('C','P','A','L')
namespace OT {
struct CPALV1Tail
{
friend struct CPAL;
private:
hb_ot_color_palette_flags_t get_palette_flags (const void *base,
unsigned int palette_index,
unsigned int palette_count) const
{
if (!paletteFlagsZ) return HB_OT_COLOR_PALETTE_FLAG_DEFAULT;
return (hb_ot_color_palette_flags_t) (uint32_t)
(base+paletteFlagsZ).as_array (palette_count)[palette_index];
}
hb_ot_name_id_t get_palette_name_id (const void *base,
unsigned int palette_index,
unsigned int palette_count) const
{
if (!paletteLabelsZ) return HB_OT_NAME_ID_INVALID;
return (base+paletteLabelsZ).as_array (palette_count)[palette_index];
}
hb_ot_name_id_t get_color_name_id (const void *base,
unsigned int color_index,
unsigned int color_count) const
{
if (!colorLabelsZ) return HB_OT_NAME_ID_INVALID;
return (base+colorLabelsZ).as_array (color_count)[color_index];
}
public:
void collect_name_ids (const void *base,
unsigned palette_count,
unsigned color_count,
const hb_map_t *color_index_map,
hb_set_t *nameids_to_retain /* OUT */) const
{
if (paletteLabelsZ)
{
+ (base+paletteLabelsZ).as_array (palette_count)
| hb_sink (nameids_to_retain)
;
}
if (colorLabelsZ)
{
const hb_array_t<const NameID> colorLabels = (base+colorLabelsZ).as_array (color_count);
for (unsigned i = 0; i < color_count; i++)
{
if (!color_index_map->has (i)) continue;
nameids_to_retain->add (colorLabels[i]);
}
}
}
bool serialize (hb_serialize_context_t *c,
unsigned palette_count,
unsigned color_count,
const void *base,
const hb_map_t *color_index_map) const
{
TRACE_SERIALIZE (this);
auto *out = c->allocate_size<CPALV1Tail> (static_size);
if (unlikely (!out)) return_trace (false);
out->paletteFlagsZ = 0;
if (paletteFlagsZ)
out->paletteFlagsZ.serialize_copy (c, paletteFlagsZ, base, 0, hb_serialize_context_t::Head, palette_count);
out->paletteLabelsZ = 0;
if (paletteLabelsZ)
out->paletteLabelsZ.serialize_copy (c, paletteLabelsZ, base, 0, hb_serialize_context_t::Head, palette_count);
const hb_array_t<const NameID> colorLabels = (base+colorLabelsZ).as_array (color_count);
if (colorLabelsZ)
{
c->push ();
for (unsigned i = 0; i < color_count; i++)
{
if (!color_index_map->has (i)) continue;
if (!c->copy<NameID> (colorLabels[i]))
{
c->pop_discard ();
return_trace (false);
}
}
c->add_link (out->colorLabelsZ, c->pop_pack ());
}
return_trace (true);
}
bool sanitize (hb_sanitize_context_t *c,
const void *base,
unsigned int palette_count,
unsigned int color_count) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
(!paletteFlagsZ || (base+paletteFlagsZ).sanitize (c, palette_count)) &&
(!paletteLabelsZ || (base+paletteLabelsZ).sanitize (c, palette_count)) &&
(!colorLabelsZ || (base+colorLabelsZ).sanitize (c, color_count)));
}
protected:
// TODO(garretrieger): these offsets can hold nulls so we should not be using non-null offsets
// here. Currently they are needed since UnsizedArrayOf doesn't define null_size
NNOffset32To<UnsizedArrayOf<HBUINT32>>
paletteFlagsZ; /* Offset from the beginning of CPAL table to
* the Palette Type Array. Set to 0 if no array
* is provided. */
NNOffset32To<UnsizedArrayOf<NameID>>
paletteLabelsZ; /* Offset from the beginning of CPAL table to
* the palette labels array. Set to 0 if no
* array is provided. */
NNOffset32To<UnsizedArrayOf<NameID>>
colorLabelsZ; /* Offset from the beginning of CPAL table to
* the color labels array. Set to 0
* if no array is provided. */
public:
DEFINE_SIZE_STATIC (12);
};
typedef HBUINT32 BGRAColor;
struct CPAL
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_CPAL;
bool has_data () const { return numPalettes; }
unsigned int get_size () const
{ return min_size + numPalettes * sizeof (colorRecordIndicesZ[0]); }
unsigned int get_palette_count () const { return numPalettes; }
unsigned int get_color_count () const { return numColors; }
hb_ot_color_palette_flags_t get_palette_flags (unsigned int palette_index) const
{ return v1 ().get_palette_flags (this, palette_index, numPalettes); }
hb_ot_name_id_t get_palette_name_id (unsigned int palette_index) const
{ return v1 ().get_palette_name_id (this, palette_index, numPalettes); }
hb_ot_name_id_t get_color_name_id (unsigned int color_index) const
{ return v1 ().get_color_name_id (this, color_index, numColors); }
hb_array_t<const BGRAColor> get_palette_colors (unsigned int palette_index) const
{
if (unlikely (palette_index >= numPalettes))
return hb_array_t<const BGRAColor> ();
unsigned int start_index = colorRecordIndicesZ[palette_index];
hb_array_t<const BGRAColor> all_colors ((this+colorRecordsZ).arrayZ, numColorRecords);
return all_colors.sub_array (start_index, numColors);
}
unsigned int get_palette_colors (unsigned int palette_index,
unsigned int start_offset,
unsigned int *color_count, /* IN/OUT. May be NULL. */
hb_color_t *colors /* OUT. May be NULL. */) const
{
if (unlikely (palette_index >= numPalettes))
{
if (color_count) *color_count = 0;
return 0;
}
unsigned int start_index = colorRecordIndicesZ[palette_index];
hb_array_t<const BGRAColor> all_colors ((this+colorRecordsZ).arrayZ, numColorRecords);
hb_array_t<const BGRAColor> palette_colors = all_colors.sub_array (start_index,
numColors);
if (color_count)
{
+ palette_colors.sub_array (start_offset, color_count)
| hb_sink (hb_array (colors, *color_count))
;
}
return numColors;
}
void collect_name_ids (const hb_map_t *color_index_map,
hb_set_t *nameids_to_retain /* OUT */) const
{
if (version == 1)
{
hb_barrier ();
v1 ().collect_name_ids (this, numPalettes, numColors, color_index_map, nameids_to_retain);
}
}
private:
const CPALV1Tail& v1 () const
{
if (version == 0) return Null (CPALV1Tail);
hb_barrier ();
return StructAfter<CPALV1Tail> (*this);
}
public:
bool serialize (hb_serialize_context_t *c,
const hb_array_t<const HBUINT16> &color_record_indices,
const hb_array_t<const BGRAColor> &color_records,
const hb_vector_t<unsigned>& first_color_index_for_layer,
const hb_map_t& first_color_to_layer_index,
const hb_set_t &retained_color_indices) const
{
TRACE_SERIALIZE (this);
// TODO(grieger): limit total final size.
for (const auto idx : color_record_indices)
{
hb_codepoint_t layer_index = first_color_to_layer_index[idx];
HBUINT16 new_idx;
new_idx = layer_index * retained_color_indices.get_population ();
if (!c->copy<HBUINT16> (new_idx)) return_trace (false);
}
c->push ();
for (unsigned first_color_index : first_color_index_for_layer)
{
for (hb_codepoint_t color_index : retained_color_indices)
{
if (!c->copy<BGRAColor> (color_records[first_color_index + color_index]))
{
c->pop_discard ();
return_trace (false);
}
}
}
c->add_link (colorRecordsZ, c->pop_pack ());
return_trace (true);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
if (!numPalettes) return_trace (false);
const hb_map_t *color_index_map = &c->plan->colr_palettes;
if (color_index_map->is_empty ()) return_trace (false);
hb_set_t retained_color_indices;
for (const auto _ : color_index_map->keys ())
{
if (_ == 0xFFFF) continue;
retained_color_indices.add (_);
}
if (retained_color_indices.is_empty ()) return_trace (false);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->version = version;
out->numColors = retained_color_indices.get_population ();
out->numPalettes = numPalettes;
hb_vector_t<unsigned> first_color_index_for_layer;
hb_map_t first_color_to_layer_index;
const hb_array_t<const HBUINT16> colorRecordIndices = colorRecordIndicesZ.as_array (numPalettes);
for (const auto first_color_record_idx : colorRecordIndices)
{
if (first_color_to_layer_index.has (first_color_record_idx)) continue;
first_color_index_for_layer.push (first_color_record_idx);
first_color_to_layer_index.set (first_color_record_idx,
first_color_index_for_layer.length - 1);
}
out->numColorRecords = first_color_index_for_layer.length
* retained_color_indices.get_population ();
const hb_array_t<const BGRAColor> color_records = (this+colorRecordsZ).as_array (numColorRecords);
if (!out->serialize (c->serializer,
colorRecordIndices,
color_records,
first_color_index_for_layer,
first_color_to_layer_index,
retained_color_indices))
return_trace (false);
if (version == 1)
{
hb_barrier ();
return_trace (v1 ().serialize (c->serializer, numPalettes, numColors, this, color_index_map));
}
return_trace (true);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
hb_barrier () &&
(this+colorRecordsZ).sanitize (c, numColorRecords) &&
colorRecordIndicesZ.sanitize (c, numPalettes) &&
(version == 0 || v1 ().sanitize (c, this, numPalettes, numColors)));
}
protected:
HBUINT16 version; /* Table version number */
/* Version 0 */
HBUINT16 numColors; /* Number of colors in each palette. */
HBUINT16 numPalettes; /* Number of palettes in the table. */
HBUINT16 numColorRecords; /* Total number of color records, combined for
* all palettes. */
NNOffset32To<UnsizedArrayOf<BGRAColor>>
colorRecordsZ; /* Offset from the beginning of CPAL table to
* the first ColorRecord. */
UnsizedArrayOf<HBUINT16>
colorRecordIndicesZ; /* Index of each palettes first color record in
* the combined color record array. */
/*CPALV1Tail v1;*/
public:
DEFINE_SIZE_ARRAY (12, colorRecordIndicesZ);
};
} /* namespace OT */
#endif /* OT_COLOR_CPAL_CPAL_HH */
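Aside: a minimal sketch of reading CPAL data through HarfBuzz's public API (hb-ot-color.h) instead of these internal structs; dump_palettes is a hypothetical helper, and face creation plus error handling are elided. The first get_colors call with null out-pointers returns the total per-palette color count, matching the IN/OUT convention of get_palette_colors above.

#include <hb-ot.h>
#include <cstdio>
#include <vector>

static void dump_palettes (hb_face_t *face)
{
  if (!hb_ot_color_has_palettes (face)) return;

  unsigned palette_count = hb_ot_color_palette_get_count (face);
  for (unsigned i = 0; i < palette_count; i++)
  {
    // First call: query the color count only.
    unsigned color_count = hb_ot_color_palette_get_colors (face, i, 0, nullptr, nullptr);
    std::vector<hb_color_t> colors (color_count);
    // Second call: fill the buffer.
    hb_ot_color_palette_get_colors (face, i, 0, &color_count, colors.data ());
    for (unsigned j = 0; j < color_count; j++)
      printf ("palette %u, color %u: #%02x%02x%02x%02x\n", i, j,
              hb_color_get_red (colors[j]), hb_color_get_green (colors[j]),
              hb_color_get_blue (colors[j]), hb_color_get_alpha (colors[j]));
  }
}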


@@ -0,0 +1,449 @@
/*
* Copyright © 2018 Ebrahim Byagowi
* Copyright © 2020 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Calder Kitagawa
*/
#ifndef OT_COLOR_SBIX_SBIX_HH
#define OT_COLOR_SBIX_SBIX_HH
#include "../../../hb-open-type.hh"
#include "../../../hb-paint.hh"
/*
* sbix -- Standard Bitmap Graphics
* https://docs.microsoft.com/en-us/typography/opentype/spec/sbix
* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6sbix.html
*/
#define HB_OT_TAG_sbix HB_TAG('s','b','i','x')
namespace OT {
struct SBIXGlyph
{
SBIXGlyph* copy (hb_serialize_context_t *c, unsigned int data_length) const
{
TRACE_SERIALIZE (this);
SBIXGlyph* new_glyph = c->start_embed<SBIXGlyph> ();
if (unlikely (!c->extend_min (new_glyph))) return_trace (nullptr);
new_glyph->xOffset = xOffset;
new_glyph->yOffset = yOffset;
new_glyph->graphicType = graphicType;
data.copy (c, data_length);
return_trace (new_glyph);
}
HBINT16 xOffset; /* The horizontal (x-axis) offset from the left
* edge of the graphic to the glyph's origin.
* That is, the x-coordinate of the point on the
* baseline at the left edge of the glyph. */
HBINT16 yOffset; /* The vertical (y-axis) offset from the bottom
* edge of the graphic to the glyph's origin.
* That is, the y-coordinate of the point on the
* baseline at the left edge of the glyph. */
Tag graphicType; /* Indicates the format of the embedded graphic
* data: one of 'jpg ', 'png ' or 'tiff', or the
* special format 'dupe'. */
UnsizedArrayOf<HBUINT8>
data; /* The actual embedded graphic data. The total
* length is inferred from sequential entries in
* the glyphDataOffsets array and the fixed size
* (8 bytes) of the preceding fields. */
public:
DEFINE_SIZE_ARRAY (8, data);
};
struct SBIXStrike
{
static unsigned int get_size (unsigned num_glyphs)
{ return min_size + num_glyphs * HBUINT32::static_size; }
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
imageOffsetsZ.sanitize_shallow (c, c->get_num_glyphs () + 1));
}
hb_blob_t *get_glyph_blob (unsigned int glyph_id,
hb_blob_t *sbix_blob,
hb_tag_t file_type,
int *x_offset,
int *y_offset,
unsigned int num_glyphs,
unsigned int *strike_ppem) const
{
if (unlikely (!ppem)) return hb_blob_get_empty (); /* To get Null() object out of the way. */
unsigned int retry_count = 8;
unsigned int sbix_len = sbix_blob->length;
unsigned int strike_offset = (const char *) this - (const char *) sbix_blob->data;
assert (strike_offset < sbix_len);
retry:
if (unlikely (glyph_id >= num_glyphs ||
imageOffsetsZ[glyph_id + 1] <= imageOffsetsZ[glyph_id] ||
imageOffsetsZ[glyph_id + 1] - imageOffsetsZ[glyph_id] <= SBIXGlyph::min_size ||
(unsigned int) imageOffsetsZ[glyph_id + 1] > sbix_len - strike_offset))
return hb_blob_get_empty ();
unsigned int glyph_offset = strike_offset + (unsigned int) imageOffsetsZ[glyph_id] + SBIXGlyph::min_size;
unsigned int glyph_length = imageOffsetsZ[glyph_id + 1] - imageOffsetsZ[glyph_id] - SBIXGlyph::min_size;
const SBIXGlyph *glyph = &(this+imageOffsetsZ[glyph_id]);
if (glyph->graphicType == HB_TAG ('d','u','p','e'))
{
if (glyph_length >= 2)
{
glyph_id = *((HBUINT16 *) &glyph->data);
if (retry_count--)
goto retry;
}
return hb_blob_get_empty ();
}
if (unlikely (file_type != glyph->graphicType))
return hb_blob_get_empty ();
if (strike_ppem) *strike_ppem = ppem;
if (x_offset) *x_offset = glyph->xOffset;
if (y_offset) *y_offset = glyph->yOffset;
return hb_blob_create_sub_blob (sbix_blob, glyph_offset, glyph_length);
}
bool subset (hb_subset_context_t *c, unsigned int available_len) const
{
TRACE_SUBSET (this);
unsigned int num_output_glyphs = c->plan->num_output_glyphs ();
auto* out = c->serializer->start_embed<SBIXStrike> ();
auto snap = c->serializer->snapshot ();
if (unlikely (!c->serializer->extend (out, num_output_glyphs + 1))) return_trace (false);
out->ppem = ppem;
out->resolution = resolution;
HBUINT32 head;
head = get_size (num_output_glyphs + 1);
bool has_glyphs = false;
for (unsigned new_gid = 0; new_gid < num_output_glyphs; new_gid++)
{
hb_codepoint_t old_gid;
if (!c->plan->old_gid_for_new_gid (new_gid, &old_gid) ||
unlikely (imageOffsetsZ[old_gid].is_null () ||
imageOffsetsZ[old_gid + 1].is_null () ||
imageOffsetsZ[old_gid + 1] <= imageOffsetsZ[old_gid] ||
imageOffsetsZ[old_gid + 1] - imageOffsetsZ[old_gid] <= SBIXGlyph::min_size) ||
(unsigned int) imageOffsetsZ[old_gid + 1] > available_len)
{
out->imageOffsetsZ[new_gid] = head;
continue;
}
has_glyphs = true;
unsigned int delta = imageOffsetsZ[old_gid + 1] - imageOffsetsZ[old_gid];
unsigned int glyph_data_length = delta - SBIXGlyph::min_size;
if (!(this+imageOffsetsZ[old_gid]).copy (c->serializer, glyph_data_length))
return_trace (false);
out->imageOffsetsZ[new_gid] = head;
head += delta;
}
if (has_glyphs)
out->imageOffsetsZ[num_output_glyphs] = head;
else
c->serializer->revert (snap);
return_trace (has_glyphs);
}
public:
HBUINT16 ppem; /* The PPEM size for which this strike was designed. */
HBUINT16 resolution; /* The device pixel density (in PPI) for which this
* strike was designed. (E.g., 96 PPI, 192 PPI.) */
protected:
UnsizedArrayOf<Offset32To<SBIXGlyph>>
imageOffsetsZ; /* Offset from the beginning of the strike data header
* to bitmap data for an individual glyph ID. */
public:
DEFINE_SIZE_ARRAY (4, imageOffsetsZ);
};
struct sbix
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_sbix;
bool has_data () const { return version; }
const SBIXStrike &get_strike (unsigned int i) const { return this+strikes[i]; }
struct accelerator_t
{
accelerator_t (hb_face_t *face)
{
table = hb_sanitize_context_t ().reference_table<sbix> (face);
num_glyphs = face->get_num_glyphs ();
}
~accelerator_t () { table.destroy (); }
bool has_data () const { return table->has_data (); }
bool get_extents (hb_font_t *font,
hb_codepoint_t glyph,
hb_glyph_extents_t *extents,
bool scale = true) const
{
/* We only support PNG right now; the following function checks the type. */
return get_png_extents (font, glyph, extents, scale);
}
hb_blob_t *reference_png (hb_font_t *font,
hb_codepoint_t glyph_id,
int *x_offset,
int *y_offset,
unsigned int *available_ppem) const
{
return choose_strike (font).get_glyph_blob (glyph_id, table.get_blob (),
HB_TAG ('p','n','g',' '),
x_offset, y_offset,
num_glyphs, available_ppem);
}
bool paint_glyph (hb_font_t *font, hb_codepoint_t glyph, hb_paint_funcs_t *funcs, void *data) const
{
if (!has_data ())
return false;
int x_offset = 0, y_offset = 0;
unsigned int strike_ppem = 0;
hb_glyph_extents_t extents;
hb_glyph_extents_t pixel_extents;
if (!font->get_glyph_extents (glyph, &extents, false))
return false;
if (unlikely (!get_extents (font, glyph, &pixel_extents, false)))
return false;
hb_blob_t *blob = reference_png (font, glyph, &x_offset, &y_offset, &strike_ppem);
if (hb_blob_is_immutable (blob))
return false;
bool ret = funcs->image (data,
blob,
pixel_extents.width, -pixel_extents.height,
HB_PAINT_IMAGE_FORMAT_PNG,
0.f,
&extents);
hb_blob_destroy (blob);
return ret;
}
private:
const SBIXStrike &choose_strike (hb_font_t *font) const
{
unsigned count = table->strikes.len;
if (unlikely (!count))
return Null (SBIXStrike);
unsigned int requested_ppem = hb_max (font->x_ppem, font->y_ppem);
if (!requested_ppem)
requested_ppem = 1<<30; /* Choose largest strike. */
/* TODO Add DPI sensitivity as well? */
unsigned int best_i = 0;
unsigned int best_ppem = table->get_strike (0).ppem;
for (unsigned int i = 1; i < count; i++)
{
unsigned int ppem = (table->get_strike (i)).ppem;
if ((requested_ppem <= ppem && ppem < best_ppem) ||
(requested_ppem > best_ppem && ppem > best_ppem))
{
best_i = i;
best_ppem = ppem;
}
}
return table->get_strike (best_i);
}
struct PNGHeader
{
HBUINT8 signature[8];
struct
{
struct
{
HBUINT32 length;
Tag type;
} header;
HBUINT32 width;
HBUINT32 height;
HBUINT8 bitDepth;
HBUINT8 colorType;
HBUINT8 compressionMethod;
HBUINT8 filterMethod;
HBUINT8 interlaceMethod;
} IHDR;
public:
DEFINE_SIZE_STATIC (29);
};
bool get_png_extents (hb_font_t *font,
hb_codepoint_t glyph,
hb_glyph_extents_t *extents,
bool scale = true) const
{
/* The following code is safe to call even without data,
* but it's faster to short-circuit. */
if (!has_data ())
return false;
int x_offset = 0, y_offset = 0;
unsigned int strike_ppem = 0;
hb_blob_t *blob = reference_png (font, glyph, &x_offset, &y_offset, &strike_ppem);
const PNGHeader &png = *blob->as<PNGHeader>();
if (png.IHDR.height >= 65536 || png.IHDR.width >= 65536)
{
hb_blob_destroy (blob);
return false;
}
extents->x_bearing = x_offset;
extents->y_bearing = png.IHDR.height + y_offset;
extents->width = png.IHDR.width;
extents->height = -1 * png.IHDR.height;
/* Convert to font units. */
if (strike_ppem && scale)
{
float scale = font->face->get_upem () / (float) strike_ppem;
extents->x_bearing = roundf (extents->x_bearing * scale);
extents->y_bearing = roundf (extents->y_bearing * scale);
extents->width = roundf (extents->width * scale);
extents->height = roundf (extents->height * scale);
}
if (scale)
font->scale_glyph_extents (extents);
hb_blob_destroy (blob);
return strike_ppem;
}
private:
hb_blob_ptr_t<sbix> table;
unsigned int num_glyphs;
};
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
hb_barrier () &&
version >= 1 &&
strikes.sanitize (c, this)));
}
bool
add_strike (hb_subset_context_t *c, unsigned i) const
{
if (strikes[i].is_null () || c->source_blob->length < (unsigned) strikes[i])
return false;
return (this+strikes[i]).subset (c, c->source_blob->length - (unsigned) strikes[i]);
}
bool serialize_strike_offsets (hb_subset_context_t *c) const
{
TRACE_SERIALIZE (this);
auto *out = c->serializer->start_embed<Array32OfOffset32To<SBIXStrike>> ();
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
hb_vector_t<Offset32To<SBIXStrike>*> new_strikes;
hb_vector_t<hb_serialize_context_t::objidx_t> objidxs;
for (int i = strikes.len - 1; i >= 0; --i)
{
auto* o = out->serialize_append (c->serializer);
if (unlikely (!o)) return_trace (false);
*o = 0;
auto snap = c->serializer->snapshot ();
c->serializer->push ();
bool ret = add_strike (c, i);
if (!ret)
{
c->serializer->pop_discard ();
out->pop ();
c->serializer->revert (snap);
}
else
{
objidxs.push (c->serializer->pop_pack ());
new_strikes.push (o);
}
}
for (unsigned int i = 0; i < new_strikes.length; ++i)
c->serializer->add_link (*new_strikes[i], objidxs[new_strikes.length - 1 - i]);
return_trace (true);
}
bool subset (hb_subset_context_t* c) const
{
TRACE_SUBSET (this);
if (unlikely (!c->serializer->embed (this->version))) return_trace (false);
if (unlikely (!c->serializer->embed (this->flags))) return_trace (false);
return_trace (serialize_strike_offsets (c));
}
protected:
HBUINT16 version; /* Table version number — set to 1 */
HBUINT16 flags; /* Bit 0: Set to 1. Bit 1: Draw outlines.
* Bits 2 to 15: reserved (set to 0). */
Array32OfOffset32To<SBIXStrike>
strikes; /* Offsets from the beginning of the 'sbix'
* table to data for each individual bitmap strike. */
public:
DEFINE_SIZE_ARRAY (8, strikes);
};
struct sbix_accelerator_t : sbix::accelerator_t {
sbix_accelerator_t (hb_face_t *face) : sbix::accelerator_t (face) {}
};
} /* namespace OT */
#endif /* OT_COLOR_SBIX_SBIX_HH */
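Aside: applications normally reach this table through the public API rather than SBIXStrike directly. A minimal sketch (glyph_png is a hypothetical helper; the strike picked by choose_strike above follows the font's ppem, so the sketch sets it explicitly):

#include <hb-ot.h>

static hb_blob_t *glyph_png (hb_face_t *face, hb_codepoint_t glyph, unsigned ppem)
{
  hb_font_t *font = hb_font_create (face);
  hb_font_set_ppem (font, ppem, ppem);  // drives strike selection
  hb_blob_t *png = hb_ot_color_glyph_reference_png (font, glyph);
  hb_font_destroy (font);
  return png;  // empty blob if no PNG is available; caller must hb_blob_destroy ()
}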


@@ -0,0 +1,153 @@
/*
* Copyright © 2018 Ebrahim Byagowi
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*/
#ifndef OT_COLOR_SVG_SVG_HH
#define OT_COLOR_SVG_SVG_HH
#include "../../../hb-open-type.hh"
#include "../../../hb-blob.hh"
#include "../../../hb-paint.hh"
/*
* SVG -- SVG (Scalable Vector Graphics)
* https://docs.microsoft.com/en-us/typography/opentype/spec/svg
*/
#define HB_OT_TAG_SVG HB_TAG('S','V','G',' ')
namespace OT {
struct SVGDocumentIndexEntry
{
int cmp (hb_codepoint_t g) const
{ return g < startGlyphID ? -1 : g > endGlyphID ? 1 : 0; }
hb_blob_t *reference_blob (hb_blob_t *svg_blob, unsigned int index_offset) const
{
return hb_blob_create_sub_blob (svg_blob,
index_offset + (unsigned int) svgDoc,
svgDocLength);
}
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
hb_barrier () &&
svgDoc.sanitize (c, base, svgDocLength));
}
protected:
HBUINT16 startGlyphID; /* The first glyph ID in the range described by
* this index entry. */
HBUINT16 endGlyphID; /* The last glyph ID in the range described by
* this index entry. Must be >= startGlyphID. */
NNOffset32To<UnsizedArrayOf<HBUINT8>>
svgDoc; /* Offset from the beginning of the SVG Document Index
* to an SVG document. Must be non-zero. */
HBUINT32 svgDocLength; /* Length of the SVG document.
* Must be non-zero. */
public:
DEFINE_SIZE_STATIC (12);
};
struct SVG
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_SVG;
bool has_data () const { return svgDocEntries; }
struct accelerator_t
{
accelerator_t (hb_face_t *face)
{ table = hb_sanitize_context_t ().reference_table<SVG> (face); }
~accelerator_t () { table.destroy (); }
hb_blob_t *reference_blob_for_glyph (hb_codepoint_t glyph_id) const
{
return table->get_glyph_entry (glyph_id).reference_blob (table.get_blob (),
table->svgDocEntries);
}
bool has_data () const { return table->has_data (); }
bool paint_glyph (hb_font_t *font HB_UNUSED, hb_codepoint_t glyph, hb_paint_funcs_t *funcs, void *data) const
{
if (!has_data ())
return false;
hb_blob_t *blob = reference_blob_for_glyph (glyph);
if (blob == hb_blob_get_empty ())
return false;
bool ret = funcs->image (data,
blob,
0, 0,
HB_PAINT_IMAGE_FORMAT_SVG,
0.f,
nullptr);
hb_blob_destroy (blob);
return ret;
}
private:
hb_blob_ptr_t<SVG> table;
public:
DEFINE_SIZE_STATIC (sizeof (hb_blob_ptr_t<SVG>));
};
const SVGDocumentIndexEntry &get_glyph_entry (hb_codepoint_t glyph_id) const
{ return (this+svgDocEntries).bsearch (glyph_id); }
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
(this+svgDocEntries).sanitize_shallow (c)));
}
protected:
HBUINT16 version; /* Table version (starting at 0). */
Offset32To<SortedArray16Of<SVGDocumentIndexEntry>>
svgDocEntries; /* Offset (relative to the start of the SVG table) to the
* SVG Documents Index. Must be non-zero. */
/* Array of SVG Document Index Entries. */
HBUINT32 reserved; /* Set to 0. */
public:
DEFINE_SIZE_STATIC (10);
};
struct SVG_accelerator_t : SVG::accelerator_t {
SVG_accelerator_t (hb_face_t *face) : SVG::accelerator_t (face) {}
};
} /* namespace OT */
#endif /* OT_COLOR_SVG_SVG_HH */
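Aside: a minimal public-API sketch of fetching the SVG document covering a glyph (glyph_svg is a hypothetical helper). As the index entries above describe, one returned document may cover a whole glyph ID range.

#include <hb-ot.h>

static hb_blob_t *glyph_svg (hb_face_t *face, hb_codepoint_t glyph)
{
  if (!hb_ot_color_has_svg (face))
    return hb_blob_get_empty ();
  // May serve several glyphs from the same SVG document; caller destroys.
  return hb_ot_color_glyph_reference_svg (face, glyph);
}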


@@ -0,0 +1,374 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod, Garret Rieger
*/
#ifndef OT_LAYOUT_COMMON_COVERAGE_HH
#define OT_LAYOUT_COMMON_COVERAGE_HH
#include "../types.hh"
#include "CoverageFormat1.hh"
#include "CoverageFormat2.hh"
namespace OT {
namespace Layout {
namespace Common {
template<typename Iterator>
static inline void Coverage_serialize (hb_serialize_context_t *c,
Iterator it);
struct Coverage
{
protected:
union {
HBUINT16 format; /* Format identifier */
CoverageFormat1_3<SmallTypes> format1;
CoverageFormat2_4<SmallTypes> format2;
#ifndef HB_NO_BEYOND_64K
CoverageFormat1_3<MediumTypes> format3;
CoverageFormat2_4<MediumTypes> format4;
#endif
} u;
public:
DEFINE_SIZE_UNION (2, format);
#ifndef HB_OPTIMIZE_SIZE
HB_ALWAYS_INLINE
#endif
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
hb_barrier ();
switch (u.format)
{
case 1: return_trace (u.format1.sanitize (c));
case 2: return_trace (u.format2.sanitize (c));
#ifndef HB_NO_BEYOND_64K
case 3: return_trace (u.format3.sanitize (c));
case 4: return_trace (u.format4.sanitize (c));
#endif
default:return_trace (true);
}
}
/* Has interface. */
unsigned operator [] (hb_codepoint_t k) const { return get (k); }
bool has (hb_codepoint_t k) const { return (*this)[k] != NOT_COVERED; }
/* Predicate. */
bool operator () (hb_codepoint_t k) const { return has (k); }
unsigned int get (hb_codepoint_t k) const { return get_coverage (k); }
unsigned int get_coverage (hb_codepoint_t glyph_id) const
{
switch (u.format) {
case 1: return u.format1.get_coverage (glyph_id);
case 2: return u.format2.get_coverage (glyph_id);
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.get_coverage (glyph_id);
case 4: return u.format4.get_coverage (glyph_id);
#endif
default:return NOT_COVERED;
}
}
unsigned int get_coverage (hb_codepoint_t glyph_id,
hb_ot_lookup_cache_t *cache) const
{
unsigned coverage;
if (cache && cache->get (glyph_id, &coverage)) return coverage;
coverage = get_coverage (glyph_id);
if (cache) cache->set (glyph_id, coverage);
return coverage;
}
unsigned get_population () const
{
switch (u.format) {
case 1: return u.format1.get_population ();
case 2: return u.format2.get_population ();
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.get_population ();
case 4: return u.format4.get_population ();
#endif
default:return NOT_COVERED;
}
}
template <typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, Iterator glyphs)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
unsigned count = hb_len (glyphs);
unsigned num_ranges = 0;
hb_codepoint_t last = (hb_codepoint_t) -2;
hb_codepoint_t max = 0;
bool unsorted = false;
for (auto g: glyphs)
{
if (last != (hb_codepoint_t) -2 && g < last)
unsorted = true;
if (last + 1 != g)
num_ranges++;
last = g;
if (g > max) max = g;
}
u.format = !unsorted && count <= num_ranges * 3 ? 1 : 2;
#ifndef HB_NO_BEYOND_64K
if (max > 0xFFFFu)
u.format += 2;
if (unlikely (max > 0xFFFFFFu))
#else
if (unlikely (max > 0xFFFFu))
#endif
{
c->check_success (false, HB_SERIALIZE_ERROR_INT_OVERFLOW);
return_trace (false);
}
switch (u.format)
{
case 1: return_trace (u.format1.serialize (c, glyphs));
case 2: return_trace (u.format2.serialize (c, glyphs));
#ifndef HB_NO_BEYOND_64K
case 3: return_trace (u.format3.serialize (c, glyphs));
case 4: return_trace (u.format4.serialize (c, glyphs));
#endif
default:return_trace (false);
}
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto it =
+ iter ()
| hb_take (c->plan->source->get_num_glyphs ())
| hb_map_retains_sorting (c->plan->glyph_map_gsub)
| hb_filter ([] (hb_codepoint_t glyph) { return glyph != HB_MAP_VALUE_INVALID; })
;
// Cache the iterator result as it will be iterated multiple times
// by the serialize code below.
hb_sorted_vector_t<hb_codepoint_t> glyphs (it);
Coverage_serialize (c->serializer, glyphs.iter ());
return_trace (bool (glyphs));
}
bool intersects (const hb_set_t *glyphs) const
{
switch (u.format)
{
case 1: return u.format1.intersects (glyphs);
case 2: return u.format2.intersects (glyphs);
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.intersects (glyphs);
case 4: return u.format4.intersects (glyphs);
#endif
default:return false;
}
}
bool intersects_coverage (const hb_set_t *glyphs, unsigned int index) const
{
switch (u.format)
{
case 1: return u.format1.intersects_coverage (glyphs, index);
case 2: return u.format2.intersects_coverage (glyphs, index);
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.intersects_coverage (glyphs, index);
case 4: return u.format4.intersects_coverage (glyphs, index);
#endif
default:return false;
}
}
unsigned cost () const
{
switch (u.format) {
case 1: hb_barrier (); return u.format1.cost ();
case 2: hb_barrier (); return u.format2.cost ();
#ifndef HB_NO_BEYOND_64K
case 3: hb_barrier (); return u.format3.cost ();
case 4: hb_barrier (); return u.format4.cost ();
#endif
default:return 0u;
}
}
/* Might return false if array looks unsorted.
* Used for faster rejection of corrupt data. */
template <typename set_t>
bool collect_coverage (set_t *glyphs) const
{
switch (u.format)
{
case 1: return u.format1.collect_coverage (glyphs);
case 2: return u.format2.collect_coverage (glyphs);
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.collect_coverage (glyphs);
case 4: return u.format4.collect_coverage (glyphs);
#endif
default:return false;
}
}
template <typename IterableOut,
hb_requires (hb_is_sink_of (IterableOut, hb_codepoint_t))>
void intersect_set (const hb_set_t &glyphs, IterableOut&& intersect_glyphs) const
{
switch (u.format)
{
case 1: return u.format1.intersect_set (glyphs, intersect_glyphs);
case 2: return u.format2.intersect_set (glyphs, intersect_glyphs);
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.intersect_set (glyphs, intersect_glyphs);
case 4: return u.format4.intersect_set (glyphs, intersect_glyphs);
#endif
default:return ;
}
}
struct iter_t : hb_iter_with_fallback_t<iter_t, hb_codepoint_t>
{
static constexpr bool is_sorted_iterator = true;
iter_t (const Coverage &c_ = Null (Coverage))
{
hb_memset (this, 0, sizeof (*this));
format = c_.u.format;
switch (format)
{
case 1: u.format1.init (c_.u.format1); return;
case 2: u.format2.init (c_.u.format2); return;
#ifndef HB_NO_BEYOND_64K
case 3: u.format3.init (c_.u.format3); return;
case 4: u.format4.init (c_.u.format4); return;
#endif
default: return;
}
}
bool __more__ () const
{
switch (format)
{
case 1: return u.format1.__more__ ();
case 2: return u.format2.__more__ ();
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.__more__ ();
case 4: return u.format4.__more__ ();
#endif
default:return false;
}
}
void __next__ ()
{
switch (format)
{
case 1: u.format1.__next__ (); break;
case 2: u.format2.__next__ (); break;
#ifndef HB_NO_BEYOND_64K
case 3: u.format3.__next__ (); break;
case 4: u.format4.__next__ (); break;
#endif
default: break;
}
}
typedef hb_codepoint_t __item_t__;
__item_t__ __item__ () const { return get_glyph (); }
hb_codepoint_t get_glyph () const
{
switch (format)
{
case 1: return u.format1.get_glyph ();
case 2: return u.format2.get_glyph ();
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3.get_glyph ();
case 4: return u.format4.get_glyph ();
#endif
default:return 0;
}
}
bool operator != (const iter_t& o) const
{
if (unlikely (format != o.format)) return true;
switch (format)
{
case 1: return u.format1 != o.u.format1;
case 2: return u.format2 != o.u.format2;
#ifndef HB_NO_BEYOND_64K
case 3: return u.format3 != o.u.format3;
case 4: return u.format4 != o.u.format4;
#endif
default:return false;
}
}
iter_t __end__ () const
{
iter_t it = {};
it.format = format;
switch (format)
{
case 1: it.u.format1 = u.format1.__end__ (); break;
case 2: it.u.format2 = u.format2.__end__ (); break;
#ifndef HB_NO_BEYOND_64K
case 3: it.u.format3 = u.format3.__end__ (); break;
case 4: it.u.format4 = u.format4.__end__ (); break;
#endif
default: break;
}
return it;
}
private:
unsigned int format;
union {
#ifndef HB_NO_BEYOND_64K
CoverageFormat2_4<MediumTypes>::iter_t format4; /* Put this one first since it's larger; helps shut up compiler. */
CoverageFormat1_3<MediumTypes>::iter_t format3;
#endif
CoverageFormat2_4<SmallTypes>::iter_t format2; /* Put this one first since it's larger; helps shut up compiler. */
CoverageFormat1_3<SmallTypes>::iter_t format1;
} u;
};
iter_t iter () const { return iter_t (*this); }
};
template<typename Iterator>
static inline void
Coverage_serialize (hb_serialize_context_t *c,
Iterator it)
{ c->start_embed<Coverage> ()->serialize (c, it); }
}
}
}
#endif // #ifndef OT_LAYOUT_COMMON_COVERAGE_HH
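Aside: the two lookup strategies dispatched above, restated over plain containers. Format 1 binary-searches a sorted glyph array; format 2 binary-searches range records and derives the coverage index as value + (glyph - first). Range, coverage1, and coverage2 are illustrative names only.

#include <algorithm>
#include <cstdint>
#include <vector>

static const unsigned kNotCovered = (unsigned) -1;  // mirrors NOT_COVERED

// Format 1: index of the glyph in a sorted array, or not covered.
static unsigned coverage1 (const std::vector<uint32_t> &glyphs, uint32_t g)
{
  auto it = std::lower_bound (glyphs.begin (), glyphs.end (), g);
  return (it != glyphs.end () && *it == g) ? unsigned (it - glyphs.begin ())
                                           : kNotCovered;
}

struct Range { uint32_t first, last; unsigned value; };  // value = index of `first`

// Format 2: find the first range whose `last` is >= g, then offset into it.
static unsigned coverage2 (const std::vector<Range> &ranges, uint32_t g)
{
  auto it = std::lower_bound (ranges.begin (), ranges.end (), g,
                              [] (const Range &r, uint32_t k) { return r.last < k; });
  return (it != ranges.end () && it->first <= g) ? it->value + (g - it->first)
                                                 : kNotCovered;
}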


@@ -0,0 +1,135 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod, Garret Rieger
*/
#ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT1_HH
#define OT_LAYOUT_COMMON_COVERAGEFORMAT1_HH
namespace OT {
namespace Layout {
namespace Common {
#define NOT_COVERED ((unsigned int) -1)
template <typename Types>
struct CoverageFormat1_3
{
friend struct Coverage;
protected:
HBUINT16 coverageFormat; /* Format identifier--format = 1 */
SortedArray16Of<typename Types::HBGlyphID>
glyphArray; /* Array of GlyphIDs--in numerical order */
public:
DEFINE_SIZE_ARRAY (4, glyphArray);
private:
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (glyphArray.sanitize (c));
}
unsigned int get_coverage (hb_codepoint_t glyph_id) const
{
unsigned int i;
glyphArray.bfind (glyph_id, &i, HB_NOT_FOUND_STORE, NOT_COVERED);
return i;
}
unsigned get_population () const
{
return glyphArray.len;
}
template <typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, Iterator glyphs)
{
TRACE_SERIALIZE (this);
return_trace (glyphArray.serialize (c, glyphs));
}
bool intersects (const hb_set_t *glyphs) const
{
if (glyphArray.len > glyphs->get_population () * hb_bit_storage ((unsigned) glyphArray.len))
{
for (auto g : *glyphs)
if (get_coverage (g) != NOT_COVERED)
return true;
return false;
}
for (const auto& g : glyphArray.as_array ())
if (glyphs->has (g))
return true;
return false;
}
bool intersects_coverage (const hb_set_t *glyphs, unsigned int index) const
{ return glyphs->has (glyphArray[index]); }
template <typename IterableOut,
hb_requires (hb_is_sink_of (IterableOut, hb_codepoint_t))>
void intersect_set (const hb_set_t &glyphs, IterableOut&& intersect_glyphs) const
{
unsigned count = glyphArray.len;
for (unsigned i = 0; i < count; i++)
if (glyphs.has (glyphArray[i]))
intersect_glyphs << glyphArray[i];
}
unsigned cost () const { return hb_bit_storage ((unsigned) glyphArray.len); /* bsearch cost */ }
template <typename set_t>
bool collect_coverage (set_t *glyphs) const
{ return glyphs->add_sorted_array (glyphArray.as_array ()); }
public:
/* Older compilers need this to be public. */
struct iter_t
{
void init (const struct CoverageFormat1_3 &c_) { c = &c_; i = 0; }
bool __more__ () const { return i < c->glyphArray.len; }
void __next__ () { i++; }
hb_codepoint_t get_glyph () const { return c->glyphArray[i]; }
bool operator != (const iter_t& o) const
{ return i != o.i; }
iter_t __end__ () const { iter_t it; it.init (*c); it.i = c->glyphArray.len; return it; }
private:
const struct CoverageFormat1_3 *c;
unsigned int i;
};
private:
};
}
}
}
#endif // #ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT1_HH


@@ -0,0 +1,241 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod, Garret Rieger
*/
#ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT2_HH
#define OT_LAYOUT_COMMON_COVERAGEFORMAT2_HH
#include "RangeRecord.hh"
namespace OT {
namespace Layout {
namespace Common {
template <typename Types>
struct CoverageFormat2_4
{
friend struct Coverage;
protected:
HBUINT16 coverageFormat; /* Format identifier--format = 2 */
SortedArray16Of<RangeRecord<Types>>
rangeRecord; /* Array of glyph ranges--ordered by
* Start GlyphID. rangeCount entries
* long */
public:
DEFINE_SIZE_ARRAY (4, rangeRecord);
private:
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (rangeRecord.sanitize (c));
}
unsigned int get_coverage (hb_codepoint_t glyph_id) const
{
const RangeRecord<Types> &range = rangeRecord.bsearch (glyph_id);
return likely (range.first <= range.last)
? (unsigned int) range.value + (glyph_id - range.first)
: NOT_COVERED;
}
unsigned get_population () const
{
typename Types::large_int ret = 0;
for (const auto &r : rangeRecord)
ret += r.get_population ();
return ret > UINT_MAX ? UINT_MAX : (unsigned) ret;
}
template <typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, Iterator glyphs)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
unsigned num_ranges = 0;
hb_codepoint_t last = (hb_codepoint_t) -2;
for (auto g: glyphs)
{
if (last + 1 != g)
num_ranges++;
last = g;
}
if (unlikely (!rangeRecord.serialize (c, num_ranges))) return_trace (false);
if (!num_ranges) return_trace (true);
unsigned count = 0;
unsigned range = (unsigned) -1;
last = (hb_codepoint_t) -2;
bool unsorted = false;
for (auto g: glyphs)
{
if (last + 1 != g)
{
if (unlikely (last != (hb_codepoint_t) -2 && last + 1 > g))
unsorted = true;
range++;
rangeRecord.arrayZ[range].first = g;
rangeRecord.arrayZ[range].value = count;
}
rangeRecord.arrayZ[range].last = g;
last = g;
count++;
}
if (unlikely (unsorted))
rangeRecord.as_array ().qsort (RangeRecord<Types>::cmp_range);
return_trace (true);
}
bool intersects (const hb_set_t *glyphs) const
{
if (rangeRecord.len > glyphs->get_population () * hb_bit_storage ((unsigned) rangeRecord.len))
{
for (auto g : *glyphs)
if (get_coverage (g) != NOT_COVERED)
return true;
return false;
}
return hb_any (+ hb_iter (rangeRecord)
| hb_map ([glyphs] (const RangeRecord<Types> &range) { return range.intersects (*glyphs); }));
}
bool intersects_coverage (const hb_set_t *glyphs, unsigned int index) const
{
auto *range = rangeRecord.as_array ().bsearch (index);
if (range)
return range->intersects (*glyphs);
return false;
}
template <typename IterableOut,
hb_requires (hb_is_sink_of (IterableOut, hb_codepoint_t))>
void intersect_set (const hb_set_t &glyphs, IterableOut&& intersect_glyphs) const
{
/* Break out of the loop on overlapping or broken tables,
* to avoid fuzzer timeouts. */
hb_codepoint_t last = 0;
for (const auto& range : rangeRecord)
{
if (unlikely (range.first < last))
break;
last = range.last;
for (hb_codepoint_t g = range.first - 1;
glyphs.next (&g) && g <= last;)
intersect_glyphs << g;
}
}
unsigned cost () const { return hb_bit_storage ((unsigned) rangeRecord.len); /* bsearch cost */ }
template <typename set_t>
bool collect_coverage (set_t *glyphs) const
{
for (const auto& range: rangeRecord)
if (unlikely (!range.collect_coverage (glyphs)))
return false;
return true;
}
public:
/* Older compilers need this to be public. */
struct iter_t
{
void init (const CoverageFormat2_4 &c_)
{
c = &c_;
coverage = 0;
i = 0;
j = c->rangeRecord.len ? c->rangeRecord[0].first : 0;
if (unlikely (c->rangeRecord[0].first > c->rangeRecord[0].last))
{
/* Broken table. Skip. */
i = c->rangeRecord.len;
j = 0;
}
}
bool __more__ () const { return i < c->rangeRecord.len; }
void __next__ ()
{
if (j >= c->rangeRecord[i].last)
{
i++;
if (__more__ ())
{
unsigned int old = coverage;
j = c->rangeRecord.arrayZ[i].first;
coverage = c->rangeRecord.arrayZ[i].value;
if (unlikely (coverage != old + 1))
{
/* Broken table. Skip. Important to avoid DoS.
* Also, our callers depend on coverage being
* consecutive and monotonically increasing,
* i.e. iota(). */
i = c->rangeRecord.len;
j = 0;
return;
}
}
else
j = 0;
return;
}
coverage++;
j++;
}
hb_codepoint_t get_glyph () const { return j; }
bool operator != (const iter_t& o) const
{ return i != o.i || j != o.j; }
iter_t __end__ () const
{
iter_t it;
it.init (*c);
it.i = c->rangeRecord.len;
it.j = 0;
return it;
}
private:
const struct CoverageFormat2_4 *c;
unsigned int i, coverage;
hb_codepoint_t j;
};
private:
};
}
}
}
#endif // #ifndef OT_LAYOUT_COMMON_COVERAGEFORMAT2_HH
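Aside: the serialize() above run-length encodes a sorted glyph stream into range records: a new range opens whenever the sequence breaks, and each range's value field carries the running coverage index of its first glyph. An illustrative restatement (Range and to_ranges are hypothetical names):

#include <cstdint>
#include <vector>

struct Range { uint32_t first, last; unsigned value; };

static std::vector<Range> to_ranges (const std::vector<uint32_t> &glyphs)
{
  std::vector<Range> out;
  unsigned count = 0;  // running coverage index
  for (uint32_t g : glyphs)
  {
    if (out.empty () || out.back ().last + 1 != g)
      out.push_back ({g, g, count});  // sequence broke: open a new range
    else
      out.back ().last = g;           // contiguous: extend the current range
    count++;
  }
  return out;
}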


@@ -0,0 +1,97 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod, Garret Rieger
*/
#ifndef OT_LAYOUT_COMMON_RANGERECORD_HH
#define OT_LAYOUT_COMMON_RANGERECORD_HH
namespace OT {
namespace Layout {
namespace Common {
template <typename Types>
struct RangeRecord
{
typename Types::HBGlyphID first; /* First GlyphID in the range */
typename Types::HBGlyphID last; /* Last GlyphID in the range */
HBUINT16 value; /* Value */
DEFINE_SIZE_STATIC (2 + 2 * Types::size);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
int cmp (hb_codepoint_t g) const
{ return g < first ? -1 : g <= last ? 0 : +1; }
HB_INTERNAL static int cmp_range (const void *pa, const void *pb) {
const RangeRecord *a = (const RangeRecord *) pa;
const RangeRecord *b = (const RangeRecord *) pb;
if (a->first < b->first) return -1;
if (a->first > b->first) return +1;
if (a->last < b->last) return -1;
if (a->last > b->last) return +1;
if (a->value < b->value) return -1;
if (a->value > b->value) return +1;
return 0;
}
unsigned get_population () const
{
if (unlikely (last < first)) return 0;
return (last - first + 1);
}
bool intersects (const hb_set_t &glyphs) const
{ return glyphs.intersects (first, last); }
template <typename set_t>
bool collect_coverage (set_t *glyphs) const
{ return glyphs->add_range (first, last); }
};
}
}
}
// TODO(garretrieger): This was previously implemented using
// DECLARE_NULL_NAMESPACE_BYTES_TEMPLATE1 (OT, RangeRecord, 9);
// but that only works when there is only a single namespace level.
// The macro should probably be fixed so it can work in this situation.
extern HB_INTERNAL const unsigned char _hb_Null_OT_RangeRecord[9];
template <typename Spec>
struct Null<OT::Layout::Common::RangeRecord<Spec>> {
static OT::Layout::Common::RangeRecord<Spec> const & get_null () {
return *reinterpret_cast<const OT::Layout::Common::RangeRecord<Spec> *> (_hb_Null_OT_RangeRecord);
}
};
#endif // #ifndef OT_LAYOUT_COMMON_RANGERECORD_HH

File diff suppressed because it is too large

View File

@@ -0,0 +1,84 @@
#ifndef OT_LAYOUT_GPOS_ANCHOR_HH
#define OT_LAYOUT_GPOS_ANCHOR_HH
#include "AnchorFormat1.hh"
#include "AnchorFormat2.hh"
#include "AnchorFormat3.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct Anchor
{
protected:
union {
HBUINT16 format; /* Format identifier */
AnchorFormat1 format1;
AnchorFormat2 format2;
AnchorFormat3 format3;
} u;
public:
DEFINE_SIZE_UNION (2, format);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
hb_barrier ();
switch (u.format) {
case 1: return_trace (u.format1.sanitize (c));
case 2: return_trace (u.format2.sanitize (c));
case 3: return_trace (u.format3.sanitize (c));
default:return_trace (true);
}
}
void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id,
float *x, float *y) const
{
*x = *y = 0;
switch (u.format) {
case 1: u.format1.get_anchor (c, glyph_id, x, y); return;
case 2: u.format2.get_anchor (c, glyph_id, x, y); return;
case 3: u.format3.get_anchor (c, glyph_id, x, y); return;
default: return;
}
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
switch (u.format) {
case 1: return_trace (bool (reinterpret_cast<Anchor *> (u.format1.copy (c->serializer))));
case 2:
if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
{
// AnchorFormat 2 just contains extra hinting information, so
// if hints are being dropped, convert to format 1.
return_trace (bool (reinterpret_cast<Anchor *> (u.format1.copy (c->serializer))));
}
return_trace (bool (reinterpret_cast<Anchor *> (u.format2.copy (c->serializer))));
case 3: return_trace (u.format3.subset (c));
default:return_trace (false);
}
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
switch (u.format) {
case 1: case 2:
return;
case 3:
u.format3.collect_variation_indices (c);
return;
default: return;
}
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_ANCHOR_HH


@@ -0,0 +1,46 @@
#ifndef OT_LAYOUT_GPOS_ANCHORFORMAT1_HH
#define OT_LAYOUT_GPOS_ANCHORFORMAT1_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct AnchorFormat1
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
FWORD xCoordinate; /* Horizontal value--in design units */
FWORD yCoordinate; /* Vertical value--in design units */
public:
DEFINE_SIZE_STATIC (6);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id HB_UNUSED,
float *x, float *y) const
{
hb_font_t *font = c->font;
*x = font->em_fscale_x (xCoordinate);
*y = font->em_fscale_y (yCoordinate);
}
AnchorFormat1* copy (hb_serialize_context_t *c) const
{
TRACE_SERIALIZE (this);
AnchorFormat1* out = c->embed<AnchorFormat1> (this);
if (!out) return_trace (out);
out->format = 1;
return_trace (out);
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_ANCHORFORMAT1_HH

@@ -0,0 +1,58 @@
#ifndef OT_LAYOUT_GPOS_ANCHORFORMAT2_HH
#define OT_LAYOUT_GPOS_ANCHORFORMAT2_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct AnchorFormat2
{
protected:
HBUINT16 format; /* Format identifier--format = 2 */
FWORD xCoordinate; /* Horizontal value--in design units */
FWORD yCoordinate; /* Vertical value--in design units */
HBUINT16 anchorPoint; /* Index to glyph contour point */
public:
DEFINE_SIZE_STATIC (8);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id,
float *x, float *y) const
{
hb_font_t *font = c->font;
#ifdef HB_NO_HINTING
*x = font->em_fscale_x (xCoordinate);
*y = font->em_fscale_y (yCoordinate);
return;
#endif
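/* Descriptive note: the path below prefers the glyph's actual contour point
 * at the current ppem when hinting is in effect and the point exists;
 * otherwise it falls back to the design-space coordinates scaled by the font. */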
unsigned int x_ppem = font->x_ppem;
unsigned int y_ppem = font->y_ppem;
hb_position_t cx = 0, cy = 0;
bool ret;
ret = (x_ppem || y_ppem) &&
font->get_glyph_contour_point_for_origin (glyph_id, anchorPoint, HB_DIRECTION_LTR, &cx, &cy);
*x = ret && x_ppem ? cx : font->em_fscale_x (xCoordinate);
*y = ret && y_ppem ? cy : font->em_fscale_y (yCoordinate);
}
AnchorFormat2* copy (hb_serialize_context_t *c) const
{
TRACE_SERIALIZE (this);
return_trace (c->embed<AnchorFormat2> (this));
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_ANCHORFORMAT2_HH

@@ -0,0 +1,123 @@
#ifndef OT_LAYOUT_GPOS_ANCHORFORMAT3_HH
#define OT_LAYOUT_GPOS_ANCHORFORMAT3_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct AnchorFormat3
{
protected:
HBUINT16 format; /* Format identifier--format = 3 */
FWORD xCoordinate; /* Horizontal value--in design units */
FWORD yCoordinate; /* Vertical value--in design units */
Offset16To<Device>
xDeviceTable; /* Offset to Device table for X
* coordinate-- from beginning of
* Anchor table (may be NULL) */
Offset16To<Device>
yDeviceTable; /* Offset to Device table for Y
* coordinate-- from beginning of
* Anchor table (may be NULL) */
public:
DEFINE_SIZE_STATIC (10);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (unlikely (!c->check_struct (this))) return_trace (false);
return_trace (xDeviceTable.sanitize (c, this) && yDeviceTable.sanitize (c, this));
}
void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id HB_UNUSED,
float *x, float *y) const
{
hb_font_t *font = c->font;
*x = font->em_fscale_x (xCoordinate);
*y = font->em_fscale_y (yCoordinate);
if ((font->x_ppem || font->has_nonzero_coords) && xDeviceTable.sanitize (&c->sanitizer, this))
{
hb_barrier ();
*x += (this+xDeviceTable).get_x_delta (font, c->var_store, c->var_store_cache);
}
if ((font->y_ppem || font->has_nonzero_coords) && yDeviceTable.sanitize (&c->sanitizer, this))
{
hb_barrier ();
*y += (this+yDeviceTable).get_y_delta (font, c->var_store, c->var_store_cache);
}
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->embed (format))) return_trace (false);
if (unlikely (!c->serializer->embed (xCoordinate))) return_trace (false);
if (unlikely (!c->serializer->embed (yCoordinate))) return_trace (false);
unsigned x_varidx = xDeviceTable ? (this+xDeviceTable).get_variation_index () : HB_OT_LAYOUT_NO_VARIATIONS_INDEX;
if (x_varidx != HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
{
hb_pair_t<unsigned, int> *new_varidx_delta;
if (!c->plan->layout_variation_idx_delta_map.has (x_varidx, &new_varidx_delta))
return_trace (false);
x_varidx = hb_first (*new_varidx_delta);
int delta = hb_second (*new_varidx_delta);
if (delta != 0)
{
if (!c->serializer->check_assign (out->xCoordinate, xCoordinate + delta,
HB_SERIALIZE_ERROR_INT_OVERFLOW))
return_trace (false);
}
}
unsigned y_varidx = yDeviceTable ? (this+yDeviceTable).get_variation_index () : HB_OT_LAYOUT_NO_VARIATIONS_INDEX;
if (y_varidx != HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
{
hb_pair_t<unsigned, int> *new_varidx_delta;
if (!c->plan->layout_variation_idx_delta_map.has (y_varidx, &new_varidx_delta))
return_trace (false);
y_varidx = hb_first (*new_varidx_delta);
int delta = hb_second (*new_varidx_delta);
if (delta != 0)
{
if (!c->serializer->check_assign (out->yCoordinate, yCoordinate + delta,
HB_SERIALIZE_ERROR_INT_OVERFLOW))
return_trace (false);
}
}
bool no_downgrade = (!xDeviceTable.is_null () && !(this+xDeviceTable).is_variation_device ()) ||
x_varidx != HB_OT_LAYOUT_NO_VARIATIONS_INDEX ||
y_varidx != HB_OT_LAYOUT_NO_VARIATIONS_INDEX ||
(!yDeviceTable.is_null () && !(this+yDeviceTable).is_variation_device ());
if (!no_downgrade)
return_trace (c->serializer->check_assign (out->format, 1, HB_SERIALIZE_ERROR_INT_OVERFLOW));
if (!c->serializer->embed (xDeviceTable)) return_trace (false);
if (!c->serializer->embed (yDeviceTable)) return_trace (false);
out->xDeviceTable.serialize_copy (c->serializer, xDeviceTable, this, 0, hb_serialize_context_t::Head, &c->plan->layout_variation_idx_delta_map);
out->yDeviceTable.serialize_copy (c->serializer, yDeviceTable, this, 0, hb_serialize_context_t::Head, &c->plan->layout_variation_idx_delta_map);
return_trace (out);
}
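/* Illustrative note (hedged): when instancing, the original variation index
 * is remapped through layout_variation_idx_delta_map, and the delta produced
 * by partial instancing is folded directly into xCoordinate / yCoordinate
 * above before the device tables are copied. */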
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
(this+xDeviceTable).collect_variation_indices (c);
(this+yDeviceTable).collect_variation_indices (c);
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_ANCHORFORMAT3_HH

@@ -0,0 +1,87 @@
#ifndef OT_LAYOUT_GPOS_ANCHORMATRIX_HH
#define OT_LAYOUT_GPOS_ANCHORMATRIX_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct AnchorMatrix
{
HBUINT16 rows; /* Number of rows */
UnsizedArrayOf<Offset16To<Anchor, AnchorMatrix>>
matrixZ; /* Matrix of offsets to Anchor tables--
* from beginning of AnchorMatrix table */
public:
DEFINE_SIZE_ARRAY (2, matrixZ);
bool sanitize (hb_sanitize_context_t *c, unsigned int cols) const
{
TRACE_SANITIZE (this);
if (!c->check_struct (this)) return_trace (false);
hb_barrier ();
if (unlikely (hb_unsigned_mul_overflows (rows, cols))) return_trace (false);
unsigned int count = rows * cols;
if (!c->check_array (matrixZ.arrayZ, count)) return_trace (false);
if (c->lazy_some_gpos)
return_trace (true);
hb_barrier ();
for (unsigned int i = 0; i < count; i++)
if (!matrixZ[i].sanitize (c, this)) return_trace (false);
return_trace (true);
}
const Anchor& get_anchor (hb_ot_apply_context_t *c,
unsigned int row, unsigned int col,
unsigned int cols, bool *found) const
{
*found = false;
if (unlikely (row >= rows || col >= cols)) return Null (Anchor);
auto &offset = matrixZ[row * cols + col];
if (unlikely (!offset.sanitize (&c->sanitizer, this))) return Null (Anchor);
hb_barrier ();
*found = !offset.is_null ();
return this+offset;
}
template <typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
Iterator index_iter) const
{
for (unsigned i : index_iter)
(this+matrixZ[i]).collect_variation_indices (c);
}
template <typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool subset (hb_subset_context_t *c,
unsigned num_rows,
Iterator index_iter) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (this);
if (!index_iter) return_trace (false);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->rows = num_rows;
for (const unsigned i : index_iter)
{
auto *offset = c->serializer->embed (matrixZ[i]);
if (!offset) return_trace (false);
offset->serialize_subset (c, matrixZ[i], this);
}
return_trace (true);
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_ANCHORMATRIX_HH */

@@ -0,0 +1,14 @@
#ifndef OT_LAYOUT_GPOS_CHAINCONTEXTPOS_HH
#define OT_LAYOUT_GPOS_CHAINCONTEXTPOS_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct ChainContextPos : ChainContext {};
}
}
}
#endif /* OT_LAYOUT_GPOS_CHAINCONTEXTPOS_HH */

@@ -0,0 +1,33 @@
#ifndef OT_LAYOUT_GPOS_COMMON_HH
#define OT_LAYOUT_GPOS_COMMON_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
enum attach_type_t {
ATTACH_TYPE_NONE = 0x00,
/* Each attachment should be either a mark or a cursive; can't be both. */
ATTACH_TYPE_MARK = 0x01,
ATTACH_TYPE_CURSIVE = 0x02,
};
/* buffer **position** var allocations */
#define attach_chain() var.i16[0] /* glyph to which this attaches, relative to the current glyph; negative for going back, positive for forward. */
#define attach_type() var.u8[2] /* attachment type */
/* Note! if attach_chain() is zero, the value of attach_type() is irrelevant. */
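/* Illustrative sketch (not from the original source): a mark-attachment
 * lookup that attaches the mark at buffer index 5 to the base at index 3
 * records, as MarkArray::apply does,
 *
 *   pos[5].attach_type() = ATTACH_TYPE_MARK;
 *   pos[5].attach_chain() = (int) 3 - (int) 5; // -2: two glyphs back
 *
 * and GPOS::position_finish_offsets() later walks these chains to fold each
 * parent's offsets into its children. */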
template<typename Iterator, typename SrcLookup>
static void SinglePos_serialize (hb_serialize_context_t *c,
const SrcLookup *src,
Iterator it,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map,
unsigned new_format);
}
}
}
#endif // OT_LAYOUT_GPOS_COMMON_HH

@@ -0,0 +1,14 @@
#ifndef OT_LAYOUT_GPOS_CONTEXTPOS_HH
#define OT_LAYOUT_GPOS_CONTEXTPOS_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct ContextPos : Context {};
}
}
}
#endif /* OT_LAYOUT_GPOS_CONTEXTPOS_HH */

@@ -0,0 +1,35 @@
#ifndef OT_LAYOUT_GPOS_CURSIVEPOS_HH
#define OT_LAYOUT_GPOS_CURSIVEPOS_HH
#include "CursivePosFormat1.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct CursivePos
{
protected:
union {
HBUINT16 format; /* Format identifier */
CursivePosFormat1 format1;
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());
}
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_CURSIVEPOS_HH */

@@ -0,0 +1,311 @@
#ifndef OT_LAYOUT_GPOS_CURSIVEPOSFORMAT1_HH
#define OT_LAYOUT_GPOS_CURSIVEPOSFORMAT1_HH
#include "Anchor.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct EntryExitRecord
{
friend struct CursivePosFormat1;
bool sanitize (hb_sanitize_context_t *c, const struct CursivePosFormat1 *base) const
{
TRACE_SANITIZE (this);
return_trace (entryAnchor.sanitize (c, base) && exitAnchor.sanitize (c, base));
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
const struct CursivePosFormat1 *src_base) const
{
(src_base+entryAnchor).collect_variation_indices (c);
(src_base+exitAnchor).collect_variation_indices (c);
}
bool subset (hb_subset_context_t *c,
const struct CursivePosFormat1 *src_base) const
{
TRACE_SERIALIZE (this);
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (false);
bool ret = false;
ret |= out->entryAnchor.serialize_subset (c, entryAnchor, src_base);
ret |= out->exitAnchor.serialize_subset (c, exitAnchor, src_base);
return_trace (ret);
}
protected:
Offset16To<Anchor, struct CursivePosFormat1>
entryAnchor; /* Offset to EntryAnchor table--from
* beginning of CursivePos
* subtable--may be NULL */
Offset16To<Anchor, struct CursivePosFormat1>
exitAnchor; /* Offset to ExitAnchor table--from
* beginning of CursivePos
* subtable--may be NULL */
public:
DEFINE_SIZE_STATIC (4);
};
static void
reverse_cursive_minor_offset (hb_glyph_position_t *pos, unsigned int i,
                              hb_direction_t direction, unsigned int new_parent)
{
int chain = pos[i].attach_chain(), type = pos[i].attach_type();
if (likely (!chain || 0 == (type & ATTACH_TYPE_CURSIVE)))
return;
pos[i].attach_chain() = 0;
unsigned int j = (int) i + chain;
/* Stop if we see the new parent in the chain. */
if (j == new_parent)
return;
reverse_cursive_minor_offset (pos, j, direction, new_parent);
if (HB_DIRECTION_IS_HORIZONTAL (direction))
pos[j].y_offset = -pos[i].y_offset;
else
pos[j].x_offset = -pos[i].x_offset;
pos[j].attach_chain() = -chain;
pos[j].attach_type() = type;
}
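/* Illustrative walk-through (hedged): suppose glyph 2 is attached to glyph 0
 * (attach_chain() == -2) and is being re-attached to glyph 3. The recursion
 * above clears 2's old link and flips it, leaving glyph 0 attached to glyph 2
 * (attach_chain() == +2) with the negated cross-stream offset, so the whole
 * previous subtree now hangs off the new parent. */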
struct CursivePosFormat1
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of subtable */
Array16Of<EntryExitRecord>
entryExitRecord; /* Array of EntryExit records--in
* Coverage Index order */
public:
DEFINE_SIZE_ARRAY (6, entryExitRecord);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (unlikely (!coverage.sanitize (c, this)))
return_trace (false);
if (c->lazy_some_gpos)
return_trace (entryExitRecord.sanitize_shallow (c));
else
return_trace (entryExitRecord.sanitize (c, this));
}
bool intersects (const hb_set_t *glyphs) const
{ return (this+coverage).intersects (glyphs); }
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
+ hb_zip (this+coverage, entryExitRecord)
| hb_filter (c->glyph_set, hb_first)
| hb_map (hb_second)
| hb_apply ([&] (const EntryExitRecord& record) { record.collect_variation_indices (c, this); })
;
}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ if (unlikely (!(this+coverage).collect_coverage (c->input))) return; }
const Coverage &get_coverage () const { return this+coverage; }
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
const EntryExitRecord &this_record = entryExitRecord[(this+coverage).get_coverage (buffer->cur().codepoint)];
if (!this_record.entryAnchor ||
unlikely (!this_record.entryAnchor.sanitize (&c->sanitizer, this))) return_trace (false);
hb_barrier ();
auto &skippy_iter = c->iter_input;
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_from;
if (unlikely (!skippy_iter.prev (&unsafe_from)))
{
buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1);
return_trace (false);
}
const EntryExitRecord &prev_record = entryExitRecord[(this+coverage).get_coverage (buffer->info[skippy_iter.idx].codepoint)];
if (!prev_record.exitAnchor ||
unlikely (!prev_record.exitAnchor.sanitize (&c->sanitizer, this)))
{
buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
return_trace (false);
}
hb_barrier ();
unsigned int i = skippy_iter.idx;
unsigned int j = buffer->idx;
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"cursive attaching glyph at %u to glyph at %u",
i, j);
}
buffer->unsafe_to_break (i, j + 1);
float entry_x, entry_y, exit_x, exit_y;
(this+prev_record.exitAnchor).get_anchor (c, buffer->info[i].codepoint, &exit_x, &exit_y);
(this+this_record.entryAnchor).get_anchor (c, buffer->info[j].codepoint, &entry_x, &entry_y);
hb_glyph_position_t *pos = buffer->pos;
hb_position_t d;
/* Main-direction adjustment */
switch (c->direction) {
case HB_DIRECTION_LTR:
pos[i].x_advance = roundf (exit_x) + pos[i].x_offset;
d = roundf (entry_x) + pos[j].x_offset;
pos[j].x_advance -= d;
pos[j].x_offset -= d;
break;
case HB_DIRECTION_RTL:
d = roundf (exit_x) + pos[i].x_offset;
pos[i].x_advance -= d;
pos[i].x_offset -= d;
pos[j].x_advance = roundf (entry_x) + pos[j].x_offset;
break;
case HB_DIRECTION_TTB:
pos[i].y_advance = roundf (exit_y) + pos[i].y_offset;
d = roundf (entry_y) + pos[j].y_offset;
pos[j].y_advance -= d;
pos[j].y_offset -= d;
break;
case HB_DIRECTION_BTT:
d = roundf (exit_y) + pos[i].y_offset;
pos[i].y_advance -= d;
pos[i].y_offset -= d;
pos[j].y_advance = roundf (entry_y);
break;
case HB_DIRECTION_INVALID:
default:
break;
}
/* Cross-direction adjustment */
/* We attach child to parent (think graph theory and rooted trees), where
 * the root stays on the baseline and each node aligns itself against its
 * parent.
 *
 * Optimize things for the case of RightToLeft, as that's most common in
 * Arabic. */
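/* Worked example (illustrative numbers): in horizontal text with the
 * RightToLeft lookup flag set, the previous glyph i is the child and the
 * current glyph j its parent; with exit_y == 100 on i and entry_y == 40 on
 * j, the child gets y_offset = roundf (40 - 100) == -60, pulling its exit
 * anchor onto its parent's entry anchor. */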
unsigned int child = i;
unsigned int parent = j;
hb_position_t x_offset = roundf (entry_x - exit_x);
hb_position_t y_offset = roundf (entry_y - exit_y);
if (!(c->lookup_props & LookupFlag::RightToLeft))
{
unsigned int k = child;
child = parent;
parent = k;
x_offset = -x_offset;
y_offset = -y_offset;
}
/* If the child was already connected to someone else, walk through its old
 * chain and reverse the link direction, such that the whole tree of its
 * previous connection now attaches to the new parent. Watch out for the
 * case where the new parent is on the path from the old chain...
 */
reverse_cursive_minor_offset (pos, child, c->direction, parent);
pos[child].attach_type() = ATTACH_TYPE_CURSIVE;
pos[child].attach_chain() = (int) parent - (int) child;
buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT;
if (likely (HB_DIRECTION_IS_HORIZONTAL (c->direction)))
pos[child].y_offset = y_offset;
else
pos[child].x_offset = x_offset;
/* If parent was attached to child, separate them.
* https://github.com/harfbuzz/harfbuzz/issues/2469
*/
if (unlikely (pos[parent].attach_chain() == -pos[child].attach_chain()))
{
pos[parent].attach_chain() = 0;
if (likely (HB_DIRECTION_IS_HORIZONTAL (c->direction)))
pos[parent].y_offset = 0;
else
pos[parent].x_offset = 0;
}
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"cursive attached glyph at %u to glyph at %u",
i, j);
}
buffer->idx++;
return_trace (true);
}
template <typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
void serialize (hb_subset_context_t *c,
Iterator it,
const struct CursivePosFormat1 *src_base)
{
if (unlikely (!c->serializer->extend_min ((*this)))) return;
this->format = 1;
this->entryExitRecord.len = it.len ();
for (const EntryExitRecord& entry_record : + it
| hb_map (hb_second))
entry_record.subset (c, src_base);
auto glyphs =
+ it
| hb_map_retains_sorting (hb_first)
;
coverage.serialize_serialize (c->serializer, glyphs);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
auto it =
+ hb_zip (this+coverage, entryExitRecord)
| hb_filter (glyphset, hb_first)
| hb_map_retains_sorting ([&] (hb_pair_t<hb_codepoint_t, const EntryExitRecord&> p) -> hb_pair_t<hb_codepoint_t, const EntryExitRecord&>
{ return hb_pair (glyph_map[p.first], p.second);})
;
bool ret = bool (it);
out->serialize (c, it, this);
return_trace (ret);
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_CURSIVEPOSFORMAT1_HH */

@@ -0,0 +1,17 @@
#ifndef OT_LAYOUT_GPOS_EXTENSIONPOS_HH
#define OT_LAYOUT_GPOS_EXTENSIONPOS_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct ExtensionPos : Extension<ExtensionPos>
{
typedef struct PosLookupSubTable SubTable;
};
}
}
}
#endif /* OT_LAYOUT_GPOS_EXTENSIONPOS_HH */

@@ -0,0 +1,174 @@
#ifndef OT_LAYOUT_GPOS_GPOS_HH
#define OT_LAYOUT_GPOS_GPOS_HH
#include "../../../hb-ot-layout-common.hh"
#include "../../../hb-ot-layout-gsubgpos.hh"
#include "Common.hh"
#include "PosLookup.hh"
namespace OT {
using Layout::GPOS_impl::PosLookup;
namespace Layout {
static void
propagate_attachment_offsets (hb_glyph_position_t *pos,
unsigned int len,
unsigned int i,
hb_direction_t direction,
unsigned nesting_level = HB_MAX_NESTING_LEVEL);
/*
* GPOS -- Glyph Positioning
* https://docs.microsoft.com/en-us/typography/opentype/spec/gpos
*/
struct GPOS : GSUBGPOS
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_GPOS;
using Lookup = PosLookup;
const PosLookup& get_lookup (unsigned int i) const
{ return static_cast<const PosLookup &> (GSUBGPOS::get_lookup (i)); }
static inline void position_start (hb_font_t *font, hb_buffer_t *buffer);
static inline void position_finish_advances (hb_font_t *font, hb_buffer_t *buffer);
static inline void position_finish_offsets (hb_font_t *font, hb_buffer_t *buffer);
bool subset (hb_subset_context_t *c) const
{
hb_subset_layout_context_t l (c, tableTag);
return GSUBGPOS::subset<PosLookup> (&l);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (GSUBGPOS::sanitize<PosLookup> (c));
}
HB_INTERNAL bool is_blocklisted (hb_blob_t *blob,
hb_face_t *face) const;
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
for (unsigned i = 0; i < GSUBGPOS::get_lookup_count (); i++)
{
if (!c->gpos_lookups->has (i)) continue;
const PosLookup &l = get_lookup (i);
l.dispatch (c);
}
}
void closure_lookups (hb_face_t *face,
const hb_set_t *glyphs,
hb_set_t *lookup_indexes /* IN/OUT */) const
{ GSUBGPOS::closure_lookups<PosLookup> (face, glyphs, lookup_indexes); }
typedef GSUBGPOS::accelerator_t<GPOS> accelerator_t;
};
static void
propagate_attachment_offsets (hb_glyph_position_t *pos,
unsigned int len,
unsigned int i,
hb_direction_t direction,
unsigned nesting_level)
{
/* Adjusts offsets of attached glyphs (both cursive and mark) to accumulate
 * the offset of the glyph they are attached to. */
int chain = pos[i].attach_chain(), type = pos[i].attach_type();
if (likely (!chain))
return;
pos[i].attach_chain() = 0;
unsigned int j = (int) i + chain;
if (unlikely (j >= len))
return;
if (unlikely (!nesting_level))
return;
propagate_attachment_offsets (pos, len, j, direction, nesting_level - 1);
assert (!!(type & GPOS_impl::ATTACH_TYPE_MARK) ^ !!(type & GPOS_impl::ATTACH_TYPE_CURSIVE));
if (type & GPOS_impl::ATTACH_TYPE_CURSIVE)
{
if (HB_DIRECTION_IS_HORIZONTAL (direction))
pos[i].y_offset += pos[j].y_offset;
else
pos[i].x_offset += pos[j].x_offset;
}
else /*if (type & GPOS_impl::ATTACH_TYPE_MARK)*/
{
pos[i].x_offset += pos[j].x_offset;
pos[i].y_offset += pos[j].y_offset;
assert (j < i);
if (HB_DIRECTION_IS_FORWARD (direction))
for (unsigned int k = j; k < i; k++) {
pos[i].x_offset -= pos[k].x_advance;
pos[i].y_offset -= pos[k].y_advance;
}
else
for (unsigned int k = j + 1; k < i + 1; k++) {
pos[i].x_offset += pos[k].x_advance;
pos[i].y_offset += pos[k].y_advance;
}
}
}
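/* Illustrative sketch of the mark branch above: in a forward (LTR) run where
 * the mark at i == 4 is chained to the base at j == 2, the mark inherits the
 * base's x/y offsets and then subtracts the advances of glyphs 2 and 3, so
 * its offset ends up relative to its own pen position instead of the base's. */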
void
GPOS::position_start (hb_font_t *font HB_UNUSED, hb_buffer_t *buffer)
{
unsigned int count = buffer->len;
for (unsigned int i = 0; i < count; i++)
buffer->pos[i].attach_chain() = buffer->pos[i].attach_type() = 0;
}
void
GPOS::position_finish_advances (hb_font_t *font HB_UNUSED, hb_buffer_t *buffer HB_UNUSED)
{
//_hb_buffer_assert_gsubgpos_vars (buffer);
}
void
GPOS::position_finish_offsets (hb_font_t *font, hb_buffer_t *buffer)
{
_hb_buffer_assert_gsubgpos_vars (buffer);
unsigned int len;
hb_glyph_position_t *pos = hb_buffer_get_glyph_positions (buffer, &len);
hb_direction_t direction = buffer->props.direction;
/* Handle attachments */
if (buffer->scratch_flags & HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT)
for (unsigned i = 0; i < len; i++)
propagate_attachment_offsets (pos, len, i, direction);
if (unlikely (font->slant_xy) &&
HB_DIRECTION_IS_HORIZONTAL (direction))
{
/* Slanting of shaping results is only supported for horizontal text,
 * as it gets weird otherwise. */
for (unsigned i = 0; i < len; i++)
if (unlikely (pos[i].y_offset))
pos[i].x_offset += roundf (font->slant_xy * pos[i].y_offset);
}
}
}
struct GPOS_accelerator_t : Layout::GPOS::accelerator_t {
GPOS_accelerator_t (hb_face_t *face) : Layout::GPOS::accelerator_t (face) {}
};
}
#endif /* OT_LAYOUT_GPOS_GPOS_HH */

@@ -0,0 +1,57 @@
#ifndef OT_LAYOUT_GPOS_LIGATUREARRAY_HH
#define OT_LAYOUT_GPOS_LIGATUREARRAY_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
typedef AnchorMatrix LigatureAttach; /* component-major--
* in order of writing direction--,
* mark-minor--
* ordered by class--zero-based. */
/* Array of LigatureAttach tables ordered by LigatureCoverage Index */
struct LigatureArray : List16OfOffset16To<LigatureAttach>
{
template <typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool subset (hb_subset_context_t *c,
Iterator coverage,
unsigned class_count,
const hb_map_t *klass_mapping) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
bool ret = false;
for (const auto _ : + hb_zip (coverage, *this)
| hb_filter (glyphset, hb_first))
{
auto *matrix = out->serialize_append (c->serializer);
if (unlikely (!matrix)) return_trace (false);
const LigatureAttach& src = (this + _.second);
auto indexes =
+ hb_range (src.rows * class_count)
| hb_filter ([=] (unsigned index) { return klass_mapping->has (index % class_count); })
;
ret |= matrix->serialize_subset (c,
_.second,
this,
src.rows,
indexes);
}
return_trace (ret);
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_LIGATUREARRAY_HH */

@@ -0,0 +1,128 @@
#ifndef OT_LAYOUT_GPOS_MARKARRAY_HH
#define OT_LAYOUT_GPOS_MARKARRAY_HH
#include "AnchorMatrix.hh"
#include "MarkRecord.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Coverage order */
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (Array16Of<MarkRecord>::sanitize (c, this));
}
bool apply (hb_ot_apply_context_t *c,
unsigned int mark_index, unsigned int glyph_index,
const AnchorMatrix &anchors, unsigned int class_count,
unsigned int glyph_pos) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
const MarkRecord &record = Array16Of<MarkRecord>::operator[](mark_index);
unsigned int mark_class = record.klass;
const Anchor& mark_anchor = this + record.markAnchor;
bool found;
const Anchor& glyph_anchor = anchors.get_anchor (c, glyph_index, mark_class, class_count, &found);
/* If this subtable doesn't have an anchor for this base and this class,
* return false such that the subsequent subtables have a chance at it. */
if (unlikely (!found)) return_trace (false);
float mark_x, mark_y, base_x, base_y;
buffer->unsafe_to_break (glyph_pos, buffer->idx + 1);
mark_anchor.get_anchor (c, buffer->cur().codepoint, &mark_x, &mark_y);
glyph_anchor.get_anchor (c, buffer->info[glyph_pos].codepoint, &base_x, &base_y);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"attaching mark glyph at %u to glyph at %u",
c->buffer->idx, glyph_pos);
}
hb_glyph_position_t &o = buffer->cur_pos();
o.x_offset = roundf (base_x - mark_x);
o.y_offset = roundf (base_y - mark_y);
o.attach_type() = ATTACH_TYPE_MARK;
o.attach_chain() = (int) glyph_pos - (int) buffer->idx;
buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT;
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"attached mark glyph at %u to glyph at %u",
c->buffer->idx, glyph_pos);
}
buffer->idx++;
return_trace (true);
}
template <typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool subset (hb_subset_context_t *c,
Iterator coverage,
const hb_map_t *klass_mapping) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
auto* out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
auto mark_iter =
+ hb_zip (coverage, this->iter ())
| hb_filter (glyphset, hb_first)
| hb_map (hb_second)
;
bool ret = false;
unsigned new_length = 0;
for (const auto& mark_record : mark_iter) {
ret |= mark_record.subset (c, this, klass_mapping);
new_length++;
}
if (unlikely (!c->serializer->check_assign (out->len, new_length,
HB_SERIALIZE_ERROR_ARRAY_OVERFLOW)))
return_trace (false);
return_trace (ret);
}
};
HB_INTERNAL inline
void Markclass_closure_and_remap_indexes (const Coverage &mark_coverage,
const MarkArray &mark_array,
const hb_set_t &glyphset,
hb_map_t* klass_mapping /* INOUT */)
{
hb_set_t orig_classes;
+ hb_zip (mark_coverage, mark_array)
| hb_filter (glyphset, hb_first)
| hb_map (hb_second)
| hb_map (&MarkRecord::get_class)
| hb_sink (orig_classes)
;
unsigned idx = 0;
for (auto klass : orig_classes.iter ())
{
if (klass_mapping->has (klass)) continue;
klass_mapping->set (klass, idx);
idx++;
}
}
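/* Example (illustrative): if the retained marks only use classes {0, 3, 7},
 * the mapping becomes {0 -> 0, 3 -> 1, 7 -> 2} and the subsetted mark
 * attachment tables are rebuilt with classCount == 3. */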
}
}
}
#endif /* OT_LAYOUT_GPOS_MARKARRAY_HH */

@@ -0,0 +1,41 @@
#ifndef OT_LAYOUT_GPOS_MARKBASEPOS_HH
#define OT_LAYOUT_GPOS_MARKBASEPOS_HH
#include "MarkBasePosFormat1.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct MarkBasePos
{
protected:
union {
HBUINT16 format; /* Format identifier */
MarkBasePosFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MarkBasePosFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_MARKBASEPOS_HH */

@@ -0,0 +1,243 @@
#ifndef OT_LAYOUT_GPOS_MARKBASEPOSFORMAT1_HH
#define OT_LAYOUT_GPOS_MARKBASEPOSFORMAT1_HH
#include "MarkArray.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
typedef AnchorMatrix BaseArray; /* base-major--
* in order of BaseCoverage Index--,
* mark-minor--
* ordered by class--zero-based. */
template <typename Types>
struct MarkBasePosFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
typename Types::template OffsetTo<Coverage>
markCoverage; /* Offset to MarkCoverage table--from
* beginning of MarkBasePos subtable */
typename Types::template OffsetTo<Coverage>
baseCoverage; /* Offset to BaseCoverage table--from
* beginning of MarkBasePos subtable */
HBUINT16 classCount; /* Number of classes defined for marks */
typename Types::template OffsetTo<MarkArray>
markArray; /* Offset to MarkArray table--from
* beginning of MarkBasePos subtable */
typename Types::template OffsetTo<BaseArray>
baseArray; /* Offset to BaseArray table--from
* beginning of MarkBasePos subtable */
public:
DEFINE_SIZE_STATIC (4 + 4 * Types::size);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
markCoverage.sanitize (c, this) &&
baseCoverage.sanitize (c, this) &&
markArray.sanitize (c, this) &&
baseArray.sanitize (c, this, (unsigned int) classCount));
}
bool intersects (const hb_set_t *glyphs) const
{
return (this+markCoverage).intersects (glyphs) &&
(this+baseCoverage).intersects (glyphs);
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
+ hb_zip (this+markCoverage, this+markArray)
| hb_filter (c->glyph_set, hb_first)
| hb_map (hb_second)
| hb_apply ([&] (const MarkRecord& record) { record.collect_variation_indices (c, &(this+markArray)); })
;
hb_map_t klass_mapping;
Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, *c->glyph_set, &klass_mapping);
unsigned basecount = (this+baseArray).rows;
auto base_iter =
+ hb_zip (this+baseCoverage, hb_range (basecount))
| hb_filter (c->glyph_set, hb_first)
| hb_map (hb_second)
;
hb_sorted_vector_t<unsigned> base_indexes;
for (const unsigned row : base_iter)
{
+ hb_range ((unsigned) classCount)
| hb_filter (klass_mapping)
| hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; })
| hb_sink (base_indexes)
;
}
(this+baseArray).collect_variation_indices (c, base_indexes.iter ());
}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+markCoverage).collect_coverage (c->input))) return;
if (unlikely (!(this+baseCoverage).collect_coverage (c->input))) return;
}
const Coverage &get_coverage () const { return this+markCoverage; }
static inline bool accept (hb_buffer_t *buffer, unsigned idx)
{
/* We only want to attach to the first of a MultipleSubst sequence.
* https://github.com/harfbuzz/harfbuzz/issues/740
* Reject others...
* ...but stop if we find a mark in the MultipleSubst sequence:
* https://github.com/harfbuzz/harfbuzz/issues/1020 */
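/* Sketch (illustrative): if MultipleSubst expanded one base into three
 * copies, only the first copy accepts the mark; later copies are rejected
 * as continuations of the same sequence, unless a mark appears between
 * them (see issue 1020 above). */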
return !_hb_glyph_info_multiplied (&buffer->info[idx]) ||
0 == _hb_glyph_info_get_lig_comp (&buffer->info[idx]) ||
(idx == 0 ||
_hb_glyph_info_is_mark (&buffer->info[idx - 1]) ||
!_hb_glyph_info_multiplied (&buffer->info[idx - 1]) ||
_hb_glyph_info_get_lig_id (&buffer->info[idx]) !=
_hb_glyph_info_get_lig_id (&buffer->info[idx - 1]) ||
_hb_glyph_info_get_lig_comp (&buffer->info[idx]) !=
_hb_glyph_info_get_lig_comp (&buffer->info[idx - 1]) + 1
);
}
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
unsigned int mark_index = (this+markCoverage).get_coverage (buffer->cur().codepoint);
if (likely (mark_index == NOT_COVERED)) return_trace (false);
/* Now we search backwards for a non-mark glyph.
* We don't use skippy_iter.prev() to avoid O(n^2) behavior. */
auto &skippy_iter = c->iter_input;
skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
if (c->last_base_until > buffer->idx)
{
c->last_base_until = 0;
c->last_base = -1;
}
unsigned j;
for (j = buffer->idx; j > c->last_base_until; j--)
{
auto match = skippy_iter.match (buffer->info[j - 1]);
if (match == skippy_iter.MATCH)
{
// https://github.com/harfbuzz/harfbuzz/issues/4124
if (!accept (buffer, j - 1) &&
NOT_COVERED == (this+baseCoverage).get_coverage (buffer->info[j - 1].codepoint))
match = skippy_iter.SKIP;
}
if (match == skippy_iter.MATCH)
{
c->last_base = (signed) j - 1;
break;
}
}
c->last_base_until = buffer->idx;
if (c->last_base == -1)
{
buffer->unsafe_to_concat_from_outbuffer (0, buffer->idx + 1);
return_trace (false);
}
unsigned idx = (unsigned) c->last_base;
/* Checking that matched glyph is actually a base glyph by GDEF is too strong; disabled */
//if (!_hb_glyph_info_is_base_glyph (&buffer->info[idx])) { return_trace (false); }
unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[idx].codepoint);
if (base_index == NOT_COVERED)
{
buffer->unsafe_to_concat_from_outbuffer (idx, buffer->idx + 1);
return_trace (false);
}
return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, idx));
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
hb_map_t klass_mapping;
Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, glyphset, &klass_mapping);
if (!klass_mapping.get_population ()) return_trace (false);
out->classCount = klass_mapping.get_population ();
auto mark_iter =
+ hb_zip (this+markCoverage, this+markArray)
| hb_filter (glyphset, hb_first)
;
hb_sorted_vector_t<hb_codepoint_t> new_coverage;
+ mark_iter
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
if (!out->markCoverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
if (unlikely (!out->markArray.serialize_subset (c, markArray, this,
(this+markCoverage).iter (),
&klass_mapping)))
return_trace (false);
unsigned basecount = (this+baseArray).rows;
auto base_iter =
+ hb_zip (this+baseCoverage, hb_range (basecount))
| hb_filter (glyphset, hb_first)
;
new_coverage.reset ();
+ base_iter
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
if (!out->baseCoverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
hb_sorted_vector_t<unsigned> base_indexes;
for (const unsigned row : + base_iter
| hb_map (hb_second))
{
+ hb_range ((unsigned) classCount)
| hb_filter (klass_mapping)
| hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; })
| hb_sink (base_indexes)
;
}
return_trace (out->baseArray.serialize_subset (c, baseArray, this,
base_iter.len (),
base_indexes.iter ()));
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_MARKBASEPOSFORMAT1_HH */

@@ -0,0 +1,41 @@
#ifndef OT_LAYOUT_GPOS_MARKLIGPOS_HH
#define OT_LAYOUT_GPOS_MARKLIGPOS_HH
#include "MarkLigPosFormat1.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct MarkLigPos
{
protected:
union {
HBUINT16 format; /* Format identifier */
MarkLigPosFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MarkLigPosFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_MARKLIGPOS_HH */

@@ -0,0 +1,224 @@
#ifndef OT_LAYOUT_GPOS_MARKLIGPOSFORMAT1_HH
#define OT_LAYOUT_GPOS_MARKLIGPOSFORMAT1_HH
#include "LigatureArray.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
template <typename Types>
struct MarkLigPosFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
typename Types::template OffsetTo<Coverage>
markCoverage; /* Offset to Mark Coverage table--from
* beginning of MarkLigPos subtable */
typename Types::template OffsetTo<Coverage>
ligatureCoverage; /* Offset to Ligature Coverage
* table--from beginning of MarkLigPos
* subtable */
HBUINT16 classCount; /* Number of defined mark classes */
typename Types::template OffsetTo<MarkArray>
markArray; /* Offset to MarkArray table--from
* beginning of MarkLigPos subtable */
typename Types::template OffsetTo<LigatureArray>
ligatureArray; /* Offset to LigatureArray table--from
* beginning of MarkLigPos subtable */
public:
DEFINE_SIZE_STATIC (4 + 4 * Types::size);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
markCoverage.sanitize (c, this) &&
ligatureCoverage.sanitize (c, this) &&
markArray.sanitize (c, this) &&
ligatureArray.sanitize (c, this, (unsigned int) classCount));
}
bool intersects (const hb_set_t *glyphs) const
{
return (this+markCoverage).intersects (glyphs) &&
(this+ligatureCoverage).intersects (glyphs);
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
+ hb_zip (this+markCoverage, this+markArray)
| hb_filter (c->glyph_set, hb_first)
| hb_map (hb_second)
| hb_apply ([&] (const MarkRecord& record) { record.collect_variation_indices (c, &(this+markArray)); })
;
hb_map_t klass_mapping;
Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, *c->glyph_set, &klass_mapping);
unsigned ligcount = (this+ligatureArray).len;
auto lig_iter =
+ hb_zip (this+ligatureCoverage, hb_range (ligcount))
| hb_filter (c->glyph_set, hb_first)
| hb_map (hb_second)
;
const LigatureArray& lig_array = this+ligatureArray;
for (const unsigned i : lig_iter)
{
hb_sorted_vector_t<unsigned> lig_indexes;
unsigned row_count = lig_array[i].rows;
for (unsigned row : + hb_range (row_count))
{
+ hb_range ((unsigned) classCount)
| hb_filter (klass_mapping)
| hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; })
| hb_sink (lig_indexes)
;
}
lig_array[i].collect_variation_indices (c, lig_indexes.iter ());
}
}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+markCoverage).collect_coverage (c->input))) return;
if (unlikely (!(this+ligatureCoverage).collect_coverage (c->input))) return;
}
const Coverage &get_coverage () const { return this+markCoverage; }
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
unsigned int mark_index = (this+markCoverage).get_coverage (buffer->cur().codepoint);
if (likely (mark_index == NOT_COVERED)) return_trace (false);
/* Now we search backwards for a non-mark glyph */
auto &skippy_iter = c->iter_input;
skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
if (c->last_base_until > buffer->idx)
{
c->last_base_until = 0;
c->last_base = -1;
}
unsigned j;
for (j = buffer->idx; j > c->last_base_until; j--)
{
auto match = skippy_iter.match (buffer->info[j - 1]);
if (match == skippy_iter.MATCH)
{
c->last_base = (signed) j - 1;
break;
}
}
c->last_base_until = buffer->idx;
if (c->last_base == -1)
{
buffer->unsafe_to_concat_from_outbuffer (0, buffer->idx + 1);
return_trace (false);
}
unsigned idx = (unsigned) c->last_base;
/* Checking that matched glyph is actually a ligature by GDEF is too strong; disabled */
//if (!_hb_glyph_info_is_ligature (&buffer->info[idx])) { return_trace (false); }
unsigned int lig_index = (this+ligatureCoverage).get_coverage (buffer->info[idx].codepoint);
if (lig_index == NOT_COVERED)
{
buffer->unsafe_to_concat_from_outbuffer (idx, buffer->idx + 1);
return_trace (false);
}
const LigatureArray& lig_array = this+ligatureArray;
const LigatureAttach& lig_attach = lig_array[lig_index];
/* Find component to attach to */
unsigned int comp_count = lig_attach.rows;
if (unlikely (!comp_count))
{
buffer->unsafe_to_concat_from_outbuffer (idx, buffer->idx + 1);
return_trace (false);
}
/* We must now check whether the ligature ID of the current mark glyph
* is identical to the ligature ID of the found ligature. If yes, we
* can directly use the component index. If not, we attach the mark
* glyph to the last component of the ligature. */
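/* Illustrative example: for a three-component ligature, a mark carrying the
 * same lig_id with lig_comp == 2 attaches to component index
 * hb_min (3, 2) - 1 == 1; a mark with a non-matching lig_id falls back to
 * the last component, index 2. */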
unsigned int comp_index;
unsigned int lig_id = _hb_glyph_info_get_lig_id (&buffer->info[idx]);
unsigned int mark_id = _hb_glyph_info_get_lig_id (&buffer->cur());
unsigned int mark_comp = _hb_glyph_info_get_lig_comp (&buffer->cur());
if (lig_id && lig_id == mark_id && mark_comp > 0)
comp_index = hb_min (comp_count, _hb_glyph_info_get_lig_comp (&buffer->cur())) - 1;
else
comp_index = comp_count - 1;
return_trace ((this+markArray).apply (c, mark_index, comp_index, lig_attach, classCount, idx));
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = c->plan->glyph_map_gsub;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
hb_map_t klass_mapping;
Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, glyphset, &klass_mapping);
if (!klass_mapping.get_population ()) return_trace (false);
out->classCount = klass_mapping.get_population ();
auto mark_iter =
+ hb_zip (this+markCoverage, this+markArray)
| hb_filter (glyphset, hb_first)
;
auto new_mark_coverage =
+ mark_iter
| hb_map_retains_sorting (hb_first)
| hb_map_retains_sorting (glyph_map)
;
if (!out->markCoverage.serialize_serialize (c->serializer, new_mark_coverage))
return_trace (false);
if (unlikely (!out->markArray.serialize_subset (c, markArray, this,
(this+markCoverage).iter (),
&klass_mapping)))
return_trace (false);
auto new_ligature_coverage =
+ hb_iter (this + ligatureCoverage)
| hb_take ((this + ligatureArray).len)
| hb_map_retains_sorting (glyph_map)
| hb_filter ([] (hb_codepoint_t glyph) { return glyph != HB_MAP_VALUE_INVALID; })
;
if (!out->ligatureCoverage.serialize_serialize (c->serializer, new_ligature_coverage))
return_trace (false);
return_trace (out->ligatureArray.serialize_subset (c, ligatureArray, this,
hb_iter (this+ligatureCoverage),
classCount, &klass_mapping));
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_MARKLIGPOSFORMAT1_HH */

@@ -0,0 +1,42 @@
#ifndef OT_LAYOUT_GPOS_MARKMARKPOS_HH
#define OT_LAYOUT_GPOS_MARKMARKPOS_HH
#include "MarkMarkPosFormat1.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct MarkMarkPos
{
protected:
union {
HBUINT16 format; /* Format identifier */
MarkMarkPosFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MarkMarkPosFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_MARKMARKPOS_HH */

@@ -0,0 +1,231 @@
#ifndef OT_LAYOUT_GPOS_MARKMARKPOSFORMAT1_HH
#define OT_LAYOUT_GPOS_MARKMARKPOSFORMAT1_HH
#include "MarkMarkPosFormat1.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
typedef AnchorMatrix Mark2Array; /* mark2-major--
* in order of Mark2Coverage Index--,
* mark1-minor--
* ordered by class--zero-based. */
template <typename Types>
struct MarkMarkPosFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
typename Types::template OffsetTo<Coverage>
mark1Coverage; /* Offset to Combining Mark1 Coverage
* table--from beginning of MarkMarkPos
* subtable */
typename Types::template OffsetTo<Coverage>
mark2Coverage; /* Offset to Combining Mark2 Coverage
* table--from beginning of MarkMarkPos
* subtable */
HBUINT16 classCount; /* Number of defined mark classes */
typename Types::template OffsetTo<MarkArray>
mark1Array; /* Offset to Mark1Array table--from
* beginning of MarkMarkPos subtable */
typename Types::template OffsetTo<Mark2Array>
mark2Array; /* Offset to Mark2Array table--from
* beginning of MarkMarkPos subtable */
public:
DEFINE_SIZE_STATIC (4 + 4 * Types::size);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
mark1Coverage.sanitize (c, this) &&
mark2Coverage.sanitize (c, this) &&
mark1Array.sanitize (c, this) &&
hb_barrier () &&
mark2Array.sanitize (c, this, (unsigned int) classCount));
}
bool intersects (const hb_set_t *glyphs) const
{
return (this+mark1Coverage).intersects (glyphs) &&
(this+mark2Coverage).intersects (glyphs);
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
+ hb_zip (this+mark1Coverage, this+mark1Array)
| hb_filter (c->glyph_set, hb_first)
| hb_map (hb_second)
| hb_apply ([&] (const MarkRecord& record) { record.collect_variation_indices (c, &(this+mark1Array)); })
;
hb_map_t klass_mapping;
Markclass_closure_and_remap_indexes (this+mark1Coverage, this+mark1Array, *c->glyph_set, &klass_mapping);
unsigned mark2_count = (this+mark2Array).rows;
auto mark2_iter =
+ hb_zip (this+mark2Coverage, hb_range (mark2_count))
| hb_filter (c->glyph_set, hb_first)
| hb_map (hb_second)
;
hb_sorted_vector_t<unsigned> mark2_indexes;
for (const unsigned row : mark2_iter)
{
+ hb_range ((unsigned) classCount)
| hb_filter (klass_mapping)
| hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; })
| hb_sink (mark2_indexes)
;
}
(this+mark2Array).collect_variation_indices (c, mark2_indexes.iter ());
}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+mark1Coverage).collect_coverage (c->input))) return;
if (unlikely (!(this+mark2Coverage).collect_coverage (c->input))) return;
}
const Coverage &get_coverage () const { return this+mark1Coverage; }
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
unsigned int mark1_index = (this+mark1Coverage).get_coverage (buffer->cur().codepoint);
if (likely (mark1_index == NOT_COVERED)) return_trace (false);
/* Now we search backwards for a suitable mark glyph, stopping at the first non-mark glyph. */
auto &skippy_iter = c->iter_input;
skippy_iter.reset_fast (buffer->idx);
skippy_iter.set_lookup_props (c->lookup_props & ~(uint32_t)LookupFlag::IgnoreFlags);
unsigned unsafe_from;
if (unlikely (!skippy_iter.prev (&unsafe_from)))
{
buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1);
return_trace (false);
}
if (likely (!_hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx])))
{
buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
return_trace (false);
}
unsigned int j = skippy_iter.idx;
unsigned int id1 = _hb_glyph_info_get_lig_id (&buffer->cur());
unsigned int id2 = _hb_glyph_info_get_lig_id (&buffer->info[j]);
unsigned int comp1 = _hb_glyph_info_get_lig_comp (&buffer->cur());
unsigned int comp2 = _hb_glyph_info_get_lig_comp (&buffer->info[j]);
if (likely (id1 == id2))
{
if (id1 == 0) /* Marks belonging to the same base. */
goto good;
else if (comp1 == comp2) /* Marks belonging to the same ligature component. */
goto good;
}
else
{
/* If ligature ids don't match, it may be the case that one of the marks
* itself is a ligature. In which case match. */
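/* Example (illustrative): a mark that was itself formed by ligating two
 * marks carries a lig_id but lig_comp == 0, so the check below still lets
 * it attach across the id mismatch. */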
if ((id1 > 0 && !comp1) || (id2 > 0 && !comp2))
goto good;
}
/* Didn't match. */
buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
return_trace (false);
good:
unsigned int mark2_index = (this+mark2Coverage).get_coverage (buffer->info[j].codepoint);
if (mark2_index == NOT_COVERED)
{
buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1);
return_trace (false);
}
return_trace ((this+mark1Array).apply (c, mark1_index, mark2_index, this+mark2Array, classCount, j));
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
hb_map_t klass_mapping;
Markclass_closure_and_remap_indexes (this+mark1Coverage, this+mark1Array, glyphset, &klass_mapping);
if (!klass_mapping.get_population ()) return_trace (false);
out->classCount = klass_mapping.get_population ();
auto mark1_iter =
+ hb_zip (this+mark1Coverage, this+mark1Array)
| hb_filter (glyphset, hb_first)
;
hb_sorted_vector_t<hb_codepoint_t> new_coverage;
+ mark1_iter
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
if (!out->mark1Coverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
if (unlikely (!out->mark1Array.serialize_subset (c, mark1Array, this,
(this+mark1Coverage).iter (),
&klass_mapping)))
return_trace (false);
unsigned mark2count = (this+mark2Array).rows;
auto mark2_iter =
+ hb_zip (this+mark2Coverage, hb_range (mark2count))
| hb_filter (glyphset, hb_first)
;
new_coverage.reset ();
+ mark2_iter
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
if (!out->mark2Coverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
hb_sorted_vector_t<unsigned> mark2_indexes;
for (const unsigned row : + mark2_iter
| hb_map (hb_second))
{
+ hb_range ((unsigned) classCount)
| hb_filter (klass_mapping)
| hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; })
| hb_sink (mark2_indexes)
;
}
return_trace (out->mark2Array.serialize_subset (c, mark2Array, this,
mark2_iter.len (),
mark2_indexes.iter ()));
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_MARKMARKPOSFORMAT1_HH */

@@ -0,0 +1,51 @@
#ifndef OT_LAYOUT_GPOS_MARKRECORD_HH
#define OT_LAYOUT_GPOS_MARKRECORD_HH
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct MarkRecord
{
friend struct MarkArray;
public:
HBUINT16 klass; /* Class defined for this mark */
Offset16To<Anchor>
markAnchor; /* Offset to Anchor table--from
* beginning of MarkArray table */
public:
DEFINE_SIZE_STATIC (4);
unsigned get_class () const { return (unsigned) klass; }
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) && markAnchor.sanitize (c, base));
}
bool subset (hb_subset_context_t *c,
const void *src_base,
const hb_map_t *klass_mapping) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (false);
out->klass = klass_mapping->get (klass);
return_trace (out->markAnchor.serialize_subset (c, markAnchor, src_base));
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
const void *src_base) const
{
(src_base+markAnchor).collect_variation_indices (c);
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_MARKRECORD_HH */

@@ -0,0 +1,46 @@
#ifndef OT_LAYOUT_GPOS_PAIRPOS_HH
#define OT_LAYOUT_GPOS_PAIRPOS_HH
#include "PairPosFormat1.hh"
#include "PairPosFormat2.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct PairPos
{
protected:
union {
HBUINT16 format; /* Format identifier */
PairPosFormat1_3<SmallTypes> format1;
PairPosFormat2_4<SmallTypes> format2;
#ifndef HB_NO_BEYOND_64K
PairPosFormat1_3<MediumTypes> format3;
PairPosFormat2_4<MediumTypes> format4;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 3: return_trace (c->dispatch (u.format3, std::forward<Ts> (ds)...));
case 4: return_trace (c->dispatch (u.format4, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_PAIRPOS_HH

@@ -0,0 +1,271 @@
#ifndef OT_LAYOUT_GPOS_PAIRPOSFORMAT1_HH
#define OT_LAYOUT_GPOS_PAIRPOSFORMAT1_HH
#include "PairSet.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
template <typename Types>
struct PairPosFormat1_3
{
using PairSet = GPOS_impl::PairSet<Types>;
using PairValueRecord = GPOS_impl::PairValueRecord<Types>;
protected:
HBUINT16 format; /* Format identifier--format = 1 */
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of subtable */
ValueFormat valueFormat[2]; /* [0] Defines the types of data in
* ValueRecord1--for the first glyph
* in the pair--may be zero (0) */
/* [1] Defines the types of data in
* ValueRecord2--for the second glyph
* in the pair--may be zero (0) */
Array16Of<typename Types::template OffsetTo<PairSet>>
pairSet; /* Array of PairSet tables
* ordered by Coverage Index */
public:
DEFINE_SIZE_ARRAY (8 + Types::size, pairSet);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!c->check_struct (this)) return_trace (false);
hb_barrier ();
unsigned int len1 = valueFormat[0].get_len ();
unsigned int len2 = valueFormat[1].get_len ();
typename PairSet::sanitize_closure_t closure =
{
valueFormat,
len1,
PairSet::get_size (len1, len2)
};
return_trace (coverage.sanitize (c, this) && pairSet.sanitize (c, this, &closure));
}
bool intersects (const hb_set_t *glyphs) const
{
auto &cov = this+coverage;
if (pairSet.len > glyphs->get_population () * hb_bit_storage ((unsigned) pairSet.len))
{
for (hb_codepoint_t g : glyphs->iter())
{
unsigned i = cov.get_coverage (g);
if ((this+pairSet[i]).intersects (glyphs, valueFormat))
return true;
}
return false;
}
return
+ hb_zip (cov, pairSet)
| hb_filter (*glyphs, hb_first)
| hb_map (hb_second)
| hb_map ([glyphs, this] (const typename Types::template OffsetTo<PairSet> &_)
{ return (this+_).intersects (glyphs, valueFormat); })
| hb_any
;
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
if ((!valueFormat[0].has_device ()) && (!valueFormat[1].has_device ())) return;
auto it =
+ hb_zip (this+coverage, pairSet)
| hb_filter (c->glyph_set, hb_first)
| hb_map (hb_second)
;
if (!it) return;
+ it
| hb_map (hb_add (this))
| hb_apply ([&] (const PairSet& _) { _.collect_variation_indices (c, valueFormat); })
;
}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+coverage).collect_coverage (c->input))) return;
unsigned int count = pairSet.len;
for (unsigned int i = 0; i < count; i++)
(this+pairSet[i]).collect_glyphs (c, valueFormat);
}
const Coverage &get_coverage () const { return this+coverage; }
unsigned cache_cost () const
{
return (this+coverage).cost ();
}
static void * cache_func (void *p, hb_ot_lookup_cache_op_t op)
{
switch (op)
{
case hb_ot_lookup_cache_op_t::CREATE:
{
hb_ot_lookup_cache_t *cache = (hb_ot_lookup_cache_t *) hb_malloc (sizeof (hb_ot_lookup_cache_t));
if (likely (cache))
cache->clear ();
return cache;
}
case hb_ot_lookup_cache_op_t::ENTER:
return (void *) true;
case hb_ot_lookup_cache_op_t::LEAVE:
return nullptr;
case hb_ot_lookup_cache_op_t::DESTROY:
{
hb_ot_lookup_cache_t *cache = (hb_ot_lookup_cache_t *) p;
hb_free (cache);
return nullptr;
}
}
return nullptr;
}
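/* Lifecycle sketch (hedged): CREATE allocates one hb_ot_lookup_cache_t for
 * the lookup accelerator, ENTER/LEAVE bracket its use around application,
 * and DESTROY frees it; apply_cached() below then passes the cache into
 * Coverage::get_coverage() to memoize coverage lookups. */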
bool apply_cached (hb_ot_apply_context_t *c) const { return _apply (c, true); }
bool apply (hb_ot_apply_context_t *c) const { return _apply (c, false); }
bool _apply (hb_ot_apply_context_t *c, bool cached) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
hb_ot_lookup_cache_t *cache = cached ? (hb_ot_lookup_cache_t *) c->lookup_accel->cache : nullptr;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint, cache);
#else
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
#endif
if (index == NOT_COVERED) return_trace (false);
auto &skippy_iter = c->iter_input;
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_to;
if (unlikely (!skippy_iter.next (&unsafe_to)))
{
buffer->unsafe_to_concat (buffer->idx, unsafe_to);
return_trace (false);
}
return_trace ((this+pairSet[index]).apply (c, valueFormat, skippy_iter.idx));
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
hb_pair_t<unsigned, unsigned> newFormats = hb_pair (valueFormat[0], valueFormat[1]);
if (c->plan->normalized_coords)
{
/* all device flags will be dropped when fully instancing; no need to strip
 * hints, and do not strip empty values either, because we don't compute the
 * new default value during stripping */
newFormats = compute_effective_value_formats (glyphset, false, false, &c->plan->layout_variation_idx_delta_map);
}
/* do not strip hints for VF */
else if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
{
hb_blob_t* blob = hb_face_reference_table (c->plan->source, HB_TAG ('f','v','a','r'));
bool has_fvar = (blob != hb_blob_get_empty ());
hb_blob_destroy (blob);
bool strip = !has_fvar;
/* special case: strip hints when a VF has no GDEF varstore after
 * subsetting */
if (has_fvar && !c->plan->has_gdef_varstore)
strip = true;
newFormats = compute_effective_value_formats (glyphset, strip, true);
}
out->valueFormat[0] = newFormats.first;
out->valueFormat[1] = newFormats.second;
hb_sorted_vector_t<hb_codepoint_t> new_coverage;
+ hb_zip (this+coverage, pairSet)
| hb_filter (glyphset, hb_first)
| hb_filter ([this, c, out] (const typename Types::template OffsetTo<PairSet>& _)
{
auto snap = c->serializer->snapshot ();
auto *o = out->pairSet.serialize_append (c->serializer);
if (unlikely (!o)) return false;
bool ret = o->serialize_subset (c, _, this, valueFormat, out->valueFormat);
if (!ret)
{
out->pairSet.pop ();
c->serializer->revert (snap);
}
return ret;
},
hb_second)
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
out->coverage.serialize_serialize (c->serializer, new_coverage.iter ());
return_trace (bool (new_coverage));
}
hb_pair_t<unsigned, unsigned> compute_effective_value_formats (const hb_set_t& glyphset,
bool strip_hints, bool strip_empty,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *varidx_delta_map = nullptr) const
{
unsigned record_size = PairSet::get_size (valueFormat);
unsigned format1 = 0;
unsigned format2 = 0;
for (const auto & _ :
+ hb_zip (this+coverage, pairSet)
| hb_filter (glyphset, hb_first)
| hb_map (hb_second)
)
{
const PairSet& set = (this + _);
const PairValueRecord *record = &set.firstPairValueRecord;
unsigned count = set.len;
for (unsigned i = 0; i < count; i++)
{
if (record->intersects (glyphset))
{
format1 = format1 | valueFormat[0].get_effective_format (record->get_values_1 (), strip_hints, strip_empty, &set, varidx_delta_map);
format2 = format2 | valueFormat[1].get_effective_format (record->get_values_2 (valueFormat[0]), strip_hints, strip_empty, &set, varidx_delta_map);
}
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
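/* Effective formats already match the originals; nothing can be dropped,
 * so stop scanning early. */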
if (format1 == valueFormat[0] && format2 == valueFormat[1])
break;
}
return hb_pair (format1, format2);
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_PAIRPOSFORMAT1_HH


@@ -0,0 +1,413 @@
#ifndef OT_LAYOUT_GPOS_PAIRPOSFORMAT2_HH
#define OT_LAYOUT_GPOS_PAIRPOSFORMAT2_HH
#include "ValueFormat.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
template <typename Types>
struct PairPosFormat2_4 : ValueBase
{
protected:
HBUINT16 format; /* Format identifier--format = 2 */
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of subtable */
ValueFormat valueFormat1; /* ValueRecord definition--for the
* first glyph of the pair--may be zero
* (0) */
ValueFormat valueFormat2; /* ValueRecord definition--for the
* second glyph of the pair--may be
* zero (0) */
typename Types::template OffsetTo<ClassDef>
classDef1; /* Offset to ClassDef table--from
* beginning of PairPos subtable--for
* the first glyph of the pair */
typename Types::template OffsetTo<ClassDef>
classDef2; /* Offset to ClassDef table--from
* beginning of PairPos subtable--for
* the second glyph of the pair */
HBUINT16 class1Count; /* Number of classes in ClassDef1
* table--includes Class0 */
HBUINT16 class2Count; /* Number of classes in ClassDef2
* table--includes Class0 */
ValueRecord values; /* Matrix of value pairs:
* class1-major, class2-minor,
* Each entry has value1 and value2 */
public:
DEFINE_SIZE_ARRAY (10 + 3 * Types::size, values);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!(c->check_struct (this)
&& coverage.sanitize (c, this)
&& classDef1.sanitize (c, this)
&& classDef2.sanitize (c, this))) return_trace (false);
unsigned int len1 = valueFormat1.get_len ();
unsigned int len2 = valueFormat2.get_len ();
unsigned int stride = HBUINT16::static_size * (len1 + len2);
unsigned int count = (unsigned int) class1Count * (unsigned int) class2Count;
return_trace (c->check_range ((const void *) values,
count,
stride) &&
(c->lazy_some_gpos ||
(valueFormat1.sanitize_values_stride_unsafe (c, this, &values[0], count, stride) &&
valueFormat2.sanitize_values_stride_unsafe (c, this, &values[len1], count, stride))));
}
bool intersects (const hb_set_t *glyphs) const
{
return (this+coverage).intersects (glyphs) &&
(this+classDef2).intersects (glyphs);
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
if (!intersects (c->glyph_set)) return;
if ((!valueFormat1.has_device ()) && (!valueFormat2.has_device ())) return;
hb_set_t klass1_glyphs, klass2_glyphs;
if (!(this+classDef1).collect_coverage (&klass1_glyphs)) return;
if (!(this+classDef2).collect_coverage (&klass2_glyphs)) return;
hb_set_t class1_set, class2_set;
for (const unsigned cp : + c->glyph_set->iter () | hb_filter (this + coverage))
{
if (!klass1_glyphs.has (cp)) class1_set.add (0);
else
{
unsigned klass1 = (this+classDef1).get (cp);
class1_set.add (klass1);
}
}
class2_set.add (0);
for (const unsigned cp : + c->glyph_set->iter () | hb_filter (klass2_glyphs))
{
unsigned klass2 = (this+classDef2).get (cp);
class2_set.add (klass2);
}
if (class1_set.is_empty ()
|| class2_set.is_empty ()
|| (class2_set.get_population() == 1 && class2_set.has(0)))
return;
unsigned len1 = valueFormat1.get_len ();
unsigned len2 = valueFormat2.get_len ();
const hb_array_t<const Value> values_array = values.as_array ((unsigned)class1Count * (unsigned) class2Count * (len1 + len2));
for (const unsigned class1_idx : class1_set.iter ())
{
for (const unsigned class2_idx : class2_set.iter ())
{
unsigned start_offset = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2);
if (valueFormat1.has_device ())
valueFormat1.collect_variation_indices (c, this, values_array.sub_array (start_offset, len1));
if (valueFormat2.has_device ())
valueFormat2.collect_variation_indices (c, this, values_array.sub_array (start_offset+len1, len2));
}
}
}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+coverage).collect_coverage (c->input))) return;
if (unlikely (!(this+classDef2).collect_coverage (c->input))) return;
}
const Coverage &get_coverage () const { return this+coverage; }
struct pair_pos_cache_t
{
hb_ot_lookup_cache_t coverage;
hb_ot_lookup_cache_t first;
hb_ot_lookup_cache_t second;
};
unsigned cache_cost () const
{
return (this+coverage).cost () + (this+classDef1).cost () + (this+classDef2).cost ();
}
static void * cache_func (void *p, hb_ot_lookup_cache_op_t op)
{
switch (op)
{
case hb_ot_lookup_cache_op_t::CREATE:
{
pair_pos_cache_t *cache = (pair_pos_cache_t *) hb_malloc (sizeof (pair_pos_cache_t));
if (likely (cache))
{
cache->coverage.clear ();
cache->first.clear ();
cache->second.clear ();
}
return cache;
}
case hb_ot_lookup_cache_op_t::ENTER:
return (void *) true;
case hb_ot_lookup_cache_op_t::LEAVE:
return nullptr;
case hb_ot_lookup_cache_op_t::DESTROY:
{
pair_pos_cache_t *cache = (pair_pos_cache_t *) p;
hb_free (cache);
return nullptr;
}
}
return nullptr;
}
bool apply_cached (hb_ot_apply_context_t *c) const { return _apply (c, true); }
bool apply (hb_ot_apply_context_t *c) const { return _apply (c, false); }
bool _apply (hb_ot_apply_context_t *c, bool cached) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
pair_pos_cache_t *cache = cached ? (pair_pos_cache_t *) c->lookup_accel->cache : nullptr;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint, cache ? &cache->coverage : nullptr);
#else
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
#endif
if (index == NOT_COVERED) return_trace (false);
auto &skippy_iter = c->iter_input;
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_to;
if (unlikely (!skippy_iter.next (&unsafe_to)))
{
buffer->unsafe_to_concat (buffer->idx, unsafe_to);
return_trace (false);
}
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
unsigned int klass1 = (this+classDef1).get_class (buffer->cur().codepoint, cache ? &cache->first : nullptr);
unsigned int klass2 = (this+classDef2).get_class (buffer->info[skippy_iter.idx].codepoint, cache ? &cache->second : nullptr);
#else
unsigned int klass1 = (this+classDef1).get_class (buffer->cur().codepoint);
unsigned int klass2 = (this+classDef2).get_class (buffer->info[skippy_iter.idx].codepoint);
#endif
if (unlikely (klass1 >= class1Count || klass2 >= class2Count))
{
buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1);
return_trace (false);
}
unsigned int len1 = valueFormat1.get_len ();
unsigned int len2 = valueFormat2.get_len ();
unsigned int record_len = len1 + len2;
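/* Index into the class1-major value matrix. */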
const Value *v = &values[record_len * (klass1 * class2Count + klass2)];
bool applied_first = false, applied_second = false;
/* Isolate simple kerning and apply it half to each side.
* Results in better cursor positioning / underline drawing.
*
* Disabled, because causes issues... :-(
* https://github.com/harfbuzz/harfbuzz/issues/3408
* https://github.com/harfbuzz/harfbuzz/pull/3235#issuecomment-1029814978
*/
#ifndef HB_SPLIT_KERN
if (false)
#endif
{
if (!len2)
{
const hb_direction_t dir = buffer->props.direction;
const bool horizontal = HB_DIRECTION_IS_HORIZONTAL (dir);
const bool backward = HB_DIRECTION_IS_BACKWARD (dir);
unsigned mask = horizontal ? ValueFormat::xAdvance : ValueFormat::yAdvance;
if (backward)
mask |= mask >> 2; /* Add e.g. xPlacement in RTL. */
/* Add Devices. */
mask |= mask << 4;
if (valueFormat1 & ~mask)
goto bail;
/* This is simple kerning. Apply the value to an empty position slot,
 * then split it between the two sides. */
hb_glyph_position_t pos{};
if (valueFormat1.apply_value (c, this, v, pos))
{
hb_position_t *src = &pos.x_advance;
hb_position_t *dst1 = &buffer->cur_pos().x_advance;
hb_position_t *dst2 = &buffer->pos[skippy_iter.idx].x_advance;
unsigned i = horizontal ? 0 : 1;
hb_position_t kern = src[i];
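/* Halve the kern: kern1 gets the floored half, kern2 the remainder. */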
hb_position_t kern1 = kern >> 1;
hb_position_t kern2 = kern - kern1;
if (!backward)
{
dst1[i] += kern1;
dst2[i] += kern2;
dst2[i + 2] += kern2;
}
else
{
dst1[i] += kern1;
dst1[i + 2] += src[i + 2] - kern2;
dst2[i] += kern2;
}
applied_first = applied_second = kern != 0;
goto success;
}
goto boring;
}
}
bail:
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"try kerning glyphs at %u,%u",
c->buffer->idx, skippy_iter.idx);
}
applied_first = len1 && valueFormat1.apply_value (c, this, v, buffer->cur_pos());
applied_second = len2 && valueFormat2.apply_value (c, this, v + len1, buffer->pos[skippy_iter.idx]);
if (applied_first || applied_second)
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"kerned glyphs at %u,%u",
c->buffer->idx, skippy_iter.idx);
}
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"tried kerning glyphs at %u,%u",
c->buffer->idx, skippy_iter.idx);
}
success:
if (applied_first || applied_second)
buffer->unsafe_to_break (buffer->idx, skippy_iter.idx + 1);
else
boring:
buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1);
if (len2)
{
skippy_iter.idx++;
// https://github.com/harfbuzz/harfbuzz/issues/3824
// https://github.com/harfbuzz/harfbuzz/issues/3888#issuecomment-1326781116
buffer->unsafe_to_break (buffer->idx, skippy_iter.idx + 1);
}
buffer->idx = skippy_iter.idx;
return_trace (true);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
hb_map_t klass1_map;
out->classDef1.serialize_subset (c, classDef1, this, &klass1_map, true, true, &(this + coverage));
out->class1Count = klass1_map.get_population ();
hb_map_t klass2_map;
out->classDef2.serialize_subset (c, classDef2, this, &klass2_map, true, false);
out->class2Count = klass2_map.get_population ();
unsigned len1 = valueFormat1.get_len ();
unsigned len2 = valueFormat2.get_len ();
hb_pair_t<unsigned, unsigned> newFormats = hb_pair (valueFormat1, valueFormat2);
if (c->plan->normalized_coords)
{
/* when fully instancing, all variable device flags will be dropped, so
 * there is no need to strip hints here */
newFormats = compute_effective_value_formats (klass1_map, klass2_map, false, false, &c->plan->layout_variation_idx_delta_map);
}
/* do not strip hints for VF */
else if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
{
hb_blob_t* blob = hb_face_reference_table (c->plan->source, HB_TAG ('f','v','a','r'));
bool has_fvar = (blob != hb_blob_get_empty ());
hb_blob_destroy (blob);
bool strip = !has_fvar;
/* special case: strip hints when a VF has no GDEF varstore after
 * subsetting */
if (has_fvar && !c->plan->has_gdef_varstore)
strip = true;
newFormats = compute_effective_value_formats (klass1_map, klass2_map, strip, true);
}
out->valueFormat1 = newFormats.first;
out->valueFormat2 = newFormats.second;
unsigned total_len = len1 + len2;
hb_vector_t<unsigned> class2_idxs (+ hb_range ((unsigned) class2Count) | hb_filter (klass2_map));
for (unsigned class1_idx : + hb_range ((unsigned) class1Count) | hb_filter (klass1_map))
{
for (unsigned class2_idx : class2_idxs)
{
unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * total_len;
valueFormat1.copy_values (c->serializer, out->valueFormat1, this, &values[idx], &c->plan->layout_variation_idx_delta_map);
valueFormat2.copy_values (c->serializer, out->valueFormat2, this, &values[idx + len1], &c->plan->layout_variation_idx_delta_map);
}
}
bool ret = out->coverage.serialize_subset(c, coverage, this);
return_trace (out->class1Count && out->class2Count && ret);
}
hb_pair_t<unsigned, unsigned> compute_effective_value_formats (const hb_map_t& klass1_map,
const hb_map_t& klass2_map,
bool strip_hints, bool strip_empty,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *varidx_delta_map = nullptr) const
{
unsigned len1 = valueFormat1.get_len ();
unsigned len2 = valueFormat2.get_len ();
unsigned record_size = len1 + len2;
unsigned format1 = 0;
unsigned format2 = 0;
for (unsigned class1_idx : + hb_range ((unsigned) class1Count) | hb_filter (klass1_map))
{
for (unsigned class2_idx : + hb_range ((unsigned) class2Count) | hb_filter (klass2_map))
{
unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * record_size;
format1 = format1 | valueFormat1.get_effective_format (&values[idx], strip_hints, strip_empty, this, varidx_delta_map);
format2 = format2 | valueFormat2.get_effective_format (&values[idx + len1], strip_hints, strip_empty, this, varidx_delta_map);
}
if (format1 == valueFormat1 && format2 == valueFormat2)
break;
}
return hb_pair (format1, format2);
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_PAIRPOSFORMAT2_HH


@@ -0,0 +1,210 @@
#ifndef OT_LAYOUT_GPOS_PAIRSET_HH
#define OT_LAYOUT_GPOS_PAIRSET_HH
#include "PairValueRecord.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
template <typename Types>
struct PairSet : ValueBase
{
template <typename Types2>
friend struct PairPosFormat1_3;
using PairValueRecord = GPOS_impl::PairValueRecord<Types>;
protected:
HBUINT16 len; /* Number of PairValueRecords */
PairValueRecord firstPairValueRecord;
/* Array of PairValueRecords--ordered
* by GlyphID of the second glyph */
public:
DEFINE_SIZE_MIN (2);
static unsigned get_size (unsigned len1, unsigned len2)
{
return Types::HBGlyphID::static_size + Value::static_size * (len1 + len2);
}
static unsigned get_size (const ValueFormat valueFormats[2])
{
unsigned len1 = valueFormats[0].get_len ();
unsigned len2 = valueFormats[1].get_len ();
return get_size (len1, len2);
}
struct sanitize_closure_t
{
const ValueFormat *valueFormats;
unsigned int len1; /* valueFormats[0].get_len() */
unsigned int stride; /* bytes */
};
bool sanitize (hb_sanitize_context_t *c, const sanitize_closure_t *closure) const
{
TRACE_SANITIZE (this);
if (!(c->check_struct (this) &&
hb_barrier () &&
c->check_range (&firstPairValueRecord,
len,
closure->stride))) return_trace (false);
hb_barrier ();
unsigned int count = len;
const PairValueRecord *record = &firstPairValueRecord;
return_trace (c->lazy_some_gpos ||
(closure->valueFormats[0].sanitize_values_stride_unsafe (c, this, &record->values[0], count, closure->stride) &&
closure->valueFormats[1].sanitize_values_stride_unsafe (c, this, &record->values[closure->len1], count, closure->stride)));
}
bool intersects (const hb_set_t *glyphs,
const ValueFormat *valueFormats) const
{
unsigned record_size = get_size (valueFormats);
const PairValueRecord *record = &firstPairValueRecord;
unsigned int count = len;
for (unsigned int i = 0; i < count; i++)
{
if (glyphs->has (record->secondGlyph))
return true;
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
return false;
}
void collect_glyphs (hb_collect_glyphs_context_t *c,
const ValueFormat *valueFormats) const
{
unsigned record_size = get_size (valueFormats);
const PairValueRecord *record = &firstPairValueRecord;
c->input->add_array (&record->secondGlyph, len, record_size);
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
const ValueFormat *valueFormats) const
{
unsigned record_size = get_size (valueFormats);
const PairValueRecord *record = &firstPairValueRecord;
unsigned count = len;
for (unsigned i = 0; i < count; i++)
{
if (c->glyph_set->has (record->secondGlyph))
{ record->collect_variation_indices (c, valueFormats, this); }
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
}
bool apply (hb_ot_apply_context_t *c,
const ValueFormat *valueFormats,
unsigned int pos) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
unsigned int len1 = valueFormats[0].get_len ();
unsigned int len2 = valueFormats[1].get_len ();
unsigned record_size = get_size (len1, len2);
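/* Records are sorted by secondGlyph, so binary-search on the second
 * glyph of the pair. */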
const PairValueRecord *record = hb_bsearch (buffer->info[pos].codepoint,
&firstPairValueRecord,
len,
record_size);
if (record)
{
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"try kerning glyphs at %u,%u",
c->buffer->idx, pos);
}
bool applied_first = len1 && valueFormats[0].apply_value (c, this, &record->values[0], buffer->cur_pos());
bool applied_second = len2 && valueFormats[1].apply_value (c, this, &record->values[len1], buffer->pos[pos]);
if (applied_first || applied_second)
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"kerned glyphs at %u,%u",
c->buffer->idx, pos);
}
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"tried kerning glyphs at %u,%u",
c->buffer->idx, pos);
}
if (applied_first || applied_second)
buffer->unsafe_to_break (buffer->idx, pos + 1);
if (len2)
{
pos++;
// https://github.com/harfbuzz/harfbuzz/issues/3824
// https://github.com/harfbuzz/harfbuzz/issues/3888#issuecomment-1326781116
buffer->unsafe_to_break (buffer->idx, pos + 1);
}
buffer->idx = pos;
return_trace (true);
}
buffer->unsafe_to_concat (buffer->idx, pos + 1);
return_trace (false);
}
bool subset (hb_subset_context_t *c,
const ValueFormat valueFormats[2],
const ValueFormat newFormats[2]) const
{
TRACE_SUBSET (this);
auto snap = c->serializer->snapshot ();
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->len = 0;
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
unsigned len1 = valueFormats[0].get_len ();
unsigned len2 = valueFormats[1].get_len ();
unsigned record_size = get_size (len1, len2);
typename PairValueRecord::context_t context =
{
this,
valueFormats,
newFormats,
len1,
&glyph_map,
&c->plan->layout_variation_idx_delta_map
};
const PairValueRecord *record = &firstPairValueRecord;
unsigned count = len, num = 0;
for (unsigned i = 0; i < count; i++)
{
if (glyphset.has (record->secondGlyph)
&& record->subset (c, &context)) num++;
record = &StructAtOffset<const PairValueRecord> (record, record_size);
}
out->len = num;
if (!num) c->serializer->revert (snap);
return_trace (num);
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_PAIRSET_HH


@@ -0,0 +1,99 @@
#ifndef OT_LAYOUT_GPOS_PAIRVALUERECORD_HH
#define OT_LAYOUT_GPOS_PAIRVALUERECORD_HH
#include "ValueFormat.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
template <typename Types>
struct PairValueRecord
{
template <typename Types2>
friend struct PairSet;
protected:
typename Types::HBGlyphID
secondGlyph; /* GlyphID of second glyph in the
* pair--first glyph is listed in the
* Coverage table */
ValueRecord values; /* Positioning data for the first glyph
* followed by for second glyph */
public:
DEFINE_SIZE_ARRAY (Types::HBGlyphID::static_size, values);
int cmp (hb_codepoint_t k) const
{ return secondGlyph.cmp (k); }
struct context_t
{
const ValueBase *base;
const ValueFormat *valueFormats;
const ValueFormat *newFormats;
unsigned len1; /* valueFormats[0].get_len() */
const hb_map_t *glyph_map;
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map;
};
bool subset (hb_subset_context_t *c,
context_t *closure) const
{
TRACE_SERIALIZE (this);
auto *s = c->serializer;
auto *out = s->start_embed (*this);
if (unlikely (!s->extend_min (out))) return_trace (false);
out->secondGlyph = (*closure->glyph_map)[secondGlyph];
closure->valueFormats[0].copy_values (s,
closure->newFormats[0],
closure->base, &values[0],
closure->layout_variation_idx_delta_map);
closure->valueFormats[1].copy_values (s,
closure->newFormats[1],
closure->base,
&values[closure->len1],
closure->layout_variation_idx_delta_map);
return_trace (true);
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
const ValueFormat *valueFormats,
const ValueBase *base) const
{
unsigned record1_len = valueFormats[0].get_len ();
unsigned record2_len = valueFormats[1].get_len ();
const hb_array_t<const Value> values_array = values.as_array (record1_len + record2_len);
if (valueFormats[0].has_device ())
valueFormats[0].collect_variation_indices (c, base, values_array.sub_array (0, record1_len));
if (valueFormats[1].has_device ())
valueFormats[1].collect_variation_indices (c, base, values_array.sub_array (record1_len, record2_len));
}
bool intersects (const hb_set_t& glyphset) const
{
return glyphset.has(secondGlyph);
}
const Value* get_values_1 () const
{
return &values[0];
}
const Value* get_values_2 (ValueFormat format1) const
{
return &values[format1.get_len ()];
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_PAIRVALUERECORD_HH


@@ -0,0 +1,79 @@
#ifndef OT_LAYOUT_GPOS_POSLOOKUP_HH
#define OT_LAYOUT_GPOS_POSLOOKUP_HH
#include "PosLookupSubTable.hh"
#include "../../../hb-ot-layout-common.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct PosLookup : Lookup
{
using SubTable = PosLookupSubTable;
const SubTable& get_subtable (unsigned int i) const
{ return Lookup::get_subtable<SubTable> (i); }
bool is_reverse () const
{
return false;
}
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
return_trace (dispatch (c));
}
bool intersects (const hb_set_t *glyphs) const
{
hb_intersects_context_t c (glyphs);
return dispatch (&c);
}
hb_collect_glyphs_context_t::return_t collect_glyphs (hb_collect_glyphs_context_t *c) const
{ return dispatch (c); }
hb_closure_lookups_context_t::return_t closure_lookups (hb_closure_lookups_context_t *c, unsigned this_index) const
{
if (c->is_lookup_visited (this_index))
return hb_closure_lookups_context_t::default_return_value ();
c->set_lookup_visited (this_index);
if (!intersects (c->glyphs))
{
c->set_lookup_inactive (this_index);
return hb_closure_lookups_context_t::default_return_value ();
}
hb_closure_lookups_context_t::return_t ret = dispatch (c);
return ret;
}
template <typename set_t>
void collect_coverage (set_t *glyphs) const
{
hb_collect_coverage_context_t<set_t> c (glyphs);
dispatch (&c);
}
template <typename context_t>
static typename context_t::return_t dispatch_recurse_func (context_t *c, unsigned int lookup_index);
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ return Lookup::dispatch<SubTable> (c, std::forward<Ts> (ds)...); }
bool subset (hb_subset_context_t *c) const
{ return Lookup::subset<SubTable> (c); }
bool sanitize (hb_sanitize_context_t *c) const
{ return Lookup::sanitize<SubTable> (c); }
};
}
}
}
#endif /* OT_LAYOUT_GPOS_POSLOOKUP_HH */


@@ -0,0 +1,79 @@
#ifndef OT_LAYOUT_GPOS_POSLOOKUPSUBTABLE_HH
#define OT_LAYOUT_GPOS_POSLOOKUPSUBTABLE_HH
#include "SinglePos.hh"
#include "PairPos.hh"
#include "CursivePos.hh"
#include "MarkBasePos.hh"
#include "MarkLigPos.hh"
#include "MarkMarkPos.hh"
#include "ContextPos.hh"
#include "ChainContextPos.hh"
#include "ExtensionPos.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct PosLookupSubTable
{
friend struct ::OT::Lookup;
friend struct PosLookup;
enum Type {
Single = 1,
Pair = 2,
Cursive = 3,
MarkBase = 4,
MarkLig = 5,
MarkMark = 6,
Context = 7,
ChainContext = 8,
Extension = 9
};
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, unsigned int lookup_type, Ts&&... ds) const
{
TRACE_DISPATCH (this, lookup_type);
switch (lookup_type) {
case Single: return_trace (u.single.dispatch (c, std::forward<Ts> (ds)...));
case Pair: return_trace (u.pair.dispatch (c, std::forward<Ts> (ds)...));
case Cursive: return_trace (u.cursive.dispatch (c, std::forward<Ts> (ds)...));
case MarkBase: return_trace (u.markBase.dispatch (c, std::forward<Ts> (ds)...));
case MarkLig: return_trace (u.markLig.dispatch (c, std::forward<Ts> (ds)...));
case MarkMark: return_trace (u.markMark.dispatch (c, std::forward<Ts> (ds)...));
case Context: return_trace (u.context.dispatch (c, std::forward<Ts> (ds)...));
case ChainContext: return_trace (u.chainContext.dispatch (c, std::forward<Ts> (ds)...));
case Extension: return_trace (u.extension.dispatch (c, std::forward<Ts> (ds)...));
default: return_trace (c->default_return_value ());
}
}
bool intersects (const hb_set_t *glyphs, unsigned int lookup_type) const
{
hb_intersects_context_t c (glyphs);
return dispatch (&c, lookup_type);
}
protected:
union {
SinglePos single;
PairPos pair;
CursivePos cursive;
MarkBasePos markBase;
MarkLigPos markLig;
MarkMarkPos markMark;
ContextPos context;
ChainContextPos chainContext;
ExtensionPos extension;
} u;
public:
DEFINE_SIZE_MIN (0);
};
}
}
}
#endif /* OT_LAYOUT_GPOS_POSLOOKUPSUBTABLE_HH */


@@ -0,0 +1,98 @@
#ifndef OT_LAYOUT_GPOS_SINGLEPOS_HH
#define OT_LAYOUT_GPOS_SINGLEPOS_HH
#include "SinglePosFormat1.hh"
#include "SinglePosFormat2.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct SinglePos
{
protected:
union {
HBUINT16 format; /* Format identifier */
SinglePosFormat1 format1;
SinglePosFormat2 format2;
} u;
public:
template<typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
unsigned get_format (Iterator glyph_val_iter_pairs)
{
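/* Format 1 stores a single value record shared by all glyphs, so it is
 * sufficient only when every glyph's values match the first entry;
 * otherwise format 2 (one record per glyph) is needed. */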
hb_array_t<const Value> first_val_iter = hb_second (*glyph_val_iter_pairs);
for (const auto iter : glyph_val_iter_pairs)
for (const auto _ : hb_zip (iter.second, first_val_iter))
if (_.first != _.second)
return 2;
return 1;
}
template<typename Iterator,
typename SrcLookup,
hb_requires (hb_is_iterator (Iterator))>
void serialize (hb_serialize_context_t *c,
const SrcLookup* src,
Iterator glyph_val_iter_pairs,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map,
unsigned newFormat)
{
if (unlikely (!c->extend_min (u.format))) return;
unsigned format = 2;
ValueFormat new_format;
new_format = newFormat;
if (glyph_val_iter_pairs)
format = get_format (glyph_val_iter_pairs);
u.format = format;
switch (u.format) {
case 1: u.format1.serialize (c,
src,
glyph_val_iter_pairs,
new_format,
layout_variation_idx_delta_map);
return;
case 2: u.format2.serialize (c,
src,
glyph_val_iter_pairs,
new_format,
layout_variation_idx_delta_map);
return;
default:return;
}
}
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());
}
}
};
template<typename Iterator, typename SrcLookup>
static void
SinglePos_serialize (hb_serialize_context_t *c,
const SrcLookup *src,
Iterator it,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map,
unsigned new_format)
{ c->start_embed<SinglePos> ()->serialize (c, src, it, layout_variation_idx_delta_map, new_format); }
}
}
}
#endif /* OT_LAYOUT_GPOS_SINGLEPOS_HH */


@@ -0,0 +1,190 @@
#ifndef OT_LAYOUT_GPOS_SINGLEPOSFORMAT1_HH
#define OT_LAYOUT_GPOS_SINGLEPOSFORMAT1_HH
#include "Common.hh"
#include "ValueFormat.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct SinglePosFormat1 : ValueBase
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of subtable */
ValueFormat valueFormat; /* Defines the types of data in the
* ValueRecord */
ValueRecord values; /* Defines positioning
* value(s)--applied to all glyphs in
* the Coverage table */
public:
DEFINE_SIZE_ARRAY (6, values);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
coverage.sanitize (c, this) &&
hb_barrier () &&
/* The coverage table may use a range to represent a set
* of glyphs, which means a small number of bytes can
* generate a large glyph set. Manually modify the
* sanitizer max ops to take this into account.
*
* Note: This check *must* be right after coverage sanitize. */
c->check_ops ((this + coverage).get_population () >> 1) &&
valueFormat.sanitize_value (c, this, values));
}
bool intersects (const hb_set_t *glyphs) const
{ return (this+coverage).intersects (glyphs); }
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
if (!valueFormat.has_device ()) return;
hb_set_t intersection;
(this+coverage).intersect_set (*c->glyph_set, intersection);
if (!intersection) return;
valueFormat.collect_variation_indices (c, this, values.as_array (valueFormat.get_len ()));
}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ if (unlikely (!(this+coverage).collect_coverage (c->input))) return; }
const Coverage &get_coverage () const { return this+coverage; }
ValueFormat get_value_format () const { return valueFormat; }
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
if (index == NOT_COVERED) return_trace (false);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"positioning glyph at %u",
c->buffer->idx);
}
valueFormat.apply_value (c, this, values, buffer->cur_pos());
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"positioned glyph at %u",
c->buffer->idx);
}
buffer->idx++;
return_trace (true);
}
bool
position_single (hb_font_t *font,
hb_blob_t *table_blob,
hb_direction_t direction,
hb_codepoint_t gid,
hb_glyph_position_t &pos) const
{
unsigned int index = (this+coverage).get_coverage (gid);
if (likely (index == NOT_COVERED)) return false;
/* This is ugly... */
hb_buffer_t buffer;
buffer.props.direction = direction;
OT::hb_ot_apply_context_t c (1, font, &buffer, table_blob);
valueFormat.apply_value (&c, this, values, pos);
return true;
}
template<typename Iterator,
typename SrcLookup,
hb_requires (hb_is_iterator (Iterator))>
void serialize (hb_serialize_context_t *c,
const SrcLookup *src,
Iterator it,
ValueFormat newFormat,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map)
{
if (unlikely (!c->extend_min (this))) return;
if (unlikely (!c->check_assign (valueFormat,
newFormat,
HB_SERIALIZE_ERROR_INT_OVERFLOW))) return;
for (const hb_array_t<const Value>& _ : + it | hb_map (hb_second))
{
src->get_value_format ().copy_values (c, newFormat, src, &_, layout_variation_idx_delta_map);
// Only serialize the first entry in the iterator, the rest are assumed to
// be the same.
break;
}
auto glyphs =
+ it
| hb_map_retains_sorting (hb_first)
;
coverage.serialize_serialize (c, glyphs);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
hb_set_t intersection;
(this+coverage).intersect_set (glyphset, intersection);
unsigned new_format = valueFormat;
if (c->plan->normalized_coords)
{
new_format = valueFormat.get_effective_format (values.arrayZ, false, false, this, &c->plan->layout_variation_idx_delta_map);
}
/* do not strip hints for VF */
else if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
{
hb_blob_t* blob = hb_face_reference_table (c->plan->source, HB_TAG ('f','v','a','r'));
bool has_fvar = (blob != hb_blob_get_empty ());
hb_blob_destroy (blob);
bool strip = !has_fvar;
/* special case: strip hints when a VF has no GDEF varstore after
 * subsetting */
if (has_fvar && !c->plan->has_gdef_varstore)
strip = true;
new_format = valueFormat.get_effective_format (values.arrayZ,
strip, /* strip hints */
true, /* strip empty */
this, nullptr);
}
auto it =
+ hb_iter (intersection)
| hb_map_retains_sorting (glyph_map)
| hb_zip (hb_repeat (values.as_array (valueFormat.get_len ())))
;
bool ret = bool (it);
SinglePos_serialize (c->serializer, this, it, &c->plan->layout_variation_idx_delta_map, new_format);
return_trace (ret);
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_SINGLEPOSFORMAT1_HH */


@@ -0,0 +1,213 @@
#ifndef OT_LAYOUT_GPOS_SINGLEPOSFORMAT2_HH
#define OT_LAYOUT_GPOS_SINGLEPOSFORMAT2_HH
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
struct SinglePosFormat2 : ValueBase
{
protected:
HBUINT16 format; /* Format identifier--format = 2 */
Offset16To<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of subtable */
ValueFormat valueFormat; /* Defines the types of data in the
* ValueRecord */
HBUINT16 valueCount; /* Number of ValueRecords */
ValueRecord values; /* Array of ValueRecords--positioning
* values applied to glyphs */
public:
DEFINE_SIZE_ARRAY (8, values);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
coverage.sanitize (c, this) &&
valueFormat.sanitize_values (c, this, values, valueCount));
}
bool intersects (const hb_set_t *glyphs) const
{ return (this+coverage).intersects (glyphs); }
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
if (!valueFormat.has_device ()) return;
auto it =
+ hb_zip (this+coverage, hb_range ((unsigned) valueCount))
| hb_filter (c->glyph_set, hb_first)
;
if (!it) return;
unsigned sub_length = valueFormat.get_len ();
const hb_array_t<const Value> values_array = values.as_array (valueCount * sub_length);
for (unsigned i : + it
| hb_map (hb_second))
valueFormat.collect_variation_indices (c, this, values_array.sub_array (i * sub_length, sub_length));
}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ if (unlikely (!(this+coverage).collect_coverage (c->input))) return; }
const Coverage &get_coverage () const { return this+coverage; }
ValueFormat get_value_format () const { return valueFormat; }
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
if (index == NOT_COVERED) return_trace (false);
if (unlikely (index >= valueCount)) return_trace (false);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"positioning glyph at %u",
c->buffer->idx);
}
valueFormat.apply_value (c, this,
&values[index * valueFormat.get_len ()],
buffer->cur_pos());
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"positioned glyph at %u",
c->buffer->idx);
}
buffer->idx++;
return_trace (true);
}
bool
position_single (hb_font_t *font,
hb_blob_t *table_blob,
hb_direction_t direction,
hb_codepoint_t gid,
hb_glyph_position_t &pos) const
{
unsigned int index = (this+coverage).get_coverage (gid);
if (likely (index == NOT_COVERED)) return false;
if (unlikely (index >= valueCount)) return false;
/* This is ugly... */
hb_buffer_t buffer;
buffer.props.direction = direction;
OT::hb_ot_apply_context_t c (1, font, &buffer, table_blob);
valueFormat.apply_value (&c, this,
&values[index * valueFormat.get_len ()],
pos);
return true;
}
template<typename Iterator,
typename SrcLookup,
hb_requires (hb_is_iterator (Iterator))>
void serialize (hb_serialize_context_t *c,
const SrcLookup *src,
Iterator it,
ValueFormat newFormat,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map)
{
auto out = c->extend_min (this);
if (unlikely (!out)) return;
if (unlikely (!c->check_assign (valueFormat, newFormat, HB_SERIALIZE_ERROR_INT_OVERFLOW))) return;
if (unlikely (!c->check_assign (valueCount, it.len (), HB_SERIALIZE_ERROR_ARRAY_OVERFLOW))) return;
+ it
| hb_map (hb_second)
| hb_apply ([&] (hb_array_t<const Value> _)
{ src->get_value_format ().copy_values (c, newFormat, src, &_, layout_variation_idx_delta_map); })
;
auto glyphs =
+ it
| hb_map_retains_sorting (hb_first)
;
coverage.serialize_serialize (c, glyphs);
}
template<typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
unsigned compute_effective_format (const hb_face_t *face,
Iterator it,
bool is_instancing, bool strip_hints,
bool has_gdef_varstore,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *varidx_delta_map) const
{
hb_blob_t* blob = hb_face_reference_table (face, HB_TAG ('f','v','a','r'));
bool has_fvar = (blob != hb_blob_get_empty ());
hb_blob_destroy (blob);
unsigned new_format = 0;
if (is_instancing)
{
new_format = new_format | valueFormat.get_effective_format (+ it | hb_map (hb_second), false, false, this, varidx_delta_map);
}
/* do not strip hints for VF */
else if (strip_hints)
{
bool strip = !has_fvar;
if (has_fvar && !has_gdef_varstore)
strip = true;
new_format = new_format | valueFormat.get_effective_format (+ it | hb_map (hb_second), strip, true, this, nullptr);
}
else
new_format = valueFormat;
return new_format;
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
unsigned sub_length = valueFormat.get_len ();
auto values_array = values.as_array (valueCount * sub_length);
auto it =
+ hb_zip (this+coverage, hb_range ((unsigned) valueCount))
| hb_filter (glyphset, hb_first)
| hb_map_retains_sorting ([&] (const hb_pair_t<hb_codepoint_t, unsigned>& _)
{
return hb_pair (glyph_map[_.first],
values_array.sub_array (_.second * sub_length,
sub_length));
})
;
unsigned new_format = compute_effective_format (c->plan->source, it,
bool (c->plan->normalized_coords),
bool (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING),
c->plan->has_gdef_varstore,
&c->plan->layout_variation_idx_delta_map);
bool ret = bool (it);
SinglePos_serialize (c->serializer, this, it, &c->plan->layout_variation_idx_delta_map, new_format);
return_trace (ret);
}
};
}
}
}
#endif /* OT_LAYOUT_GPOS_SINGLEPOSFORMAT2_HH */


@@ -0,0 +1,441 @@
#ifndef OT_LAYOUT_GPOS_VALUEFORMAT_HH
#define OT_LAYOUT_GPOS_VALUEFORMAT_HH
#include "../../../hb-ot-layout-gsubgpos.hh"
namespace OT {
namespace Layout {
namespace GPOS_impl {
typedef HBUINT16 Value;
struct ValueBase {}; // Dummy base class tag for OffsetTo<Value> bases.
typedef UnsizedArrayOf<Value> ValueRecord;
struct ValueFormat : HBUINT16
{
enum Flags {
xPlacement = 0x0001u, /* Includes horizontal adjustment for placement */
yPlacement = 0x0002u, /* Includes vertical adjustment for placement */
xAdvance = 0x0004u, /* Includes horizontal adjustment for advance */
yAdvance = 0x0008u, /* Includes vertical adjustment for advance */
xPlaDevice = 0x0010u, /* Includes horizontal Device table for placement */
yPlaDevice = 0x0020u, /* Includes vertical Device table for placement */
xAdvDevice = 0x0040u, /* Includes horizontal Device table for advance */
yAdvDevice = 0x0080u, /* Includes vertical Device table for advance */
ignored = 0x0F00u, /* Was used in TrueType Open for MM fonts */
reserved = 0xF000u, /* For future use */
devices = 0x00F0u /* Mask for having any Device table */
};
/* All fields are optional. Only those present advance the value pointer. */
#if 0
HBINT16 xPlacement; /* Horizontal adjustment for
* placement--in design units */
HBINT16 yPlacement; /* Vertical adjustment for
* placement--in design units */
HBINT16 xAdvance; /* Horizontal adjustment for
* advance--in design units (only used
* for horizontal writing) */
HBINT16 yAdvance; /* Vertical adjustment for advance--in
* design units (only used for vertical
* writing) */
Offset16To<Device> xPlaDevice; /* Offset to Device table for
* horizontal placement--measured from
* beginning of PosTable (may be NULL) */
Offset16To<Device> yPlaDevice; /* Offset to Device table for vertical
* placement--measured from beginning
* of PosTable (may be NULL) */
Offset16To<Device> xAdvDevice; /* Offset to Device table for
* horizontal advance--measured from
* beginning of PosTable (may be NULL) */
Offset16To<Device> yAdvDevice; /* Offset to Device table for vertical
* advance--measured from beginning of
* PosTable (may be NULL) */
#endif
NumType& operator = (uint16_t i) { v = i; return *this; }
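/* The number of 16-bit Value slots equals the number of flag bits set. */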
unsigned int get_len () const { return hb_popcount ((unsigned int) *this); }
unsigned int get_size () const { return get_len () * Value::static_size; }
hb_vector_t<unsigned> get_device_table_indices () const {
unsigned i = 0;
hb_vector_t<unsigned> result;
unsigned format = *this;
if (format & xPlacement) i++;
if (format & yPlacement) i++;
if (format & xAdvance) i++;
if (format & yAdvance) i++;
if (format & xPlaDevice) result.push (i++);
if (format & yPlaDevice) result.push (i++);
if (format & xAdvDevice) result.push (i++);
if (format & yAdvDevice) result.push (i++);
return result;
}
bool apply_value (hb_ot_apply_context_t *c,
const ValueBase *base,
const Value *values,
hb_glyph_position_t &glyph_pos) const
{
bool ret = false;
unsigned int format = *this;
if (!format) return ret;
hb_font_t *font = c->font;
bool horizontal =
#ifndef HB_NO_VERTICAL
HB_DIRECTION_IS_HORIZONTAL (c->direction)
#else
true
#endif
;
if (format & xPlacement) glyph_pos.x_offset += font->em_scale_x (get_short (values++, &ret));
if (format & yPlacement) glyph_pos.y_offset += font->em_scale_y (get_short (values++, &ret));
if (format & xAdvance) {
if (likely (horizontal)) glyph_pos.x_advance += font->em_scale_x (get_short (values, &ret));
values++;
}
/* y_advance values grow downward but font-space grows upward, hence negation */
if (format & yAdvance) {
if (unlikely (!horizontal)) glyph_pos.y_advance -= font->em_scale_y (get_short (values, &ret));
values++;
}
if (!has_device ()) return ret;
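/* Device deltas only apply when hinting at a pixel size or when
 * variation coordinates are in effect. */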
bool use_x_device = font->x_ppem || font->has_nonzero_coords;
bool use_y_device = font->y_ppem || font->has_nonzero_coords;
if (!use_x_device && !use_y_device) return ret;
const ItemVariationStore &store = c->var_store;
auto *cache = c->var_store_cache;
/* pixel -> fractional pixel */
if (format & xPlaDevice)
{
if (use_x_device) glyph_pos.x_offset += get_device (values, &ret, base, c->sanitizer).get_x_delta (font, store, cache);
values++;
}
if (format & yPlaDevice)
{
if (use_y_device) glyph_pos.y_offset += get_device (values, &ret, base, c->sanitizer).get_y_delta (font, store, cache);
values++;
}
if (format & xAdvDevice)
{
if (horizontal && use_x_device) glyph_pos.x_advance += get_device (values, &ret, base, c->sanitizer).get_x_delta (font, store, cache);
values++;
}
if (format & yAdvDevice)
{
/* y_advance values grow downward but font-space grows upward, hence negation */
if (!horizontal && use_y_device) glyph_pos.y_advance -= get_device (values, &ret, base, c->sanitizer).get_y_delta (font, store, cache);
values++;
}
return ret;
}
unsigned int get_effective_format (const Value *values, bool strip_hints, bool strip_empty, const ValueBase *base,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *varidx_delta_map) const
{
unsigned int format = *this;
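/* Walk the flags in bit order; the values pointer advances only for flags
 * present in the original format. */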
for (unsigned flag = xPlacement; flag <= yAdvDevice; flag = flag << 1) {
if (format & flag)
{
if (strip_hints && flag >= xPlaDevice)
{
format = format & ~flag;
values++;
continue;
}
if (varidx_delta_map && flag >= xPlaDevice)
{
update_var_flag (values++, (Flags) flag, &format, base, varidx_delta_map);
continue;
}
/* do not strip empty values when instancing, because we don't know whether
 * the new default value is 0 or not */
if (strip_empty) should_drop (*values, (Flags) flag, &format);
values++;
}
}
return format;
}
template<typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
unsigned int get_effective_format (Iterator it, bool strip_hints, bool strip_empty, const ValueBase *base,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *varidx_delta_map) const {
unsigned int new_format = 0;
for (const hb_array_t<const Value>& values : it)
new_format = new_format | get_effective_format (&values, strip_hints, strip_empty, base, varidx_delta_map);
return new_format;
}
void copy_values (hb_serialize_context_t *c,
unsigned int new_format,
const ValueBase *base,
const Value *values,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map) const
{
unsigned int format = *this;
if (!format) return;
HBINT16 *x_placement = nullptr, *y_placement = nullptr, *x_adv = nullptr, *y_adv = nullptr;
if (format & xPlacement) x_placement = copy_value (c, new_format, xPlacement, *values++);
if (format & yPlacement) y_placement = copy_value (c, new_format, yPlacement, *values++);
if (format & xAdvance) x_adv = copy_value (c, new_format, xAdvance, *values++);
if (format & yAdvance) y_adv = copy_value (c, new_format, yAdvance, *values++);
if (!has_device ())
return;
if (format & xPlaDevice)
{
add_delta_to_value (x_placement, base, values, layout_variation_idx_delta_map);
copy_device (c, base, values++, layout_variation_idx_delta_map, new_format, xPlaDevice);
}
if (format & yPlaDevice)
{
add_delta_to_value (y_placement, base, values, layout_variation_idx_delta_map);
copy_device (c, base, values++, layout_variation_idx_delta_map, new_format, yPlaDevice);
}
if (format & xAdvDevice)
{
add_delta_to_value (x_adv, base, values, layout_variation_idx_delta_map);
copy_device (c, base, values++, layout_variation_idx_delta_map, new_format, xAdvDevice);
}
if (format & yAdvDevice)
{
add_delta_to_value (y_adv, base, values, layout_variation_idx_delta_map);
copy_device (c, base, values++, layout_variation_idx_delta_map, new_format, yAdvDevice);
}
}
HBINT16* copy_value (hb_serialize_context_t *c,
unsigned int new_format,
Flags flag,
Value value) const
{
// Filter by new format.
if (!(new_format & flag)) return nullptr;
return reinterpret_cast<HBINT16 *> (c->copy (value));
}
void collect_variation_indices (hb_collect_variation_indices_context_t *c,
const ValueBase *base,
const hb_array_t<const Value>& values) const
{
unsigned format = *this;
unsigned i = 0;
if (format & xPlacement) i++;
if (format & yPlacement) i++;
if (format & xAdvance) i++;
if (format & yAdvance) i++;
if (format & xPlaDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
if (format & ValueFormat::yPlaDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
if (format & ValueFormat::xAdvDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
if (format & ValueFormat::yAdvDevice)
{
(base + get_device (&(values[i]))).collect_variation_indices (c);
i++;
}
}
private:
bool sanitize_value_devices (hb_sanitize_context_t *c, const ValueBase *base, const Value *values) const
{
unsigned int format = *this;
if (format & xPlacement) values++;
if (format & yPlacement) values++;
if (format & xAdvance) values++;
if (format & yAdvance) values++;
if ((format & xPlaDevice) && !get_device (values++).sanitize (c, base)) return false;
if ((format & yPlaDevice) && !get_device (values++).sanitize (c, base)) return false;
if ((format & xAdvDevice) && !get_device (values++).sanitize (c, base)) return false;
if ((format & yAdvDevice) && !get_device (values++).sanitize (c, base)) return false;
return true;
}
static inline Offset16To<Device, ValueBase>& get_device (Value* value)
{
return *static_cast<Offset16To<Device, ValueBase> *> (value);
}
static inline const Offset16To<Device, ValueBase>& get_device (const Value* value)
{
return *static_cast<const Offset16To<Device, ValueBase> *> (value);
}
static inline const Device& get_device (const Value* value,
bool *worked,
const ValueBase *base,
hb_sanitize_context_t &c)
{
if (worked) *worked |= bool (*value);
auto &offset = *static_cast<const Offset16To<Device> *> (value);
if (unlikely (!offset.sanitize (&c, base)))
return Null(Device);
hb_barrier ();
return base + offset;
}
void add_delta_to_value (HBINT16 *value,
const ValueBase *base,
const Value *src_value,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map) const
{
if (!value) return;
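/* Fold any delta recorded for this variation index (e.g. from instancing)
 * into the static value. */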
unsigned varidx = (base + get_device (src_value)).get_variation_index ();
hb_pair_t<unsigned, int> *varidx_delta;
if (!layout_variation_idx_delta_map->has (varidx, &varidx_delta)) return;
*value += hb_second (*varidx_delta);
}
bool copy_device (hb_serialize_context_t *c,
const ValueBase *base,
const Value *src_value,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map,
unsigned int new_format, Flags flag) const
{
// Filter by new format.
if (!(new_format & flag)) return true;
Value *dst_value = c->copy (*src_value);
if (!dst_value) return false;
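/* A null offset means there is no device table: keep the copied zero.
 * Otherwise clear the copied offset and link it to the device table
 * serialized below. */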
if (*dst_value == 0) return true;
*dst_value = 0;
c->push ();
if ((base + get_device (src_value)).copy (c, layout_variation_idx_delta_map))
{
c->add_link (*dst_value, c->pop_pack ());
return true;
}
else
{
c->pop_discard ();
return false;
}
}
static inline const HBINT16& get_short (const Value* value, bool *worked=nullptr)
{
if (worked) *worked |= bool (*value);
return *reinterpret_cast<const HBINT16 *> (value);
}
public:
bool has_device () const
{
unsigned int format = *this;
return (format & devices) != 0;
}
bool sanitize_value (hb_sanitize_context_t *c, const ValueBase *base, const Value *values) const
{
TRACE_SANITIZE (this);
if (unlikely (!c->check_range (values, get_size ()))) return_trace (false);
if (c->lazy_some_gpos)
return_trace (true);
return_trace (!has_device () || sanitize_value_devices (c, base, values));
}
bool sanitize_values (hb_sanitize_context_t *c, const ValueBase *base, const Value *values, unsigned int count) const
{
TRACE_SANITIZE (this);
unsigned size = get_size ();
if (!c->check_range (values, count, size)) return_trace (false);
if (c->lazy_some_gpos)
return_trace (true);
hb_barrier ();
return_trace (sanitize_values_stride_unsafe (c, base, values, count, size));
}
/* Just sanitize referenced Device tables. Doesn't check the values themselves. */
bool sanitize_values_stride_unsafe (hb_sanitize_context_t *c, const ValueBase *base, const Value *values, unsigned int count, unsigned int stride) const
{
TRACE_SANITIZE (this);
if (!has_device ()) return_trace (true);
for (unsigned int i = 0; i < count; i++) {
if (!sanitize_value_devices (c, base, values))
return_trace (false);
values = &StructAtOffset<const Value> (values, stride);
}
return_trace (true);
}
private:
void should_drop (Value value, Flags flag, unsigned int* format) const
{
if (value) return;
*format = *format & ~flag;
}
void update_var_flag (const Value* value, Flags flag,
unsigned int* format, const ValueBase *base,
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *varidx_delta_map) const
{
if (*value)
{
unsigned varidx = (base + get_device (value)).get_variation_index ();
hb_pair_t<unsigned, int> *varidx_delta;
if (varidx_delta_map->has (varidx, &varidx_delta) &&
varidx_delta->first != HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
return;
}
*format = *format & ~flag;
}
};
}
}
}
#endif // OT_LAYOUT_GPOS_VALUEFORMAT_HH


@@ -0,0 +1,126 @@
#ifndef OT_LAYOUT_GSUB_ALTERNATESET_HH
#define OT_LAYOUT_GSUB_ALTERNATESET_HH
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
template <typename Types>
struct AlternateSet
{
protected:
Array16Of<typename Types::HBGlyphID>
alternates; /* Array of alternate GlyphIDs--in
* arbitrary order */
public:
DEFINE_SIZE_ARRAY (2, alternates);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (alternates.sanitize (c));
}
bool intersects (const hb_set_t *glyphs) const
{ return hb_any (alternates, glyphs); }
void closure (hb_closure_context_t *c) const
{ c->output->add_array (alternates.arrayZ, alternates.len); }
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ c->output->add_array (alternates.arrayZ, alternates.len); }
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
unsigned int count = alternates.len;
if (unlikely (!count)) return_trace (false);
hb_mask_t glyph_mask = c->buffer->cur().mask;
hb_mask_t lookup_mask = c->lookup_mask;
/* Note: This breaks badly if two features enabled this lookup together. */
unsigned int shift = hb_ctz (lookup_mask);
unsigned int alt_index = ((lookup_mask & glyph_mask) >> shift);
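/* alt_index is the 1-based alternate number carried in the lookup's mask
 * bits; zero means no alternate was selected. */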
/* If alt_index is MAX_VALUE, randomize feature if it is the rand feature. */
if (alt_index == HB_OT_MAP_MAX_VALUE && c->random)
{
/* Maybe we can do better than unsafe-to-break all; but since we are
* changing random state, it would be hard to track that. Good 'nough. */
c->buffer->unsafe_to_break (0, c->buffer->len);
alt_index = c->random_number () % count + 1;
}
if (unlikely (alt_index > count || alt_index == 0)) return_trace (false);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"replacing glyph at %u (alternate substitution)",
c->buffer->idx);
}
c->replace_glyph (alternates[alt_index - 1]);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (alternate substitution)",
c->buffer->idx - 1u);
}
return_trace (true);
}
unsigned
get_alternates (unsigned start_offset,
unsigned *alternate_count /* IN/OUT. May be NULL. */,
hb_codepoint_t *alternate_glyphs /* OUT. May be NULL. */) const
{
if (alternates.len && alternate_count)
{
+ alternates.as_array ().sub_array (start_offset, alternate_count)
| hb_sink (hb_array (alternate_glyphs, *alternate_count))
;
}
return alternates.len;
}
template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c,
Iterator alts)
{
TRACE_SERIALIZE (this);
return_trace (alternates.serialize (c, alts));
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto it =
+ hb_iter (alternates)
| hb_filter (glyphset)
| hb_map (glyph_map)
;
auto *out = c->serializer->start_embed (*this);
return_trace (out->serialize (c->serializer, it) &&
out->alternates);
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_ALTERNATESET_HH */


@@ -0,0 +1,62 @@
#ifndef OT_LAYOUT_GSUB_ALTERNATESUBST_HH
#define OT_LAYOUT_GSUB_ALTERNATESUBST_HH
#include "AlternateSubstFormat1.hh"
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct AlternateSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
AlternateSubstFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
AlternateSubstFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
/* TODO This function is unused and not updated to 24-bit GIDs. It should be
 * reimplemented using iterators, perhaps an iterator of arrays of hb_codepoint_t. */
bool serialize (hb_serialize_context_t *c,
hb_sorted_array_t<const HBGlyphID16> glyphs,
hb_array_t<const unsigned int> alternate_len_list,
hb_array_t<const HBGlyphID16> alternate_glyphs_list)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false);
unsigned int format = 1;
u.format = format;
switch (u.format) {
case 1: return_trace (u.format1.serialize (c, glyphs, alternate_len_list, alternate_glyphs_list));
default:return_trace (false);
}
}
/* TODO subset() should choose format. */
};
}
}
}
#endif /* OT_LAYOUT_GSUB_ALTERNATESUBST_HH */


@@ -0,0 +1,128 @@
#ifndef OT_LAYOUT_GSUB_ALTERNATESUBSTFORMAT1_HH
#define OT_LAYOUT_GSUB_ALTERNATESUBSTFORMAT1_HH
#include "AlternateSet.hh"
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
template <typename Types>
struct AlternateSubstFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of Substitution table */
Array16Of<typename Types::template OffsetTo<AlternateSet<Types>>>
alternateSet; /* Array of AlternateSet tables
* ordered by Coverage Index */
public:
DEFINE_SIZE_ARRAY (2 + 2 * Types::size, alternateSet);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (coverage.sanitize (c, this) && alternateSet.sanitize (c, this));
}
bool intersects (const hb_set_t *glyphs) const
{ return (this+coverage).intersects (glyphs); }
bool may_have_non_1to1 () const
{ return false; }
void closure (hb_closure_context_t *c) const
{
+ hb_zip (this+coverage, alternateSet)
| hb_filter (c->parent_active_glyphs (), hb_first)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const AlternateSet<Types> &_) { _.closure (c); })
;
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+coverage).collect_coverage (c->input))) return;
+ hb_zip (this+coverage, alternateSet)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const AlternateSet<Types> &_) { _.collect_glyphs (c); })
;
}
const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const
{ return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }
unsigned
get_glyph_alternates (hb_codepoint_t gid,
unsigned start_offset,
unsigned *alternate_count /* IN/OUT. May be NULL. */,
hb_codepoint_t *alternate_glyphs /* OUT. May be NULL. */) const
{ return (this+alternateSet[(this+coverage).get_coverage (gid)])
.get_alternates (start_offset, alternate_count, alternate_glyphs); }
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
unsigned int index = (this+coverage).get_coverage (c->buffer->cur().codepoint);
if (index == NOT_COVERED) return_trace (false);
return_trace ((this+alternateSet[index]).apply (c));
}
bool serialize (hb_serialize_context_t *c,
hb_sorted_array_t<const HBGlyphID16> glyphs,
hb_array_t<const unsigned int> alternate_len_list,
hb_array_t<const HBGlyphID16> alternate_glyphs_list)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
if (unlikely (!alternateSet.serialize (c, glyphs.length))) return_trace (false);
for (unsigned int i = 0; i < glyphs.length; i++)
{
unsigned int alternate_len = alternate_len_list[i];
if (unlikely (!alternateSet[i]
.serialize_serialize (c, alternate_glyphs_list.sub_array (0, alternate_len))))
return_trace (false);
alternate_glyphs_list += alternate_len;
}
return_trace (coverage.serialize_serialize (c, glyphs));
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
hb_sorted_vector_t<hb_codepoint_t> new_coverage;
+ hb_zip (this+coverage, alternateSet)
| hb_filter (glyphset, hb_first)
| hb_filter (subset_offset_array (c, out->alternateSet, this), hb_second)
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
out->coverage.serialize_serialize (c->serializer, new_coverage.iter ());
return_trace (bool (new_coverage));
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_ALTERNATESUBSTFORMAT1_HH */


@@ -0,0 +1,18 @@
#ifndef OT_LAYOUT_GSUB_CHAINCONTEXTSUBST_HH
#define OT_LAYOUT_GSUB_CHAINCONTEXTSUBST_HH
// TODO(garretrieger): move to new layout.
#include "../../../hb-ot-layout-gsubgpos.hh"
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct ChainContextSubst : ChainContext {};
}
}
}
#endif /* OT_LAYOUT_GSUB_CHAINCONTEXTSUBST_HH */


@@ -0,0 +1,19 @@
#ifndef OT_LAYOUT_GSUB_COMMON_HH
#define OT_LAYOUT_GSUB_COMMON_HH
#include "../../../hb-serialize.hh"
#include "../../../hb-ot-layout-gsubgpos.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
template<typename Iterator>
static void SingleSubst_serialize (hb_serialize_context_t *c,
Iterator it);
}
}
}
#endif /* OT_LAYOUT_GSUB_COMMON_HH */


@@ -0,0 +1,18 @@
#ifndef OT_LAYOUT_GSUB_CONTEXTSUBST_HH
#define OT_LAYOUT_GSUB_CONTEXTSUBST_HH
// TODO(garretrieger): move to new layout.
#include "../../../hb-ot-layout-gsubgpos.hh"
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct ContextSubst : Context {};
}
}
}
#endif /* OT_LAYOUT_GSUB_CONTEXTSUBST_HH */


@@ -0,0 +1,22 @@
#ifndef OT_LAYOUT_GSUB_EXTENSIONSUBST_HH
#define OT_LAYOUT_GSUB_EXTENSIONSUBST_HH
// TODO(garretrieger): move to new layout.
#include "../../../hb-ot-layout-gsubgpos.hh"
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct ExtensionSubst : Extension<ExtensionSubst>
{
typedef struct SubstLookupSubTable SubTable;
bool is_reverse () const;
};
}
}
}
#endif /* OT_LAYOUT_GSUB_EXTENSIONSUBST_HH */


@@ -0,0 +1,61 @@
#ifndef OT_LAYOUT_GSUB_GSUB_HH
#define OT_LAYOUT_GSUB_GSUB_HH
#include "../../../hb-ot-layout-gsubgpos.hh"
#include "Common.hh"
#include "SubstLookup.hh"
namespace OT {
using Layout::GSUB_impl::SubstLookup;
namespace Layout {
/*
* GSUB -- Glyph Substitution
* https://docs.microsoft.com/en-us/typography/opentype/spec/gsub
*/
struct GSUB : GSUBGPOS
{
using Lookup = SubstLookup;
static constexpr hb_tag_t tableTag = HB_OT_TAG_GSUB;
const SubstLookup& get_lookup (unsigned int i) const
{ return static_cast<const SubstLookup &> (GSUBGPOS::get_lookup (i)); }
bool subset (hb_subset_context_t *c) const
{
hb_subset_layout_context_t l (c, tableTag);
return GSUBGPOS::subset<SubstLookup> (&l);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (GSUBGPOS::sanitize<SubstLookup> (c));
}
HB_INTERNAL bool is_blocklisted (hb_blob_t *blob,
hb_face_t *face) const;
void closure_lookups (hb_face_t *face,
const hb_set_t *glyphs,
hb_set_t *lookup_indexes /* IN/OUT */) const
{ GSUBGPOS::closure_lookups<SubstLookup> (face, glyphs, lookup_indexes); }
typedef GSUBGPOS::accelerator_t<GSUB> accelerator_t;
};
}
struct GSUB_accelerator_t : Layout::GSUB::accelerator_t {
GSUB_accelerator_t (hb_face_t *face) : Layout::GSUB::accelerator_t (face) {}
};
}
#endif /* OT_LAYOUT_GSUB_GSUB_HH */
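Callers normally reach this table through the public hb-ot-layout API rather
than through OT::Layout::GSUB directly. A small sketch, assuming an
already-created face, that collects the GSUB lookups enabled by the 'liga'
feature across all scripts and languages:

#include <stdio.h>
#include <hb-ot.h>

static void
collect_liga_lookups (hb_face_t *face)
{
  /* Tag arrays are terminated by HB_TAG_NONE; NULL scripts/languages mean "all". */
  hb_tag_t features[] = { HB_TAG ('l','i','g','a'), HB_TAG_NONE };
  hb_set_t *lookups = hb_set_create ();
  hb_ot_layout_collect_lookups (face, HB_OT_TAG_GSUB,
                                NULL, NULL, features, lookups);
  for (hb_codepoint_t i = HB_SET_VALUE_INVALID; hb_set_next (lookups, &i);)
    printf ("GSUB lookup %u participates in 'liga'\n", i);
  hb_set_destroy (lookups);
}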


@@ -0,0 +1,203 @@
#ifndef OT_LAYOUT_GSUB_LIGATURE_HH
#define OT_LAYOUT_GSUB_LIGATURE_HH
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
template <typename Types>
struct Ligature
{
public:
typename Types::HBGlyphID
ligGlyph; /* GlyphID of ligature to substitute */
HeadlessArray16Of<typename Types::HBGlyphID>
component; /* Array of component GlyphIDs--start
* with the second component--ordered
* in writing direction */
public:
DEFINE_SIZE_ARRAY (Types::size + 2, component);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (ligGlyph.sanitize (c) && component.sanitize (c));
}
bool intersects (const hb_set_t *glyphs) const
{ return hb_all (component, glyphs); }
bool intersects_lig_glyph (const hb_set_t *glyphs) const
{ return glyphs->has(ligGlyph); }
void closure (hb_closure_context_t *c) const
{
if (!intersects (c->glyphs)) return;
c->output->add (ligGlyph);
}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
c->input->add_array (component.arrayZ, component.get_length ());
c->output->add (ligGlyph);
}
bool would_apply (hb_would_apply_context_t *c) const
{
if (c->len != component.lenP1)
return false;
for (unsigned int i = 1; i < c->len; i++)
if (likely (c->glyphs[i] != component[i]))
return false;
return true;
}
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
unsigned int count = component.lenP1;
if (unlikely (!count)) return_trace (false);
/* Special-case to make it in-place and not consider this
* as a "ligated" substitution. */
if (unlikely (count == 1))
{
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"replacing glyph at %u (ligature substitution)",
c->buffer->idx);
}
c->replace_glyph (ligGlyph);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (ligature substitution)",
c->buffer->idx - 1u);
}
return_trace (true);
}
unsigned int total_component_count = 0;
if (unlikely (count > HB_MAX_CONTEXT_LENGTH)) return false;
unsigned match_positions_stack[4];
unsigned *match_positions = match_positions_stack;
if (unlikely (count > ARRAY_LENGTH (match_positions_stack)))
{
match_positions = (unsigned *) hb_malloc (hb_max (count, 1u) * sizeof (unsigned));
if (unlikely (!match_positions))
return_trace (false);
}
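/* Ligatures with up to ARRAY_LENGTH (match_positions_stack) components
 * match on the stack; longer ones fall back to the heap, freed on every
 * return path below. */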
unsigned int match_end = 0;
if (likely (!match_input (c, count,
&component[1],
match_glyph,
nullptr,
&match_end,
match_positions,
&total_component_count)))
{
c->buffer->unsafe_to_concat (c->buffer->idx, match_end);
if (match_positions != match_positions_stack)
hb_free (match_positions);
return_trace (false);
}
unsigned pos = 0;
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
unsigned delta = c->buffer->sync_so_far ();
pos = c->buffer->idx;
char buf[HB_MAX_CONTEXT_LENGTH * 16] = {0};
char *p = buf;
match_end += delta;
for (unsigned i = 0; i < count; i++)
{
match_positions[i] += delta;
if (i)
*p++ = ',';
snprintf (p, sizeof(buf) - (p - buf), "%u", match_positions[i]);
p += strlen(p);
}
c->buffer->message (c->font,
"ligating glyphs at %s",
buf);
}
ligate_input (c,
count,
match_positions,
match_end,
ligGlyph,
total_component_count);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"ligated glyph at %u",
pos);
}
if (match_positions != match_positions_stack)
hb_free (match_positions);
return_trace (true);
}
template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c,
hb_codepoint_t ligature,
Iterator components /* Starting from second */)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
ligGlyph = ligature;
if (unlikely (!component.serialize (c, components))) return_trace (false);
return_trace (true);
}
bool subset (hb_subset_context_t *c, unsigned coverage_idx) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
if (!intersects (&glyphset) || !glyphset.has (ligGlyph)) return_trace (false);
// Ensure Coverage table is always packed after this.
c->serializer->add_virtual_link (coverage_idx);
auto it =
+ hb_iter (component)
| hb_map (glyph_map)
;
auto *out = c->serializer->start_embed (*this);
return_trace (out->serialize (c->serializer,
glyph_map[ligGlyph],
it));
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_LIGATURE_HH */


@@ -0,0 +1,188 @@
#ifndef OT_LAYOUT_GSUB_LIGATURESET_HH
#define OT_LAYOUT_GSUB_LIGATURESET_HH
#include "Common.hh"
#include "Ligature.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
template <typename Types>
struct LigatureSet
{
public:
Array16OfOffset16To<Ligature<Types>>
ligature; /* Array of Ligature tables
* ordered by preference */
DEFINE_SIZE_ARRAY (2, ligature);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (ligature.sanitize (c, this));
}
bool intersects (const hb_set_t *glyphs) const
{
return
+ hb_iter (ligature)
| hb_map (hb_add (this))
| hb_map ([glyphs] (const Ligature<Types> &_) { return _.intersects (glyphs); })
| hb_any
;
}
bool intersects_lig_glyph (const hb_set_t *glyphs) const
{
return
+ hb_iter (ligature)
| hb_map (hb_add (this))
| hb_map ([glyphs] (const Ligature<Types> &_) {
return _.intersects_lig_glyph (glyphs) && _.intersects (glyphs);
})
| hb_any
;
}
void closure (hb_closure_context_t *c) const
{
+ hb_iter (ligature)
| hb_map (hb_add (this))
| hb_apply ([c] (const Ligature<Types> &_) { _.closure (c); })
;
}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
+ hb_iter (ligature)
| hb_map (hb_add (this))
| hb_apply ([c] (const Ligature<Types> &_) { _.collect_glyphs (c); })
;
}
bool would_apply (hb_would_apply_context_t *c) const
{
return
+ hb_iter (ligature)
| hb_map (hb_add (this))
| hb_map ([c] (const Ligature<Types> &_) { return _.would_apply (c); })
| hb_any
;
}
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
unsigned int num_ligs = ligature.len;
#ifndef HB_NO_OT_RULESETS_FAST_PATH
if (HB_OPTIMIZE_SIZE_VAL || num_ligs <= 4)
#endif
{
slow:
for (unsigned int i = 0; i < num_ligs; i++)
{
const auto &lig = this+ligature.arrayZ[i];
if (lig.apply (c)) return_trace (true);
}
return_trace (false);
}
/* This version is optimized for speed by matching the first component
* of the ligature here, instead of calling into the ligation code.
*
* This is replicated in ChainRuleSet and RuleSet. */
auto &skippy_iter = c->iter_input;
skippy_iter.reset (c->buffer->idx);
skippy_iter.set_match_func (match_always, nullptr);
skippy_iter.set_glyph_data ((HBUINT16 *) nullptr);
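/* match_always with null glyph data: the iterator is used only to find
 * the next non-skippable glyph; its ID is then compared against each
 * ligature's first component by hand below. */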
unsigned unsafe_to;
hb_codepoint_t first = (unsigned) -1;
bool matched = skippy_iter.next (&unsafe_to);
if (likely (matched))
{
first = c->buffer->info[skippy_iter.idx].codepoint;
unsafe_to = skippy_iter.idx + 1;
if (skippy_iter.may_skip (c->buffer->info[skippy_iter.idx]))
{
/* Can't use the fast path if eg. the next char is a default-ignorable
* or other skippable. */
goto slow;
}
}
else
goto slow;
bool unsafe_to_concat = false;
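/* Set when a multi-component ligature is rejected by the first-glyph
 * test alone; the skipped positions are then not safe points at which
 * to concatenate the buffer. */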
for (unsigned int i = 0; i < num_ligs; i++)
{
const auto &lig = this+ligature.arrayZ[i];
if (unlikely (lig.component.lenP1 <= 1) ||
lig.component.arrayZ[0] == first)
{
if (lig.apply (c))
{
if (unsafe_to_concat)
c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
return_trace (true);
}
}
else if (likely (lig.component.lenP1 > 1))
unsafe_to_concat = true;
}
if (likely (unsafe_to_concat))
c->buffer->unsafe_to_concat (c->buffer->idx, unsafe_to);
return_trace (false);
}
bool serialize (hb_serialize_context_t *c,
hb_array_t<const HBGlyphID16> ligatures,
hb_array_t<const unsigned int> component_count_list,
hb_array_t<const HBGlyphID16> &component_list /* Starting from second for each ligature */)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
if (unlikely (!ligature.serialize (c, ligatures.length))) return_trace (false);
for (unsigned int i = 0; i < ligatures.length; i++)
{
unsigned int component_count = (unsigned) hb_max ((int) component_count_list[i] - 1, 0);
if (unlikely (!ligature[i].serialize_serialize (c,
ligatures[i],
component_list.sub_array (0, component_count))))
return_trace (false);
component_list += component_count;
}
return_trace (true);
}
bool subset (hb_subset_context_t *c, unsigned coverage_idx) const
{
TRACE_SUBSET (this);
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
+ hb_iter (ligature)
| hb_filter (subset_offset_array (c, out->ligature, this, coverage_idx))
| hb_drain
;
if (bool (out->ligature))
// Ensure Coverage table is always packed after this.
c->serializer->add_virtual_link (coverage_idx);
return_trace (bool (out->ligature));
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_LIGATURESET_HH */


@@ -0,0 +1,71 @@
#ifndef OT_LAYOUT_GSUB_LIGATURESUBST_HH
#define OT_LAYOUT_GSUB_LIGATURESUBST_HH
#include "Common.hh"
#include "LigatureSubstFormat1.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct LigatureSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
LigatureSubstFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
LigatureSubstFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
/* TODO This function only supports small (16-bit) GIDs and is not updated to
 * 24-bit GIDs. It should be reimplemented using iterators, perhaps an iterator
 * of arrays of hb_codepoint_t instead. */
bool serialize (hb_serialize_context_t *c,
hb_sorted_array_t<const HBGlyphID16> first_glyphs,
hb_array_t<const unsigned int> ligature_per_first_glyph_count_list,
hb_array_t<const HBGlyphID16> ligatures_list,
hb_array_t<const unsigned int> component_count_list,
hb_array_t<const HBGlyphID16> component_list /* Starting from second for each ligature */)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false);
unsigned int format = 1;
u.format = format;
switch (u.format) {
case 1: return_trace (u.format1.serialize (c,
first_glyphs,
ligature_per_first_glyph_count_list,
ligatures_list,
component_count_list,
component_list));
default:return_trace (false);
}
}
/* TODO subset() should choose format. */
};
}
}
}
#endif /* OT_LAYOUT_GSUB_LIGATURESUBST_HH */


@@ -0,0 +1,203 @@
#ifndef OT_LAYOUT_GSUB_LIGATURESUBSTFORMAT1_HH
#define OT_LAYOUT_GSUB_LIGATURESUBSTFORMAT1_HH
#include "Common.hh"
#include "LigatureSet.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
template <typename Types>
struct LigatureSubstFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of Substitution table */
Array16Of<typename Types::template OffsetTo<LigatureSet<Types>>>
ligatureSet; /* Array of LigatureSet tables
* ordered by Coverage Index */
public:
DEFINE_SIZE_ARRAY (4 + Types::size, ligatureSet);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (coverage.sanitize (c, this) && ligatureSet.sanitize (c, this));
}
bool intersects (const hb_set_t *glyphs) const
{
return
+ hb_zip (this+coverage, ligatureSet)
| hb_filter (*glyphs, hb_first)
| hb_map (hb_second)
| hb_map ([this, glyphs] (const typename Types::template OffsetTo<LigatureSet<Types>> &_)
{ return (this+_).intersects (glyphs); })
| hb_any
;
}
bool may_have_non_1to1 () const
{ return true; }
void closure (hb_closure_context_t *c) const
{
+ hb_zip (this+coverage, ligatureSet)
| hb_filter (c->parent_active_glyphs (), hb_first)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const LigatureSet<Types> &_) { _.closure (c); })
;
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+coverage).collect_coverage (c->input))) return;
+ hb_zip (this+coverage, ligatureSet)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const LigatureSet<Types> &_) { _.collect_glyphs (c); })
;
}
const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const
{
unsigned int index = (this+coverage).get_coverage (c->glyphs[0]);
if (likely (index == NOT_COVERED)) return false;
const auto &lig_set = this+ligatureSet[index];
return lig_set.would_apply (c);
}
unsigned cache_cost () const
{
return (this+coverage).cost ();
}
static void * cache_func (void *p, hb_ot_lookup_cache_op_t op)
{
switch (op)
{
case hb_ot_lookup_cache_op_t::CREATE:
{
hb_ot_lookup_cache_t *cache = (hb_ot_lookup_cache_t *) hb_malloc (sizeof (hb_ot_lookup_cache_t));
if (likely (cache))
cache->clear ();
return cache;
}
case hb_ot_lookup_cache_op_t::ENTER:
return (void *) true;
case hb_ot_lookup_cache_op_t::LEAVE:
return nullptr;
case hb_ot_lookup_cache_op_t::DESTROY:
{
hb_ot_lookup_cache_t *cache = (hb_ot_lookup_cache_t *) p;
hb_free (cache);
return nullptr;
}
}
return nullptr;
}
bool apply_cached (hb_ot_apply_context_t *c) const { return _apply (c, true); }
bool apply (hb_ot_apply_context_t *c) const { return _apply (c, false); }
bool _apply (hb_ot_apply_context_t *c, bool cached) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
hb_ot_lookup_cache_t *cache = cached ? (hb_ot_lookup_cache_t *) c->lookup_accel->cache : nullptr;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint, cache);
#else
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
#endif
if (index == NOT_COVERED) return_trace (false);
const auto &lig_set = this+ligatureSet[index];
return_trace (lig_set.apply (c));
}
bool serialize (hb_serialize_context_t *c,
hb_sorted_array_t<const HBGlyphID16> first_glyphs,
hb_array_t<const unsigned int> ligature_per_first_glyph_count_list,
hb_array_t<const HBGlyphID16> ligatures_list,
hb_array_t<const unsigned int> component_count_list,
hb_array_t<const HBGlyphID16> component_list /* Starting from second for each ligature */)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
if (unlikely (!ligatureSet.serialize (c, first_glyphs.length))) return_trace (false);
for (unsigned int i = 0; i < first_glyphs.length; i++)
{
unsigned int ligature_count = ligature_per_first_glyph_count_list[i];
if (unlikely (!ligatureSet[i]
.serialize_serialize (c,
ligatures_list.sub_array (0, ligature_count),
component_count_list.sub_array (0, ligature_count),
component_list))) return_trace (false);
ligatures_list += ligature_count;
component_count_list += ligature_count;
}
return_trace (coverage.serialize_serialize (c, first_glyphs));
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
// Due to a bug in some older versions of Windows 7, the Coverage table must be
// packed after the LigatureSet and Ligature tables, so serialize Coverage first,
// which places it last in the packed order.
hb_set_t new_coverage;
+ hb_zip (this+coverage, hb_iter (ligatureSet) | hb_map (hb_add (this)))
| hb_filter (glyphset, hb_first)
| hb_filter ([&] (const LigatureSet<Types>& _) {
return _.intersects_lig_glyph (&glyphset);
}, hb_second)
| hb_map (hb_first)
| hb_sink (new_coverage);
if (!c->serializer->push<Coverage> ()
->serialize (c->serializer,
+ new_coverage.iter () | hb_map_retains_sorting (glyph_map)))
{
c->serializer->pop_discard ();
return_trace (false);
}
unsigned coverage_idx = c->serializer->pop_pack ();
c->serializer->add_link (out->coverage, coverage_idx);
+ hb_zip (this+coverage, ligatureSet)
| hb_filter (new_coverage, hb_first)
| hb_map (hb_second)
// to ensure that the repacker always orders the coverage table after the LigatureSet
// and LigatureSubtable's they will be linked to the Coverage table via a virtual link
// the coverage table object idx is passed down to facilitate this.
| hb_apply (subset_offset_array (c, out->ligatureSet, this, coverage_idx))
;
return_trace (bool (new_coverage));
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_LIGATURESUBSTFORMAT1_HH */


@@ -0,0 +1,62 @@
#ifndef OT_LAYOUT_GSUB_MULTIPLESUBST_HH
#define OT_LAYOUT_GSUB_MULTIPLESUBST_HH
#include "Common.hh"
#include "MultipleSubstFormat1.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct MultipleSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
MultipleSubstFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MultipleSubstFormat1_2<MediumTypes> format2;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
template<typename Iterator,
hb_requires (hb_is_sorted_iterator (Iterator))>
bool serialize (hb_serialize_context_t *c,
Iterator it)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false);
unsigned int format = 1;
u.format = format;
switch (u.format) {
case 1: return_trace (u.format1.serialize (c, it));
default:return_trace (false);
}
}
/* TODO subset() should choose format. */
};
}
}
}
#endif /* OT_LAYOUT_GSUB_MULTIPLESUBST_HH */


@@ -0,0 +1,130 @@
#ifndef OT_LAYOUT_GSUB_MULTIPLESUBSTFORMAT1_HH
#define OT_LAYOUT_GSUB_MULTIPLESUBSTFORMAT1_HH
#include "Common.hh"
#include "Sequence.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
template <typename Types>
struct MultipleSubstFormat1_2
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of Substitution table */
Array16Of<typename Types::template OffsetTo<Sequence<Types>>>
sequence; /* Array of Sequence tables
* ordered by Coverage Index */
public:
DEFINE_SIZE_ARRAY (4 + Types::size, sequence);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (coverage.sanitize (c, this) && sequence.sanitize (c, this));
}
bool intersects (const hb_set_t *glyphs) const
{ return (this+coverage).intersects (glyphs); }
bool may_have_non_1to1 () const
{ return true; }
void closure (hb_closure_context_t *c) const
{
+ hb_zip (this+coverage, sequence)
| hb_filter (c->parent_active_glyphs (), hb_first)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const Sequence<Types> &_) { _.closure (c); })
;
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+coverage).collect_coverage (c->input))) return;
+ hb_zip (this+coverage, sequence)
| hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([c] (const Sequence<Types> &_) { _.collect_glyphs (c); })
;
}
const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const
{ return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
unsigned int index = (this+coverage).get_coverage (c->buffer->cur().codepoint);
if (index == NOT_COVERED) return_trace (false);
return_trace ((this+sequence[index]).apply (c));
}
template<typename Iterator,
hb_requires (hb_is_sorted_iterator (Iterator))>
bool serialize (hb_serialize_context_t *c,
Iterator it)
{
TRACE_SERIALIZE (this);
auto sequences =
+ it
| hb_map (hb_second)
;
auto glyphs =
+ it
| hb_map_retains_sorting (hb_first)
;
if (unlikely (!c->extend_min (this))) return_trace (false);
if (unlikely (!sequence.serialize (c, sequences.length))) return_trace (false);
for (auto& pair : hb_zip (sequences, sequence))
{
if (unlikely (!pair.second
.serialize_serialize (c, pair.first)))
return_trace (false);
}
return_trace (coverage.serialize_serialize (c, glyphs));
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto *out = c->serializer->start_embed (*this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
out->format = format;
hb_sorted_vector_t<hb_codepoint_t> new_coverage;
+ hb_zip (this+coverage, sequence)
| hb_filter (glyphset, hb_first)
| hb_filter (subset_offset_array (c, out->sequence, this), hb_second)
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
out->coverage.serialize_serialize (c->serializer, new_coverage.iter ());
return_trace (bool (new_coverage));
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_MULTIPLESUBSTFORMAT1_HH */


@@ -0,0 +1,36 @@
#ifndef OT_LAYOUT_GSUB_REVERSECHAINSINGLESUBST_HH
#define OT_LAYOUT_GSUB_REVERSECHAINSINGLESUBST_HH
#include "Common.hh"
#include "ReverseChainSingleSubstFormat1.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct ReverseChainSingleSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
ReverseChainSingleSubstFormat1 format1;
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());
}
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_REVERSECHAINSINGLESUBST_HH */


@@ -0,0 +1,245 @@
#ifndef OT_LAYOUT_GSUB_REVERSECHAINSINGLESUBSTFORMAT1_HH
#define OT_LAYOUT_GSUB_REVERSECHAINSINGLESUBSTFORMAT1_HH
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct ReverseChainSingleSubstFormat1
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
Offset16To<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of table */
Array16OfOffset16To<Coverage>
backtrack; /* Array of coverage tables
* in backtracking sequence, in glyph
* sequence order */
Array16OfOffset16To<Coverage>
lookaheadX; /* Array of coverage tables
* in lookahead sequence, in glyph
* sequence order */
Array16Of<HBGlyphID16>
substituteX; /* Array of substitute
* GlyphIDs--ordered by Coverage Index */
public:
DEFINE_SIZE_MIN (10);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!(coverage.sanitize (c, this) && backtrack.sanitize (c, this)))
return_trace (false);
hb_barrier ();
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
if (!lookahead.sanitize (c, this))
return_trace (false);
hb_barrier ();
const auto &substitute = StructAfter<decltype (substituteX)> (lookahead);
return_trace (substitute.sanitize (c));
}
bool intersects (const hb_set_t *glyphs) const
{
if (!(this+coverage).intersects (glyphs))
return false;
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
unsigned int count;
count = backtrack.len;
for (unsigned int i = 0; i < count; i++)
if (!(this+backtrack[i]).intersects (glyphs))
return false;
count = lookahead.len;
for (unsigned int i = 0; i < count; i++)
if (!(this+lookahead[i]).intersects (glyphs))
return false;
return true;
}
bool may_have_non_1to1 () const
{ return false; }
void closure (hb_closure_context_t *c) const
{
if (!intersects (c->glyphs)) return;
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
const auto &substitute = StructAfter<decltype (substituteX)> (lookahead);
+ hb_zip (this+coverage, substitute)
| hb_filter (c->parent_active_glyphs (), hb_first)
| hb_map (hb_second)
| hb_sink (c->output)
;
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+coverage).collect_coverage (c->input))) return;
unsigned int count;
count = backtrack.len;
for (unsigned int i = 0; i < count; i++)
if (unlikely (!(this+backtrack[i]).collect_coverage (c->before))) return;
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
count = lookahead.len;
for (unsigned int i = 0; i < count; i++)
if (unlikely (!(this+lookahead[i]).collect_coverage (c->after))) return;
const auto &substitute = StructAfter<decltype (substituteX)> (lookahead);
count = substitute.len;
c->output->add_array (substitute.arrayZ, substitute.len);
}
const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const
{ return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
unsigned int index = (this+coverage).get_coverage (c->buffer->cur ().codepoint);
if (index == NOT_COVERED) return_trace (false);
if (unlikely (c->nesting_level_left != HB_MAX_NESTING_LEVEL))
return_trace (false); /* No chaining to this type */
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
const auto &substitute = StructAfter<decltype (substituteX)> (lookahead);
if (unlikely (index >= substitute.len)) return_trace (false);
unsigned int start_index = 0, end_index = 0;
if (match_backtrack (c,
backtrack.len, (HBUINT16 *) backtrack.arrayZ,
match_coverage, this,
&start_index) &&
match_lookahead (c,
lookahead.len, (HBUINT16 *) lookahead.arrayZ,
match_coverage, this,
c->buffer->idx + 1, &end_index))
{
c->buffer->unsafe_to_break_from_outbuffer (start_index, end_index);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replacing glyph at %u (reverse chaining substitution)",
c->buffer->idx);
}
c->replace_glyph_inplace (substitute[index]);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (reverse chaining substitution)",
c->buffer->idx);
}
/* Note: We DON'T decrease buffer->idx. The main loop does it
* for us. This is useful for preventing surprises if someone
* calls us through a Context lookup. */
return_trace (true);
}
else
{
c->buffer->unsafe_to_concat_from_outbuffer (start_index, end_index);
return_trace (false);
}
}
template<typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool serialize_coverage_offset_array (hb_subset_context_t *c, Iterator it) const
{
TRACE_SERIALIZE (this);
auto *out = c->serializer->start_embed<Array16OfOffset16To<Coverage>> ();
if (unlikely (!c->serializer->allocate_size<HBUINT16> (HBUINT16::static_size)))
return_trace (false);
for (auto& offset : it) {
auto *o = out->serialize_append (c->serializer);
if (unlikely (!o) || !o->serialize_subset (c, offset, this))
return_trace (false);
}
return_trace (true);
}
template<typename Iterator, typename BacktrackIterator, typename LookaheadIterator,
hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_pair_t)),
hb_requires (hb_is_iterator (BacktrackIterator)),
hb_requires (hb_is_iterator (LookaheadIterator))>
bool serialize (hb_subset_context_t *c,
Iterator coverage_subst_iter,
BacktrackIterator backtrack_iter,
LookaheadIterator lookahead_iter) const
{
TRACE_SERIALIZE (this);
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->embed (this->format))) return_trace (false);
if (unlikely (!c->serializer->embed (this->coverage))) return_trace (false);
if (!serialize_coverage_offset_array (c, backtrack_iter)) return_trace (false);
if (!serialize_coverage_offset_array (c, lookahead_iter)) return_trace (false);
auto *substitute_out = c->serializer->start_embed<Array16Of<HBGlyphID16>> ();
auto substitutes =
+ coverage_subst_iter
| hb_map (hb_second)
;
auto glyphs =
+ coverage_subst_iter
| hb_map_retains_sorting (hb_first)
;
if (unlikely (! c->serializer->check_success (substitute_out->serialize (c->serializer, substitutes))))
return_trace (false);
if (unlikely (!out->coverage.serialize_serialize (c->serializer, glyphs)))
return_trace (false);
return_trace (true);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
const auto &lookahead = StructAfter<decltype (lookaheadX)> (backtrack);
const auto &substitute = StructAfter<decltype (substituteX)> (lookahead);
auto it =
+ hb_zip (this+coverage, substitute)
| hb_filter (glyphset, hb_first)
| hb_filter (glyphset, hb_second)
| hb_map_retains_sorting ([&] (hb_pair_t<hb_codepoint_t, const HBGlyphID16 &> p) -> hb_codepoint_pair_t
{ return hb_pair (glyph_map[p.first], glyph_map[p.second]); })
;
return_trace (bool (it) && serialize (c, it, backtrack.iter (), lookahead.iter ()));
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_REVERSECHAINSINGLESUBSTFORMAT1_HH */


@@ -0,0 +1,165 @@
#ifndef OT_LAYOUT_GSUB_SEQUENCE_HH
#define OT_LAYOUT_GSUB_SEQUENCE_HH
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
template <typename Types>
struct Sequence
{
protected:
Array16Of<typename Types::HBGlyphID>
substitute; /* String of GlyphIDs to substitute */
public:
DEFINE_SIZE_ARRAY (2, substitute);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (substitute.sanitize (c));
}
bool intersects (const hb_set_t *glyphs) const
{ return hb_all (substitute, glyphs); }
void closure (hb_closure_context_t *c) const
{ c->output->add_array (substitute.arrayZ, substitute.len); }
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ c->output->add_array (substitute.arrayZ, substitute.len); }
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
unsigned int count = substitute.len;
/* Special-case to make it in-place and not consider this
* as a "multiplied" substitution. */
if (unlikely (count == 1))
{
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"replacing glyph at %u (multiple substitution)",
c->buffer->idx);
}
c->replace_glyph (substitute.arrayZ[0]);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (multiple substitution)",
c->buffer->idx - 1u);
}
return_trace (true);
}
/* Spec disallows this, but Uniscribe allows it.
* https://github.com/harfbuzz/harfbuzz/issues/253 */
else if (unlikely (count == 0))
{
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"deleting glyph at %u (multiple substitution)",
c->buffer->idx);
}
c->buffer->delete_glyph ();
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"deleted glyph at %u (multiple substitution)",
c->buffer->idx);
}
return_trace (true);
}
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"multiplying glyph at %u",
c->buffer->idx);
}
unsigned int klass = _hb_glyph_info_is_ligature (&c->buffer->cur()) ?
HB_OT_LAYOUT_GLYPH_PROPS_BASE_GLYPH : 0;
unsigned lig_id = _hb_glyph_info_get_lig_id (&c->buffer->cur());
for (unsigned int i = 0; i < count; i++)
{
/* If it is attached to a ligature, don't disturb that.
* https://github.com/harfbuzz/harfbuzz/issues/3069 */
if (!lig_id)
_hb_glyph_info_set_lig_props_for_component (&c->buffer->cur(), i);
c->output_glyph_for_component (substitute.arrayZ[i], klass);
}
c->buffer->skip_glyph ();
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
char buf[HB_MAX_CONTEXT_LENGTH * 16] = {0};
char *p = buf;
for (unsigned i = c->buffer->idx - count; i < c->buffer->idx; i++)
{
if (buf < p && sizeof(buf) - 1u > unsigned (p - buf))
*p++ = ',';
snprintf (p, sizeof(buf) - (p - buf), "%u", i);
p += strlen(p);
}
c->buffer->message (c->font,
"multiplied glyphs at %s",
buf);
}
return_trace (true);
}
template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c,
Iterator subst)
{
TRACE_SERIALIZE (this);
return_trace (substitute.serialize (c, subst));
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
if (!intersects (&glyphset)) return_trace (false);
auto it =
+ hb_iter (substitute)
| hb_map (glyph_map)
;
auto *out = c->serializer->start_embed (*this);
return_trace (out->serialize (c->serializer, it));
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_SEQUENCE_HH */


@@ -0,0 +1,103 @@
#ifndef OT_LAYOUT_GSUB_SINGLESUBST_HH
#define OT_LAYOUT_GSUB_SINGLESUBST_HH
#include "Common.hh"
#include "SingleSubstFormat1.hh"
#include "SingleSubstFormat2.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct SingleSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
SingleSubstFormat1_3<SmallTypes> format1;
SingleSubstFormat2_4<SmallTypes> format2;
#ifndef HB_NO_BEYOND_64K
SingleSubstFormat1_3<MediumTypes> format3;
SingleSubstFormat2_4<MediumTypes> format4;
#endif
} u;
public:
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 3: return_trace (c->dispatch (u.format3, std::forward<Ts> (ds)...));
case 4: return_trace (c->dispatch (u.format4, std::forward<Ts> (ds)...));
#endif
default:return_trace (c->default_return_value ());
}
}
template<typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator,
const hb_codepoint_pair_t))>
bool serialize (hb_serialize_context_t *c,
Iterator glyphs)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false);
unsigned format = 2;
unsigned delta = 0;
if (glyphs)
{
format = 1;
hb_codepoint_t mask = 0xFFFFu;
#ifndef HB_NO_BEYOND_64K
if (+ glyphs
| hb_map_retains_sorting (hb_second)
| hb_filter ([] (hb_codepoint_t gid) { return gid > 0xFFFFu; }))
{
format += 2;
mask = 0xFFFFFFu;
}
#endif
auto get_delta = [=] (hb_codepoint_pair_t _)
{ return (unsigned) (_.second - _.first) & mask; };
delta = get_delta (*glyphs);
if (!hb_all (++(+glyphs), delta, get_delta)) format += 1;
}
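/* Worked example (informal): pairs (10->20, 11->21) share delta 10, so
 * format stays 1 (delta-encoded); (10->20, 11->25) disagree and bump to
 * the table-driven format 2; any GID above 0xFFFF bumps 1->3 / 2->4. */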
u.format = format;
switch (u.format) {
case 1: return_trace (u.format1.serialize (c,
+ glyphs
| hb_map_retains_sorting (hb_first),
delta));
case 2: return_trace (u.format2.serialize (c, glyphs));
#ifndef HB_NO_BEYOND_64K
case 3: return_trace (u.format3.serialize (c,
+ glyphs
| hb_map_retains_sorting (hb_first),
delta));
case 4: return_trace (u.format4.serialize (c, glyphs));
#endif
default:return_trace (false);
}
}
};
template<typename Iterator>
static void
SingleSubst_serialize (hb_serialize_context_t *c,
Iterator it)
{ c->start_embed<SingleSubst> ()->serialize (c, it); }
}
}
}
#endif /* OT_LAYOUT_GSUB_SINGLESUBST_HH */


@@ -0,0 +1,204 @@
#ifndef OT_LAYOUT_GSUB_SINGLESUBSTFORMAT1_HH
#define OT_LAYOUT_GSUB_SINGLESUBSTFORMAT1_HH
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
template <typename Types>
struct SingleSubstFormat1_3
{
protected:
HBUINT16 format; /* Format identifier--format = 1 */
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of Substitution table */
typename Types::HBUINT
deltaGlyphID; /* Add to original GlyphID to get
* substitute GlyphID, modulo the glyph-ID width
* (0x10000 for 16-bit GIDs) */
public:
DEFINE_SIZE_STATIC (2 + 2 * Types::size);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
coverage.sanitize (c, this) &&
/* The coverage table may use a range to represent a set
* of glyphs, which means a small number of bytes can
* generate a large glyph set. Manually modify the
* sanitizer max ops to take this into account.
*
* Note: This check *must* be right after coverage sanitize. */
c->check_ops ((this + coverage).get_population () >> 1));
}
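/* 0xFFFFu for 16-bit glyph IDs (SmallTypes), 0xFFFFFFu for 24-bit
 * (MediumTypes); all delta arithmetic wraps within this width. */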
hb_codepoint_t get_mask () const
{ return (1 << (8 * Types::size)) - 1; }
bool intersects (const hb_set_t *glyphs) const
{ return (this+coverage).intersects (glyphs); }
bool may_have_non_1to1 () const
{ return false; }
void closure (hb_closure_context_t *c) const
{
hb_codepoint_t d = deltaGlyphID;
hb_codepoint_t mask = get_mask ();
/* Help the fuzzer avoid this function as much as possible. */
unsigned pop = (this+coverage).get_population ();
if (pop >= mask)
return;
hb_set_t intersection;
(this+coverage).intersect_set (c->parent_active_glyphs (), intersection);
/* In degenerate fuzzer-found fonts, though not in real fonts,
* this table can keep adding new glyphs in each round of closure.
* Refuse to close over if it maps a glyph range onto an overlapping range. */
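/* For instance (informal): coverage {1..10} with delta 5 maps onto
 * {6..15}, which overlaps the source range, so each closure round would
 * add fresh glyphs forever; the check below rejects exactly that shape. */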
hb_codepoint_t min_before = intersection.get_min ();
hb_codepoint_t max_before = intersection.get_max ();
hb_codepoint_t min_after = (min_before + d) & mask;
hb_codepoint_t max_after = (max_before + d) & mask;
if (intersection.get_population () == max_before - min_before + 1 &&
((min_before <= min_after && min_after <= max_before) ||
(min_before <= max_after && max_after <= max_before)))
return;
+ hb_iter (intersection)
| hb_map ([d, mask] (hb_codepoint_t g) { return (g + d) & mask; })
| hb_sink (c->output)
;
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+coverage).collect_coverage (c->input))) return;
hb_codepoint_t d = deltaGlyphID;
hb_codepoint_t mask = get_mask ();
+ hb_iter (this+coverage)
| hb_map ([d, mask] (hb_codepoint_t g) { return (g + d) & mask; })
| hb_sink (c->output)
;
}
const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const
{ return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }
unsigned
get_glyph_alternates (hb_codepoint_t glyph_id,
unsigned start_offset,
unsigned *alternate_count /* IN/OUT. May be NULL. */,
hb_codepoint_t *alternate_glyphs /* OUT. May be NULL. */) const
{
unsigned int index = (this+coverage).get_coverage (glyph_id);
if (likely (index == NOT_COVERED))
{
if (alternate_count)
*alternate_count = 0;
return 0;
}
if (alternate_count && *alternate_count)
{
hb_codepoint_t d = deltaGlyphID;
hb_codepoint_t mask = get_mask ();
glyph_id = (glyph_id + d) & mask;
*alternate_glyphs = glyph_id;
*alternate_count = 1;
}
return 1;
}
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
hb_codepoint_t glyph_id = c->buffer->cur().codepoint;
unsigned int index = (this+coverage).get_coverage (glyph_id);
if (index == NOT_COVERED) return_trace (false);
hb_codepoint_t d = deltaGlyphID;
hb_codepoint_t mask = get_mask ();
glyph_id = (glyph_id + d) & mask;
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"replacing glyph at %u (single substitution)",
c->buffer->idx);
}
c->replace_glyph (glyph_id);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (single substitution)",
c->buffer->idx - 1u);
}
return_trace (true);
}
template<typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c,
Iterator glyphs,
unsigned delta)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (this))) return_trace (false);
if (unlikely (!coverage.serialize_serialize (c, glyphs))) return_trace (false);
c->check_assign (deltaGlyphID, delta, HB_SERIALIZE_ERROR_INT_OVERFLOW);
return_trace (true);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
hb_codepoint_t d = deltaGlyphID;
hb_codepoint_t mask = get_mask ();
hb_set_t intersection;
(this+coverage).intersect_set (glyphset, intersection);
auto it =
+ hb_iter (intersection)
| hb_map_retains_sorting ([d, mask] (hb_codepoint_t g) {
return hb_codepoint_pair_t (g,
(g + d) & mask); })
| hb_filter (glyphset, hb_second)
| hb_map_retains_sorting ([&] (hb_codepoint_pair_t p) -> hb_codepoint_pair_t
{ return hb_pair (glyph_map[p.first], glyph_map[p.second]); })
;
bool ret = bool (it);
SingleSubst_serialize (c->serializer, it);
return_trace (ret);
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_SINGLESUBSTFORMAT1_HH */


@@ -0,0 +1,176 @@
#ifndef OT_LAYOUT_GSUB_SINGLESUBSTFORMAT2_HH
#define OT_LAYOUT_GSUB_SINGLESUBSTFORMAT2_HH
#include "Common.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
template <typename Types>
struct SingleSubstFormat2_4
{
protected:
HBUINT16 format; /* Format identifier--format = 2 */
typename Types::template OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from
* beginning of Substitution table */
Array16Of<typename Types::HBGlyphID>
substitute; /* Array of substitute
* GlyphIDs--ordered by Coverage Index */
public:
DEFINE_SIZE_ARRAY (4 + Types::size, substitute);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (coverage.sanitize (c, this) && substitute.sanitize (c));
}
bool intersects (const hb_set_t *glyphs) const
{ return (this+coverage).intersects (glyphs); }
bool may_have_non_1to1 () const
{ return false; }
void closure (hb_closure_context_t *c) const
{
auto &cov = this+coverage;
auto &glyph_set = c->parent_active_glyphs ();
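/* Heuristic: when the substitute array dwarfs the requested glyph set,
 * probe coverage per glyph instead of zipping the whole table. */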
if (substitute.len > glyph_set.get_population () * 4)
{
for (auto g : glyph_set)
{
unsigned i = cov.get_coverage (g);
if (i == NOT_COVERED || i >= substitute.len)
continue;
c->output->add (substitute.arrayZ[i]);
}
return;
}
+ hb_zip (cov, substitute)
| hb_filter (glyph_set, hb_first)
| hb_map (hb_second)
| hb_sink (c->output)
;
}
void closure_lookups (hb_closure_lookups_context_t *c) const {}
void collect_glyphs (hb_collect_glyphs_context_t *c) const
{
if (unlikely (!(this+coverage).collect_coverage (c->input))) return;
+ hb_zip (this+coverage, substitute)
| hb_map (hb_second)
| hb_sink (c->output)
;
}
const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const
{ return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }
unsigned
get_glyph_alternates (hb_codepoint_t glyph_id,
unsigned start_offset,
unsigned *alternate_count /* IN/OUT. May be NULL. */,
hb_codepoint_t *alternate_glyphs /* OUT. May be NULL. */) const
{
unsigned int index = (this+coverage).get_coverage (glyph_id);
if (likely (index == NOT_COVERED))
{
if (alternate_count)
*alternate_count = 0;
return 0;
}
if (alternate_count && *alternate_count)
{
glyph_id = substitute[index];
*alternate_glyphs = glyph_id;
*alternate_count = 1;
}
return 1;
}
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
unsigned int index = (this+coverage).get_coverage (c->buffer->cur().codepoint);
if (index == NOT_COVERED) return_trace (false);
if (unlikely (index >= substitute.len)) return_trace (false);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->sync_so_far ();
c->buffer->message (c->font,
"replacing glyph at %u (single substitution)",
c->buffer->idx);
}
c->replace_glyph (substitute[index]);
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
{
c->buffer->message (c->font,
"replaced glyph at %u (single substitution)",
c->buffer->idx - 1u);
}
return_trace (true);
}
template<typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator,
hb_codepoint_pair_t))>
bool serialize (hb_serialize_context_t *c,
Iterator it)
{
TRACE_SERIALIZE (this);
auto substitutes =
+ it
| hb_map (hb_second)
;
auto glyphs =
+ it
| hb_map_retains_sorting (hb_first)
;
if (unlikely (!c->extend_min (this))) return_trace (false);
if (unlikely (!substitute.serialize (c, substitutes))) return_trace (false);
if (unlikely (!coverage.serialize_serialize (c, glyphs))) return_trace (false);
return_trace (true);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map;
auto it =
+ hb_zip (this+coverage, substitute)
| hb_filter (glyphset, hb_first)
| hb_filter (glyphset, hb_second)
| hb_map_retains_sorting ([&] (hb_pair_t<hb_codepoint_t, const typename Types::HBGlyphID &> p) -> hb_codepoint_pair_t
{ return hb_pair (glyph_map[p.first], glyph_map[p.second]); })
;
bool ret = bool (it);
SingleSubst_serialize (c->serializer, it);
return_trace (ret);
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_SINGLESUBSTFORMAT2_HH */


@@ -0,0 +1,220 @@
#ifndef OT_LAYOUT_GSUB_SUBSTLOOKUP_HH
#define OT_LAYOUT_GSUB_SUBSTLOOKUP_HH
#include "Common.hh"
#include "SubstLookupSubTable.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct SubstLookup : Lookup
{
using SubTable = SubstLookupSubTable;
bool sanitize (hb_sanitize_context_t *c) const
{ return Lookup::sanitize<SubTable> (c); }
const SubTable& get_subtable (unsigned int i) const
{ return Lookup::get_subtable<SubTable> (i); }
static inline bool lookup_type_is_reverse (unsigned int lookup_type)
{ return lookup_type == SubTable::ReverseChainSingle; }
bool is_reverse () const
{
unsigned int type = get_type ();
if (unlikely (type == SubTable::Extension))
return get_subtable (0).u.extension.is_reverse ();
return lookup_type_is_reverse (type);
}
bool may_have_non_1to1 () const
{
hb_have_non_1to1_context_t c;
return dispatch (&c);
}
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);
return_trace (dispatch (c));
}
bool intersects (const hb_set_t *glyphs) const
{
hb_intersects_context_t c (glyphs);
return dispatch (&c);
}
hb_closure_context_t::return_t closure (hb_closure_context_t *c, unsigned int this_index) const
{
if (!c->should_visit_lookup (this_index))
return hb_closure_context_t::default_return_value ();
c->set_recurse_func (dispatch_closure_recurse_func);
hb_closure_context_t::return_t ret = dispatch (c);
c->flush ();
return ret;
}
hb_closure_lookups_context_t::return_t closure_lookups (hb_closure_lookups_context_t *c, unsigned this_index) const
{
if (c->is_lookup_visited (this_index))
return hb_closure_lookups_context_t::default_return_value ();
c->set_lookup_visited (this_index);
if (!intersects (c->glyphs))
{
c->set_lookup_inactive (this_index);
return hb_closure_lookups_context_t::default_return_value ();
}
hb_closure_lookups_context_t::return_t ret = dispatch (c);
return ret;
}
hb_collect_glyphs_context_t::return_t collect_glyphs (hb_collect_glyphs_context_t *c) const
{
c->set_recurse_func (dispatch_recurse_func<hb_collect_glyphs_context_t>);
return dispatch (c);
}
template <typename set_t>
void collect_coverage (set_t *glyphs) const
{
hb_collect_coverage_context_t<set_t> c (glyphs);
dispatch (&c);
}
bool would_apply (hb_would_apply_context_t *c,
const hb_ot_layout_lookup_accelerator_t *accel) const
{
if (unlikely (!c->len)) return false;
if (!accel->may_have (c->glyphs[0])) return false;
return dispatch (c);
}
template<typename Glyphs, typename Substitutes,
hb_requires (hb_is_sorted_source_of (Glyphs,
const hb_codepoint_t) &&
hb_is_source_of (Substitutes,
const hb_codepoint_t))>
bool serialize_single (hb_serialize_context_t *c,
uint32_t lookup_props,
Glyphs glyphs,
Substitutes substitutes)
{
TRACE_SERIALIZE (this);
if (unlikely (!Lookup::serialize (c, SubTable::Single, lookup_props, 1))) return_trace (false);
if (c->push<SubTable> ()->u.single.serialize (c, hb_zip (glyphs, substitutes)))
{
c->add_link (get_subtables<SubTable> ()[0], c->pop_pack ());
return_trace (true);
}
c->pop_discard ();
return_trace (false);
}
template<typename Iterator,
hb_requires (hb_is_sorted_iterator (Iterator))>
bool serialize (hb_serialize_context_t *c,
uint32_t lookup_props,
Iterator it)
{
TRACE_SERIALIZE (this);
if (unlikely (!Lookup::serialize (c, SubTable::Multiple, lookup_props, 1))) return_trace (false);
if (c->push<SubTable> ()->u.multiple.
serialize (c, it))
{
c->add_link (get_subtables<SubTable> ()[0], c->pop_pack ());
return_trace (true);
}
c->pop_discard ();
return_trace (false);
}
bool serialize_alternate (hb_serialize_context_t *c,
uint32_t lookup_props,
hb_sorted_array_t<const HBGlyphID16> glyphs,
hb_array_t<const unsigned int> alternate_len_list,
hb_array_t<const HBGlyphID16> alternate_glyphs_list)
{
TRACE_SERIALIZE (this);
if (unlikely (!Lookup::serialize (c, SubTable::Alternate, lookup_props, 1))) return_trace (false);
if (c->push<SubTable> ()->u.alternate.
serialize (c,
glyphs,
alternate_len_list,
alternate_glyphs_list))
{
c->add_link (get_subtables<SubTable> ()[0], c->pop_pack ());
return_trace (true);
}
c->pop_discard ();
return_trace (false);
}
bool serialize_ligature (hb_serialize_context_t *c,
uint32_t lookup_props,
hb_sorted_array_t<const HBGlyphID16> first_glyphs,
hb_array_t<const unsigned int> ligature_per_first_glyph_count_list,
hb_array_t<const HBGlyphID16> ligatures_list,
hb_array_t<const unsigned int> component_count_list,
hb_array_t<const HBGlyphID16> component_list /* Starting from second for each ligature */)
{
TRACE_SERIALIZE (this);
if (unlikely (!Lookup::serialize (c, SubTable::Ligature, lookup_props, 1))) return_trace (false);
if (c->push<SubTable> ()->u.ligature.
serialize (c,
first_glyphs,
ligature_per_first_glyph_count_list,
ligatures_list,
component_count_list,
component_list))
{
c->add_link (get_subtables<SubTable> ()[0], c->pop_pack ());
return_trace (true);
}
c->pop_discard ();
return_trace (false);
}
template <typename context_t>
static inline typename context_t::return_t dispatch_recurse_func (context_t *c, unsigned int lookup_index);
static inline typename hb_closure_context_t::return_t closure_glyphs_recurse_func (hb_closure_context_t *c, unsigned lookup_index, hb_set_t *covered_seq_indices, unsigned seq_index, unsigned end_index);
static inline hb_closure_context_t::return_t dispatch_closure_recurse_func (hb_closure_context_t *c, unsigned lookup_index, hb_set_t *covered_seq_indices, unsigned seq_index, unsigned end_index)
{
if (!c->should_visit_lookup (lookup_index))
return hb_empty_t ();
hb_closure_context_t::return_t ret = closure_glyphs_recurse_func (c, lookup_index, covered_seq_indices, seq_index, end_index);
/* While in theory we should flush here, it will cause timeouts because a recursive
 * lookup can keep growing the glyph set. Skip, and the outer loop will retry up to
 * HB_CLOSURE_MAX_STAGES times, which should be enough for every realistic font. */
//c->flush ();
return ret;
}
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ return Lookup::dispatch<SubTable> (c, std::forward<Ts> (ds)...); }
bool subset (hb_subset_context_t *c) const
{ return Lookup::subset<SubTable> (c); }
};
}
}
}
#endif /* OT_LAYOUT_GSUB_SUBSTLOOKUP_HH */

View File

@@ -0,0 +1,77 @@
#ifndef OT_LAYOUT_GSUB_SUBSTLOOKUPSUBTABLE_HH
#define OT_LAYOUT_GSUB_SUBSTLOOKUPSUBTABLE_HH
#include "Common.hh"
#include "SingleSubst.hh"
#include "MultipleSubst.hh"
#include "AlternateSubst.hh"
#include "LigatureSubst.hh"
#include "ContextSubst.hh"
#include "ChainContextSubst.hh"
#include "ExtensionSubst.hh"
#include "ReverseChainSingleSubst.hh"
namespace OT {
namespace Layout {
namespace GSUB_impl {
struct SubstLookupSubTable
{
friend struct ::OT::Lookup;
friend struct SubstLookup;
protected:
union {
SingleSubst single;
MultipleSubst multiple;
AlternateSubst alternate;
LigatureSubst ligature;
ContextSubst context;
ChainContextSubst chainContext;
ExtensionSubst extension;
ReverseChainSingleSubst reverseChainContextSingle;
} u;
public:
DEFINE_SIZE_MIN (0);
enum Type {
Single = 1,
Multiple = 2,
Alternate = 3,
Ligature = 4,
Context = 5,
ChainContext = 6,
Extension = 7,
ReverseChainSingle = 8
};
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, unsigned int lookup_type, Ts&&... ds) const
{
TRACE_DISPATCH (this, lookup_type);
switch (lookup_type) {
case Single: return_trace (u.single.dispatch (c, std::forward<Ts> (ds)...));
case Multiple: return_trace (u.multiple.dispatch (c, std::forward<Ts> (ds)...));
case Alternate: return_trace (u.alternate.dispatch (c, std::forward<Ts> (ds)...));
case Ligature: return_trace (u.ligature.dispatch (c, std::forward<Ts> (ds)...));
case Context: return_trace (u.context.dispatch (c, std::forward<Ts> (ds)...));
case ChainContext: return_trace (u.chainContext.dispatch (c, std::forward<Ts> (ds)...));
case Extension: return_trace (u.extension.dispatch (c, std::forward<Ts> (ds)...));
case ReverseChainSingle: return_trace (u.reverseChainContextSingle.dispatch (c, std::forward<Ts> (ds)...));
default: return_trace (c->default_return_value ());
}
}
bool intersects (const hb_set_t *glyphs, unsigned int lookup_type) const
{
hb_intersects_context_t c (glyphs);
return dispatch (&c, lookup_type);
}
};
}
}
}
#endif /* OT_LAYOUT_GSUB_SUBSTLOOKUPSUBTABLE_HH */

View File

@@ -0,0 +1,69 @@
/*
* Copyright © 2007,2008,2009 Red Hat, Inc.
* Copyright © 2010,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod, Garret Rieger
*/
#ifndef OT_LAYOUT_TYPES_HH
#define OT_LAYOUT_TYPES_HH
using hb_ot_lookup_cache_t = hb_cache_t<15, 8, 7>;
static_assert (sizeof (hb_ot_lookup_cache_t) == 256, "");
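/* Size arithmetic, assuming hb_cache_t's <key_bits, value_bits, cache_bits>
 * convention: 2^7 = 128 slots, each storing the (15 - 7) = 8 remaining tag
 * bits of the key plus 8 value bits = 16 bits, i.e. 128 * 2 bytes = 256
 * bytes total, matching the static_assert above. */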
namespace OT {
namespace Layout {
struct SmallTypes {
static constexpr unsigned size = 2;
using large_int = uint32_t;
using HBUINT = HBUINT16;
using HBGlyphID = HBGlyphID16;
using Offset = Offset16;
template <typename Type, typename BaseType=void, bool has_null=true>
using OffsetTo = OT::Offset16To<Type, BaseType, has_null>;
template <typename Type>
using ArrayOf = OT::Array16Of<Type>;
template <typename Type>
using SortedArrayOf = OT::SortedArray16Of<Type>;
};
struct MediumTypes {
static constexpr unsigned size = 3;
using large_int = uint64_t;
using HBUINT = HBUINT24;
using HBGlyphID = HBGlyphID24;
using Offset = Offset24;
template <typename Type, typename BaseType=void, bool has_null=true>
using OffsetTo = OT::Offset24To<Type, BaseType, has_null>;
template <typename Type>
using ArrayOf = OT::Array24Of<Type>;
template <typename Type>
using SortedArrayOf = OT::SortedArray24Of<Type>;
};
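/* SmallTypes is the classic OpenType layout profile: 16-bit glyph IDs,
 * counts, and offsets.  MediumTypes widens these to 24 bits, which is what
 * the beyond-64K-glyphs extension (cf. HB_NO_BEYOND_64K elsewhere in this
 * tree) builds on. */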
}
}
#endif /* OT_LAYOUT_TYPES_HH */

View File

@@ -0,0 +1,417 @@
#include "VARC.hh"
#ifndef HB_NO_VAR_COMPOSITES
#include "../../../hb-draw.hh"
#include "../../../hb-ot-layout-common.hh"
#include "../../../hb-ot-layout-gdef-table.hh"
namespace OT {
//namespace Var {
struct hb_transforming_pen_context_t
{
hb_transform_t<> transform;
hb_draw_funcs_t *dfuncs;
void *data;
hb_draw_state_t *st;
};
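/* The callbacks below implement a "transforming pen": each one applies
 * c->transform to the incoming point(s) and forwards the result to the
 * wrapped draw funcs, so a component outline can be drawn through an affine
 * transform without materializing the transformed points first.  Note that
 * close_path carries no coordinates and thus forwards unchanged. */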
static void
hb_transforming_pen_move_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
void *data,
hb_draw_state_t *st,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_transforming_pen_context_t *c = (hb_transforming_pen_context_t *) data;
c->transform.transform_point (to_x, to_y);
c->dfuncs->move_to (c->data, *c->st, to_x, to_y);
}
static void
hb_transforming_pen_line_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
void *data,
hb_draw_state_t *st,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_transforming_pen_context_t *c = (hb_transforming_pen_context_t *) data;
c->transform.transform_point (to_x, to_y);
c->dfuncs->line_to (c->data, *c->st, to_x, to_y);
}
static void
hb_transforming_pen_quadratic_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
void *data,
hb_draw_state_t *st,
float control_x, float control_y,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_transforming_pen_context_t *c = (hb_transforming_pen_context_t *) data;
c->transform.transform_point (control_x, control_y);
c->transform.transform_point (to_x, to_y);
c->dfuncs->quadratic_to (c->data, *c->st, control_x, control_y, to_x, to_y);
}
static void
hb_transforming_pen_cubic_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
void *data,
hb_draw_state_t *st,
float control1_x, float control1_y,
float control2_x, float control2_y,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_transforming_pen_context_t *c = (hb_transforming_pen_context_t *) data;
c->transform.transform_point (control1_x, control1_y);
c->transform.transform_point (control2_x, control2_y);
c->transform.transform_point (to_x, to_y);
c->dfuncs->cubic_to (c->data, *c->st, control1_x, control1_y, control2_x, control2_y, to_x, to_y);
}
static void
hb_transforming_pen_close_path (hb_draw_funcs_t *dfuncs HB_UNUSED,
void *data,
hb_draw_state_t *st,
void *user_data HB_UNUSED)
{
hb_transforming_pen_context_t *c = (hb_transforming_pen_context_t *) data;
c->dfuncs->close_path (c->data, *c->st);
}
static inline void free_static_transforming_pen_funcs ();
static struct hb_transforming_pen_funcs_lazy_loader_t : hb_draw_funcs_lazy_loader_t<hb_transforming_pen_funcs_lazy_loader_t>
{
static hb_draw_funcs_t *create ()
{
hb_draw_funcs_t *funcs = hb_draw_funcs_create ();
hb_draw_funcs_set_move_to_func (funcs, hb_transforming_pen_move_to, nullptr, nullptr);
hb_draw_funcs_set_line_to_func (funcs, hb_transforming_pen_line_to, nullptr, nullptr);
hb_draw_funcs_set_quadratic_to_func (funcs, hb_transforming_pen_quadratic_to, nullptr, nullptr);
hb_draw_funcs_set_cubic_to_func (funcs, hb_transforming_pen_cubic_to, nullptr, nullptr);
hb_draw_funcs_set_close_path_func (funcs, hb_transforming_pen_close_path, nullptr, nullptr);
hb_draw_funcs_make_immutable (funcs);
hb_atexit (free_static_transforming_pen_funcs);
return funcs;
}
} static_transforming_pen_funcs;
static inline
void free_static_transforming_pen_funcs ()
{
static_transforming_pen_funcs.free_instance ();
}
static hb_draw_funcs_t *
hb_transforming_pen_get_funcs ()
{
return static_transforming_pen_funcs.get_unconst ();
}
hb_ubytes_t
VarComponent::get_path_at (const hb_varc_context_t &c,
hb_codepoint_t parent_gid,
hb_array_t<const int> coords,
hb_transform_t<> total_transform,
hb_ubytes_t total_record,
hb_scalar_cache_t *cache) const
{
const unsigned char *end = total_record.arrayZ + total_record.length;
const unsigned char *record = total_record.arrayZ;
auto &VARC = *c.font->face->table.VARC->table;
auto &varStore = &VARC+VARC.varStore;
#define READ_UINT32VAR(name) \
HB_STMT_START { \
if (unlikely (unsigned (end - record) < HBUINT32VAR::min_size)) return hb_ubytes_t (); \
hb_barrier (); \
auto &varint = * (const HBUINT32VAR *) record; \
unsigned size = varint.get_size (); \
if (unlikely (unsigned (end - record) < size)) return hb_ubytes_t (); \
name = (uint32_t) varint; \
record += size; \
} HB_STMT_END
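/* READ_UINT32VAR pulls one variable-length uint32 (HBUINT32VAR; its byte
 * count depends on the leading-byte tag) off the record stream, bounds-
 * checking against `end` both before and after the size is known, and
 * advances `record` past it.  Any truncation aborts the component parse by
 * returning an empty byte array. */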
uint32_t flags;
READ_UINT32VAR (flags);
// gid
hb_codepoint_t gid = 0;
if (flags & (unsigned) flags_t::GID_IS_24BIT)
{
if (unlikely (unsigned (end - record) < HBGlyphID24::static_size))
return hb_ubytes_t ();
hb_barrier ();
gid = * (const HBGlyphID24 *) record;
record += HBGlyphID24::static_size;
}
else
{
if (unlikely (unsigned (end - record) < HBGlyphID16::static_size))
return hb_ubytes_t ();
hb_barrier ();
gid = * (const HBGlyphID16 *) record;
record += HBGlyphID16::static_size;
}
// Condition
bool show = true;
if (flags & (unsigned) flags_t::HAVE_CONDITION)
{
unsigned conditionIndex;
READ_UINT32VAR (conditionIndex);
const auto &condition = (&VARC+VARC.conditionList)[conditionIndex];
auto instancer = MultiItemVarStoreInstancer(&varStore, nullptr, coords, cache);
show = condition.evaluate (coords.arrayZ, coords.length, &instancer);
}
// Axis values
auto &axisIndices = c.scratch.axisIndices;
axisIndices.clear ();
auto &axisValues = c.scratch.axisValues;
axisValues.clear ();
if (flags & (unsigned) flags_t::HAVE_AXES)
{
unsigned axisIndicesIndex;
READ_UINT32VAR (axisIndicesIndex);
axisIndices.extend ((&VARC+VARC.axisIndicesList)[axisIndicesIndex]);
axisValues.resize (axisIndices.length);
const HBUINT8 *p = (const HBUINT8 *) record;
TupleValues::decompile (p, axisValues, (const HBUINT8 *) end);
record = (const unsigned char *) p;
}
// Apply variations if any
if (flags & (unsigned) flags_t::AXIS_VALUES_HAVE_VARIATION)
{
uint32_t axisValuesVarIdx;
READ_UINT32VAR (axisValuesVarIdx);
if (show && coords && !axisValues.in_error ())
varStore.get_delta (axisValuesVarIdx, coords, axisValues.as_array (), cache);
}
auto component_coords = coords;
/* Copying coords is expensive, so we put an arbitrary
 * limit on the max number of coords for now. */
if ((flags & (unsigned) flags_t::RESET_UNSPECIFIED_AXES) ||
coords.length > HB_VAR_COMPOSITE_MAX_AXES)
component_coords = hb_array (c.font->coords, c.font->num_coords);
// Transform
uint32_t transformVarIdx = VarIdx::NO_VARIATION;
if (flags & (unsigned) flags_t::TRANSFORM_HAS_VARIATION)
READ_UINT32VAR (transformVarIdx);
#define PROCESS_TRANSFORM_COMPONENTS \
HB_STMT_START { \
PROCESS_TRANSFORM_COMPONENT (FWORD, 1.0f, HAVE_TRANSLATE_X, translateX); \
PROCESS_TRANSFORM_COMPONENT (FWORD, 1.0f, HAVE_TRANSLATE_Y, translateY); \
PROCESS_TRANSFORM_COMPONENT (F4DOT12, HB_PI, HAVE_ROTATION, rotation); \
PROCESS_TRANSFORM_COMPONENT (F6DOT10, 1.0f, HAVE_SCALE_X, scaleX); \
PROCESS_TRANSFORM_COMPONENT (F6DOT10, 1.0f, HAVE_SCALE_Y, scaleY); \
PROCESS_TRANSFORM_COMPONENT (F4DOT12, HB_PI, HAVE_SKEW_X, skewX); \
PROCESS_TRANSFORM_COMPONENT (F4DOT12, HB_PI, HAVE_SKEW_Y, skewY); \
PROCESS_TRANSFORM_COMPONENT (FWORD, 1.0f, HAVE_TCENTER_X, tCenterX); \
PROCESS_TRANSFORM_COMPONENT (FWORD, 1.0f, HAVE_TCENTER_Y, tCenterY); \
} HB_STMT_END
hb_transform_decomposed_t<> transform;
// Read transform components
#define PROCESS_TRANSFORM_COMPONENT(type, mult, flag, name) \
if (flags & (unsigned) flags_t::flag) \
{ \
static_assert (type::static_size == HBINT16::static_size, ""); \
if (unlikely (unsigned (end - record) < HBINT16::static_size)) \
return hb_ubytes_t (); \
hb_barrier (); \
transform.name = mult * * (const HBINT16 *) record; \
record += HBINT16::static_size; \
}
PROCESS_TRANSFORM_COMPONENTS;
#undef PROCESS_TRANSFORM_COMPONENT
// Read reserved records
unsigned i = flags & (unsigned) flags_t::RESERVED_MASK;
while (i)
{
HB_UNUSED uint32_t discard;
READ_UINT32VAR (discard);
i &= i - 1;
}
/* Parsing is over now. */
if (show)
{
// Only use coord_setter if there's actually any axis overrides.
coord_setter_t coord_setter (axisIndices ? component_coords : hb_array<int> ());
// Go backwards, to reduce coord_setter vector reallocations.
for (unsigned i = axisIndices.length; i; i--)
coord_setter[axisIndices[i - 1]] = axisValues[i - 1];
if (axisIndices)
component_coords = coord_setter.get_coords ();
// Apply transform variations if any
if (transformVarIdx != VarIdx::NO_VARIATION && coords)
{
float transformValues[9];
unsigned numTransformValues = 0;
#define PROCESS_TRANSFORM_COMPONENT(type, mult, flag, name) \
if (flags & (unsigned) flags_t::flag) \
transformValues[numTransformValues++] = transform.name / mult;
PROCESS_TRANSFORM_COMPONENTS;
#undef PROCESS_TRANSFORM_COMPONENT
varStore.get_delta (transformVarIdx, coords, hb_array (transformValues, numTransformValues), cache);
numTransformValues = 0;
#define PROCESS_TRANSFORM_COMPONENT(type, mult, flag, name) \
if (flags & (unsigned) flags_t::flag) \
transform.name = transformValues[numTransformValues++] * mult;
PROCESS_TRANSFORM_COMPONENTS;
#undef PROCESS_TRANSFORM_COMPONENT
}
// Divide them by their divisors
#define PROCESS_TRANSFORM_COMPONENT(type, mult, flag, name) \
if (flags & (unsigned) flags_t::flag) \
{ \
HBINT16 int_v; \
int_v = roundf (transform.name); \
type typed_v = * (const type *) &int_v; \
float float_v = (float) typed_v; \
transform.name = float_v; \
}
PROCESS_TRANSFORM_COMPONENTS;
#undef PROCESS_TRANSFORM_COMPONENT
if (!(flags & (unsigned) flags_t::HAVE_SCALE_Y))
transform.scaleY = transform.scaleX;
total_transform.transform (transform.to_transform ());
total_transform.scale (c.font->x_mult ? 1.f / c.font->x_multf : 0.f,
c.font->y_mult ? 1.f / c.font->y_multf : 0.f);
bool same_coords = component_coords.length == coords.length &&
component_coords.arrayZ == coords.arrayZ;
c.depth_left--;
VARC.get_path_at (c, gid,
component_coords, total_transform,
parent_gid,
same_coords ? cache : nullptr);
c.depth_left++;
}
#undef PROCESS_TRANSFORM_COMPONENTS
#undef READ_UINT32VAR
return hb_ubytes_t (record, end - record);
}
bool
VARC::get_path_at (const hb_varc_context_t &c,
hb_codepoint_t glyph,
hb_array_t<const int> coords,
hb_transform_t<> transform,
hb_codepoint_t parent_glyph,
hb_scalar_cache_t *parent_cache) const
{
// Don't recurse on the same glyph.
unsigned idx = glyph == parent_glyph ?
NOT_COVERED :
(this+coverage).get_coverage (glyph);
if (idx == NOT_COVERED)
{
if (c.draw_session)
{
// Build a transforming pen to apply the transform.
hb_draw_funcs_t *transformer_funcs = hb_transforming_pen_get_funcs ();
hb_transforming_pen_context_t context {transform,
c.draw_session->funcs,
c.draw_session->draw_data,
&c.draw_session->st};
hb_draw_session_t transformer_session {transformer_funcs, &context};
hb_draw_session_t &shape_draw_session = transform.is_identity () ? *c.draw_session : transformer_session;
if (c.font->face->table.glyf->get_path_at (c.font, glyph, shape_draw_session, coords, c.scratch.glyf_scratch)) return true;
#ifndef HB_NO_CFF
if (c.font->face->table.cff2->get_path_at (c.font, glyph, shape_draw_session, coords)) return true;
if (c.font->face->table.cff1->get_path (c.font, glyph, shape_draw_session)) return true; // Doesn't have variations
#endif
return false;
}
else if (c.extents)
{
hb_glyph_extents_t glyph_extents;
if (!c.font->face->table.glyf->get_extents_at (c.font, glyph, &glyph_extents, coords))
#ifndef HB_NO_CFF
if (!c.font->face->table.cff2->get_extents_at (c.font, glyph, &glyph_extents, coords))
if (!c.font->face->table.cff1->get_extents (c.font, glyph, &glyph_extents)) // Doesn't have variations
#endif
return false;
hb_extents_t<> comp_extents (glyph_extents);
transform.transform_extents (comp_extents);
c.extents->union_ (comp_extents);
}
return true;
}
if (c.depth_left <= 0)
return true;
if (c.edges_left <= 0)
return true;
(c.edges_left)--;
hb_decycler_node_t node (c.decycler);
if (unlikely (!node.visit (glyph)))
return true;
hb_ubytes_t record = (this+glyphRecords)[idx];
hb_scalar_cache_t static_cache;
hb_scalar_cache_t *cache = parent_cache ?
parent_cache :
(this+varStore).create_cache (&static_cache);
transform.scale (c.font->x_multf, c.font->y_multf);
VarCompositeGlyph::get_path_at (c,
glyph,
coords, transform,
record,
cache);
if (cache != parent_cache)
(this+varStore).destroy_cache (cache, &static_cache);
return true;
}
//} // namespace Var
} // namespace OT
#endif

View File

@@ -0,0 +1,264 @@
#ifndef OT_VAR_VARC_VARC_HH
#define OT_VAR_VARC_VARC_HH
#include "../../../hb-decycler.hh"
#include "../../../hb-geometry.hh"
#include "../../../hb-ot-layout-common.hh"
#include "../../../hb-ot-glyf-table.hh"
#include "../../../hb-ot-cff2-table.hh"
#include "../../../hb-ot-cff1-table.hh"
#include "coord-setter.hh"
namespace OT {
//namespace Var {
/*
* VARC -- Variable Composites
* https://github.com/harfbuzz/boring-expansion-spec/blob/main/VARC.md
*/
#ifndef HB_NO_VAR_COMPOSITES
struct hb_varc_scratch_t
{
hb_vector_t<unsigned> axisIndices;
hb_vector_t<float> axisValues;
hb_glyf_scratch_t glyf_scratch;
};
struct hb_varc_context_t
{
hb_font_t *font;
hb_draw_session_t *draw_session;
hb_extents_t<> *extents;
mutable hb_decycler_t decycler;
mutable signed edges_left;
mutable signed depth_left;
hb_varc_scratch_t &scratch;
};
struct VarComponent
{
enum class flags_t : uint32_t
{
RESET_UNSPECIFIED_AXES = 1u << 0,
HAVE_AXES = 1u << 1,
AXIS_VALUES_HAVE_VARIATION = 1u << 2,
TRANSFORM_HAS_VARIATION = 1u << 3,
HAVE_TRANSLATE_X = 1u << 4,
HAVE_TRANSLATE_Y = 1u << 5,
HAVE_ROTATION = 1u << 6,
HAVE_CONDITION = 1u << 7,
HAVE_SCALE_X = 1u << 8,
HAVE_SCALE_Y = 1u << 9,
HAVE_TCENTER_X = 1u << 10,
HAVE_TCENTER_Y = 1u << 11,
GID_IS_24BIT = 1u << 12,
HAVE_SKEW_X = 1u << 13,
HAVE_SKEW_Y = 1u << 14,
RESERVED_MASK = ~((1u << 15) - 1),
};
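/* Serialized record layout, in the order consumed by get_path_at () in
 * VARC.cc; every field after `flags` is present only if the corresponding
 * flag bit is set:
 *
 *   uint32var     flags
 *   GlyphID16/24  gid               (24-bit iff GID_IS_24BIT)
 *   uint32var     conditionIndex    (iff HAVE_CONDITION)
 *   uint32var     axisIndicesIndex,
 *     then packed axis values decoded
 *     via TupleValues::decompile    (iff HAVE_AXES)
 *   uint32var     axisValuesVarIdx  (iff AXIS_VALUES_HAVE_VARIATION)
 *   uint32var     transformVarIdx   (iff TRANSFORM_HAS_VARIATION)
 *   int16         one per HAVE_TRANSLATE_X .. HAVE_TCENTER_Y flag set
 *   uint32var     one per reserved flag bit set (read and discarded)
 */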
HB_INTERNAL hb_ubytes_t
get_path_at (const hb_varc_context_t &c,
hb_codepoint_t parent_gid,
hb_array_t<const int> coords,
hb_transform_t<> transform,
hb_ubytes_t record,
hb_scalar_cache_t *cache = nullptr) const;
};
struct VarCompositeGlyph
{
static void
get_path_at (const hb_varc_context_t &c,
hb_codepoint_t gid,
hb_array_t<const int> coords,
hb_transform_t<> transform,
hb_ubytes_t record,
hb_scalar_cache_t *cache)
{
while (record)
{
const VarComponent &comp = * (const VarComponent *) (record.arrayZ);
record = comp.get_path_at (c,
gid,
coords, transform,
record,
cache);
}
}
};
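/* A VARC glyph record is just a back-to-back run of variable-length
 * VarComponent records: get_path_at () consumes one component and returns
 * the remaining bytes, so the loop above terminates once the record is
 * exhausted (or an empty array is returned on a parse error). */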
HB_MARK_AS_FLAG_T (VarComponent::flags_t);
struct VARC
{
friend struct VarComponent;
static constexpr hb_tag_t tableTag = HB_TAG ('V', 'A', 'R', 'C');
HB_INTERNAL bool
get_path_at (const hb_varc_context_t &c,
hb_codepoint_t gid,
hb_array_t<const int> coords,
hb_transform_t<> transform = HB_TRANSFORM_IDENTITY,
hb_codepoint_t parent_gid = HB_CODEPOINT_INVALID,
hb_scalar_cache_t *parent_cache = nullptr) const;
bool
get_path (hb_font_t *font,
hb_codepoint_t gid,
hb_draw_session_t &draw_session,
hb_varc_scratch_t &scratch) const
{
hb_varc_context_t c {font,
&draw_session,
nullptr,
hb_decycler_t {},
HB_MAX_GRAPH_EDGE_COUNT,
HB_MAX_NESTING_LEVEL,
scratch};
return get_path_at (c, gid,
hb_array (font->coords, font->num_coords));
}
bool
get_extents (hb_font_t *font,
hb_codepoint_t gid,
hb_extents_t<> *extents,
hb_varc_scratch_t &scratch) const
{
hb_varc_context_t c {font,
nullptr,
extents,
hb_decycler_t {},
HB_MAX_GRAPH_EDGE_COUNT,
HB_MAX_NESTING_LEVEL,
scratch};
return get_path_at (c, gid,
hb_array (font->coords, font->num_coords));
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (version.sanitize (c) &&
hb_barrier () &&
version.major == 1 &&
coverage.sanitize (c, this) &&
varStore.sanitize (c, this) &&
conditionList.sanitize (c, this) &&
axisIndicesList.sanitize (c, this) &&
glyphRecords.sanitize (c, this));
}
struct accelerator_t
{
friend struct VarComponent;
accelerator_t (hb_face_t *face)
{
table = hb_sanitize_context_t ().reference_table<VARC> (face);
}
~accelerator_t ()
{
auto *scratch = cached_scratch.get_relaxed ();
if (scratch)
{
scratch->~hb_varc_scratch_t ();
hb_free (scratch);
}
table.destroy ();
}
bool
get_path (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session) const
{
if (!table->has_data ()) return false;
auto *scratch = acquire_scratch ();
if (unlikely (!scratch)) return true;
bool ret = table->get_path (font, gid, draw_session, *scratch);
release_scratch (scratch);
return ret;
}
bool
get_extents (hb_font_t *font,
hb_codepoint_t gid,
hb_glyph_extents_t *extents) const
{
if (!table->has_data ()) return false;
hb_extents_t<> f_extents;
auto *scratch = acquire_scratch ();
if (unlikely (!scratch)) return true;
bool ret = table->get_extents (font, gid, &f_extents, *scratch);
release_scratch (scratch);
if (ret)
*extents = f_extents.to_glyph_extents (font->x_scale < 0, font->y_scale < 0);
return ret;
}
private:
hb_varc_scratch_t *acquire_scratch () const
{
hb_varc_scratch_t *scratch = cached_scratch.get_acquire ();
if (!scratch || unlikely (!cached_scratch.cmpexch (scratch, nullptr)))
{
scratch = (hb_varc_scratch_t *) hb_calloc (1, sizeof (hb_varc_scratch_t));
if (unlikely (!scratch))
return nullptr;
}
return scratch;
}
void release_scratch (hb_varc_scratch_t *scratch) const
{
if (!cached_scratch.cmpexch (nullptr, scratch))
{
scratch->~hb_varc_scratch_t ();
hb_free (scratch);
}
}
private:
hb_blob_ptr_t<VARC> table;
mutable hb_atomic_t<hb_varc_scratch_t *> cached_scratch;
};
bool has_data () const { return version.major != 0; }
protected:
FixedVersion<> version; /* Version identifier */
Offset32To<Coverage> coverage;
Offset32To<MultiItemVariationStore> varStore;
Offset32To<ConditionList> conditionList;
Offset32To<TupleList> axisIndicesList;
Offset32To<CFF2Index/*Of<VarCompositeGlyph>*/> glyphRecords;
public:
DEFINE_SIZE_STATIC (24);
};
struct VARC_accelerator_t : VARC::accelerator_t {
VARC_accelerator_t (hb_face_t *face) : VARC::accelerator_t (face) {}
};
#endif
//}
}
#endif /* OT_VAR_VARC_VARC_HH */

View File

@@ -0,0 +1,63 @@
#ifndef OT_VAR_VARC_COORD_SETTER_HH
#define OT_VAR_VARC_COORD_SETTER_HH
#include "../../../hb.hh"
namespace OT {
//namespace Var {
struct coord_setter_t
{
coord_setter_t (hb_array_t<const int> coords_)
{
length = coords_.length;
if (length <= ARRAY_LENGTH (static_coords))
hb_memcpy (static_coords, coords_.arrayZ, length * sizeof (int));
else
dynamic_coords.extend (coords_);
}
int& operator [] (unsigned idx)
{
if (unlikely (idx >= HB_VAR_COMPOSITE_MAX_AXES))
return Crap(int);
if (length <= ARRAY_LENGTH (static_coords))
{
if (idx < ARRAY_LENGTH (static_coords))
{
while (length <= idx)
static_coords[length++] = 0;
return static_coords[idx];
}
else
dynamic_coords.extend (hb_array (static_coords, length));
}
if (dynamic_coords.length <= idx)
{
if (unlikely (!dynamic_coords.resize (idx + 1)))
return Crap(int);
length = idx + 1;
}
return dynamic_coords.arrayZ[idx];
}
hb_array_t<int> get_coords ()
{ return length <= ARRAY_LENGTH (static_coords) ? hb_array (static_coords, length) : dynamic_coords.as_array (); }
private:
hb_vector_t<int> dynamic_coords;
unsigned length;
int static_coords[sizeof (void *) * 8];
};
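/* Usage sketch (hypothetical values): override a couple of axes on top of
 * existing normalized coords without touching the source array:
 *
 *   coord_setter_t setter (coords);
 *   setter[2] = 0x2000;               // grows and zero-fills as needed
 *   hb_array_t<int> new_coords = setter.get_coords ();
 */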
//} // namespace Var
} // namespace OT
#endif /* OT_VAR_VARC_COORD_SETTER_HH */

View File

@@ -0,0 +1,435 @@
#ifndef OT_GLYF_COMPOSITEGLYPH_HH
#define OT_GLYF_COMPOSITEGLYPH_HH
#include "../../hb-open-type.hh"
#include "composite-iter.hh"
namespace OT {
namespace glyf_impl {
struct CompositeGlyphRecord
{
protected:
enum composite_glyph_flag_t
{
ARG_1_AND_2_ARE_WORDS = 0x0001,
ARGS_ARE_XY_VALUES = 0x0002,
ROUND_XY_TO_GRID = 0x0004,
WE_HAVE_A_SCALE = 0x0008,
MORE_COMPONENTS = 0x0020,
WE_HAVE_AN_X_AND_Y_SCALE = 0x0040,
WE_HAVE_A_TWO_BY_TWO = 0x0080,
WE_HAVE_INSTRUCTIONS = 0x0100,
USE_MY_METRICS = 0x0200,
OVERLAP_COMPOUND = 0x0400,
SCALED_COMPONENT_OFFSET = 0x0800,
UNSCALED_COMPONENT_OFFSET = 0x1000,
#ifndef HB_NO_BEYOND_64K
GID_IS_24BIT = 0x2000
#endif
};
public:
unsigned int get_size () const
{
unsigned int size = min_size;
/* glyphIndex is 24bit instead of 16bit */
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT) size += HBGlyphID24::static_size - HBGlyphID16::static_size;
#endif
/* arg1 and 2 are int16 */
if (flags & ARG_1_AND_2_ARE_WORDS) size += 4;
/* arg1 and 2 are int8 */
else size += 2;
/* One x 16 bit (scale) */
if (flags & WE_HAVE_A_SCALE) size += 2;
/* Two x 16 bit (xscale, yscale) */
else if (flags & WE_HAVE_AN_X_AND_Y_SCALE) size += 4;
/* Four x 16 bit (xscale, scale01, scale10, yscale) */
else if (flags & WE_HAVE_A_TWO_BY_TWO) size += 8;
return size;
}
void drop_instructions_flag () { flags = (uint16_t) flags & ~WE_HAVE_INSTRUCTIONS; }
void set_overlaps_flag ()
{
flags = (uint16_t) flags | OVERLAP_COMPOUND;
}
bool has_instructions () const { return flags & WE_HAVE_INSTRUCTIONS; }
bool has_more () const { return flags & MORE_COMPONENTS; }
bool is_use_my_metrics () const { return flags & USE_MY_METRICS; }
bool is_anchored () const { return !(flags & ARGS_ARE_XY_VALUES); }
void get_anchor_points (unsigned int &point1, unsigned int &point2) const
{
const auto *p = &StructAfter<const HBUINT8> (flags);
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT)
p += HBGlyphID24::static_size;
else
#endif
p += HBGlyphID16::static_size;
if (flags & ARG_1_AND_2_ARE_WORDS)
{
point1 = ((const HBUINT16 *) p)[0];
point2 = ((const HBUINT16 *) p)[1];
}
else
{
point1 = p[0];
point2 = p[1];
}
}
static void transform (const float (&matrix)[4],
hb_array_t<contour_point_t> points)
{
if (matrix[0] != 1.f || matrix[1] != 0.f ||
matrix[2] != 0.f || matrix[3] != 1.f)
for (auto &point : points)
point.transform (matrix);
}
static void translate (const contour_point_t &trans,
hb_array_t<contour_point_t> points)
{
if (HB_OPTIMIZE_SIZE_VAL)
{
if (trans.x != 0.f || trans.y != 0.f)
for (auto &point : points)
point.translate (trans);
}
else
{
if (trans.x != 0.f && trans.y != 0.f)
for (auto &point : points)
point.translate (trans);
else
{
if (trans.x != 0.f)
for (auto &point : points)
point.x += trans.x;
else if (trans.y != 0.f)
for (auto &point : points)
point.y += trans.y;
}
}
}
void transform_points (hb_array_t<contour_point_t> points,
const float (&matrix)[4],
const contour_point_t &trans) const
{
if (scaled_offsets ())
{
translate (trans, points);
transform (matrix, points);
}
else
{
transform (matrix, points);
translate (trans, points);
}
}
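/* Per scaled_offsets () (defined below): with SCALED_COMPONENT_OFFSET the
 * component offset is applied before the matrix, so the offset is scaled
 * along with the component; otherwise the offset is applied after the
 * matrix, unscaled. */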
bool get_points (contour_point_vector_t &points) const
{
float matrix[4];
contour_point_t trans;
get_transformation (matrix, trans);
if (unlikely (!points.alloc (points.length + 1 + 4))) return false; // For phantom points
points.push (trans);
return true;
}
unsigned compile_with_point (const contour_point_t &point,
char *out) const
{
const HBINT8 *p = &StructAfter<const HBINT8> (flags);
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT)
p += HBGlyphID24::static_size;
else
#endif
p += HBGlyphID16::static_size;
unsigned len = get_size ();
unsigned len_before_val = (const char *)p - (const char *)this;
if (flags & ARG_1_AND_2_ARE_WORDS)
{
// no overflow, copy value
hb_memcpy (out, this, len);
HBINT16 *o = reinterpret_cast<HBINT16 *> (out + len_before_val);
o[0] = roundf (point.x);
o[1] = roundf (point.y);
}
else
{
int new_x = roundf (point.x);
int new_y = roundf (point.y);
if (new_x <= 127 && new_x >= -128 &&
new_y <= 127 && new_y >= -128)
{
hb_memcpy (out, this, len);
HBINT8 *o = reinterpret_cast<HBINT8 *> (out + len_before_val);
o[0] = new_x;
o[1] = new_y;
}
else
{
// new point value has an int8 overflow
hb_memcpy (out, this, len_before_val);
// Update flags.
CompositeGlyphRecord *o = reinterpret_cast<CompositeGlyphRecord *> (out);
o->flags = flags | ARG_1_AND_2_ARE_WORDS;
out += len_before_val;
HBINT16 new_value;
new_value = new_x;
hb_memcpy (out, &new_value, HBINT16::static_size);
out += HBINT16::static_size;
new_value = new_y;
hb_memcpy (out, &new_value, HBINT16::static_size);
out += HBINT16::static_size;
hb_memcpy (out, p+2, len - len_before_val - 2);
len += 2;
}
}
return len;
}
protected:
bool scaled_offsets () const
{ return (flags & (SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET)) == SCALED_COMPONENT_OFFSET; }
public:
bool get_transformation (float (&matrix)[4], contour_point_t &trans) const
{
matrix[0] = matrix[3] = 1.f;
matrix[1] = matrix[2] = 0.f;
const auto *p = &StructAfter<const HBINT8> (flags);
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT)
p += HBGlyphID24::static_size;
else
#endif
p += HBGlyphID16::static_size;
int tx, ty;
if (flags & ARG_1_AND_2_ARE_WORDS)
{
tx = *(const HBINT16 *) p;
p += HBINT16::static_size;
ty = *(const HBINT16 *) p;
p += HBINT16::static_size;
}
else
{
tx = *p++;
ty = *p++;
}
if (is_anchored ()) tx = ty = 0;
/* set is_end_point flag to true, used by IUP delta optimization */
trans.init ((float) tx, (float) ty, true);
{
const F2DOT14 *points = (const F2DOT14 *) p;
if (flags & WE_HAVE_A_SCALE)
{
matrix[0] = matrix[3] = points[0].to_float ();
return true;
}
else if (flags & WE_HAVE_AN_X_AND_Y_SCALE)
{
matrix[0] = points[0].to_float ();
matrix[3] = points[1].to_float ();
return true;
}
else if (flags & WE_HAVE_A_TWO_BY_TWO)
{
matrix[0] = points[0].to_float ();
matrix[1] = points[1].to_float ();
matrix[2] = points[2].to_float ();
matrix[3] = points[3].to_float ();
return true;
}
}
return tx || ty;
}
hb_codepoint_t get_gid () const
{
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT)
return StructAfter<const HBGlyphID24> (flags);
else
#endif
return StructAfter<const HBGlyphID16> (flags);
}
void set_gid (hb_codepoint_t gid)
{
#ifndef HB_NO_BEYOND_64K
if (flags & GID_IS_24BIT)
StructAfter<HBGlyphID24> (flags) = gid;
else
#endif
/* TODO assert? */
StructAfter<HBGlyphID16> (flags) = gid;
}
#ifndef HB_NO_BEYOND_64K
void lower_gid_24_to_16 ()
{
hb_codepoint_t gid = get_gid ();
if (!(flags & GID_IS_24BIT) || gid > 0xFFFFu)
return;
/* Lower the flag and move the rest of the struct down. */
unsigned size = get_size ();
char *end = (char *) this + size;
char *p = &StructAfter<char> (flags);
p += HBGlyphID24::static_size;
flags = flags & ~GID_IS_24BIT;
set_gid (gid);
memmove (p - HBGlyphID24::static_size + HBGlyphID16::static_size, p, end - p);
}
#endif
protected:
HBUINT16 flags;
HBUINT24 pad;
public:
DEFINE_SIZE_MIN (4);
};
using composite_iter_t = composite_iter_tmpl<CompositeGlyphRecord>;
struct CompositeGlyph
{
const GlyphHeader &header;
hb_bytes_t bytes;
CompositeGlyph (const GlyphHeader &header_, hb_bytes_t bytes_) :
header (header_), bytes (bytes_) {}
composite_iter_t iter () const
{ return composite_iter_t (bytes, &StructAfter<CompositeGlyphRecord, GlyphHeader> (header)); }
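/* Usage sketch: enumerate component glyph IDs (iteration ends after the
 * first record without MORE_COMPONENTS, or when a record fails its bounds
 * check):
 *
 *   CompositeGlyph glyph (header, bytes);
 *   for (const CompositeGlyphRecord &rec : glyph.iter ())
 *     handle_component (rec.get_gid ());  // handle_component() is hypothetical
 */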
unsigned int instructions_length (hb_bytes_t bytes) const
{
unsigned int start = bytes.length;
unsigned int end = bytes.length;
const CompositeGlyphRecord *last = nullptr;
for (auto &item : iter ())
last = &item;
if (unlikely (!last)) return 0;
if (last->has_instructions ())
start = (char *) last - &bytes + last->get_size ();
if (unlikely (start > end)) return 0;
return end - start;
}
/* Trimming for composites not implemented.
* If removing hints it falls out of that. */
const hb_bytes_t trim_padding () const { return bytes; }
void drop_hints ()
{
for (const auto &_ : iter ())
const_cast<CompositeGlyphRecord &> (_).drop_instructions_flag ();
}
/* Chop instructions off the end */
void drop_hints_bytes (hb_bytes_t &dest_start) const
{ dest_start = bytes.sub_array (0, bytes.length - instructions_length (bytes)); }
void set_overlaps_flag ()
{
CompositeGlyphRecord& glyph_chain = const_cast<CompositeGlyphRecord &> (
StructAfter<CompositeGlyphRecord, GlyphHeader> (header));
if (!bytes.check_range(&glyph_chain, CompositeGlyphRecord::min_size))
return;
glyph_chain.set_overlaps_flag ();
}
bool compile_bytes_with_deltas (const hb_bytes_t &source_bytes,
const contour_point_vector_t &points_with_deltas,
hb_bytes_t &dest_bytes /* OUT */)
{
if (source_bytes.length <= GlyphHeader::static_size ||
header.numberOfContours != -1)
{
dest_bytes = hb_bytes_t ();
return true;
}
unsigned source_len = source_bytes.length - GlyphHeader::static_size;
/* Try to allocate more memory than the source glyph bytes,
 * in case an int8 value overflows and we need
 * to use int16 instead. */
char *o = (char *) hb_calloc (source_len * 2, sizeof (char));
if (unlikely (!o)) return false;
const CompositeGlyphRecord *c = reinterpret_cast<const CompositeGlyphRecord *> (source_bytes.arrayZ + GlyphHeader::static_size);
auto it = composite_iter_t (hb_bytes_t ((const char *)c, source_len), c);
char *p = o;
unsigned i = 0, source_comp_len = 0;
for (const auto &component : it)
{
/* last 4 points in points_with_deltas are phantom points and should not be included */
if (i >= points_with_deltas.length - 4) {
hb_free (o);
return false;
}
unsigned comp_len = component.get_size ();
if (component.is_anchored ())
{
hb_memcpy (p, &component, comp_len);
p += comp_len;
}
else
{
unsigned new_len = component.compile_with_point (points_with_deltas[i], p);
p += new_len;
}
i++;
source_comp_len += comp_len;
}
// Copy instructions, if any.
if (source_len > source_comp_len)
{
unsigned instr_len = source_len - source_comp_len;
hb_memcpy (p, (const char *)c + source_comp_len, instr_len);
p += instr_len;
}
unsigned len = p - o;
dest_bytes = hb_bytes_t (o, len);
return true;
}
};
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_COMPOSITEGLYPH_HH */

thirdparty/harfbuzz/src/OT/glyf/Glyph.hh vendored Normal file
View File

@@ -0,0 +1,574 @@
#ifndef OT_GLYF_GLYPH_HH
#define OT_GLYF_GLYPH_HH
#include "../../hb-open-type.hh"
#include "GlyphHeader.hh"
#include "SimpleGlyph.hh"
#include "CompositeGlyph.hh"
namespace OT {
struct glyf_accelerator_t;
namespace glyf_impl {
enum phantom_point_index_t
{
PHANTOM_LEFT = 0,
PHANTOM_RIGHT = 1,
PHANTOM_TOP = 2,
PHANTOM_BOTTOM = 3,
PHANTOM_COUNT = 4
};
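/* Phantom points are four synthetic points appended after a glyph's real
 * points; they carry its metrics: LEFT/RIGHT encode the horizontal origin
 * and advance, TOP/BOTTOM the vertical ones.  Variation deltas apply to
 * them like any other point, which is how `gvar` varies metrics. */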
struct Glyph
{
enum glyph_type_t {
EMPTY,
SIMPLE,
COMPOSITE,
};
public:
composite_iter_t get_composite_iterator () const
{
if (type != COMPOSITE) return composite_iter_t ();
return CompositeGlyph (*header, bytes).iter ();
}
const hb_bytes_t trim_padding () const
{
switch (type) {
case COMPOSITE: return CompositeGlyph (*header, bytes).trim_padding ();
case SIMPLE: return SimpleGlyph (*header, bytes).trim_padding ();
case EMPTY: return bytes;
default: return bytes;
}
}
void drop_hints ()
{
switch (type) {
case COMPOSITE: CompositeGlyph (*header, bytes).drop_hints (); return;
case SIMPLE: SimpleGlyph (*header, bytes).drop_hints (); return;
case EMPTY: return;
}
}
void set_overlaps_flag ()
{
switch (type) {
case COMPOSITE: CompositeGlyph (*header, bytes).set_overlaps_flag (); return;
case SIMPLE: SimpleGlyph (*header, bytes).set_overlaps_flag (); return;
case EMPTY: return;
}
}
void drop_hints_bytes (hb_bytes_t &dest_start, hb_bytes_t &dest_end) const
{
switch (type) {
case COMPOSITE: CompositeGlyph (*header, bytes).drop_hints_bytes (dest_start); return;
case SIMPLE: SimpleGlyph (*header, bytes).drop_hints_bytes (dest_start, dest_end); return;
case EMPTY: return;
}
}
bool is_composite () const
{ return type == COMPOSITE; }
bool get_all_points_without_var (const hb_face_t *face,
contour_point_vector_t &points /* OUT */) const
{
switch (type) {
case SIMPLE:
if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (points)))
return false;
break;
case COMPOSITE:
{
for (auto &item : get_composite_iterator ())
if (unlikely (!item.get_points (points))) return false;
break;
}
case EMPTY:
break;
}
/* Init phantom points */
if (unlikely (!points.resize (points.length + PHANTOM_COUNT))) return false;
hb_array_t<contour_point_t> phantoms = points.as_array ().sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT);
{
// Duplicated code.
int lsb = 0;
face->table.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb);
int h_delta = (int) header->xMin - lsb;
HB_UNUSED int tsb = 0;
#ifndef HB_NO_VERTICAL
face->table.vmtx->get_leading_bearing_without_var_unscaled (gid, &tsb);
#endif
int v_orig = (int) header->yMax + tsb;
unsigned h_adv = face->table.hmtx->get_advance_without_var_unscaled (gid);
unsigned v_adv =
#ifndef HB_NO_VERTICAL
face->table.vmtx->get_advance_without_var_unscaled (gid)
#else
- face->get_upem ()
#endif
;
phantoms[PHANTOM_LEFT].x = h_delta;
phantoms[PHANTOM_RIGHT].x = (int) h_adv + h_delta;
phantoms[PHANTOM_TOP].y = v_orig;
phantoms[PHANTOM_BOTTOM].y = v_orig - (int) v_adv;
}
return true;
}
void update_mtx (const hb_subset_plan_t *plan,
int xMin, int xMax,
int yMin, int yMax,
const contour_point_vector_t &all_points) const
{
hb_codepoint_t new_gid = 0;
if (!plan->new_gid_for_old_gid (gid, &new_gid))
return;
if (type != EMPTY)
{
plan->bounds_width_vec[new_gid] = xMax - xMin;
plan->bounds_height_vec[new_gid] = yMax - yMin;
}
unsigned len = all_points.length;
float leftSideX = all_points[len - 4].x;
float rightSideX = all_points[len - 3].x;
float topSideY = all_points[len - 2].y;
float bottomSideY = all_points[len - 1].y;
uint32_t hash = hb_hash (new_gid);
signed hori_aw = roundf (rightSideX - leftSideX);
if (hori_aw < 0) hori_aw = 0;
int lsb = roundf (xMin - leftSideX);
plan->hmtx_map.set_with_hash (new_gid, hash, hb_pair ((unsigned) hori_aw, lsb));
// The allXMinIsLsb flag should be computed using non-empty glyphs only.
if (type != EMPTY && lsb != xMin)
plan->head_maxp_info.allXMinIsLsb = false;
signed vert_aw = roundf (topSideY - bottomSideY);
if (vert_aw < 0) vert_aw = 0;
int tsb = roundf (topSideY - yMax);
plan->vmtx_map.set_with_hash (new_gid, hash, hb_pair ((unsigned) vert_aw, tsb));
}
bool compile_header_bytes (const hb_subset_plan_t *plan,
const contour_point_vector_t &all_points,
hb_bytes_t &dest_bytes /* OUT */) const
{
GlyphHeader *glyph_header = nullptr;
if (!plan->pinned_at_default && type != EMPTY && all_points.length >= 4)
{
glyph_header = (GlyphHeader *) hb_calloc (1, GlyphHeader::static_size);
if (unlikely (!glyph_header)) return false;
}
float xMin = 0, xMax = 0;
float yMin = 0, yMax = 0;
if (all_points.length > 4)
{
xMin = xMax = all_points[0].x;
yMin = yMax = all_points[0].y;
unsigned count = all_points.length - 4;
for (unsigned i = 1; i < count; i++)
{
float x = all_points[i].x;
float y = all_points[i].y;
xMin = hb_min (xMin, x);
xMax = hb_max (xMax, x);
yMin = hb_min (yMin, y);
yMax = hb_max (yMax, y);
}
}
// These are destined for storage in a 16-bit field, so clamp the values
// to fit into a 16-bit signed integer.
int rounded_xMin = hb_clamp (roundf (xMin), -32768.0f, 32767.0f);
int rounded_xMax = hb_clamp (roundf (xMax), -32768.0f, 32767.0f);
int rounded_yMin = hb_clamp (roundf (yMin), -32768.0f, 32767.0f);
int rounded_yMax = hb_clamp (roundf (yMax), -32768.0f, 32767.0f);
update_mtx (plan, rounded_xMin, rounded_xMax, rounded_yMin, rounded_yMax, all_points);
if (type != EMPTY)
{
plan->head_maxp_info.xMin = hb_min (plan->head_maxp_info.xMin, rounded_xMin);
plan->head_maxp_info.yMin = hb_min (plan->head_maxp_info.yMin, rounded_yMin);
plan->head_maxp_info.xMax = hb_max (plan->head_maxp_info.xMax, rounded_xMax);
plan->head_maxp_info.yMax = hb_max (plan->head_maxp_info.yMax, rounded_yMax);
}
/* when pinned at default, no need to compile glyph header
* and for empty glyphs: all_points only include phantom points.
* just update metrics and then return */
if (!glyph_header)
return true;
glyph_header->numberOfContours = header->numberOfContours;
glyph_header->xMin = rounded_xMin;
glyph_header->yMin = rounded_yMin;
glyph_header->xMax = rounded_xMax;
glyph_header->yMax = rounded_yMax;
dest_bytes = hb_bytes_t ((const char *)glyph_header, GlyphHeader::static_size);
return true;
}
bool compile_bytes_with_deltas (const hb_subset_plan_t *plan,
hb_font_t *font,
const glyf_accelerator_t &glyf,
hb_bytes_t &dest_start, /* IN/OUT */
hb_bytes_t &dest_end /* OUT */)
{
contour_point_vector_t all_points, points_with_deltas;
unsigned composite_contours = 0;
head_maxp_info_t *head_maxp_info_p = &plan->head_maxp_info;
unsigned *composite_contours_p = &composite_contours;
// Don't compute head/maxp values when the glyph has no contours (type is EMPTY);
// also ignore the .notdef glyph when --notdef-outline is not enabled.
if (type == EMPTY ||
(gid == 0 && !(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE)))
{
head_maxp_info_p = nullptr;
composite_contours_p = nullptr;
}
hb_glyf_scratch_t scratch;
if (!get_points (font, glyf, all_points, scratch, &points_with_deltas, head_maxp_info_p, composite_contours_p, false, false))
return false;
// For .notdef, set type to EMPTY so we only update metrics and don't
// compile bytes for it.
if (gid == 0 &&
!(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE))
{
type = EMPTY;
dest_start = hb_bytes_t ();
dest_end = hb_bytes_t ();
}
// Don't compile bytes when pinned at default; just recalculate bounds.
if (!plan->pinned_at_default)
{
switch (type)
{
case COMPOSITE:
if (!CompositeGlyph (*header, bytes).compile_bytes_with_deltas (dest_start,
points_with_deltas,
dest_end))
return false;
break;
case SIMPLE:
if (!SimpleGlyph (*header, bytes).compile_bytes_with_deltas (all_points,
plan->flags & HB_SUBSET_FLAGS_NO_HINTING,
dest_end))
return false;
break;
case EMPTY:
/* set empty bytes for empty glyph
* do not use source glyph's pointers */
dest_start = hb_bytes_t ();
dest_end = hb_bytes_t ();
break;
}
}
if (!compile_header_bytes (plan, all_points, dest_start))
{
dest_end.fini ();
return false;
}
return true;
}
/* Note: Recursively calls itself.
* all_points includes phantom points
*/
template <typename accelerator_t>
bool get_points (hb_font_t *font, const accelerator_t &glyf_accelerator,
contour_point_vector_t &all_points /* OUT */,
hb_glyf_scratch_t &scratch,
contour_point_vector_t *points_with_deltas = nullptr, /* OUT */
head_maxp_info_t * head_maxp_info = nullptr, /* OUT */
unsigned *composite_contours = nullptr, /* OUT */
bool shift_points_hori = true,
bool use_my_metrics = true,
bool phantom_only = false,
hb_array_t<const int> coords = hb_array_t<const int> (),
hb_scalar_cache_t *gvar_cache = nullptr,
unsigned int depth = 0,
unsigned *edge_count = nullptr) const
{
if (unlikely (depth > HB_MAX_NESTING_LEVEL)) return false;
unsigned stack_edge_count = 0;
if (!edge_count) edge_count = &stack_edge_count;
if (unlikely (*edge_count > HB_MAX_GRAPH_EDGE_COUNT)) return false;
(*edge_count)++;
if (head_maxp_info)
{
head_maxp_info->maxComponentDepth = hb_max (head_maxp_info->maxComponentDepth, depth);
}
if (!coords && font->has_nonzero_coords)
coords = hb_array (font->coords, font->num_coords);
contour_point_vector_t &points = type == SIMPLE ? all_points : scratch.comp_points;
unsigned old_length = points.length;
switch (type) {
case SIMPLE:
if (depth == 0 && head_maxp_info)
head_maxp_info->maxContours = hb_max (head_maxp_info->maxContours, (unsigned) header->numberOfContours);
if (depth > 0 && composite_contours)
*composite_contours += (unsigned) header->numberOfContours;
if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (all_points, phantom_only)))
return false;
break;
case COMPOSITE:
{
for (auto &item : get_composite_iterator ())
if (unlikely (!item.get_points (points))) return false;
break;
}
case EMPTY:
break;
}
/* Init phantom points */
if (unlikely (!points.resize (points.length + PHANTOM_COUNT))) return false;
hb_array_t<contour_point_t> phantoms = points.as_array ().sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT);
{
// Duplicated code.
int lsb = 0;
glyf_accelerator.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb);
int h_delta = (int) header->xMin - lsb;
HB_UNUSED int tsb = 0;
#ifndef HB_NO_VERTICAL
glyf_accelerator.vmtx->get_leading_bearing_without_var_unscaled (gid, &tsb);
#endif
int v_orig = (int) header->yMax + tsb;
unsigned h_adv = glyf_accelerator.hmtx->get_advance_without_var_unscaled (gid);
unsigned v_adv =
#ifndef HB_NO_VERTICAL
glyf_accelerator.vmtx->get_advance_without_var_unscaled (gid)
#else
- font->face->get_upem ()
#endif
;
phantoms[PHANTOM_LEFT].x = h_delta;
phantoms[PHANTOM_RIGHT].x = (int) h_adv + h_delta;
phantoms[PHANTOM_TOP].y = v_orig;
phantoms[PHANTOM_BOTTOM].y = v_orig - (int) v_adv;
}
#ifndef HB_NO_VAR
if (hb_any (coords))
{
#ifndef HB_NO_BEYOND_64K
if (glyf_accelerator.GVAR->has_data ())
glyf_accelerator.GVAR->apply_deltas_to_points (gid,
coords,
points.as_array ().sub_array (old_length),
scratch,
gvar_cache,
phantom_only && type == SIMPLE);
else
#endif
glyf_accelerator.gvar->apply_deltas_to_points (gid,
coords,
points.as_array ().sub_array (old_length),
scratch,
gvar_cache,
phantom_only && type == SIMPLE);
}
#endif
// Mainly used by CompositeGlyph for calculating new X/Y offset values,
// so no need to extend it with child glyphs' points.
if (points_with_deltas != nullptr && depth == 0 && type == COMPOSITE)
{
assert (old_length == 0);
*points_with_deltas = points;
}
float shift = 0;
switch (type) {
case SIMPLE:
if (depth == 0 && head_maxp_info)
head_maxp_info->maxPoints = hb_max (head_maxp_info->maxPoints, all_points.length - old_length - 4);
shift = phantoms[PHANTOM_LEFT].x;
break;
case COMPOSITE:
{
hb_decycler_node_t decycler_node (scratch.decycler);
unsigned int comp_index = 0;
for (auto &item : get_composite_iterator ())
{
hb_codepoint_t item_gid = item.get_gid ();
if (unlikely (!decycler_node.visit (item_gid)))
{
comp_index++;
continue;
}
unsigned old_count = all_points.length;
if (unlikely ((!phantom_only || (use_my_metrics && item.is_use_my_metrics ())) &&
!glyf_accelerator.glyph_for_gid (item_gid)
.get_points (font,
glyf_accelerator,
all_points,
scratch,
points_with_deltas,
head_maxp_info,
composite_contours,
shift_points_hori,
use_my_metrics,
phantom_only,
coords,
gvar_cache,
depth + 1,
edge_count)))
{
points.resize (old_length);
return false;
}
// points might have been reallocated. Relocate phantoms.
phantoms = points.as_array ().sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT);
auto comp_points = all_points.as_array ().sub_array (old_count);
/* Copy phantom points from component if USE_MY_METRICS flag set */
if (use_my_metrics && item.is_use_my_metrics ())
for (unsigned int i = 0; i < PHANTOM_COUNT; i++)
phantoms[i] = comp_points[comp_points.length - PHANTOM_COUNT + i];
if (comp_points) // Empty in case of phantom_only
{
float matrix[4];
contour_point_t default_trans;
item.get_transformation (matrix, default_trans);
/* Apply component transformation & translation (with deltas applied) */
item.transform_points (comp_points, matrix, points[old_length + comp_index]);
}
if (item.is_anchored () && !phantom_only)
{
unsigned int p1, p2;
item.get_anchor_points (p1, p2);
if (likely (p1 < all_points.length && p2 < comp_points.length))
{
contour_point_t delta;
delta.init (all_points[p1].x - comp_points[p2].x,
all_points[p1].y - comp_points[p2].y);
item.translate (delta, comp_points);
}
}
all_points.resize (all_points.length - PHANTOM_COUNT);
if (all_points.length > HB_GLYF_MAX_POINTS)
{
points.resize (old_length);
return false;
}
comp_index++;
}
if (head_maxp_info && depth == 0)
{
if (composite_contours)
head_maxp_info->maxCompositeContours = hb_max (head_maxp_info->maxCompositeContours, *composite_contours);
head_maxp_info->maxCompositePoints = hb_max (head_maxp_info->maxCompositePoints, all_points.length);
head_maxp_info->maxComponentElements = hb_max (head_maxp_info->maxComponentElements, comp_index);
}
all_points.extend (phantoms);
shift = phantoms[PHANTOM_LEFT].x;
points.resize (old_length);
} break;
case EMPTY:
all_points.extend (phantoms);
shift = phantoms[PHANTOM_LEFT].x;
points.resize (old_length);
break;
}
if (depth == 0 && shift_points_hori) /* Apply at top level */
{
/* Undocumented rasterizer behavior:
* Shift points horizontally by the updated left side bearing
*/
if (shift)
for (auto &point : all_points)
point.x -= shift;
}
return !all_points.in_error ();
}
bool get_extents_without_var_scaled (hb_font_t *font, const glyf_accelerator_t &glyf_accelerator,
hb_glyph_extents_t *extents) const
{
if (type == EMPTY) return true; /* Empty glyph; zero extents. */
return header->get_extents_without_var_scaled (font, glyf_accelerator, gid, extents);
}
hb_bytes_t get_bytes () const { return bytes; }
glyph_type_t get_type () const { return type; }
const GlyphHeader *get_header () const { return header; }
Glyph () : bytes (),
header (bytes.as<GlyphHeader> ()),
gid (-1),
type(EMPTY)
{}
Glyph (hb_bytes_t bytes_,
hb_codepoint_t gid_ = (unsigned) -1) : bytes (bytes_),
header (bytes.as<GlyphHeader> ()),
gid (gid_)
{
int num_contours = header->numberOfContours;
if (unlikely (num_contours == 0)) type = EMPTY;
else if (num_contours > 0) type = SIMPLE;
else if (num_contours == -1) type = COMPOSITE;
else type = EMPTY; // Spec deviation; the spec says COMPOSITE for any negative count, but values below -1 are not seen in the wild.
}
protected:
hb_bytes_t bytes;
const GlyphHeader *header;
hb_codepoint_t gid;
glyph_type_t type;
};
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_GLYPH_HH */

View File

@@ -0,0 +1,52 @@
#ifndef OT_GLYF_GLYPHHEADER_HH
#define OT_GLYF_GLYPHHEADER_HH
#include "../../hb-open-type.hh"
namespace OT {
namespace glyf_impl {
struct GlyphHeader
{
bool has_data () const { return numberOfContours; }
template <typename accelerator_t>
bool get_extents_without_var_scaled (hb_font_t *font, const accelerator_t &glyf_accelerator,
hb_codepoint_t gid, hb_glyph_extents_t *extents) const
{
/* Undocumented rasterizer behavior: shift glyph to the left by (lsb - xMin), i.e., xMin = lsb */
/* extents->x_bearing = hb_min (glyph_header.xMin, glyph_header.xMax); */
int lsb = hb_min (xMin, xMax);
(void) glyf_accelerator.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb);
extents->x_bearing = lsb;
extents->y_bearing = hb_max (yMin, yMax);
extents->width = hb_max (xMin, xMax) - hb_min (xMin, xMax);
extents->height = hb_min (yMin, yMax) - hb_max (yMin, yMax);
font->scale_glyph_extents (extents);
return true;
}
HBINT16 numberOfContours;
/* If the number of contours is
* greater than or equal to zero,
* this is a simple glyph; if negative,
* this is a composite glyph. */
FWORD xMin; /* Minimum x for coordinate data. */
FWORD yMin; /* Minimum y for coordinate data. */
FWORD xMax; /* Maximum x for coordinate data. */
FWORD yMax; /* Maximum y for coordinate data. */
public:
DEFINE_SIZE_STATIC (10);
};
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_GLYPHHEADER_HH */

View File

@@ -0,0 +1,346 @@
#ifndef OT_GLYF_SIMPLEGLYPH_HH
#define OT_GLYF_SIMPLEGLYPH_HH
#include "../../hb-open-type.hh"
namespace OT {
namespace glyf_impl {
struct SimpleGlyph
{
enum simple_glyph_flag_t
{
FLAG_ON_CURVE = 0x01,
FLAG_X_SHORT = 0x02,
FLAG_Y_SHORT = 0x04,
FLAG_REPEAT = 0x08,
FLAG_X_SAME = 0x10,
FLAG_Y_SAME = 0x20,
FLAG_OVERLAP_SIMPLE = 0x40,
FLAG_CUBIC = 0x80
};
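/* Wire-format summary: the flags array is run-length compressed (a flag
 * byte with FLAG_REPEAT set is followed by a repeat count), and each
 * coordinate is a delta from the previous point: with the SHORT bit set it
 * is one unsigned byte whose sign comes from the SAME bit; with SHORT
 * clear, SAME means "delta is zero", otherwise a signed 16-bit delta
 * follows.  read_flags () / read_points () below decode exactly this. */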
const GlyphHeader &header;
hb_bytes_t bytes;
SimpleGlyph (const GlyphHeader &header_, hb_bytes_t bytes_) :
header (header_), bytes (bytes_) {}
unsigned int instruction_len_offset () const
{ return GlyphHeader::static_size + 2 * header.numberOfContours; }
unsigned int length (unsigned int instruction_len) const
{ return instruction_len_offset () + 2 + instruction_len; }
bool has_instructions_length () const
{
return instruction_len_offset () + 2 <= bytes.length;
}
unsigned int instructions_length () const
{
unsigned int instruction_length_offset = instruction_len_offset ();
if (unlikely (instruction_length_offset + 2 > bytes.length)) return 0;
const HBUINT16 &instructionLength = StructAtOffset<HBUINT16> (&bytes, instruction_length_offset);
/* Out of bounds of the current glyph */
if (unlikely (length (instructionLength) > bytes.length)) return 0;
return instructionLength;
}
const hb_bytes_t trim_padding () const
{
/* based on FontTools _g_l_y_f.py::trim */
const uint8_t *glyph = (uint8_t*) bytes.arrayZ;
const uint8_t *glyph_end = glyph + bytes.length;
/* simple glyph w/contours, possibly trimmable */
glyph += instruction_len_offset ();
if (unlikely (glyph + 2 >= glyph_end)) return hb_bytes_t ();
unsigned int num_coordinates = StructAtOffset<HBUINT16> (glyph - 2, 0) + 1;
unsigned int num_instructions = StructAtOffset<HBUINT16> (glyph, 0);
glyph += 2 + num_instructions;
unsigned int coord_bytes = 0;
unsigned int coords_with_flags = 0;
while (glyph < glyph_end)
{
uint8_t flag = *glyph;
glyph++;
unsigned int repeat = 1;
if (flag & FLAG_REPEAT)
{
if (unlikely (glyph >= glyph_end)) return hb_bytes_t ();
repeat = *glyph + 1;
glyph++;
}
unsigned int xBytes, yBytes;
xBytes = yBytes = 0;
if (flag & FLAG_X_SHORT) xBytes = 1;
else if ((flag & FLAG_X_SAME) == 0) xBytes = 2;
if (flag & FLAG_Y_SHORT) yBytes = 1;
else if ((flag & FLAG_Y_SAME) == 0) yBytes = 2;
coord_bytes += (xBytes + yBytes) * repeat;
coords_with_flags += repeat;
if (coords_with_flags >= num_coordinates) break;
}
if (unlikely (coords_with_flags != num_coordinates)) return hb_bytes_t ();
return bytes.sub_array (0, bytes.length + coord_bytes - (glyph_end - glyph));
}
/* zero instruction length */
void drop_hints ()
{
if (!has_instructions_length ()) return;
GlyphHeader &glyph_header = const_cast<GlyphHeader &> (header);
(HBUINT16 &) StructAtOffset<HBUINT16> (&glyph_header, instruction_len_offset ()) = 0;
}
void drop_hints_bytes (hb_bytes_t &dest_start, hb_bytes_t &dest_end) const
{
unsigned int instructions_len = instructions_length ();
unsigned int glyph_length = length (instructions_len);
dest_start = bytes.sub_array (0, glyph_length - instructions_len);
dest_end = bytes.sub_array (glyph_length, bytes.length - glyph_length);
}
void set_overlaps_flag ()
{
if (unlikely (!header.numberOfContours)) return;
unsigned flags_offset = length (instructions_length ());
if (unlikely (flags_offset + 1 > bytes.length)) return;
HBUINT8 &first_flag = (HBUINT8 &) StructAtOffset<HBUINT16> (&bytes, flags_offset);
first_flag = (uint8_t) first_flag | FLAG_OVERLAP_SIMPLE;
}
static bool read_flags (const HBUINT8 *&p /* IN/OUT */,
hb_array_t<contour_point_t> points_ /* IN/OUT */,
const HBUINT8 *end)
{
auto *points = points_.arrayZ;
unsigned count = points_.length;
for (unsigned int i = 0; i < count;)
{
if (unlikely (p + 1 > end)) return false;
uint8_t flag = *p++;
points[i++].flag = flag;
if (flag & FLAG_REPEAT)
{
if (unlikely (p + 1 > end)) return false;
unsigned int repeat_count = *p++;
unsigned stop = hb_min (i + repeat_count, count);
for (; i < stop; i++)
points[i].flag = flag;
}
}
return true;
}
static bool read_points (const HBUINT8 *&p /* IN/OUT */,
hb_array_t<contour_point_t> points_ /* IN/OUT */,
const HBUINT8 *end,
float contour_point_t::*m,
const simple_glyph_flag_t short_flag,
const simple_glyph_flag_t same_flag)
{
int v = 0;
for (auto &point : points_)
{
unsigned flag = point.flag;
if (flag & short_flag)
{
if (unlikely (p + 1 > end)) return false;
v += (bool(flag & same_flag) * 2 - 1) * *p++;
}
else
{
if (!(flag & same_flag))
{
if (unlikely (p + HBINT16::static_size > end)) return false;
v += *(const HBINT16 *) p;
p += HBINT16::static_size;
}
}
point.*m = v;
}
return true;
}
bool get_contour_points (contour_point_vector_t &points /* OUT */,
bool phantom_only = false) const
{
const HBUINT16 *endPtsOfContours = &StructAfter<HBUINT16> (header);
int num_contours = header.numberOfContours;
assert (num_contours > 0);
/* One extra item at the end, for the instruction-count below. */
if (unlikely (!bytes.check_range (&endPtsOfContours[num_contours]))) return false;
unsigned int num_points = endPtsOfContours[num_contours - 1] + 1;
unsigned old_length = points.length;
points.alloc (points.length + num_points + 4); // Allocate for phantom points, to avoid a possible copy
if (unlikely (!points.resize (points.length + num_points, false))) return false;
auto points_ = points.as_array ().sub_array (old_length);
if (!phantom_only)
hb_memset (points_.arrayZ, 0, sizeof (contour_point_t) * num_points);
if (phantom_only) return true;
for (int i = 0; i < num_contours; i++)
points_[endPtsOfContours[i]].is_end_point = true;
/* Skip instructions */
const HBUINT8 *p = &StructAtOffset<HBUINT8> (&endPtsOfContours[num_contours + 1],
endPtsOfContours[num_contours]);
if (unlikely ((const char *) p < bytes.arrayZ)) return false; /* Unlikely overflow */
const HBUINT8 *end = (const HBUINT8 *) (bytes.arrayZ + bytes.length);
if (unlikely (p >= end)) return false;
/* Read x & y coordinates */
return read_flags (p, points_, end)
&& read_points (p, points_, end, &contour_point_t::x,
FLAG_X_SHORT, FLAG_X_SAME)
&& read_points (p, points_, end, &contour_point_t::y,
FLAG_Y_SHORT, FLAG_Y_SAME);
}
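/* Inverse of read_points: pick the smallest wire encoding for one
 * coordinate delta. 0 becomes same_flag with no bytes; |delta| <= 255
 * becomes short_flag plus one byte, with the sign carried by same_flag;
 * anything larger is written as a signed 16-bit value, e.g. -300 is
 * emitted as the two bytes 0xFE 0xD4. */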
static void encode_coord (int value,
unsigned &flag,
const simple_glyph_flag_t short_flag,
const simple_glyph_flag_t same_flag,
hb_vector_t<uint8_t> &coords /* OUT */)
{
if (value == 0)
{
flag |= same_flag;
}
else if (value >= -255 && value <= 255)
{
flag |= short_flag;
if (value > 0) flag |= same_flag;
else value = -value;
coords.arrayZ[coords.length++] = (uint8_t) value;
}
else
{
int16_t val = value;
coords.arrayZ[coords.length++] = val >> 8;
coords.arrayZ[coords.length++] = val & 0xff;
}
}
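/* Run-length encode the flags. The first two identical flags in a run
 * are pushed plainly; from the third occurrence on, the pair is
 * rewritten in place as { flag|FLAG_REPEAT, repeat_count }, with the
 * count capped at 255 before a new run is started. */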
static void encode_flag (unsigned flag,
unsigned &repeat,
unsigned lastflag,
hb_vector_t<uint8_t> &flags /* OUT */)
{
if (flag == lastflag && repeat != 255)
{
repeat++;
if (repeat == 1)
{
/* We know there's room. */
flags.arrayZ[flags.length++] = flag;
}
else
{
unsigned len = flags.length;
flags.arrayZ[len-2] = flag | FLAG_REPEAT;
flags.arrayZ[len-1] = repeat;
}
}
else
{
repeat = 0;
flags.arrayZ[flags.length++] = flag;
}
}
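/* Re-serialize this simple glyph's body after instancing:
 * endPtsOfContours and the instruction-length field are copied from the
 * source, instructions are kept or dropped per no_hinting, and the
 * flags/x/y arrays are re-encoded from the rounded point deltas (the
 * trailing 4 phantom points are not written). */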
bool compile_bytes_with_deltas (const contour_point_vector_t &all_points,
bool no_hinting,
hb_bytes_t &dest_bytes /* OUT */)
{
if (header.numberOfContours == 0 || all_points.length <= 4)
{
dest_bytes = hb_bytes_t ();
return true;
}
unsigned num_points = all_points.length - 4;
hb_vector_t<uint8_t> flags, x_coords, y_coords;
if (unlikely (!flags.alloc_exact (num_points))) return false;
if (unlikely (!x_coords.alloc_exact (2*num_points))) return false;
if (unlikely (!y_coords.alloc_exact (2*num_points))) return false;
unsigned lastflag = 255, repeat = 0;
int prev_x = 0, prev_y = 0;
for (unsigned i = 0; i < num_points; i++)
{
unsigned flag = all_points.arrayZ[i].flag;
flag &= FLAG_ON_CURVE | FLAG_OVERLAP_SIMPLE | FLAG_CUBIC;
int cur_x = roundf (all_points.arrayZ[i].x);
int cur_y = roundf (all_points.arrayZ[i].y);
encode_coord (cur_x - prev_x, flag, FLAG_X_SHORT, FLAG_X_SAME, x_coords);
encode_coord (cur_y - prev_y, flag, FLAG_Y_SHORT, FLAG_Y_SAME, y_coords);
encode_flag (flag, repeat, lastflag, flags);
prev_x = cur_x;
prev_y = cur_y;
lastflag = flag;
}
unsigned len_before_instrs = 2 * header.numberOfContours + 2;
unsigned len_instrs = instructions_length ();
unsigned total_len = len_before_instrs + flags.length + x_coords.length + y_coords.length;
if (!no_hinting)
total_len += len_instrs;
char *p = (char *) hb_malloc (total_len);
if (unlikely (!p)) return false;
const char *src = bytes.arrayZ + GlyphHeader::static_size;
char *cur = p;
hb_memcpy (p, src, len_before_instrs);
cur += len_before_instrs;
src += len_before_instrs;
if (!no_hinting)
{
hb_memcpy (cur, src, len_instrs);
cur += len_instrs;
}
hb_memcpy (cur, flags.arrayZ, flags.length);
cur += flags.length;
hb_memcpy (cur, x_coords.arrayZ, x_coords.length);
cur += x_coords.length;
hb_memcpy (cur, y_coords.arrayZ, y_coords.length);
dest_bytes = hb_bytes_t (p, total_len);
return true;
}
};
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_SIMPLEGLYPH_HH */


@@ -0,0 +1,141 @@
#ifndef OT_GLYF_SUBSETGLYPH_HH
#define OT_GLYF_SUBSETGLYPH_HH
#include "../../hb-open-type.hh"
namespace OT {
struct glyf_accelerator_t;
namespace glyf_impl {
struct SubsetGlyph
{
hb_codepoint_t old_gid;
Glyph source_glyph;
hb_bytes_t dest_start; /* region of source_glyph to copy first */
hb_bytes_t dest_end; /* region of source_glyph to copy second */
bool allocated;
bool serialize (hb_serialize_context_t *c,
bool use_short_loca,
const hb_subset_plan_t *plan) const
{
TRACE_SERIALIZE (this);
hb_bytes_t dest_glyph = dest_start.copy (c);
hb_bytes_t end_copy = dest_end.copy (c);
if (!end_copy.arrayZ || !dest_glyph.arrayZ) {
return false;
}
dest_glyph = hb_bytes_t (&dest_glyph, dest_glyph.length + end_copy.length);
unsigned int pad_length = use_short_loca ? padding () : 0;
DEBUG_MSG (SUBSET, nullptr, "serialize %u byte glyph, width %u pad %u", dest_glyph.length, dest_glyph.length + pad_length, pad_length);
HBUINT8 pad;
pad = 0;
while (pad_length > 0)
{
(void) c->embed (pad);
pad_length--;
}
if (unlikely (!dest_glyph.length)) return_trace (true);
/* Update component gids. */
for (auto &_ : Glyph (dest_glyph).get_composite_iterator ())
{
hb_codepoint_t new_gid;
if (plan->new_gid_for_old_gid (_.get_gid(), &new_gid))
const_cast<CompositeGlyphRecord &> (_).set_gid (new_gid);
}
#ifndef HB_NO_BEYOND_64K
auto it = Glyph (dest_glyph).get_composite_iterator ();
if (it)
{
/* lower GID24 to GID16 in components if possible. */
char *p = it ? (char *) &*it : nullptr;
char *q = p;
const char *end = dest_glyph.arrayZ + dest_glyph.length;
while (it)
{
auto &rec = const_cast<CompositeGlyphRecord &> (*it);
++it;
q += rec.get_size ();
rec.lower_gid_24_to_16 ();
unsigned size = rec.get_size ();
memmove (p, &rec, size);
p += size;
}
memmove (p, q, end - q);
p += end - q;
/* We want to shorten the glyph, but we can't do that without
* updating the length in the loca table, which is already
* written out :-(. So we just fill the rest of the glyph with
* harmless instructions, since that's what they will be
* interpreted as.
*
* Should move the lowering to _populate_subset_glyphs() to
* fix this issue. */
hb_memset (p, 0x7A /* TrueType instruction ROFF; harmless */, end - p);
p += end - p;
dest_glyph = hb_bytes_t (dest_glyph.arrayZ, p - (char *) dest_glyph.arrayZ);
// TODO: Padding; & trim serialized bytes.
// TODO: Update length in loca. Ugh.
}
#endif
if (plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
Glyph (dest_glyph).drop_hints ();
if (plan->flags & HB_SUBSET_FLAGS_SET_OVERLAPS_FLAG)
Glyph (dest_glyph).set_overlaps_flag ();
return_trace (true);
}
bool compile_bytes_with_deltas (const hb_subset_plan_t *plan,
hb_font_t *font,
const glyf_accelerator_t &glyf)
{
allocated = source_glyph.compile_bytes_with_deltas (plan, font, glyf, dest_start, dest_end);
return allocated;
}
void free_compiled_bytes ()
{
if (likely (allocated)) {
allocated = false;
dest_start.fini ();
dest_end.fini ();
}
}
void drop_hints_bytes ()
{ source_glyph.drop_hints_bytes (dest_start, dest_end); }
unsigned int length () const { return dest_start.length + dest_end.length; }
/* Pad length to a multiple of 2 so short (2-byte) loca offsets stay valid. */
unsigned int padding () const { return length () % 2; }
unsigned int padded_size () const { return length () + padding (); }
};
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_SUBSETGLYPH_HH */


@@ -0,0 +1,68 @@
#ifndef OT_GLYF_COMPOSITE_ITER_HH
#define OT_GLYF_COMPOSITE_ITER_HH
#include "../../hb.hh"
namespace OT {
namespace glyf_impl {
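/* A bounds-checked forward iterator over the composite records of a
 * glyph: set_current () validates that each record's header and full
 * size fit inside the glyph bytes, and iteration stops (current ==
 * nullptr) after the last record (has_more () false). */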
template <typename CompositeGlyphRecord>
struct composite_iter_tmpl : hb_iter_with_fallback_t<composite_iter_tmpl<CompositeGlyphRecord>,
const CompositeGlyphRecord &>
{
typedef const CompositeGlyphRecord *__item_t__;
composite_iter_tmpl (hb_bytes_t glyph_, __item_t__ current_) :
glyph (glyph_), current (nullptr), current_size (0)
{
set_current (current_);
}
composite_iter_tmpl () : glyph (hb_bytes_t ()), current (nullptr), current_size (0) {}
const CompositeGlyphRecord & __item__ () const { return *current; }
bool __more__ () const { return current; }
void __next__ ()
{
if (!current->has_more ()) { current = nullptr; return; }
set_current (&StructAtOffset<CompositeGlyphRecord> (current, current_size));
}
composite_iter_tmpl __end__ () const { return composite_iter_tmpl (); }
bool operator != (const composite_iter_tmpl& o) const
{ return current != o.current; }
void set_current (__item_t__ current_)
{
if (!glyph.check_range (current_, CompositeGlyphRecord::min_size))
{
current = nullptr;
current_size = 0;
return;
}
unsigned size = current_->get_size ();
if (!glyph.check_range (current_, size))
{
current = nullptr;
current_size = 0;
return;
}
current = current_;
current_size = size;
}
private:
hb_bytes_t glyph;
__item_t__ current;
unsigned current_size;
};
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_COMPOSITE_ITER_HH */


@@ -0,0 +1,127 @@
#ifndef OT_GLYF_GLYF_HELPERS_HH
#define OT_GLYF_GLYF_HELPERS_HH
#include "../../hb-open-type.hh"
#include "../../hb-subset-plan.hh"
#include "loca.hh"
namespace OT {
namespace glyf_impl {
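/* Write the loca array from an iterator of padded glyph lengths. Short
 * loca stores offset/2 in 16 bits, long loca stores the raw offset in
 * 32 bits; e.g. padded sizes {100, 22} yield offsets 0, 100, 122,
 * stored as 0, 50, 61 in the short format. Gids missing from
 * new_to_old_gid_list get empty glyphs (repeated offsets). */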
template<typename IteratorIn, typename TypeOut,
hb_requires (hb_is_source_of (IteratorIn, unsigned int))>
static void
_write_loca (IteratorIn&& it,
const hb_sorted_vector_t<hb_codepoint_pair_t> new_to_old_gid_list,
bool short_offsets,
TypeOut *dest,
unsigned num_offsets)
{
unsigned right_shift = short_offsets ? 1 : 0;
unsigned offset = 0;
TypeOut value;
value = 0;
*dest++ = value;
hb_codepoint_t last = 0;
for (auto _ : new_to_old_gid_list)
{
hb_codepoint_t gid = _.first;
for (; last < gid; last++)
{
DEBUG_MSG (SUBSET, nullptr, "loca entry empty offset %u", offset);
*dest++ = value;
}
unsigned padded_size = *it++;
offset += padded_size;
DEBUG_MSG (SUBSET, nullptr, "loca entry gid %" PRIu32 " offset %u padded-size %u", gid, offset, padded_size);
value = offset >> right_shift;
*dest++ = value;
last++; // Skip over gid
}
unsigned num_glyphs = num_offsets - 1;
for (; last < num_glyphs; last++)
{
DEBUG_MSG (SUBSET, nullptr, "loca entry empty offset %u", offset);
*dest++ = value;
}
}
static bool
_add_head_and_set_loca_version (hb_subset_plan_t *plan, bool use_short_loca)
{
hb_blob_t *head_blob = hb_sanitize_context_t ().reference_table<head> (plan->source);
hb_blob_t *head_prime_blob = hb_blob_copy_writable_or_fail (head_blob);
hb_blob_destroy (head_blob);
if (unlikely (!head_prime_blob))
return false;
head *head_prime = (head *) hb_blob_get_data_writable (head_prime_blob, nullptr);
head_prime->indexToLocFormat = use_short_loca ? 0 : 1;
if (plan->normalized_coords)
{
head_prime->xMin = plan->head_maxp_info.xMin;
head_prime->xMax = plan->head_maxp_info.xMax;
head_prime->yMin = plan->head_maxp_info.yMin;
head_prime->yMax = plan->head_maxp_info.yMax;
unsigned orig_flag = head_prime->flags;
if (plan->head_maxp_info.allXMinIsLsb)
orig_flag |= 1 << 1;
else
orig_flag &= ~(1 << 1);
head_prime->flags = orig_flag;
}
bool success = plan->add_table (HB_OT_TAG_head, head_prime_blob);
hb_blob_destroy (head_prime_blob);
return success;
}
template<typename Iterator,
hb_requires (hb_is_source_of (Iterator, unsigned int))>
static bool
_add_loca_and_head (hb_subset_context_t *c,
Iterator padded_offsets,
bool use_short_loca)
{
unsigned num_offsets = c->plan->num_output_glyphs () + 1;
unsigned entry_size = use_short_loca ? 2 : 4;
char *loca_prime_data = (char *) hb_malloc (entry_size * num_offsets);
if (unlikely (!loca_prime_data)) return false;
DEBUG_MSG (SUBSET, nullptr, "loca entry_size %u num_offsets %u size %u",
entry_size, num_offsets, entry_size * num_offsets);
if (use_short_loca)
_write_loca (padded_offsets, c->plan->new_to_old_gid_list, true, (HBUINT16 *) loca_prime_data, num_offsets);
else
_write_loca (padded_offsets, c->plan->new_to_old_gid_list, false, (HBUINT32 *) loca_prime_data, num_offsets);
hb_blob_t *loca_blob = hb_blob_create (loca_prime_data,
entry_size * num_offsets,
HB_MEMORY_MODE_WRITABLE,
loca_prime_data,
hb_free);
bool result = c->plan->add_table (HB_OT_TAG_loca, loca_blob)
&& _add_head_and_set_loca_version (c->plan, use_short_loca);
hb_blob_destroy (loca_blob);
return result;
}
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_GLYF_HELPERS_HH */

thirdparty/harfbuzz/src/OT/glyf/glyf.hh

@@ -0,0 +1,641 @@
#ifndef OT_GLYF_GLYF_HH
#define OT_GLYF_GLYF_HH
#include "../../hb-open-type.hh"
#include "../../hb-ot-head-table.hh"
#include "../../hb-ot-hmtx-table.hh"
#include "../../hb-ot-var-gvar-table.hh"
#include "../../hb-draw.hh"
#include "../../hb-paint.hh"
#include "glyf-helpers.hh"
#include "Glyph.hh"
#include "SubsetGlyph.hh"
#include "loca.hh"
#include "path-builder.hh"
namespace OT {
/*
* glyf -- TrueType Glyph Data
* https://docs.microsoft.com/en-us/typography/opentype/spec/glyf
*/
#define HB_OT_TAG_glyf HB_TAG('g','l','y','f')
struct glyf
{
friend struct glyf_accelerator_t;
static constexpr hb_tag_t tableTag = HB_OT_TAG_glyf;
static bool has_valid_glyf_format(const hb_face_t* face)
{
const OT::head &head = *face->table.head;
return head.indexToLocFormat <= 1 && head.glyphDataFormat <= 1;
}
bool sanitize (hb_sanitize_context_t *c HB_UNUSED) const
{
TRACE_SANITIZE (this);
/* Runtime checks only, as eagerly sanitizing each glyph is costly */
return_trace (true);
}
/* hb_requires (hb_is_source_of (Iterator, SubsetGlyph)) would be the natural
 * constraint here, but the compiler complains that the identifier isn't declared. */
template <typename Iterator>
bool serialize (hb_serialize_context_t *c,
Iterator it,
bool use_short_loca,
const hb_subset_plan_t *plan)
{
TRACE_SERIALIZE (this);
unsigned init_len = c->length ();
for (auto &_ : it)
if (unlikely (!_.serialize (c, use_short_loca, plan)))
return false;
/* As a special case when all glyphs in the font are empty, add a zero byte
* to the table, so that OTS doesn't reject it, and to make the table work
* on Windows as well.
* See https://github.com/khaledhosny/ots/issues/52 */
if (init_len == c->length ())
{
HBUINT8 empty_byte;
empty_byte = 0;
c->copy (empty_byte);
}
return_trace (true);
}
/* Byte region(s) per glyph to output:
* unpadded, with hints removed if so requested.
* If we fail to process a glyph we produce an empty (0-length) glyph. */
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
if (!has_valid_glyf_format (c->plan->source)) {
// glyf format is unknown; don't attempt to subset it.
DEBUG_MSG (SUBSET, nullptr,
"unknown glyf format, dropping from subset.");
return_trace (false);
}
hb_font_t *font = nullptr;
if (c->plan->normalized_coords)
{
font = _create_font_for_instancing (c->plan);
if (unlikely (!font))
return_trace (false);
}
hb_vector_t<unsigned> padded_offsets;
if (unlikely (!padded_offsets.alloc_exact (c->plan->new_to_old_gid_list.length)))
return_trace (false);
hb_vector_t<glyf_impl::SubsetGlyph> glyphs;
if (!_populate_subset_glyphs (c->plan, font, glyphs))
{
hb_font_destroy (font);
return_trace (false);
}
if (font)
hb_font_destroy (font);
unsigned max_offset = 0;
for (auto &g : glyphs)
{
unsigned size = g.padded_size ();
padded_offsets.push (size);
max_offset += size;
}
bool use_short_loca = false;
if (likely (!c->plan->force_long_loca))
use_short_loca = max_offset < 0x1FFFF;
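/* Short loca stores offset/2 in a uint16, so the largest addressable
 * offset is 0xFFFF * 2 = 0x1FFFE; hence the < 0x1FFFF test. */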
if (!use_short_loca)
{
padded_offsets.resize (0);
for (auto &g : glyphs)
padded_offsets.push (g.length ());
}
auto *glyf_prime = c->serializer->start_embed <glyf> ();
bool result = glyf_prime->serialize (c->serializer, hb_iter (glyphs), use_short_loca, c->plan);
if (c->plan->normalized_coords && !c->plan->pinned_at_default)
_free_compiled_subset_glyphs (glyphs);
if (unlikely (!c->serializer->check_success (glyf_impl::_add_loca_and_head (c,
padded_offsets.iter (),
use_short_loca))))
return_trace (false);
return result;
}
bool
_populate_subset_glyphs (const hb_subset_plan_t *plan,
hb_font_t *font,
hb_vector_t<glyf_impl::SubsetGlyph>& glyphs /* OUT */) const;
hb_font_t *
_create_font_for_instancing (const hb_subset_plan_t *plan) const;
void _free_compiled_subset_glyphs (hb_vector_t<glyf_impl::SubsetGlyph> &glyphs) const
{
for (auto &g : glyphs)
g.free_compiled_bytes ();
}
protected:
UnsizedArrayOf<HBUINT8>
dataZ; /* Glyphs data. */
public:
DEFINE_SIZE_MIN (0); /* In reality, this is UNBOUNDED() type; but since we always
* check the size externally, allow Null() object of it by
* defining it _MIN instead. */
};
struct glyf_accelerator_t
{
glyf_accelerator_t (hb_face_t *face)
{
short_offset = false;
num_glyphs = 0;
loca_table = nullptr;
glyf_table = nullptr;
#ifndef HB_NO_VAR
gvar = nullptr;
#ifndef HB_NO_BEYOND_64K
GVAR = nullptr;
#endif
#endif
hmtx = nullptr;
#ifndef HB_NO_VERTICAL
vmtx = nullptr;
#endif
const OT::head &head = *face->table.head;
if (!glyf::has_valid_glyf_format (face))
/* Unknown format. Leave num_glyphs=0, that takes care of disabling us. */
return;
short_offset = 0 == head.indexToLocFormat;
loca_table = face->table.loca.get_blob (); // Needs no destruct!
glyf_table = hb_sanitize_context_t ().reference_table<glyf> (face);
#ifndef HB_NO_VAR
gvar = face->table.gvar;
#ifndef HB_NO_BEYOND_64K
GVAR = face->table.GVAR;
#endif
#endif
hmtx = face->table.hmtx;
#ifndef HB_NO_VERTICAL
vmtx = face->table.vmtx;
#endif
num_glyphs = hb_max (1u, loca_table.get_length () / (short_offset ? 2 : 4)) - 1;
num_glyphs = hb_min (num_glyphs, face->get_num_glyphs ());
}
~glyf_accelerator_t ()
{
auto *scratch = cached_scratch.get_relaxed ();
if (scratch)
{
scratch->~hb_glyf_scratch_t ();
hb_free (scratch);
}
glyf_table.destroy ();
}
bool has_data () const { return num_glyphs; }
protected:
template<typename T>
bool get_points (hb_font_t *font, hb_codepoint_t gid, T consumer,
hb_array_t<const int> coords,
hb_glyf_scratch_t &scratch,
hb_scalar_cache_t *gvar_cache = nullptr) const
{
if (gid >= num_glyphs) return false;
auto &all_points = scratch.all_points;
all_points.resize (0);
bool phantom_only = !consumer.is_consuming_contour_points ();
if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, scratch, nullptr, nullptr, nullptr, true, true, phantom_only, coords, gvar_cache)))
return false;
unsigned count = all_points.length;
assert (count >= glyf_impl::PHANTOM_COUNT);
count -= glyf_impl::PHANTOM_COUNT;
if (consumer.is_consuming_contour_points ())
{
auto *points = all_points.arrayZ;
if (false)
{
/* Our path-builder was designed to work with this simple loop.
* But FreeType and CoreText do it differently, so we match those
* with the other, more complicated, code branch below. */
for (unsigned i = 0; i < count; i++)
{
consumer.consume_point (points[i]);
if (points[i].is_end_point)
consumer.contour_end ();
}
}
else
{
for (unsigned i = 0; i < count; i++)
{
// Start of a contour.
if (points[i].flag & glyf_impl::SimpleGlyph::FLAG_ON_CURVE)
{
// First point is on-curve. Draw the contour.
for (; i < count; i++)
{
consumer.consume_point (points[i]);
if (points[i].is_end_point)
{
consumer.contour_end ();
break;
}
}
}
else
{
unsigned start = i;
// Find end of the contour.
for (; i < count; i++)
if (points[i].is_end_point)
break;
unsigned end = i;
// Enough to start from the end. Our path-builder takes care of the rest.
if (likely (end < count)) // Can only fail in case of alloc failure *maybe*.
consumer.consume_point (points[end]);
for (i = start; i < end; i++)
consumer.consume_point (points[i]);
consumer.contour_end ();
}
}
}
consumer.points_end ();
}
/* Where to write phantoms, nullptr if not requested */
contour_point_t *phantoms = consumer.get_phantoms_sink ();
if (phantoms)
for (unsigned i = 0; i < glyf_impl::PHANTOM_COUNT; ++i)
phantoms[i] = all_points.arrayZ[count + i];
return true;
}
public:
#ifndef HB_NO_VAR
struct points_aggregator_t
{
hb_font_t *font;
hb_glyph_extents_t *extents;
contour_point_t *phantoms;
bool scaled;
struct contour_bounds_t
{
contour_bounds_t () { min_x = min_y = FLT_MAX; max_x = max_y = -FLT_MAX; }
void add (const contour_point_t &p)
{
min_x = hb_min (min_x, p.x);
min_y = hb_min (min_y, p.y);
max_x = hb_max (max_x, p.x);
max_y = hb_max (max_y, p.y);
}
bool empty () const { return (min_x >= max_x) || (min_y >= max_y); }
void get_extents (hb_font_t *font, hb_glyph_extents_t *extents, bool scaled)
{
if (unlikely (empty ()))
{
extents->width = 0;
extents->x_bearing = 0;
extents->height = 0;
extents->y_bearing = 0;
return;
}
{
extents->x_bearing = roundf (min_x);
extents->width = roundf (max_x - extents->x_bearing);
extents->y_bearing = roundf (max_y);
extents->height = roundf (min_y - extents->y_bearing);
if (scaled)
font->scale_glyph_extents (extents);
}
}
protected:
float min_x, min_y, max_x, max_y;
} bounds;
points_aggregator_t (hb_font_t *font_, hb_glyph_extents_t *extents_, contour_point_t *phantoms_, bool scaled_)
{
font = font_;
extents = extents_;
phantoms = phantoms_;
scaled = scaled_;
if (extents) bounds = contour_bounds_t ();
}
HB_ALWAYS_INLINE
void consume_point (const contour_point_t &point) { bounds.add (point); }
void contour_end () {}
void points_end () { bounds.get_extents (font, extents, scaled); }
bool is_consuming_contour_points () { return extents; }
contour_point_t *get_phantoms_sink () { return phantoms; }
};
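/* The last four points of every decoded glyph are the TrueType phantom
 * points (left/right side-bearing and top/bottom origin); the horizontal
 * advance is phantom_right.x - phantom_left.x and the vertical advance
 * phantom_top.y - phantom_bottom.y, which is what the helpers below
 * read back. */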
#ifndef HB_NO_VAR
unsigned
get_advance_with_var_unscaled (hb_codepoint_t gid,
hb_font_t *font,
bool is_vertical,
hb_glyf_scratch_t &scratch,
hb_scalar_cache_t *gvar_cache = nullptr) const
{
if (unlikely (gid >= num_glyphs)) return 0;
bool success = false;
contour_point_t phantoms[glyf_impl::PHANTOM_COUNT];
success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms, false),
hb_array (font->coords,
font->has_nonzero_coords ? font->num_coords : 0),
scratch, gvar_cache);
if (unlikely (!success))
{
unsigned upem = font->face->get_upem ();
return is_vertical ? upem : upem / 2;
}
float result = is_vertical
? phantoms[glyf_impl::PHANTOM_TOP].y - phantoms[glyf_impl::PHANTOM_BOTTOM].y
: phantoms[glyf_impl::PHANTOM_RIGHT].x - phantoms[glyf_impl::PHANTOM_LEFT].x;
return hb_clamp (roundf (result), 0.f, (float) UINT_MAX / 2);
}
float
get_v_origin_with_var_unscaled (hb_codepoint_t gid,
hb_font_t *font,
hb_glyf_scratch_t &scratch,
hb_scalar_cache_t *gvar_cache = nullptr) const
{
if (unlikely (gid >= num_glyphs)) return 0;
bool success = false;
contour_point_t phantoms[glyf_impl::PHANTOM_COUNT];
success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms, false),
hb_array (font->coords,
font->has_nonzero_coords ? font->num_coords : 0),
scratch, gvar_cache);
if (unlikely (!success))
{
return font->face->get_upem ();
}
return phantoms[glyf_impl::PHANTOM_TOP].y;
}
#endif
#endif
public:
bool get_extents (hb_font_t *font,
hb_codepoint_t gid,
hb_glyph_extents_t *extents) const
{ return get_extents_at (font, gid, extents, hb_array (font->coords,
font->has_nonzero_coords ? font->num_coords : 0)); }
bool get_extents_at (hb_font_t *font,
hb_codepoint_t gid,
hb_glyph_extents_t *extents,
hb_array_t<const int> coords) const
{
if (unlikely (gid >= num_glyphs)) return false;
#ifndef HB_NO_VAR
if (coords)
{
hb_glyf_scratch_t *scratch = acquire_scratch ();
if (unlikely (!scratch))
return false;
bool ret = get_points (font,
gid,
points_aggregator_t (font, extents, nullptr, true),
coords,
*scratch);
release_scratch (scratch);
return ret;
}
#endif
return glyph_for_gid (gid).get_extents_without_var_scaled (font, *this, extents);
}
const glyf_impl::Glyph
glyph_for_gid (hb_codepoint_t gid, bool needs_padding_removal = false) const
{
if (unlikely (gid >= num_glyphs)) return glyf_impl::Glyph ();
unsigned int start_offset, end_offset;
if (short_offset)
{
const HBUINT16 *offsets = (const HBUINT16 *) loca_table->dataZ.arrayZ;
start_offset = 2 * offsets[gid];
end_offset = 2 * offsets[gid + 1];
}
else
{
const HBUINT32 *offsets = (const HBUINT32 *) loca_table->dataZ.arrayZ;
start_offset = offsets[gid];
end_offset = offsets[gid + 1];
}
if (unlikely (start_offset > end_offset || end_offset > glyf_table.get_length ()))
return glyf_impl::Glyph ();
glyf_impl::Glyph glyph (hb_bytes_t ((const char *) this->glyf_table + start_offset,
end_offset - start_offset), gid);
return needs_padding_removal ? glyf_impl::Glyph (glyph.trim_padding (), gid) : glyph;
}
bool
get_path (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session, hb_scalar_cache_t *gvar_cache = nullptr) const
{
if (!has_data ()) return false;
hb_glyf_scratch_t *scratch = acquire_scratch ();
if (unlikely (!scratch))
return true;
bool ret = get_points (font, gid, glyf_impl::path_builder_t (font, draw_session),
hb_array (font->coords,
font->has_nonzero_coords ? font->num_coords : 0),
*scratch,
gvar_cache);
release_scratch (scratch);
return ret;
}
bool
get_path_at (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session,
hb_array_t<const int> coords,
hb_glyf_scratch_t &scratch,
hb_scalar_cache_t *gvar_cache = nullptr) const
{
if (!has_data ()) return false;
return get_points (font, gid, glyf_impl::path_builder_t (font, draw_session),
coords,
scratch,
gvar_cache);
}
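/* A single scratch buffer is cached in an atomic slot: acquire_scratch ()
 * swaps it out (allocating a fresh one under contention), and
 * release_scratch () swaps it back in, freeing the buffer if another
 * thread re-populated the slot in the meantime. */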
hb_glyf_scratch_t *acquire_scratch () const
{
hb_glyf_scratch_t *scratch = cached_scratch.get_acquire ();
if (!scratch || unlikely (!cached_scratch.cmpexch (scratch, nullptr)))
{
scratch = (hb_glyf_scratch_t *) hb_calloc (1, sizeof (hb_glyf_scratch_t));
if (unlikely (!scratch))
return nullptr;
}
return scratch;
}
void release_scratch (hb_glyf_scratch_t *scratch) const
{
if (!cached_scratch.cmpexch (nullptr, scratch))
{
scratch->~hb_glyf_scratch_t ();
hb_free (scratch);
}
}
#ifndef HB_NO_VAR
const gvar_accelerator_t *gvar;
#ifndef HB_NO_BEYOND_64K
const GVAR_accelerator_t *GVAR;
#endif
#endif
const hmtx_accelerator_t *hmtx;
#ifndef HB_NO_VERTICAL
const vmtx_accelerator_t *vmtx;
#endif
private:
bool short_offset;
unsigned int num_glyphs;
hb_blob_ptr_t<loca> loca_table;
hb_blob_ptr_t<glyf> glyf_table;
mutable hb_atomic_t<hb_glyf_scratch_t *> cached_scratch;
};
inline bool
glyf::_populate_subset_glyphs (const hb_subset_plan_t *plan,
hb_font_t *font,
hb_vector_t<glyf_impl::SubsetGlyph>& glyphs /* OUT */) const
{
OT::glyf_accelerator_t glyf (plan->source);
if (!glyphs.alloc_exact (plan->new_to_old_gid_list.length)) return false;
for (const auto &pair : plan->new_to_old_gid_list)
{
hb_codepoint_t new_gid = pair.first;
hb_codepoint_t old_gid = pair.second;
glyf_impl::SubsetGlyph *p = glyphs.push ();
glyf_impl::SubsetGlyph& subset_glyph = *p;
subset_glyph.old_gid = old_gid;
if (unlikely (old_gid == 0 && new_gid == 0 &&
!(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE)) &&
!plan->normalized_coords)
subset_glyph.source_glyph = glyf_impl::Glyph ();
else
{
/* If plan has an accelerator, the preprocessing step already trimmed glyphs.
* Don't trim them again! */
subset_glyph.source_glyph = glyf.glyph_for_gid (subset_glyph.old_gid, !plan->accelerator);
}
if (plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
subset_glyph.drop_hints_bytes ();
else
subset_glyph.dest_start = subset_glyph.source_glyph.get_bytes ();
if (font)
{
if (unlikely (!subset_glyph.compile_bytes_with_deltas (plan, font, glyf)))
{
// when pinned at default, only bounds are updated, thus no need to free
if (!plan->pinned_at_default)
_free_compiled_subset_glyphs (glyphs);
return false;
}
}
}
return true;
}
inline hb_font_t *
glyf::_create_font_for_instancing (const hb_subset_plan_t *plan) const
{
hb_font_t *font = hb_font_create (plan->source);
if (unlikely (font == hb_font_get_empty ())) return nullptr;
hb_vector_t<hb_variation_t> vars;
if (unlikely (!vars.alloc (plan->user_axes_location.get_population (), true)))
{
hb_font_destroy (font);
return nullptr;
}
for (auto _ : plan->user_axes_location)
{
hb_variation_t var;
var.tag = _.first;
var.value = _.second.middle;
vars.push (var);
}
#ifndef HB_NO_VAR
hb_font_set_variations (font, vars.arrayZ, plan->user_axes_location.get_population ());
#endif
return font;
}
} /* namespace OT */
#endif /* OT_GLYF_GLYF_HH */

thirdparty/harfbuzz/src/OT/glyf/loca.hh

@@ -0,0 +1,43 @@
#ifndef OT_GLYF_LOCA_HH
#define OT_GLYF_LOCA_HH
#include "../../hb-open-type.hh"
namespace OT {
/*
* loca -- Index to Location
* https://docs.microsoft.com/en-us/typography/opentype/spec/loca
*/
#define HB_OT_TAG_loca HB_TAG('l','o','c','a')
struct loca
{
friend struct glyf;
friend struct glyf_accelerator_t;
static constexpr hb_tag_t tableTag = HB_OT_TAG_loca;
bool sanitize (hb_sanitize_context_t *c HB_UNUSED) const
{
TRACE_SANITIZE (this);
return_trace (true);
}
protected:
UnsizedArrayOf<HBUINT8>
dataZ; /* Location data. */
public:
DEFINE_SIZE_MIN (0); /* In reality, this is UNBOUNDED() type; but since we always
* check the size externally, allow Null() object of it by
* defining it _MIN instead. */
};
} /* namespace OT */
#endif /* OT_GLYF_LOCA_HH */


@@ -0,0 +1,192 @@
#ifndef OT_GLYF_PATH_BUILDER_HH
#define OT_GLYF_PATH_BUILDER_HH
#include "../../hb.hh"
namespace OT {
namespace glyf_impl {
struct path_builder_t
{
hb_font_t *font;
hb_draw_session_t *draw_session;
struct optional_point_t
{
optional_point_t () {}
optional_point_t (float x_, float y_) : has_data (true), x (x_), y (y_) {}
operator bool () const { return has_data; }
bool has_data = false;
float x;
float y;
optional_point_t mid (optional_point_t p)
{ return optional_point_t ((x + p.x) * 0.5f, (y + p.y) * 0.5f); }
} first_oncurve, first_offcurve, first_offcurve2, last_offcurve, last_offcurve2;
path_builder_t (hb_font_t *font_, hb_draw_session_t &draw_session_) :
font (font_), draw_session (&draw_session_) {}
/* based on https://github.com/RazrFalcon/ttf-parser/blob/4f32821/src/glyf.rs#L287
See also:
* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html
* https://stackoverflow.com/a/20772557
*
* Cubic support added. */
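/* TrueType contours may omit on-curve points: between two consecutive
 * off-curve points there is an implied on-curve point at their midpoint
 * (e.g. off-curve (0,0) and (10,20) imply on-curve (5,10)), which is
 * what the mid () calls below reconstruct. */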
HB_ALWAYS_INLINE
void consume_point (const contour_point_t &point)
{
bool is_on_curve = point.flag & glyf_impl::SimpleGlyph::FLAG_ON_CURVE;
#ifdef HB_NO_CUBIC_GLYF
constexpr bool is_cubic = false;
#else
bool is_cubic = !is_on_curve && (point.flag & glyf_impl::SimpleGlyph::FLAG_CUBIC);
#endif
optional_point_t p (font->em_fscalef_x (point.x), font->em_fscalef_y (point.y));
if (unlikely (!first_oncurve))
{
if (is_on_curve)
{
first_oncurve = p;
draw_session->move_to (p.x, p.y);
}
else
{
if (is_cubic && !first_offcurve2)
{
first_offcurve2 = first_offcurve;
first_offcurve = p;
}
else if (first_offcurve)
{
optional_point_t mid = first_offcurve.mid (p);
first_oncurve = mid;
last_offcurve = p;
draw_session->move_to (mid.x, mid.y);
}
else
first_offcurve = p;
}
}
else
{
if (last_offcurve)
{
if (is_on_curve)
{
if (last_offcurve2)
{
draw_session->cubic_to (last_offcurve2.x, last_offcurve2.y,
last_offcurve.x, last_offcurve.y,
p.x, p.y);
last_offcurve2 = optional_point_t ();
}
else
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
p.x, p.y);
last_offcurve = optional_point_t ();
}
else
{
if (is_cubic && !last_offcurve2)
{
last_offcurve2 = last_offcurve;
last_offcurve = p;
}
else
{
optional_point_t mid = last_offcurve.mid (p);
if (is_cubic)
{
draw_session->cubic_to (last_offcurve2.x, last_offcurve2.y,
last_offcurve.x, last_offcurve.y,
mid.x, mid.y);
last_offcurve2 = optional_point_t ();
}
else
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
mid.x, mid.y);
last_offcurve = p;
}
}
}
else
{
if (is_on_curve)
draw_session->line_to (p.x, p.y);
else
last_offcurve = p;
}
}
}
void contour_end ()
{
if (first_offcurve && last_offcurve)
{
optional_point_t mid = last_offcurve.mid (first_offcurve2 ?
first_offcurve2 :
first_offcurve);
if (last_offcurve2)
draw_session->cubic_to (last_offcurve2.x, last_offcurve2.y,
last_offcurve.x, last_offcurve.y,
mid.x, mid.y);
else
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
mid.x, mid.y);
last_offcurve = optional_point_t ();
}
/* now check the rest */
if (first_offcurve && first_oncurve)
{
if (first_offcurve2)
draw_session->cubic_to (first_offcurve2.x, first_offcurve2.y,
first_offcurve.x, first_offcurve.y,
first_oncurve.x, first_oncurve.y);
else
draw_session->quadratic_to (first_offcurve.x, first_offcurve.y,
first_oncurve.x, first_oncurve.y);
}
else if (last_offcurve && first_oncurve)
{
if (last_offcurve2)
draw_session->cubic_to (last_offcurve2.x, last_offcurve2.y,
last_offcurve.x, last_offcurve.y,
first_oncurve.x, first_oncurve.y);
else
draw_session->quadratic_to (last_offcurve.x, last_offcurve.y,
first_oncurve.x, first_oncurve.y);
}
else if (first_oncurve)
draw_session->line_to (first_oncurve.x, first_oncurve.y);
else if (first_offcurve)
{
float x = first_offcurve.x, y = first_offcurve.y;
draw_session->move_to (x, y);
draw_session->quadratic_to (x, y, x, y);
}
/* Getting ready for the next contour */
first_oncurve = first_offcurve = last_offcurve = last_offcurve2 = optional_point_t ();
draw_session->close_path ();
}
void points_end () {}
bool is_consuming_contour_points () { return true; }
contour_point_t *get_phantoms_sink () { return nullptr; }
};
} /* namespace glyf_impl */
} /* namespace OT */
#endif /* OT_GLYF_PATH_BUILDER_HH */

thirdparty/harfbuzz/src/OT/name/name.hh

@@ -0,0 +1,589 @@
/*
* Copyright © 2011,2012 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Behdad Esfahbod
*/
#ifndef OT_NAME_NAME_HH
#define OT_NAME_NAME_HH
#include "../../hb-open-type.hh"
#include "../../hb-ot-name-language.hh"
#include "../../hb-aat-layout.hh"
#include "../../hb-utf.hh"
namespace OT {
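/* Convert a name-table string between UTF encodings in two passes: first
 * fill as much of `text` as fits (NUL-terminated, with the consumed size
 * written back to *text_size), then keep decoding to return the total
 * length the full conversion would need, so callers can size a buffer
 * and retry. */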
template <typename in_utf_t, typename out_utf_t>
inline unsigned int
hb_ot_name_convert_utf (hb_bytes_t bytes,
unsigned int *text_size /* IN/OUT */,
typename out_utf_t::codepoint_t *text /* OUT */)
{
unsigned int src_len = bytes.length / sizeof (typename in_utf_t::codepoint_t);
const typename in_utf_t::codepoint_t *src = (const typename in_utf_t::codepoint_t *) bytes.arrayZ;
const typename in_utf_t::codepoint_t *src_end = src + src_len;
typename out_utf_t::codepoint_t *dst = text;
hb_codepoint_t unicode;
const hb_codepoint_t replacement = HB_BUFFER_REPLACEMENT_CODEPOINT_DEFAULT;
if (text_size && *text_size)
{
(*text_size)--; /* Save room for NUL-termination. */
const typename out_utf_t::codepoint_t *dst_end = text + *text_size;
while (src < src_end && dst < dst_end)
{
const typename in_utf_t::codepoint_t *src_next = in_utf_t::next (src, src_end, &unicode, replacement);
typename out_utf_t::codepoint_t *dst_next = out_utf_t::encode (dst, dst_end, unicode);
if (dst_next == dst)
break; /* Out-of-room. */
dst = dst_next;
src = src_next;
}
*text_size = dst - text;
*dst = 0; /* NUL-terminate. */
}
/* Accumulate length of rest. */
unsigned int dst_len = dst - text;
while (src < src_end)
{
src = in_utf_t::next (src, src_end, &unicode, replacement);
dst_len += out_utf_t::encode_len (unicode);
}
return dst_len;
}
#define entry_score var.u16[0]
#define entry_index var.u16[1]
/*
* name -- Naming
* https://docs.microsoft.com/en-us/typography/opentype/spec/name
*/
#define HB_OT_TAG_name HB_TAG('n','a','m','e')
#define UNSUPPORTED 42
struct NameRecord
{
hb_language_t language (hb_face_t *face) const
{
#ifndef HB_NO_OT_NAME_LANGUAGE
unsigned int p = platformID;
unsigned int l = languageID;
if (p == 3)
return _hb_ot_name_language_for_ms_code (l);
if (p == 1)
return _hb_ot_name_language_for_mac_code (l);
#ifndef HB_NO_OT_NAME_LANGUAGE_AAT
if (p == 0)
return face->table.ltag->get_language (l);
#endif
#endif
return HB_LANGUAGE_INVALID;
}
uint16_t score () const
{
/* Same order as in cmap::find_best_subtable(). */
unsigned int p = platformID;
unsigned int e = encodingID;
/* 32-bit. */
if (p == 3 && e == 10) return 0;
if (p == 0 && e == 6) return 1;
if (p == 0 && e == 4) return 2;
/* 16-bit. */
if (p == 3 && e == 1) return 3;
if (p == 0 && e == 3) return 4;
if (p == 0 && e == 2) return 5;
if (p == 0 && e == 1) return 6;
if (p == 0 && e == 0) return 7;
/* Symbol. */
if (p == 3 && e == 0) return 8;
/* We treat all Mac Latin names as ASCII only. */
if (p == 1 && e == 0) return 10; /* 10 is magic number :| */
return UNSUPPORTED;
}
NameRecord* copy (hb_serialize_context_t *c, const void *base
#ifdef HB_EXPERIMENTAL_API
, const hb_hashmap_t<hb_ot_name_record_ids_t, hb_bytes_t> *name_table_overrides
#endif
) const
{
TRACE_SERIALIZE (this);
HB_UNUSED auto snap = c->snapshot ();
auto *out = c->embed (this);
if (unlikely (!out)) return_trace (nullptr);
#ifdef HB_EXPERIMENTAL_API
hb_ot_name_record_ids_t record_ids (platformID, encodingID, languageID, nameID);
hb_bytes_t* name_bytes;
if (name_table_overrides->has (record_ids, &name_bytes)) {
hb_bytes_t encoded_bytes = *name_bytes;
char *name_str_utf16_be = nullptr;
if (platformID != 1)
{
unsigned text_size = hb_ot_name_convert_utf<hb_utf8_t, hb_utf16_be_t> (*name_bytes, nullptr, nullptr);
text_size++; // make room for the NUL terminator required by hb_ot_name_convert_utf()
unsigned byte_len = text_size * hb_utf16_be_t::codepoint_t::static_size;
name_str_utf16_be = (char *) hb_calloc (byte_len, 1);
if (!name_str_utf16_be)
{
c->revert (snap);
return_trace (nullptr);
}
hb_ot_name_convert_utf<hb_utf8_t, hb_utf16_be_t> (*name_bytes, &text_size,
(hb_utf16_be_t::codepoint_t *) name_str_utf16_be);
unsigned encoded_byte_len = text_size * hb_utf16_be_t::codepoint_t::static_size;
if (!encoded_byte_len || !c->check_assign (out->length, encoded_byte_len, HB_SERIALIZE_ERROR_INT_OVERFLOW)) {
c->revert (snap);
hb_free (name_str_utf16_be);
return_trace (nullptr);
}
encoded_bytes = hb_bytes_t (name_str_utf16_be, encoded_byte_len);
}
else
{
// Mac platform: copy the UTF-8 string (all ASCII characters) as is
if (!c->check_assign (out->length, encoded_bytes.length, HB_SERIALIZE_ERROR_INT_OVERFLOW)) {
c->revert (snap);
return_trace (nullptr);
}
}
out->offset = 0;
c->push ();
encoded_bytes.copy (c);
c->add_link (out->offset, c->pop_pack (), hb_serialize_context_t::Tail, 0);
hb_free (name_str_utf16_be);
}
else
#endif
{
out->offset.serialize_copy (c, offset, base, 0, hb_serialize_context_t::Tail, length);
}
return_trace (out);
}
bool isUnicode () const
{
unsigned int p = platformID;
unsigned int e = encodingID;
return (p == 0 ||
(p == 3 && (e == 0 || e == 1 || e == 10)));
}
static int cmp (const void *pa, const void *pb)
{
const NameRecord *a = (const NameRecord *)pa;
const NameRecord *b = (const NameRecord *)pb;
if (a->platformID != b->platformID)
return a->platformID - b->platformID;
if (a->encodingID != b->encodingID)
return a->encodingID - b->encodingID;
if (a->languageID != b->languageID)
return a->languageID - b->languageID;
if (a->nameID != b->nameID)
return a->nameID - b->nameID;
if (a->length != b->length)
return a->length - b->length;
return 0;
}
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
hb_barrier () &&
offset.sanitize (c, base, length));
}
HBUINT16 platformID; /* Platform ID. */
HBUINT16 encodingID; /* Platform-specific encoding ID. */
HBUINT16 languageID; /* Language ID. */
HBUINT16 nameID; /* Name ID. */
HBUINT16 length; /* String length (in bytes). */
NNOffset16To<UnsizedArrayOf<HBUINT8>>
offset; /* String offset from start of storage area (in bytes). */
public:
DEFINE_SIZE_STATIC (12);
};
static int
_hb_ot_name_entry_cmp_key (const void *pa, const void *pb, bool exact)
{
const hb_ot_name_entry_t *a = (const hb_ot_name_entry_t *) pa;
const hb_ot_name_entry_t *b = (const hb_ot_name_entry_t *) pb;
/* Compare by name_id, then language. */
if (a->name_id != b->name_id)
return a->name_id - b->name_id;
if (a->language == b->language) return 0;
if (!a->language) return -1;
if (!b->language) return +1;
const char *astr = hb_language_to_string (a->language);
const char *bstr = hb_language_to_string (b->language);
signed c = strcmp (astr, bstr);
// 'a' is the user request, and 'b' is the string in the font.
// If e.g. the user asks for "en-us" and the font has "en", approve.
if (!exact && c &&
hb_language_matches (b->language, a->language))
return 0;
return c;
}
static int
_hb_ot_name_entry_cmp (const void *pa, const void *pb)
{
/* Compare by name_id, then language, then score, then index. */
int v = _hb_ot_name_entry_cmp_key (pa, pb, true);
if (v)
return v;
const hb_ot_name_entry_t *a = (const hb_ot_name_entry_t *) pa;
const hb_ot_name_entry_t *b = (const hb_ot_name_entry_t *) pb;
if (a->entry_score != b->entry_score)
return a->entry_score - b->entry_score;
if (a->entry_index != b->entry_index)
return a->entry_index - b->entry_index;
return 0;
}
struct name
{
static constexpr hb_tag_t tableTag = HB_OT_TAG_name;
unsigned int get_size () const
{ return min_size + count * nameRecordZ.item_size; }
template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, const NameRecord &))>
bool serialize (hb_serialize_context_t *c,
Iterator it,
const void *src_string_pool
#ifdef HB_EXPERIMENTAL_API
, const hb_vector_t<hb_ot_name_record_ids_t>& insert_name_records
, const hb_hashmap_t<hb_ot_name_record_ids_t, hb_bytes_t> *name_table_overrides
#endif
)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min ((*this)))) return_trace (false);
unsigned total_count = it.len ()
#ifdef HB_EXPERIMENTAL_API
+ insert_name_records.length
#endif
;
this->format = 0;
if (!c->check_assign (this->count, total_count, HB_SERIALIZE_ERROR_INT_OVERFLOW))
return false;
NameRecord *name_records = (NameRecord *) hb_calloc (total_count, NameRecord::static_size);
if (unlikely (!name_records)) return_trace (false);
hb_array_t<NameRecord> records (name_records, total_count);
for (const NameRecord& record : it)
{
hb_memcpy (name_records, &record, NameRecord::static_size);
name_records++;
}
#ifdef HB_EXPERIMENTAL_API
for (unsigned i = 0; i < insert_name_records.length; i++)
{
const hb_ot_name_record_ids_t& ids = insert_name_records[i];
NameRecord record;
record.platformID = ids.platform_id;
record.encodingID = ids.encoding_id;
record.languageID = ids.language_id;
record.nameID = ids.name_id;
record.length = 0; // handled in NameRecord copy()
record.offset = 0;
hb_memcpy (name_records, &record, NameRecord::static_size);
name_records++;
}
#endif
records.qsort ();
c->copy_all (records,
src_string_pool
#ifdef HB_EXPERIMENTAL_API
, name_table_overrides
#endif
);
hb_free (records.arrayZ);
if (unlikely (c->ran_out_of_room ())) return_trace (false);
this->stringOffset = c->length ();
return_trace (true);
}
bool subset (hb_subset_context_t *c) const
{
auto *name_prime = c->serializer->start_embed<name> ();
#ifdef HB_EXPERIMENTAL_API
const hb_hashmap_t<hb_ot_name_record_ids_t, hb_bytes_t> *name_table_overrides =
&c->plan->name_table_overrides;
#endif
auto it =
+ nameRecordZ.as_array (count)
| hb_filter (c->plan->name_ids, &NameRecord::nameID)
| hb_filter (c->plan->name_languages, &NameRecord::languageID)
| hb_filter ([&] (const NameRecord& namerecord) {
return
(c->plan->flags & HB_SUBSET_FLAGS_NAME_LEGACY)
|| namerecord.isUnicode ();
})
#ifdef HB_EXPERIMENTAL_API
| hb_filter ([&] (const NameRecord& namerecord) {
if (name_table_overrides->is_empty ())
return true;
hb_ot_name_record_ids_t rec_ids (namerecord.platformID,
namerecord.encodingID,
namerecord.languageID,
namerecord.nameID);
hb_bytes_t *p;
if (name_table_overrides->has (rec_ids, &p) &&
(*p).length == 0)
return false;
return true;
})
#endif
;
#ifdef HB_EXPERIMENTAL_API
hb_hashmap_t<hb_ot_name_record_ids_t, unsigned> retained_name_record_ids;
for (const NameRecord& rec : it)
{
hb_ot_name_record_ids_t rec_ids (rec.platformID,
rec.encodingID,
rec.languageID,
rec.nameID);
retained_name_record_ids.set (rec_ids, 1);
}
hb_vector_t<hb_ot_name_record_ids_t> insert_name_records;
if (!name_table_overrides->is_empty ())
{
if (unlikely (!insert_name_records.alloc (name_table_overrides->get_population (), true)))
return false;
for (const auto& record_ids : name_table_overrides->keys ())
{
if (name_table_overrides->get (record_ids).length == 0)
continue;
if (retained_name_record_ids.has (record_ids))
continue;
insert_name_records.push (record_ids);
}
}
#endif
return name_prime->serialize (c->serializer, it,
std::addressof (this + stringOffset)
#ifdef HB_EXPERIMENTAL_API
, insert_name_records
, name_table_overrides
#endif
);
}
bool sanitize_records (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
const void *string_pool = (this+stringOffset).arrayZ;
return_trace (nameRecordZ.sanitize (c, count, string_pool));
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this) &&
hb_barrier () &&
likely (format == 0 || format == 1) &&
c->check_array (nameRecordZ.arrayZ, count) &&
c->check_range (this, stringOffset) &&
sanitize_records (c));
}
struct accelerator_t
{
accelerator_t (hb_face_t *face)
{
this->table = hb_sanitize_context_t ().reference_table<name> (face);
assert (this->table.get_length () >= this->table->stringOffset);
this->pool = (const char *) (const void *) (this->table+this->table->stringOffset);
this->pool_len = this->table.get_length () - this->table->stringOffset;
const hb_array_t<const NameRecord> all_names (this->table->nameRecordZ.arrayZ,
this->table->count);
this->names.alloc_exact (all_names.length);
for (unsigned int i = 0; i < all_names.length; i++)
{
hb_ot_name_entry_t *entry = this->names.push ();
entry->name_id = all_names[i].nameID;
entry->language = all_names[i].language (face);
entry->entry_score = all_names[i].score ();
entry->entry_index = i;
}
this->names.qsort (_hb_ot_name_entry_cmp);
/* Walk and pick best only for each name_id,language pair,
* while dropping unsupported encodings. */
unsigned int j = 0;
for (unsigned int i = 0; i < this->names.length; i++)
{
if (this->names[i].entry_score == UNSUPPORTED ||
this->names[i].language == HB_LANGUAGE_INVALID)
continue;
if (i &&
this->names[i - 1].name_id == this->names[i].name_id &&
this->names[i - 1].language == this->names[i].language)
continue;
this->names[j++] = this->names[i];
}
this->names.resize (j);
}
~accelerator_t ()
{
this->table.destroy ();
}
int get_index (hb_ot_name_id_t name_id,
hb_language_t language,
unsigned int *width=nullptr) const
{
const hb_ot_name_entry_t key = {name_id, {0}, language};
const hb_ot_name_entry_t *entry = hb_bsearch (key, (const hb_ot_name_entry_t *) this->names,
this->names.length,
sizeof (hb_ot_name_entry_t),
_hb_ot_name_entry_cmp_key,
true);
if (!entry)
{
entry = hb_bsearch (key, (const hb_ot_name_entry_t *) this->names,
this->names.length,
sizeof (hb_ot_name_entry_t),
_hb_ot_name_entry_cmp_key,
false);
}
if (!entry)
return -1;
if (width)
*width = entry->entry_score < 10 ? 2 : 1;
return entry->entry_index;
}
hb_bytes_t get_name (unsigned int idx) const
{
const hb_array_t<const NameRecord> all_names (table->nameRecordZ.arrayZ, table->count);
const NameRecord &record = all_names[idx];
const hb_bytes_t string_pool (pool, pool_len);
return string_pool.sub_array (record.offset, record.length);
}
private:
const char *pool;
unsigned int pool_len;
public:
hb_blob_ptr_t<name> table;
hb_vector_t<hb_ot_name_entry_t> names;
};
public:
/* We only implement format 0 for now. */
HBUINT16 format; /* Format selector (=0/1). */
HBUINT16 count; /* Number of name records. */
NNOffset16To<UnsizedArrayOf<HBUINT8>>
stringOffset; /* Offset to start of string storage (from start of table). */
UnsizedArrayOf<NameRecord>
nameRecordZ; /* The name records where count is the number of records. */
public:
DEFINE_SIZE_ARRAY (6, nameRecordZ);
};
#undef entry_index
#undef entry_score
struct name_accelerator_t : name::accelerator_t {
name_accelerator_t (hb_face_t *face) : name::accelerator_t (face) {}
};
} /* namespace OT */
#endif /* OT_NAME_NAME_HH */


@@ -0,0 +1,257 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "graph.hh"
#include "../hb-ot-layout-common.hh"
#ifndef GRAPH_CLASSDEF_GRAPH_HH
#define GRAPH_CLASSDEF_GRAPH_HH
namespace graph {
struct ClassDefFormat1 : public OT::ClassDefFormat1_3<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::ClassDefFormat1_3<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >= min_size + classValue.get_size () - classValue.len.get_size ();
}
};
struct ClassDefFormat2 : public OT::ClassDefFormat2_4<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::ClassDefFormat2_4<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >= min_size + rangeRecord.get_size () - rangeRecord.len.get_size ();
}
};
struct ClassDef : public OT::ClassDef
{
template<typename It>
static bool add_class_def (gsubgpos_graph_context_t& c,
unsigned parent_id,
unsigned link_position,
It glyph_and_class,
unsigned max_size)
{
unsigned class_def_prime_id = c.graph.new_node (nullptr, nullptr);
auto& class_def_prime_vertex = c.graph.vertices_[class_def_prime_id];
if (!make_class_def (c, glyph_and_class, class_def_prime_id, max_size))
return false;
auto* class_def_link = c.graph.vertices_[parent_id].obj.real_links.push ();
class_def_link->width = SmallTypes::size;
class_def_link->objidx = class_def_prime_id;
class_def_link->position = link_position;
class_def_prime_vertex.add_parent (parent_id, false);
return true;
}
template<typename It>
static bool make_class_def (gsubgpos_graph_context_t& c,
It glyph_and_class,
unsigned dest_obj,
unsigned max_size)
{
char* buffer = (char*) hb_calloc (1, max_size);
hb_serialize_context_t serializer (buffer, max_size);
OT::ClassDef_serialize (&serializer, glyph_and_class);
serializer.end_serialize ();
if (serializer.in_error ())
{
hb_free (buffer);
return false;
}
hb_bytes_t class_def_copy = serializer.copy_bytes ();
if (!class_def_copy.arrayZ) return false;
// Give ownership to the context; it will clean up the buffer.
if (!c.add_buffer ((char *) class_def_copy.arrayZ))
{
hb_free ((char *) class_def_copy.arrayZ);
return false;
}
auto& obj = c.graph.vertices_[dest_obj].obj;
obj.head = (char *) class_def_copy.arrayZ;
obj.tail = obj.head + class_def_copy.length;
hb_free (buffer);
return true;
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::ClassDef::min_size) return false;
hb_barrier ();
switch (u.format)
{
case 1: return ((ClassDefFormat1*)this)->sanitize (vertex);
case 2: return ((ClassDefFormat2*)this)->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
// Not currently supported
case 3:
case 4:
#endif
default: return false;
}
}
};
struct class_def_size_estimator_t
{
// TODO(garretrieger): update to support beyond64k coverage/classdef tables.
constexpr static unsigned class_def_format1_base_size = 6;
constexpr static unsigned class_def_format2_base_size = 4;
constexpr static unsigned coverage_base_size = 4;
constexpr static unsigned bytes_per_range = 6;
constexpr static unsigned bytes_per_glyph = 2;
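// With these constants, a format 1 ClassDef spans the contiguous gid
// range [min, max] at 6 + 2 * (max - min + 1) bytes, while format 2
// costs 4 + 6 * num_ranges. E.g. glyphs {10..12, 50} in one class:
// format 1 = 6 + 2*41 = 88 bytes, format 2 = 4 + 6*2 = 16 bytes, so
// the estimator below picks the cheaper of the two.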
template<typename It>
class_def_size_estimator_t (It glyph_and_class)
: num_ranges_per_class (), glyphs_per_class ()
{
reset();
for (auto p : + glyph_and_class)
{
unsigned gid = p.first;
unsigned klass = p.second;
hb_set_t* glyphs;
if (glyphs_per_class.has (klass, &glyphs) && glyphs) {
glyphs->add (gid);
continue;
}
hb_set_t new_glyphs;
new_glyphs.add (gid);
glyphs_per_class.set (klass, std::move (new_glyphs));
}
if (in_error ()) return;
for (unsigned klass : glyphs_per_class.keys ())
{
if (!klass) continue; // class 0 doesn't get encoded.
const hb_set_t& glyphs = glyphs_per_class.get (klass);
hb_codepoint_t start = HB_SET_VALUE_INVALID;
hb_codepoint_t end = HB_SET_VALUE_INVALID;
unsigned count = 0;
while (glyphs.next_range (&start, &end))
count++;
num_ranges_per_class.set (klass, count);
}
}
void reset() {
class_def_1_size = class_def_format1_base_size;
class_def_2_size = class_def_format2_base_size;
included_glyphs.clear();
included_classes.clear();
}
// Compute the size of coverage for all glyphs added via 'add_class_def_size'.
unsigned coverage_size () const
{
unsigned format1_size = coverage_base_size + bytes_per_glyph * included_glyphs.get_population();
unsigned format2_size = coverage_base_size + bytes_per_range * num_glyph_ranges();
return hb_min(format1_size, format2_size);
}
// Compute the new size of the ClassDef table if all glyphs associated with 'klass' were added.
unsigned add_class_def_size (unsigned klass)
{
if (!included_classes.has(klass)) {
hb_set_t* glyphs = nullptr;
if (glyphs_per_class.has(klass, &glyphs)) {
included_glyphs.union_(*glyphs);
}
class_def_1_size = class_def_format1_base_size;
if (!included_glyphs.is_empty()) {
unsigned min_glyph = included_glyphs.get_min();
unsigned max_glyph = included_glyphs.get_max();
class_def_1_size += bytes_per_glyph * (max_glyph - min_glyph + 1);
}
class_def_2_size += bytes_per_range * num_ranges_per_class.get (klass);
included_classes.add(klass);
}
return hb_min (class_def_1_size, class_def_2_size);
}
unsigned num_glyph_ranges() const {
hb_codepoint_t start = HB_SET_VALUE_INVALID;
hb_codepoint_t end = HB_SET_VALUE_INVALID;
unsigned count = 0;
while (included_glyphs.next_range (&start, &end)) {
count++;
}
return count;
}
bool in_error ()
{
if (num_ranges_per_class.in_error ()) return true;
if (glyphs_per_class.in_error ()) return true;
for (const hb_set_t& s : glyphs_per_class.values ())
{
if (s.in_error ()) return true;
}
return false;
}
private:
hb_hashmap_t<unsigned, unsigned> num_ranges_per_class;
hb_hashmap_t<unsigned, hb_set_t> glyphs_per_class;
hb_set_t included_classes;
hb_set_t included_glyphs;
unsigned class_def_1_size;
unsigned class_def_2_size;
};
}
#endif // GRAPH_CLASSDEF_GRAPH_HH


@@ -0,0 +1,183 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "graph.hh"
#include "../OT/Layout/Common/Coverage.hh"
#ifndef GRAPH_COVERAGE_GRAPH_HH
#define GRAPH_COVERAGE_GRAPH_HH
namespace graph {
struct CoverageFormat1 : public OT::Layout::Common::CoverageFormat1_3<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::Layout::Common::CoverageFormat1_3<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >= min_size + glyphArray.get_size () - glyphArray.len.get_size ();
}
};
struct CoverageFormat2 : public OT::Layout::Common::CoverageFormat2_4<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::Layout::Common::CoverageFormat2_4<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >= min_size + rangeRecord.get_size () - rangeRecord.len.get_size ();
}
};
struct Coverage : public OT::Layout::Common::Coverage
{
static Coverage* clone_coverage (gsubgpos_graph_context_t& c,
unsigned coverage_id,
unsigned new_parent_id,
unsigned link_position,
unsigned start, unsigned end)
{
unsigned coverage_size = c.graph.vertices_[coverage_id].table_size ();
auto& coverage_v = c.graph.vertices_[coverage_id];
Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
if (!coverage_table || !coverage_table->sanitize (coverage_v))
return nullptr;
auto new_coverage =
+ hb_zip (coverage_table->iter (), hb_range ())
| hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
return p.second >= start && p.second < end;
})
| hb_map_retains_sorting (hb_first)
;
return add_coverage (c, new_parent_id, link_position, new_coverage, coverage_size);
}
template<typename It>
static Coverage* add_coverage (gsubgpos_graph_context_t& c,
unsigned parent_id,
unsigned link_position,
It glyphs,
unsigned max_size)
{
unsigned coverage_prime_id = c.graph.new_node (nullptr, nullptr);
auto& coverage_prime_vertex = c.graph.vertices_[coverage_prime_id];
if (!make_coverage (c, glyphs, coverage_prime_id, max_size))
return nullptr;
auto* coverage_link = c.graph.vertices_[parent_id].obj.real_links.push ();
coverage_link->width = SmallTypes::size;
coverage_link->objidx = coverage_prime_id;
coverage_link->position = link_position;
coverage_prime_vertex.add_parent (parent_id, false);
return (Coverage*) coverage_prime_vertex.obj.head;
}
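// A minimal usage sketch (node ids and the size bound are hypothetical):
// attach a coverage table for glyphs {1, 2, 5} to parent node 7, with the
// offset field at byte position 2 of the parent:
//
// hb_set_t glyphs;
// glyphs.add_range (1, 2);
// glyphs.add (5);
// Coverage* cov = Coverage::add_coverage (c, 7, 2, glyphs.iter (), 64);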
// Filter an existing coverage table to glyphs at indices [start, end) and replace it with the filtered version.
static bool filter_coverage (gsubgpos_graph_context_t& c,
unsigned existing_coverage,
unsigned start, unsigned end) {
unsigned coverage_size = c.graph.vertices_[existing_coverage].table_size ();
auto& coverage_v = c.graph.vertices_[existing_coverage];
Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
if (!coverage_table || !coverage_table->sanitize (coverage_v))
return false;
auto new_coverage =
+ hb_zip (coverage_table->iter (), hb_range ())
| hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
return p.second >= start && p.second < end;
})
| hb_map_retains_sorting (hb_first)
;
return make_coverage (c, new_coverage, existing_coverage, coverage_size * 2 + 100);
}
// Replace the coverage table at dest obj with one covering 'glyphs'.
template<typename It>
static bool make_coverage (gsubgpos_graph_context_t& c,
It glyphs,
unsigned dest_obj,
unsigned max_size)
{
char* buffer = (char*) hb_calloc (1, max_size);
hb_serialize_context_t serializer (buffer, max_size);
OT::Layout::Common::Coverage_serialize (&serializer, glyphs);
serializer.end_serialize ();
if (serializer.in_error ())
{
hb_free (buffer);
return false;
}
hb_bytes_t coverage_copy = serializer.copy_bytes ();
if (!coverage_copy.arrayZ)
{
hb_free (buffer);
return false;
}
// Give ownership to the context, it will cleanup the buffer.
if (!c.add_buffer ((char *) coverage_copy.arrayZ))
{
hb_free ((char *) coverage_copy.arrayZ);
return false;
}
auto& obj = c.graph.vertices_[dest_obj].obj;
obj.head = (char *) coverage_copy.arrayZ;
obj.tail = obj.head + coverage_copy.length;
hb_free (buffer);
return true;
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::Layout::Common::Coverage::min_size) return false;
hb_barrier ();
switch (u.format)
{
case 1: return ((CoverageFormat1*)this)->sanitize (vertex);
case 2: return ((CoverageFormat2*)this)->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
// Not currently supported
case 3:
case 4:
#endif
default: return false;
}
}
};
}
#endif // GRAPH_COVERAGE_GRAPH_HH

1646 thirdparty/harfbuzz/src/graph/graph.hh vendored Normal file

File diff suppressed because it is too large


@@ -0,0 +1,74 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "gsubgpos-graph.hh"
namespace graph {
gsubgpos_graph_context_t::gsubgpos_graph_context_t (hb_tag_t table_tag_,
graph_t& graph_)
: table_tag (table_tag_),
graph (graph_),
lookup_list_index (0),
lookups ()
{
if (table_tag_ != HB_OT_TAG_GPOS
&& table_tag_ != HB_OT_TAG_GSUB)
return;
GSTAR* gstar = graph::GSTAR::graph_to_gstar (graph_);
if (gstar) {
gstar->find_lookups (graph, lookups);
lookup_list_index = gstar->get_lookup_list_index (graph_);
}
}
unsigned gsubgpos_graph_context_t::create_node (unsigned size)
{
char* buffer = (char*) hb_calloc (1, size);
if (!buffer)
return -1;
if (!add_buffer (buffer)) {
// Allocation did not get stored for freeing later.
hb_free (buffer);
return -1;
}
return graph.new_node (buffer, buffer + size);
}
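// create_node either returns a node backed by a zeroed, context-owned buffer
// of the requested size, or (unsigned) -1 when allocation fails; callers
// throughout treat -1 as the failure sentinel.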
unsigned gsubgpos_graph_context_t::num_non_ext_subtables () {
unsigned count = 0;
for (auto l : lookups.values ())
{
if (l->is_extension (table_tag)) continue;
count += l->number_of_subtables ();
}
return count;
}
}


@@ -0,0 +1,61 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "graph.hh"
#include "../hb-ot-layout-gsubgpos.hh"
#ifndef GRAPH_GSUBGPOS_CONTEXT_HH
#define GRAPH_GSUBGPOS_CONTEXT_HH
namespace graph {
struct Lookup;
struct gsubgpos_graph_context_t
{
hb_tag_t table_tag;
graph_t& graph;
unsigned lookup_list_index;
hb_hashmap_t<unsigned, graph::Lookup*> lookups;
hb_hashmap_t<unsigned, unsigned> subtable_to_extension;
HB_INTERNAL gsubgpos_graph_context_t (hb_tag_t table_tag_,
graph_t& graph_);
HB_INTERNAL unsigned create_node (unsigned size);
bool add_buffer (char* buffer)
{
return graph.add_buffer (buffer);
}
private:
HB_INTERNAL unsigned num_non_ext_subtables ();
};
}
#endif // GRAPH_GSUBGPOS_CONTEXT_HH


@@ -0,0 +1,461 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#include "graph.hh"
#include "../hb-ot-layout-gsubgpos.hh"
#include "../OT/Layout/GSUB/ExtensionSubst.hh"
#include "../OT/Layout/GSUB/SubstLookupSubTable.hh"
#include "gsubgpos-context.hh"
#include "pairpos-graph.hh"
#include "markbasepos-graph.hh"
#include "ligature-graph.hh"
#ifndef GRAPH_GSUBGPOS_GRAPH_HH
#define GRAPH_GSUBGPOS_GRAPH_HH
namespace graph {
struct Lookup;
template<typename T>
struct ExtensionFormat1 : public OT::ExtensionFormat1<T>
{
void reset(unsigned type)
{
this->format = 1;
this->extensionLookupType = type;
this->extensionOffset = 0;
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
return vertex_len >= OT::ExtensionFormat1<T>::static_size;
}
unsigned get_lookup_type () const
{
return this->extensionLookupType;
}
unsigned get_subtable_index (graph_t& graph, unsigned this_index) const
{
return graph.index_for_offset (this_index, &this->extensionOffset);
}
};
struct Lookup : public OT::Lookup
{
unsigned number_of_subtables () const
{
return subTable.len;
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::Lookup::min_size) return false;
hb_barrier ();
return vertex_len >= this->get_size ();
}
bool is_extension (hb_tag_t table_tag) const
{
return lookupType == extension_type (table_tag);
}
bool make_extension (gsubgpos_graph_context_t& c,
unsigned this_index)
{
unsigned type = lookupType;
unsigned ext_type = extension_type (c.table_tag);
if (!ext_type || is_extension (c.table_tag))
{
// NOOP
return true;
}
DEBUG_MSG (SUBSET_REPACK, nullptr,
"Promoting lookup type %u (obj %u) to extension.",
type,
this_index);
for (unsigned i = 0; i < subTable.len; i++)
{
unsigned subtable_index = c.graph.index_for_offset (this_index, &subTable[i]);
if (!make_subtable_extension (c,
this_index,
subtable_index))
return false;
}
lookupType = ext_type;
return true;
}
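// e.g. promoting a GPOS PairPos lookup (type 2) rewires each
// Lookup -> subtable link into Lookup -> Extension (type 9) -> subtable,
// so the subtable sits behind the extension's 32-bit offset instead of the
// lookup's 16-bit one.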
bool split_subtables_if_needed (gsubgpos_graph_context_t& c,
unsigned this_index)
{
unsigned type = lookupType;
bool is_ext = is_extension (c.table_tag);
if (c.table_tag != HB_OT_TAG_GPOS && c.table_tag != HB_OT_TAG_GSUB)
return true;
if (!is_ext && !is_supported_gpos_type(type, c) && !is_supported_gsub_type(type, c))
return true;
hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>> all_new_subtables;
for (unsigned i = 0; i < subTable.len; i++)
{
unsigned subtable_index = c.graph.index_for_offset (this_index, &subTable[i]);
unsigned parent_index = this_index;
if (is_ext) {
unsigned ext_subtable_index = subtable_index;
parent_index = ext_subtable_index;
ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>* extension =
(ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>*)
c.graph.object (ext_subtable_index).head;
if (!extension || !extension->sanitize (c.graph.vertices_[ext_subtable_index]))
continue;
subtable_index = extension->get_subtable_index (c.graph, ext_subtable_index);
type = extension->get_lookup_type ();
if (!is_supported_gpos_type(type, c) && !is_supported_gsub_type(type, c))
continue;
}
hb_vector_t<unsigned> new_sub_tables;
if (c.table_tag == HB_OT_TAG_GPOS) {
switch (type)
{
case 2:
new_sub_tables = split_subtable<PairPos> (c, parent_index, subtable_index); break;
case 4:
new_sub_tables = split_subtable<MarkBasePos> (c, parent_index, subtable_index); break;
default:
break;
}
} else if (c.table_tag == HB_OT_TAG_GSUB) {
switch (type)
{
case 4:
new_sub_tables = split_subtable<graph::LigatureSubst> (c, parent_index, subtable_index); break;
default:
break;
}
}
if (new_sub_tables.in_error ()) return false;
if (!new_sub_tables) continue;
hb_pair_t<unsigned, hb_vector_t<unsigned>>* entry = all_new_subtables.push ();
entry->first = i;
entry->second = std::move (new_sub_tables);
}
if (all_new_subtables) {
return add_sub_tables (c, this_index, type, all_new_subtables);
}
return true;
}
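// The split flow in brief: resolve each subtable (unwrapping extensions
// first), have the type-specific graph struct compute and actuate its split
// points, then splice any newly created subtables back into this lookup's
// subTable array via add_sub_tables below.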
template<typename T>
hb_vector_t<unsigned> split_subtable (gsubgpos_graph_context_t& c,
unsigned parent_idx,
unsigned objidx)
{
T* sub_table = (T*) c.graph.object (objidx).head;
if (!sub_table || !sub_table->sanitize (c.graph.vertices_[objidx]))
return hb_vector_t<unsigned> ();
return sub_table->split_subtables (c, parent_idx, objidx);
}
bool add_sub_tables (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned type,
hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
{
bool is_ext = is_extension (c.table_tag);
auto* v = &c.graph.vertices_[this_index];
fix_existing_subtable_links (c, this_index, subtable_ids);
unsigned new_subtable_count = 0;
for (const auto& p : subtable_ids)
new_subtable_count += p.second.length;
size_t new_size = v->table_size ()
+ new_subtable_count * OT::Offset16::static_size;
char* buffer = (char*) hb_calloc (1, new_size);
if (!buffer) return false;
if (!c.add_buffer (buffer))
{
hb_free (buffer);
return false;
}
hb_memcpy (buffer, v->obj.head, v->table_size());
v->obj.head = buffer;
v->obj.tail = buffer + new_size;
Lookup* new_lookup = (Lookup*) buffer;
unsigned shift = 0;
new_lookup->subTable.len = subTable.len + new_subtable_count;
for (const auto& p : subtable_ids)
{
unsigned offset_index = p.first + shift + 1;
shift += p.second.length;
for (unsigned subtable_id : p.second)
{
if (is_ext)
{
unsigned ext_id = create_extension_subtable (c, subtable_id, type);
c.graph.vertices_[subtable_id].add_parent (ext_id, false);
subtable_id = ext_id;
// the reference to v may have changed on adding a node, so reassign it.
v = &c.graph.vertices_[this_index];
}
auto* link = v->obj.real_links.push ();
link->width = 2;
link->objidx = subtable_id;
link->position = (char*) &new_lookup->subTable[offset_index++] -
(char*) new_lookup;
c.graph.vertices_[subtable_id].add_parent (this_index, false);
}
}
// Repacker sort order depends on link order, which we've messed up, so re-sort it.
v->obj.real_links.qsort ();
// The head location of the lookup has changed, invalidating the lookups map entry
// in the context. Update the map.
c.lookups.set (this_index, new_lookup);
return true;
}
void fix_existing_subtable_links (gsubgpos_graph_context_t& c,
unsigned this_index,
hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
{
auto& v = c.graph.vertices_[this_index];
Lookup* lookup = (Lookup*) v.obj.head;
unsigned shift = 0;
for (const auto& p : subtable_ids)
{
unsigned insert_index = p.first + shift;
unsigned pos_offset = p.second.length * OT::Offset16::static_size;
unsigned insert_offset = (char*) &lookup->subTable[insert_index] - (char*) lookup;
shift += p.second.length;
for (auto& l : v.obj.all_links_writer ())
{
if (l.position > insert_offset) l.position += pos_offset;
}
}
}
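// e.g. inserting 2 new subtables after slot 1 of the subTable array shifts
// every link whose position lies past that slot by 2 * Offset16 = 4 bytes,
// keeping the surviving offsets pointed at their original subtables.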
unsigned create_extension_subtable (gsubgpos_graph_context_t& c,
unsigned subtable_index,
unsigned type)
{
unsigned extension_size = OT::ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>::static_size;
unsigned ext_index = c.create_node (extension_size);
if (ext_index == (unsigned) -1)
return -1;
auto& ext_vertex = c.graph.vertices_[ext_index];
ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>* extension =
(ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>*) ext_vertex.obj.head;
extension->reset (type);
// Make extension point at the subtable.
auto* l = ext_vertex.obj.real_links.push ();
l->width = 4;
l->objidx = subtable_index;
l->position = 4;
return ext_index;
}
bool make_subtable_extension (gsubgpos_graph_context_t& c,
unsigned lookup_index,
unsigned subtable_index)
{
unsigned type = lookupType;
unsigned ext_index = -1;
unsigned* existing_ext_index = nullptr;
if (c.subtable_to_extension.has(subtable_index, &existing_ext_index)) {
ext_index = *existing_ext_index;
} else {
ext_index = create_extension_subtable(c, subtable_index, type);
c.subtable_to_extension.set(subtable_index, ext_index);
}
if (ext_index == (unsigned) -1)
return false;
auto& subtable_vertex = c.graph.vertices_[subtable_index];
auto& lookup_vertex = c.graph.vertices_[lookup_index];
for (auto& l : lookup_vertex.obj.real_links.writer ())
{
if (l.objidx == subtable_index) {
// Change lookup to point at the extension.
l.objidx = ext_index;
if (existing_ext_index)
subtable_vertex.remove_parent(lookup_index);
}
}
// Make extension point at the subtable.
auto& ext_vertex = c.graph.vertices_[ext_index];
ext_vertex.add_parent (lookup_index, false);
if (!existing_ext_index)
subtable_vertex.remap_parent (lookup_index, ext_index);
return true;
}
private:
bool is_supported_gsub_type(unsigned type, gsubgpos_graph_context_t& c) const {
return (c.table_tag == HB_OT_TAG_GSUB) && (
type == OT::Layout::GSUB_impl::SubstLookupSubTable::Type::Ligature
);
}
bool is_supported_gpos_type(unsigned type, gsubgpos_graph_context_t& c) const {
return (c.table_tag == HB_OT_TAG_GPOS) && (
type == OT::Layout::GPOS_impl::PosLookupSubTable::Type::Pair ||
type == OT::Layout::GPOS_impl::PosLookupSubTable::Type::MarkBase
);
}
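// Extension lookup types defined by the OpenType spec: 7 is GSUB
// ExtensionSubst, 9 is GPOS ExtensionPos.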
unsigned extension_type (hb_tag_t table_tag) const
{
switch (table_tag)
{
case HB_OT_TAG_GPOS: return 9;
case HB_OT_TAG_GSUB: return 7;
default: return 0;
}
}
};
template <typename T>
struct LookupList : public OT::LookupList<T>
{
bool sanitize (const graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::LookupList<T>::min_size) return false;
hb_barrier ();
return vertex_len >= OT::LookupList<T>::item_size * this->len;
}
};
struct GSTAR : public OT::GSUBGPOS
{
static GSTAR* graph_to_gstar (graph_t& graph)
{
const auto& r = graph.root ();
GSTAR* gstar = (GSTAR*) r.obj.head;
if (!gstar || !gstar->sanitize (r))
return nullptr;
hb_barrier ();
return gstar;
}
const void* get_lookup_list_field_offset () const
{
switch (u.version.major) {
case 1: return u.version1.get_lookup_list_offset ();
#ifndef HB_NO_BEYOND_64K
case 2: return u.version2.get_lookup_list_offset ();
#endif
default: return 0;
}
}
bool sanitize (const graph_t::vertex_t& vertex)
{
int64_t len = vertex.obj.tail - vertex.obj.head;
if (len < OT::GSUBGPOS::min_size) return false;
hb_barrier ();
return len >= get_size ();
}
void find_lookups (graph_t& graph,
hb_hashmap_t<unsigned, Lookup*>& lookups /* OUT */)
{
switch (u.version.major) {
case 1: find_lookups<SmallTypes> (graph, lookups); break;
#ifndef HB_NO_BEYOND_64K
case 2: find_lookups<MediumTypes> (graph, lookups); break;
#endif
}
}
unsigned get_lookup_list_index (graph_t& graph)
{
return graph.index_for_offset (graph.root_idx (),
get_lookup_list_field_offset());
}
template<typename Types>
void find_lookups (graph_t& graph,
hb_hashmap_t<unsigned, Lookup*>& lookups /* OUT */)
{
unsigned lookup_list_idx = get_lookup_list_index (graph);
const LookupList<Types>* lookupList =
(const LookupList<Types>*) graph.object (lookup_list_idx).head;
if (!lookupList || !lookupList->sanitize (graph.vertices_[lookup_list_idx]))
return;
for (unsigned i = 0; i < lookupList->len; i++)
{
unsigned lookup_idx = graph.index_for_offset (lookup_list_idx, &(lookupList->arrayZ[i]));
Lookup* lookup = (Lookup*) graph.object (lookup_idx).head;
if (!lookup || !lookup->sanitize (graph.vertices_[lookup_idx])) continue;
lookups.set (lookup_idx, lookup);
}
}
};
}
#endif /* GRAPH_GSUBGPOS_GRAPH_HH */


@@ -0,0 +1,480 @@
/*
* Copyright © 2025 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#ifndef GRAPH_LIGATURE_GRAPH_HH
#define GRAPH_LIGATURE_GRAPH_HH
#include "graph.hh"
#include "../OT/Layout/GSUB/LigatureSubst.hh"
#include "../OT/Layout/GSUB/LigatureSubstFormat1.hh"
#include "../OT/Layout/GSUB/LigatureSet.hh"
#include "../OT/Layout/types.hh"
#include <algorithm>
#include <utility>
namespace graph {
struct LigatureSet : public OT::Layout::GSUB_impl::LigatureSet<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::Layout::GSUB_impl::LigatureSet<SmallTypes>::min_size) return false;
hb_barrier ();
int64_t total_len = ligature.get_size() + OT::Layout::GSUB_impl::LigatureSet<SmallTypes>::min_size - ligature.len.get_size();
if (vertex_len < total_len) {
return false;
}
return true;
}
};
struct LigatureSubstFormat1 : public OT::Layout::GSUB_impl::LigatureSubstFormat1_2<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
unsigned min_size = OT::Layout::GSUB_impl::LigatureSubstFormat1_2<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >=
min_size + ligatureSet.get_size() - ligatureSet.len.get_size();
}
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
auto split_points = compute_split_points(c, parent_index, this_index);
split_context_t split_context {
c,
this,
c.graph.duplicate_if_shared (parent_index, this_index),
total_number_ligas(c, this_index),
liga_counts(c, this_index),
};
return actuate_subtable_split<split_context_t> (split_context, split_points);
}
private:
unsigned total_number_ligas(gsubgpos_graph_context_t& c, unsigned this_index) const {
unsigned total = 0;
for (unsigned i = 0; i < ligatureSet.len; i++)
{
auto liga_set = c.graph.as_table<LigatureSet>(this_index, &ligatureSet[i]);
if (!liga_set.table) {
return 0;
}
total += liga_set.table->ligature.len;
}
return total;
}
hb_vector_t<unsigned> liga_counts(gsubgpos_graph_context_t& c, unsigned this_index) const {
hb_vector_t<unsigned> result;
for (unsigned i = 0; i < ligatureSet.len; i++)
{
auto liga_set = c.graph.as_table<LigatureSet>(this_index, &ligatureSet[i]);
result.push(!liga_set.table ? 0 : liga_set.table->ligature.len);
}
return result;
}
hb_vector_t<unsigned> compute_split_points(gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index) const
{
// For ligature subst the coverage table is always packed last, and as a result is where an
// overflow will happen if there is one. So we can estimate the length of the
// LigatureSubstFormat1 -> Coverage offset, which is the sum of all data in the
// retained subgraph except for the coverage table itself.
const unsigned base_size = OT::Layout::GSUB_impl::LigatureSubstFormat1_2<SmallTypes>::min_size;
unsigned accumulated = base_size;
unsigned ligature_index = 0;
hb_vector_t<unsigned> split_points;
for (unsigned i = 0; i < ligatureSet.len; i++)
{
accumulated += OT::HBUINT16::static_size; // for ligature set offset
accumulated += OT::Layout::GSUB_impl::LigatureSet<SmallTypes>::min_size; // for ligature set table
auto liga_set = c.graph.as_table<LigatureSet>(this_index, &ligatureSet[i]);
if (!liga_set.table) {
return hb_vector_t<unsigned> {};
}
for (unsigned j = 0; j < liga_set.table->ligature.len; j++)
{
const unsigned liga_id = c.graph.index_for_offset (liga_set.index, &liga_set.table->ligature[j]);
const unsigned liga_size = c.graph.vertices_[liga_id].table_size ();
accumulated += OT::HBUINT16::static_size; // for ligature offset
accumulated += liga_size; // for the ligature table
if (accumulated >= (1 << 16))
{
split_points.push(ligature_index);
// We're going to split such that the current ligature will be in the new subtable.
// That means we'll have one ligature subst (base_size), one ligature set, and one liga table.
accumulated = base_size + // for liga subst subtable
(OT::HBUINT16::static_size * 2) + // for liga set and liga offset
OT::Layout::GSUB_impl::LigatureSet<SmallTypes>::min_size + // for liga set subtable
liga_size; // for liga sub table
}
ligature_index++;
}
}
return split_points;
}
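// Offsets are 16 bits wide, so once the data packed between the subtable
// start and the trailing coverage table reaches 1 << 16 bytes the
// LigatureSubstFormat1 -> Coverage offset would overflow; each split point
// records the first ligature index destined for a fresh subtable.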
struct split_context_t
{
gsubgpos_graph_context_t& c;
LigatureSubstFormat1* thiz;
unsigned this_index;
unsigned original_count_;
hb_vector_t<unsigned> liga_counts;
unsigned original_count ()
{
return original_count_;
}
unsigned clone_range (unsigned start, unsigned end)
{
return thiz->clone_range (c, this_index, liga_counts, start, end);
}
bool shrink (unsigned count)
{
return thiz->shrink (c, this_index, original_count(), liga_counts, count);
}
};
hb_pair_t<unsigned, LigatureSet*> new_liga_set(gsubgpos_graph_context_t& c, unsigned count) const {
unsigned prime_size = OT::Layout::GSUB_impl::LigatureSet<SmallTypes>::min_size
+ count * SmallTypes::size;
unsigned prime_id = c.create_node (prime_size);
if (prime_id == (unsigned) -1) return hb_pair(-1, nullptr);
LigatureSet* prime = (LigatureSet*) c.graph.object (prime_id).head;
prime->ligature.len = count;
return hb_pair(prime_id, prime);
}
void clear_virtual_links (gsubgpos_graph_context_t& c, unsigned node_index) const
{
auto& obj = c.graph.vertices_[node_index].obj;
for (const auto& l : obj.virtual_links)
{
auto& child = c.graph.vertices_[l.objidx];
child.remove_parent(node_index);
}
obj.virtual_links.clear();
}
void add_virtual_link(gsubgpos_graph_context_t& c, unsigned from, unsigned to) const {
auto& from_obj = c.graph.vertices_[from].obj;
c.graph.vertices_[to].add_parent(from, true);
auto& link = *from_obj.virtual_links.push ();
link.objidx = to;
}
hb_pair_t<unsigned, unsigned> current_liga_set_bounds (gsubgpos_graph_context_t& c,
unsigned liga_set_index,
const hb_serialize_context_t::object_t& liga_set) const
{
// Finds the actual liga indices present in the liga set currently. Takes
// into account those that have been removed by processing.
unsigned min_index = (unsigned) -1;
unsigned max_index = 0;
for (const auto& l : liga_set.real_links) {
if (l.position < 2) continue;
unsigned liga_index = (l.position - 2) / 2;
min_index = hb_min(min_index, liga_index);
max_index = hb_max(max_index, liga_index);
}
return hb_pair(min_index, max_index + 1);
}
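// Link positions start after the 2-byte ligature count and each offset is
// 2 bytes wide, so (position - 2) / 2 above recovers the slot a link
// occupies in the ligature offset array.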
void compact_liga_set (gsubgpos_graph_context_t& c, LigatureSet* table, hb_serialize_context_t::object_t& obj) const
{
if (table->ligature.len <= obj.real_links.length) return;
// compact the remaining linked liga offsets into a contiguous array and shrink the node as needed.
unsigned to_remove = table->ligature.len - obj.real_links.length;
unsigned new_position = SmallTypes::size;
obj.real_links.qsort(); // for this to work we need to process links in order of position.
for (auto& l : obj.real_links)
{
l.position = new_position;
new_position += SmallTypes::size;
}
table->ligature.len = obj.real_links.length;
obj.tail -= to_remove * SmallTypes::size;
}
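// e.g. a liga set that started with 5 offsets but now has only 3 live links
// gets those links repacked to positions 2, 4, and 6, its count rewritten to
// 3, and its tail pulled in by 2 * SmallTypes::size bytes.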
unsigned clone_range (gsubgpos_graph_context_t& c,
unsigned this_index,
hb_vector_t<unsigned> liga_counts,
unsigned start, unsigned end) const
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Cloning LigatureSubstFormat1 (%u) range [%u, %u).", this_index, start, end);
// Create an oversized new liga subst; we'll adjust the size down later. We don't know
// the final size until we process it, but we also need it to exist while we're processing
// so that nodes can be moved to it as needed.
unsigned prime_size = OT::Layout::GSUB_impl::LigatureSubstFormat1_2<SmallTypes>::min_size
+ ligatureSet.get_size() - ligatureSet.len.get_size();
unsigned liga_subst_prime_id = c.create_node (prime_size);
if (liga_subst_prime_id == (unsigned) -1) return -1;
LigatureSubstFormat1* liga_subst_prime = (LigatureSubstFormat1*) c.graph.object (liga_subst_prime_id).head;
liga_subst_prime->format = this->format;
liga_subst_prime->ligatureSet.len = this->ligatureSet.len;
// Create a placeholder coverage prime id since we need to add virtual links to it while
// generating liga and liga sets. Afterwards it will be updated to have the correct coverage.
unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
unsigned coverage_prime_id = c.graph.duplicate(coverage_id);
auto& coverage_prime_vertex = c.graph.vertices_[coverage_prime_id];
auto* coverage_prime_link = c.graph.vertices_[liga_subst_prime_id].obj.real_links.push ();
coverage_prime_link->width = SmallTypes::size;
coverage_prime_link->objidx = coverage_prime_id;
coverage_prime_link->position = 2;
coverage_prime_vertex.add_parent (liga_subst_prime_id, false);
// Locate all liga sets with ligas between start and end.
// Clone or move them as needed.
unsigned count = 0;
unsigned liga_set_count = 0;
unsigned liga_set_start = -1;
unsigned liga_set_end = 0; // inclusive
for (unsigned i = 0; i < liga_counts.length; i++)
{
unsigned num_ligas = liga_counts[i];
unsigned current_start = count;
unsigned current_end = count + num_ligas;
if (current_start >= end || start >= current_end) {
// No intersection, so just skip
count += num_ligas;
continue;
}
auto liga_set_index = c.graph.index_for_offset(this_index, &ligatureSet[i]);
auto liga_set = c.graph.as_table<LigatureSet>(this_index, &ligatureSet[i]);
if (!liga_set.table) {
return -1;
}
// Bounds may need to be adjusted if some ligas have been previously removed.
hb_pair_t<unsigned, unsigned> liga_bounds = current_liga_set_bounds(c, liga_set_index, liga_set.vertex->obj);
current_start = hb_max(count + liga_bounds.first, current_start);
current_end = hb_min(count + liga_bounds.second, current_end);
unsigned liga_set_prime_id;
if (current_start >= start && current_end <= end) {
// This liga set is fully contained within [start, end).
// We can move the entire liga set to the new liga subst object.
liga_set_end = i;
if (i < liga_set_start) liga_set_start = i;
liga_set_prime_id = c.graph.move_child<> (this_index,
&ligatureSet[i],
liga_subst_prime_id,
&liga_subst_prime->ligatureSet[liga_set_count++]);
compact_liga_set(c, liga_set.table, liga_set.vertex->obj);
}
else
{
// This liga set partially overlaps [start, end). We'll need to create
// a new liga set sub table and move the intersecting ligas to it.
unsigned liga_count = hb_min(end, current_end) - hb_max(start, current_start);
auto result = new_liga_set(c, liga_count);
liga_set_prime_id = result.first;
LigatureSet* prime = result.second;
if (liga_set_prime_id == (unsigned) -1) return -1;
unsigned new_index = 0;
for (unsigned j = hb_max(start, current_start) - count; j < hb_min(end, current_end) - count; j++) {
c.graph.move_child<> (liga_set_index,
&liga_set.table->ligature[j],
liga_set_prime_id,
&prime->ligature[new_index++]);
}
liga_set_end = i;
if (i < liga_set_start) liga_set_start = i;
c.graph.add_link(&liga_subst_prime->ligatureSet[liga_set_count++], liga_subst_prime_id, liga_set_prime_id);
}
// The new liga set and all of its children need a virtual link to the new coverage table:
auto& liga_set_prime = c.graph.vertices_[liga_set_prime_id].obj;
clear_virtual_links(c, liga_set_prime_id);
add_virtual_link(c, liga_set_prime_id, coverage_prime_id);
for (const auto& l : liga_set_prime.real_links) {
clear_virtual_links(c, l.objidx);
add_virtual_link(c, l.objidx, coverage_prime_id);
}
count += num_ligas;
}
c.graph.vertices_[liga_subst_prime_id].obj.tail -= (liga_subst_prime->ligatureSet.len - liga_set_count) * SmallTypes::size;
liga_subst_prime->ligatureSet.len = liga_set_count;
if (!Coverage::filter_coverage (c,
coverage_prime_id,
liga_set_start, liga_set_end + 1))
return -1;
return liga_subst_prime_id;
}
bool shrink (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned old_count,
hb_vector_t<unsigned> liga_counts,
unsigned count)
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Shrinking LigatureSubstFormat1 (%u) to [0, %u).",
this_index,
count);
if (count >= old_count)
return true;
hb_set_t retained_indices;
unsigned new_liga_set_count = 0;
for (unsigned i = 0; i < liga_counts.length; i++)
{
auto liga_set = c.graph.as_table<LigatureSet>(this_index, &ligatureSet[i]);
if (!liga_set.table) {
return false;
}
// We need the virtual links to coverage removed from all descendants of this liga subst.
// If any are left when we try to mutate the coverage table later it will be unnecessarily
// duplicated. Code later on will re-add the virtual links as needed (via retained_indices).
clear_virtual_links(c, liga_set.index);
retained_indices.add(liga_set.index);
for (const auto& liga_offset : liga_set.table->ligature) {
unsigned liga_index = c.graph.index_for_offset(liga_set.index, &liga_offset);
if (liga_index != (unsigned) -1) {
clear_virtual_links(c, liga_index);
retained_indices.add(liga_index);
}
}
unsigned num_ligas = liga_counts[i];
if (num_ligas >= count) {
// drop the trailing ligas from this set and all subsequent liga sets
unsigned num_ligas_to_remove = num_ligas - count;
new_liga_set_count = i + 1;
c.graph.vertices_[liga_set.index].obj.tail -= num_ligas_to_remove * SmallTypes::size;
liga_set.table->ligature.len = count;
break;
} else {
count -= num_ligas;
}
}
// Adjust liga set array
c.graph.vertices_[this_index].obj.tail -= (ligatureSet.len - new_liga_set_count) * SmallTypes::size;
ligatureSet.len = new_liga_set_count;
// Coverage matches the number of liga sets so rebuild as needed
auto coverage = c.graph.as_mutable_table<Coverage> (this_index, &this->coverage);
if (!coverage) return false;
for (unsigned i : retained_indices.iter())
add_virtual_link(c, i, coverage.index);
unsigned coverage_size = coverage.vertex->table_size ();
auto new_coverage =
+ hb_zip (coverage.table->iter (), hb_range ())
| hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
return p.second < new_liga_set_count;
})
| hb_map_retains_sorting (hb_first)
;
return Coverage::make_coverage (c, new_coverage, coverage.index, coverage_size);
}
};
struct LigatureSubst : public OT::Layout::GSUB_impl::LigatureSubst
{
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
switch (u.format) {
case 1:
return ((LigatureSubstFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
#ifndef HB_NO_BEYOND_64K
case 2: HB_FALLTHROUGH;
// Don't split 24bit Ligature Subs
#endif
default:
return hb_vector_t<unsigned> ();
}
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < u.format.get_size ()) return false;
hb_barrier ();
switch (u.format) {
case 1:
return ((LigatureSubstFormat1*)(&u.format1))->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
case 2: HB_FALLTHROUGH;
#endif
default:
// We don't handle format 2 here.
return false;
}
}
};
}
#endif // GRAPH_LIGATURE_GRAPH_HH


@@ -0,0 +1,518 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#ifndef GRAPH_MARKBASEPOS_GRAPH_HH
#define GRAPH_MARKBASEPOS_GRAPH_HH
#include "split-helpers.hh"
#include "coverage-graph.hh"
#include "../OT/Layout/GPOS/MarkBasePos.hh"
#include "../OT/Layout/GPOS/PosLookupSubTable.hh"
namespace graph {
struct AnchorMatrix : public OT::Layout::GPOS_impl::AnchorMatrix
{
bool sanitize (graph_t::vertex_t& vertex, unsigned class_count) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < AnchorMatrix::min_size) return false;
hb_barrier ();
return vertex_len >= AnchorMatrix::min_size +
OT::Offset16::static_size * class_count * this->rows;
}
bool shrink (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned old_class_count,
unsigned new_class_count)
{
if (new_class_count >= old_class_count) return false;
auto& o = c.graph.vertices_[this_index].obj;
unsigned base_count = rows;
o.tail = o.head +
AnchorMatrix::min_size +
OT::Offset16::static_size * base_count * new_class_count;
// Reposition links into the new indexing scheme.
for (auto& link : o.real_links.writer ())
{
unsigned index = (link.position - 2) / 2;
unsigned base = index / old_class_count;
unsigned klass = index % old_class_count;
if (klass >= new_class_count)
// should have already been removed
return false;
unsigned new_index = base * new_class_count + klass;
link.position = (char*) &(this->matrixZ[new_index]) - (char*) this;
}
return true;
}
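// The matrix stores one Offset16 per (base, class) pair in row-major order,
// so the link for base b and class k sits at position
// 2 + 2 * (b * class_count + k); the (position - 2) / 2 arithmetic above and
// in clone () below inverts that.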
unsigned clone (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned start,
unsigned end,
unsigned class_count)
{
unsigned base_count = rows;
unsigned new_class_count = end - start;
unsigned size = AnchorMatrix::min_size +
OT::Offset16::static_size * new_class_count * rows;
unsigned prime_id = c.create_node (size);
if (prime_id == (unsigned) -1) return -1;
AnchorMatrix* prime = (AnchorMatrix*) c.graph.object (prime_id).head;
prime->rows = base_count;
auto& o = c.graph.vertices_[this_index].obj;
int num_links = o.real_links.length;
for (int i = 0; i < num_links; i++)
{
const auto& link = o.real_links[i];
unsigned old_index = (link.position - 2) / OT::Offset16::static_size;
unsigned klass = old_index % class_count;
if (klass < start || klass >= end) continue;
unsigned base = old_index / class_count;
unsigned new_klass = klass - start;
unsigned new_index = base * new_class_count + new_klass;
unsigned child_idx = link.objidx;
c.graph.add_link (&(prime->matrixZ[new_index]),
prime_id,
child_idx);
auto& child = c.graph.vertices_[child_idx];
child.remove_parent (this_index);
o.real_links.remove_unordered (i);
num_links--;
i--;
}
return prime_id;
}
};
struct MarkArray : public OT::Layout::GPOS_impl::MarkArray
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
unsigned min_size = MarkArray::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >= get_size ();
}
bool shrink (gsubgpos_graph_context_t& c,
const hb_hashmap_t<unsigned, unsigned>& mark_array_links,
unsigned this_index,
unsigned new_class_count)
{
auto& o = c.graph.vertices_[this_index].obj;
for (const auto& link : o.real_links)
c.graph.vertices_[link.objidx].remove_parent (this_index);
o.real_links.reset ();
unsigned new_index = 0;
for (const auto& record : this->iter ())
{
unsigned klass = record.klass;
if (klass >= new_class_count) continue;
(*this)[new_index].klass = klass;
unsigned position = (char*) &record.markAnchor - (char*) this;
unsigned* objidx;
if (!mark_array_links.has (position, &objidx))
{
new_index++;
continue;
}
c.graph.add_link (&(*this)[new_index].markAnchor, this_index, *objidx);
new_index++;
}
this->len = new_index;
o.tail = o.head + MarkArray::min_size +
OT::Layout::GPOS_impl::MarkRecord::static_size * new_index;
return true;
}
unsigned clone (gsubgpos_graph_context_t& c,
unsigned this_index,
const hb_hashmap_t<unsigned, unsigned>& pos_to_index,
hb_set_t& marks,
unsigned start_class)
{
unsigned size = MarkArray::min_size +
OT::Layout::GPOS_impl::MarkRecord::static_size *
marks.get_population ();
unsigned prime_id = c.create_node (size);
if (prime_id == (unsigned) -1) return -1;
MarkArray* prime = (MarkArray*) c.graph.object (prime_id).head;
prime->len = marks.get_population ();
unsigned i = 0;
for (hb_codepoint_t mark : marks)
{
(*prime)[i].klass = (*this)[mark].klass - start_class;
unsigned offset_pos = (char*) &((*this)[mark].markAnchor) - (char*) this;
unsigned* anchor_index;
if (pos_to_index.has (offset_pos, &anchor_index))
c.graph.move_child (this_index,
&((*this)[mark].markAnchor),
prime_id,
&((*prime)[i].markAnchor));
i++;
}
return prime_id;
}
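// In clone () above the class values are renumbered relative to start_class:
// e.g. a mark of class 5 cloned into a split beginning at class 4 becomes
// class 1 in the new MarkArray.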
};
struct MarkBasePosFormat1 : public OT::Layout::GPOS_impl::MarkBasePosFormat1_2<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
return vertex_len >= MarkBasePosFormat1::static_size;
}
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
hb_set_t visited;
const unsigned base_coverage_id = c.graph.index_for_offset (this_index, &baseCoverage);
const unsigned base_size =
OT::Layout::GPOS_impl::MarkBasePosFormat1_2<SmallTypes>::min_size +
MarkArray::min_size +
AnchorMatrix::min_size +
c.graph.vertices_[base_coverage_id].table_size ();
hb_vector_t<class_info_t> class_to_info = get_class_info (c, this_index);
unsigned class_count = classCount;
auto base_array = c.graph.as_table<AnchorMatrix> (this_index,
&baseArray,
class_count);
if (!base_array) return hb_vector_t<unsigned> ();
unsigned base_count = base_array.table->rows;
unsigned partial_coverage_size = 4;
unsigned accumulated = base_size;
hb_vector_t<unsigned> split_points;
for (unsigned klass = 0; klass < class_count; klass++)
{
class_info_t& info = class_to_info[klass];
partial_coverage_size += OT::HBUINT16::static_size * info.marks.get_population ();
unsigned accumulated_delta =
OT::Layout::GPOS_impl::MarkRecord::static_size * info.marks.get_population () +
OT::Offset16::static_size * base_count;
for (unsigned objidx : info.child_indices)
accumulated_delta += c.graph.find_subgraph_size (objidx, visited);
accumulated += accumulated_delta;
unsigned total = accumulated + partial_coverage_size;
if (total >= (1 << 16))
{
split_points.push (klass);
accumulated = base_size + accumulated_delta;
partial_coverage_size = 4 + OT::HBUINT16::static_size * info.marks.get_population ();
visited.clear (); // node sharing isn't allowed between splits.
}
}
const unsigned mark_array_id = c.graph.index_for_offset (this_index, &markArray);
split_context_t split_context {
c,
this,
c.graph.duplicate_if_shared (parent_index, this_index),
std::move (class_to_info),
c.graph.vertices_[mark_array_id].position_to_index_map (),
};
return actuate_subtable_split<split_context_t> (split_context, split_points);
}
private:
struct class_info_t {
hb_set_t marks;
hb_vector_t<unsigned> child_indices;
};
struct split_context_t {
gsubgpos_graph_context_t& c;
MarkBasePosFormat1* thiz;
unsigned this_index;
hb_vector_t<class_info_t> class_to_info;
hb_hashmap_t<unsigned, unsigned> mark_array_links;
hb_set_t marks_for (unsigned start, unsigned end)
{
hb_set_t marks;
for (unsigned klass = start; klass < end; klass++)
{
+ class_to_info[klass].marks.iter ()
| hb_sink (marks)
;
}
return marks;
}
unsigned original_count ()
{
return thiz->classCount;
}
unsigned clone_range (unsigned start, unsigned end)
{
return thiz->clone_range (*this, this->this_index, start, end);
}
bool shrink (unsigned count)
{
return thiz->shrink (*this, this->this_index, count);
}
};
hb_vector_t<class_info_t> get_class_info (gsubgpos_graph_context_t& c,
unsigned this_index)
{
hb_vector_t<class_info_t> class_to_info;
unsigned class_count = classCount;
if (!class_count) return class_to_info;
if (!class_to_info.resize (class_count))
return hb_vector_t<class_info_t>();
auto mark_array = c.graph.as_table<MarkArray> (this_index, &markArray);
if (!mark_array) return hb_vector_t<class_info_t> ();
unsigned mark_count = mark_array.table->len;
for (unsigned mark = 0; mark < mark_count; mark++)
{
unsigned klass = (*mark_array.table)[mark].get_class ();
if (klass >= class_count) continue;
class_to_info[klass].marks.add (mark);
}
for (const auto& link : mark_array.vertex->obj.real_links)
{
unsigned mark = (link.position - 2) /
OT::Layout::GPOS_impl::MarkRecord::static_size;
unsigned klass = (*mark_array.table)[mark].get_class ();
if (klass >= class_count) continue;
class_to_info[klass].child_indices.push (link.objidx);
}
unsigned base_array_id =
c.graph.index_for_offset (this_index, &baseArray);
auto& base_array_v = c.graph.vertices_[base_array_id];
for (const auto& link : base_array_v.obj.real_links)
{
unsigned index = (link.position - 2) / OT::Offset16::static_size;
unsigned klass = index % class_count;
class_to_info[klass].child_indices.push (link.objidx);
}
return class_to_info;
}
bool shrink (split_context_t& sc,
unsigned this_index,
unsigned count)
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Shrinking MarkBasePosFormat1 (%u) to [0, %u).",
this_index,
count);
unsigned old_count = classCount;
if (count >= old_count)
return true;
classCount = count;
auto mark_coverage = sc.c.graph.as_mutable_table<Coverage> (this_index,
&markCoverage);
if (!mark_coverage) return false;
hb_set_t marks = sc.marks_for (0, count);
auto new_coverage =
+ hb_enumerate (mark_coverage.table->iter ())
| hb_filter (marks, hb_first)
| hb_map_retains_sorting (hb_second)
;
if (!Coverage::make_coverage (sc.c, + new_coverage,
mark_coverage.index,
4 + 2 * marks.get_population ()))
return false;
auto base_array = sc.c.graph.as_mutable_table<AnchorMatrix> (this_index,
&baseArray,
old_count);
if (!base_array || !base_array.table->shrink (sc.c,
base_array.index,
old_count,
count))
return false;
auto mark_array = sc.c.graph.as_mutable_table<MarkArray> (this_index,
&markArray);
if (!mark_array || !mark_array.table->shrink (sc.c,
sc.mark_array_links,
mark_array.index,
count))
return false;
return true;
}
// Create a new MarkBasePos that has all of the data for classes from [start, end).
unsigned clone_range (split_context_t& sc,
unsigned this_index,
unsigned start, unsigned end) const
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Cloning MarkBasePosFormat1 (%u) range [%u, %u).", this_index, start, end);
graph_t& graph = sc.c.graph;
unsigned prime_size = OT::Layout::GPOS_impl::MarkBasePosFormat1_2<SmallTypes>::static_size;
unsigned prime_id = sc.c.create_node (prime_size);
if (prime_id == (unsigned) -1) return -1;
MarkBasePosFormat1* prime = (MarkBasePosFormat1*) graph.object (prime_id).head;
prime->format = this->format;
unsigned new_class_count = end - start;
prime->classCount = new_class_count;
unsigned base_coverage_id =
graph.index_for_offset (sc.this_index, &baseCoverage);
graph.add_link (&(prime->baseCoverage), prime_id, base_coverage_id);
graph.duplicate (prime_id, base_coverage_id);
auto mark_coverage = sc.c.graph.as_table<Coverage> (this_index,
&markCoverage);
if (!mark_coverage) return -1;
hb_set_t marks = sc.marks_for (start, end);
auto new_coverage =
+ hb_enumerate (mark_coverage.table->iter ())
| hb_filter (marks, hb_first)
| hb_map_retains_sorting (hb_second)
;
if (!Coverage::add_coverage (sc.c,
prime_id,
2,
+ new_coverage,
marks.get_population () * 2 + 4))
return -1;
auto mark_array =
graph.as_table <MarkArray> (sc.this_index, &markArray);
if (!mark_array) return -1;
unsigned new_mark_array =
mark_array.table->clone (sc.c,
mark_array.index,
sc.mark_array_links,
marks,
start);
graph.add_link (&(prime->markArray), prime_id, new_mark_array);
unsigned class_count = classCount;
auto base_array =
graph.as_table<AnchorMatrix> (sc.this_index, &baseArray, class_count);
if (!base_array) return -1;
unsigned new_base_array =
base_array.table->clone (sc.c,
base_array.index,
start, end, this->classCount);
graph.add_link (&(prime->baseArray), prime_id, new_base_array);
return prime_id;
}
};
struct MarkBasePos : public OT::Layout::GPOS_impl::MarkBasePos
{
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
switch (u.format) {
case 1:
return ((MarkBasePosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
#ifndef HB_NO_BEYOND_64K
case 2: HB_FALLTHROUGH;
// Don't split 24bit MarkBasePos's.
#endif
default:
return hb_vector_t<unsigned> ();
}
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < u.format.get_size ()) return false;
hb_barrier ();
switch (u.format) {
case 1:
return ((MarkBasePosFormat1*)(&u.format1))->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
case 2: HB_FALLTHROUGH;
#endif
default:
// We don't handle format 2 here.
return false;
}
}
};
}
#endif // GRAPH_MARKBASEPOS_GRAPH_HH


@@ -0,0 +1,652 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#ifndef GRAPH_PAIRPOS_GRAPH_HH
#define GRAPH_PAIRPOS_GRAPH_HH
#include "split-helpers.hh"
#include "coverage-graph.hh"
#include "classdef-graph.hh"
#include "../OT/Layout/GPOS/PairPos.hh"
#include "../OT/Layout/GPOS/PosLookupSubTable.hh"
namespace graph {
struct PairPosFormat1 : public OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
unsigned min_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >=
min_size + pairSet.get_size () - pairSet.len.get_size();
}
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
hb_set_t visited;
const unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
const unsigned coverage_size = c.graph.vertices_[coverage_id].table_size ();
const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size;
unsigned partial_coverage_size = 4;
unsigned accumulated = base_size;
hb_vector_t<unsigned> split_points;
for (unsigned i = 0; i < pairSet.len; i++)
{
unsigned pair_set_index = pair_set_graph_index (c, this_index, i);
unsigned accumulated_delta =
c.graph.find_subgraph_size (pair_set_index, visited) +
SmallTypes::size; // for PairSet offset.
partial_coverage_size += OT::HBUINT16::static_size;
accumulated += accumulated_delta;
unsigned total = accumulated + hb_min (partial_coverage_size, coverage_size);
if (total >= (1 << 16))
{
split_points.push (i);
accumulated = base_size + accumulated_delta;
partial_coverage_size = 6;
visited.clear (); // node sharing isn't allowed between splits.
}
}
split_context_t split_context {
c,
this,
c.graph.duplicate_if_shared (parent_index, this_index),
};
return actuate_subtable_split<split_context_t> (split_context, split_points);
}
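// The coverage contribution to the total is estimated as the smaller of a
// format 1 table over the glyphs seen so far (partial_coverage_size) and the
// whole existing coverage table, since the split's coverage should pack no
// larger than either bound.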
private:
struct split_context_t {
gsubgpos_graph_context_t& c;
PairPosFormat1* thiz;
unsigned this_index;
unsigned original_count ()
{
return thiz->pairSet.len;
}
unsigned clone_range (unsigned start, unsigned end)
{
return thiz->clone_range (this->c, this->this_index, start, end);
}
bool shrink (unsigned count)
{
return thiz->shrink (this->c, this->this_index, count);
}
};
bool shrink (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned count)
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Shrinking PairPosFormat1 (%u) to [0, %u).",
this_index,
count);
unsigned old_count = pairSet.len;
if (count >= old_count)
return true;
pairSet.len = count;
c.graph.vertices_[this_index].obj.tail -= (old_count - count) * SmallTypes::size;
auto coverage = c.graph.as_mutable_table<Coverage> (this_index, &this->coverage);
if (!coverage) return false;
unsigned coverage_size = coverage.vertex->table_size ();
auto new_coverage =
+ hb_zip (coverage.table->iter (), hb_range ())
| hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
return p.second < count;
})
| hb_map_retains_sorting (hb_first)
;
return Coverage::make_coverage (c, new_coverage, coverage.index, coverage_size);
}
// Create a new PairPos including PairSet's from start (inclusive) to end (exclusive).
// Returns object id of the new object.
unsigned clone_range (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned start, unsigned end) const
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Cloning PairPosFormat1 (%u) range [%u, %u).", this_index, start, end);
unsigned num_pair_sets = end - start;
unsigned prime_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size
+ num_pair_sets * SmallTypes::size;
unsigned pair_pos_prime_id = c.create_node (prime_size);
if (pair_pos_prime_id == (unsigned) -1) return -1;
PairPosFormat1* pair_pos_prime = (PairPosFormat1*) c.graph.object (pair_pos_prime_id).head;
pair_pos_prime->format = this->format;
pair_pos_prime->valueFormat[0] = this->valueFormat[0];
pair_pos_prime->valueFormat[1] = this->valueFormat[1];
pair_pos_prime->pairSet.len = num_pair_sets;
for (unsigned i = start; i < end; i++)
{
c.graph.move_child<> (this_index,
&pairSet[i],
pair_pos_prime_id,
&pair_pos_prime->pairSet[i - start]);
}
unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
if (!Coverage::clone_coverage (c,
coverage_id,
pair_pos_prime_id,
2,
start, end))
return -1;
return pair_pos_prime_id;
}
unsigned pair_set_graph_index (gsubgpos_graph_context_t& c, unsigned this_index, unsigned i) const
{
return c.graph.index_for_offset (this_index, &pairSet[i]);
}
};
struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
size_t vertex_len = vertex.table_size ();
unsigned min_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
const unsigned class1_count = class1Count;
return vertex_len >=
min_size + class1_count * get_class1_record_size ();
}
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
const unsigned class_def_2_size = size_of (c, this_index, &classDef2);
const Coverage* coverage = get_coverage (c, this_index);
const ClassDef* class_def_1 = get_class_def_1 (c, this_index);
auto gid_and_class =
+ coverage->iter ()
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
return hb_codepoint_pair_t (gid, class_def_1->get_class (gid));
})
;
class_def_size_estimator_t estimator (gid_and_class);
const unsigned class1_count = class1Count;
const unsigned class2_count = class2Count;
const unsigned class1_record_size = get_class1_record_size ();
const unsigned value_1_len = valueFormat1.get_len ();
const unsigned value_2_len = valueFormat2.get_len ();
const unsigned total_value_len = value_1_len + value_2_len;
unsigned accumulated = base_size;
unsigned coverage_size = 4;
unsigned class_def_1_size = 4;
unsigned max_coverage_size = coverage_size;
unsigned max_class_def_1_size = class_def_1_size;
hb_vector_t<unsigned> split_points;
hb_hashmap_t<unsigned, unsigned> device_tables = get_all_device_tables (c, this_index);
hb_vector_t<unsigned> format1_device_table_indices = valueFormat1.get_device_table_indices ();
hb_vector_t<unsigned> format2_device_table_indices = valueFormat2.get_device_table_indices ();
bool has_device_tables = bool(format1_device_table_indices) || bool(format2_device_table_indices);
hb_set_t visited;
for (unsigned i = 0; i < class1_count; i++)
{
unsigned accumulated_delta = class1_record_size;
class_def_1_size = estimator.add_class_def_size (i);
coverage_size = estimator.coverage_size ();
max_coverage_size = hb_max (max_coverage_size, coverage_size);
max_class_def_1_size = hb_max (max_class_def_1_size, class_def_1_size);
if (has_device_tables) {
for (unsigned j = 0; j < class2_count; j++)
{
unsigned value1_index = total_value_len * (class2_count * i + j);
unsigned value2_index = value1_index + value_1_len;
accumulated_delta += size_of_value_record_children (c,
device_tables,
format1_device_table_indices,
value1_index,
visited);
accumulated_delta += size_of_value_record_children (c,
device_tables,
format2_device_table_indices,
value2_index,
visited);
}
}
accumulated += accumulated_delta;
unsigned total = accumulated
+ coverage_size + class_def_1_size + class_def_2_size
// The largest object will pack last and can exceed the size limit.
- hb_max (hb_max (coverage_size, class_def_1_size), class_def_2_size);
if (total >= (1 << 16))
{
split_points.push (i);
// split does not include i, so add the size for i when we reset the size counters.
accumulated = base_size + accumulated_delta;
estimator.reset();
class_def_1_size = estimator.add_class_def_size(i);
coverage_size = estimator.coverage_size();
visited.clear (); // node sharing isn't allowed between splits.
}
}
split_context_t split_context {
c,
this,
c.graph.duplicate_if_shared (parent_index, this_index),
class1_record_size,
total_value_len,
value_1_len,
value_2_len,
max_coverage_size,
max_class_def_1_size,
device_tables,
format1_device_table_indices,
format2_device_table_indices
};
return actuate_subtable_split<split_context_t> (split_context, split_points);
}
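  /* Sketch of the estimate above (illustrative numbers, not from the
   * source): if class1_record_size is 96 bytes and each class1 record
   * drags in ~4 KB of device-table children, roughly 16 records push
   * `total` past the 64 KB offset ceiling (1 << 16); a split point is then
   * recorded and the accumulators restart at base_size plus the current
   * record's delta, since the split excludes record i. */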
private:
struct split_context_t
{
gsubgpos_graph_context_t& c;
PairPosFormat2* thiz;
unsigned this_index;
unsigned class1_record_size;
unsigned value_record_len;
unsigned value1_record_len;
unsigned value2_record_len;
unsigned max_coverage_size;
unsigned max_class_def_size;
const hb_hashmap_t<unsigned, unsigned>& device_tables;
const hb_vector_t<unsigned>& format1_device_table_indices;
const hb_vector_t<unsigned>& format2_device_table_indices;
unsigned original_count ()
{
return thiz->class1Count;
}
unsigned clone_range (unsigned start, unsigned end)
{
return thiz->clone_range (*this, start, end);
}
bool shrink (unsigned count)
{
return thiz->shrink (*this, count);
}
};
size_t get_class1_record_size () const
{
const size_t class2_count = class2Count;
return
class2_count * (valueFormat1.get_size () + valueFormat2.get_size ());
}
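  /* E.g. (illustrative): with class2Count = 4, valueFormat1 carrying only
   * XAdvance (2 bytes) and valueFormat2 empty, each Class1Record occupies
   * 4 * (2 + 0) = 8 bytes. */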
unsigned clone_range (split_context_t& split_context,
unsigned start, unsigned end) const
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Cloning PairPosFormat2 (%u) range [%u, %u).", split_context.this_index, start, end);
graph_t& graph = split_context.c.graph;
unsigned num_records = end - start;
unsigned prime_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size
+ num_records * split_context.class1_record_size;
unsigned pair_pos_prime_id = split_context.c.create_node (prime_size);
if (pair_pos_prime_id == (unsigned) -1) return -1;
PairPosFormat2* pair_pos_prime =
(PairPosFormat2*) graph.object (pair_pos_prime_id).head;
pair_pos_prime->format = this->format;
pair_pos_prime->valueFormat1 = this->valueFormat1;
pair_pos_prime->valueFormat2 = this->valueFormat2;
pair_pos_prime->class1Count = num_records;
pair_pos_prime->class2Count = this->class2Count;
clone_class1_records (split_context,
pair_pos_prime_id,
start,
end);
unsigned coverage_id =
graph.index_for_offset (split_context.this_index, &coverage);
unsigned class_def_1_id =
graph.index_for_offset (split_context.this_index, &classDef1);
auto& coverage_v = graph.vertices_[coverage_id];
auto& class_def_1_v = graph.vertices_[class_def_1_id];
Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
ClassDef* class_def_1_table = (ClassDef*) class_def_1_v.obj.head;
if (!coverage_table
|| !coverage_table->sanitize (coverage_v)
|| !class_def_1_table
|| !class_def_1_table->sanitize (class_def_1_v))
return -1;
auto klass_map =
+ coverage_table->iter ()
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
return hb_codepoint_pair_t (gid, class_def_1_table->get_class (gid));
})
| hb_filter ([&] (hb_codepoint_t klass) {
return klass >= start && klass < end;
}, hb_second)
| hb_map_retains_sorting ([&] (hb_codepoint_pair_t gid_and_class) {
// Classes must be from 0...N so subtract start
return hb_codepoint_pair_t (gid_and_class.first, gid_and_class.second - start);
})
;
if (!Coverage::add_coverage (split_context.c,
pair_pos_prime_id,
2,
+ klass_map | hb_map_retains_sorting (hb_first),
split_context.max_coverage_size))
return -1;
// classDef1
if (!ClassDef::add_class_def (split_context.c,
pair_pos_prime_id,
8,
+ klass_map,
split_context.max_class_def_size))
return -1;
// classDef2
unsigned class_def_2_id =
graph.index_for_offset (split_context.this_index, &classDef2);
auto* class_def_link = graph.vertices_[pair_pos_prime_id].obj.real_links.push ();
class_def_link->width = SmallTypes::size;
class_def_link->objidx = class_def_2_id;
class_def_link->position = 10;
graph.vertices_[class_def_2_id].add_parent (pair_pos_prime_id, false);
graph.duplicate (pair_pos_prime_id, class_def_2_id);
return pair_pos_prime_id;
}
void clone_class1_records (split_context_t& split_context,
unsigned pair_pos_prime_id,
unsigned start, unsigned end) const
{
PairPosFormat2* pair_pos_prime =
(PairPosFormat2*) split_context.c.graph.object (pair_pos_prime_id).head;
char* start_addr = ((char*)&values[0]) + start * split_context.class1_record_size;
unsigned num_records = end - start;
hb_memcpy (&pair_pos_prime->values[0],
start_addr,
num_records * split_context.class1_record_size);
if (!split_context.format1_device_table_indices
&& !split_context.format2_device_table_indices)
// No device tables to move over.
return;
unsigned class2_count = class2Count;
for (unsigned i = start; i < end; i++)
{
for (unsigned j = 0; j < class2_count; j++)
{
unsigned value1_index = split_context.value_record_len * (class2_count * i + j);
unsigned value2_index = value1_index + split_context.value1_record_len;
unsigned new_value1_index = split_context.value_record_len * (class2_count * (i - start) + j);
unsigned new_value2_index = new_value1_index + split_context.value1_record_len;
transfer_device_tables (split_context,
pair_pos_prime_id,
split_context.format1_device_table_indices,
value1_index,
new_value1_index);
transfer_device_tables (split_context,
pair_pos_prime_id,
split_context.format2_device_table_indices,
value2_index,
new_value2_index);
}
}
}
void transfer_device_tables (split_context_t& split_context,
unsigned pair_pos_prime_id,
const hb_vector_t<unsigned>& device_table_indices,
unsigned old_value_record_index,
unsigned new_value_record_index) const
{
PairPosFormat2* pair_pos_prime =
(PairPosFormat2*) split_context.c.graph.object (pair_pos_prime_id).head;
for (unsigned i : device_table_indices)
{
OT::Offset16* record = (OT::Offset16*) &values[old_value_record_index + i];
unsigned record_position = ((char*) record) - ((char*) this);
if (!split_context.device_tables.has (record_position)) continue;
split_context.c.graph.move_child (
split_context.this_index,
record,
pair_pos_prime_id,
(OT::Offset16*) &pair_pos_prime->values[new_value_record_index + i]);
}
}
bool shrink (split_context_t& split_context,
unsigned count)
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
" Shrinking PairPosFormat2 (%u) to [0, %u).",
split_context.this_index,
count);
unsigned old_count = class1Count;
if (count >= old_count)
return true;
graph_t& graph = split_context.c.graph;
class1Count = count;
graph.vertices_[split_context.this_index].obj.tail -=
(old_count - count) * split_context.class1_record_size;
auto coverage =
graph.as_mutable_table<Coverage> (split_context.this_index, &this->coverage);
if (!coverage) return false;
auto class_def_1 =
graph.as_mutable_table<ClassDef> (split_context.this_index, &classDef1);
if (!class_def_1) return false;
auto klass_map =
+ coverage.table->iter ()
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
return hb_codepoint_pair_t (gid, class_def_1.table->get_class (gid));
})
| hb_filter ([&] (hb_codepoint_t klass) {
return klass < count;
}, hb_second)
;
auto new_coverage = + klass_map | hb_map_retains_sorting (hb_first);
if (!Coverage::make_coverage (split_context.c,
+ new_coverage,
coverage.index,
// existing ranges may not be kept; worst case size is a format 1
// coverage table.
4 + new_coverage.len() * 2))
return false;
return ClassDef::make_class_def (split_context.c,
+ klass_map,
class_def_1.index,
class_def_1.vertex->table_size ());
}
hb_hashmap_t<unsigned, unsigned>
get_all_device_tables (gsubgpos_graph_context_t& c,
unsigned this_index) const
{
const auto& v = c.graph.vertices_[this_index];
return v.position_to_index_map ();
}
const Coverage* get_coverage (gsubgpos_graph_context_t& c,
unsigned this_index) const
{
unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
auto& coverage_v = c.graph.vertices_[coverage_id];
Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
if (!coverage_table || !coverage_table->sanitize (coverage_v))
return &Null(Coverage);
return coverage_table;
}
const ClassDef* get_class_def_1 (gsubgpos_graph_context_t& c,
unsigned this_index) const
{
unsigned class_def_1_id = c.graph.index_for_offset (this_index, &classDef1);
auto& class_def_1_v = c.graph.vertices_[class_def_1_id];
ClassDef* class_def_1_table = (ClassDef*) class_def_1_v.obj.head;
if (!class_def_1_table || !class_def_1_table->sanitize (class_def_1_v))
return &Null(ClassDef);
return class_def_1_table;
}
unsigned size_of_value_record_children (gsubgpos_graph_context_t& c,
const hb_hashmap_t<unsigned, unsigned>& device_tables,
const hb_vector_t<unsigned> device_table_indices,
unsigned value_record_index,
hb_set_t& visited)
{
unsigned size = 0;
for (unsigned i : device_table_indices)
{
OT::Layout::GPOS_impl::Value* record = &values[value_record_index + i];
unsigned record_position = ((char*) record) - ((char*) this);
unsigned* obj_idx;
if (!device_tables.has (record_position, &obj_idx)) continue;
size += c.graph.find_subgraph_size (*obj_idx, visited);
}
return size;
}
unsigned size_of (gsubgpos_graph_context_t& c,
unsigned this_index,
const void* offset) const
{
const unsigned id = c.graph.index_for_offset (this_index, offset);
return c.graph.vertices_[id].table_size ();
}
};
struct PairPos : public OT::Layout::GPOS_impl::PairPos
{
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
switch (u.format) {
case 1:
return ((PairPosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
case 2:
return ((PairPosFormat2*)(&u.format2))->split_subtables (c, parent_index, this_index);
#ifndef HB_NO_BEYOND_64K
case 3: HB_FALLTHROUGH;
case 4: HB_FALLTHROUGH;
// Don't split 24-bit PairPos subtables.
#endif
default:
return hb_vector_t<unsigned> ();
}
}
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < u.format.get_size ()) return false;
hb_barrier ();
switch (u.format) {
case 1:
return ((PairPosFormat1*)(&u.format1))->sanitize (vertex);
case 2:
return ((PairPosFormat2*)(&u.format2))->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
case 3: HB_FALLTHROUGH;
case 4: HB_FALLTHROUGH;
#endif
default:
// We don't handle formats 3 and 4 here.
return false;
}
}
};
}
#endif // GRAPH_PAIRPOS_GRAPH_HH

@@ -0,0 +1,277 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#ifndef GRAPH_SERIALIZE_HH
#define GRAPH_SERIALIZE_HH
namespace graph {
struct overflow_record_t
{
unsigned parent;
unsigned child;
bool operator != (const overflow_record_t& o) const
{ return !(*this == o); }
inline bool operator == (const overflow_record_t& o) const
{
return parent == o.parent &&
child == o.child;
}
inline uint32_t hash () const
{
uint32_t current = 0;
current = current * 31 + hb_hash (parent);
current = current * 31 + hb_hash (child);
return current;
}
};
inline
int64_t compute_offset (
const graph_t& graph,
unsigned parent_idx,
const hb_serialize_context_t::object_t::link_t& link)
{
const auto& parent = graph.vertices_[parent_idx];
const auto& child = graph.vertices_[link.objidx];
int64_t offset = 0;
switch ((hb_serialize_context_t::whence_t) link.whence) {
case hb_serialize_context_t::whence_t::Head:
offset = child.start - parent.start; break;
case hb_serialize_context_t::whence_t::Tail:
offset = child.start - parent.end; break;
case hb_serialize_context_t::whence_t::Absolute:
offset = child.start; break;
}
assert (offset >= link.bias);
offset -= link.bias;
return offset;
}
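/*
 * E.g. (illustrative): with parent.start = 100, parent.end = 150 and
 * child.start = 300, a Head-anchored link computes offset 200 and a
 * Tail-anchored link computes 150, before link.bias is subtracted.
 */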
inline
bool is_valid_offset (int64_t offset,
const hb_serialize_context_t::object_t::link_t& link)
{
if (unlikely (!link.width))
// Virtual links can't overflow.
return link.is_signed || offset >= 0;
if (link.is_signed)
{
if (link.width == 4)
return offset >= -((int64_t) 1 << 31) && offset < ((int64_t) 1 << 31);
else
return offset >= -(1 << 15) && offset < (1 << 15);
}
else
{
if (link.width == 4)
return offset >= 0 && offset < ((int64_t) 1 << 32);
else if (link.width == 3)
return offset >= 0 && offset < ((int32_t) 1 << 24);
else
return offset >= 0 && offset < (1 << 16);
}
}
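/*
 * Summary of the ranges above (illustrative): an unsigned 2-byte offset
 * must land in [0, 1 << 16), a 3-byte offset in [0, 1 << 24), and a
 * signed 2-byte offset in [-(1 << 15), 1 << 15). Width-0 virtual links
 * only constrain ordering, so they can never overflow.
 */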
/*
* Will any offsets overflow on graph when it's serialized?
*/
inline bool
will_overflow (graph_t& graph,
hb_vector_t<overflow_record_t>* overflows = nullptr)
{
if (overflows) overflows->resize (0);
graph.update_positions ();
hb_hashmap_t<overflow_record_t*, bool> record_set;
const auto& vertices = graph.vertices_;
for (int parent_idx = vertices.length - 1; parent_idx >= 0; parent_idx--)
{
// Don't need to check virtual links for overflow
for (const auto& link : vertices.arrayZ[parent_idx].obj.real_links)
{
int64_t offset = compute_offset (graph, parent_idx, link);
if (likely (is_valid_offset (offset, link)))
continue;
if (!overflows) return true;
overflow_record_t r;
r.parent = parent_idx;
r.child = link.objidx;
if (record_set.has(&r)) continue; // don't keep duplicate overflows.
overflows->push (r);
record_set.set(&r, true);
}
}
if (!overflows) return false;
return overflows->length;
}
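/*
 * Usage sketch (hypothetical caller; `graph` is an already-built graph_t):
 *
 *   hb_vector_t<overflow_record_t> overflows;
 *   if (will_overflow (graph, &overflows))
 *     print_overflows (graph, overflows); // then re-sort or split subtables
 */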
inline
void print_overflows (graph_t& graph,
const hb_vector_t<overflow_record_t>& overflows)
{
if (!DEBUG_ENABLED(SUBSET_REPACK)) return;
graph.update_parents ();
int limit = 10;
for (const auto& o : overflows)
{
if (!limit--) break;
const auto& parent = graph.vertices_[o.parent];
const auto& child = graph.vertices_[o.child];
DEBUG_MSG (SUBSET_REPACK, nullptr,
" overflow from "
"%4u (%4u in, %4u out, space %2u) => "
"%4u (%4u in, %4u out, space %2u)",
o.parent,
parent.incoming_edges (),
parent.obj.real_links.length + parent.obj.virtual_links.length,
graph.space_for (o.parent),
o.child,
child.incoming_edges (),
child.obj.real_links.length + child.obj.virtual_links.length,
graph.space_for (o.child));
}
if (overflows.length > 10) {
DEBUG_MSG (SUBSET_REPACK, nullptr, " ... plus %u more overflows.", overflows.length - 10);
}
}
template <typename O> inline void
serialize_link_of_type (const hb_serialize_context_t::object_t::link_t& link,
char* head,
unsigned size,
hb_serialize_context_t* c)
{
assert(link.position + link.width <= size);
OT::Offset<O>* offset = reinterpret_cast<OT::Offset<O>*> (head + link.position);
*offset = 0;
c->add_link (*offset,
// The serializer has an extra nil object at the start of the
// object array, so all ids are +1 of what our ids are.
link.objidx + 1,
(hb_serialize_context_t::whence_t) link.whence,
link.bias);
}
inline
void serialize_link (const hb_serialize_context_t::object_t::link_t& link,
char* head,
unsigned size,
hb_serialize_context_t* c)
{
switch (link.width)
{
case 0:
// Virtual links aren't serialized.
return;
case 4:
if (link.is_signed)
{
serialize_link_of_type<OT::HBINT32> (link, head, size, c);
} else {
serialize_link_of_type<OT::HBUINT32> (link, head, size, c);
}
return;
case 2:
if (link.is_signed)
{
serialize_link_of_type<OT::HBINT16> (link, head, size, c);
} else {
serialize_link_of_type<OT::HBUINT16> (link, head, size, c);
}
return;
case 3:
serialize_link_of_type<OT::HBUINT24> (link, head, size, c);
return;
default:
// Unexpected link width.
assert (0);
}
}
/*
* serialize graph into the provided serialization buffer.
*/
inline hb_blob_t* serialize (const graph_t& graph)
{
hb_vector_t<char> buffer;
size_t size = graph.total_size_in_bytes ();
if (!size) return hb_blob_get_empty ();
if (!buffer.alloc (size)) {
DEBUG_MSG (SUBSET_REPACK, nullptr, "Unable to allocate output buffer.");
return nullptr;
}
hb_serialize_context_t c((void *) buffer, size);
c.start_serialize<void> ();
const auto& vertices = graph.vertices_;
for (unsigned i = 0; i < vertices.length; i++) {
c.push ();
size_t size = vertices[i].obj.tail - vertices[i].obj.head;
char* start = c.allocate_size <char> (size);
if (!start) {
DEBUG_MSG (SUBSET_REPACK, nullptr, "Buffer out of space.");
return nullptr;
}
hb_memcpy (start, vertices[i].obj.head, size);
// Only real links need to be serialized.
for (const auto& link : vertices[i].obj.real_links)
serialize_link (link, start, size, &c);
// All duplications are already encoded in the graph, so don't
// enable sharing during packing.
c.pop_pack (false);
}
c.end_serialize ();
if (c.in_error ()) {
DEBUG_MSG (SUBSET_REPACK, nullptr, "Error during serialization. Err flag: %d",
c.errors);
return nullptr;
}
return c.copy_blob ();
}
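// Usage sketch (hypothetical caller; assumes `graph` has already been
// repacked so that will_overflow (graph) is false):
//
//   hb_blob_t *blob = graph::serialize (graph);
//   if (blob) { hb_blob_destroy (blob); }  // after consuming the bytes
//
// A nullptr return signals an allocation failure or an error hit during
// serialization.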
} // namespace graph
#endif // GRAPH_SERIALIZE_HH

@@ -0,0 +1,69 @@
/*
* Copyright © 2022 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Garret Rieger
*/
#ifndef GRAPH_SPLIT_HELPERS_HH
#define GRAPH_SPLIT_HELPERS_HH
namespace graph {
template<typename Context>
HB_INTERNAL
hb_vector_t<unsigned> actuate_subtable_split (Context& split_context,
const hb_vector_t<unsigned>& split_points)
{
hb_vector_t<unsigned> new_objects;
if (!split_points)
return new_objects;
for (unsigned i = 0; i < split_points.length; i++)
{
unsigned start = split_points[i];
unsigned end = (i < split_points.length - 1)
? split_points[i + 1]
: split_context.original_count ();
unsigned id = split_context.clone_range (start, end);
if (id == (unsigned) -1)
{
new_objects.reset ();
new_objects.allocated = -1; // mark error
return new_objects;
}
new_objects.push (id);
}
if (!split_context.shrink (split_points[0]))
{
new_objects.reset ();
new_objects.allocated = -1; // mark error
}
return new_objects;
}
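/*
 * E.g. (illustrative): with split_points = {3, 7} and original_count ()
 * == 10, the context clones ranges [3, 7) and [7, 10) into new objects
 * and then shrinks the original subtable to [0, 3).
 */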
}
#endif // GRAPH_SPLIT_HELPERS_HH

@@ -0,0 +1,99 @@
/*
* Copyright © 2018 Ebrahim Byagowi
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*/
#ifndef HB_AAT_LAYOUT_ANKR_TABLE_HH
#define HB_AAT_LAYOUT_ANKR_TABLE_HH
#include "hb-aat-layout-common.hh"
/*
* ankr -- Anchor Point
* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html
*/
#define HB_AAT_TAG_ankr HB_TAG('a','n','k','r')
namespace AAT {
using namespace OT;
struct Anchor
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
public:
FWORD xCoordinate;
FWORD yCoordinate;
public:
DEFINE_SIZE_STATIC (4);
};
typedef Array32Of<Anchor> GlyphAnchors;
struct ankr
{
static constexpr hb_tag_t tableTag = HB_AAT_TAG_ankr;
const Anchor &get_anchor (hb_codepoint_t glyph_id,
unsigned int i,
unsigned int num_glyphs) const
{
const NNOffset16To<GlyphAnchors> *offset = (this+lookupTable).get_value (glyph_id, num_glyphs);
if (!offset)
return Null (Anchor);
const GlyphAnchors &anchors = &(this+anchorData) + *offset;
return anchors[i];
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
hb_barrier () &&
version == 0 &&
c->check_range (this, anchorData) &&
lookupTable.sanitize (c, this, &(this+anchorData))));
}
protected:
HBUINT16 version; /* Version number (set to zero) */
HBUINT16 flags; /* Flags (currently unused; set to zero) */
Offset32To<Lookup<NNOffset16To<GlyphAnchors>>>
lookupTable; /* Offset to the table's lookup table */
NNOffset32To<HBUINT8>
anchorData; /* Offset to the glyph data table */
public:
DEFINE_SIZE_STATIC (12);
};
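/* Usage sketch (hypothetical caller):
 *
 *   const AAT::Anchor &a = ankr_table->get_anchor (gid, idx, num_glyphs);
 *   // a.xCoordinate / a.yCoordinate are in FUnits; Null(Anchor) is
 *   // returned when the glyph has no anchor data.
 */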
} /* namespace AAT */
#endif /* HB_AAT_LAYOUT_ANKR_TABLE_HH */

@@ -0,0 +1,159 @@
/*
* Copyright © 2018 Ebrahim Byagowi
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*/
#ifndef HB_AAT_LAYOUT_BSLN_TABLE_HH
#define HB_AAT_LAYOUT_BSLN_TABLE_HH
#include "hb-aat-layout-common.hh"
/*
* bsln -- Baseline
* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html
*/
#define HB_AAT_TAG_bsln HB_TAG('b','s','l','n')
namespace AAT {
struct BaselineTableFormat0Part
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
protected:
// Roman, Ideographic centered, Ideographic low, Hanging and Math
// are the default defined ones, but any other may be accessed as well.
HBINT16 deltas[32]; /* These are the FUnit distance deltas from
* the font's natural baseline to the other
* baselines used in the font. */
public:
DEFINE_SIZE_STATIC (64);
};
struct BaselineTableFormat1Part
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
lookupTable.sanitize (c)));
}
protected:
HBINT16 deltas[32]; /* ditto */
Lookup<HBUINT16>
lookupTable; /* Lookup table that maps glyphs to their
* baseline values. */
public:
DEFINE_SIZE_MIN (66);
};
struct BaselineTableFormat2Part
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
protected:
HBGlyphID16 stdGlyph; /* The specific glyph index number in this
* font that is used to set the baseline values.
* This is the standard glyph.
* This glyph must contain a set of control points
* (whose numbers are contained in the ctlPoints field)
* that are used to determine baseline distances. */
HBUINT16 ctlPoints[32]; /* Set of control point numbers,
* associated with the standard glyph.
* A value of 0xFFFF means there is no corresponding
* control point in the standard glyph. */
public:
DEFINE_SIZE_STATIC (66);
};
struct BaselineTableFormat3Part
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) && lookupTable.sanitize (c)));
}
protected:
HBGlyphID16 stdGlyph; /* ditto */
HBUINT16 ctlPoints[32]; /* ditto */
Lookup<HBUINT16>
lookupTable; /* Lookup table that maps glyphs to their
* baseline values. */
public:
DEFINE_SIZE_MIN (68);
};
struct bsln
{
static constexpr hb_tag_t tableTag = HB_AAT_TAG_bsln;
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (unlikely (!(c->check_struct (this) && defaultBaseline < 32)))
return_trace (false);
hb_barrier ();
switch (format)
{
case 0: return_trace (parts.format0.sanitize (c));
case 1: return_trace (parts.format1.sanitize (c));
case 2: return_trace (parts.format2.sanitize (c));
case 3: return_trace (parts.format3.sanitize (c));
default:return_trace (true);
}
}
protected:
FixedVersion<>version; /* Version number of the Baseline table. */
HBUINT16 format; /* Format of the baseline table. Only one baseline
* format may be selected for the font. */
HBUINT16 defaultBaseline;/* Default baseline value for all glyphs.
* This value can be from 0 through 31. */
union {
// Distance-Based Formats
BaselineTableFormat0Part format0;
BaselineTableFormat1Part format1;
// Control Point-based Formats
BaselineTableFormat2Part format2;
BaselineTableFormat3Part format3;
} parts;
public:
DEFINE_SIZE_MIN (8);
};
} /* namespace AAT */
#endif /* HB_AAT_LAYOUT_BSLN_TABLE_HH */

File diff suppressed because it is too large

@@ -0,0 +1,224 @@
/*
* Copyright © 2018 Ebrahim Byagowi
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*/
#ifndef HB_AAT_LAYOUT_FEAT_TABLE_HH
#define HB_AAT_LAYOUT_FEAT_TABLE_HH
#include "hb-aat-layout-common.hh"
/*
* feat -- Feature Name
* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6feat.html
*/
#define HB_AAT_TAG_feat HB_TAG('f','e','a','t')
namespace AAT {
struct SettingName
{
friend struct FeatureName;
int cmp (hb_aat_layout_feature_selector_t key) const
{ return (int) key - (int) setting; }
hb_aat_layout_feature_selector_t get_selector () const
{ return (hb_aat_layout_feature_selector_t) (unsigned) setting; }
hb_aat_layout_feature_selector_info_t get_info (hb_aat_layout_feature_selector_t default_selector) const
{
return {
nameIndex,
(hb_aat_layout_feature_selector_t) (unsigned int) setting,
default_selector == HB_AAT_LAYOUT_FEATURE_SELECTOR_INVALID
? (hb_aat_layout_feature_selector_t) (setting + 1)
: default_selector,
0
};
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
protected:
HBUINT16 setting; /* The setting. */
NameID nameIndex; /* The name table index for the setting's name. */
public:
DEFINE_SIZE_STATIC (4);
};
DECLARE_NULL_NAMESPACE_BYTES (AAT, SettingName);
struct feat;
struct FeatureName
{
int cmp (hb_aat_layout_feature_type_t key) const
{ return (int) key - (int) feature; }
enum {
Exclusive = 0x8000, /* If set, the feature settings are mutually exclusive. */
NotDefault = 0x4000, /* If clear, then the setting with an index of 0 in
* the setting name array for this feature should
* be taken as the default for the feature
* (if one is required). If set, then bits 0-15 of this
* featureFlags field contain the index of the setting
* which is to be taken as the default. */
IndexMask = 0x00FF /* If bits 30 and 31 are set, then these sixteen bits
* indicate the index of the setting in the setting name
* array for this feature which should be taken
* as the default. */
};
unsigned int get_selector_infos (unsigned int start_offset,
unsigned int *selectors_count, /* IN/OUT. May be NULL. */
hb_aat_layout_feature_selector_info_t *selectors, /* OUT. May be NULL. */
unsigned int *pdefault_index, /* OUT. May be NULL. */
const void *base) const
{
hb_array_t<const SettingName> settings_table = (base+settingTableZ).as_array (nSettings);
static_assert (Index::NOT_FOUND_INDEX == HB_AAT_LAYOUT_NO_SELECTOR_INDEX, "");
hb_aat_layout_feature_selector_t default_selector = HB_AAT_LAYOUT_FEATURE_SELECTOR_INVALID;
unsigned int default_index = Index::NOT_FOUND_INDEX;
if (featureFlags & Exclusive)
{
default_index = (featureFlags & NotDefault) ? featureFlags & IndexMask : 0;
default_selector = settings_table[default_index].get_selector ();
}
if (pdefault_index)
*pdefault_index = default_index;
if (selectors_count)
{
+ settings_table.sub_array (start_offset, selectors_count)
| hb_map ([=] (const SettingName& setting) { return setting.get_info (default_selector); })
| hb_sink (hb_array (selectors, *selectors_count))
;
}
return settings_table.length;
}
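  /* E.g. (illustrative): featureFlags = Exclusive | NotDefault | 2 marks
   * settings_table[2] as the default selector; with NotDefault clear, the
   * default falls back to settings_table[0]. Non-exclusive features
   * report HB_AAT_LAYOUT_NO_SELECTOR_INDEX. */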
hb_aat_layout_feature_type_t get_feature_type () const
{ return (hb_aat_layout_feature_type_t) (unsigned int) feature; }
hb_ot_name_id_t get_feature_name_id () const { return nameIndex; }
bool is_exclusive () const { return featureFlags & Exclusive; }
/* A FeatureName with no settings is meaningless */
bool has_data () const { return nSettings; }
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
hb_barrier () &&
(base+settingTableZ).sanitize (c, nSettings)));
}
protected:
HBUINT16 feature; /* Feature type. */
HBUINT16 nSettings; /* The number of records in the setting name array. */
NNOffset32To<UnsizedArrayOf<SettingName>>
settingTableZ; /* Offset in bytes from the beginning of this table to
* this feature's setting name array. The actual type of
* record this offset refers to will depend on the
* exclusivity value, as described below. */
HBUINT16 featureFlags; /* Single-bit flags associated with the feature type. */
HBINT16 nameIndex; /* The name table index for the feature's name.
* This index has values greater than 255 and
* less than 32768. */
public:
DEFINE_SIZE_STATIC (12);
};
struct feat
{
static constexpr hb_tag_t tableTag = HB_AAT_TAG_feat;
bool has_data () const { return version.to_int (); }
unsigned int get_feature_types (unsigned int start_offset,
unsigned int *count,
hb_aat_layout_feature_type_t *features) const
{
if (count)
{
+ namesZ.as_array (featureNameCount).sub_array (start_offset, count)
| hb_map (&FeatureName::get_feature_type)
| hb_sink (hb_array (features, *count))
;
}
return featureNameCount;
}
bool exposes_feature (hb_aat_layout_feature_type_t feature_type) const
{ return get_feature (feature_type).has_data (); }
const FeatureName& get_feature (hb_aat_layout_feature_type_t feature_type) const
{ return namesZ.bsearch (featureNameCount, feature_type); }
hb_ot_name_id_t get_feature_name_id (hb_aat_layout_feature_type_t feature) const
{ return get_feature (feature).get_feature_name_id (); }
unsigned int get_selector_infos (hb_aat_layout_feature_type_t feature_type,
unsigned int start_offset,
unsigned int *selectors_count, /* IN/OUT. May be NULL. */
hb_aat_layout_feature_selector_info_t *selectors, /* OUT. May be NULL. */
unsigned int *default_index /* OUT. May be NULL. */) const
{
return get_feature (feature_type).get_selector_infos (start_offset, selectors_count, selectors,
default_index, this);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
hb_barrier () &&
version.major == 1 &&
namesZ.sanitize (c, featureNameCount, this)));
}
protected:
FixedVersion<>version; /* Version number of the feature name table
* (0x00010000 for the current version). */
HBUINT16 featureNameCount;
/* The number of entries in the feature name array. */
HBUINT16 reserved1; /* Reserved (set to zero). */
HBUINT32 reserved2; /* Reserved (set to zero). */
SortedUnsizedArrayOf<FeatureName>
namesZ; /* The feature name array. */
public:
DEFINE_SIZE_ARRAY (12, namesZ);
};
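/* Usage sketch (hypothetical caller; the usual two-pass sizing idiom):
 *
 *   unsigned count = feat_table.get_feature_types (0, nullptr, nullptr);
 *   hb_vector_t<hb_aat_layout_feature_type_t> types;
 *   if (types.resize (count))
 *     feat_table.get_feature_types (0, &count, types.arrayZ);
 */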
} /* namespace AAT */
#endif /* HB_AAT_LAYOUT_FEAT_TABLE_HH */

@@ -0,0 +1,420 @@
/*
* Copyright © 2018 Ebrahim Byagowi
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*/
#ifndef HB_AAT_LAYOUT_JUST_TABLE_HH
#define HB_AAT_LAYOUT_JUST_TABLE_HH
#include "hb-aat-layout-common.hh"
#include "hb-ot-layout.hh"
#include "hb-open-type.hh"
#include "hb-aat-layout-morx-table.hh"
/*
* just -- Justification
* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6just.html
*/
#define HB_AAT_TAG_just HB_TAG('j','u','s','t')
namespace AAT {
using namespace OT;
struct ActionSubrecordHeader
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
HBUINT16 actionClass; /* The JustClass value associated with this
* ActionSubrecord. */
HBUINT16 actionType; /* The type of postcompensation action. */
HBUINT16 actionLength; /* Length of this ActionSubrecord record, which
* must be a multiple of 4. */
public:
DEFINE_SIZE_STATIC (6);
};
struct DecompositionAction
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
ActionSubrecordHeader
header;
F16DOT16 lowerLimit; /* If the distance factor is less than this value,
* then the ligature is decomposed. */
F16DOT16 upperLimit; /* If the distance factor is greater than this value,
* then the ligature is decomposed. */
HBUINT16 order; /* Numerical order in which this ligature will
* be decomposed; you may want infrequent ligatures
* to decompose before more frequent ones. The ligatures
* on the line of text will decompose in increasing
* value of this field. */
Array16Of<HBUINT16>
decomposedglyphs;
/* Number of 16-bit glyph indexes that follow;
* the ligature will be decomposed into these glyphs.
*
* Array of decomposed glyphs. */
public:
DEFINE_SIZE_ARRAY (18, decomposedglyphs);
};
struct UnconditionalAddGlyphAction
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
protected:
ActionSubrecordHeader
header;
HBGlyphID16 addGlyph; /* Glyph that should be added if the distance factor
* is growing. */
public:
DEFINE_SIZE_STATIC (8);
};
struct ConditionalAddGlyphAction
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
protected:
ActionSubrecordHeader
header;
F16DOT16 substThreshold; /* Distance growth factor (in ems) at which
* this glyph is replaced and the growth factor
* recalculated. */
HBGlyphID16 addGlyph; /* Glyph to be added as kashida. If this value is
* 0xFFFF, no extra glyph will be added. Note that
* generally when a glyph is added, justification
* will need to be redone. */
HBGlyphID16 substGlyph; /* Glyph to be substituted for this glyph if the
* growth factor equals or exceeds the value of
* substThreshold. */
public:
DEFINE_SIZE_STATIC (14);
};
struct DuctileGlyphAction
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
protected:
ActionSubrecordHeader
header;
HBUINT32 variationAxis; /* The 4-byte tag identifying the ductile axis.
* This would normally be 0x64756374 ('duct'),
* but you may use any axis the font contains. */
F16DOT16 minimumLimit; /* The lowest value for the ductility axis that
* still yields an acceptable appearance. Normally
* this will be 1.0. */
F16DOT16 noStretchValue; /* This is the default value that corresponds to
* no change in appearance. Normally, this will
* be 1.0. */
F16DOT16 maximumLimit; /* The highest value for the ductility axis that
* still yields an acceptable appearance. */
public:
DEFINE_SIZE_STATIC (22);
};
struct RepeatedAddGlyphAction
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
protected:
ActionSubrecordHeader
header;
HBUINT16 flags; /* Currently unused; set to 0. */
HBGlyphID16 glyph; /* Glyph that should be added if the distance factor
* is growing. */
public:
DEFINE_SIZE_STATIC (10);
};
struct ActionSubrecord
{
unsigned int get_length () const { return u.header.actionLength; }
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (unlikely (!c->check_struct (this)))
return_trace (false);
hb_barrier ();
switch (u.header.actionType)
{
case 0: hb_barrier (); return_trace (u.decompositionAction.sanitize (c));
case 1: hb_barrier (); return_trace (u.unconditionalAddGlyphAction.sanitize (c));
case 2: hb_barrier (); return_trace (u.conditionalAddGlyphAction.sanitize (c));
// case 3: hb_barrier (); return_trace (u.stretchGlyphAction.sanitize (c));
case 4: hb_barrier (); return_trace (u.ductileGlyphAction.sanitize (c));
case 5: hb_barrier (); return_trace (u.repeatedAddGlyphAction.sanitize (c));
default: return_trace (true);
}
}
protected:
union {
ActionSubrecordHeader header;
DecompositionAction decompositionAction;
UnconditionalAddGlyphAction unconditionalAddGlyphAction;
ConditionalAddGlyphAction conditionalAddGlyphAction;
/* StretchGlyphAction stretchGlyphAction; -- Not supported by CoreText */
DuctileGlyphAction ductileGlyphAction;
RepeatedAddGlyphAction repeatedAddGlyphAction;
} u; /* Data. The format of this data depends on
* the value of the actionType field. */
public:
DEFINE_SIZE_UNION (6, header);
};
struct PostcompensationActionChain
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (unlikely (!c->check_struct (this)))
return_trace (false);
hb_barrier ();
unsigned int offset = min_size;
for (unsigned int i = 0; i < count; i++)
{
const ActionSubrecord& subrecord = StructAtOffset<ActionSubrecord> (this, offset);
if (unlikely (!subrecord.sanitize (c))) return_trace (false);
offset += subrecord.get_length ();
}
return_trace (true);
}
protected:
HBUINT32 count;
public:
DEFINE_SIZE_STATIC (4);
};
struct JustWidthDeltaEntry
{
enum Flags
{
Reserved1 =0xE000,/* Reserved. You should set these bits to zero. */
UnlimiteGap =0x1000,/* The glyph can take unlimited gap. When this
* glyph participates in the justification process,
* it and any other glyphs on the line having this
* bit set absorb all the remaining gap. */
Reserved2 =0x0FF0,/* Reserved. You should set these bits to zero. */
Priority =0x000F /* The justification priority of the glyph. */
};
enum Priority
{
Kashida = 0, /* Kashida priority. This is the highest priority
* during justification. */
Whitespace = 1, /* Whitespace priority. Any whitespace glyphs (as
* identified in the glyph properties table) will
* get this priority. */
InterCharacter = 2, /* Inter-character priority. Give this to any
* remaining glyphs. */
NullPriority = 3 /* Null priority. You should set this priority for
* glyphs that only participate in justification
* after the above priorities. Normally all glyphs
* have one of the previous three values. If you
* don't want a glyph to participate in justification,
* and you don't want to set its factors to zero,
* you may instead assign it to the null priority. */
};
protected:
F16DOT16 beforeGrowLimit;/* The ratio by which the advance width of the
* glyph is permitted to grow on the left or top side. */
F16DOT16 beforeShrinkLimit;
/* The ratio by which the advance width of the
* glyph is permitted to shrink on the left or top side. */
F16DOT16 afterGrowLimit; /* The ratio by which the advance width of the glyph
* is permitted to grow on the right or bottom side. */
F16DOT16 afterShrinkLimit;
/* The ratio by which the advance width of the glyph
* is at most permitted to shrink on the right or
* bottom side. */
HBUINT16 growFlags; /* Flags controlling the grow case. */
HBUINT16 shrinkFlags; /* Flags controlling the shrink case. */
public:
DEFINE_SIZE_STATIC (20);
};
struct WidthDeltaPair
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
protected:
HBUINT32 justClass; /* The justification category associated
* with the wdRecord field. Only 7 bits of
* this field are used. (The other bits are
* used as padding to guarantee longword
* alignment of the following record). */
JustWidthDeltaEntry
wdRecord; /* The actual width delta record. */
public:
DEFINE_SIZE_STATIC (24);
};
typedef OT::Array32Of<WidthDeltaPair> WidthDeltaCluster;
struct JustificationCategory
{
typedef void EntryData;
enum Flags
{
SetMark =0x8000,/* If set, make the current glyph the marked
* glyph. */
DontAdvance =0x4000,/* If set, don't advance to the next glyph before
* going to the new state. */
MarkCategory =0x3F80,/* The justification category for the marked
* glyph if nonzero. */
CurrentCategory =0x007F /* The justification category for the current
* glyph if nonzero. */
};
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
morphHeader.sanitize (c) &&
stHeader.sanitize (c)));
}
protected:
ChainSubtable<ObsoleteTypes>
morphHeader; /* Metamorphosis-style subtable header. */
StateTable<ObsoleteTypes, EntryData>
stHeader; /* The justification insertion state table header */
public:
DEFINE_SIZE_STATIC (30);
};
struct JustificationHeader
{
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
justClassTable.sanitize (c, base, base) &&
wdcTable.sanitize (c, base) &&
pcTable.sanitize (c, base) &&
lookupTable.sanitize (c, base)));
}
protected:
Offset16To<JustificationCategory>
justClassTable; /* Offset to the justification category state table. */
Offset16To<WidthDeltaCluster>
wdcTable; /* Offset from start of justification table to start
* of the subtable containing the width delta factors
* for the glyphs in your font.
*
* The width delta clusters table. */
Offset16To<PostcompensationActionChain>
pcTable; /* Offset from start of justification table to start
* of postcompensation subtable (set to zero if none).
*
* The postcompensation subtable, if present in the font. */
Lookup<Offset16To<WidthDeltaCluster>>
lookupTable; /* Lookup table associating glyphs with width delta
* clusters. See the description of Width Delta Clusters
* table for details on how to interpret the lookup values. */
public:
DEFINE_SIZE_MIN (8);
};
struct just
{
static constexpr hb_tag_t tableTag = HB_AAT_TAG_just;
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
hb_barrier () &&
version.major == 1 &&
horizData.sanitize (c, this, this) &&
vertData.sanitize (c, this, this)));
}
protected:
FixedVersion<>version; /* Version of the justification table
* (0x00010000u for version 1.0). */
HBUINT16 format; /* Format of the justification table (set to 0). */
Offset16To<JustificationHeader>
horizData; /* Byte offset from the start of the justification table
* to the header for tables that contain justification
* information for horizontal text.
* If you are not including this information,
* store 0. */
Offset16To<JustificationHeader>
vertData; /* ditto, vertical */
public:
DEFINE_SIZE_STATIC (10);
};
} /* namespace AAT */
#endif /* HB_AAT_LAYOUT_JUST_TABLE_HH */

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,174 @@
/*
* Copyright © 2019 Ebrahim Byagowi
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*/
#ifndef HB_AAT_LAYOUT_OPBD_TABLE_HH
#define HB_AAT_LAYOUT_OPBD_TABLE_HH
#include "hb-aat-layout-common.hh"
#include "hb-open-type.hh"
/*
* opbd -- Optical Bounds
* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html
*/
#define HB_AAT_TAG_opbd HB_TAG('o','p','b','d')
namespace AAT {
struct OpticalBounds
{
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (c->check_struct (this));
}
FWORD leftSide;
FWORD topSide;
FWORD rightSide;
FWORD bottomSide;
public:
DEFINE_SIZE_STATIC (8);
};
struct opbdFormat0
{
bool get_bounds (hb_font_t *font, hb_codepoint_t glyph_id,
hb_glyph_extents_t *extents, const void *base) const
{
const Offset16To<OpticalBounds> *bounds_offset = lookupTable.get_value (glyph_id, font->face->get_num_glyphs ());
if (!bounds_offset) return false;
const OpticalBounds &bounds = base+*bounds_offset;
if (extents)
*extents = {
font->em_scale_x (bounds.leftSide),
font->em_scale_y (bounds.topSide),
font->em_scale_x (bounds.rightSide),
font->em_scale_y (bounds.bottomSide)
};
return true;
}
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) && lookupTable.sanitize (c, base)));
}
protected:
Lookup<Offset16To<OpticalBounds>>
lookupTable; /* Lookup table associating glyphs with the four
* int16 values for the left-side, top-side,
* right-side, and bottom-side optical bounds. */
public:
DEFINE_SIZE_MIN (2);
};
struct opbdFormat1
{
bool get_bounds (hb_font_t *font, hb_codepoint_t glyph_id,
hb_glyph_extents_t *extents, const void *base) const
{
const Offset16To<OpticalBounds> *bounds_offset = lookupTable.get_value (glyph_id, font->face->get_num_glyphs ());
if (!bounds_offset) return false;
const OpticalBounds &bounds = base+*bounds_offset;
hb_position_t left = 0, top = 0, right = 0, bottom = 0, ignore;
if (font->get_glyph_contour_point (glyph_id, bounds.leftSide, &left, &ignore) ||
font->get_glyph_contour_point (glyph_id, bounds.topSide, &ignore, &top) ||
font->get_glyph_contour_point (glyph_id, bounds.rightSide, &right, &ignore) ||
font->get_glyph_contour_point (glyph_id, bounds.bottomSide, &ignore, &bottom))
{
if (extents)
*extents = {left, top, right, bottom};
return true;
}
return false;
}
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) && lookupTable.sanitize (c, base)));
}
protected:
Lookup<Offset16To<OpticalBounds>>
lookupTable; /* Lookup table associating glyphs with the four
* int16 values for the left-side, top-side,
* right-side, and bottom-side optical bounds. */
public:
DEFINE_SIZE_MIN (2);
};
struct opbd
{
static constexpr hb_tag_t tableTag = HB_AAT_TAG_opbd;
bool get_bounds (hb_font_t *font, hb_codepoint_t glyph_id,
hb_glyph_extents_t *extents) const
{
switch (format)
{
case 0: hb_barrier (); return u.format0.get_bounds (font, glyph_id, extents, this);
case 1: hb_barrier (); return u.format1.get_bounds (font, glyph_id, extents, this);
default:return false;
}
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (unlikely (!c->check_struct (this) || version.major != 1))
return_trace (false);
hb_barrier ();
switch (format)
{
case 0: hb_barrier (); return_trace (u.format0.sanitize (c, this));
case 1: hb_barrier (); return_trace (u.format1.sanitize (c, this));
default:return_trace (true);
}
}
protected:
FixedVersion<>version; /* Version number of the optical bounds
* table (0x00010000 for the current version). */
HBUINT16 format; /* Format of the optical bounds table.
* Format 0 indicates distance and Format 1 indicates
* control point. */
union {
opbdFormat0 format0;
opbdFormat1 format1;
} u;
public:
DEFINE_SIZE_MIN (8);
};
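/* Usage sketch (hypothetical caller):
 *
 *   hb_glyph_extents_t extents;
 *   if (opbd_table.get_bounds (font, glyph_id, &extents))
 *     ...; // format 0 scaled the stored FUnits; format 1 resolved
 *          // contour points via font->get_glyph_contour_point ().
 */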
} /* namespace AAT */
#endif /* HB_AAT_LAYOUT_OPBD_TABLE_HH */

@@ -0,0 +1,275 @@
/*
* Copyright © 2018 Ebrahim Byagowi
* Copyright © 2018 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Behdad Esfahbod
*/
#ifndef HB_AAT_LAYOUT_TRAK_TABLE_HH
#define HB_AAT_LAYOUT_TRAK_TABLE_HH
#include "hb-aat-layout-common.hh"
#include "hb-ot-layout.hh"
#include "hb-open-type.hh"
#include "hb-ot-stat-table.hh"
/*
* trak -- Tracking
* https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6trak.html
*/
#define HB_AAT_TAG_trak HB_TAG('t','r','a','k')
namespace AAT {
struct TrackTableEntry
{
friend struct TrackData;
float get_track_value () const { return track.to_float (); }
float interpolate_at (unsigned int idx,
float ptem,
const void *base,
hb_array_t<const F16DOT16> size_table) const
{
const FWORD *values = (base+valuesZ).arrayZ;
float s0 = size_table[idx].to_float ();
float s1 = size_table[idx + 1].to_float ();
int v0 = values[idx];
int v1 = values[idx + 1];
// Deal with font bugs.
if (unlikely (s1 < s0))
{ hb_swap (s0, s1); hb_swap (v0, v1); }
if (unlikely (ptem < s0)) return v0;
if (unlikely (ptem > s1)) return v1;
if (unlikely (s0 == s1)) return (v0 + v1) * 0.5f;
float t = (ptem - s0) / (s1 - s0);
return v0 + t * (v1 - v0);
}
float get_value (float ptem,
const void *base,
hb_array_t<const F16DOT16> size_table) const
{
const FWORD *values = (base+valuesZ).arrayZ;
unsigned int n_sizes = size_table.length;
/*
* Choose size.
*/
if (!n_sizes) return 0.f;
if (n_sizes == 1) return values[0];
// At least two entries.
unsigned i;
for (i = 0; i < n_sizes; i++)
if (size_table[i].to_float () >= ptem)
break;
// Boundary conditions.
if (i == 0) return values[0];
if (i == n_sizes) return values[n_sizes - 1];
// Exact match.
if (size_table[i].to_float () == ptem) return values[i];
// Interpolate.
return interpolate_at (i - 1, ptem, base, size_table);
}
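  /* Worked example (illustrative): with size_table = {12.0, 24.0},
   * values = {-10, 0} and ptem = 18.0, interpolate_at yields
   * -10 + 0.5 * (0 - -10) = -5. */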
public:
bool sanitize (hb_sanitize_context_t *c,
const void *base,
unsigned int n_sizes) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
(valuesZ.sanitize (c, base, n_sizes))));
}
protected:
F16DOT16 track; /* Track value for this record. */
OT::NameID trackNameID; /* The 'name' table index for this track.
* (a short word or phrase like "loose"
* or "very tight") */
NNOffset16To<UnsizedArrayOf<FWORD>>
valuesZ; /* Offset from start of tracking table to
* per-size tracking values for this track. */
public:
DEFINE_SIZE_STATIC (8);
};
struct TrackData
{
float get_tracking (const void *base, float ptem, float track = 0.f) const
{
unsigned count = nTracks;
hb_array_t<const F16DOT16> size_table = (base+sizeTable).as_array (nSizes);
if (!count) return 0.f;
if (count == 1) return trackTable[0].get_value (ptem, base, size_table);
// At least two entries.
unsigned i = 0;
unsigned j = count - 1;
// Find the two entries that track is between.
while (i + 1 < count && trackTable[i + 1].get_track_value () <= track)
i++;
while (j > 0 && trackTable[j - 1].get_track_value () >= track)
j--;
// Exact match.
if (i == j) return trackTable[i].get_value (ptem, base, size_table);
// Interpolate.
float t0 = trackTable[i].get_track_value ();
float t1 = trackTable[j].get_track_value ();
float t = (track - t0) / (t1 - t0);
float a = trackTable[i].get_value (ptem, base, size_table);
float b = trackTable[j].get_value (ptem, base, size_table);
return a + t * (b - a);
}
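  /* The lookup above interpolates along two axes (illustrative summary):
   * first between the two TrackTableEntry rows whose track values bracket
   * `track`, then, inside each row, between the two point sizes that
   * bracket `ptem`. E.g. track = 0 between rows at -1 and +1 mixes the
   * two rows' per-size values 50/50. */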
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
hb_barrier () &&
sizeTable.sanitize (c, base, nSizes) &&
trackTable.sanitize (c, nTracks, base, nSizes)));
}
protected:
HBUINT16 nTracks; /* Number of separate tracks included in this table. */
HBUINT16 nSizes; /* Number of point sizes included in this table. */
NNOffset32To<UnsizedArrayOf<F16DOT16>>
sizeTable; /* Offset from start of the tracking table to
* Array[nSizes] of size values. */
UnsizedArrayOf<TrackTableEntry>
trackTable; /* Array[nTracks] of TrackTableEntry records. */
public:
DEFINE_SIZE_ARRAY (8, trackTable);
};
struct trak
{
static constexpr hb_tag_t tableTag = HB_AAT_TAG_trak;
bool has_data () const { return version.to_int (); }
hb_position_t get_h_tracking (hb_font_t *font, float track = 0.f) const
{
float ptem = font->ptem > 0.f ? font->ptem : HB_CORETEXT_DEFAULT_FONT_SIZE;
return font->em_scalef_x ((this+horizData).get_tracking (this, ptem, track));
}
hb_position_t get_v_tracking (hb_font_t *font, float track = 0.f) const
{
float ptem = font->ptem > 0.f ? font->ptem : HB_CORETEXT_DEFAULT_FONT_SIZE;
return font->em_scalef_y ((this+vertData).get_tracking (this, ptem, track));
}
hb_position_t get_tracking (hb_font_t *font, hb_direction_t dir, float track = 0.f) const
{
#ifndef HB_NO_STYLE
if (!font->face->table.STAT->has_data ())
return 0;
return HB_DIRECTION_IS_HORIZONTAL (dir) ?
get_h_tracking (font, track) :
get_v_tracking (font, track);
#else
return 0;
#endif
}
bool apply (hb_aat_apply_context_t *c, float track = 0.f) const
{
TRACE_APPLY (this);
float ptem = c->font->ptem;
if (unlikely (ptem <= 0.f))
{
/* https://developer.apple.com/documentation/coretext/1508745-ctfontcreatewithgraphicsfont */
ptem = HB_CORETEXT_DEFAULT_FONT_SIZE;
}
hb_buffer_t *buffer = c->buffer;
if (HB_DIRECTION_IS_HORIZONTAL (buffer->props.direction))
{
hb_position_t advance_to_add = get_h_tracking (c->font, track);
foreach_grapheme (buffer, start, end)
buffer->pos[start].x_advance += advance_to_add;
}
else
{
hb_position_t advance_to_add = get_v_tracking (c->font, track);
foreach_grapheme (buffer, start, end)
buffer->pos[start].y_advance += advance_to_add;
}
return_trace (true);
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) &&
hb_barrier () &&
version.major == 1 &&
horizData.sanitize (c, this, this) &&
vertData.sanitize (c, this, this)));
}
protected:
FixedVersion<>version; /* Version of the tracking table
* (0x00010000u for version 1.0). */
HBUINT16 format; /* Format of the tracking table (set to 0). */
Offset16To<TrackData>
horizData; /* Offset from start of tracking table to TrackData
* for horizontal text (or 0 if none). */
Offset16To<TrackData>
vertData; /* Offset from start of tracking table to TrackData
* for vertical text (or 0 if none). */
HBUINT16 reserved; /* Reserved. Set to 0. */
public:
DEFINE_SIZE_STATIC (12);
};
} /* namespace AAT */
#endif /* HB_AAT_LAYOUT_TRAK_TABLE_HH */

Some files were not shown because too many files have changed in this diff