initial commit, 4.5 stable
Some checks failed
🔗 GHA / 📊 Static checks (push) Has been cancelled
🔗 GHA / 🤖 Android (push) Has been cancelled
🔗 GHA / 🍏 iOS (push) Has been cancelled
🔗 GHA / 🐧 Linux (push) Has been cancelled
🔗 GHA / 🍎 macOS (push) Has been cancelled
🔗 GHA / 🏁 Windows (push) Has been cancelled
🔗 GHA / 🌐 Web (push) Has been cancelled
257  thirdparty/harfbuzz/src/graph/classdef-graph.hh  vendored  Normal file
@@ -0,0 +1,257 @@
/*
 * Copyright © 2022 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */

#include "graph.hh"
#include "../hb-ot-layout-common.hh"

#ifndef GRAPH_CLASSDEF_GRAPH_HH
#define GRAPH_CLASSDEF_GRAPH_HH

namespace graph {

struct ClassDefFormat1 : public OT::ClassDefFormat1_3<SmallTypes>
{
  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    constexpr unsigned min_size = OT::ClassDefFormat1_3<SmallTypes>::min_size;
    if (vertex_len < min_size) return false;
    hb_barrier ();
    return vertex_len >= min_size + classValue.get_size () - classValue.len.get_size ();
  }
};

struct ClassDefFormat2 : public OT::ClassDefFormat2_4<SmallTypes>
{
  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    constexpr unsigned min_size = OT::ClassDefFormat2_4<SmallTypes>::min_size;
    if (vertex_len < min_size) return false;
    hb_barrier ();
    return vertex_len >= min_size + rangeRecord.get_size () - rangeRecord.len.get_size ();
  }
};

struct ClassDef : public OT::ClassDef
{
  template<typename It>
  static bool add_class_def (gsubgpos_graph_context_t& c,
                             unsigned parent_id,
                             unsigned link_position,
                             It glyph_and_class,
                             unsigned max_size)
  {
    unsigned class_def_prime_id = c.graph.new_node (nullptr, nullptr);
    auto& class_def_prime_vertex = c.graph.vertices_[class_def_prime_id];
    if (!make_class_def (c, glyph_and_class, class_def_prime_id, max_size))
      return false;

    auto* class_def_link = c.graph.vertices_[parent_id].obj.real_links.push ();
    class_def_link->width = SmallTypes::size;
    class_def_link->objidx = class_def_prime_id;
    class_def_link->position = link_position;
    class_def_prime_vertex.add_parent (parent_id, false);

    return true;
  }

  template<typename It>
  static bool make_class_def (gsubgpos_graph_context_t& c,
                              It glyph_and_class,
                              unsigned dest_obj,
                              unsigned max_size)
  {
    char* buffer = (char*) hb_calloc (1, max_size);
    hb_serialize_context_t serializer (buffer, max_size);
    OT::ClassDef_serialize (&serializer, glyph_and_class);
    serializer.end_serialize ();
    if (serializer.in_error ())
    {
      hb_free (buffer);
      return false;
    }

    hb_bytes_t class_def_copy = serializer.copy_bytes ();
    if (!class_def_copy.arrayZ) return false;
    // Give ownership to the context, it will cleanup the buffer.
    if (!c.add_buffer ((char *) class_def_copy.arrayZ))
    {
      hb_free ((char *) class_def_copy.arrayZ);
      return false;
    }

    auto& obj = c.graph.vertices_[dest_obj].obj;
    obj.head = (char *) class_def_copy.arrayZ;
    obj.tail = obj.head + class_def_copy.length;

    hb_free (buffer);
    return true;
  }

  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    if (vertex_len < OT::ClassDef::min_size) return false;
    hb_barrier ();
    switch (u.format)
    {
    case 1: return ((ClassDefFormat1*)this)->sanitize (vertex);
    case 2: return ((ClassDefFormat2*)this)->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
    // Not currently supported
    case 3:
    case 4:
#endif
    default: return false;
    }
  }
};


struct class_def_size_estimator_t
{
  // TODO(garretrieger): update to support beyond64k coverage/classdef tables.
  constexpr static unsigned class_def_format1_base_size = 6;
  constexpr static unsigned class_def_format2_base_size = 4;
  constexpr static unsigned coverage_base_size = 4;
  constexpr static unsigned bytes_per_range = 6;
  constexpr static unsigned bytes_per_glyph = 2;

  template<typename It>
  class_def_size_estimator_t (It glyph_and_class)
      : num_ranges_per_class (), glyphs_per_class ()
  {
    reset();
    for (auto p : + glyph_and_class)
    {
      unsigned gid = p.first;
      unsigned klass = p.second;

      hb_set_t* glyphs;
      if (glyphs_per_class.has (klass, &glyphs) && glyphs) {
        glyphs->add (gid);
        continue;
      }

      hb_set_t new_glyphs;
      new_glyphs.add (gid);
      glyphs_per_class.set (klass, std::move (new_glyphs));
    }

    if (in_error ()) return;

    for (unsigned klass : glyphs_per_class.keys ())
    {
      if (!klass) continue; // class 0 doesn't get encoded.

      const hb_set_t& glyphs = glyphs_per_class.get (klass);
      hb_codepoint_t start = HB_SET_VALUE_INVALID;
      hb_codepoint_t end = HB_SET_VALUE_INVALID;

      unsigned count = 0;
      while (glyphs.next_range (&start, &end))
        count++;

      num_ranges_per_class.set (klass, count);
    }
  }

  void reset() {
    class_def_1_size = class_def_format1_base_size;
    class_def_2_size = class_def_format2_base_size;
    included_glyphs.clear();
    included_classes.clear();
  }

  // Compute the size of coverage for all glyphs added via 'add_class_def_size'.
  unsigned coverage_size () const
  {
    unsigned format1_size = coverage_base_size + bytes_per_glyph * included_glyphs.get_population();
    unsigned format2_size = coverage_base_size + bytes_per_range * num_glyph_ranges();
    return hb_min(format1_size, format2_size);
  }

  // Compute the new size of the ClassDef table if all glyphs associated with 'klass' were added.
  unsigned add_class_def_size (unsigned klass)
  {
    if (!included_classes.has(klass)) {
      hb_set_t* glyphs = nullptr;
      if (glyphs_per_class.has(klass, &glyphs)) {
        included_glyphs.union_(*glyphs);
      }

      class_def_1_size = class_def_format1_base_size;
      if (!included_glyphs.is_empty()) {
        unsigned min_glyph = included_glyphs.get_min();
        unsigned max_glyph = included_glyphs.get_max();
        class_def_1_size += bytes_per_glyph * (max_glyph - min_glyph + 1);
      }

      class_def_2_size += bytes_per_range * num_ranges_per_class.get (klass);

      included_classes.add(klass);
    }

    return hb_min (class_def_1_size, class_def_2_size);
  }

  unsigned num_glyph_ranges() const {
    hb_codepoint_t start = HB_SET_VALUE_INVALID;
    hb_codepoint_t end = HB_SET_VALUE_INVALID;

    unsigned count = 0;
    while (included_glyphs.next_range (&start, &end)) {
      count++;
    }
    return count;
  }

  bool in_error ()
  {
    if (num_ranges_per_class.in_error ()) return true;
    if (glyphs_per_class.in_error ()) return true;

    for (const hb_set_t& s : glyphs_per_class.values ())
    {
      if (s.in_error ()) return true;
    }
    return false;
  }

 private:
  hb_hashmap_t<unsigned, unsigned> num_ranges_per_class;
  hb_hashmap_t<unsigned, hb_set_t> glyphs_per_class;
  hb_set_t included_classes;
  hb_set_t included_glyphs;
  unsigned class_def_1_size;
  unsigned class_def_2_size;
};


}

#endif // GRAPH_CLASSDEF_GRAPH_HH
183  thirdparty/harfbuzz/src/graph/coverage-graph.hh  vendored  Normal file
@@ -0,0 +1,183 @@
/*
 * Copyright © 2022 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */

#include "graph.hh"
#include "../OT/Layout/Common/Coverage.hh"

#ifndef GRAPH_COVERAGE_GRAPH_HH
#define GRAPH_COVERAGE_GRAPH_HH

namespace graph {

struct CoverageFormat1 : public OT::Layout::Common::CoverageFormat1_3<SmallTypes>
{
  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    constexpr unsigned min_size = OT::Layout::Common::CoverageFormat1_3<SmallTypes>::min_size;
    if (vertex_len < min_size) return false;
    hb_barrier ();
    return vertex_len >= min_size + glyphArray.get_size () - glyphArray.len.get_size ();
  }
};

struct CoverageFormat2 : public OT::Layout::Common::CoverageFormat2_4<SmallTypes>
{
  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    constexpr unsigned min_size = OT::Layout::Common::CoverageFormat2_4<SmallTypes>::min_size;
    if (vertex_len < min_size) return false;
    hb_barrier ();
    return vertex_len >= min_size + rangeRecord.get_size () - rangeRecord.len.get_size ();
  }
};

struct Coverage : public OT::Layout::Common::Coverage
{
  static Coverage* clone_coverage (gsubgpos_graph_context_t& c,
                                   unsigned coverage_id,
                                   unsigned new_parent_id,
                                   unsigned link_position,
                                   unsigned start, unsigned end)

  {
    unsigned coverage_size = c.graph.vertices_[coverage_id].table_size ();
    auto& coverage_v = c.graph.vertices_[coverage_id];
    Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
    if (!coverage_table || !coverage_table->sanitize (coverage_v))
      return nullptr;

    auto new_coverage =
      + hb_zip (coverage_table->iter (), hb_range ())
      | hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
          return p.second >= start && p.second < end;
        })
      | hb_map_retains_sorting (hb_first)
      ;

    return add_coverage (c, new_parent_id, link_position, new_coverage, coverage_size);
  }

  template<typename It>
  static Coverage* add_coverage (gsubgpos_graph_context_t& c,
                                 unsigned parent_id,
                                 unsigned link_position,
                                 It glyphs,
                                 unsigned max_size)
  {
    unsigned coverage_prime_id = c.graph.new_node (nullptr, nullptr);
    auto& coverage_prime_vertex = c.graph.vertices_[coverage_prime_id];
    if (!make_coverage (c, glyphs, coverage_prime_id, max_size))
      return nullptr;

    auto* coverage_link = c.graph.vertices_[parent_id].obj.real_links.push ();
    coverage_link->width = SmallTypes::size;
    coverage_link->objidx = coverage_prime_id;
    coverage_link->position = link_position;
    coverage_prime_vertex.add_parent (parent_id, false);

    return (Coverage*) coverage_prime_vertex.obj.head;
  }

  // Filter an existing coverage table to glyphs at indices [start, end) and replace it with the filtered version.
  static bool filter_coverage (gsubgpos_graph_context_t& c,
                               unsigned existing_coverage,
                               unsigned start, unsigned end) {
    unsigned coverage_size = c.graph.vertices_[existing_coverage].table_size ();
    auto& coverage_v = c.graph.vertices_[existing_coverage];
    Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
    if (!coverage_table || !coverage_table->sanitize (coverage_v))
      return false;

    auto new_coverage =
      + hb_zip (coverage_table->iter (), hb_range ())
      | hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
          return p.second >= start && p.second < end;
        })
      | hb_map_retains_sorting (hb_first)
      ;

    return make_coverage (c, new_coverage, existing_coverage, coverage_size * 2 + 100);
  }

  // Replace the coverage table at dest obj with one covering 'glyphs'.
  template<typename It>
  static bool make_coverage (gsubgpos_graph_context_t& c,
                             It glyphs,
                             unsigned dest_obj,
                             unsigned max_size)
  {
    char* buffer = (char*) hb_calloc (1, max_size);
    hb_serialize_context_t serializer (buffer, max_size);
    OT::Layout::Common::Coverage_serialize (&serializer, glyphs);
    serializer.end_serialize ();
    if (serializer.in_error ())
    {
      hb_free (buffer);
      return false;
    }

    hb_bytes_t coverage_copy = serializer.copy_bytes ();
    if (!coverage_copy.arrayZ) return false;
    // Give ownership to the context, it will cleanup the buffer.
    if (!c.add_buffer ((char *) coverage_copy.arrayZ))
    {
      hb_free ((char *) coverage_copy.arrayZ);
      return false;
    }

    auto& obj = c.graph.vertices_[dest_obj].obj;
    obj.head = (char *) coverage_copy.arrayZ;
    obj.tail = obj.head + coverage_copy.length;

    hb_free (buffer);
    return true;
  }

  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    if (vertex_len < OT::Layout::Common::Coverage::min_size) return false;
    hb_barrier ();
    switch (u.format)
    {
    case 1: return ((CoverageFormat1*)this)->sanitize (vertex);
    case 2: return ((CoverageFormat2*)this)->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
    // Not currently supported
    case 3:
    case 4:
#endif
    default: return false;
    }
  }
};


}

#endif // GRAPH_COVERAGE_GRAPH_HH
1646  thirdparty/harfbuzz/src/graph/graph.hh  vendored  Normal file
File diff suppressed because it is too large
74  thirdparty/harfbuzz/src/graph/gsubgpos-context.cc  vendored  Normal file
@@ -0,0 +1,74 @@
/*
 * Copyright © 2022 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */

#include "gsubgpos-graph.hh"

namespace graph {

gsubgpos_graph_context_t::gsubgpos_graph_context_t (hb_tag_t table_tag_,
                                                    graph_t& graph_)
    : table_tag (table_tag_),
      graph (graph_),
      lookup_list_index (0),
      lookups ()
{
  if (table_tag_ != HB_OT_TAG_GPOS
      && table_tag_ != HB_OT_TAG_GSUB)
    return;

  GSTAR* gstar = graph::GSTAR::graph_to_gstar (graph_);
  if (gstar) {
    gstar->find_lookups (graph, lookups);
    lookup_list_index = gstar->get_lookup_list_index (graph_);
  }
}

unsigned gsubgpos_graph_context_t::create_node (unsigned size)
{
  char* buffer = (char*) hb_calloc (1, size);
  if (!buffer)
    return -1;

  if (!add_buffer (buffer)) {
    // Allocation did not get stored for freeing later.
    hb_free (buffer);
    return -1;
  }

  return graph.new_node (buffer, buffer + size);
}

unsigned gsubgpos_graph_context_t::num_non_ext_subtables () {
  unsigned count = 0;
  for (auto l : lookups.values ())
  {
    if (l->is_extension (table_tag)) continue;
    count += l->number_of_subtables ();
  }
  return count;
}

}
61  thirdparty/harfbuzz/src/graph/gsubgpos-context.hh  vendored  Normal file
@@ -0,0 +1,61 @@
/*
 * Copyright © 2022 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */

#include "graph.hh"
#include "../hb-ot-layout-gsubgpos.hh"

#ifndef GRAPH_GSUBGPOS_CONTEXT_HH
#define GRAPH_GSUBGPOS_CONTEXT_HH

namespace graph {

struct Lookup;

struct gsubgpos_graph_context_t
{
  hb_tag_t table_tag;
  graph_t& graph;
  unsigned lookup_list_index;
  hb_hashmap_t<unsigned, graph::Lookup*> lookups;
  hb_hashmap_t<unsigned, unsigned> subtable_to_extension;

  HB_INTERNAL gsubgpos_graph_context_t (hb_tag_t table_tag_,
                                        graph_t& graph_);

  HB_INTERNAL unsigned create_node (unsigned size);

  bool add_buffer (char* buffer)
  {
    return graph.add_buffer (buffer);
  }

 private:
  HB_INTERNAL unsigned num_non_ext_subtables ();
};

}

#endif // GRAPH_GSUBGPOS_CONTEXT
461  thirdparty/harfbuzz/src/graph/gsubgpos-graph.hh  vendored  Normal file
@@ -0,0 +1,461 @@
/*
 * Copyright © 2022 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */

#include "graph.hh"
#include "../hb-ot-layout-gsubgpos.hh"
#include "../OT/Layout/GSUB/ExtensionSubst.hh"
#include "../OT/Layout/GSUB/SubstLookupSubTable.hh"
#include "gsubgpos-context.hh"
#include "pairpos-graph.hh"
#include "markbasepos-graph.hh"
#include "ligature-graph.hh"

#ifndef GRAPH_GSUBGPOS_GRAPH_HH
#define GRAPH_GSUBGPOS_GRAPH_HH

namespace graph {

struct Lookup;

template<typename T>
struct ExtensionFormat1 : public OT::ExtensionFormat1<T>
{
  void reset(unsigned type)
  {
    this->format = 1;
    this->extensionLookupType = type;
    this->extensionOffset = 0;
  }

  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    return vertex_len >= OT::ExtensionFormat1<T>::static_size;
  }

  unsigned get_lookup_type () const
  {
    return this->extensionLookupType;
  }

  unsigned get_subtable_index (graph_t& graph, unsigned this_index) const
  {
    return graph.index_for_offset (this_index, &this->extensionOffset);
  }
};

struct Lookup : public OT::Lookup
{
  unsigned number_of_subtables () const
  {
    return subTable.len;
  }

  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    if (vertex_len < OT::Lookup::min_size) return false;
    hb_barrier ();
    return vertex_len >= this->get_size ();
  }

  bool is_extension (hb_tag_t table_tag) const
  {
    return lookupType == extension_type (table_tag);
  }

  bool make_extension (gsubgpos_graph_context_t& c,
                       unsigned this_index)
  {
    unsigned type = lookupType;
    unsigned ext_type = extension_type (c.table_tag);
    if (!ext_type || is_extension (c.table_tag))
    {
      // NOOP
      return true;
    }

    DEBUG_MSG (SUBSET_REPACK, nullptr,
               "Promoting lookup type %u (obj %u) to extension.",
               type,
               this_index);

    for (unsigned i = 0; i < subTable.len; i++)
    {
      unsigned subtable_index = c.graph.index_for_offset (this_index, &subTable[i]);
      if (!make_subtable_extension (c,
                                    this_index,
                                    subtable_index))
        return false;
    }

    lookupType = ext_type;
    return true;
  }

  bool split_subtables_if_needed (gsubgpos_graph_context_t& c,
                                  unsigned this_index)
  {
    unsigned type = lookupType;
    bool is_ext = is_extension (c.table_tag);

    if (c.table_tag != HB_OT_TAG_GPOS && c.table_tag != HB_OT_TAG_GSUB)
      return true;

    if (!is_ext && !is_supported_gpos_type(type, c) && !is_supported_gsub_type(type, c))
      return true;

    hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>> all_new_subtables;
    for (unsigned i = 0; i < subTable.len; i++)
    {
      unsigned subtable_index = c.graph.index_for_offset (this_index, &subTable[i]);
      unsigned parent_index = this_index;
      if (is_ext) {
        unsigned ext_subtable_index = subtable_index;
        parent_index = ext_subtable_index;
        ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>* extension =
            (ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>*)
            c.graph.object (ext_subtable_index).head;
        if (!extension || !extension->sanitize (c.graph.vertices_[ext_subtable_index]))
          continue;

        subtable_index = extension->get_subtable_index (c.graph, ext_subtable_index);
        type = extension->get_lookup_type ();
        if (!is_supported_gpos_type(type, c) && !is_supported_gsub_type(type, c))
          continue;
      }

      hb_vector_t<unsigned> new_sub_tables;

      if (c.table_tag == HB_OT_TAG_GPOS) {
        switch (type)
        {
        case 2:
          new_sub_tables = split_subtable<PairPos> (c, parent_index, subtable_index); break;
        case 4:
          new_sub_tables = split_subtable<MarkBasePos> (c, parent_index, subtable_index); break;
        default:
          break;
        }
      } else if (c.table_tag == HB_OT_TAG_GSUB) {
        switch (type)
        {
        case 4:
          new_sub_tables = split_subtable<graph::LigatureSubst> (c, parent_index, subtable_index); break;
        default:
          break;
        }
      }

      if (new_sub_tables.in_error ()) return false;
      if (!new_sub_tables) continue;
      hb_pair_t<unsigned, hb_vector_t<unsigned>>* entry = all_new_subtables.push ();
      entry->first = i;
      entry->second = std::move (new_sub_tables);
    }

    if (all_new_subtables) {
      return add_sub_tables (c, this_index, type, all_new_subtables);
    }

    return true;
  }

  template<typename T>
  hb_vector_t<unsigned> split_subtable (gsubgpos_graph_context_t& c,
                                        unsigned parent_idx,
                                        unsigned objidx)
  {
    T* sub_table = (T*) c.graph.object (objidx).head;
    if (!sub_table || !sub_table->sanitize (c.graph.vertices_[objidx]))
      return hb_vector_t<unsigned> ();

    return sub_table->split_subtables (c, parent_idx, objidx);
  }

  bool add_sub_tables (gsubgpos_graph_context_t& c,
                       unsigned this_index,
                       unsigned type,
                       hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
  {
    bool is_ext = is_extension (c.table_tag);
    auto* v = &c.graph.vertices_[this_index];
    fix_existing_subtable_links (c, this_index, subtable_ids);

    unsigned new_subtable_count = 0;
    for (const auto& p : subtable_ids)
      new_subtable_count += p.second.length;

    size_t new_size = v->table_size ()
                      + new_subtable_count * OT::Offset16::static_size;
    char* buffer = (char*) hb_calloc (1, new_size);
    if (!buffer) return false;
    if (!c.add_buffer (buffer))
    {
      hb_free (buffer);
      return false;
    }
    hb_memcpy (buffer, v->obj.head, v->table_size());

    v->obj.head = buffer;
    v->obj.tail = buffer + new_size;

    Lookup* new_lookup = (Lookup*) buffer;

    unsigned shift = 0;
    new_lookup->subTable.len = subTable.len + new_subtable_count;
    for (const auto& p : subtable_ids)
    {
      unsigned offset_index = p.first + shift + 1;
      shift += p.second.length;

      for (unsigned subtable_id : p.second)
      {
        if (is_ext)
        {
          unsigned ext_id = create_extension_subtable (c, subtable_id, type);
          c.graph.vertices_[subtable_id].add_parent (ext_id, false);
          subtable_id = ext_id;
          // the reference to v may have changed on adding a node, so reassign it.
          v = &c.graph.vertices_[this_index];
        }

        auto* link = v->obj.real_links.push ();
        link->width = 2;
        link->objidx = subtable_id;
        link->position = (char*) &new_lookup->subTable[offset_index++] -
                         (char*) new_lookup;
        c.graph.vertices_[subtable_id].add_parent (this_index, false);
      }
    }

    // Repacker sort order depends on link order, which we've messed up so resort it.
    v->obj.real_links.qsort ();

    // The head location of the lookup has changed, invalidating the lookups map entry
    // in the context. Update the map.
    c.lookups.set (this_index, new_lookup);
    return true;
  }

  void fix_existing_subtable_links (gsubgpos_graph_context_t& c,
                                    unsigned this_index,
                                    hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
  {
    auto& v = c.graph.vertices_[this_index];
    Lookup* lookup = (Lookup*) v.obj.head;

    unsigned shift = 0;
    for (const auto& p : subtable_ids)
    {
      unsigned insert_index = p.first + shift;
      unsigned pos_offset = p.second.length * OT::Offset16::static_size;
      unsigned insert_offset = (char*) &lookup->subTable[insert_index] - (char*) lookup;
      shift += p.second.length;

      for (auto& l : v.obj.all_links_writer ())
      {
        if (l.position > insert_offset) l.position += pos_offset;
      }
    }
  }

  unsigned create_extension_subtable (gsubgpos_graph_context_t& c,
                                      unsigned subtable_index,
                                      unsigned type)
  {
    unsigned extension_size = OT::ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>::static_size;

    unsigned ext_index = c.create_node (extension_size);
    if (ext_index == (unsigned) -1)
      return -1;

    auto& ext_vertex = c.graph.vertices_[ext_index];
    ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>* extension =
        (ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>*) ext_vertex.obj.head;
    extension->reset (type);

    // Make extension point at the subtable.
    auto* l = ext_vertex.obj.real_links.push ();

    l->width = 4;
    l->objidx = subtable_index;
    l->position = 4;

    return ext_index;
  }

  bool make_subtable_extension (gsubgpos_graph_context_t& c,
                                unsigned lookup_index,
                                unsigned subtable_index)
  {
    unsigned type = lookupType;
    unsigned ext_index = -1;
    unsigned* existing_ext_index = nullptr;
    if (c.subtable_to_extension.has(subtable_index, &existing_ext_index)) {
      ext_index = *existing_ext_index;
    } else {
      ext_index = create_extension_subtable(c, subtable_index, type);
      c.subtable_to_extension.set(subtable_index, ext_index);
    }

    if (ext_index == (unsigned) -1)
      return false;

    auto& subtable_vertex = c.graph.vertices_[subtable_index];
    auto& lookup_vertex = c.graph.vertices_[lookup_index];
    for (auto& l : lookup_vertex.obj.real_links.writer ())
    {
      if (l.objidx == subtable_index) {
        // Change lookup to point at the extension.
        l.objidx = ext_index;
        if (existing_ext_index)
          subtable_vertex.remove_parent(lookup_index);
      }
    }

    // Make extension point at the subtable.
    auto& ext_vertex = c.graph.vertices_[ext_index];
    ext_vertex.add_parent (lookup_index, false);
    if (!existing_ext_index)
      subtable_vertex.remap_parent (lookup_index, ext_index);

    return true;
  }

 private:
  bool is_supported_gsub_type(unsigned type, gsubgpos_graph_context_t& c) const {
    return (c.table_tag == HB_OT_TAG_GSUB) && (
        type == OT::Layout::GSUB_impl::SubstLookupSubTable::Type::Ligature
    );
  }

  bool is_supported_gpos_type(unsigned type, gsubgpos_graph_context_t& c) const {
    return (c.table_tag == HB_OT_TAG_GPOS) && (
        type == OT::Layout::GPOS_impl::PosLookupSubTable::Type::Pair ||
        type == OT::Layout::GPOS_impl::PosLookupSubTable::Type::MarkBase
    );
  }

  unsigned extension_type (hb_tag_t table_tag) const
  {
    switch (table_tag)
    {
    case HB_OT_TAG_GPOS: return 9;
    case HB_OT_TAG_GSUB: return 7;
    default: return 0;
    }
  }
};

template <typename T>
struct LookupList : public OT::LookupList<T>
{
  bool sanitize (const graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    if (vertex_len < OT::LookupList<T>::min_size) return false;
    hb_barrier ();
    return vertex_len >= OT::LookupList<T>::item_size * this->len;
  }
};

struct GSTAR : public OT::GSUBGPOS
{
  static GSTAR* graph_to_gstar (graph_t& graph)
  {
    const auto& r = graph.root ();

    GSTAR* gstar = (GSTAR*) r.obj.head;
    if (!gstar || !gstar->sanitize (r))
      return nullptr;
    hb_barrier ();

    return gstar;
  }

  const void* get_lookup_list_field_offset () const
  {
    switch (u.version.major) {
    case 1: return u.version1.get_lookup_list_offset ();
#ifndef HB_NO_BEYOND_64K
    case 2: return u.version2.get_lookup_list_offset ();
#endif
    default: return 0;
    }
  }

  bool sanitize (const graph_t::vertex_t& vertex)
  {
    int64_t len = vertex.obj.tail - vertex.obj.head;
    if (len < OT::GSUBGPOS::min_size) return false;
    hb_barrier ();
    return len >= get_size ();
  }

  void find_lookups (graph_t& graph,
                     hb_hashmap_t<unsigned, Lookup*>& lookups /* OUT */)
  {
    switch (u.version.major) {
    case 1: find_lookups<SmallTypes> (graph, lookups); break;
#ifndef HB_NO_BEYOND_64K
    case 2: find_lookups<MediumTypes> (graph, lookups); break;
#endif
    }
  }

  unsigned get_lookup_list_index (graph_t& graph)
  {
    return graph.index_for_offset (graph.root_idx (),
                                   get_lookup_list_field_offset());
  }

  template<typename Types>
  void find_lookups (graph_t& graph,
                     hb_hashmap_t<unsigned, Lookup*>& lookups /* OUT */)
  {
    unsigned lookup_list_idx = get_lookup_list_index (graph);
    const LookupList<Types>* lookupList =
        (const LookupList<Types>*) graph.object (lookup_list_idx).head;
    if (!lookupList || !lookupList->sanitize (graph.vertices_[lookup_list_idx]))
      return;

    for (unsigned i = 0; i < lookupList->len; i++)
    {
      unsigned lookup_idx = graph.index_for_offset (lookup_list_idx, &(lookupList->arrayZ[i]));
      Lookup* lookup = (Lookup*) graph.object (lookup_idx).head;
      if (!lookup || !lookup->sanitize (graph.vertices_[lookup_idx])) continue;
      lookups.set (lookup_idx, lookup);
    }
  }
};




}

#endif /* GRAPH_GSUBGPOS_GRAPH_HH */
480  thirdparty/harfbuzz/src/graph/ligature-graph.hh  vendored  Normal file
@@ -0,0 +1,480 @@
/*
 * Copyright © 2025 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */

#ifndef GRAPH_LIGATURE_GRAPH_HH
#define GRAPH_LIGATURE_GRAPH_HH

#include "graph.hh"
#include "../OT/Layout/GSUB/LigatureSubst.hh"
#include "../OT/Layout/GSUB/LigatureSubstFormat1.hh"
#include "../OT/Layout/GSUB/LigatureSet.hh"
#include "../OT/Layout/types.hh"
#include <algorithm>
#include <utility>

namespace graph {

struct LigatureSet : public OT::Layout::GSUB_impl::LigatureSet<SmallTypes>
{
  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    if (vertex_len < OT::Layout::GSUB_impl::LigatureSet<SmallTypes>::min_size) return false;
    hb_barrier ();

    int64_t total_len = ligature.get_size() + OT::Layout::GSUB_impl::LigatureSet<SmallTypes>::min_size - ligature.len.get_size();
    if (vertex_len < total_len) {
      return false;
    }
    return true;
  }
};

struct LigatureSubstFormat1 : public OT::Layout::GSUB_impl::LigatureSubstFormat1_2<SmallTypes>
{
  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    unsigned min_size = OT::Layout::GSUB_impl::LigatureSubstFormat1_2<SmallTypes>::min_size;
    if (vertex_len < min_size) return false;
    hb_barrier ();

    return vertex_len >=
           min_size + ligatureSet.get_size() - ligatureSet.len.get_size();
  }

  hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
                                         unsigned parent_index,
                                         unsigned this_index)
  {
    auto split_points = compute_split_points(c, parent_index, this_index);
    split_context_t split_context {
      c,
      this,
      c.graph.duplicate_if_shared (parent_index, this_index),
      total_number_ligas(c, this_index),
      liga_counts(c, this_index),
    };
    return actuate_subtable_split<split_context_t> (split_context, split_points);
  }

 private:
  unsigned total_number_ligas(gsubgpos_graph_context_t& c, unsigned this_index) const {
    unsigned total = 0;
    for (unsigned i = 0; i < ligatureSet.len; i++)
    {
      auto liga_set = c.graph.as_table<LigatureSet>(this_index, &ligatureSet[i]);
      if (!liga_set.table) {
        return 0;
      }
      total += liga_set.table->ligature.len;
    }
    return total;
  }

  hb_vector_t<unsigned> liga_counts(gsubgpos_graph_context_t& c, unsigned this_index) const {
    hb_vector_t<unsigned> result;
    for (unsigned i = 0; i < ligatureSet.len; i++)
    {
      auto liga_set = c.graph.as_table<LigatureSet>(this_index, &ligatureSet[i]);
      result.push(!liga_set.table ? 0 : liga_set.table->ligature.len);
    }
    return result;
  }

  hb_vector_t<unsigned> compute_split_points(gsubgpos_graph_context_t& c,
                                             unsigned parent_index,
                                             unsigned this_index) const
  {
    // For ligature subst coverage is always packed last, and as a result is where an overflow
    // will happen if there is one, so we can check the estimate length of the
    // LigatureSubstFormat1 -> Coverage offset length which is the sum of all data in the
    // retained sub graph except for the coverage table itself.
    const unsigned base_size = OT::Layout::GSUB_impl::LigatureSubstFormat1_2<SmallTypes>::min_size;
    unsigned accumulated = base_size;

    unsigned ligature_index = 0;
    hb_vector_t<unsigned> split_points;
    for (unsigned i = 0; i < ligatureSet.len; i++)
    {
      accumulated += OT::HBUINT16::static_size; // for ligature set offset
      accumulated += OT::Layout::GSUB_impl::LigatureSet<SmallTypes>::min_size; // for ligature set table

      auto liga_set = c.graph.as_table<LigatureSet>(this_index, &ligatureSet[i]);
      if (!liga_set.table) {
        return hb_vector_t<unsigned> {};
      }

      for (unsigned j = 0; j < liga_set.table->ligature.len; j++)
      {
        const unsigned liga_id = c.graph.index_for_offset (liga_set.index, &liga_set.table->ligature[j]);
        const unsigned liga_size = c.graph.vertices_[liga_id].table_size ();

        accumulated += OT::HBUINT16::static_size; // for ligature offset
        accumulated += liga_size; // for the ligature table

        if (accumulated >= (1 << 16))
        {
          split_points.push(ligature_index);
          // We're going to split such that the current ligature will be in the new sub table.
          // That means we'll have one ligature subst (base_base), one ligature set, and one liga table
          accumulated = base_size + // for liga subst subtable
                        (OT::HBUINT16::static_size * 2) + // for liga set and liga offset
                        OT::Layout::GSUB_impl::LigatureSet<SmallTypes>::min_size + // for liga set subtable
                        liga_size; // for liga sub table
        }

        ligature_index++;
      }
    }

    return split_points;
  }


  struct split_context_t
  {
    gsubgpos_graph_context_t& c;
    LigatureSubstFormat1* thiz;
    unsigned this_index;
    unsigned original_count_;
    hb_vector_t<unsigned> liga_counts;

    unsigned original_count ()
    {
      return original_count_;
    }

    unsigned clone_range (unsigned start, unsigned end)
    {
      return thiz->clone_range (c, this_index, liga_counts, start, end);
    }

    bool shrink (unsigned count)
    {
      return thiz->shrink (c, this_index, original_count(), liga_counts, count);
    }
  };

  hb_pair_t<unsigned, LigatureSet*> new_liga_set(gsubgpos_graph_context_t& c, unsigned count) const {
    unsigned prime_size = OT::Layout::GSUB_impl::LigatureSet<SmallTypes>::min_size
                          + count * SmallTypes::size;

    unsigned prime_id = c.create_node (prime_size);
    if (prime_id == (unsigned) -1) return hb_pair(-1, nullptr);

    LigatureSet* prime = (LigatureSet*) c.graph.object (prime_id).head;
    prime->ligature.len = count;
    return hb_pair(prime_id, prime);
  }

  void clear_virtual_links (gsubgpos_graph_context_t& c, unsigned node_index) const
  {
    auto& obj = c.graph.vertices_[node_index].obj;
    for (const auto& l : obj.virtual_links)
    {
      auto& child = c.graph.vertices_[l.objidx];
      child.remove_parent(node_index);
    }
    obj.virtual_links.clear();
  }

  void add_virtual_link(gsubgpos_graph_context_t& c, unsigned from, unsigned to) const {
    auto& from_obj = c.graph.vertices_[from].obj;
    c.graph.vertices_[to].add_parent(from, true);
    auto& link = *from_obj.virtual_links.push ();
    link.objidx = to;
  }

  hb_pair_t<unsigned, unsigned> current_liga_set_bounds (gsubgpos_graph_context_t& c,
                                                         unsigned liga_set_index,
                                                         const hb_serialize_context_t::object_t& liga_set) const
  {
    // Finds the actual liga indices present in the liga set currently. Takes
    // into account those that have been removed by processing.
    unsigned min_index = (unsigned) -1;
    unsigned max_index = 0;
    for (const auto& l : liga_set.real_links) {
      if (l.position < 2) continue;

      unsigned liga_index = (l.position - 2) / 2;
      min_index = hb_min(min_index, liga_index);
      max_index = hb_max(max_index, liga_index);
    }
    return hb_pair(min_index, max_index + 1);
  }

  void compact_liga_set (gsubgpos_graph_context_t& c, LigatureSet* table, hb_serialize_context_t::object_t& obj) const
  {
    if (table->ligature.len <= obj.real_links.length) return;

    // compact the remaining linked liga offsets into a continous array and shrink the node as needed.
    unsigned to_remove = table->ligature.len - obj.real_links.length;
    unsigned new_position = SmallTypes::size;
    obj.real_links.qsort(); // for this to work we need to process links in order of position.
    for (auto& l : obj.real_links)
    {
      l.position = new_position;
      new_position += SmallTypes::size;
    }

    table->ligature.len = obj.real_links.length;
    obj.tail -= to_remove * SmallTypes::size;
  }

  unsigned clone_range (gsubgpos_graph_context_t& c,
                        unsigned this_index,
                        hb_vector_t<unsigned> liga_counts,
                        unsigned start, unsigned end) const
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               "  Cloning LigatureSubstFormat1 (%u) range [%u, %u).", this_index, start, end);

    // Create an oversized new liga subst, we'll adjust the size down later. We don't know
    // the final size until we process it but we also need it to exist while we're processing
    // so that nodes can be moved to it as needed.
    unsigned prime_size = OT::Layout::GSUB_impl::LigatureSubstFormat1_2<SmallTypes>::min_size
                          + ligatureSet.get_size() - ligatureSet.len.get_size();

    unsigned liga_subst_prime_id = c.create_node (prime_size);
    if (liga_subst_prime_id == (unsigned) -1) return -1;

    LigatureSubstFormat1* liga_subst_prime = (LigatureSubstFormat1*) c.graph.object (liga_subst_prime_id).head;
    liga_subst_prime->format = this->format;
    liga_subst_prime->ligatureSet.len = this->ligatureSet.len;

    // Create a place holder coverage prime id since we need to add virtual links to it while
    // generating liga and liga sets. Afterwards it will be updated to have the correct coverage.
    unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
    unsigned coverage_prime_id = c.graph.duplicate(coverage_id);
    auto& coverage_prime_vertex = c.graph.vertices_[coverage_prime_id];
    auto* coverage_prime_link = c.graph.vertices_[liga_subst_prime_id].obj.real_links.push ();
    coverage_prime_link->width = SmallTypes::size;
    coverage_prime_link->objidx = coverage_prime_id;
    coverage_prime_link->position = 2;
    coverage_prime_vertex.add_parent (liga_subst_prime_id, false);

    // Locate all liga sets with ligas between start and end.
    // Clone or move them as needed.
    unsigned count = 0;
    unsigned liga_set_count = 0;
    unsigned liga_set_start = -1;
    unsigned liga_set_end = 0; // inclusive
    for (unsigned i = 0; i < liga_counts.length; i++)
    {
      unsigned num_ligas = liga_counts[i];

      unsigned current_start = count;
      unsigned current_end = count + num_ligas;

      if (current_start >= end || start >= current_end) {
        // No intersection, so just skip
        count += num_ligas;
        continue;
      }

      auto liga_set_index = c.graph.index_for_offset(this_index, &ligatureSet[i]);
      auto liga_set = c.graph.as_table<LigatureSet>(this_index, &ligatureSet[i]);
      if (!liga_set.table) {
        return -1;
      }

      // Bounds may need to be adjusted if some ligas have been previously removed.
      hb_pair_t<unsigned, unsigned> liga_bounds = current_liga_set_bounds(c, liga_set_index, liga_set.vertex->obj);
      current_start = hb_max(count + liga_bounds.first, current_start);
      current_end = hb_min(count + liga_bounds.second, current_end);

      unsigned liga_set_prime_id;
      if (current_start >= start && current_end <= end) {
        // This liga set is fully contined within [start, end)
        // We can move the entire ligaset to the new liga subset object.
        liga_set_end = i;
        if (i < liga_set_start) liga_set_start = i;
        liga_set_prime_id = c.graph.move_child<> (this_index,
                                                  &ligatureSet[i],
                                                  liga_subst_prime_id,
                                                  &liga_subst_prime->ligatureSet[liga_set_count++]);
        compact_liga_set(c, liga_set.table, liga_set.vertex->obj);
      }
      else
      {
        // This liga set partially overlaps [start, end). We'll need to create
        // a new liga set sub table and move the intersecting ligas to it.
        unsigned liga_count = hb_min(end, current_end) - hb_max(start, current_start);
        auto result = new_liga_set(c, liga_count);
        liga_set_prime_id = result.first;
        LigatureSet* prime = result.second;
        if (liga_set_prime_id == (unsigned) -1) return -1;

        unsigned new_index = 0;
        for (unsigned j = hb_max(start, current_start) - count; j < hb_min(end, current_end) - count; j++) {
          c.graph.move_child<> (liga_set_index,
                                &liga_set.table->ligature[j],
                                liga_set_prime_id,
                                &prime->ligature[new_index++]);
        }

        liga_set_end = i;
        if (i < liga_set_start) liga_set_start = i;
        c.graph.add_link(&liga_subst_prime->ligatureSet[liga_set_count++], liga_subst_prime_id, liga_set_prime_id);
      }

      // The new liga and all children set needs to have a virtual link to the new coverage table:
      auto& liga_set_prime = c.graph.vertices_[liga_set_prime_id].obj;
      clear_virtual_links(c, liga_set_prime_id);
      add_virtual_link(c, liga_set_prime_id, coverage_prime_id);
      for (const auto& l : liga_set_prime.real_links) {
        clear_virtual_links(c, l.objidx);
        add_virtual_link(c, l.objidx, coverage_prime_id);
      }

      count += num_ligas;
    }

    c.graph.vertices_[liga_subst_prime_id].obj.tail -= (liga_subst_prime->ligatureSet.len - liga_set_count) * SmallTypes::size;
    liga_subst_prime->ligatureSet.len = liga_set_count;

    if (!Coverage::filter_coverage (c,
                                    coverage_prime_id,
                                    liga_set_start, liga_set_end + 1))
      return -1;

    return liga_subst_prime_id;
  }

  bool shrink (gsubgpos_graph_context_t& c,
               unsigned this_index,
               unsigned old_count,
               hb_vector_t<unsigned> liga_counts,
               unsigned count)
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               "  Shrinking LigatureSubstFormat1 (%u) to [0, %u).",
               this_index,
               count);
    if (count >= old_count)
      return true;

    hb_set_t retained_indices;
    unsigned new_liga_set_count = 0;
    for (unsigned i = 0; i < liga_counts.length; i++)
    {
      auto liga_set = c.graph.as_table<LigatureSet>(this_index, &ligatureSet[i]);
      if (!liga_set.table) {
        return false;
      }

      // We need the virtual links to coverage removed from all descendants on this liga subst.
      // If any are left when we try to mutate the coverage table later it will be unnessecarily
      // duplicated. Code later on will re-add the virtual links as needed (via retained_indices).
      clear_virtual_links(c, liga_set.index);
      retained_indices.add(liga_set.index);
      for (const auto& liga_offset : liga_set.table->ligature) {
        unsigned liga_index = c.graph.index_for_offset(liga_set.index, &liga_offset);
        if (liga_index != (unsigned) -1) {
          clear_virtual_links(c, liga_index);
          retained_indices.add(liga_index);
        }
      }

      unsigned num_ligas = liga_counts[i];
      if (num_ligas >= count) {
        // drop the trailing liga's from this set and all subsequent liga sets
        unsigned num_ligas_to_remove = num_ligas - count;
        new_liga_set_count = i + 1;
        c.graph.vertices_[liga_set.index].obj.tail -= num_ligas_to_remove * SmallTypes::size;
        liga_set.table->ligature.len = count;
        break;
      } else {
        count -= num_ligas;
      }
    }

    // Adjust liga set array
    c.graph.vertices_[this_index].obj.tail -= (ligatureSet.len - new_liga_set_count) * SmallTypes::size;
    ligatureSet.len = new_liga_set_count;

    // Coverage matches the number of liga sets so rebuild as needed
    auto coverage = c.graph.as_mutable_table<Coverage> (this_index, &this->coverage);
    if (!coverage) return false;

    for (unsigned i : retained_indices.iter())
      add_virtual_link(c, i, coverage.index);

    unsigned coverage_size = coverage.vertex->table_size ();
    auto new_coverage =
      + hb_zip (coverage.table->iter (), hb_range ())
      | hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
          return p.second < new_liga_set_count;
        })
      | hb_map_retains_sorting (hb_first)
      ;

    return Coverage::make_coverage (c, new_coverage, coverage.index, coverage_size);
  }
};

struct LigatureSubst : public OT::Layout::GSUB_impl::LigatureSubst
{

  hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
                                         unsigned parent_index,
                                         unsigned this_index)
  {
    switch (u.format) {
    case 1:
      return ((LigatureSubstFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
#ifndef HB_NO_BEYOND_64K
    case 2: HB_FALLTHROUGH;
      // Don't split 24bit Ligature Subs
#endif
    default:
      return hb_vector_t<unsigned> ();
    }
  }

  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    if (vertex_len < u.format.get_size ()) return false;
    hb_barrier ();

    switch (u.format) {
    case 1:
      return ((LigatureSubstFormat1*)(&u.format1))->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
    case 2: HB_FALLTHROUGH;
#endif
    default:
      // We don't handle format 2 here.
      return false;
    }
  }
};

}

#endif // GRAPH_LIGATURE_GRAPH_HH
518
thirdparty/harfbuzz/src/graph/markbasepos-graph.hh
vendored
Normal file
@@ -0,0 +1,518 @@
|
||||
/*
|
||||
* Copyright © 2022 Google, Inc.
|
||||
*
|
||||
* This is part of HarfBuzz, a text shaping library.
|
||||
*
|
||||
* Permission is hereby granted, without written agreement and without
|
||||
* license or royalty fees, to use, copy, modify, and distribute this
|
||||
* software and its documentation for any purpose, provided that the
|
||||
* above copyright notice and the following two paragraphs appear in
|
||||
* all copies of this software.
|
||||
*
|
||||
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
|
||||
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
|
||||
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
|
||||
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
|
||||
* DAMAGE.
|
||||
*
|
||||
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
|
||||
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
|
||||
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
|
||||
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
|
||||
*
|
||||
* Google Author(s): Garret Rieger
|
||||
*/
|
||||
|
||||
#ifndef GRAPH_MARKBASEPOS_GRAPH_HH
|
||||
#define GRAPH_MARKBASEPOS_GRAPH_HH
|
||||
|
||||
#include "split-helpers.hh"
|
||||
#include "coverage-graph.hh"
|
||||
#include "../OT/Layout/GPOS/MarkBasePos.hh"
|
||||
#include "../OT/Layout/GPOS/PosLookupSubTable.hh"
|
||||
|
||||
namespace graph {
|
||||
|
||||
struct AnchorMatrix : public OT::Layout::GPOS_impl::AnchorMatrix
|
||||
{
|
||||
bool sanitize (graph_t::vertex_t& vertex, unsigned class_count) const
|
||||
{
|
||||
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
|
||||
if (vertex_len < AnchorMatrix::min_size) return false;
|
||||
hb_barrier ();
|
||||
|
||||
return vertex_len >= AnchorMatrix::min_size +
|
||||
OT::Offset16::static_size * class_count * this->rows;
|
||||
}
|
||||
|
||||
bool shrink (gsubgpos_graph_context_t& c,
|
||||
unsigned this_index,
|
||||
unsigned old_class_count,
|
||||
unsigned new_class_count)
|
||||
{
|
||||
if (new_class_count >= old_class_count) return false;
|
||||
auto& o = c.graph.vertices_[this_index].obj;
|
||||
unsigned base_count = rows;
|
||||
o.tail = o.head +
|
||||
AnchorMatrix::min_size +
|
||||
OT::Offset16::static_size * base_count * new_class_count;
|
||||
|
||||
// Reposition links into the new indexing scheme.
|
||||
for (auto& link : o.real_links.writer ())
|
||||
{
|
||||
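// Anchor offsets live in matrixZ, which starts 2 bytes in (after the rows field);
// each Offset16 is 2 bytes, so the flattened link position decomposes into a
// (base row, class column) pair under the old class count.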
unsigned index = (link.position - 2) / 2;
|
||||
unsigned base = index / old_class_count;
|
||||
unsigned klass = index % old_class_count;
|
||||
if (klass >= new_class_count)
|
||||
// should have already been removed
|
||||
return false;
|
||||
|
||||
unsigned new_index = base * new_class_count + klass;
|
||||
|
||||
link.position = (char*) &(this->matrixZ[new_index]) - (char*) this;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned clone (gsubgpos_graph_context_t& c,
|
||||
unsigned this_index,
|
||||
unsigned start,
|
||||
unsigned end,
|
||||
unsigned class_count)
|
||||
{
|
||||
unsigned base_count = rows;
|
||||
unsigned new_class_count = end - start;
|
||||
unsigned size = AnchorMatrix::min_size +
|
||||
OT::Offset16::static_size * new_class_count * rows;
|
||||
unsigned prime_id = c.create_node (size);
|
||||
if (prime_id == (unsigned) -1) return -1;
|
||||
AnchorMatrix* prime = (AnchorMatrix*) c.graph.object (prime_id).head;
|
||||
prime->rows = base_count;
|
||||
|
||||
auto& o = c.graph.vertices_[this_index].obj;
|
||||
int num_links = o.real_links.length;
|
||||
for (int i = 0; i < num_links; i++)
|
||||
{
|
||||
const auto& link = o.real_links[i];
|
||||
unsigned old_index = (link.position - 2) / OT::Offset16::static_size;
|
||||
unsigned klass = old_index % class_count;
|
||||
if (klass < start || klass >= end) continue;
|
||||
|
||||
unsigned base = old_index / class_count;
|
||||
unsigned new_klass = klass - start;
|
||||
unsigned new_index = base * new_class_count + new_klass;
|
||||
|
||||
|
||||
unsigned child_idx = link.objidx;
|
||||
c.graph.add_link (&(prime->matrixZ[new_index]),
|
||||
prime_id,
|
||||
child_idx);
|
||||
|
||||
auto& child = c.graph.vertices_[child_idx];
|
||||
child.remove_parent (this_index);
|
||||
|
||||
o.real_links.remove_unordered (i);
|
||||
num_links--;
|
||||
i--;
|
||||
}
|
||||
|
||||
return prime_id;
|
||||
}
|
||||
};
|
||||
|
||||
struct MarkArray : public OT::Layout::GPOS_impl::MarkArray
|
||||
{
|
||||
bool sanitize (graph_t::vertex_t& vertex) const
|
||||
{
|
||||
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
|
||||
unsigned min_size = MarkArray::min_size;
|
||||
if (vertex_len < min_size) return false;
|
||||
hb_barrier ();
|
||||
|
||||
return vertex_len >= get_size ();
|
||||
}
|
||||
|
||||
bool shrink (gsubgpos_graph_context_t& c,
|
||||
const hb_hashmap_t<unsigned, unsigned>& mark_array_links,
|
||||
unsigned this_index,
|
||||
unsigned new_class_count)
|
||||
{
|
||||
auto& o = c.graph.vertices_[this_index].obj;
|
||||
for (const auto& link : o.real_links)
|
||||
c.graph.vertices_[link.objidx].remove_parent (this_index);
|
||||
o.real_links.reset ();
|
||||
|
||||
unsigned new_index = 0;
|
||||
for (const auto& record : this->iter ())
|
||||
{
|
||||
unsigned klass = record.klass;
|
||||
if (klass >= new_class_count) continue;
|
||||
|
||||
(*this)[new_index].klass = klass;
|
||||
unsigned position = (char*) &record.markAnchor - (char*) this;
|
||||
unsigned* objidx;
|
||||
if (!mark_array_links.has (position, &objidx))
|
||||
{
|
||||
new_index++;
|
||||
continue;
|
||||
}
|
||||
|
||||
c.graph.add_link (&(*this)[new_index].markAnchor, this_index, *objidx);
|
||||
new_index++;
|
||||
}
|
||||
|
||||
this->len = new_index;
|
||||
o.tail = o.head + MarkArray::min_size +
|
||||
OT::Layout::GPOS_impl::MarkRecord::static_size * new_index;
|
||||
return true;
|
||||
}
|
||||
|
||||
unsigned clone (gsubgpos_graph_context_t& c,
|
||||
unsigned this_index,
|
||||
const hb_hashmap_t<unsigned, unsigned>& pos_to_index,
|
||||
hb_set_t& marks,
|
||||
unsigned start_class)
|
||||
{
|
||||
unsigned size = MarkArray::min_size +
|
||||
OT::Layout::GPOS_impl::MarkRecord::static_size *
|
||||
marks.get_population ();
|
||||
unsigned prime_id = c.create_node (size);
|
||||
if (prime_id == (unsigned) -1) return -1;
|
||||
MarkArray* prime = (MarkArray*) c.graph.object (prime_id).head;
|
||||
prime->len = marks.get_population ();
|
||||
|
||||
|
||||
unsigned i = 0;
|
||||
for (hb_codepoint_t mark : marks)
|
||||
{
|
||||
(*prime)[i].klass = (*this)[mark].klass - start_class;
|
||||
unsigned offset_pos = (char*) &((*this)[mark].markAnchor) - (char*) this;
|
||||
unsigned* anchor_index;
|
||||
if (pos_to_index.has (offset_pos, &anchor_index))
|
||||
c.graph.move_child (this_index,
|
||||
&((*this)[mark].markAnchor),
|
||||
prime_id,
|
||||
&((*prime)[i].markAnchor));
|
||||
|
||||
i++;
|
||||
}
|
||||
|
||||
return prime_id;
|
||||
}
|
||||
};
|
||||
|
||||
struct MarkBasePosFormat1 : public OT::Layout::GPOS_impl::MarkBasePosFormat1_2<SmallTypes>
|
||||
{
|
||||
bool sanitize (graph_t::vertex_t& vertex) const
|
||||
{
|
||||
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
|
||||
return vertex_len >= MarkBasePosFormat1::static_size;
|
||||
}
|
||||
|
||||
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
|
||||
unsigned parent_index,
|
||||
unsigned this_index)
|
||||
{
|
||||
hb_set_t visited;
|
||||
|
||||
const unsigned base_coverage_id = c.graph.index_for_offset (this_index, &baseCoverage);
|
||||
const unsigned base_size =
|
||||
OT::Layout::GPOS_impl::MarkBasePosFormat1_2<SmallTypes>::min_size +
|
||||
MarkArray::min_size +
|
||||
AnchorMatrix::min_size +
|
||||
c.graph.vertices_[base_coverage_id].table_size ();
|
||||
|
||||
hb_vector_t<class_info_t> class_to_info = get_class_info (c, this_index);
|
||||
|
||||
unsigned class_count = classCount;
|
||||
auto base_array = c.graph.as_table<AnchorMatrix> (this_index,
|
||||
&baseArray,
|
||||
class_count);
|
||||
if (!base_array) return hb_vector_t<unsigned> ();
|
||||
unsigned base_count = base_array.table->rows;
|
||||
|
||||
unsigned partial_coverage_size = 4;
|
||||
unsigned accumulated = base_size;
|
||||
hb_vector_t<unsigned> split_points;
|
||||
|
||||
for (unsigned klass = 0; klass < class_count; klass++)
|
||||
{
|
||||
class_info_t& info = class_to_info[klass];
|
||||
partial_coverage_size += OT::HBUINT16::static_size * info.marks.get_population ();
|
||||
unsigned accumulated_delta =
|
||||
OT::Layout::GPOS_impl::MarkRecord::static_size * info.marks.get_population () +
|
||||
OT::Offset16::static_size * base_count;
|
||||
|
||||
for (unsigned objidx : info.child_indices)
|
||||
accumulated_delta += c.graph.find_subgraph_size (objidx, visited);
|
||||
|
||||
accumulated += accumulated_delta;
|
||||
unsigned total = accumulated + partial_coverage_size;
|
||||
|
||||
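// SmallTypes offsets are 16 bits wide, so once the projected subtable size would
// reach 64KB, record a split point before this class.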
if (total >= (1 << 16))
|
||||
{
|
||||
split_points.push (klass);
|
||||
accumulated = base_size + accumulated_delta;
|
||||
partial_coverage_size = 4 + OT::HBUINT16::static_size * info.marks.get_population ();
|
||||
visited.clear (); // node sharing isn't allowed between splits.
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
const unsigned mark_array_id = c.graph.index_for_offset (this_index, &markArray);
|
||||
split_context_t split_context {
|
||||
c,
|
||||
this,
|
||||
c.graph.duplicate_if_shared (parent_index, this_index),
|
||||
std::move (class_to_info),
|
||||
c.graph.vertices_[mark_array_id].position_to_index_map (),
|
||||
};
|
||||
|
||||
return actuate_subtable_split<split_context_t> (split_context, split_points);
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
struct class_info_t {
|
||||
hb_set_t marks;
|
||||
hb_vector_t<unsigned> child_indices;
|
||||
};
|
||||
|
||||
struct split_context_t {
|
||||
gsubgpos_graph_context_t& c;
|
||||
MarkBasePosFormat1* thiz;
|
||||
unsigned this_index;
|
||||
hb_vector_t<class_info_t> class_to_info;
|
||||
hb_hashmap_t<unsigned, unsigned> mark_array_links;
|
||||
|
||||
hb_set_t marks_for (unsigned start, unsigned end)
|
||||
{
|
||||
hb_set_t marks;
|
||||
for (unsigned klass = start; klass < end; klass++)
|
||||
{
|
||||
+ class_to_info[klass].marks.iter ()
|
||||
| hb_sink (marks)
|
||||
;
|
||||
}
|
||||
return marks;
|
||||
}
|
||||
|
||||
unsigned original_count ()
|
||||
{
|
||||
return thiz->classCount;
|
||||
}
|
||||
|
||||
unsigned clone_range (unsigned start, unsigned end)
|
||||
{
|
||||
return thiz->clone_range (*this, this->this_index, start, end);
|
||||
}
|
||||
|
||||
bool shrink (unsigned count)
|
||||
{
|
||||
return thiz->shrink (*this, this->this_index, count);
|
||||
}
|
||||
};
|
||||
|
||||
hb_vector_t<class_info_t> get_class_info (gsubgpos_graph_context_t& c,
|
||||
unsigned this_index)
|
||||
{
|
||||
hb_vector_t<class_info_t> class_to_info;
|
||||
|
||||
unsigned class_count = classCount;
|
||||
if (!class_count) return class_to_info;
|
||||
|
||||
if (!class_to_info.resize (class_count))
|
||||
return hb_vector_t<class_info_t>();
|
||||
|
||||
auto mark_array = c.graph.as_table<MarkArray> (this_index, &markArray);
|
||||
if (!mark_array) return hb_vector_t<class_info_t> ();
|
||||
unsigned mark_count = mark_array.table->len;
|
||||
for (unsigned mark = 0; mark < mark_count; mark++)
|
||||
{
|
||||
unsigned klass = (*mark_array.table)[mark].get_class ();
|
||||
if (klass >= class_count) continue;
|
||||
class_to_info[klass].marks.add (mark);
|
||||
}
|
||||
|
||||
for (const auto& link : mark_array.vertex->obj.real_links)
|
||||
{
|
||||
unsigned mark = (link.position - 2) /
|
||||
OT::Layout::GPOS_impl::MarkRecord::static_size;
|
||||
unsigned klass = (*mark_array.table)[mark].get_class ();
|
||||
if (klass >= class_count) continue;
|
||||
class_to_info[klass].child_indices.push (link.objidx);
|
||||
}
|
||||
|
||||
unsigned base_array_id =
|
||||
c.graph.index_for_offset (this_index, &baseArray);
|
||||
auto& base_array_v = c.graph.vertices_[base_array_id];
|
||||
|
||||
for (const auto& link : base_array_v.obj.real_links)
|
||||
{
|
||||
unsigned index = (link.position - 2) / OT::Offset16::static_size;
|
||||
unsigned klass = index % class_count;
|
||||
class_to_info[klass].child_indices.push (link.objidx);
|
||||
}
|
||||
|
||||
return class_to_info;
|
||||
}
|
||||
|
||||
bool shrink (split_context_t& sc,
|
||||
unsigned this_index,
|
||||
unsigned count)
|
||||
{
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr,
|
||||
" Shrinking MarkBasePosFormat1 (%u) to [0, %u).",
|
||||
this_index,
|
||||
count);
|
||||
|
||||
unsigned old_count = classCount;
|
||||
if (count >= old_count)
|
||||
return true;
|
||||
|
||||
classCount = count;
|
||||
|
||||
auto mark_coverage = sc.c.graph.as_mutable_table<Coverage> (this_index,
|
||||
&markCoverage);
|
||||
if (!mark_coverage) return false;
|
||||
hb_set_t marks = sc.marks_for (0, count);
|
||||
auto new_coverage =
|
||||
+ hb_enumerate (mark_coverage.table->iter ())
|
||||
| hb_filter (marks, hb_first)
|
||||
| hb_map_retains_sorting (hb_second)
|
||||
;
|
||||
if (!Coverage::make_coverage (sc.c, + new_coverage,
|
||||
mark_coverage.index,
|
||||
4 + 2 * marks.get_population ()))
|
||||
return false;
|
||||
|
||||
|
||||
auto base_array = sc.c.graph.as_mutable_table<AnchorMatrix> (this_index,
|
||||
&baseArray,
|
||||
old_count);
|
||||
if (!base_array || !base_array.table->shrink (sc.c,
|
||||
base_array.index,
|
||||
old_count,
|
||||
count))
|
||||
return false;
|
||||
|
||||
auto mark_array = sc.c.graph.as_mutable_table<MarkArray> (this_index,
|
||||
&markArray);
|
||||
if (!mark_array || !mark_array.table->shrink (sc.c,
|
||||
sc.mark_array_links,
|
||||
mark_array.index,
|
||||
count))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Create a new MarkBasePos that has all of the data for classes from [start, end).
|
||||
unsigned clone_range (split_context_t& sc,
|
||||
unsigned this_index,
|
||||
unsigned start, unsigned end) const
|
||||
{
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr,
|
||||
" Cloning MarkBasePosFormat1 (%u) range [%u, %u).", this_index, start, end);
|
||||
|
||||
graph_t& graph = sc.c.graph;
|
||||
unsigned prime_size = OT::Layout::GPOS_impl::MarkBasePosFormat1_2<SmallTypes>::static_size;
|
||||
|
||||
unsigned prime_id = sc.c.create_node (prime_size);
|
||||
if (prime_id == (unsigned) -1) return -1;
|
||||
|
||||
MarkBasePosFormat1* prime = (MarkBasePosFormat1*) graph.object (prime_id).head;
|
||||
prime->format = this->format;
|
||||
unsigned new_class_count = end - start;
|
||||
prime->classCount = new_class_count;
|
||||
|
||||
unsigned base_coverage_id =
|
||||
graph.index_for_offset (sc.this_index, &baseCoverage);
|
||||
graph.add_link (&(prime->baseCoverage), prime_id, base_coverage_id);
|
||||
graph.duplicate (prime_id, base_coverage_id);
|
||||
|
||||
auto mark_coverage = sc.c.graph.as_table<Coverage> (this_index,
|
||||
&markCoverage);
|
||||
if (!mark_coverage) return false;
|
||||
hb_set_t marks = sc.marks_for (start, end);
|
||||
auto new_coverage =
|
||||
+ hb_enumerate (mark_coverage.table->iter ())
|
||||
| hb_filter (marks, hb_first)
|
||||
| hb_map_retains_sorting (hb_second)
|
||||
;
|
||||
if (!Coverage::add_coverage (sc.c,
|
||||
prime_id,
|
||||
2,
|
||||
+ new_coverage,
|
||||
marks.get_population () * 2 + 4))
|
||||
return -1;
|
||||
|
||||
auto mark_array =
|
||||
graph.as_table <MarkArray> (sc.this_index, &markArray);
|
||||
if (!mark_array) return -1;
|
||||
unsigned new_mark_array =
|
||||
mark_array.table->clone (sc.c,
|
||||
mark_array.index,
|
||||
sc.mark_array_links,
|
||||
marks,
|
||||
start);
|
||||
graph.add_link (&(prime->markArray), prime_id, new_mark_array);
|
||||
|
||||
unsigned class_count = classCount;
|
||||
auto base_array =
|
||||
graph.as_table<AnchorMatrix> (sc.this_index, &baseArray, class_count);
|
||||
if (!base_array) return -1;
|
||||
unsigned new_base_array =
|
||||
base_array.table->clone (sc.c,
|
||||
base_array.index,
|
||||
start, end, this->classCount);
|
||||
graph.add_link (&(prime->baseArray), prime_id, new_base_array);
|
||||
|
||||
return prime_id;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct MarkBasePos : public OT::Layout::GPOS_impl::MarkBasePos
|
||||
{
|
||||
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
|
||||
unsigned parent_index,
|
||||
unsigned this_index)
|
||||
{
|
||||
switch (u.format) {
|
||||
case 1:
|
||||
return ((MarkBasePosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
|
||||
#ifndef HB_NO_BEYOND_64K
|
||||
case 2: HB_FALLTHROUGH;
|
||||
// Don't split 24-bit MarkBasePos subtables.
|
||||
#endif
|
||||
default:
|
||||
return hb_vector_t<unsigned> ();
|
||||
}
|
||||
}
|
||||
|
||||
bool sanitize (graph_t::vertex_t& vertex) const
|
||||
{
|
||||
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
|
||||
if (vertex_len < u.format.get_size ()) return false;
|
||||
hb_barrier ();
|
||||
|
||||
switch (u.format) {
|
||||
case 1:
|
||||
return ((MarkBasePosFormat1*)(&u.format1))->sanitize (vertex);
|
||||
#ifndef HB_NO_BEYOND_64K
|
||||
case 2: HB_FALLTHROUGH;
|
||||
#endif
|
||||
default:
|
||||
// We don't handle format 2 here.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
|
||||
#endif // GRAPH_MARKBASEPOS_GRAPH_HH
|
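Before the next file, a standalone sketch (plain C++, not HarfBuzz API) of the AnchorMatrix index arithmetic that shrink () and clone () above rely on: anchors are stored row-major as matrixZ[base * class_count + klass], so moving an anchor into a clone that covers classes [start, end) is pure integer remapping.

#include <cassert>

static unsigned remap (unsigned old_index,
                       unsigned old_class_count,
                       unsigned start,
                       unsigned new_class_count)
{
  unsigned base  = old_index / old_class_count;   // which base glyph row
  unsigned klass = old_index % old_class_count;   // which mark class column
  return base * new_class_count + (klass - start);
}

int main ()
{
  // Row 3, class 5 of an 8-class matrix lands at row 3, class 1 of a clone covering [4, 8).
  assert (remap (3 * 8 + 5, 8, 4, 4) == 3 * 4 + 1);
}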
652
thirdparty/harfbuzz/src/graph/pairpos-graph.hh
vendored
Normal file
@@ -0,0 +1,652 @@
|
||||
/*
|
||||
* Copyright © 2022 Google, Inc.
|
||||
*
|
||||
* This is part of HarfBuzz, a text shaping library.
|
||||
*
|
||||
* Permission is hereby granted, without written agreement and without
|
||||
* license or royalty fees, to use, copy, modify, and distribute this
|
||||
* software and its documentation for any purpose, provided that the
|
||||
* above copyright notice and the following two paragraphs appear in
|
||||
* all copies of this software.
|
||||
*
|
||||
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
|
||||
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
|
||||
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
|
||||
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
|
||||
* DAMAGE.
|
||||
*
|
||||
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
|
||||
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
|
||||
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
|
||||
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
|
||||
*
|
||||
* Google Author(s): Garret Rieger
|
||||
*/
|
||||
|
||||
#ifndef GRAPH_PAIRPOS_GRAPH_HH
|
||||
#define GRAPH_PAIRPOS_GRAPH_HH
|
||||
|
||||
#include "split-helpers.hh"
|
||||
#include "coverage-graph.hh"
|
||||
#include "classdef-graph.hh"
|
||||
#include "../OT/Layout/GPOS/PairPos.hh"
|
||||
#include "../OT/Layout/GPOS/PosLookupSubTable.hh"
|
||||
|
||||
namespace graph {
|
||||
|
||||
struct PairPosFormat1 : public OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>
|
||||
{
|
||||
bool sanitize (graph_t::vertex_t& vertex) const
|
||||
{
|
||||
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
|
||||
unsigned min_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size;
|
||||
if (vertex_len < min_size) return false;
|
||||
hb_barrier ();
|
||||
|
||||
return vertex_len >=
|
||||
min_size + pairSet.get_size () - pairSet.len.get_size();
|
||||
}
|
||||
|
||||
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
|
||||
unsigned parent_index,
|
||||
unsigned this_index)
|
||||
{
|
||||
hb_set_t visited;
|
||||
|
||||
const unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
|
||||
const unsigned coverage_size = c.graph.vertices_[coverage_id].table_size ();
|
||||
const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size;
|
||||
|
||||
unsigned partial_coverage_size = 4;
|
||||
unsigned accumulated = base_size;
|
||||
hb_vector_t<unsigned> split_points;
|
||||
for (unsigned i = 0; i < pairSet.len; i++)
|
||||
{
|
||||
unsigned pair_set_index = pair_set_graph_index (c, this_index, i);
|
||||
unsigned accumulated_delta =
|
||||
c.graph.find_subgraph_size (pair_set_index, visited) +
|
||||
SmallTypes::size; // for PairSet offset.
|
||||
partial_coverage_size += OT::HBUINT16::static_size;
|
||||
|
||||
accumulated += accumulated_delta;
|
||||
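// Cap the coverage contribution at the real table's size: a split subtable never
// needs more coverage entries than the original coverage table has.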
unsigned total = accumulated + hb_min (partial_coverage_size, coverage_size);
|
||||
|
||||
if (total >= (1 << 16))
|
||||
{
|
||||
split_points.push (i);
|
||||
accumulated = base_size + accumulated_delta;
|
||||
partial_coverage_size = 6;
|
||||
visited.clear (); // node sharing isn't allowed between splits.
|
||||
}
|
||||
}
|
||||
|
||||
split_context_t split_context {
|
||||
c,
|
||||
this,
|
||||
c.graph.duplicate_if_shared (parent_index, this_index),
|
||||
};
|
||||
|
||||
return actuate_subtable_split<split_context_t> (split_context, split_points);
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
struct split_context_t {
|
||||
gsubgpos_graph_context_t& c;
|
||||
PairPosFormat1* thiz;
|
||||
unsigned this_index;
|
||||
|
||||
unsigned original_count ()
|
||||
{
|
||||
return thiz->pairSet.len;
|
||||
}
|
||||
|
||||
unsigned clone_range (unsigned start, unsigned end)
|
||||
{
|
||||
return thiz->clone_range (this->c, this->this_index, start, end);
|
||||
}
|
||||
|
||||
bool shrink (unsigned count)
|
||||
{
|
||||
return thiz->shrink (this->c, this->this_index, count);
|
||||
}
|
||||
};
|
||||
|
||||
bool shrink (gsubgpos_graph_context_t& c,
|
||||
unsigned this_index,
|
||||
unsigned count)
|
||||
{
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr,
|
||||
" Shrinking PairPosFormat1 (%u) to [0, %u).",
|
||||
this_index,
|
||||
count);
|
||||
unsigned old_count = pairSet.len;
|
||||
if (count >= old_count)
|
||||
return true;
|
||||
|
||||
pairSet.len = count;
|
||||
c.graph.vertices_[this_index].obj.tail -= (old_count - count) * SmallTypes::size;
|
||||
|
||||
auto coverage = c.graph.as_mutable_table<Coverage> (this_index, &this->coverage);
|
||||
if (!coverage) return false;
|
||||
|
||||
unsigned coverage_size = coverage.vertex->table_size ();
|
||||
auto new_coverage =
|
||||
+ hb_zip (coverage.table->iter (), hb_range ())
|
||||
| hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
|
||||
return p.second < count;
|
||||
})
|
||||
| hb_map_retains_sorting (hb_first)
|
||||
;
|
||||
|
||||
return Coverage::make_coverage (c, new_coverage, coverage.index, coverage_size);
|
||||
}
|
||||
|
||||
// Create a new PairPos including PairSet's from start (inclusive) to end (exclusive).
|
||||
// Returns object id of the new object.
|
||||
unsigned clone_range (gsubgpos_graph_context_t& c,
|
||||
unsigned this_index,
|
||||
unsigned start, unsigned end) const
|
||||
{
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr,
|
||||
" Cloning PairPosFormat1 (%u) range [%u, %u).", this_index, start, end);
|
||||
|
||||
unsigned num_pair_sets = end - start;
|
||||
unsigned prime_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size
|
||||
+ num_pair_sets * SmallTypes::size;
|
||||
|
||||
unsigned pair_pos_prime_id = c.create_node (prime_size);
|
||||
if (pair_pos_prime_id == (unsigned) -1) return -1;
|
||||
|
||||
PairPosFormat1* pair_pos_prime = (PairPosFormat1*) c.graph.object (pair_pos_prime_id).head;
|
||||
pair_pos_prime->format = this->format;
|
||||
pair_pos_prime->valueFormat[0] = this->valueFormat[0];
|
||||
pair_pos_prime->valueFormat[1] = this->valueFormat[1];
|
||||
pair_pos_prime->pairSet.len = num_pair_sets;
|
||||
|
||||
for (unsigned i = start; i < end; i++)
|
||||
{
|
||||
c.graph.move_child<> (this_index,
|
||||
&pairSet[i],
|
||||
pair_pos_prime_id,
|
||||
&pair_pos_prime->pairSet[i - start]);
|
||||
}
|
||||
|
||||
unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
|
||||
if (!Coverage::clone_coverage (c,
|
||||
coverage_id,
|
||||
pair_pos_prime_id,
|
||||
2,
|
||||
start, end))
|
||||
return -1;
|
||||
|
||||
return pair_pos_prime_id;
|
||||
}
|
||||
|
||||
|
||||
|
||||
unsigned pair_set_graph_index (gsubgpos_graph_context_t& c, unsigned this_index, unsigned i) const
|
||||
{
|
||||
return c.graph.index_for_offset (this_index, &pairSet[i]);
|
||||
}
|
||||
};
|
||||
|
||||
struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>
|
||||
{
|
||||
bool sanitize (graph_t::vertex_t& vertex) const
|
||||
{
|
||||
size_t vertex_len = vertex.table_size ();
|
||||
unsigned min_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
|
||||
if (vertex_len < min_size) return false;
|
||||
hb_barrier ();
|
||||
|
||||
const unsigned class1_count = class1Count;
|
||||
return vertex_len >=
|
||||
min_size + class1_count * get_class1_record_size ();
|
||||
}
|
||||
|
||||
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
|
||||
unsigned parent_index,
|
||||
unsigned this_index)
|
||||
{
|
||||
const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
|
||||
const unsigned class_def_2_size = size_of (c, this_index, &classDef2);
|
||||
const Coverage* coverage = get_coverage (c, this_index);
|
||||
const ClassDef* class_def_1 = get_class_def_1 (c, this_index);
|
||||
auto gid_and_class =
|
||||
+ coverage->iter ()
|
||||
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
|
||||
return hb_codepoint_pair_t (gid, class_def_1->get_class (gid));
|
||||
})
|
||||
;
|
||||
class_def_size_estimator_t estimator (gid_and_class);
|
||||
|
||||
const unsigned class1_count = class1Count;
|
||||
const unsigned class2_count = class2Count;
|
||||
const unsigned class1_record_size = get_class1_record_size ();
|
||||
|
||||
const unsigned value_1_len = valueFormat1.get_len ();
|
||||
const unsigned value_2_len = valueFormat2.get_len ();
|
||||
const unsigned total_value_len = value_1_len + value_2_len;
|
||||
|
||||
unsigned accumulated = base_size;
|
||||
unsigned coverage_size = 4;
|
||||
unsigned class_def_1_size = 4;
|
||||
unsigned max_coverage_size = coverage_size;
|
||||
unsigned max_class_def_1_size = class_def_1_size;
|
||||
|
||||
hb_vector_t<unsigned> split_points;
|
||||
|
||||
hb_hashmap_t<unsigned, unsigned> device_tables = get_all_device_tables (c, this_index);
|
||||
hb_vector_t<unsigned> format1_device_table_indices = valueFormat1.get_device_table_indices ();
|
||||
hb_vector_t<unsigned> format2_device_table_indices = valueFormat2.get_device_table_indices ();
|
||||
bool has_device_tables = bool(format1_device_table_indices) || bool(format2_device_table_indices);
|
||||
|
||||
hb_set_t visited;
|
||||
for (unsigned i = 0; i < class1_count; i++)
|
||||
{
|
||||
unsigned accumulated_delta = class1_record_size;
|
||||
class_def_1_size = estimator.add_class_def_size (i);
|
||||
coverage_size = estimator.coverage_size ();
|
||||
max_coverage_size = hb_max (max_coverage_size, coverage_size);
|
||||
max_class_def_1_size = hb_max (max_class_def_1_size, class_def_1_size);
|
||||
|
||||
if (has_device_tables) {
|
||||
for (unsigned j = 0; j < class2_count; j++)
|
||||
{
|
||||
unsigned value1_index = total_value_len * (class2_count * i + j);
|
||||
unsigned value2_index = value1_index + value_1_len;
|
||||
accumulated_delta += size_of_value_record_children (c,
|
||||
device_tables,
|
||||
format1_device_table_indices,
|
||||
value1_index,
|
||||
visited);
|
||||
accumulated_delta += size_of_value_record_children (c,
|
||||
device_tables,
|
||||
format2_device_table_indices,
|
||||
value2_index,
|
||||
visited);
|
||||
}
|
||||
}
|
||||
|
||||
accumulated += accumulated_delta;
|
||||
unsigned total = accumulated
|
||||
+ coverage_size + class_def_1_size + class_def_2_size
|
||||
// The largest object will pack last and can exceed the size limit.
|
||||
- hb_max (hb_max (coverage_size, class_def_1_size), class_def_2_size);
|
||||
if (total >= (1 << 16))
|
||||
{
|
||||
split_points.push (i);
|
||||
// split does not include i, so add the size for i when we reset the size counters.
|
||||
accumulated = base_size + accumulated_delta;
|
||||
|
||||
estimator.reset();
|
||||
class_def_1_size = estimator.add_class_def_size(i);
|
||||
coverage_size = estimator.coverage_size();
|
||||
visited.clear (); // node sharing isn't allowed between splits.
|
||||
}
|
||||
}
|
||||
|
||||
split_context_t split_context {
|
||||
c,
|
||||
this,
|
||||
c.graph.duplicate_if_shared (parent_index, this_index),
|
||||
class1_record_size,
|
||||
total_value_len,
|
||||
value_1_len,
|
||||
value_2_len,
|
||||
max_coverage_size,
|
||||
max_class_def_1_size,
|
||||
device_tables,
|
||||
format1_device_table_indices,
|
||||
format2_device_table_indices
|
||||
};
|
||||
|
||||
return actuate_subtable_split<split_context_t> (split_context, split_points);
|
||||
}
|
||||
private:
|
||||
|
||||
struct split_context_t
|
||||
{
|
||||
gsubgpos_graph_context_t& c;
|
||||
PairPosFormat2* thiz;
|
||||
unsigned this_index;
|
||||
unsigned class1_record_size;
|
||||
unsigned value_record_len;
|
||||
unsigned value1_record_len;
|
||||
unsigned value2_record_len;
|
||||
unsigned max_coverage_size;
|
||||
unsigned max_class_def_size;
|
||||
|
||||
const hb_hashmap_t<unsigned, unsigned>& device_tables;
|
||||
const hb_vector_t<unsigned>& format1_device_table_indices;
|
||||
const hb_vector_t<unsigned>& format2_device_table_indices;
|
||||
|
||||
unsigned original_count ()
|
||||
{
|
||||
return thiz->class1Count;
|
||||
}
|
||||
|
||||
unsigned clone_range (unsigned start, unsigned end)
|
||||
{
|
||||
return thiz->clone_range (*this, start, end);
|
||||
}
|
||||
|
||||
bool shrink (unsigned count)
|
||||
{
|
||||
return thiz->shrink (*this, count);
|
||||
}
|
||||
};
|
||||
|
||||
size_t get_class1_record_size () const
|
||||
{
|
||||
const size_t class2_count = class2Count;
|
||||
return
|
||||
class2_count * (valueFormat1.get_size () + valueFormat2.get_size ());
|
||||
}
|
||||
|
||||
unsigned clone_range (split_context_t& split_context,
|
||||
unsigned start, unsigned end) const
|
||||
{
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr,
|
||||
" Cloning PairPosFormat2 (%u) range [%u, %u).", split_context.this_index, start, end);
|
||||
|
||||
graph_t& graph = split_context.c.graph;
|
||||
|
||||
unsigned num_records = end - start;
|
||||
unsigned prime_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size
|
||||
+ num_records * split_context.class1_record_size;
|
||||
|
||||
unsigned pair_pos_prime_id = split_context.c.create_node (prime_size);
|
||||
if (pair_pos_prime_id == (unsigned) -1) return -1;
|
||||
|
||||
PairPosFormat2* pair_pos_prime =
|
||||
(PairPosFormat2*) graph.object (pair_pos_prime_id).head;
|
||||
pair_pos_prime->format = this->format;
|
||||
pair_pos_prime->valueFormat1 = this->valueFormat1;
|
||||
pair_pos_prime->valueFormat2 = this->valueFormat2;
|
||||
pair_pos_prime->class1Count = num_records;
|
||||
pair_pos_prime->class2Count = this->class2Count;
|
||||
clone_class1_records (split_context,
|
||||
pair_pos_prime_id,
|
||||
start,
|
||||
end);
|
||||
|
||||
unsigned coverage_id =
|
||||
graph.index_for_offset (split_context.this_index, &coverage);
|
||||
unsigned class_def_1_id =
|
||||
graph.index_for_offset (split_context.this_index, &classDef1);
|
||||
auto& coverage_v = graph.vertices_[coverage_id];
|
||||
auto& class_def_1_v = graph.vertices_[class_def_1_id];
|
||||
Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
|
||||
ClassDef* class_def_1_table = (ClassDef*) class_def_1_v.obj.head;
|
||||
if (!coverage_table
|
||||
|| !coverage_table->sanitize (coverage_v)
|
||||
|| !class_def_1_table
|
||||
|| !class_def_1_table->sanitize (class_def_1_v))
|
||||
return -1;
|
||||
|
||||
auto klass_map =
|
||||
+ coverage_table->iter ()
|
||||
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
|
||||
return hb_codepoint_pair_t (gid, class_def_1_table->get_class (gid));
|
||||
})
|
||||
| hb_filter ([&] (hb_codepoint_t klass) {
|
||||
return klass >= start && klass < end;
|
||||
}, hb_second)
|
||||
| hb_map_retains_sorting ([&] (hb_codepoint_pair_t gid_and_class) {
|
||||
// Classes must be from 0...N so subtract start
|
||||
return hb_codepoint_pair_t (gid_and_class.first, gid_and_class.second - start);
|
||||
})
|
||||
;
|
||||
|
||||
if (!Coverage::add_coverage (split_context.c,
|
||||
pair_pos_prime_id,
|
||||
2,
|
||||
+ klass_map | hb_map_retains_sorting (hb_first),
|
||||
split_context.max_coverage_size))
|
||||
return -1;
|
||||
|
||||
// classDef1
|
||||
if (!ClassDef::add_class_def (split_context.c,
|
||||
pair_pos_prime_id,
|
||||
8,
|
||||
+ klass_map,
|
||||
split_context.max_class_def_size))
|
||||
return -1;
|
||||
|
||||
// classDef2
|
||||
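// position 10 is the byte offset of classDef2 within PairPosFormat2; the link is
// wired by hand here and duplicate () below then gives the clone its own copy of
// the class def.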
unsigned class_def_2_id =
|
||||
graph.index_for_offset (split_context.this_index, &classDef2);
|
||||
auto* class_def_link = graph.vertices_[pair_pos_prime_id].obj.real_links.push ();
|
||||
class_def_link->width = SmallTypes::size;
|
||||
class_def_link->objidx = class_def_2_id;
|
||||
class_def_link->position = 10;
|
||||
graph.vertices_[class_def_2_id].add_parent (pair_pos_prime_id, false);
|
||||
graph.duplicate (pair_pos_prime_id, class_def_2_id);
|
||||
|
||||
return pair_pos_prime_id;
|
||||
}
|
||||
|
||||
void clone_class1_records (split_context_t& split_context,
|
||||
unsigned pair_pos_prime_id,
|
||||
unsigned start, unsigned end) const
|
||||
{
|
||||
PairPosFormat2* pair_pos_prime =
|
||||
(PairPosFormat2*) split_context.c.graph.object (pair_pos_prime_id).head;
|
||||
|
||||
char* start_addr = ((char*)&values[0]) + start * split_context.class1_record_size;
|
||||
unsigned num_records = end - start;
|
||||
hb_memcpy (&pair_pos_prime->values[0],
|
||||
start_addr,
|
||||
num_records * split_context.class1_record_size);
|
||||
|
||||
if (!split_context.format1_device_table_indices
|
||||
&& !split_context.format2_device_table_indices)
|
||||
// No device tables to move over.
|
||||
return;
|
||||
|
||||
unsigned class2_count = class2Count;
|
||||
for (unsigned i = start; i < end; i++)
|
||||
{
|
||||
for (unsigned j = 0; j < class2_count; j++)
|
||||
{
|
||||
unsigned value1_index = split_context.value_record_len * (class2_count * i + j);
|
||||
unsigned value2_index = value1_index + split_context.value1_record_len;
|
||||
|
||||
unsigned new_value1_index = split_context.value_record_len * (class2_count * (i - start) + j);
|
||||
unsigned new_value2_index = new_value1_index + split_context.value1_record_len;
|
||||
|
||||
transfer_device_tables (split_context,
|
||||
pair_pos_prime_id,
|
||||
split_context.format1_device_table_indices,
|
||||
value1_index,
|
||||
new_value1_index);
|
||||
|
||||
transfer_device_tables (split_context,
|
||||
pair_pos_prime_id,
|
||||
split_context.format2_device_table_indices,
|
||||
value2_index,
|
||||
new_value2_index);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void transfer_device_tables (split_context_t& split_context,
|
||||
unsigned pair_pos_prime_id,
|
||||
const hb_vector_t<unsigned>& device_table_indices,
|
||||
unsigned old_value_record_index,
|
||||
unsigned new_value_record_index) const
|
||||
{
|
||||
PairPosFormat2* pair_pos_prime =
|
||||
(PairPosFormat2*) split_context.c.graph.object (pair_pos_prime_id).head;
|
||||
|
||||
for (unsigned i : device_table_indices)
|
||||
{
|
||||
OT::Offset16* record = (OT::Offset16*) &values[old_value_record_index + i];
|
||||
unsigned record_position = ((char*) record) - ((char*) this);
|
||||
if (!split_context.device_tables.has (record_position)) continue;
|
||||
|
||||
split_context.c.graph.move_child (
|
||||
split_context.this_index,
|
||||
record,
|
||||
pair_pos_prime_id,
|
||||
(OT::Offset16*) &pair_pos_prime->values[new_value_record_index + i]);
|
||||
}
|
||||
}
|
||||
|
||||
bool shrink (split_context_t& split_context,
|
||||
unsigned count)
|
||||
{
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr,
|
||||
" Shrinking PairPosFormat2 (%u) to [0, %u).",
|
||||
split_context.this_index,
|
||||
count);
|
||||
unsigned old_count = class1Count;
|
||||
if (count >= old_count)
|
||||
return true;
|
||||
|
||||
graph_t& graph = split_context.c.graph;
|
||||
class1Count = count;
|
||||
graph.vertices_[split_context.this_index].obj.tail -=
|
||||
(old_count - count) * split_context.class1_record_size;
|
||||
|
||||
auto coverage =
|
||||
graph.as_mutable_table<Coverage> (split_context.this_index, &this->coverage);
|
||||
if (!coverage) return false;
|
||||
|
||||
auto class_def_1 =
|
||||
graph.as_mutable_table<ClassDef> (split_context.this_index, &classDef1);
|
||||
if (!class_def_1) return false;
|
||||
|
||||
auto klass_map =
|
||||
+ coverage.table->iter ()
|
||||
| hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
|
||||
return hb_codepoint_pair_t (gid, class_def_1.table->get_class (gid));
|
||||
})
|
||||
| hb_filter ([&] (hb_codepoint_t klass) {
|
||||
return klass < count;
|
||||
}, hb_second)
|
||||
;
|
||||
|
||||
auto new_coverage = + klass_map | hb_map_retains_sorting (hb_first);
|
||||
if (!Coverage::make_coverage (split_context.c,
|
||||
+ new_coverage,
|
||||
coverage.index,
|
||||
// existing ranges may not be kept; worst case size is a format 1
|
||||
// coverage table.
|
||||
4 + new_coverage.len() * 2))
|
||||
return false;
|
||||
|
||||
return ClassDef::make_class_def (split_context.c,
|
||||
+ klass_map,
|
||||
class_def_1.index,
|
||||
class_def_1.vertex->table_size ());
|
||||
}
|
||||
|
||||
hb_hashmap_t<unsigned, unsigned>
|
||||
get_all_device_tables (gsubgpos_graph_context_t& c,
|
||||
unsigned this_index) const
|
||||
{
|
||||
const auto& v = c.graph.vertices_[this_index];
|
||||
return v.position_to_index_map ();
|
||||
}
|
||||
|
||||
const Coverage* get_coverage (gsubgpos_graph_context_t& c,
|
||||
unsigned this_index) const
|
||||
{
|
||||
unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
|
||||
auto& coverage_v = c.graph.vertices_[coverage_id];
|
||||
|
||||
Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
|
||||
if (!coverage_table || !coverage_table->sanitize (coverage_v))
|
||||
return &Null(Coverage);
|
||||
return coverage_table;
|
||||
}
|
||||
|
||||
const ClassDef* get_class_def_1 (gsubgpos_graph_context_t& c,
|
||||
unsigned this_index) const
|
||||
{
|
||||
unsigned class_def_1_id = c.graph.index_for_offset (this_index, &classDef1);
|
||||
auto& class_def_1_v = c.graph.vertices_[class_def_1_id];
|
||||
|
||||
ClassDef* class_def_1_table = (ClassDef*) class_def_1_v.obj.head;
|
||||
if (!class_def_1_table || !class_def_1_table->sanitize (class_def_1_v))
|
||||
return &Null(ClassDef);
|
||||
return class_def_1_table;
|
||||
}
|
||||
|
||||
unsigned size_of_value_record_children (gsubgpos_graph_context_t& c,
|
||||
const hb_hashmap_t<unsigned, unsigned>& device_tables,
|
||||
const hb_vector_t<unsigned> device_table_indices,
|
||||
unsigned value_record_index,
|
||||
hb_set_t& visited)
|
||||
{
|
||||
unsigned size = 0;
|
||||
for (unsigned i : device_table_indices)
|
||||
{
|
||||
OT::Layout::GPOS_impl::Value* record = &values[value_record_index + i];
|
||||
unsigned record_position = ((char*) record) - ((char*) this);
|
||||
unsigned* obj_idx;
|
||||
if (!device_tables.has (record_position, &obj_idx)) continue;
|
||||
size += c.graph.find_subgraph_size (*obj_idx, visited);
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
unsigned size_of (gsubgpos_graph_context_t& c,
|
||||
unsigned this_index,
|
||||
const void* offset) const
|
||||
{
|
||||
const unsigned id = c.graph.index_for_offset (this_index, offset);
|
||||
return c.graph.vertices_[id].table_size ();
|
||||
}
|
||||
};
|
||||
|
||||
struct PairPos : public OT::Layout::GPOS_impl::PairPos
|
||||
{
|
||||
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
|
||||
unsigned parent_index,
|
||||
unsigned this_index)
|
||||
{
|
||||
switch (u.format) {
|
||||
case 1:
|
||||
return ((PairPosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
|
||||
case 2:
|
||||
return ((PairPosFormat2*)(&u.format2))->split_subtables (c, parent_index, this_index);
|
||||
#ifndef HB_NO_BEYOND_64K
|
||||
case 3: HB_FALLTHROUGH;
|
||||
case 4: HB_FALLTHROUGH;
|
||||
// Don't split 24-bit PairPos subtables.
|
||||
#endif
|
||||
default:
|
||||
return hb_vector_t<unsigned> ();
|
||||
}
|
||||
}
|
||||
|
||||
bool sanitize (graph_t::vertex_t& vertex) const
|
||||
{
|
||||
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
|
||||
if (vertex_len < u.format.get_size ()) return false;
|
||||
hb_barrier ();
|
||||
|
||||
switch (u.format) {
|
||||
case 1:
|
||||
return ((PairPosFormat1*)(&u.format1))->sanitize (vertex);
|
||||
case 2:
|
||||
return ((PairPosFormat2*)(&u.format2))->sanitize (vertex);
|
||||
#ifndef HB_NO_BEYOND_64K
|
||||
case 3: HB_FALLTHROUGH;
|
||||
case 4: HB_FALLTHROUGH;
|
||||
#endif
|
||||
default:
|
||||
// We don't handle format 3 and 4 here.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif // GRAPH_PAIRPOS_GRAPH_HH
|
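A worked version of the PairPosFormat2 size estimate above, as a standalone sketch rather than the real estimator: each Class1Record costs class2Count times the two ValueRecord sizes, and the largest of coverage/classDef1/classDef2 is subtracted because it packs last and so can run past the limit without its own offset overflowing.

#include <cassert>

static unsigned estimated_total (unsigned accumulated,
                                 unsigned coverage_size,
                                 unsigned class_def_1_size,
                                 unsigned class_def_2_size)
{
  unsigned largest = coverage_size;
  if (class_def_1_size > largest) largest = class_def_1_size;
  if (class_def_2_size > largest) largest = class_def_2_size;
  // Same shape as the `total` computation in split_subtables () above.
  return accumulated + coverage_size + class_def_1_size + class_def_2_size - largest;
}

int main ()
{
  assert (estimated_total (60000, 3000, 2000, 5000) == 65000);
  assert (estimated_total (60000, 3000, 2000, 5000) < (1u << 16)); // just under the split threshold
}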
277
thirdparty/harfbuzz/src/graph/serialize.hh
vendored
Normal file
@@ -0,0 +1,277 @@
|
||||
/*
|
||||
* Copyright © 2022 Google, Inc.
|
||||
*
|
||||
* This is part of HarfBuzz, a text shaping library.
|
||||
*
|
||||
* Permission is hereby granted, without written agreement and without
|
||||
* license or royalty fees, to use, copy, modify, and distribute this
|
||||
* software and its documentation for any purpose, provided that the
|
||||
* above copyright notice and the following two paragraphs appear in
|
||||
* all copies of this software.
|
||||
*
|
||||
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
|
||||
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
|
||||
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
|
||||
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
|
||||
* DAMAGE.
|
||||
*
|
||||
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
|
||||
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
|
||||
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
|
||||
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
|
||||
*
|
||||
* Google Author(s): Garret Rieger
|
||||
*/
|
||||
|
||||
#ifndef GRAPH_SERIALIZE_HH
|
||||
#define GRAPH_SERIALIZE_HH
|
||||
|
||||
namespace graph {
|
||||
|
||||
struct overflow_record_t
|
||||
{
|
||||
unsigned parent;
|
||||
unsigned child;
|
||||
|
||||
bool operator != (const overflow_record_t o) const
|
||||
{ return !(*this == o); }
|
||||
|
||||
inline bool operator == (const overflow_record_t& o) const
|
||||
{
|
||||
return parent == o.parent &&
|
||||
child == o.child;
|
||||
}
|
||||
|
||||
inline uint32_t hash () const
|
||||
{
|
||||
uint32_t current = 0;
|
||||
current = current * 31 + hb_hash (parent);
|
||||
current = current * 31 + hb_hash (child);
|
||||
return current;
|
||||
}
|
||||
};
|
||||
|
||||
inline
|
||||
int64_t compute_offset (
|
||||
const graph_t& graph,
|
||||
unsigned parent_idx,
|
||||
const hb_serialize_context_t::object_t::link_t& link)
|
||||
{
|
||||
const auto& parent = graph.vertices_[parent_idx];
|
||||
const auto& child = graph.vertices_[link.objidx];
|
||||
int64_t offset = 0;
|
||||
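// Whence selects what the offset is measured from: the parent's start (Head),
// the parent's end (Tail), or the start of the serialized buffer (Absolute).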
switch ((hb_serialize_context_t::whence_t) link.whence) {
|
||||
case hb_serialize_context_t::whence_t::Head:
|
||||
offset = child.start - parent.start; break;
|
||||
case hb_serialize_context_t::whence_t::Tail:
|
||||
offset = child.start - parent.end; break;
|
||||
case hb_serialize_context_t::whence_t::Absolute:
|
||||
offset = child.start; break;
|
||||
}
|
||||
|
||||
assert (offset >= link.bias);
|
||||
offset -= link.bias;
|
||||
return offset;
|
||||
}
|
||||
|
||||
inline
|
||||
bool is_valid_offset (int64_t offset,
|
||||
const hb_serialize_context_t::object_t::link_t& link)
|
||||
{
|
||||
if (unlikely (!link.width))
|
||||
// Virtual links can't overflow.
|
||||
return link.is_signed || offset >= 0;
|
||||
|
||||
if (link.is_signed)
|
||||
{
|
||||
if (link.width == 4)
|
||||
return offset >= -((int64_t) 1 << 31) && offset < ((int64_t) 1 << 31);
|
||||
else
|
||||
return offset >= -(1 << 15) && offset < (1 << 15);
|
||||
}
|
||||
else
|
||||
{
|
||||
if (link.width == 4)
|
||||
return offset >= 0 && offset < ((int64_t) 1 << 32);
|
||||
else if (link.width == 3)
|
||||
return offset >= 0 && offset < ((int32_t) 1 << 24);
|
||||
else
|
||||
return offset >= 0 && offset < (1 << 16);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Will any offsets overflow on graph when it's serialized?
|
||||
*/
|
||||
inline bool
|
||||
will_overflow (graph_t& graph,
|
||||
hb_vector_t<overflow_record_t>* overflows = nullptr)
|
||||
{
|
||||
if (overflows) overflows->resize (0);
|
||||
graph.update_positions ();
|
||||
|
||||
hb_hashmap_t<overflow_record_t*, bool> record_set;
|
||||
const auto& vertices = graph.vertices_;
|
||||
for (int parent_idx = vertices.length - 1; parent_idx >= 0; parent_idx--)
|
||||
{
|
||||
// Don't need to check virtual links for overflow
|
||||
for (const auto& link : vertices.arrayZ[parent_idx].obj.real_links)
|
||||
{
|
||||
int64_t offset = compute_offset (graph, parent_idx, link);
|
||||
if (likely (is_valid_offset (offset, link)))
|
||||
continue;
|
||||
|
||||
if (!overflows) return true;
|
||||
|
||||
overflow_record_t r;
|
||||
r.parent = parent_idx;
|
||||
r.child = link.objidx;
|
||||
if (record_set.has(&r)) continue; // don't keep duplicate overflows.
|
||||
|
||||
overflows->push (r);
|
||||
record_set.set(&r, true);
|
||||
}
|
||||
}
|
||||
|
||||
if (!overflows) return false;
|
||||
return overflows->length;
|
||||
}
|
||||
|
||||
inline
|
||||
void print_overflows (graph_t& graph,
|
||||
const hb_vector_t<overflow_record_t>& overflows)
|
||||
{
|
||||
if (!DEBUG_ENABLED(SUBSET_REPACK)) return;
|
||||
|
||||
graph.update_parents ();
|
||||
int limit = 10;
|
||||
for (const auto& o : overflows)
|
||||
{
|
||||
if (!limit--) break;
|
||||
const auto& parent = graph.vertices_[o.parent];
|
||||
const auto& child = graph.vertices_[o.child];
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr,
|
||||
" overflow from "
|
||||
"%4u (%4u in, %4u out, space %2u) => "
|
||||
"%4u (%4u in, %4u out, space %2u)",
|
||||
o.parent,
|
||||
parent.incoming_edges (),
|
||||
parent.obj.real_links.length + parent.obj.virtual_links.length,
|
||||
graph.space_for (o.parent),
|
||||
o.child,
|
||||
child.incoming_edges (),
|
||||
child.obj.real_links.length + child.obj.virtual_links.length,
|
||||
graph.space_for (o.child));
|
||||
}
|
||||
if (overflows.length > 10) {
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr, " ... plus %u more overflows.", overflows.length - 10);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename O> inline void
|
||||
serialize_link_of_type (const hb_serialize_context_t::object_t::link_t& link,
|
||||
char* head,
|
||||
unsigned size,
|
||||
hb_serialize_context_t* c)
|
||||
{
|
||||
assert(link.position + link.width <= size);
|
||||
|
||||
OT::Offset<O>* offset = reinterpret_cast<OT::Offset<O>*> (head + link.position);
|
||||
*offset = 0;
|
||||
c->add_link (*offset,
|
||||
// serializer has an extra nil object at the start of the
|
||||
// object array. So all ids are +1 of what our ids are.
|
||||
link.objidx + 1,
|
||||
(hb_serialize_context_t::whence_t) link.whence,
|
||||
link.bias);
|
||||
}
|
||||
|
||||
inline
|
||||
void serialize_link (const hb_serialize_context_t::object_t::link_t& link,
|
||||
char* head,
|
||||
unsigned size,
|
||||
hb_serialize_context_t* c)
|
||||
{
|
||||
switch (link.width)
|
||||
{
|
||||
case 0:
|
||||
// Virtual links aren't serialized.
|
||||
return;
|
||||
case 4:
|
||||
if (link.is_signed)
|
||||
{
|
||||
serialize_link_of_type<OT::HBINT32> (link, head, size, c);
|
||||
} else {
|
||||
serialize_link_of_type<OT::HBUINT32> (link, head, size, c);
|
||||
}
|
||||
return;
|
||||
case 2:
|
||||
if (link.is_signed)
|
||||
{
|
||||
serialize_link_of_type<OT::HBINT16> (link, head, size, c);
|
||||
} else {
|
||||
serialize_link_of_type<OT::HBUINT16> (link, head, size, c);
|
||||
}
|
||||
return;
|
||||
case 3:
|
||||
serialize_link_of_type<OT::HBUINT24> (link, head, size, c);
|
||||
return;
|
||||
default:
|
||||
// Unexpected link width.
|
||||
assert (0);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Serialize the graph into a newly allocated blob.
|
||||
*/
|
||||
inline hb_blob_t* serialize (const graph_t& graph)
|
||||
{
|
||||
hb_vector_t<char> buffer;
|
||||
size_t size = graph.total_size_in_bytes ();
|
||||
|
||||
if (!size) return hb_blob_get_empty ();
|
||||
|
||||
if (!buffer.alloc (size)) {
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr, "Unable to allocate output buffer.");
|
||||
return nullptr;
|
||||
}
|
||||
hb_serialize_context_t c((void *) buffer, size);
|
||||
|
||||
c.start_serialize<void> ();
|
||||
const auto& vertices = graph.vertices_;
|
||||
for (unsigned i = 0; i < vertices.length; i++) {
|
||||
c.push ();
|
||||
|
||||
size_t size = vertices[i].obj.tail - vertices[i].obj.head;
|
||||
char* start = c.allocate_size <char> (size);
|
||||
if (!start) {
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr, "Buffer out of space.");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
hb_memcpy (start, vertices[i].obj.head, size);
|
||||
|
||||
// Only real links need to be serialized.
|
||||
for (const auto& link : vertices[i].obj.real_links)
|
||||
serialize_link (link, start, size, &c);
|
||||
|
||||
// All duplications are already encoded in the graph, so don't
|
||||
// enable sharing during packing.
|
||||
c.pop_pack (false);
|
||||
}
|
||||
c.end_serialize ();
|
||||
|
||||
if (c.in_error ()) {
|
||||
DEBUG_MSG (SUBSET_REPACK, nullptr, "Error during serialization. Err flag: %d",
|
||||
c.errors);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return c.copy_blob ();
|
||||
}
|
||||
|
||||
} // namespace graph
|
||||
|
||||
#endif // GRAPH_SERIALIZE_HH
|
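To make the ranges in is_valid_offset () above concrete, a tiny standalone check (not HarfBuzz API): an unsigned 2-byte link overflows at 1 << 16, a signed 2-byte link at +/- (1 << 15), and a 3-byte link at 1 << 24.

#include <cassert>

static bool fits_unsigned_16 (long long offset)
{ return offset >= 0 && offset < (1 << 16); }

static bool fits_signed_16 (long long offset)
{ return offset >= -(1 << 15) && offset < (1 << 15); }

int main ()
{
  assert ( fits_unsigned_16 (65535));
  assert (!fits_unsigned_16 (65536));   // the overflow that will_overflow () reports
  assert ( fits_signed_16 (-32768) && !fits_signed_16 (32768));
}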
69
thirdparty/harfbuzz/src/graph/split-helpers.hh
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
/*
|
||||
* Copyright © 2022 Google, Inc.
|
||||
*
|
||||
* This is part of HarfBuzz, a text shaping library.
|
||||
*
|
||||
* Permission is hereby granted, without written agreement and without
|
||||
* license or royalty fees, to use, copy, modify, and distribute this
|
||||
* software and its documentation for any purpose, provided that the
|
||||
* above copyright notice and the following two paragraphs appear in
|
||||
* all copies of this software.
|
||||
*
|
||||
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
|
||||
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
|
||||
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
|
||||
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
|
||||
* DAMAGE.
|
||||
*
|
||||
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
|
||||
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
|
||||
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
|
||||
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
|
||||
*
|
||||
* Google Author(s): Garret Rieger
|
||||
*/
|
||||
|
||||
#ifndef GRAPH_SPLIT_HELPERS_HH
|
||||
#define GRAPH_SPLIT_HELPERS_HH
|
||||
|
||||
namespace graph {
|
||||
|
||||
template<typename Context>
|
||||
HB_INTERNAL
|
||||
hb_vector_t<unsigned> actuate_subtable_split (Context& split_context,
|
||||
const hb_vector_t<unsigned>& split_points)
|
||||
{
|
||||
hb_vector_t<unsigned> new_objects;
|
||||
if (!split_points)
|
||||
return new_objects;
|
||||
|
||||
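// Each split point is the first sub-object index of a new subtable: the ranges
// [split_points[i], split_points[i+1]) are cloned out below and the original is
// then shrunk to [0, split_points[0]).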
for (unsigned i = 0; i < split_points.length; i++)
|
||||
{
|
||||
unsigned start = split_points[i];
|
||||
unsigned end = (i < split_points.length - 1)
|
||||
? split_points[i + 1]
|
||||
: split_context.original_count ();
|
||||
unsigned id = split_context.clone_range (start, end);
|
||||
|
||||
if (id == (unsigned) -1)
|
||||
{
|
||||
new_objects.reset ();
|
||||
new_objects.allocated = -1; // mark error
|
||||
return new_objects;
|
||||
}
|
||||
new_objects.push (id);
|
||||
}
|
||||
|
||||
if (!split_context.shrink (split_points[0]))
|
||||
{
|
||||
new_objects.reset ();
|
||||
new_objects.allocated = -1; // mark error
|
||||
}
|
||||
|
||||
return new_objects;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif // GRAPH_SPLIT_HELPERS_HH
|
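As a closing illustration, a hypothetical toy context (names invented here, not HarfBuzz API) showing the three members actuate_subtable_split () requires of its Context parameter: original_count (), clone_range (start, end) returning a new object id, and shrink (count).

struct toy_split_context_t
{
  unsigned count;

  unsigned original_count () { return count; }

  // The real contexts (the split_context_t structs in the files above) clone graph
  // nodes here; this toy just pretends the range start is the new object id.
  unsigned clone_range (unsigned start, unsigned end) { (void) end; return start; }

  bool shrink (unsigned new_count) { count = new_count; return true; }
};

int main ()
{
  toy_split_context_t ctx {10};
  // With split points {4, 7}: clone [4, 7) and [7, 10), then shrink the original to [0, 4).
  ctx.clone_range (4, 7);
  ctx.clone_range (7, ctx.original_count ());
  return ctx.shrink (4) ? 0 : 1;
}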