initial commit, 4.5 stable

2025-09-16 20:46:46 -04:00
commit 9d30169a8d
13378 changed files with 7050105 additions and 0 deletions

8
core/templates/SCsub Normal file

@@ -0,0 +1,8 @@
#!/usr/bin/env python
from misc.utility.scons_hints import *
Import("env")
env_templates = env.Clone()
env_templates.add_source_files(env.core_sources, "*.cpp")

39
core/templates/a_hash_map.cpp Normal file

@@ -0,0 +1,39 @@
/**************************************************************************/
/* a_hash_map.cpp */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "a_hash_map.h"
#include "core/variant/variant.h"
// Explicit instantiation.
template class AHashMap<int, int>;
template class AHashMap<String, int>;
template class AHashMap<StringName, StringName>;
template class AHashMap<StringName, Variant>;
template class AHashMap<StringName, int>;

734
core/templates/a_hash_map.h Normal file

@@ -0,0 +1,734 @@
/**************************************************************************/
/* a_hash_map.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/templates/hash_map.h"
struct HashMapData {
union {
uint64_t data;
struct
{
uint32_t hash;
uint32_t hash_to_key;
};
};
};
static_assert(sizeof(HashMapData) == 8);
/**
* An array-based implementation of a hash map. It is very efficient in terms of performance and
* memory usage. It works like a dynamic array, adding elements to the end of the array, and
* lets you access elements by their index using the `get_by_index` method.
* Example:
* ```
* AHashMap<int, Object *> map;
*
* int get_object_id_by_number(int p_number) {
* int id = map.get_index(p_number);
* return id;
* }
*
* Object *get_object_by_id(int p_id) {
* return map.get_by_index(p_id).value;
* }
* ```
* Avoid erasing elements if you rely on these indices, because erasing can invalidate them.
*
* When an element is erased, its place is taken by the element at the end:
*
* <-------------
* | |
* 6 8 X 9 32 -1 5 -10 7 X X X
* 6 8 7 9 32 -1 5 -10 X X X X
*
*
* Use RBMap if you need to iterate over sorted elements.
*
* Use HashMap if:
* - You need to keep an iterator or const pointer to Key and you intend to add/remove elements in the meantime.
* - You need to preserve the insertion order when using erase.
*
* It is recommended to use `HashMap` if `KeyValue` size is very large.
*/
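/*
* Illustrative sketch of the index-invalidation caveat above (keys and values are hypothetical):
* ```
* AHashMap<StringName, int> map;
* map.insert("a", 1);
* map.insert("b", 2);
* int idx_b = map.get_index("b"); // idx_b == 1.
* map.erase("a");                 // "b" is moved into slot 0, so idx_b is now stale.
* map.get_by_index(0).value;      // Accesses "b" after the move.
* ```
*/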
template <typename TKey, typename TValue,
typename Hasher = HashMapHasherDefault,
typename Comparator = HashMapComparatorDefault<TKey>>
class AHashMap {
public:
// Must be a power of two.
static constexpr uint32_t INITIAL_CAPACITY = 16;
static constexpr uint32_t EMPTY_HASH = 0;
static_assert(EMPTY_HASH == 0, "EMPTY_HASH must always be 0 for the memcpy() optimization.");
private:
typedef KeyValue<TKey, TValue> MapKeyValue;
MapKeyValue *elements = nullptr;
HashMapData *map_data = nullptr;
// As an optimization, this stores `capacity - 1`. Add 1 to get the actual capacity.
uint32_t capacity = 0;
uint32_t num_elements = 0;
uint32_t _hash(const TKey &p_key) const {
uint32_t hash = Hasher::hash(p_key);
if (unlikely(hash == EMPTY_HASH)) {
hash = EMPTY_HASH + 1;
}
return hash;
}
static _FORCE_INLINE_ uint32_t _get_resize_count(uint32_t p_capacity) {
return p_capacity ^ (p_capacity + 1) >> 2; // = get_capacity() * 0.75 - 1; Works only if p_capacity = 2^n - 1.
}
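// Worked example (illustrative): with capacity = 15 (real capacity 16),
// (15 + 1) >> 2 == 4 and 15 ^ 4 == 11, i.e. 16 * 0.75 - 1.
// Note that `>>` binds tighter than `^`, so the expression above parses as
// p_capacity ^ ((p_capacity + 1) >> 2).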
static _FORCE_INLINE_ uint32_t _get_probe_length(uint32_t p_pos, uint32_t p_hash, uint32_t p_local_capacity) {
const uint32_t original_pos = p_hash & p_local_capacity;
return (p_pos - original_pos + p_local_capacity + 1) & p_local_capacity;
}
bool _lookup_pos(const TKey &p_key, uint32_t &r_pos, uint32_t &r_hash_pos) const {
if (unlikely(elements == nullptr)) {
return false; // Failed lookups, no elements.
}
return _lookup_pos_with_hash(p_key, r_pos, r_hash_pos, _hash(p_key));
}
bool _lookup_pos_with_hash(const TKey &p_key, uint32_t &r_pos, uint32_t &r_hash_pos, uint32_t p_hash) const {
if (unlikely(elements == nullptr)) {
return false; // Failed lookups, no elements.
}
uint32_t pos = p_hash & capacity;
HashMapData data = map_data[pos];
if (data.hash == p_hash && Comparator::compare(elements[data.hash_to_key].key, p_key)) {
r_pos = data.hash_to_key;
r_hash_pos = pos;
return true;
}
if (data.data == EMPTY_HASH) {
return false;
}
// A collision occurred.
pos = (pos + 1) & capacity;
uint32_t distance = 1;
while (true) {
data = map_data[pos];
if (data.hash == p_hash && Comparator::compare(elements[data.hash_to_key].key, p_key)) {
r_pos = data.hash_to_key;
r_hash_pos = pos;
return true;
}
if (data.data == EMPTY_HASH) {
return false;
}
if (distance > _get_probe_length(pos, data.hash, capacity)) {
return false;
}
pos = (pos + 1) & capacity;
distance++;
}
}
uint32_t _insert_with_hash(uint32_t p_hash, uint32_t p_index) {
uint32_t pos = p_hash & capacity;
if (map_data[pos].data == EMPTY_HASH) {
uint64_t data = ((uint64_t)p_index << 32) | p_hash;
map_data[pos].data = data;
return pos;
}
uint32_t distance = 1;
pos = (pos + 1) & capacity;
HashMapData c_data;
c_data.hash = p_hash;
c_data.hash_to_key = p_index;
while (true) {
if (map_data[pos].data == EMPTY_HASH) {
#ifdef DEV_ENABLED
if (unlikely(distance > 12)) {
WARN_PRINT("Excessive collision count (" +
itos(distance) + "), is the right hash function being used?");
}
#endif
map_data[pos] = c_data;
return pos;
}
// Not an empty slot, let's check the probing length of the existing one.
uint32_t existing_probe_len = _get_probe_length(pos, map_data[pos].hash, capacity);
if (existing_probe_len < distance) {
SWAP(c_data, map_data[pos]);
distance = existing_probe_len;
}
pos = (pos + 1) & capacity;
distance++;
}
}
void _resize_and_rehash(uint32_t p_new_capacity) {
uint32_t real_old_capacity = capacity + 1;
// Capacity can't be 0 and must be 2^n - 1.
capacity = MAX(4u, p_new_capacity);
uint32_t real_capacity = next_power_of_2(capacity);
capacity = real_capacity - 1;
HashMapData *old_map_data = map_data;
map_data = reinterpret_cast<HashMapData *>(Memory::alloc_static_zeroed(sizeof(HashMapData) * real_capacity));
elements = reinterpret_cast<MapKeyValue *>(Memory::realloc_static(elements, sizeof(MapKeyValue) * (_get_resize_count(capacity) + 1)));
if (num_elements != 0) {
for (uint32_t i = 0; i < real_old_capacity; i++) {
HashMapData data = old_map_data[i];
if (data.data != EMPTY_HASH) {
_insert_with_hash(data.hash, data.hash_to_key);
}
}
}
Memory::free_static(old_map_data);
}
int32_t _insert_element(const TKey &p_key, const TValue &p_value, uint32_t p_hash) {
if (unlikely(elements == nullptr)) {
// Allocate on demand to save memory.
uint32_t real_capacity = capacity + 1;
map_data = reinterpret_cast<HashMapData *>(Memory::alloc_static_zeroed(sizeof(HashMapData) * real_capacity));
elements = reinterpret_cast<MapKeyValue *>(Memory::alloc_static(sizeof(MapKeyValue) * (_get_resize_count(capacity) + 1)));
}
if (unlikely(num_elements > _get_resize_count(capacity))) {
_resize_and_rehash(capacity * 2);
}
memnew_placement(&elements[num_elements], MapKeyValue(p_key, p_value));
_insert_with_hash(p_hash, num_elements);
num_elements++;
return num_elements - 1;
}
void _init_from(const AHashMap &p_other) {
capacity = p_other.capacity;
uint32_t real_capacity = capacity + 1;
num_elements = p_other.num_elements;
if (p_other.num_elements == 0) {
return;
}
map_data = reinterpret_cast<HashMapData *>(Memory::alloc_static(sizeof(HashMapData) * real_capacity));
elements = reinterpret_cast<MapKeyValue *>(Memory::alloc_static(sizeof(MapKeyValue) * (_get_resize_count(capacity) + 1)));
if constexpr (std::is_trivially_copyable_v<TKey> && std::is_trivially_copyable_v<TValue>) {
void *destination = elements;
const void *source = p_other.elements;
memcpy(destination, source, sizeof(MapKeyValue) * num_elements);
} else {
for (uint32_t i = 0; i < num_elements; i++) {
memnew_placement(&elements[i], MapKeyValue(p_other.elements[i]));
}
}
memcpy(map_data, p_other.map_data, sizeof(HashMapData) * real_capacity);
}
public:
/* Standard Godot Container API */
_FORCE_INLINE_ uint32_t get_capacity() const { return capacity + 1; }
_FORCE_INLINE_ uint32_t size() const { return num_elements; }
_FORCE_INLINE_ bool is_empty() const {
return num_elements == 0;
}
void clear() {
if (elements == nullptr || num_elements == 0) {
return;
}
memset(map_data, EMPTY_HASH, (capacity + 1) * sizeof(HashMapData));
if constexpr (!(std::is_trivially_destructible_v<TKey> && std::is_trivially_destructible_v<TValue>)) {
for (uint32_t i = 0; i < num_elements; i++) {
elements[i].key.~TKey();
elements[i].value.~TValue();
}
}
num_elements = 0;
}
TValue &get(const TKey &p_key) {
uint32_t pos = 0;
uint32_t hash_pos = 0;
bool exists = _lookup_pos(p_key, pos, hash_pos);
CRASH_COND_MSG(!exists, "AHashMap key not found.");
return elements[pos].value;
}
const TValue &get(const TKey &p_key) const {
uint32_t pos = 0;
uint32_t hash_pos = 0;
bool exists = _lookup_pos(p_key, pos, hash_pos);
CRASH_COND_MSG(!exists, "AHashMap key not found.");
return elements[pos].value;
}
const TValue *getptr(const TKey &p_key) const {
uint32_t pos = 0;
uint32_t hash_pos = 0;
bool exists = _lookup_pos(p_key, pos, hash_pos);
if (exists) {
return &elements[pos].value;
}
return nullptr;
}
TValue *getptr(const TKey &p_key) {
uint32_t pos = 0;
uint32_t hash_pos = 0;
bool exists = _lookup_pos(p_key, pos, hash_pos);
if (exists) {
return &elements[pos].value;
}
return nullptr;
}
bool has(const TKey &p_key) const {
uint32_t _pos = 0;
uint32_t h_pos = 0;
return _lookup_pos(p_key, _pos, h_pos);
}
bool erase(const TKey &p_key) {
uint32_t pos = 0;
uint32_t element_pos = 0;
bool exists = _lookup_pos(p_key, element_pos, pos);
if (!exists) {
return false;
}
uint32_t next_pos = (pos + 1) & capacity;
while (map_data[next_pos].hash != EMPTY_HASH && _get_probe_length(next_pos, map_data[next_pos].hash, capacity) != 0) {
SWAP(map_data[next_pos], map_data[pos]);
pos = next_pos;
next_pos = (next_pos + 1) & capacity;
}
map_data[pos].data = EMPTY_HASH;
elements[element_pos].key.~TKey();
elements[element_pos].value.~TValue();
num_elements--;
if (element_pos < num_elements) {
void *destination = &elements[element_pos];
const void *source = &elements[num_elements];
memcpy(destination, source, sizeof(MapKeyValue));
uint32_t h_pos = 0;
_lookup_pos(elements[num_elements].key, pos, h_pos);
map_data[h_pos].hash_to_key = element_pos;
}
return true;
}
// Replace the key of an entry in-place, without invalidating iterators or changing the entry's position during iteration.
// p_old_key must exist in the map and p_new_key must not, unless it is equal to p_old_key.
bool replace_key(const TKey &p_old_key, const TKey &p_new_key) {
if (p_old_key == p_new_key) {
return true;
}
uint32_t pos = 0;
uint32_t element_pos = 0;
ERR_FAIL_COND_V(_lookup_pos(p_new_key, element_pos, pos), false);
ERR_FAIL_COND_V(!_lookup_pos(p_old_key, element_pos, pos), false);
MapKeyValue &element = elements[element_pos];
const_cast<TKey &>(element.key) = p_new_key;
uint32_t next_pos = (pos + 1) & capacity;
while (map_data[next_pos].hash != EMPTY_HASH && _get_probe_length(next_pos, map_data[next_pos].hash, capacity) != 0) {
SWAP(map_data[next_pos], map_data[pos]);
pos = next_pos;
next_pos = (next_pos + 1) & capacity;
}
map_data[pos].data = EMPTY_HASH;
uint32_t hash = _hash(p_new_key);
_insert_with_hash(hash, element_pos);
return true;
}
// Reserves space for a number of elements, useful to avoid many resizes and rehashes.
// When adding a known (possibly large) number of elements at once, the requested capacity must be larger than the old capacity to take effect.
void reserve(uint32_t p_new_capacity) {
ERR_FAIL_COND_MSG(p_new_capacity < size(), "reserve() called with a capacity smaller than the current size. This is likely a mistake.");
if (elements == nullptr) {
capacity = MAX(4u, p_new_capacity);
capacity = next_power_of_2(capacity) - 1;
return; // Unallocated yet.
}
if (p_new_capacity <= get_capacity()) {
return;
}
_resize_and_rehash(p_new_capacity);
}
/** Iterator API **/
struct ConstIterator {
_FORCE_INLINE_ const MapKeyValue &operator*() const {
return *pair;
}
_FORCE_INLINE_ const MapKeyValue *operator->() const {
return pair;
}
_FORCE_INLINE_ ConstIterator &operator++() {
pair++;
return *this;
}
_FORCE_INLINE_ ConstIterator &operator--() {
pair--;
if (pair < begin) {
pair = end;
}
return *this;
}
_FORCE_INLINE_ bool operator==(const ConstIterator &b) const { return pair == b.pair; }
_FORCE_INLINE_ bool operator!=(const ConstIterator &b) const { return pair != b.pair; }
_FORCE_INLINE_ explicit operator bool() const {
return pair != end;
}
_FORCE_INLINE_ ConstIterator(MapKeyValue *p_key, MapKeyValue *p_begin, MapKeyValue *p_end) {
pair = p_key;
begin = p_begin;
end = p_end;
}
_FORCE_INLINE_ ConstIterator() {}
_FORCE_INLINE_ ConstIterator(const ConstIterator &p_it) {
pair = p_it.pair;
begin = p_it.begin;
end = p_it.end;
}
_FORCE_INLINE_ void operator=(const ConstIterator &p_it) {
pair = p_it.pair;
begin = p_it.begin;
end = p_it.end;
}
private:
MapKeyValue *pair = nullptr;
MapKeyValue *begin = nullptr;
MapKeyValue *end = nullptr;
};
struct Iterator {
_FORCE_INLINE_ MapKeyValue &operator*() const {
return *pair;
}
_FORCE_INLINE_ MapKeyValue *operator->() const {
return pair;
}
_FORCE_INLINE_ Iterator &operator++() {
pair++;
return *this;
}
_FORCE_INLINE_ Iterator &operator--() {
pair--;
if (pair < begin) {
pair = end;
}
return *this;
}
_FORCE_INLINE_ bool operator==(const Iterator &b) const { return pair == b.pair; }
_FORCE_INLINE_ bool operator!=(const Iterator &b) const { return pair != b.pair; }
_FORCE_INLINE_ explicit operator bool() const {
return pair != end;
}
_FORCE_INLINE_ Iterator(MapKeyValue *p_key, MapKeyValue *p_begin, MapKeyValue *p_end) {
pair = p_key;
begin = p_begin;
end = p_end;
}
_FORCE_INLINE_ Iterator() {}
_FORCE_INLINE_ Iterator(const Iterator &p_it) {
pair = p_it.pair;
begin = p_it.begin;
end = p_it.end;
}
_FORCE_INLINE_ void operator=(const Iterator &p_it) {
pair = p_it.pair;
begin = p_it.begin;
end = p_it.end;
}
operator ConstIterator() const {
return ConstIterator(pair, begin, end);
}
private:
MapKeyValue *pair = nullptr;
MapKeyValue *begin = nullptr;
MapKeyValue *end = nullptr;
};
_FORCE_INLINE_ Iterator begin() {
return Iterator(elements, elements, elements + num_elements);
}
_FORCE_INLINE_ Iterator end() {
return Iterator(elements + num_elements, elements, elements + num_elements);
}
_FORCE_INLINE_ Iterator last() {
if (unlikely(num_elements == 0)) {
return Iterator(nullptr, nullptr, nullptr);
}
return Iterator(elements + num_elements - 1, elements, elements + num_elements);
}
Iterator find(const TKey &p_key) {
uint32_t pos = 0;
uint32_t h_pos = 0;
bool exists = _lookup_pos(p_key, pos, h_pos);
if (!exists) {
return end();
}
return Iterator(elements + pos, elements, elements + num_elements);
}
void remove(const Iterator &p_iter) {
if (p_iter) {
erase(p_iter->key);
}
}
_FORCE_INLINE_ ConstIterator begin() const {
return ConstIterator(elements, elements, elements + num_elements);
}
_FORCE_INLINE_ ConstIterator end() const {
return ConstIterator(elements + num_elements, elements, elements + num_elements);
}
_FORCE_INLINE_ ConstIterator last() const {
if (unlikely(num_elements == 0)) {
return ConstIterator(nullptr, nullptr, nullptr);
}
return ConstIterator(elements + num_elements - 1, elements, elements + num_elements);
}
ConstIterator find(const TKey &p_key) const {
uint32_t pos = 0;
uint32_t h_pos = 0;
bool exists = _lookup_pos(p_key, pos, h_pos);
if (!exists) {
return end();
}
return ConstIterator(elements + pos, elements, elements + num_elements);
}
/* Indexing */
const TValue &operator[](const TKey &p_key) const {
uint32_t pos = 0;
uint32_t h_pos = 0;
bool exists = _lookup_pos(p_key, pos, h_pos);
CRASH_COND(!exists);
return elements[pos].value;
}
TValue &operator[](const TKey &p_key) {
uint32_t pos = 0;
uint32_t h_pos = 0;
uint32_t hash = _hash(p_key);
bool exists = _lookup_pos_with_hash(p_key, pos, h_pos, hash);
if (exists) {
return elements[pos].value;
} else {
pos = _insert_element(p_key, TValue(), hash);
return elements[pos].value;
}
}
/* Insert */
Iterator insert(const TKey &p_key, const TValue &p_value) {
uint32_t pos = 0;
uint32_t h_pos = 0;
uint32_t hash = _hash(p_key);
bool exists = _lookup_pos_with_hash(p_key, pos, h_pos, hash);
if (!exists) {
pos = _insert_element(p_key, p_value, hash);
} else {
elements[pos].value = p_value;
}
return Iterator(elements + pos, elements, elements + num_elements);
}
// Inserts an element without checking if it already exists.
Iterator insert_new(const TKey &p_key, const TValue &p_value) {
DEV_ASSERT(!has(p_key));
uint32_t hash = _hash(p_key);
uint32_t pos = _insert_element(p_key, p_value, hash);
return Iterator(elements + pos, elements, elements + num_elements);
}
/* Array methods. */
// Unsafe. Changing keys and going outside the bounds of an array can lead to undefined behavior.
KeyValue<TKey, TValue> *get_elements_ptr() {
return elements;
}
// Returns the element index. If not found, returns -1.
int get_index(const TKey &p_key) {
uint32_t pos = 0;
uint32_t h_pos = 0;
bool exists = _lookup_pos(p_key, pos, h_pos);
if (!exists) {
return -1;
}
return pos;
}
KeyValue<TKey, TValue> &get_by_index(uint32_t p_index) {
CRASH_BAD_UNSIGNED_INDEX(p_index, num_elements);
return elements[p_index];
}
bool erase_by_index(uint32_t p_index) {
if (p_index >= size()) {
return false;
}
return erase(elements[p_index].key);
}
/* Constructors */
AHashMap(const AHashMap &p_other) {
_init_from(p_other);
}
AHashMap(const HashMap<TKey, TValue> &p_other) {
reserve(p_other.size());
for (const KeyValue<TKey, TValue> &E : p_other) {
uint32_t hash = _hash(E.key);
_insert_element(E.key, E.value, hash);
}
}
void operator=(const AHashMap &p_other) {
if (this == &p_other) {
return; // Ignore self assignment.
}
reset();
_init_from(p_other);
}
void operator=(const HashMap<TKey, TValue> &p_other) {
reset();
reserve(p_other.size());
for (const KeyValue<TKey, TValue> &E : p_other) {
uint32_t hash = _hash(E.key);
_insert_element(E.key, E.value, hash);
}
}
AHashMap(uint32_t p_initial_capacity) {
// Capacity can't be 0 and must be 2^n - 1.
capacity = MAX(4u, p_initial_capacity);
capacity = next_power_of_2(capacity) - 1;
}
AHashMap() :
capacity(INITIAL_CAPACITY - 1) {
}
AHashMap(std::initializer_list<KeyValue<TKey, TValue>> p_init) {
reserve(p_init.size());
for (const KeyValue<TKey, TValue> &E : p_init) {
insert(E.key, E.value);
}
}
void reset() {
if (elements != nullptr) {
if constexpr (!(std::is_trivially_destructible_v<TKey> && std::is_trivially_destructible_v<TValue>)) {
for (uint32_t i = 0; i < num_elements; i++) {
elements[i].key.~TKey();
elements[i].value.~TValue();
}
}
Memory::free_static(elements);
Memory::free_static(map_data);
elements = nullptr;
}
capacity = INITIAL_CAPACITY - 1;
num_elements = 0;
}
~AHashMap() {
reset();
}
};
extern template class AHashMap<int, int>;
extern template class AHashMap<String, int>;
extern template class AHashMap<StringName, StringName>;
extern template class AHashMap<StringName, Variant>;
extern template class AHashMap<StringName, int>;

178
core/templates/bin_sorted_array.h Normal file

@@ -0,0 +1,178 @@
/**************************************************************************/
/* bin_sorted_array.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/templates/local_vector.h"
#include "core/templates/paged_array.h"
template <typename T>
class BinSortedArray {
PagedArray<T> array;
LocalVector<uint64_t> bin_limits;
// Implement if elements need to keep track of their own index in the array.
_FORCE_INLINE_ virtual void _update_idx(T &r_element, uint64_t p_idx) {}
_FORCE_INLINE_ void _swap(uint64_t p_a, uint64_t p_b) {
SWAP(array[p_a], array[p_b]);
_update_idx(array[p_a], p_a);
_update_idx(array[p_b], p_b);
}
public:
uint64_t insert(T &p_element, uint64_t p_bin) {
array.push_back(p_element);
uint64_t new_idx = array.size() - 1;
_update_idx(p_element, new_idx);
bin_limits[0] = new_idx;
if (p_bin != 0) {
new_idx = move(new_idx, p_bin);
}
return new_idx;
}
uint64_t move(uint64_t p_idx, uint64_t p_bin) {
ERR_FAIL_UNSIGNED_INDEX_V(p_idx, array.size(), -1);
uint64_t current_bin = bin_limits.size() - 1;
while (p_idx > bin_limits[current_bin]) {
current_bin--;
}
if (p_bin == current_bin) {
return p_idx;
}
uint64_t current_idx = p_idx;
if (p_bin > current_bin) {
while (p_bin > current_bin) {
uint64_t swap_idx = 0;
if (current_bin == bin_limits.size() - 1) {
bin_limits.push_back(0);
} else {
bin_limits[current_bin + 1]++;
swap_idx = bin_limits[current_bin + 1];
}
if (current_idx != swap_idx) {
_swap(current_idx, swap_idx);
current_idx = swap_idx;
}
current_bin++;
}
} else {
while (p_bin < current_bin) {
uint64_t swap_idx = bin_limits[current_bin];
if (current_idx != swap_idx) {
_swap(current_idx, swap_idx);
}
if (current_bin == bin_limits.size() - 1 && bin_limits[current_bin] == 0) {
bin_limits.resize(bin_limits.size() - 1);
} else {
bin_limits[current_bin]--;
}
current_idx = swap_idx;
current_bin--;
}
}
return current_idx;
}
void remove_at(uint64_t p_idx) {
ERR_FAIL_UNSIGNED_INDEX(p_idx, array.size());
uint64_t new_idx = move(p_idx, 0);
uint64_t swap_idx = array.size() - 1;
if (new_idx != swap_idx) {
_swap(new_idx, swap_idx);
}
if (bin_limits[0] > 0) {
bin_limits[0]--;
}
array.pop_back();
}
void set_page_pool(PagedArrayPool<T> *p_page_pool) {
array.set_page_pool(p_page_pool);
}
_FORCE_INLINE_ const T &operator[](uint64_t p_index) const {
return array[p_index];
}
_FORCE_INLINE_ T &operator[](uint64_t p_index) {
return array[p_index];
}
int get_bin_count() {
if (array.size() == 0) {
return 0;
}
return bin_limits.size();
}
int get_bin_start(int p_bin) {
ERR_FAIL_COND_V(p_bin >= get_bin_count(), ~0U);
if ((unsigned int)p_bin == bin_limits.size() - 1) {
return 0;
}
return bin_limits[p_bin + 1] + 1;
}
int get_bin_size(int p_bin) {
ERR_FAIL_COND_V(p_bin >= get_bin_count(), 0);
if ((unsigned int)p_bin == bin_limits.size() - 1) {
return bin_limits[p_bin] + 1;
}
return bin_limits[p_bin] - bin_limits[p_bin + 1];
}
void reset() {
array.reset();
bin_limits.clear();
bin_limits.push_back(0);
}
BinSortedArray() {
bin_limits.push_back(0);
}
virtual ~BinSortedArray() {
reset();
}
};
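/*
* Illustrative usage sketch (the element type and bin numbers are hypothetical):
* ```
* PagedArrayPool<int> pool;
* BinSortedArray<int> bins;
* bins.set_page_pool(&pool);
* int v = 42;
* uint64_t idx = bins.insert(v, 2); // Insert into bin 2; returns the element's index.
* idx = bins.move(idx, 0);          // Move to bin 0; the index may change.
* bins.remove_at(idx);
* ```
*/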

73
core/templates/bit_field.h Normal file

@@ -0,0 +1,73 @@
/**************************************************************************/
/* bit_field.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/typedefs.h"
#include <type_traits>
// TODO: Replace `typename` with enum concept once C++20 concepts/constraints are allowed.
template <typename T>
class BitField {
static_assert(std::is_enum_v<T>);
uint64_t value;
public:
_ALWAYS_INLINE_ constexpr void set_flag(BitField p_flag) { value |= p_flag.value; }
_ALWAYS_INLINE_ constexpr bool has_flag(BitField p_flag) const { return value & p_flag.value; }
_ALWAYS_INLINE_ constexpr bool is_empty() const { return value == 0; }
_ALWAYS_INLINE_ constexpr void clear_flag(BitField p_flag) { value &= ~p_flag.value; }
_ALWAYS_INLINE_ constexpr void clear() { value = 0; }
[[nodiscard]] _ALWAYS_INLINE_ constexpr BitField get_combined(BitField p_other) const { return BitField(value | p_other.value); }
[[nodiscard]] _ALWAYS_INLINE_ constexpr BitField get_shared(BitField p_other) const { return BitField(value & p_other.value); }
[[nodiscard]] _ALWAYS_INLINE_ constexpr BitField get_different(BitField p_other) const { return BitField(value ^ p_other.value); }
_ALWAYS_INLINE_ constexpr BitField() = default;
_ALWAYS_INLINE_ constexpr BitField(T p_value) :
value(static_cast<uint64_t>(p_value)) {}
_ALWAYS_INLINE_ constexpr operator T() const { return static_cast<T>(value); }
// TODO: Unify as single constructor once C++20 `explicit` conditionals are allowed.
template <typename V, std::enable_if_t<std::is_arithmetic_v<V> && std::is_convertible_v<T, int>, int> = 0>
_ALWAYS_INLINE_ constexpr BitField(V p_value) :
value(static_cast<uint64_t>(p_value)) {}
template <typename V, std::enable_if_t<std::is_arithmetic_v<V> && !std::is_convertible_v<T, int>, int> = 0>
_ALWAYS_INLINE_ constexpr explicit BitField(V p_value) :
value(static_cast<uint64_t>(p_value)) {}
template <typename V, std::enable_if_t<std::is_arithmetic_v<V>, int> = 0>
_ALWAYS_INLINE_ constexpr explicit operator V() const { return static_cast<V>(value); }
};
// Implicitly zero-constructible as a trivially-constructible type.
static_assert(is_zero_constructible_v<BitField<Error>>);
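/*
* Illustrative usage sketch (the `Error` enum is used only because this header already
* instantiates BitField<Error> above; any enum type works):
* ```
* BitField<Error> flags;
* flags.clear();                // The defaulted constructor leaves `value` uninitialized.
* flags.set_flag(ERR_BUSY);
* if (flags.has_flag(ERR_BUSY)) {
*     flags.clear_flag(ERR_BUSY);
* }
* ```
*/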

38
core/templates/command_queue_mt.cpp Normal file

@@ -0,0 +1,38 @@
/**************************************************************************/
/* command_queue_mt.cpp */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "command_queue_mt.h"
CommandQueueMT::CommandQueueMT() {
command_mem.reserve(DEFAULT_COMMAND_MEM_SIZE_KB * 1024);
}
CommandQueueMT::~CommandQueueMT() {
}

257
core/templates/command_queue_mt.h Normal file

@@ -0,0 +1,257 @@
/**************************************************************************/
/* command_queue_mt.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/object/worker_thread_pool.h"
#include "core/os/condition_variable.h"
#include "core/os/mutex.h"
#include "core/templates/local_vector.h"
#include "core/templates/simple_type.h"
#include "core/templates/tuple.h"
#include "core/typedefs.h"
class CommandQueueMT {
struct CommandBase {
bool sync = false;
virtual void call() = 0;
virtual ~CommandBase() = default;
CommandBase(bool p_sync) :
sync(p_sync) {}
};
template <typename T, typename M, bool NeedsSync, typename... Args>
struct Command : public CommandBase {
T *instance;
M method;
Tuple<GetSimpleTypeT<Args>...> args;
template <typename... FwdArgs>
_FORCE_INLINE_ Command(T *p_instance, M p_method, FwdArgs &&...p_args) :
CommandBase(NeedsSync), instance(p_instance), method(p_method), args(std::forward<FwdArgs>(p_args)...) {}
void call() {
call_impl(BuildIndexSequence<sizeof...(Args)>{});
}
private:
template <size_t... I>
_FORCE_INLINE_ void call_impl(IndexSequence<I...>) {
// Move out of the Tuple; it will be destroyed as soon as the call is complete.
(instance->*method)(std::move(get<I>())...);
}
// This method exists so we can call it in the parameter pack expansion in call_impl.
template <size_t I>
_FORCE_INLINE_ auto &get() { return ::tuple_get<I>(args); }
};
// Separate class from Command so we can save the space of the ret pointer for commands that don't return.
template <typename T, typename M, typename R, typename... Args>
struct CommandRet : public CommandBase {
T *instance;
M method;
R *ret;
Tuple<GetSimpleTypeT<Args>...> args;
_FORCE_INLINE_ CommandRet(T *p_instance, M p_method, R *p_ret, GetSimpleTypeT<Args>... p_args) :
CommandBase(true), instance(p_instance), method(p_method), ret(p_ret), args{ p_args... } {}
void call() override {
*ret = call_impl(BuildIndexSequence<sizeof...(Args)>{});
}
private:
template <size_t... I>
_FORCE_INLINE_ R call_impl(IndexSequence<I...>) {
// Move out of the Tuple; it will be destroyed as soon as the call is complete.
return (instance->*method)(std::move(get<I>())...);
}
// This method exists so we can call it in the parameter pack expansion in call_impl.
template <size_t I>
_FORCE_INLINE_ auto &get() { return ::tuple_get<I>(args); }
};
/***** BASE *******/
static const uint32_t DEFAULT_COMMAND_MEM_SIZE_KB = 64;
BinaryMutex mutex;
LocalVector<uint8_t> command_mem;
ConditionVariable sync_cond_var;
uint32_t sync_head = 0;
uint32_t sync_tail = 0;
uint32_t sync_awaiters = 0;
WorkerThreadPool::TaskID pump_task_id = WorkerThreadPool::INVALID_TASK_ID;
uint64_t flush_read_ptr = 0;
std::atomic<bool> pending{ false };
template <typename T, typename... Args>
_FORCE_INLINE_ void create_command(Args &&...p_args) {
// Allocation layout: a uint64_t size header followed by the command, padded up to 8 bytes.
constexpr uint64_t alloc_size = ((sizeof(T) + 8U - 1U) & ~(8U - 1U));
static_assert(alloc_size < UINT32_MAX, "Type too large to fit in the command queue.");
uint64_t size = command_mem.size();
command_mem.resize(size + alloc_size + sizeof(uint64_t));
*(uint64_t *)&command_mem[size] = alloc_size;
void *cmd = &command_mem[size + sizeof(uint64_t)];
new (cmd) T(std::forward<Args>(p_args)...);
pending.store(true);
}
template <typename T, bool NeedsSync, typename... Args>
_FORCE_INLINE_ void _push_internal(Args &&...args) {
MutexLock mlock(mutex);
create_command<T>(std::forward<Args>(args)...);
if (pump_task_id != WorkerThreadPool::INVALID_TASK_ID) {
WorkerThreadPool::get_singleton()->notify_yield_over(pump_task_id);
}
if constexpr (NeedsSync) {
sync_tail++;
_wait_for_sync(mlock);
}
}
_FORCE_INLINE_ void _prevent_sync_wraparound() {
bool safe_to_reset = !sync_awaiters;
bool already_sync_to_latest = sync_head == sync_tail;
if (safe_to_reset && already_sync_to_latest) {
sync_head = 0;
sync_tail = 0;
}
}
void _flush() {
if (unlikely(flush_read_ptr)) {
// Re-entrant call.
return;
}
MutexLock lock(mutex);
while (flush_read_ptr < command_mem.size()) {
uint64_t size = *(uint64_t *)&command_mem[flush_read_ptr];
flush_read_ptr += 8;
CommandBase *cmd = reinterpret_cast<CommandBase *>(&command_mem[flush_read_ptr]);
uint32_t allowance_id = WorkerThreadPool::thread_enter_unlock_allowance_zone(lock);
cmd->call();
WorkerThreadPool::thread_exit_unlock_allowance_zone(allowance_id);
// Handle potential realloc due to the command and unlock allowance.
cmd = reinterpret_cast<CommandBase *>(&command_mem[flush_read_ptr]);
if (unlikely(cmd->sync)) {
sync_head++;
lock.~MutexLock(); // Give an opportunity to awaiters right away.
sync_cond_var.notify_all();
new (&lock) MutexLock(mutex);
// Handle potential realloc happened during unlock.
cmd = reinterpret_cast<CommandBase *>(&command_mem[flush_read_ptr]);
}
cmd->~CommandBase();
flush_read_ptr += size;
}
command_mem.clear();
pending.store(false);
flush_read_ptr = 0;
_prevent_sync_wraparound();
}
_FORCE_INLINE_ void _wait_for_sync(MutexLock<BinaryMutex> &p_lock) {
sync_awaiters++;
uint32_t sync_head_goal = sync_tail;
do {
sync_cond_var.wait(p_lock);
} while (sync_head < sync_head_goal);
sync_awaiters--;
_prevent_sync_wraparound();
}
void _no_op() {}
public:
template <typename T, typename M, typename... Args>
void push(T *p_instance, M p_method, Args &&...p_args) {
// Standard command, no sync.
using CommandType = Command<T, M, false, Args...>;
_push_internal<CommandType, false>(p_instance, p_method, std::forward<Args>(p_args)...);
}
template <typename T, typename M, typename... Args>
void push_and_sync(T *p_instance, M p_method, Args... p_args) {
// Standard command, sync.
using CommandType = Command<T, M, true, Args...>;
_push_internal<CommandType, true>(p_instance, p_method, std::forward<Args>(p_args)...);
}
template <typename T, typename M, typename R, typename... Args>
void push_and_ret(T *p_instance, M p_method, R *r_ret, Args... p_args) {
// Command with return value, sync.
using CommandType = CommandRet<T, M, R, Args...>;
_push_internal<CommandType, true>(p_instance, p_method, r_ret, std::forward<Args>(p_args)...);
}
_FORCE_INLINE_ void flush_if_pending() {
if (unlikely(pending.load())) {
_flush();
}
}
void flush_all() {
_flush();
}
void sync() {
push_and_sync(this, &CommandQueueMT::_no_op);
}
void wait_and_flush() {
ERR_FAIL_COND(pump_task_id == WorkerThreadPool::INVALID_TASK_ID);
WorkerThreadPool::get_singleton()->wait_for_task_completion(pump_task_id);
_flush();
}
void set_pump_task_id(WorkerThreadPool::TaskID p_task_id) {
MutexLock lock(mutex);
pump_task_id = p_task_id;
}
CommandQueueMT();
~CommandQueueMT();
};
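/*
* Illustrative usage sketch (`MyServer`, `server` and the method names are hypothetical):
* ```
* CommandQueueMT queue;
*
* // Producer thread:
* queue.push(server, &MyServer::do_work, 42);                 // Asynchronous; returns immediately.
* int result = 0;
* queue.push_and_ret(server, &MyServer::compute, &result, 7); // Blocks until the command has been flushed.
*
* // Consumer thread:
* queue.flush_if_pending();
* ```
*/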

447
core/templates/cowdata.h Normal file

@@ -0,0 +1,447 @@
/**************************************************************************/
/* cowdata.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/error/error_macros.h"
#include "core/os/memory.h"
#include "core/templates/safe_refcount.h"
#include "core/templates/span.h"
#include <initializer_list>
#include <type_traits>
static_assert(std::is_trivially_destructible_v<std::atomic<uint64_t>>);
GODOT_GCC_WARNING_PUSH
GODOT_GCC_WARNING_IGNORE("-Wplacement-new") // Silence a false positive warning (see GH-52119).
GODOT_GCC_WARNING_IGNORE("-Wmaybe-uninitialized") // False positive raised when using constexpr.
template <typename T>
class CowData {
public:
typedef int64_t Size;
typedef uint64_t USize;
static constexpr USize MAX_INT = INT64_MAX;
private:
// Alignment: ↓ max_align_t ↓ USize ↓ max_align_t
// ┌────────────────────┬──┬─────────────┬──┬───────────...
// │ SafeNumeric<USize> │░░│ USize │░░│ T[]
// │ ref. count │░░│ data size │░░│ data
// └────────────────────┴──┴─────────────┴──┴───────────...
// Offset: ↑ REF_COUNT_OFFSET ↑ SIZE_OFFSET ↑ DATA_OFFSET
static constexpr size_t REF_COUNT_OFFSET = 0;
static constexpr size_t SIZE_OFFSET = ((REF_COUNT_OFFSET + sizeof(SafeNumeric<USize>)) % alignof(USize) == 0) ? (REF_COUNT_OFFSET + sizeof(SafeNumeric<USize>)) : ((REF_COUNT_OFFSET + sizeof(SafeNumeric<USize>)) + alignof(USize) - ((REF_COUNT_OFFSET + sizeof(SafeNumeric<USize>)) % alignof(USize)));
static constexpr size_t DATA_OFFSET = ((SIZE_OFFSET + sizeof(USize)) % alignof(max_align_t) == 0) ? (SIZE_OFFSET + sizeof(USize)) : ((SIZE_OFFSET + sizeof(USize)) + alignof(max_align_t) - ((SIZE_OFFSET + sizeof(USize)) % alignof(max_align_t)));
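// Worked example (illustrative): on a typical 64-bit target where sizeof(SafeNumeric<USize>) == 8,
// sizeof(USize) == 8 and alignof(max_align_t) == 16, this evaluates to
// REF_COUNT_OFFSET == 0, SIZE_OFFSET == 8 and DATA_OFFSET == 16.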
mutable T *_ptr = nullptr;
// internal helpers
static _FORCE_INLINE_ T *_get_data_ptr(uint8_t *p_ptr) {
return (T *)(p_ptr + DATA_OFFSET);
}
_FORCE_INLINE_ SafeNumeric<USize> *_get_refcount() const {
if (!_ptr) {
return nullptr;
}
return (SafeNumeric<USize> *)((uint8_t *)_ptr - DATA_OFFSET + REF_COUNT_OFFSET);
}
_FORCE_INLINE_ USize *_get_size() const {
if (!_ptr) {
return nullptr;
}
return (USize *)((uint8_t *)_ptr - DATA_OFFSET + SIZE_OFFSET);
}
_FORCE_INLINE_ static USize _get_alloc_size(USize p_elements) {
return next_power_of_2(p_elements * (USize)sizeof(T));
}
_FORCE_INLINE_ static bool _get_alloc_size_checked(USize p_elements, USize *out) {
if (unlikely(p_elements == 0)) {
*out = 0;
return true;
}
#if defined(__GNUC__) && defined(IS_32_BIT)
USize o;
USize p;
if (__builtin_mul_overflow(p_elements, sizeof(T), &o)) {
*out = 0;
return false;
}
*out = next_power_of_2(o);
if (__builtin_add_overflow(o, static_cast<USize>(32), &p)) {
return false; // No longer allocated here.
}
#else
// Speed is more important than correctness here; do the operations unchecked
// and hope for the best.
*out = _get_alloc_size(p_elements);
#endif
return *out;
}
// Decrements the reference count. Deallocates the backing buffer if needed.
// After this function, _ptr is guaranteed to be NULL.
void _unref();
void _ref(const CowData *p_from);
void _ref(const CowData &p_from);
// Ensures that the backing buffer is at least p_size wide, and that this CowData instance is
// the only reference to it. The buffer is populated with as many element copies from the old
// array as possible.
// It is the responsibility of the caller to populate newly allocated space up to p_size.
Error _fork_allocate(USize p_size);
Error _copy_on_write() { return _fork_allocate(size()); }
// Allocates a backing array of the given capacity. The reference count is initialized to 1.
// It is the responsibility of the caller to populate the array and the new size property.
Error _alloc(USize p_alloc_size);
// Re-allocates the backing array to the given capacity. The reference count is initialized to 1.
// It is the responsibility of the caller to populate the array and the new size property.
// The caller must also make sure there are no other references to the data, as pointers may
// be invalidated.
Error _realloc(USize p_alloc_size);
public:
void operator=(const CowData<T> &p_from) { _ref(p_from); }
void operator=(CowData<T> &&p_from) {
if (_ptr == p_from._ptr) {
return;
}
_unref();
_ptr = p_from._ptr;
p_from._ptr = nullptr;
}
_FORCE_INLINE_ T *ptrw() {
_copy_on_write();
return _ptr;
}
_FORCE_INLINE_ const T *ptr() const {
return _ptr;
}
_FORCE_INLINE_ Size size() const {
USize *size = (USize *)_get_size();
if (size) {
return *size;
} else {
return 0;
}
}
_FORCE_INLINE_ void clear() { _unref(); }
_FORCE_INLINE_ bool is_empty() const { return _ptr == nullptr; }
_FORCE_INLINE_ void set(Size p_index, const T &p_elem) {
ERR_FAIL_INDEX(p_index, size());
_copy_on_write();
_ptr[p_index] = p_elem;
}
_FORCE_INLINE_ T &get_m(Size p_index) {
CRASH_BAD_INDEX(p_index, size());
_copy_on_write();
return _ptr[p_index];
}
_FORCE_INLINE_ const T &get(Size p_index) const {
CRASH_BAD_INDEX(p_index, size());
return _ptr[p_index];
}
template <bool p_initialize = true>
Error resize(Size p_size);
_FORCE_INLINE_ void remove_at(Size p_index) {
ERR_FAIL_INDEX(p_index, size());
T *p = ptrw();
Size len = size();
for (Size i = p_index; i < len - 1; i++) {
p[i] = std::move(p[i + 1]);
}
resize(len - 1);
}
Error insert(Size p_pos, const T &p_val) {
Size new_size = size() + 1;
ERR_FAIL_INDEX_V(p_pos, new_size, ERR_INVALID_PARAMETER);
Error err = resize(new_size);
ERR_FAIL_COND_V(err, err);
T *p = ptrw();
for (Size i = new_size - 1; i > p_pos; i--) {
p[i] = std::move(p[i - 1]);
}
p[p_pos] = p_val;
return OK;
}
_FORCE_INLINE_ operator Span<T>() const { return Span<T>(ptr(), size()); }
_FORCE_INLINE_ Span<T> span() const { return operator Span<T>(); }
_FORCE_INLINE_ CowData() {}
_FORCE_INLINE_ ~CowData() { _unref(); }
_FORCE_INLINE_ CowData(std::initializer_list<T> p_init);
_FORCE_INLINE_ CowData(const CowData<T> &p_from) { _ref(p_from); }
_FORCE_INLINE_ CowData(CowData<T> &&p_from) {
_ptr = p_from._ptr;
p_from._ptr = nullptr;
}
};
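/*
* Illustrative copy-on-write behavior (CowData is normally used indirectly, e.g. through Vector):
* ```
* CowData<int> a;
* a.resize(3);
* CowData<int> b = a;       // b shares a's buffer; only the reference count changes.
* b.ptrw()[0] = 7;          // ptrw() forks: b now owns a unique copy, a is untouched.
* const int *ra = a.ptr();  // ptr() never forks and never allocates.
* ```
*/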
template <typename T>
void CowData<T>::_unref() {
if (!_ptr) {
return;
}
SafeNumeric<USize> *refc = _get_refcount();
if (refc->decrement() > 0) {
// Data is still in use elsewhere.
_ptr = nullptr;
return;
}
// We had the only reference; destroy the data.
// First, invalidate our own reference.
// NOTE: It is required to do so immediately because it must not be observable outside of this
// function after refcount has already been reduced to 0.
// WARNING: It must be done before calling the destructors, because one of them may otherwise
// observe it through a reference to us. In this case, it may try to access the buffer,
// which is illegal after some of the elements in it have already been destructed, and
// may lead to a segmentation fault.
USize current_size = *_get_size();
T *prev_ptr = _ptr;
_ptr = nullptr;
if constexpr (!std::is_trivially_destructible_v<T>) {
for (USize i = 0; i < current_size; ++i) {
prev_ptr[i].~T();
}
}
// Free memory.
Memory::free_static((uint8_t *)prev_ptr - DATA_OFFSET, false);
#ifdef DEBUG_ENABLED
// If any destructors access us through pointers, it is a bug.
// We can't really test for that, but we can at least check no items have been added.
ERR_FAIL_COND_MSG(_ptr != nullptr, "Internal bug, please report: CowData was modified during destruction.");
#endif
}
template <typename T>
Error CowData<T>::_fork_allocate(USize p_size) {
if (p_size == 0) {
// Wants to clean up.
_unref();
return OK;
}
USize alloc_size;
ERR_FAIL_COND_V(!_get_alloc_size_checked(p_size, &alloc_size), ERR_OUT_OF_MEMORY);
const USize prev_size = size();
if (!_ptr) {
// We had no data before; just allocate a new array.
const Error error = _alloc(alloc_size);
if (error) {
return error;
}
} else if (_get_refcount()->get() == 1) {
// Resize in-place.
// NOTE: This case is not just an optimization, but required, as some callers depend on
// `_copy_on_write()` calls not changing the pointer after the first fork
// (e.g. mutable iterators).
if (p_size == prev_size) {
// We can shortcut here; we don't need to do anything.
return OK;
}
// Destroy extraneous elements.
if constexpr (!std::is_trivially_destructible_v<T>) {
for (USize i = prev_size; i > p_size; i--) {
_ptr[i - 1].~T();
}
}
if (alloc_size != _get_alloc_size(prev_size)) {
const Error error = _realloc(alloc_size);
if (error) {
// Out of memory; the current array is still valid though.
return error;
}
}
} else {
// Resize by forking.
// Create a temporary CowData to hold ownership over our _ptr.
// It will be used to copy elements from the old buffer over to our new buffer.
// At the end of the block, it will be automatically destructed by going out of scope.
const CowData prev_data;
prev_data._ptr = _ptr;
_ptr = nullptr;
const Error error = _alloc(alloc_size);
if (error) {
// On failure to allocate, just give up the old data and return.
// We could recover our old pointer from prev_data, but by just dropping our data, we
// consciously invite early failure for the case that the caller does not handle this
// case gracefully.
return error;
}
// Copy over elements.
const USize copied_element_count = MIN(prev_size, p_size);
if (copied_element_count > 0) {
if constexpr (std::is_trivially_copyable_v<T>) {
memcpy((uint8_t *)_ptr, (uint8_t *)prev_data._ptr, copied_element_count * sizeof(T));
} else {
for (USize i = 0; i < copied_element_count; i++) {
memnew_placement(&_ptr[i], T(prev_data._ptr[i]));
}
}
}
}
// Set our new size.
*_get_size() = p_size;
return OK;
}
template <typename T>
template <bool p_initialize>
Error CowData<T>::resize(Size p_size) {
ERR_FAIL_COND_V(p_size < 0, ERR_INVALID_PARAMETER);
const Size prev_size = size();
if (p_size == prev_size) {
return OK;
}
const Error error = _fork_allocate(p_size);
if (error) {
return error;
}
if constexpr (p_initialize) {
if (p_size > prev_size) {
memnew_arr_placement(_ptr + prev_size, p_size - prev_size);
}
} else {
static_assert(std::is_trivially_destructible_v<T>);
}
return OK;
}
template <typename T>
Error CowData<T>::_alloc(USize p_alloc_size) {
uint8_t *mem_new = (uint8_t *)Memory::alloc_static(p_alloc_size + DATA_OFFSET, false);
ERR_FAIL_NULL_V(mem_new, ERR_OUT_OF_MEMORY);
_ptr = _get_data_ptr(mem_new);
// If we alloc, we're guaranteed to be the only reference.
new (_get_refcount()) SafeNumeric<USize>(1);
return OK;
}
template <typename T>
Error CowData<T>::_realloc(USize p_alloc_size) {
uint8_t *mem_new = (uint8_t *)Memory::realloc_static(((uint8_t *)_ptr) - DATA_OFFSET, p_alloc_size + DATA_OFFSET, false);
ERR_FAIL_NULL_V(mem_new, ERR_OUT_OF_MEMORY);
_ptr = _get_data_ptr(mem_new);
// If we realloc, we're guaranteed to be the only reference.
// So the reference was 1 and was copied to be 1 again.
DEV_ASSERT(_get_refcount()->get() == 1);
return OK;
}
template <typename T>
void CowData<T>::_ref(const CowData *p_from) {
_ref(*p_from);
}
template <typename T>
void CowData<T>::_ref(const CowData &p_from) {
if (_ptr == p_from._ptr) {
return; // self assign, do nothing.
}
_unref(); // Resets _ptr to nullptr.
if (!p_from._ptr) {
return; //nothing to do
}
if (p_from._get_refcount()->conditional_increment() > 0) { // could reference
_ptr = p_from._ptr;
}
}
template <typename T>
CowData<T>::CowData(std::initializer_list<T> p_init) {
Error err = resize(p_init.size());
if (err != OK) {
return;
}
Size i = 0;
for (const T &element : p_init) {
set(i++, element);
}
}
GODOT_GCC_WARNING_POP
// Zero-constructing CowData initializes _ptr to nullptr (and thus empty).
template <typename T>
struct is_zero_constructible<CowData<T>> : std::true_type {};

165
core/templates/fixed_vector.h Normal file

@@ -0,0 +1,165 @@
/**************************************************************************/
/* fixed_vector.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/templates/span.h"
/**
* A high performance Vector of fixed capacity.
* Especially useful if you need to create an array on the stack, to
* prevent dynamic allocations (especially in bottleneck code).
*
* Choose CAPACITY such that it is enough for all elements that could be added through all branches.
*
*/
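/*
* Illustrative usage sketch:
* ```
* FixedVector<int, 8> values;  // Room for 8 elements, no heap allocation.
* values.push_back(1);
* values.push_back(2);
* for (int v : values) {       // begin()/end() enable range-based for loops.
*     // ...use v...
* }
* values.pop_back();
* ```
*/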
template <class T, uint32_t CAPACITY>
class FixedVector {
// This declaration allows us to access the private members of other FixedVector instantiations.
template <class T_, uint32_t CAPACITY_>
friend class FixedVector;
uint32_t _size = 0;
alignas(T) uint8_t _data[CAPACITY * sizeof(T)];
constexpr static uint32_t DATA_PADDING = MAX(alignof(T), alignof(uint32_t)) - alignof(uint32_t);
public:
_FORCE_INLINE_ constexpr FixedVector() = default;
constexpr FixedVector(std::initializer_list<T> p_init) {
ERR_FAIL_COND(p_init.size() > CAPACITY);
for (const T &element : p_init) {
memnew_placement(ptr() + _size++, T(element));
}
}
template <uint32_t p_capacity>
constexpr FixedVector(const FixedVector<T, p_capacity> &p_from) {
ERR_FAIL_COND(p_from.size() > CAPACITY);
if constexpr (std::is_trivially_copyable_v<T>) {
// Copy size and all provided elements at once.
memcpy((void *)&_size, (void *)&p_from._size, sizeof(_size) + DATA_PADDING + p_from.size() * sizeof(T));
} else {
for (const T &element : p_from) {
memnew_placement(ptr() + _size++, T(element));
}
}
}
template <uint32_t p_capacity>
constexpr FixedVector(FixedVector<T, p_capacity> &&p_from) {
ERR_FAIL_COND(p_from.size() > CAPACITY);
// Copy size and all provided elements at once.
// Note: Assumes trivial relocatability.
memcpy((void *)&_size, (void *)&p_from._size, sizeof(_size) + DATA_PADDING + p_from.size() * sizeof(T));
p_from._size = 0;
}
~FixedVector() {
if constexpr (!std::is_trivially_destructible_v<T>) {
for (uint32_t i = 0; i < _size; i++) {
ptr()[i].~T();
}
}
}
_FORCE_INLINE_ constexpr T *ptr() { return (T *)(_data); }
_FORCE_INLINE_ constexpr const T *ptr() const { return (const T *)(_data); }
_FORCE_INLINE_ constexpr operator Span<T>() const { return Span<T>(ptr(), size()); }
_FORCE_INLINE_ constexpr Span<T> span() const { return operator Span<T>(); }
_FORCE_INLINE_ constexpr uint32_t size() const { return _size; }
_FORCE_INLINE_ constexpr bool is_empty() const { return !_size; }
_FORCE_INLINE_ constexpr bool is_full() const { return _size == CAPACITY; }
_FORCE_INLINE_ constexpr uint32_t capacity() const { return CAPACITY; }
_FORCE_INLINE_ constexpr void clear() { resize_initialized(0); }
/// Changes the size of the vector.
/// If p_size > size(), default-constructs the new elements.
/// If p_size < size(), destructs the removed elements.
constexpr Error resize_initialized(uint32_t p_size) {
if (p_size > _size) {
ERR_FAIL_COND_V(p_size > CAPACITY, ERR_OUT_OF_MEMORY);
memnew_arr_placement(ptr() + _size, p_size - _size);
} else if (p_size < _size) {
if constexpr (!std::is_trivially_destructible_v<T>) {
for (uint32_t i = p_size; i < _size; i++) {
ptr()[i].~T();
}
}
}
_size = p_size;
return OK;
}
/// Changes the size of the vector.
/// Initialization of new elements is skipped, making this function faster than resize_initialized.
/// The caller is responsible for initializing the new values.
constexpr Error resize_uninitialized(uint32_t p_size) {
static_assert(std::is_trivially_destructible_v<T>, "resize_uninitialized is unsafe to call if T is not trivially destructible.");
ERR_FAIL_COND_V(p_size > CAPACITY, ERR_OUT_OF_MEMORY);
_size = p_size;
return OK;
}
constexpr void push_back(const T &p_val) {
ERR_FAIL_COND(_size >= CAPACITY);
memnew_placement(ptr() + _size, T(p_val));
_size++;
}
constexpr void pop_back() {
ERR_FAIL_COND(_size == 0);
_size--;
ptr()[_size].~T();
}
// NOTE: Subscripts sanity check the bounds to avoid undefined behavior.
// This is slower than direct buffer access and can prevent autovectorization.
// If the bounds are known, use ptr() subscript instead.
constexpr const T &operator[](uint32_t p_index) const {
CRASH_COND(p_index >= _size);
return ptr()[p_index];
}
constexpr T &operator[](uint32_t p_index) {
CRASH_COND(p_index >= _size);
return ptr()[p_index];
}
_FORCE_INLINE_ constexpr T *begin() { return ptr(); }
_FORCE_INLINE_ constexpr T *end() { return ptr() + _size; }
_FORCE_INLINE_ constexpr const T *begin() const { return ptr(); }
_FORCE_INLINE_ constexpr const T *end() const { return ptr() + _size; }
};
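A minimal usage sketch for FixedVector, shown for illustration only (it is not part of fixed_vector.h); it assumes compilation inside the engine tree so the include below resolves.

#include "core/templates/fixed_vector.h"

void fixed_vector_demo() {
	// Capacity is a compile-time constant; storage lives inside the object itself (typically on the stack).
	FixedVector<int, 8> values;
	for (int i = 0; i < 5; i++) {
		values.push_back(i * i); // Errors out (without growing) if the capacity of 8 would be exceeded.
	}
	int sum = 0;
	for (int v : values) { // begin()/end() enable range-based iteration.
		sum += v;
	}
	values.resize_initialized(2); // Drops the trailing elements; destruction is trivial for int.
	// At this point: sum == 30, values.size() == 2, values.capacity() == 8.
}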

648
core/templates/hash_map.h Normal file
View File

@@ -0,0 +1,648 @@
/**************************************************************************/
/* hash_map.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/os/memory.h"
#include "core/templates/hashfuncs.h"
#include "core/templates/pair.h"
#include "core/templates/sort_list.h"
#include <initializer_list>
/**
* A HashMap implementation that uses open addressing with Robin Hood hashing.
* Robin Hood hashing swaps out entries that have a smaller probing distance
* than the to-be-inserted entry, which evens out the average probing distance
* and enables faster lookups. Backward shift deletion is employed to further
* improve performance and to avoid infinite loops in rare cases.
*
* Keys and values are kept in a doubly linked list in insertion order. This
* has a slight performance overhead on lookup, which can be mostly compensated
* for by using a paged allocator if required.
*
* The assignment operator copies the pairs from one map to the other.
* A usage sketch follows the class definition.
*/
template <typename TKey, typename TValue>
struct HashMapElement {
HashMapElement *next = nullptr;
HashMapElement *prev = nullptr;
KeyValue<TKey, TValue> data;
HashMapElement() {}
HashMapElement(const TKey &p_key, const TValue &p_value) :
data(p_key, p_value) {}
};
template <typename TKey, typename TValue,
typename Hasher = HashMapHasherDefault,
typename Comparator = HashMapComparatorDefault<TKey>,
typename Allocator = DefaultTypedAllocator<HashMapElement<TKey, TValue>>>
class HashMap : private Allocator {
public:
static constexpr uint32_t MIN_CAPACITY_INDEX = 2; // Use a prime.
static constexpr float MAX_OCCUPANCY = 0.75;
static constexpr uint32_t EMPTY_HASH = 0;
private:
HashMapElement<TKey, TValue> **elements = nullptr;
uint32_t *hashes = nullptr;
HashMapElement<TKey, TValue> *head_element = nullptr;
HashMapElement<TKey, TValue> *tail_element = nullptr;
uint32_t capacity_index = 0;
uint32_t num_elements = 0;
_FORCE_INLINE_ static uint32_t _hash(const TKey &p_key) {
uint32_t hash = Hasher::hash(p_key);
if (unlikely(hash == EMPTY_HASH)) {
hash = EMPTY_HASH + 1;
}
return hash;
}
_FORCE_INLINE_ static constexpr void _increment_mod(uint32_t &r_pos, const uint32_t p_capacity) {
r_pos++;
// `if` is faster than both fastmod and mod.
if (unlikely(r_pos == p_capacity)) {
r_pos = 0;
}
}
static _FORCE_INLINE_ uint32_t _get_probe_length(const uint32_t p_pos, const uint32_t p_hash, const uint32_t p_capacity, const uint64_t p_capacity_inv) {
const uint32_t original_pos = fastmod(p_hash, p_capacity_inv, p_capacity);
const uint32_t distance_pos = p_pos - original_pos + p_capacity;
// At most p_capacity over 0, so we can use an if (faster than fastmod).
return distance_pos >= p_capacity ? distance_pos - p_capacity : distance_pos;
}
bool _lookup_pos(const TKey &p_key, uint32_t &r_pos) const {
return elements != nullptr && num_elements > 0 && _lookup_pos_unchecked(p_key, _hash(p_key), r_pos);
}
/// Note: Assumes that elements != nullptr
bool _lookup_pos_unchecked(const TKey &p_key, uint32_t p_hash, uint32_t &r_pos) const {
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t pos = fastmod(p_hash, capacity_inv, capacity);
uint32_t distance = 0;
while (true) {
if (hashes[pos] == EMPTY_HASH) {
return false;
}
if (distance > _get_probe_length(pos, hashes[pos], capacity, capacity_inv)) {
return false;
}
if (hashes[pos] == p_hash && Comparator::compare(elements[pos]->data.key, p_key)) {
r_pos = pos;
return true;
}
_increment_mod(pos, capacity);
distance++;
}
}
void _insert_element(uint32_t p_hash, HashMapElement<TKey, TValue> *p_value) {
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t hash = p_hash;
HashMapElement<TKey, TValue> *value = p_value;
uint32_t distance = 0;
uint32_t pos = fastmod(hash, capacity_inv, capacity);
while (true) {
if (hashes[pos] == EMPTY_HASH) {
elements[pos] = value;
hashes[pos] = hash;
num_elements++;
return;
}
// Not an empty slot, let's check the probing length of the existing one.
uint32_t existing_probe_len = _get_probe_length(pos, hashes[pos], capacity, capacity_inv);
if (existing_probe_len < distance) {
SWAP(hash, hashes[pos]);
SWAP(value, elements[pos]);
distance = existing_probe_len;
}
_increment_mod(pos, capacity);
distance++;
}
}
void _resize_and_rehash(uint32_t p_new_capacity_index) {
uint32_t old_capacity = hash_table_size_primes[capacity_index];
// Capacity can't be 0.
capacity_index = MAX((uint32_t)MIN_CAPACITY_INDEX, p_new_capacity_index);
uint32_t capacity = hash_table_size_primes[capacity_index];
HashMapElement<TKey, TValue> **old_elements = elements;
uint32_t *old_hashes = hashes;
num_elements = 0;
static_assert(EMPTY_HASH == 0, "Assuming EMPTY_HASH = 0 for alloc_static_zeroed call");
hashes = reinterpret_cast<uint32_t *>(Memory::alloc_static_zeroed(sizeof(uint32_t) * capacity));
elements = reinterpret_cast<HashMapElement<TKey, TValue> **>(Memory::alloc_static_zeroed(sizeof(HashMapElement<TKey, TValue> *) * capacity));
if (old_capacity == 0) {
// Nothing to do.
return;
}
for (uint32_t i = 0; i < old_capacity; i++) {
if (old_hashes[i] == EMPTY_HASH) {
continue;
}
_insert_element(old_hashes[i], old_elements[i]);
}
Memory::free_static(old_elements);
Memory::free_static(old_hashes);
}
_FORCE_INLINE_ HashMapElement<TKey, TValue> *_insert(const TKey &p_key, const TValue &p_value, uint32_t p_hash, bool p_front_insert = false) {
uint32_t capacity = hash_table_size_primes[capacity_index];
if (unlikely(elements == nullptr)) {
// Allocate on demand to save memory.
static_assert(EMPTY_HASH == 0, "Assuming EMPTY_HASH = 0 for alloc_static_zeroed call");
hashes = reinterpret_cast<uint32_t *>(Memory::alloc_static_zeroed(sizeof(uint32_t) * capacity));
elements = reinterpret_cast<HashMapElement<TKey, TValue> **>(Memory::alloc_static_zeroed(sizeof(HashMapElement<TKey, TValue> *) * capacity));
}
if (num_elements + 1 > MAX_OCCUPANCY * capacity) {
ERR_FAIL_COND_V_MSG(capacity_index + 1 == HASH_TABLE_SIZE_MAX, nullptr, "Hash table maximum capacity reached, aborting insertion.");
_resize_and_rehash(capacity_index + 1);
}
HashMapElement<TKey, TValue> *elem = Allocator::new_allocation(HashMapElement<TKey, TValue>(p_key, p_value));
if (tail_element == nullptr) {
head_element = elem;
tail_element = elem;
} else if (p_front_insert) {
head_element->prev = elem;
elem->next = head_element;
head_element = elem;
} else {
tail_element->next = elem;
elem->prev = tail_element;
tail_element = elem;
}
_insert_element(p_hash, elem);
return elem;
}
public:
_FORCE_INLINE_ uint32_t get_capacity() const { return hash_table_size_primes[capacity_index]; }
_FORCE_INLINE_ uint32_t size() const { return num_elements; }
/* Standard Godot Container API */
bool is_empty() const {
return num_elements == 0;
}
void clear() {
if (elements == nullptr || num_elements == 0) {
return;
}
uint32_t capacity = hash_table_size_primes[capacity_index];
for (uint32_t i = 0; i < capacity; i++) {
if (hashes[i] == EMPTY_HASH) {
continue;
}
hashes[i] = EMPTY_HASH;
Allocator::delete_allocation(elements[i]);
elements[i] = nullptr;
}
tail_element = nullptr;
head_element = nullptr;
num_elements = 0;
}
void sort() {
sort_custom<KeyValueSort<TKey, TValue>>();
}
template <typename C>
void sort_custom() {
if (size() < 2) {
return;
}
using E = HashMapElement<TKey, TValue>;
SortList<E, KeyValue<TKey, TValue>, &E::data, &E::prev, &E::next, C> sorter;
sorter.sort(head_element, tail_element);
}
TValue &get(const TKey &p_key) {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
CRASH_COND_MSG(!exists, "HashMap key not found.");
return elements[pos]->data.value;
}
const TValue &get(const TKey &p_key) const {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
CRASH_COND_MSG(!exists, "HashMap key not found.");
return elements[pos]->data.value;
}
const TValue *getptr(const TKey &p_key) const {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
if (exists) {
return &elements[pos]->data.value;
}
return nullptr;
}
TValue *getptr(const TKey &p_key) {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
if (exists) {
return &elements[pos]->data.value;
}
return nullptr;
}
_FORCE_INLINE_ bool has(const TKey &p_key) const {
uint32_t _pos = 0;
return _lookup_pos(p_key, _pos);
}
bool erase(const TKey &p_key) {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
if (!exists) {
return false;
}
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t next_pos = fastmod((pos + 1), capacity_inv, capacity);
while (hashes[next_pos] != EMPTY_HASH && _get_probe_length(next_pos, hashes[next_pos], capacity, capacity_inv) != 0) {
SWAP(hashes[next_pos], hashes[pos]);
SWAP(elements[next_pos], elements[pos]);
pos = next_pos;
_increment_mod(next_pos, capacity);
}
hashes[pos] = EMPTY_HASH;
if (head_element == elements[pos]) {
head_element = elements[pos]->next;
}
if (tail_element == elements[pos]) {
tail_element = elements[pos]->prev;
}
if (elements[pos]->prev) {
elements[pos]->prev->next = elements[pos]->next;
}
if (elements[pos]->next) {
elements[pos]->next->prev = elements[pos]->prev;
}
Allocator::delete_allocation(elements[pos]);
elements[pos] = nullptr;
num_elements--;
return true;
}
// Replaces the key of an entry in-place, without invalidating iterators or changing the entry's position during iteration.
// p_old_key must exist in the map and p_new_key must not, unless it is equal to p_old_key.
bool replace_key(const TKey &p_old_key, const TKey &p_new_key) {
ERR_FAIL_COND_V(elements == nullptr || num_elements == 0, false);
if (p_old_key == p_new_key) {
return true;
}
const uint32_t new_hash = _hash(p_new_key);
uint32_t pos = 0;
ERR_FAIL_COND_V(_lookup_pos_unchecked(p_new_key, new_hash, pos), false);
ERR_FAIL_COND_V(!_lookup_pos(p_old_key, pos), false);
HashMapElement<TKey, TValue> *element = elements[pos];
// Delete the old entries in hashes and elements.
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t next_pos = fastmod((pos + 1), capacity_inv, capacity);
while (hashes[next_pos] != EMPTY_HASH && _get_probe_length(next_pos, hashes[next_pos], capacity, capacity_inv) != 0) {
SWAP(hashes[next_pos], hashes[pos]);
SWAP(elements[next_pos], elements[pos]);
pos = next_pos;
_increment_mod(next_pos, capacity);
}
hashes[pos] = EMPTY_HASH;
elements[pos] = nullptr;
// _insert_element will increment this again.
num_elements--;
// Update the HashMapElement with the new key and reinsert it.
const_cast<TKey &>(element->data.key) = p_new_key;
_insert_element(new_hash, element);
return true;
}
// Reserves space for a number of elements, useful to avoid many resizes and rehashes.
// When adding a known (possibly large) number of elements at once, the requested capacity must be larger than the current capacity.
void reserve(uint32_t p_new_capacity) {
ERR_FAIL_COND_MSG(p_new_capacity < size(), "reserve() called with a capacity smaller than the current size. This is likely a mistake.");
uint32_t new_index = capacity_index;
while (hash_table_size_primes[new_index] < p_new_capacity) {
ERR_FAIL_COND_MSG(new_index + 1 == (uint32_t)HASH_TABLE_SIZE_MAX, nullptr);
new_index++;
}
if (new_index == capacity_index) {
return;
}
if (elements == nullptr) {
capacity_index = new_index;
return; // Unallocated yet.
}
_resize_and_rehash(new_index);
}
/** Iterator API **/
struct ConstIterator {
_FORCE_INLINE_ const KeyValue<TKey, TValue> &operator*() const {
return E->data;
}
_FORCE_INLINE_ const KeyValue<TKey, TValue> *operator->() const { return &E->data; }
_FORCE_INLINE_ ConstIterator &operator++() {
if (E) {
E = E->next;
}
return *this;
}
_FORCE_INLINE_ ConstIterator &operator--() {
if (E) {
E = E->prev;
}
return *this;
}
_FORCE_INLINE_ bool operator==(const ConstIterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const ConstIterator &b) const { return E != b.E; }
_FORCE_INLINE_ explicit operator bool() const {
return E != nullptr;
}
_FORCE_INLINE_ ConstIterator(const HashMapElement<TKey, TValue> *p_E) { E = p_E; }
_FORCE_INLINE_ ConstIterator() {}
_FORCE_INLINE_ ConstIterator(const ConstIterator &p_it) { E = p_it.E; }
_FORCE_INLINE_ void operator=(const ConstIterator &p_it) {
E = p_it.E;
}
private:
const HashMapElement<TKey, TValue> *E = nullptr;
};
struct Iterator {
_FORCE_INLINE_ KeyValue<TKey, TValue> &operator*() const {
return E->data;
}
_FORCE_INLINE_ KeyValue<TKey, TValue> *operator->() const { return &E->data; }
_FORCE_INLINE_ Iterator &operator++() {
if (E) {
E = E->next;
}
return *this;
}
_FORCE_INLINE_ Iterator &operator--() {
if (E) {
E = E->prev;
}
return *this;
}
_FORCE_INLINE_ bool operator==(const Iterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const Iterator &b) const { return E != b.E; }
_FORCE_INLINE_ explicit operator bool() const {
return E != nullptr;
}
_FORCE_INLINE_ Iterator(HashMapElement<TKey, TValue> *p_E) { E = p_E; }
_FORCE_INLINE_ Iterator() {}
_FORCE_INLINE_ Iterator(const Iterator &p_it) { E = p_it.E; }
_FORCE_INLINE_ void operator=(const Iterator &p_it) {
E = p_it.E;
}
operator ConstIterator() const {
return ConstIterator(E);
}
private:
HashMapElement<TKey, TValue> *E = nullptr;
};
_FORCE_INLINE_ Iterator begin() {
return Iterator(head_element);
}
_FORCE_INLINE_ Iterator end() {
return Iterator(nullptr);
}
_FORCE_INLINE_ Iterator last() {
return Iterator(tail_element);
}
_FORCE_INLINE_ Iterator find(const TKey &p_key) {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
if (!exists) {
return end();
}
return Iterator(elements[pos]);
}
_FORCE_INLINE_ void remove(const Iterator &p_iter) {
if (p_iter) {
erase(p_iter->key);
}
}
_FORCE_INLINE_ ConstIterator begin() const {
return ConstIterator(head_element);
}
_FORCE_INLINE_ ConstIterator end() const {
return ConstIterator(nullptr);
}
_FORCE_INLINE_ ConstIterator last() const {
return ConstIterator(tail_element);
}
_FORCE_INLINE_ ConstIterator find(const TKey &p_key) const {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
if (!exists) {
return end();
}
return ConstIterator(elements[pos]);
}
/* Indexing */
const TValue &operator[](const TKey &p_key) const {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
CRASH_COND(!exists);
return elements[pos]->data.value;
}
TValue &operator[](const TKey &p_key) {
const uint32_t hash = _hash(p_key);
uint32_t pos = 0;
bool exists = elements && num_elements > 0 && _lookup_pos_unchecked(p_key, hash, pos);
if (!exists) {
return _insert(p_key, TValue(), hash)->data.value;
} else {
return elements[pos]->data.value;
}
}
/* Insert */
Iterator insert(const TKey &p_key, const TValue &p_value, bool p_front_insert = false) {
const uint32_t hash = _hash(p_key);
uint32_t pos = 0;
bool exists = elements && num_elements > 0 && _lookup_pos_unchecked(p_key, hash, pos);
if (!exists) {
return Iterator(_insert(p_key, p_value, hash, p_front_insert));
} else {
elements[pos]->data.value = p_value;
return Iterator(elements[pos]);
}
}
/* Constructors */
HashMap(const HashMap &p_other) {
reserve(hash_table_size_primes[p_other.capacity_index]);
if (p_other.num_elements == 0) {
return;
}
for (const KeyValue<TKey, TValue> &E : p_other) {
insert(E.key, E.value);
}
}
void operator=(const HashMap &p_other) {
if (this == &p_other) {
return; // Ignore self assignment.
}
if (num_elements != 0) {
clear();
}
reserve(hash_table_size_primes[p_other.capacity_index]);
if (p_other.elements == nullptr) {
return; // Nothing to copy.
}
for (const KeyValue<TKey, TValue> &E : p_other) {
insert(E.key, E.value);
}
}
HashMap(uint32_t p_initial_capacity) {
// Capacity can't be 0.
capacity_index = 0;
reserve(p_initial_capacity);
}
HashMap() {
capacity_index = MIN_CAPACITY_INDEX;
}
HashMap(std::initializer_list<KeyValue<TKey, TValue>> p_init) {
reserve(p_init.size());
for (const KeyValue<TKey, TValue> &E : p_init) {
insert(E.key, E.value);
}
}
uint32_t debug_get_hash(uint32_t p_index) {
if (num_elements == 0) {
return 0;
}
ERR_FAIL_INDEX_V(p_index, get_capacity(), 0);
return hashes[p_index];
}
Iterator debug_get_element(uint32_t p_index) {
if (num_elements == 0) {
return Iterator();
}
ERR_FAIL_INDEX_V(p_index, get_capacity(), Iterator());
return Iterator(elements[p_index]);
}
~HashMap() {
clear();
if (elements != nullptr) {
Memory::free_static(elements);
Memory::free_static(hashes);
}
}
};
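A brief usage sketch for this HashMap, shown for illustration only (not part of hash_map.h); it assumes the engine's String type and include paths are available.

#include "core/string/ustring.h"
#include "core/templates/hash_map.h"

void hash_map_demo() {
	HashMap<String, int> scores;
	scores.insert("alice", 3);
	scores["bob"] = 5; // operator[] inserts a default-constructed value if the key is missing.
	if (int *p = scores.getptr("alice")) {
		*p += 1; // In-place update without a second lookup.
	}
	// Iteration follows insertion order thanks to the internal doubly linked list.
	for (const KeyValue<String, int> &kv : scores) {
		// kv.key is "alice" then "bob"; kv.value is 4 then 5.
	}
	scores.erase("bob"); // Backward shift deletion keeps the remaining probe sequences valid.
}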

497
core/templates/hash_set.h Normal file
View File

@@ -0,0 +1,497 @@
/**************************************************************************/
/* hash_set.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/os/memory.h"
#include "core/templates/hashfuncs.h"
/**
* Implementation of a Set using a bidirectionally indexed hash map.
* Use RBSet instead of this only if the following conditions are met:
*
* - You need to keep an iterator or const pointer to Key while adding/removing elements in the meantime.
* - Iteration order matters (via operator<).
*
* A usage sketch follows the class definition.
*/
template <typename TKey,
typename Hasher = HashMapHasherDefault,
typename Comparator = HashMapComparatorDefault<TKey>>
class HashSet {
public:
static constexpr uint32_t MIN_CAPACITY_INDEX = 2; // Use a prime.
static constexpr float MAX_OCCUPANCY = 0.75;
static constexpr uint32_t EMPTY_HASH = 0;
private:
TKey *keys = nullptr;
uint32_t *hash_to_key = nullptr;
uint32_t *key_to_hash = nullptr;
uint32_t *hashes = nullptr;
uint32_t capacity_index = 0;
uint32_t num_elements = 0;
_FORCE_INLINE_ uint32_t _hash(const TKey &p_key) const {
uint32_t hash = Hasher::hash(p_key);
if (unlikely(hash == EMPTY_HASH)) {
hash = EMPTY_HASH + 1;
}
return hash;
}
_FORCE_INLINE_ static constexpr void _increment_mod(uint32_t &r_pos, const uint32_t p_capacity) {
r_pos++;
// `if` is faster than both fastmod and mod.
if (unlikely(r_pos == p_capacity)) {
r_pos = 0;
}
}
static _FORCE_INLINE_ uint32_t _get_probe_length(const uint32_t p_pos, const uint32_t p_hash, const uint32_t p_capacity, const uint64_t p_capacity_inv) {
const uint32_t original_pos = fastmod(p_hash, p_capacity_inv, p_capacity);
const uint32_t distance_pos = p_pos - original_pos + p_capacity;
// At most p_capacity over 0, so we can use an if (faster than fastmod).
return distance_pos >= p_capacity ? distance_pos - p_capacity : distance_pos;
}
bool _lookup_pos(const TKey &p_key, uint32_t &r_pos) const {
if (keys == nullptr || num_elements == 0) {
return false; // Failed lookups, no elements
}
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t hash = _hash(p_key);
uint32_t pos = fastmod(hash, capacity_inv, capacity);
uint32_t distance = 0;
while (true) {
if (hashes[pos] == EMPTY_HASH) {
return false;
}
if (hashes[pos] == hash && Comparator::compare(keys[hash_to_key[pos]], p_key)) {
r_pos = hash_to_key[pos];
return true;
}
if (distance > _get_probe_length(pos, hashes[pos], capacity, capacity_inv)) {
return false;
}
_increment_mod(pos, capacity);
distance++;
}
}
uint32_t _insert_with_hash(uint32_t p_hash, uint32_t p_index) {
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t hash = p_hash;
uint32_t index = p_index;
uint32_t distance = 0;
uint32_t pos = fastmod(hash, capacity_inv, capacity);
while (true) {
if (hashes[pos] == EMPTY_HASH) {
hashes[pos] = hash;
key_to_hash[index] = pos;
hash_to_key[pos] = index;
return pos;
}
// Not an empty slot, let's check the probing length of the existing one.
uint32_t existing_probe_len = _get_probe_length(pos, hashes[pos], capacity, capacity_inv);
if (existing_probe_len < distance) {
key_to_hash[index] = pos;
SWAP(hash, hashes[pos]);
SWAP(index, hash_to_key[pos]);
distance = existing_probe_len;
}
_increment_mod(pos, capacity);
distance++;
}
}
void _resize_and_rehash(uint32_t p_new_capacity_index) {
// Capacity can't be 0.
capacity_index = MAX((uint32_t)MIN_CAPACITY_INDEX, p_new_capacity_index);
uint32_t capacity = hash_table_size_primes[capacity_index];
uint32_t *old_hashes = hashes;
uint32_t *old_key_to_hash = key_to_hash;
static_assert(EMPTY_HASH == 0, "Assuming EMPTY_HASH = 0 for alloc_static_zeroed call");
hashes = reinterpret_cast<uint32_t *>(Memory::alloc_static_zeroed(sizeof(uint32_t) * capacity));
keys = reinterpret_cast<TKey *>(Memory::realloc_static(keys, sizeof(TKey) * capacity));
key_to_hash = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
hash_to_key = reinterpret_cast<uint32_t *>(Memory::realloc_static(hash_to_key, sizeof(uint32_t) * capacity));
for (uint32_t i = 0; i < num_elements; i++) {
uint32_t h = old_hashes[old_key_to_hash[i]];
_insert_with_hash(h, i);
}
Memory::free_static(old_hashes);
Memory::free_static(old_key_to_hash);
}
_FORCE_INLINE_ int32_t _insert(const TKey &p_key) {
uint32_t capacity = hash_table_size_primes[capacity_index];
if (unlikely(keys == nullptr)) {
// Allocate on demand to save memory.
static_assert(EMPTY_HASH == 0, "Assuming EMPTY_HASH = 0 for alloc_static_zeroed call");
hashes = reinterpret_cast<uint32_t *>(Memory::alloc_static_zeroed(sizeof(uint32_t) * capacity));
keys = reinterpret_cast<TKey *>(Memory::alloc_static(sizeof(TKey) * capacity));
key_to_hash = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
hash_to_key = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
}
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
if (exists) {
return pos;
} else {
if (num_elements + 1 > MAX_OCCUPANCY * capacity) {
ERR_FAIL_COND_V_MSG(capacity_index + 1 == HASH_TABLE_SIZE_MAX, -1, "Hash table maximum capacity reached, aborting insertion.");
_resize_and_rehash(capacity_index + 1);
}
uint32_t hash = _hash(p_key);
memnew_placement(&keys[num_elements], TKey(p_key));
_insert_with_hash(hash, num_elements);
num_elements++;
return num_elements - 1;
}
}
void _init_from(const HashSet &p_other) {
capacity_index = p_other.capacity_index;
num_elements = p_other.num_elements;
if (p_other.num_elements == 0) {
return;
}
uint32_t capacity = hash_table_size_primes[capacity_index];
hashes = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
keys = reinterpret_cast<TKey *>(Memory::alloc_static(sizeof(TKey) * capacity));
key_to_hash = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
hash_to_key = reinterpret_cast<uint32_t *>(Memory::alloc_static(sizeof(uint32_t) * capacity));
for (uint32_t i = 0; i < num_elements; i++) {
memnew_placement(&keys[i], TKey(p_other.keys[i]));
key_to_hash[i] = p_other.key_to_hash[i];
}
for (uint32_t i = 0; i < capacity; i++) {
hashes[i] = p_other.hashes[i];
hash_to_key[i] = p_other.hash_to_key[i];
}
}
public:
_FORCE_INLINE_ uint32_t get_capacity() const { return hash_table_size_primes[capacity_index]; }
_FORCE_INLINE_ uint32_t size() const { return num_elements; }
/* Standard Godot Container API */
bool is_empty() const {
return num_elements == 0;
}
void clear() {
if (keys == nullptr || num_elements == 0) {
return;
}
uint32_t capacity = hash_table_size_primes[capacity_index];
for (uint32_t i = 0; i < capacity; i++) {
hashes[i] = EMPTY_HASH;
}
for (uint32_t i = 0; i < num_elements; i++) {
keys[i].~TKey();
}
num_elements = 0;
}
_FORCE_INLINE_ bool has(const TKey &p_key) const {
uint32_t _pos = 0;
return _lookup_pos(p_key, _pos);
}
bool erase(const TKey &p_key) {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
if (!exists) {
return false;
}
uint32_t key_pos = pos;
pos = key_to_hash[pos]; // Convert key index to hash-table position.
const uint32_t capacity = hash_table_size_primes[capacity_index];
const uint64_t capacity_inv = hash_table_size_primes_inv[capacity_index];
uint32_t next_pos = fastmod(pos + 1, capacity_inv, capacity);
while (hashes[next_pos] != EMPTY_HASH && _get_probe_length(next_pos, hashes[next_pos], capacity, capacity_inv) != 0) {
uint32_t kpos = hash_to_key[pos];
uint32_t kpos_next = hash_to_key[next_pos];
SWAP(key_to_hash[kpos], key_to_hash[kpos_next]);
SWAP(hashes[next_pos], hashes[pos]);
SWAP(hash_to_key[next_pos], hash_to_key[pos]);
pos = next_pos;
_increment_mod(next_pos, capacity);
}
hashes[pos] = EMPTY_HASH;
keys[key_pos].~TKey();
num_elements--;
if (key_pos < num_elements) {
// Not the last key; move the last one here to keep the keys array contiguous.
memnew_placement(&keys[key_pos], TKey(keys[num_elements]));
keys[num_elements].~TKey();
key_to_hash[key_pos] = key_to_hash[num_elements];
hash_to_key[key_to_hash[num_elements]] = key_pos;
}
return true;
}
// Reserves space for a number of elements, useful to avoid many resizes and rehashes.
// When adding a known (possibly large) number of elements at once, the requested capacity must be larger than the current capacity.
void reserve(uint32_t p_new_capacity) {
ERR_FAIL_COND_MSG(p_new_capacity < size(), "reserve() called with a capacity smaller than the current size. This is likely a mistake.");
uint32_t new_index = capacity_index;
while (hash_table_size_primes[new_index] < p_new_capacity) {
ERR_FAIL_COND_MSG(new_index + 1 == (uint32_t)HASH_TABLE_SIZE_MAX, nullptr);
new_index++;
}
if (new_index == capacity_index) {
return;
}
if (keys == nullptr) {
capacity_index = new_index;
return; // Unallocated yet.
}
_resize_and_rehash(new_index);
}
/** Iterator API **/
struct Iterator {
_FORCE_INLINE_ const TKey &operator*() const {
return keys[index];
}
_FORCE_INLINE_ const TKey *operator->() const {
return &keys[index];
}
_FORCE_INLINE_ Iterator &operator++() {
index++;
if (index >= (int32_t)num_keys) {
index = -1;
keys = nullptr;
num_keys = 0;
}
return *this;
}
_FORCE_INLINE_ Iterator &operator--() {
index--;
if (index < 0) {
index = -1;
keys = nullptr;
num_keys = 0;
}
return *this;
}
_FORCE_INLINE_ bool operator==(const Iterator &b) const { return keys == b.keys && index == b.index; }
_FORCE_INLINE_ bool operator!=(const Iterator &b) const { return keys != b.keys || index != b.index; }
_FORCE_INLINE_ explicit operator bool() const {
return keys != nullptr;
}
_FORCE_INLINE_ Iterator(const TKey *p_keys, uint32_t p_num_keys, int32_t p_index = -1) {
keys = p_keys;
num_keys = p_num_keys;
index = p_index;
}
_FORCE_INLINE_ Iterator() {}
_FORCE_INLINE_ Iterator(const Iterator &p_it) {
keys = p_it.keys;
num_keys = p_it.num_keys;
index = p_it.index;
}
_FORCE_INLINE_ void operator=(const Iterator &p_it) {
keys = p_it.keys;
num_keys = p_it.num_keys;
index = p_it.index;
}
private:
const TKey *keys = nullptr;
uint32_t num_keys = 0;
int32_t index = -1;
};
_FORCE_INLINE_ Iterator begin() const {
return num_elements ? Iterator(keys, num_elements, 0) : Iterator();
}
_FORCE_INLINE_ Iterator end() const {
return Iterator();
}
_FORCE_INLINE_ Iterator last() const {
if (num_elements == 0) {
return Iterator();
}
return Iterator(keys, num_elements, num_elements - 1);
}
_FORCE_INLINE_ Iterator find(const TKey &p_key) const {
uint32_t pos = 0;
bool exists = _lookup_pos(p_key, pos);
if (!exists) {
return end();
}
return Iterator(keys, num_elements, pos);
}
_FORCE_INLINE_ void remove(const Iterator &p_iter) {
if (p_iter) {
erase(*p_iter);
}
}
/* Insert */
Iterator insert(const TKey &p_key) {
uint32_t pos = _insert(p_key);
return Iterator(keys, num_elements, pos);
}
/* Constructors */
HashSet(const HashSet &p_other) {
_init_from(p_other);
}
void operator=(const HashSet &p_other) {
if (this == &p_other) {
return; // Ignore self assignment.
}
clear();
if (keys != nullptr) {
Memory::free_static(keys);
Memory::free_static(key_to_hash);
Memory::free_static(hash_to_key);
Memory::free_static(hashes);
keys = nullptr;
hashes = nullptr;
hash_to_key = nullptr;
key_to_hash = nullptr;
}
_init_from(p_other);
}
bool operator==(const HashSet &p_other) const {
if (num_elements != p_other.num_elements) {
return false;
}
for (uint32_t i = 0; i < num_elements; i++) {
if (!p_other.has(keys[i])) {
return false;
}
}
return true;
}
bool operator!=(const HashSet &p_other) const {
return !(*this == p_other);
}
HashSet(uint32_t p_initial_capacity) {
// Capacity can't be 0.
capacity_index = 0;
reserve(p_initial_capacity);
}
HashSet() {
capacity_index = MIN_CAPACITY_INDEX;
}
HashSet(std::initializer_list<TKey> p_init) {
reserve(p_init.size());
for (const TKey &E : p_init) {
insert(E);
}
}
void reset() {
clear();
if (keys != nullptr) {
Memory::free_static(keys);
Memory::free_static(key_to_hash);
Memory::free_static(hash_to_key);
Memory::free_static(hashes);
keys = nullptr;
hashes = nullptr;
hash_to_key = nullptr;
key_to_hash = nullptr;
}
capacity_index = MIN_CAPACITY_INDEX;
}
~HashSet() {
clear();
if (keys != nullptr) {
Memory::free_static(keys);
Memory::free_static(key_to_hash);
Memory::free_static(hash_to_key);
Memory::free_static(hashes);
}
}
};
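A short usage sketch for HashSet, shown for illustration only (not part of hash_set.h); it assumes the engine's String type and include paths.

#include "core/string/ustring.h"
#include "core/templates/hash_set.h"

void hash_set_demo() {
	HashSet<String> seen;
	seen.insert("red");
	seen.insert("green");
	seen.insert("red"); // Duplicate: the existing entry is reused, size() stays 2.
	if (seen.has("green")) {
		seen.erase("green"); // The last key is moved into the freed slot to keep the keys array dense.
	}
	for (const String &key : seen) {
		// Iteration walks the dense keys array, so order can change after erase().
	}
}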

638
core/templates/hashfuncs.h Normal file
View File

@@ -0,0 +1,638 @@
/**************************************************************************/
/* hashfuncs.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/math/aabb.h"
#include "core/math/basis.h"
#include "core/math/color.h"
#include "core/math/math_defs.h"
#include "core/math/math_funcs.h"
#include "core/math/plane.h"
#include "core/math/projection.h"
#include "core/math/quaternion.h"
#include "core/math/rect2.h"
#include "core/math/rect2i.h"
#include "core/math/transform_2d.h"
#include "core/math/transform_3d.h"
#include "core/math/vector2.h"
#include "core/math/vector2i.h"
#include "core/math/vector3.h"
#include "core/math/vector3i.h"
#include "core/math/vector4.h"
#include "core/math/vector4i.h"
#include "core/object/object_id.h"
#include "core/string/node_path.h"
#include "core/string/string_name.h"
#include "core/string/ustring.h"
#include "core/templates/pair.h"
#include "core/templates/rid.h"
#include "core/typedefs.h"
#include "core/variant/callable.h"
#ifdef _MSC_VER
#include <intrin.h> // Needed for `__umulh` below.
#endif
/**
* Hashing functions
*/
/**
* DJB2 hash function.
* @param p_cstr Null-terminated C string to hash.
* @return 32-bit hash code.
*/
static _FORCE_INLINE_ uint32_t hash_djb2(const char *p_cstr) {
const unsigned char *chr = (const unsigned char *)p_cstr;
uint32_t hash = 5381;
uint32_t c = *chr++;
while (c) {
hash = ((hash << 5) + hash) ^ c; /* hash * 33 ^ c */
c = *chr++;
}
return hash;
}
static _FORCE_INLINE_ uint32_t hash_djb2_buffer(const uint8_t *p_buff, int p_len, uint32_t p_prev = 5381) {
uint32_t hash = p_prev;
for (int i = 0; i < p_len; i++) {
hash = ((hash << 5) + hash) ^ p_buff[i]; /* hash * 33 ^ c */
}
return hash;
}
static _FORCE_INLINE_ uint32_t hash_djb2_one_32(uint32_t p_in, uint32_t p_prev = 5381) {
return ((p_prev << 5) + p_prev) ^ p_in;
}
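// Illustrative sketch (not part of the original hashfuncs.h): the p_prev parameter lets the
// DJB2 helpers above be chained, folding several values into a single hash code.
// The helper name below is hypothetical.
static _FORCE_INLINE_ uint32_t hash_djb2_two_32_example(uint32_t p_a, uint32_t p_b) {
	uint32_t hash = hash_djb2_one_32(p_a); // Starts from the default seed 5381.
	return hash_djb2_one_32(p_b, hash); // Feeds the previous hash back in as p_prev.
}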
/**
* Thomas Wang's 64-bit to 32-bit Hash function:
* https://web.archive.org/web/20071223173210/https:/www.concentric.net/~Ttwang/tech/inthash.htm
*
* @param p_int - 64-bit unsigned integer key to be hashed
* @return unsigned 32-bit value representing hashcode
*/
static _FORCE_INLINE_ uint32_t hash_one_uint64(const uint64_t p_int) {
uint64_t v = p_int;
v = (~v) + (v << 18); // v = (v << 18) - v - 1;
v = v ^ (v >> 31);
v = v * 21; // v = (v + (v << 2)) + (v << 4);
v = v ^ (v >> 11);
v = v + (v << 6);
v = v ^ (v >> 22);
return uint32_t(v);
}
static _FORCE_INLINE_ uint64_t hash64_murmur3_64(uint64_t key, uint64_t seed) {
key ^= seed;
key ^= key >> 33;
key *= 0xff51afd7ed558ccd;
key ^= key >> 33;
key *= 0xc4ceb9fe1a85ec53;
key ^= key >> 33;
return key;
}
#define HASH_MURMUR3_SEED 0x7F07C65
// Murmurhash3 32-bit version.
// All MurmurHash versions are public domain software, and the author disclaims all copyright to their code.
static _FORCE_INLINE_ uint32_t hash_murmur3_one_32(uint32_t p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
p_in *= 0xcc9e2d51;
p_in = (p_in << 15) | (p_in >> 17);
p_in *= 0x1b873593;
p_seed ^= p_in;
p_seed = (p_seed << 13) | (p_seed >> 19);
p_seed = p_seed * 5 + 0xe6546b64;
return p_seed;
}
static _FORCE_INLINE_ uint32_t hash_murmur3_one_float(float p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
union {
float f;
uint32_t i;
} u;
// Normalize +/- 0.0 and NaN values so they hash the same.
if (p_in == 0.0f) {
u.f = 0.0;
} else if (Math::is_nan(p_in)) {
u.f = Math::NaN;
} else {
u.f = p_in;
}
return hash_murmur3_one_32(u.i, p_seed);
}
static _FORCE_INLINE_ uint32_t hash_murmur3_one_64(uint64_t p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
p_seed = hash_murmur3_one_32(p_in & 0xFFFFFFFF, p_seed);
return hash_murmur3_one_32(p_in >> 32, p_seed);
}
static _FORCE_INLINE_ uint32_t hash_murmur3_one_double(double p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
union {
double d;
uint64_t i;
} u;
// Normalize +/- 0.0 and NaN values so they hash the same.
if (p_in == 0.0f) {
u.d = 0.0;
} else if (Math::is_nan(p_in)) {
u.d = Math::NaN;
} else {
u.d = p_in;
}
return hash_murmur3_one_64(u.i, p_seed);
}
static _FORCE_INLINE_ uint32_t hash_murmur3_one_real(real_t p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
#ifdef REAL_T_IS_DOUBLE
return hash_murmur3_one_double(p_in, p_seed);
#else
return hash_murmur3_one_float(p_in, p_seed);
#endif
}
static _FORCE_INLINE_ uint32_t hash_rotl32(uint32_t x, int8_t r) {
return (x << r) | (x >> (32 - r));
}
static _FORCE_INLINE_ uint32_t hash_fmix32(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
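// Illustrative sketch (not part of the original hashfuncs.h): the usual pattern for hashing a
// composite value is to chain hash_murmur3_one_* calls and finish with hash_fmix32, exactly as
// HashMapHasherDefault does below. The struct here is hypothetical.
struct ExamplePoint3i {
	int32_t x = 0, y = 0, z = 0;
	uint32_t hash() const {
		uint32_t h = hash_murmur3_one_32(uint32_t(x));
		h = hash_murmur3_one_32(uint32_t(y), h);
		h = hash_murmur3_one_32(uint32_t(z), h);
		return hash_fmix32(h); // Final avalanche step.
	}
};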
static _FORCE_INLINE_ uint32_t hash_murmur3_buffer(const void *key, int length, const uint32_t seed = HASH_MURMUR3_SEED) {
// Although not required, the default seed is a random prime number.
const uint8_t *data = (const uint8_t *)key;
const int nblocks = length / 4;
uint32_t h1 = seed;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
const uint32_t *blocks = (const uint32_t *)(data + nblocks * 4);
for (int i = -nblocks; i; i++) {
uint32_t k1 = blocks[i];
k1 *= c1;
k1 = hash_rotl32(k1, 15);
k1 *= c2;
h1 ^= k1;
h1 = hash_rotl32(h1, 13);
h1 = h1 * 5 + 0xe6546b64;
}
const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);
uint32_t k1 = 0;
switch (length & 3) {
case 3:
k1 ^= tail[2] << 16;
[[fallthrough]];
case 2:
k1 ^= tail[1] << 8;
[[fallthrough]];
case 1:
k1 ^= tail[0];
k1 *= c1;
k1 = hash_rotl32(k1, 15);
k1 *= c2;
h1 ^= k1;
};
// Finalize with additional bit mixing.
h1 ^= length;
return hash_fmix32(h1);
}
static _FORCE_INLINE_ uint32_t hash_djb2_one_float(double p_in, uint32_t p_prev = 5381) {
union {
double d;
uint64_t i;
} u;
// Normalize +/- 0.0 and NaN values so they hash the same.
if (p_in == 0.0f) {
u.d = 0.0;
} else if (Math::is_nan(p_in)) {
u.d = Math::NaN;
} else {
u.d = p_in;
}
return ((p_prev << 5) + p_prev) + hash_one_uint64(u.i);
}
template <typename T>
static _FORCE_INLINE_ uint32_t hash_make_uint32_t(T p_in) {
union {
T t;
uint32_t _u32;
} _u;
_u._u32 = 0;
_u.t = p_in;
return _u._u32;
}
static _FORCE_INLINE_ uint64_t hash_djb2_one_float_64(double p_in, uint64_t p_prev = 5381) {
union {
double d;
uint64_t i;
} u;
// Normalize +/- 0.0 and NaN values so they hash the same.
if (p_in == 0.0f) {
u.d = 0.0;
} else if (Math::is_nan(p_in)) {
u.d = Math::NaN;
} else {
u.d = p_in;
}
return ((p_prev << 5) + p_prev) + u.i;
}
static _FORCE_INLINE_ uint64_t hash_djb2_one_64(uint64_t p_in, uint64_t p_prev = 5381) {
return ((p_prev << 5) + p_prev) ^ p_in;
}
template <typename T>
static _FORCE_INLINE_ uint64_t hash_make_uint64_t(T p_in) {
union {
T t;
uint64_t _u64;
} _u;
_u._u64 = 0; // in case p_in is smaller
_u.t = p_in;
return _u._u64;
}
template <typename T>
class Ref;
struct HashMapHasherDefault {
// Generic hash function for any type.
template <typename T>
static _FORCE_INLINE_ uint32_t hash(const T *p_pointer) { return hash_one_uint64((uint64_t)p_pointer); }
template <typename T>
static _FORCE_INLINE_ uint32_t hash(const Ref<T> &p_ref) { return hash_one_uint64((uint64_t)p_ref.operator->()); }
template <typename F, typename S>
static _FORCE_INLINE_ uint32_t hash(const Pair<F, S> &p_pair) {
uint64_t h1 = hash(p_pair.first);
uint64_t h2 = hash(p_pair.second);
return hash_one_uint64((h1 << 32) | h2);
}
static _FORCE_INLINE_ uint32_t hash(const String &p_string) { return p_string.hash(); }
static _FORCE_INLINE_ uint32_t hash(const char *p_cstr) { return hash_djb2(p_cstr); }
static _FORCE_INLINE_ uint32_t hash(const wchar_t p_wchar) { return hash_fmix32(uint32_t(p_wchar)); }
static _FORCE_INLINE_ uint32_t hash(const char16_t p_uchar) { return hash_fmix32(uint32_t(p_uchar)); }
static _FORCE_INLINE_ uint32_t hash(const char32_t p_uchar) { return hash_fmix32(uint32_t(p_uchar)); }
static _FORCE_INLINE_ uint32_t hash(const RID &p_rid) { return hash_one_uint64(p_rid.get_id()); }
static _FORCE_INLINE_ uint32_t hash(const CharString &p_char_string) { return hash_djb2(p_char_string.get_data()); }
static _FORCE_INLINE_ uint32_t hash(const StringName &p_string_name) { return p_string_name.hash(); }
static _FORCE_INLINE_ uint32_t hash(const NodePath &p_path) { return p_path.hash(); }
static _FORCE_INLINE_ uint32_t hash(const ObjectID &p_id) { return hash_one_uint64(p_id); }
static _FORCE_INLINE_ uint32_t hash(const Callable &p_callable) { return p_callable.hash(); }
static _FORCE_INLINE_ uint32_t hash(const uint64_t p_int) { return hash_one_uint64(p_int); }
static _FORCE_INLINE_ uint32_t hash(const int64_t p_int) { return hash_one_uint64(uint64_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const float p_float) { return hash_murmur3_one_float(p_float); }
static _FORCE_INLINE_ uint32_t hash(const double p_double) { return hash_murmur3_one_double(p_double); }
static _FORCE_INLINE_ uint32_t hash(const uint32_t p_int) { return hash_fmix32(p_int); }
static _FORCE_INLINE_ uint32_t hash(const int32_t p_int) { return hash_fmix32(uint32_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const uint16_t p_int) { return hash_fmix32(uint32_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const int16_t p_int) { return hash_fmix32(uint32_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const uint8_t p_int) { return hash_fmix32(uint32_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const int8_t p_int) { return hash_fmix32(uint32_t(p_int)); }
static _FORCE_INLINE_ uint32_t hash(const Vector2i &p_vec) {
uint32_t h = hash_murmur3_one_32(uint32_t(p_vec.x));
h = hash_murmur3_one_32(uint32_t(p_vec.y), h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Vector3i &p_vec) {
uint32_t h = hash_murmur3_one_32(uint32_t(p_vec.x));
h = hash_murmur3_one_32(uint32_t(p_vec.y), h);
h = hash_murmur3_one_32(uint32_t(p_vec.z), h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Vector4i &p_vec) {
uint32_t h = hash_murmur3_one_32(uint32_t(p_vec.x));
h = hash_murmur3_one_32(uint32_t(p_vec.y), h);
h = hash_murmur3_one_32(uint32_t(p_vec.z), h);
h = hash_murmur3_one_32(uint32_t(p_vec.w), h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Vector2 &p_vec) {
uint32_t h = hash_murmur3_one_real(p_vec.x);
h = hash_murmur3_one_real(p_vec.y, h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Vector3 &p_vec) {
uint32_t h = hash_murmur3_one_real(p_vec.x);
h = hash_murmur3_one_real(p_vec.y, h);
h = hash_murmur3_one_real(p_vec.z, h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Vector4 &p_vec) {
uint32_t h = hash_murmur3_one_real(p_vec.x);
h = hash_murmur3_one_real(p_vec.y, h);
h = hash_murmur3_one_real(p_vec.z, h);
h = hash_murmur3_one_real(p_vec.w, h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Color &p_vec) {
uint32_t h = hash_murmur3_one_float(p_vec.r);
h = hash_murmur3_one_float(p_vec.g, h);
h = hash_murmur3_one_float(p_vec.b, h);
h = hash_murmur3_one_float(p_vec.a, h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Rect2i &p_rect) {
uint32_t h = hash_murmur3_one_32(uint32_t(p_rect.position.x));
h = hash_murmur3_one_32(uint32_t(p_rect.position.y), h);
h = hash_murmur3_one_32(uint32_t(p_rect.size.x), h);
h = hash_murmur3_one_32(uint32_t(p_rect.size.y), h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const Rect2 &p_rect) {
uint32_t h = hash_murmur3_one_real(p_rect.position.x);
h = hash_murmur3_one_real(p_rect.position.y, h);
h = hash_murmur3_one_real(p_rect.size.x, h);
h = hash_murmur3_one_real(p_rect.size.y, h);
return hash_fmix32(h);
}
static _FORCE_INLINE_ uint32_t hash(const AABB &p_aabb) {
uint32_t h = hash_murmur3_one_real(p_aabb.position.x);
h = hash_murmur3_one_real(p_aabb.position.y, h);
h = hash_murmur3_one_real(p_aabb.position.z, h);
h = hash_murmur3_one_real(p_aabb.size.x, h);
h = hash_murmur3_one_real(p_aabb.size.y, h);
h = hash_murmur3_one_real(p_aabb.size.z, h);
return hash_fmix32(h);
}
};
struct HashHasher {
static _FORCE_INLINE_ uint32_t hash(const int32_t hash) { return hash; }
static _FORCE_INLINE_ uint32_t hash(const uint32_t hash) { return hash; }
static _FORCE_INLINE_ uint64_t hash(const int64_t hash) { return hash; }
static _FORCE_INLINE_ uint64_t hash(const uint64_t hash) { return hash; }
};
// TODO: Fold this into HashMapHasherDefault once C++20 concepts are allowed
template <typename T>
struct HashableHasher {
static _FORCE_INLINE_ uint32_t hash(const T &hashable) { return hashable.hash(); }
};
template <typename T>
struct HashMapComparatorDefault {
static bool compare(const T &p_lhs, const T &p_rhs) {
return p_lhs == p_rhs;
}
};
template <>
struct HashMapComparatorDefault<float> {
static bool compare(const float &p_lhs, const float &p_rhs) {
return Math::is_same(p_lhs, p_rhs);
}
};
template <>
struct HashMapComparatorDefault<double> {
static bool compare(const double &p_lhs, const double &p_rhs) {
return Math::is_same(p_lhs, p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Color> {
static bool compare(const Color &p_lhs, const Color &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Vector2> {
static bool compare(const Vector2 &p_lhs, const Vector2 &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Vector3> {
static bool compare(const Vector3 &p_lhs, const Vector3 &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Vector4> {
static bool compare(const Vector4 &p_lhs, const Vector4 &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Rect2> {
static bool compare(const Rect2 &p_lhs, const Rect2 &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<AABB> {
static bool compare(const AABB &p_lhs, const AABB &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Plane> {
static bool compare(const Plane &p_lhs, const Plane &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Transform2D> {
static bool compare(const Transform2D &p_lhs, const Transform2D &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Basis> {
static bool compare(const Basis &p_lhs, const Basis &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Transform3D> {
static bool compare(const Transform3D &p_lhs, const Transform3D &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Projection> {
static bool compare(const Projection &p_lhs, const Projection &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
template <>
struct HashMapComparatorDefault<Quaternion> {
static bool compare(const Quaternion &p_lhs, const Quaternion &p_rhs) {
return p_lhs.is_same(p_rhs);
}
};
constexpr uint32_t HASH_TABLE_SIZE_MAX = 29;
inline constexpr uint32_t hash_table_size_primes[HASH_TABLE_SIZE_MAX] = {
5,
13,
23,
47,
97,
193,
389,
769,
1543,
3079,
6151,
12289,
24593,
49157,
98317,
196613,
393241,
786433,
1572869,
3145739,
6291469,
12582917,
25165843,
50331653,
100663319,
201326611,
402653189,
805306457,
1610612741,
};
// Computed with elem_i = UINT64_C(0xFFFFFFFFFFFFFFFF) / d_i + 1, where d_i is the i-th element of the above array.
inline constexpr uint64_t hash_table_size_primes_inv[HASH_TABLE_SIZE_MAX] = {
3689348814741910324,
1418980313362273202,
802032351030850071,
392483916461905354,
190172619316593316,
95578984837873325,
47420935922132524,
23987963684927896,
11955116055547344,
5991147799191151,
2998982941588287,
1501077717772769,
750081082979285,
375261795343686,
187625172388393,
93822606204624,
46909513691883,
23456218233098,
11728086747027,
5864041509391,
2932024948977,
1466014921160,
733007198436,
366503839517,
183251896093,
91625960335,
45812983922,
22906489714,
11453246088
};
/**
* Fastmod computes (n mod d) given the precomputed c much faster than n % d.
* The implementation of fastmod is based on the following paper by Daniel Lemire et al.
* Faster Remainder by Direct Computation: Applications to Compilers and Software Libraries
* https://arxiv.org/abs/1902.01961
*/
static _FORCE_INLINE_ uint32_t fastmod(const uint32_t n, const uint64_t c, const uint32_t d) {
#if defined(_MSC_VER)
// Returns the upper 64 bits of the product of two 64-bit unsigned integers.
// This intrinsic function is required since MSVC does not support unsigned 128-bit integers.
#if defined(_M_X64) || defined(_M_ARM64)
return __umulh(c * n, d);
#else
// Fallback to the slower method for 32-bit platforms.
return n % d;
#endif // _M_X64 || _M_ARM64
#else
#ifdef __SIZEOF_INT128__
// Prevent compiler warning, because we know what we are doing.
uint64_t lowbits = c * n;
__extension__ typedef unsigned __int128 uint128;
return static_cast<uint64_t>(((uint128)lowbits * d) >> 64);
#else
// Fallback to the slower method if no 128-bit unsigned integer type is available.
return n % d;
#endif // __SIZEOF_INT128__
#endif // _MSC_VER
}
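// Illustrative sketch (not part of the original hashfuncs.h): each entry of
// hash_table_size_primes_inv equals UINT64_C(0xFFFFFFFFFFFFFFFF) / d + 1 for the matching
// prime d, and with that constant fastmod(n, c, d) agrees with n % d for 32-bit n.
// The function name below is hypothetical.
static inline bool fastmod_matches_mod_example() {
	const uint32_t d = hash_table_size_primes[7]; // 769
	const uint64_t c = UINT64_C(0xFFFFFFFFFFFFFFFF) / d + 1; // Same value as hash_table_size_primes_inv[7].
	for (uint32_t n = 0; n < 100000; n++) {
		if (fastmod(n, c, d) != n % d) {
			return false;
		}
	}
	return true;
}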

View File

@@ -0,0 +1,49 @@
/**************************************************************************/
/* interpolated_property.cpp */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "interpolated_property.h"
#include "core/math/vector2.h"
namespace InterpolatedPropertyFuncs {
float lerp(float p_a, float p_b, float p_fraction) {
return Math::lerp(p_a, p_b, p_fraction);
}
double lerp(double p_a, double p_b, float p_fraction) {
return Math::lerp(p_a, p_b, (double)p_fraction);
}
Vector2 lerp(const Vector2 &p_a, const Vector2 &p_b, float p_fraction) {
return p_a.lerp(p_b, p_fraction);
}
} //namespace InterpolatedPropertyFuncs

View File

@@ -0,0 +1,107 @@
/**************************************************************************/
/* interpolated_property.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
struct Vector2;
namespace InterpolatedPropertyFuncs {
float lerp(float p_a, float p_b, float p_fraction);
double lerp(double p_a, double p_b, float p_fraction);
Vector2 lerp(const Vector2 &p_a, const Vector2 &p_b, float p_fraction);
} //namespace InterpolatedPropertyFuncs
// This class is intended to reduce the boilerplate involved
// in adding physics interpolation support to custom properties.
template <class T>
class InterpolatedProperty {
// Only needs interpolating / updating the servers when
// curr and prev are different.
bool _needs_interpolating = false;
T _interpolated;
T curr;
T prev;
public:
// FTI (fixed timestep interpolation) depends on the constant flow between
// current values (on the current tick) and stored previous values (from the previous tick).
// These should be updated on each tick, and also on resets.
void pump() {
prev = curr;
_needs_interpolating = false;
}
void reset() { pump(); }
void set_interpolated_value(const T &p_val) {
_interpolated = p_val;
}
const T &interpolated() const { return _interpolated; }
bool needs_interpolating() const { return _needs_interpolating; }
bool interpolate(float p_interpolation_fraction) {
if (_needs_interpolating) {
_interpolated = InterpolatedPropertyFuncs::lerp(prev, curr, p_interpolation_fraction);
return true;
}
return false;
}
operator T() const {
return curr;
}
bool operator==(const T &p_o) const {
return p_o == curr;
}
bool operator!=(const T &p_o) const {
return p_o != curr;
}
InterpolatedProperty &operator=(T p_val) {
curr = p_val;
_interpolated = p_val;
_needs_interpolating = true;
return *this;
}
InterpolatedProperty(T p_val) {
curr = p_val;
_interpolated = p_val;
pump();
}
InterpolatedProperty() {
// Ensure either the constructor is run,
// or the memory is zeroed if using a fundamental type.
_interpolated = T{};
curr = T{};
prev = T{};
}
};
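// A minimal usage sketch (kept inactive): how a hypothetical object could drive
// an InterpolatedProperty from physics ticks and frames. The struct, member and
// function names below are illustrative assumptions, not engine API.
#if 0
struct ExampleMover {
	InterpolatedProperty<float> height;

	void physics_tick(float p_new_height) {
		height.pump(); // The current value becomes the previous value for this tick.
		height = p_new_height; // Write the new current value and flag it for interpolation.
	}

	void frame(float p_interpolation_fraction) {
		if (height.interpolate(p_interpolation_fraction)) {
			// Read height.interpolated() here and push it to the relevant server.
		}
	}
};
#endif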

744
core/templates/list.h Normal file

@@ -0,0 +1,744 @@
/**************************************************************************/
/* list.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/error/error_macros.h"
#include "core/os/memory.h"
#include "core/templates/sort_list.h"
#include <initializer_list>
/**
* Generic Templatized Linked List Implementation.
* The implementation differs from the STL one because
* a compatible preallocated linked list can be written
* using the same API, and it supports features such as
* erasing an element through its iterator.
*/
template <typename T, typename A = DefaultAllocator>
class List {
struct _Data;
public:
class Element {
private:
friend class List<T, A>;
T value;
Element *next_ptr = nullptr;
Element *prev_ptr = nullptr;
_Data *data = nullptr;
public:
/**
* Get NEXT Element iterator, for constant lists.
*/
_FORCE_INLINE_ const Element *next() const {
return next_ptr;
}
/**
* Get NEXT Element iterator.
*/
_FORCE_INLINE_ Element *next() {
return next_ptr;
}
/**
* Get PREV Element iterator, for constant lists.
*/
_FORCE_INLINE_ const Element *prev() const {
return prev_ptr;
}
/**
* Get PREV Element iterator.
*/
_FORCE_INLINE_ Element *prev() {
return prev_ptr;
}
/**
* * operator, for using as *iterator, when iterators are defined on stack.
*/
_FORCE_INLINE_ const T &operator*() const {
return value;
}
/**
* operator->, for using as iterator->, when iterators are defined on stack, for constant lists.
*/
_FORCE_INLINE_ const T *operator->() const {
return &value;
}
/**
* * operator, for using as *iterator, when iterators are defined on stack.
*/
_FORCE_INLINE_ T &operator*() {
return value;
}
/**
* operator->, for using as iterator->, when iterators are defined on stack.
*/
_FORCE_INLINE_ T *operator->() {
return &value;
}
/**
* get the value stored in this element.
*/
_FORCE_INLINE_ T &get() {
return value;
}
/**
* get the value stored in this element, for constant lists
*/
_FORCE_INLINE_ const T &get() const {
return value;
}
/**
* set the value stored in this element.
*/
_FORCE_INLINE_ void set(const T &p_value) {
value = (T &)p_value;
}
void erase() {
data->erase(this);
}
void transfer_to_back(List<T, A> *p_dst_list);
_FORCE_INLINE_ Element() {}
};
typedef T ValueType;
struct ConstIterator {
_FORCE_INLINE_ const T &operator*() const {
return E->get();
}
_FORCE_INLINE_ const T *operator->() const { return &E->get(); }
_FORCE_INLINE_ ConstIterator &operator++() {
E = E->next();
return *this;
}
_FORCE_INLINE_ ConstIterator &operator--() {
E = E->prev();
return *this;
}
_FORCE_INLINE_ bool operator==(const ConstIterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const ConstIterator &b) const { return E != b.E; }
_FORCE_INLINE_ ConstIterator(const Element *p_E) { E = p_E; }
_FORCE_INLINE_ ConstIterator() {}
_FORCE_INLINE_ ConstIterator(const ConstIterator &p_it) { E = p_it.E; }
private:
const Element *E = nullptr;
};
struct Iterator {
_FORCE_INLINE_ T &operator*() const {
return E->get();
}
_FORCE_INLINE_ T *operator->() const { return &E->get(); }
_FORCE_INLINE_ Iterator &operator++() {
E = E->next();
return *this;
}
_FORCE_INLINE_ Iterator &operator--() {
E = E->prev();
return *this;
}
_FORCE_INLINE_ bool operator==(const Iterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const Iterator &b) const { return E != b.E; }
Iterator(Element *p_E) { E = p_E; }
Iterator() {}
Iterator(const Iterator &p_it) { E = p_it.E; }
operator ConstIterator() const {
return ConstIterator(E);
}
private:
Element *E = nullptr;
};
_FORCE_INLINE_ Iterator begin() {
return Iterator(front());
}
_FORCE_INLINE_ Iterator end() {
return Iterator(nullptr);
}
#if 0
//to use when replacing find()
_FORCE_INLINE_ Iterator find(const K &p_key) {
return Iterator(find(p_key));
}
#endif
_FORCE_INLINE_ ConstIterator begin() const {
return ConstIterator(front());
}
_FORCE_INLINE_ ConstIterator end() const {
return ConstIterator(nullptr);
}
#if 0
//to use when replacing find()
_FORCE_INLINE_ ConstIterator find(const K &p_key) const {
return ConstIterator(find(p_key));
}
#endif
private:
struct _Data {
Element *first = nullptr;
Element *last = nullptr;
int size_cache = 0;
bool erase(Element *p_I) {
ERR_FAIL_NULL_V(p_I, false);
ERR_FAIL_COND_V(p_I->data != this, false);
if (first == p_I) {
first = p_I->next_ptr;
}
if (last == p_I) {
last = p_I->prev_ptr;
}
if (p_I->prev_ptr) {
p_I->prev_ptr->next_ptr = p_I->next_ptr;
}
if (p_I->next_ptr) {
p_I->next_ptr->prev_ptr = p_I->prev_ptr;
}
memdelete_allocator<Element, A>(p_I);
size_cache--;
return true;
}
};
_Data *_data = nullptr;
public:
/**
* return a const iterator to the beginning of the list.
*/
_FORCE_INLINE_ const Element *front() const {
return _data ? _data->first : nullptr;
}
/**
* return an iterator to the beginning of the list.
*/
_FORCE_INLINE_ Element *front() {
return _data ? _data->first : nullptr;
}
/**
* return a const iterator to the last member of the list.
*/
_FORCE_INLINE_ const Element *back() const {
return _data ? _data->last : nullptr;
}
/**
* return an iterator to the last member of the list.
*/
_FORCE_INLINE_ Element *back() {
return _data ? _data->last : nullptr;
}
/**
* store a new element at the end of the list
*/
Element *push_back(const T &value) {
if (!_data) {
_data = memnew_allocator(_Data, A);
_data->first = nullptr;
_data->last = nullptr;
_data->size_cache = 0;
}
Element *n = memnew_allocator(Element, A);
n->value = (T &)value;
n->prev_ptr = _data->last;
n->next_ptr = nullptr;
n->data = _data;
if (_data->last) {
_data->last->next_ptr = n;
}
_data->last = n;
if (!_data->first) {
_data->first = n;
}
_data->size_cache++;
return n;
}
void pop_back() {
if (_data && _data->last) {
erase(_data->last);
}
}
/**
* store a new element at the beginning of the list
*/
Element *push_front(const T &value) {
if (!_data) {
_data = memnew_allocator(_Data, A);
_data->first = nullptr;
_data->last = nullptr;
_data->size_cache = 0;
}
Element *n = memnew_allocator(Element, A);
n->value = (T &)value;
n->prev_ptr = nullptr;
n->next_ptr = _data->first;
n->data = _data;
if (_data->first) {
_data->first->prev_ptr = n;
}
_data->first = n;
if (!_data->last) {
_data->last = n;
}
_data->size_cache++;
return n;
}
void pop_front() {
if (_data && _data->first) {
erase(_data->first);
}
}
Element *insert_after(Element *p_element, const T &p_value) {
CRASH_COND(p_element && (!_data || p_element->data != _data));
if (!p_element) {
return push_back(p_value);
}
Element *n = memnew_allocator(Element, A);
n->value = (T &)p_value;
n->prev_ptr = p_element;
n->next_ptr = p_element->next_ptr;
n->data = _data;
if (!p_element->next_ptr) {
_data->last = n;
} else {
p_element->next_ptr->prev_ptr = n;
}
p_element->next_ptr = n;
_data->size_cache++;
return n;
}
Element *insert_before(Element *p_element, const T &p_value) {
CRASH_COND(p_element && (!_data || p_element->data != _data));
if (!p_element) {
return push_back(p_value);
}
Element *n = memnew_allocator(Element, A);
n->value = (T &)p_value;
n->prev_ptr = p_element->prev_ptr;
n->next_ptr = p_element;
n->data = _data;
if (!p_element->prev_ptr) {
_data->first = n;
} else {
p_element->prev_ptr->next_ptr = n;
}
p_element->prev_ptr = n;
_data->size_cache++;
return n;
}
/**
* find an element in the list,
*/
template <typename T_v>
Element *find(const T_v &p_val) {
Element *it = front();
while (it) {
if (it->value == p_val) {
return it;
}
it = it->next();
}
return nullptr;
}
/**
* erase an element in the list, by iterator pointing to it. Return true if it was found/erased.
*/
bool erase(Element *p_I) {
if (_data && p_I) {
bool ret = _data->erase(p_I);
if (_data->size_cache == 0) {
memdelete_allocator<_Data, A>(_data);
_data = nullptr;
}
return ret;
}
return false;
}
/**
* erase the first element in the list, that contains value
*/
bool erase(const T &value) {
Element *I = find(value);
return erase(I);
}
/**
* return whether the list is empty
*/
_FORCE_INLINE_ bool is_empty() const {
return (!_data || !_data->size_cache);
}
/**
* clear the list
*/
void clear() {
while (front()) {
erase(front());
}
}
_FORCE_INLINE_ int size() const {
return _data ? _data->size_cache : 0;
}
void swap(Element *p_A, Element *p_B) {
ERR_FAIL_COND(!p_A || !p_B);
ERR_FAIL_COND(p_A->data != _data);
ERR_FAIL_COND(p_B->data != _data);
if (p_A == p_B) {
return;
}
Element *A_prev = p_A->prev_ptr;
Element *A_next = p_A->next_ptr;
Element *B_prev = p_B->prev_ptr;
Element *B_next = p_B->next_ptr;
if (A_prev) {
A_prev->next_ptr = p_B;
} else {
_data->first = p_B;
}
if (B_prev) {
B_prev->next_ptr = p_A;
} else {
_data->first = p_A;
}
if (A_next) {
A_next->prev_ptr = p_B;
} else {
_data->last = p_B;
}
if (B_next) {
B_next->prev_ptr = p_A;
} else {
_data->last = p_A;
}
p_A->prev_ptr = A_next == p_B ? p_B : B_prev;
p_A->next_ptr = B_next == p_A ? p_B : B_next;
p_B->prev_ptr = B_next == p_A ? p_A : A_prev;
p_B->next_ptr = A_next == p_B ? p_A : A_next;
}
/**
* copy the list
*/
void operator=(const List &p_list) {
	if (unlikely(this == &p_list)) {
		return;
	}
	clear();
const Element *it = p_list.front();
while (it) {
push_back(it->get());
it = it->next();
}
}
void operator=(List &&p_list) {
if (unlikely(this == &p_list)) {
return;
}
clear();
_data = p_list._data;
p_list._data = nullptr;
}
// Random access to elements, use with care,
// do not use for iteration.
T &get(int p_index) {
CRASH_BAD_INDEX(p_index, size());
Element *I = front();
int c = 0;
while (c < p_index) {
I = I->next();
c++;
}
return I->get();
}
// Random access to elements, use with care,
// do not use for iteration.
const T &get(int p_index) const {
CRASH_BAD_INDEX(p_index, size());
const Element *I = front();
int c = 0;
while (c < p_index) {
I = I->next();
c++;
}
return I->get();
}
void move_to_back(Element *p_I) {
ERR_FAIL_COND(p_I->data != _data);
if (!p_I->next_ptr) {
return;
}
if (_data->first == p_I) {
_data->first = p_I->next_ptr;
}
if (_data->last == p_I) {
_data->last = p_I->prev_ptr;
}
if (p_I->prev_ptr) {
p_I->prev_ptr->next_ptr = p_I->next_ptr;
}
p_I->next_ptr->prev_ptr = p_I->prev_ptr;
_data->last->next_ptr = p_I;
p_I->prev_ptr = _data->last;
p_I->next_ptr = nullptr;
_data->last = p_I;
}
void reverse() {
int s = size() / 2;
Element *F = front();
Element *B = back();
for (int i = 0; i < s; i++) {
SWAP(F->value, B->value);
F = F->next();
B = B->prev();
}
}
void move_to_front(Element *p_I) {
ERR_FAIL_COND(p_I->data != _data);
if (!p_I->prev_ptr) {
return;
}
if (_data->first == p_I) {
_data->first = p_I->next_ptr;
}
if (_data->last == p_I) {
_data->last = p_I->prev_ptr;
}
p_I->prev_ptr->next_ptr = p_I->next_ptr;
if (p_I->next_ptr) {
p_I->next_ptr->prev_ptr = p_I->prev_ptr;
}
_data->first->prev_ptr = p_I;
p_I->next_ptr = _data->first;
p_I->prev_ptr = nullptr;
_data->first = p_I;
}
void move_before(Element *value, Element *where) {
if (value->prev_ptr) {
value->prev_ptr->next_ptr = value->next_ptr;
} else {
_data->first = value->next_ptr;
}
if (value->next_ptr) {
value->next_ptr->prev_ptr = value->prev_ptr;
} else {
_data->last = value->prev_ptr;
}
value->next_ptr = where;
if (!where) {
value->prev_ptr = _data->last;
_data->last = value;
return;
}
value->prev_ptr = where->prev_ptr;
if (where->prev_ptr) {
where->prev_ptr->next_ptr = value;
} else {
_data->first = value;
}
where->prev_ptr = value;
}
void sort() {
sort_custom<Comparator<T>>();
}
template <typename C>
void sort_custom() {
if (size() < 2) {
return;
}
SortList<Element, T, &Element::value, &Element::prev_ptr, &Element::next_ptr, C> sorter;
sorter.sort(_data->first, _data->last);
}
const void *id() const {
return (void *)_data;
}
/**
* copy constructor for the list
*/
List(const List &p_list) {
const Element *it = p_list.front();
while (it) {
push_back(it->get());
it = it->next();
}
}
List(List &&p_list) {
_data = p_list._data;
p_list._data = nullptr;
}
List() {}
List(std::initializer_list<T> p_init) {
for (const T &E : p_init) {
push_back(E);
}
}
~List() {
clear();
if (_data) {
ERR_FAIL_COND(_data->size_cache);
memdelete_allocator<_Data, A>(_data);
}
}
};
template <typename T, typename A>
void List<T, A>::Element::transfer_to_back(List<T, A> *p_dst_list) {
// Detach from current.
if (data->first == this) {
data->first = data->first->next_ptr;
}
if (data->last == this) {
data->last = data->last->prev_ptr;
}
if (prev_ptr) {
prev_ptr->next_ptr = next_ptr;
}
if (next_ptr) {
next_ptr->prev_ptr = prev_ptr;
}
data->size_cache--;
// Attach to the back of the new one.
if (!p_dst_list->_data) {
p_dst_list->_data = memnew_allocator(_Data, A);
p_dst_list->_data->first = this;
p_dst_list->_data->last = nullptr;
p_dst_list->_data->size_cache = 0;
prev_ptr = nullptr;
} else {
p_dst_list->_data->last->next_ptr = this;
prev_ptr = p_dst_list->_data->last;
}
p_dst_list->_data->last = this;
next_ptr = nullptr;
data = p_dst_list->_data;
p_dst_list->_data->size_cache++;
}
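// A minimal usage sketch (kept inactive): element-based iteration with
// erase-from-iterator, which is the main difference from STL-style lists.
// The function name is an illustrative assumption.
#if 0
inline void example_list_usage() {
	List<int> numbers;
	numbers.push_back(1);
	numbers.push_back(2);
	numbers.push_back(3);
	// Erase even values while walking the list; fetch next() before erasing,
	// since erase() frees the current element.
	for (List<int>::Element *E = numbers.front(); E;) {
		List<int>::Element *next = E->next();
		if (E->get() % 2 == 0) {
			E->erase();
		}
		E = next;
	}
	// Range-based iteration is also available through begin()/end().
	for (const int &value : numbers) {
		(void)value;
	}
}
#endif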


@@ -0,0 +1,413 @@
/**************************************************************************/
/* local_vector.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/error/error_macros.h"
#include "core/os/memory.h"
#include "core/templates/sort_array.h"
#include "core/templates/vector.h"
#include <initializer_list>
#include <type_traits>
// If tight, it grows strictly as much as needed.
// Otherwise, it grows exponentially (the default and what you want in most cases).
template <typename T, typename U = uint32_t, bool force_trivial = false, bool tight = false>
class LocalVector {
static_assert(!force_trivial, "force_trivial is no longer supported. Use resize_uninitialized instead.");
private:
U count = 0;
U capacity = 0;
T *data = nullptr;
template <bool p_init>
void _resize(U p_size) {
if (p_size < count) {
if constexpr (!std::is_trivially_destructible_v<T>) {
for (U i = p_size; i < count; i++) {
data[i].~T();
}
}
count = p_size;
} else if (p_size > count) {
reserve(p_size);
if constexpr (p_init) {
memnew_arr_placement(data + count, p_size - count);
} else {
static_assert(std::is_trivially_destructible_v<T>, "T must be trivially destructible to resize uninitialized");
}
count = p_size;
}
}
public:
_FORCE_INLINE_ T *ptr() { return data; }
_FORCE_INLINE_ const T *ptr() const { return data; }
_FORCE_INLINE_ U size() const { return count; }
_FORCE_INLINE_ Span<T> span() const { return Span(data, count); }
_FORCE_INLINE_ operator Span<T>() const { return span(); }
// Must take a copy instead of a reference (see GH-31736).
_FORCE_INLINE_ void push_back(T p_elem) {
if (unlikely(count == capacity)) {
reserve(count + 1);
}
memnew_placement(&data[count++], T(std::move(p_elem)));
}
void remove_at(U p_index) {
ERR_FAIL_UNSIGNED_INDEX(p_index, count);
count--;
for (U i = p_index; i < count; i++) {
data[i] = std::move(data[i + 1]);
}
data[count].~T();
}
/// Removes the item copying the last value into the position of the one to
/// remove. It's generally faster than `remove_at`.
void remove_at_unordered(U p_index) {
ERR_FAIL_INDEX(p_index, count);
count--;
if (count > p_index) {
data[p_index] = std::move(data[count]);
}
data[count].~T();
}
_FORCE_INLINE_ bool erase(const T &p_val) {
int64_t idx = find(p_val);
if (idx >= 0) {
remove_at(idx);
return true;
}
return false;
}
bool erase_unordered(const T &p_val) {
int64_t idx = find(p_val);
if (idx >= 0) {
remove_at_unordered(idx);
return true;
}
return false;
}
U erase_multiple_unordered(const T &p_val) {
U from = 0;
U occurrences = 0;
while (true) {
int64_t idx = find(p_val, from);
if (idx == -1) {
break;
}
remove_at_unordered(idx);
from = idx;
occurrences++;
}
return occurrences;
}
void reverse() {
for (U i = 0; i < count / 2; i++) {
SWAP(data[i], data[count - i - 1]);
}
}
#ifndef DISABLE_DEPRECATED
[[deprecated("Use reverse() instead")]] void invert() { reverse(); }
#endif
_FORCE_INLINE_ void clear() { resize(0); }
_FORCE_INLINE_ void reset() {
clear();
if (data) {
memfree(data);
data = nullptr;
capacity = 0;
}
}
_FORCE_INLINE_ bool is_empty() const { return count == 0; }
_FORCE_INLINE_ U get_capacity() const { return capacity; }
void reserve(U p_size) {
ERR_FAIL_COND_MSG(p_size < size(), "reserve() called with a capacity smaller than the current size. This is likely a mistake.");
if (p_size > capacity) {
if (tight) {
capacity = p_size;
} else {
capacity = MAX((U)2, capacity + ((1 + capacity) >> 1));
if (p_size > capacity) {
capacity = p_size;
}
}
data = (T *)memrealloc(data, capacity * sizeof(T));
CRASH_COND_MSG(!data, "Out of memory");
}
}
/// Resize the vector.
/// Elements are initialized (or not) depending on what the default C++ behavior for T is.
/// Note: force_trivial is no longer supported; use resize_uninitialized() to skip initialization.
void resize(U p_size) {
// Don't init when trivially constructible.
_resize<!std::is_trivially_constructible_v<T>>(p_size);
}
/// Resize and set all values to 0 / false / nullptr.
_FORCE_INLINE_ void resize_initialized(U p_size) { _resize<true>(p_size); }
/// Resize without initializing the new elements (their contents are indeterminate).
/// This is only available for trivially destructible types (otherwise, trivial resize might be UB).
_FORCE_INLINE_ void resize_uninitialized(U p_size) { _resize<false>(p_size); }
_FORCE_INLINE_ const T &operator[](U p_index) const {
CRASH_BAD_UNSIGNED_INDEX(p_index, count);
return data[p_index];
}
_FORCE_INLINE_ T &operator[](U p_index) {
CRASH_BAD_UNSIGNED_INDEX(p_index, count);
return data[p_index];
}
struct Iterator {
_FORCE_INLINE_ T &operator*() const {
return *elem_ptr;
}
_FORCE_INLINE_ T *operator->() const { return elem_ptr; }
_FORCE_INLINE_ Iterator &operator++() {
elem_ptr++;
return *this;
}
_FORCE_INLINE_ Iterator &operator--() {
elem_ptr--;
return *this;
}
_FORCE_INLINE_ bool operator==(const Iterator &b) const { return elem_ptr == b.elem_ptr; }
_FORCE_INLINE_ bool operator!=(const Iterator &b) const { return elem_ptr != b.elem_ptr; }
Iterator(T *p_ptr) { elem_ptr = p_ptr; }
Iterator() {}
Iterator(const Iterator &p_it) { elem_ptr = p_it.elem_ptr; }
private:
T *elem_ptr = nullptr;
};
struct ConstIterator {
_FORCE_INLINE_ const T &operator*() const {
return *elem_ptr;
}
_FORCE_INLINE_ const T *operator->() const { return elem_ptr; }
_FORCE_INLINE_ ConstIterator &operator++() {
elem_ptr++;
return *this;
}
_FORCE_INLINE_ ConstIterator &operator--() {
elem_ptr--;
return *this;
}
_FORCE_INLINE_ bool operator==(const ConstIterator &b) const { return elem_ptr == b.elem_ptr; }
_FORCE_INLINE_ bool operator!=(const ConstIterator &b) const { return elem_ptr != b.elem_ptr; }
ConstIterator(const T *p_ptr) { elem_ptr = p_ptr; }
ConstIterator() {}
ConstIterator(const ConstIterator &p_it) { elem_ptr = p_it.elem_ptr; }
private:
const T *elem_ptr = nullptr;
};
_FORCE_INLINE_ Iterator begin() {
return Iterator(data);
}
_FORCE_INLINE_ Iterator end() {
return Iterator(data + size());
}
_FORCE_INLINE_ ConstIterator begin() const {
return ConstIterator(ptr());
}
_FORCE_INLINE_ ConstIterator end() const {
return ConstIterator(ptr() + size());
}
void insert(U p_pos, T p_val) {
ERR_FAIL_UNSIGNED_INDEX(p_pos, count + 1);
if (p_pos == count) {
push_back(std::move(p_val));
} else {
resize(count + 1);
for (U i = count - 1; i > p_pos; i--) {
data[i] = std::move(data[i - 1]);
}
data[p_pos] = std::move(p_val);
}
}
int64_t find(const T &p_val, int64_t p_from = 0) const {
if (p_from < 0) {
p_from = size() + p_from;
}
if (p_from < 0 || p_from >= size()) {
return -1;
}
return span().find(p_val, p_from);
}
bool has(const T &p_val) const {
return find(p_val) != -1;
}
template <typename C>
void sort_custom() {
U len = count;
if (len == 0) {
return;
}
SortArray<T, C> sorter;
sorter.sort(data, len);
}
void sort() {
sort_custom<Comparator<T>>();
}
void ordered_insert(T p_val) {
U i;
for (i = 0; i < count; i++) {
if (p_val < data[i]) {
break;
}
}
insert(i, p_val);
}
operator Vector<T>() const {
Vector<T> ret;
ret.resize(count);
T *w = ret.ptrw();
if (w) {
if constexpr (std::is_trivially_copyable_v<T>) {
memcpy(w, data, sizeof(T) * count);
} else {
for (U i = 0; i < count; i++) {
w[i] = data[i];
}
}
}
return ret;
}
Vector<uint8_t> to_byte_array() const { // Useful to pass data to the GPU or a Variant.
Vector<uint8_t> ret;
ret.resize(count * sizeof(T));
uint8_t *w = ret.ptrw();
if (w) {
memcpy(w, data, sizeof(T) * count);
}
return ret;
}
_FORCE_INLINE_ LocalVector() {}
_FORCE_INLINE_ LocalVector(std::initializer_list<T> p_init) {
reserve(p_init.size());
for (const T &element : p_init) {
push_back(element);
}
}
_FORCE_INLINE_ LocalVector(const LocalVector &p_from) {
resize(p_from.size());
for (U i = 0; i < p_from.count; i++) {
data[i] = p_from.data[i];
}
}
_FORCE_INLINE_ LocalVector(LocalVector &&p_from) {
data = p_from.data;
count = p_from.count;
capacity = p_from.capacity;
p_from.data = nullptr;
p_from.count = 0;
p_from.capacity = 0;
}
inline void operator=(const LocalVector &p_from) {
resize(p_from.size());
for (U i = 0; i < p_from.count; i++) {
data[i] = p_from.data[i];
}
}
inline void operator=(const Vector<T> &p_from) {
resize(p_from.size());
for (U i = 0; i < count; i++) {
data[i] = p_from[i];
}
}
inline void operator=(LocalVector &&p_from) {
if (unlikely(this == &p_from)) {
return;
}
reset();
data = p_from.data;
count = p_from.count;
capacity = p_from.capacity;
p_from.data = nullptr;
p_from.count = 0;
p_from.capacity = 0;
}
inline void operator=(Vector<T> &&p_from) {
resize(p_from.size());
for (U i = 0; i < count; i++) {
data[i] = std::move(p_from[i]);
}
}
_FORCE_INLINE_ ~LocalVector() {
if (data) {
reset();
}
}
};
template <typename T, typename U = uint32_t>
using TightLocalVector = LocalVector<T, U, false, true>;
// Zero-constructing a LocalVector initializes count, capacity and data to 0, and thus leaves it empty.
template <typename T, typename U, bool force_trivial, bool tight>
struct is_zero_constructible<LocalVector<T, U, force_trivial, tight>> : std::true_type {};
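// A minimal usage sketch (kept inactive). With the default (non-tight) policy,
// capacity grows geometrically (roughly 1.5x), so repeated push_back() calls stay
// amortized O(1); TightLocalVector instead grows exactly to the requested size,
// trading reallocation cost for memory. The function name is an illustrative assumption.
#if 0
inline void example_local_vector_usage() {
	LocalVector<int> values;
	values.reserve(16); // Optional: avoids early reallocations.
	for (int i = 0; i < 100; i++) {
		values.push_back(i);
	}
	values.sort();
	int64_t idx = values.find(42); // Returns -1 if missing.
	if (idx >= 0) {
		values.remove_at_unordered(idx); // Swaps with the last element, no shifting.
	}
	TightLocalVector<uint8_t> bytes;
	bytes.resize_initialized(32); // Sized exactly, values set to 0.
}
#endif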

150
core/templates/lru.h Normal file

@@ -0,0 +1,150 @@
/**************************************************************************/
/* lru.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "hash_map.h"
#include "list.h"
template <typename TKey, typename TData, typename Hasher = HashMapHasherDefault, typename Comparator = HashMapComparatorDefault<TKey>, void (*BeforeEvict)(TKey &, TData &) = nullptr>
class LRUCache {
public:
struct Pair {
TKey key;
TData data;
Pair() {}
Pair(const TKey &p_key, const TData &p_data) :
key(p_key),
data(p_data) {
}
};
typedef typename List<Pair>::Element *Element;
private:
List<Pair> _list;
HashMap<TKey, Element, Hasher, Comparator> _map;
size_t capacity;
public:
const Pair *insert(const TKey &p_key, const TData &p_value) {
Element *e = _map.getptr(p_key);
Element n = _list.push_front(Pair(p_key, p_value));
if (e) {
GODOT_GCC_WARNING_PUSH_AND_IGNORE("-Waddress")
if constexpr (BeforeEvict != nullptr) {
BeforeEvict((*e)->get().key, (*e)->get().data);
}
GODOT_GCC_WARNING_POP
_list.erase(*e);
_map.erase(p_key);
}
_map[p_key] = _list.front();
while (_map.size() > capacity) {
Element d = _list.back();
GODOT_GCC_WARNING_PUSH_AND_IGNORE("-Waddress")
if constexpr (BeforeEvict != nullptr) {
BeforeEvict(d->get().key, d->get().data);
}
GODOT_GCC_WARNING_POP
_map.erase(d->get().key);
_list.pop_back();
}
return &n->get();
}
void clear() {
_map.clear();
_list.clear();
}
bool has(const TKey &p_key) const {
return _map.getptr(p_key);
}
bool erase(const TKey &p_key) {
Element *e = _map.getptr(p_key);
if (!e) {
return false;
}
_list.move_to_front(*e);
_map.erase(p_key);
_list.pop_front();
return true;
}
const TData &get(const TKey &p_key) {
Element *e = _map.getptr(p_key);
CRASH_COND(!e);
_list.move_to_front(*e);
return (*e)->get().data;
}
const TData *getptr(const TKey &p_key) {
Element *e = _map.getptr(p_key);
if (!e) {
return nullptr;
} else {
_list.move_to_front(*e);
return &(*e)->get().data;
}
}
_FORCE_INLINE_ size_t get_capacity() const { return capacity; }
_FORCE_INLINE_ size_t get_size() const { return _map.size(); }
void set_capacity(size_t p_capacity) {
if (capacity > 0) {
capacity = p_capacity;
while (_map.size() > capacity) {
Element d = _list.back();
GODOT_GCC_WARNING_PUSH_AND_IGNORE("-Waddress")
if constexpr (BeforeEvict != nullptr) {
BeforeEvict(d->get().key, d->get().data);
}
GODOT_GCC_WARNING_POP
_map.erase(d->get().key);
_list.pop_back();
}
}
}
LRUCache() {
capacity = 64;
}
LRUCache(int p_capacity) {
capacity = p_capacity;
}
};
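// A minimal usage sketch (kept inactive): a small cache keyed by int, with an
// optional eviction callback. The callback and function names are illustrative
// assumptions showing the expected BeforeEvict signature.
#if 0
static void example_on_evict(int &p_key, String &p_data) {
	// Called right before an entry is evicted or replaced.
}

inline void example_lru_usage() {
	LRUCache<int, String, HashMapHasherDefault, HashMapComparatorDefault<int>, example_on_evict> cache;
	cache.set_capacity(2);
	cache.insert(1, "one");
	cache.insert(2, "two");
	cache.insert(3, "three"); // Evicts key 1, the least recently used entry.
	if (cache.has(2)) {
		const String &value = cache.get(2); // Also marks key 2 as most recently used.
		(void)value;
	}
}
#endif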


@@ -0,0 +1,178 @@
/**************************************************************************/
/* paged_allocator.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/core_globals.h"
#include "core/os/memory.h"
#include "core/os/spin_lock.h"
#include "core/string/ustring.h"
#include "core/typedefs.h"
#include <type_traits>
#include <typeinfo> // IWYU pragma: keep // Used in macro.
template <typename T, bool thread_safe = false, uint32_t DEFAULT_PAGE_SIZE = 4096>
class PagedAllocator {
T **page_pool = nullptr;
T ***available_pool = nullptr;
uint32_t pages_allocated = 0;
uint32_t allocs_available = 0;
uint32_t page_shift = 0;
uint32_t page_mask = 0;
uint32_t page_size = 0;
mutable SpinLock spin_lock; // Mutable so const methods (e.g. is_configured()) can lock when thread_safe is enabled.
public:
template <typename... Args>
T *alloc(Args &&...p_args) {
if constexpr (thread_safe) {
spin_lock.lock();
}
if (unlikely(allocs_available == 0)) {
uint32_t pages_used = pages_allocated;
pages_allocated++;
page_pool = (T **)memrealloc(page_pool, sizeof(T *) * pages_allocated);
available_pool = (T ***)memrealloc(available_pool, sizeof(T **) * pages_allocated);
page_pool[pages_used] = (T *)memalloc(sizeof(T) * page_size);
available_pool[pages_used] = (T **)memalloc(sizeof(T *) * page_size);
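// allocs_available is 0 here, so the new page's free-slot pointers occupy
// global stack indices [0, page_size), which all map into available_pool[0].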
for (uint32_t i = 0; i < page_size; i++) {
available_pool[0][i] = &page_pool[pages_used][i];
}
allocs_available += page_size;
}
allocs_available--;
T *alloc = available_pool[allocs_available >> page_shift][allocs_available & page_mask];
if constexpr (thread_safe) {
spin_lock.unlock();
}
memnew_placement(alloc, T(p_args...));
return alloc;
}
void free(T *p_mem) {
if constexpr (thread_safe) {
spin_lock.lock();
}
p_mem->~T();
available_pool[allocs_available >> page_shift][allocs_available & page_mask] = p_mem;
allocs_available++;
if constexpr (thread_safe) {
spin_lock.unlock();
}
}
template <typename... Args>
T *new_allocation(Args &&...p_args) { return alloc(p_args...); }
void delete_allocation(T *p_mem) { free(p_mem); }
private:
void _reset(bool p_allow_unfreed) {
if (!p_allow_unfreed || !std::is_trivially_destructible_v<T>) {
ERR_FAIL_COND(allocs_available < pages_allocated * page_size);
}
if (pages_allocated) {
for (uint32_t i = 0; i < pages_allocated; i++) {
memfree(page_pool[i]);
memfree(available_pool[i]);
}
memfree(page_pool);
memfree(available_pool);
page_pool = nullptr;
available_pool = nullptr;
pages_allocated = 0;
allocs_available = 0;
}
}
public:
void reset(bool p_allow_unfreed = false) {
if constexpr (thread_safe) {
spin_lock.lock();
}
_reset(p_allow_unfreed);
if constexpr (thread_safe) {
spin_lock.unlock();
}
}
bool is_configured() const {
if constexpr (thread_safe) {
spin_lock.lock();
}
bool result = page_size > 0;
if constexpr (thread_safe) {
spin_lock.unlock();
}
return result;
}
void configure(uint32_t p_page_size) {
if constexpr (thread_safe) {
spin_lock.lock();
}
ERR_FAIL_COND(page_pool != nullptr); // Safety check.
ERR_FAIL_COND(p_page_size == 0);
page_size = nearest_power_of_2_templated(p_page_size);
page_mask = page_size - 1;
page_shift = get_shift_from_power_of_2(page_size);
if constexpr (thread_safe) {
spin_lock.unlock();
}
}
// Power of 2 recommended because of alignment with OS page sizes.
// Even if the element is bigger, it's still a multiple and gets rounded to a whole number of pages.
PagedAllocator(uint32_t p_page_size = DEFAULT_PAGE_SIZE) {
configure(p_page_size);
}
~PagedAllocator() {
if constexpr (thread_safe) {
spin_lock.lock();
}
bool leaked = allocs_available < pages_allocated * page_size;
if (leaked) {
if (CoreGlobals::leak_reporting_enabled) {
ERR_PRINT(String("Pages in use exist at exit in PagedAllocator: ") + String(typeid(T).name()));
}
} else {
_reset(false);
}
if constexpr (thread_safe) {
spin_lock.unlock();
}
}
};
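// A minimal usage sketch (kept inactive): allocating and freeing objects from
// page-based storage. The Item struct and function name are illustrative assumptions.
#if 0
struct Item {
	int id = 0;
	Item(int p_id) :
			id(p_id) {}
};

inline void example_paged_allocator_usage() {
	PagedAllocator<Item> allocator; // Uses the default 4096-element pages.
	Item *a = allocator.alloc(1);
	Item *b = allocator.alloc(2);
	allocator.free(a);
	allocator.free(b);
	// All allocations must have been freed before reset(); reset(true) only
	// tolerates leftover allocations for trivially destructible types.
	allocator.reset();
}
#endif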


@@ -0,0 +1,373 @@
/**************************************************************************/
/* paged_array.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/os/memory.h"
#include "core/os/spin_lock.h"
#include "core/typedefs.h"
#include <type_traits>
// PagedArray is used mainly for filling a very large array from multiple threads efficiently and without causing major fragmentation.
// PagedArrayPool manages central page allocation in a thread-safe manner.
template <typename T>
class PagedArrayPool {
T **page_pool = nullptr;
uint32_t pages_allocated = 0;
uint32_t *available_page_pool = nullptr;
uint32_t pages_available = 0;
uint32_t page_size = 0;
SpinLock spin_lock;
public:
struct PageInfo {
T *page = nullptr;
uint32_t page_id = 0;
};
PageInfo alloc_page() {
spin_lock.lock();
if (unlikely(pages_available == 0)) {
uint32_t pages_used = pages_allocated;
pages_allocated++;
page_pool = (T **)memrealloc(page_pool, sizeof(T *) * pages_allocated);
available_page_pool = (uint32_t *)memrealloc(available_page_pool, sizeof(uint32_t) * pages_allocated);
page_pool[pages_used] = (T *)memalloc(sizeof(T) * page_size);
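// pages_available is 0 here, so the newly allocated page id is pushed
// into slot 0 of the availability stack.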
available_page_pool[0] = pages_used;
pages_available++;
}
pages_available--;
uint32_t page_id = available_page_pool[pages_available];
T *page = page_pool[page_id];
spin_lock.unlock();
return PageInfo{ page, page_id };
}
void free_page(uint32_t p_page_id) {
spin_lock.lock();
available_page_pool[pages_available] = p_page_id;
pages_available++;
spin_lock.unlock();
}
uint32_t get_page_size_shift() const {
return get_shift_from_power_of_2(page_size);
}
uint32_t get_page_size_mask() const {
return page_size - 1;
}
void reset() {
ERR_FAIL_COND(pages_available < pages_allocated);
if (pages_allocated) {
for (uint32_t i = 0; i < pages_allocated; i++) {
memfree(page_pool[i]);
}
memfree(page_pool);
memfree(available_page_pool);
page_pool = nullptr;
available_page_pool = nullptr;
pages_allocated = 0;
pages_available = 0;
}
}
bool is_configured() const {
return page_size > 0;
}
void configure(uint32_t p_page_size) {
ERR_FAIL_COND(page_pool != nullptr); // Safety check.
ERR_FAIL_COND(p_page_size == 0);
page_size = nearest_power_of_2_templated(p_page_size);
}
PagedArrayPool(uint32_t p_page_size = 4096) { // Power of 2 recommended because of alignment with OS page sizes. Even if the element is bigger, it's still a multiple and gets rounded to a whole number of pages.
configure(p_page_size);
}
~PagedArrayPool() {
ERR_FAIL_COND_MSG(pages_available < pages_allocated, "Pages in use exist at exit in PagedArrayPool");
reset();
}
};
// PagedArray is a local array that is optimized to grow in place, then be cleared often.
// It does so by allocating pages from a PagedArrayPool.
// It is safe to use multiple PagedArrays from different threads, sharing a single PagedArrayPool.
template <typename T>
class PagedArray {
PagedArrayPool<T> *page_pool = nullptr;
T **page_data = nullptr;
uint32_t *page_ids = nullptr;
uint32_t max_pages_used = 0;
uint32_t page_size_shift = 0;
uint32_t page_size_mask = 0;
uint64_t count = 0;
_FORCE_INLINE_ uint32_t _get_pages_in_use() const {
if (count == 0) {
return 0;
} else {
return ((count - 1) >> page_size_shift) + 1;
}
}
void _grow_page_array() {
//no more room in the page array to put the new page, make room
if (max_pages_used == 0) {
max_pages_used = 1;
} else {
max_pages_used *= 2; // increase in powers of 2 to keep allocations to minimum
}
page_data = (T **)memrealloc(page_data, sizeof(T *) * max_pages_used);
page_ids = (uint32_t *)memrealloc(page_ids, sizeof(uint32_t) * max_pages_used);
}
public:
_FORCE_INLINE_ const T &operator[](uint64_t p_index) const {
CRASH_BAD_UNSIGNED_INDEX(p_index, count);
uint32_t page = p_index >> page_size_shift;
uint32_t offset = p_index & page_size_mask;
return page_data[page][offset];
}
_FORCE_INLINE_ T &operator[](uint64_t p_index) {
CRASH_BAD_UNSIGNED_INDEX(p_index, count);
uint32_t page = p_index >> page_size_shift;
uint32_t offset = p_index & page_size_mask;
return page_data[page][offset];
}
_FORCE_INLINE_ void push_back(const T &p_value) {
uint32_t remainder = count & page_size_mask;
if (unlikely(remainder == 0)) {
// at 0, so time to request a new page
uint32_t page_count = _get_pages_in_use();
uint32_t new_page_count = page_count + 1;
if (unlikely(new_page_count > max_pages_used)) {
ERR_FAIL_NULL(page_pool); // Safety check.
_grow_page_array(); //keep out of inline
}
typename PagedArrayPool<T>::PageInfo page_info = page_pool->alloc_page();
page_data[page_count] = page_info.page;
page_ids[page_count] = page_info.page_id;
}
// place the new value
uint32_t page = count >> page_size_shift;
uint32_t offset = count & page_size_mask;
if constexpr (!std::is_trivially_constructible_v<T>) {
memnew_placement(&page_data[page][offset], T(p_value));
} else {
page_data[page][offset] = p_value;
}
count++;
}
_FORCE_INLINE_ void pop_back() {
ERR_FAIL_COND(count == 0);
if constexpr (!std::is_trivially_destructible_v<T>) {
uint32_t page = (count - 1) >> page_size_shift;
uint32_t offset = (count - 1) & page_size_mask;
page_data[page][offset].~T();
}
uint32_t remainder = count & page_size_mask;
if (unlikely(remainder == 1)) {
// one element remained, so page must be freed.
uint32_t last_page = _get_pages_in_use() - 1;
page_pool->free_page(page_ids[last_page]);
}
count--;
}
void remove_at_unordered(uint64_t p_index) {
ERR_FAIL_UNSIGNED_INDEX(p_index, count);
(*this)[p_index] = (*this)[count - 1];
pop_back();
}
void clear() {
//destruct if needed
if constexpr (!std::is_trivially_destructible_v<T>) {
for (uint64_t i = 0; i < count; i++) {
uint32_t page = i >> page_size_shift;
uint32_t offset = i & page_size_mask;
page_data[page][offset].~T();
}
}
// Return the pages to the page pool, so they can be used by another array eventually.
uint32_t pages_used = _get_pages_in_use();
for (uint32_t i = 0; i < pages_used; i++) {
page_pool->free_page(page_ids[i]);
}
count = 0;
// Note: we leave page_data and page_ids intact for the next use. If you really want to clear them, call reset().
}
void reset() {
clear();
if (page_data) {
memfree(page_data);
memfree(page_ids);
page_data = nullptr;
page_ids = nullptr;
max_pages_used = 0;
}
}
// This takes the pages from a source array and merges them to this one
// resulting order is undefined, but content is merged very efficiently,
// making it ideal to fill content on several threads to later join it.
void merge_unordered(PagedArray<T> &p_array) {
ERR_FAIL_COND(page_pool != p_array.page_pool);
uint32_t remainder = count & page_size_mask;
T *remainder_page = nullptr;
uint32_t remainder_page_id = 0;
if (remainder > 0) {
uint32_t last_page = _get_pages_in_use() - 1;
remainder_page = page_data[last_page];
remainder_page_id = page_ids[last_page];
}
count -= remainder;
uint32_t src_page_index = 0;
uint32_t page_size = page_size_mask + 1;
while (p_array.count > 0) {
uint32_t page_count = _get_pages_in_use();
uint32_t new_page_count = page_count + 1;
if (unlikely(new_page_count > max_pages_used)) {
_grow_page_array(); //keep out of inline
}
page_data[page_count] = p_array.page_data[src_page_index];
page_ids[page_count] = p_array.page_ids[src_page_index];
uint32_t take = MIN(p_array.count, page_size); //pages to take away
p_array.count -= take;
count += take;
src_page_index++;
}
//handle the remainder page if exists
if (remainder_page) {
uint32_t new_remainder = count & page_size_mask;
if (new_remainder > 0) {
//must merge old remainder with new remainder
T *dst_page = page_data[_get_pages_in_use() - 1];
uint32_t to_copy = MIN(page_size - new_remainder, remainder);
for (uint32_t i = 0; i < to_copy; i++) {
if constexpr (!std::is_trivially_constructible_v<T>) {
memnew_placement(&dst_page[i + new_remainder], T(remainder_page[i + remainder - to_copy]));
} else {
dst_page[i + new_remainder] = remainder_page[i + remainder - to_copy];
}
if constexpr (!std::is_trivially_destructible_v<T>) {
remainder_page[i + remainder - to_copy].~T();
}
}
remainder -= to_copy; //subtract what was copied from remainder
count += to_copy; //add what was copied to the count
if (remainder == 0) {
//entire remainder copied, let go of remainder page
page_pool->free_page(remainder_page_id);
remainder_page = nullptr;
}
}
if (remainder > 0) {
//there is still remainder, append it
uint32_t page_count = _get_pages_in_use();
uint32_t new_page_count = page_count + 1;
if (unlikely(new_page_count > max_pages_used)) {
_grow_page_array(); //keep out of inline
}
page_data[page_count] = remainder_page;
page_ids[page_count] = remainder_page_id;
count += remainder;
}
}
}
_FORCE_INLINE_ uint64_t size() const {
return count;
}
void set_page_pool(PagedArrayPool<T> *p_page_pool) {
ERR_FAIL_COND(max_pages_used > 0); // Safety check.
page_pool = p_page_pool;
page_size_mask = page_pool->get_page_size_mask();
page_size_shift = page_pool->get_page_size_shift();
}
~PagedArray() {
reset();
}
};
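// A minimal usage sketch (kept inactive): several arrays sharing one pool, filled
// independently (e.g. from different threads) and then merged. The function name
// is an illustrative assumption.
#if 0
inline void example_paged_array_usage() {
	PagedArrayPool<uint32_t> pool; // Shared, thread-safe page source.
	PagedArray<uint32_t> a;
	PagedArray<uint32_t> b;
	a.set_page_pool(&pool);
	b.set_page_pool(&pool);

	for (uint32_t i = 0; i < 10000; i++) {
		a.push_back(i);
		b.push_back(i * 2);
	}

	a.merge_unordered(b); // b ends up empty; element order in a is undefined.
	uint64_t total = a.size(); // 20000.
	(void)total;

	a.reset(); // Returns the pages to the pool before the pool goes away.
}
#endif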

95
core/templates/pair.h Normal file

@@ -0,0 +1,95 @@
/**************************************************************************/
/* pair.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/typedefs.h"
template <typename F, typename S>
struct Pair {
F first{};
S second{};
constexpr Pair() = default;
constexpr Pair(const F &p_first, const S &p_second) :
first(p_first), second(p_second) {}
constexpr bool operator==(const Pair &p_other) const { return first == p_other.first && second == p_other.second; }
constexpr bool operator!=(const Pair &p_other) const { return first != p_other.first || second != p_other.second; }
constexpr bool operator<(const Pair &p_other) const { return first == p_other.first ? (second < p_other.second) : (first < p_other.first); }
constexpr bool operator<=(const Pair &p_other) const { return first == p_other.first ? (second <= p_other.second) : (first < p_other.first); }
constexpr bool operator>(const Pair &p_other) const { return first == p_other.first ? (second > p_other.second) : (first > p_other.first); }
constexpr bool operator>=(const Pair &p_other) const { return first == p_other.first ? (second >= p_other.second) : (first > p_other.first); }
};
template <typename F, typename S>
struct PairSort {
constexpr bool operator()(const Pair<F, S> &p_lhs, const Pair<F, S> &p_rhs) const {
return p_lhs < p_rhs;
}
};
// Pair is zero-constructible if and only if both contained types are zero-constructible.
template <typename F, typename S>
struct is_zero_constructible<Pair<F, S>> : std::conjunction<is_zero_constructible<F>, is_zero_constructible<S>> {};
template <typename K, typename V>
struct KeyValue {
const K key{};
V value{};
KeyValue &operator=(const KeyValue &p_kv) = delete;
KeyValue &operator=(KeyValue &&p_kv) = delete;
constexpr KeyValue(const KeyValue &p_kv) = default;
constexpr KeyValue(KeyValue &&p_kv) = default;
constexpr KeyValue(const K &p_key, const V &p_value) :
key(p_key), value(p_value) {}
constexpr KeyValue(const Pair<K, V> &p_pair) :
key(p_pair.first), value(p_pair.second) {}
constexpr bool operator==(const KeyValue &p_other) const { return key == p_other.key && value == p_other.value; }
constexpr bool operator!=(const KeyValue &p_other) const { return key != p_other.key || value != p_other.value; }
constexpr bool operator<(const KeyValue &p_other) const { return key == p_other.key ? (value < p_other.value) : (key < p_other.key); }
constexpr bool operator<=(const KeyValue &p_other) const { return key == p_other.key ? (value <= p_other.value) : (key < p_other.key); }
constexpr bool operator>(const KeyValue &p_other) const { return key == p_other.key ? (value > p_other.value) : (key > p_other.key); }
constexpr bool operator>=(const KeyValue &p_other) const { return key == p_other.key ? (value >= p_other.value) : (key > p_other.key); }
};
template <typename K, typename V>
struct KeyValueSort {
constexpr bool operator()(const KeyValue<K, V> &p_lhs, const KeyValue<K, V> &p_rhs) const {
return p_lhs.key < p_rhs.key;
}
};
// KeyValue is zero-constructible if and only if both contained types are zero-constructible.
template <typename K, typename V>
struct is_zero_constructible<KeyValue<K, V>> : std::conjunction<is_zero_constructible<K>, is_zero_constructible<V>> {};
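// A minimal usage sketch (kept inactive): sorting pairs by first, then second,
// using the comparators above together with LocalVector. The function name is
// an illustrative assumption.
#if 0
inline void example_pair_usage() {
	LocalVector<Pair<int, String>> entries;
	entries.push_back(Pair<int, String>(2, "b"));
	entries.push_back(Pair<int, String>(1, "a"));
	entries.push_back(Pair<int, String>(1, "c"));
	entries.sort_custom<PairSort<int, String>>();
	// Resulting order: (1, "a"), (1, "c"), (2, "b").
}
#endif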

161
core/templates/pass_func.h Normal file

@@ -0,0 +1,161 @@
/**************************************************************************/
/* pass_func.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#define PASS0R(m_r, m_name) \
m_r m_name() { \
return PASSBASE->m_name(); \
}
#define PASS0RC(m_r, m_name) \
m_r m_name() const { \
return PASSBASE->m_name(); \
}
#define PASS1R(m_r, m_name, m_type1) \
m_r m_name(m_type1 arg1) { \
return PASSBASE->m_name(arg1); \
}
#define PASS1RC(m_r, m_name, m_type1) \
m_r m_name(m_type1 arg1) const { \
return PASSBASE->m_name(arg1); \
}
#define PASS2R(m_r, m_name, m_type1, m_type2) \
m_r m_name(m_type1 arg1, m_type2 arg2) { \
return PASSBASE->m_name(arg1, arg2); \
}
#define PASS2RC(m_r, m_name, m_type1, m_type2) \
m_r m_name(m_type1 arg1, m_type2 arg2) const { \
return PASSBASE->m_name(arg1, arg2); \
}
#define PASS3R(m_r, m_name, m_type1, m_type2, m_type3) \
m_r m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3) { \
return PASSBASE->m_name(arg1, arg2, arg3); \
}
#define PASS3RC(m_r, m_name, m_type1, m_type2, m_type3) \
m_r m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3) const { \
return PASSBASE->m_name(arg1, arg2, arg3); \
}
#define PASS4R(m_r, m_name, m_type1, m_type2, m_type3, m_type4) \
m_r m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4) { \
return PASSBASE->m_name(arg1, arg2, arg3, arg4); \
}
#define PASS4RC(m_r, m_name, m_type1, m_type2, m_type3, m_type4) \
m_r m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4) const { \
return PASSBASE->m_name(arg1, arg2, arg3, arg4); \
}
#define PASS5R(m_r, m_name, m_type1, m_type2, m_type3, m_type4, m_type5) \
m_r m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5) { \
return PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5); \
}
#define PASS5RC(m_r, m_name, m_type1, m_type2, m_type3, m_type4, m_type5) \
m_r m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5) const { \
return PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5); \
}
#define PASS6R(m_r, m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6) \
m_r m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6) { \
return PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6); \
}
#define PASS6RC(m_r, m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6) \
m_r m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6) const { \
return PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6); \
}
#define PASS0(m_name) \
void m_name() { \
PASSBASE->m_name(); \
}
#define PASS1(m_name, m_type1) \
void m_name(m_type1 arg1) { \
PASSBASE->m_name(arg1); \
}
#define PASS1C(m_name, m_type1) \
void m_name(m_type1 arg1) const { \
PASSBASE->m_name(arg1); \
}
#define PASS2(m_name, m_type1, m_type2) \
void m_name(m_type1 arg1, m_type2 arg2) { \
PASSBASE->m_name(arg1, arg2); \
}
#define PASS2C(m_name, m_type1, m_type2) \
void m_name(m_type1 arg1, m_type2 arg2) const { \
PASSBASE->m_name(arg1, arg2); \
}
#define PASS3(m_name, m_type1, m_type2, m_type3) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3) { \
PASSBASE->m_name(arg1, arg2, arg3); \
}
#define PASS4(m_name, m_type1, m_type2, m_type3, m_type4) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4); \
}
#define PASS5(m_name, m_type1, m_type2, m_type3, m_type4, m_type5) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5); \
}
#define PASS6(m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6); \
}
#define PASS7(m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6, m_type7) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6, m_type7 arg7) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6, arg7); \
}
#define PASS8(m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6, m_type7, m_type8) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6, m_type7 arg7, m_type8 arg8) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); \
}
#define PASS9(m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6, m_type7, m_type8, m_type9) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6, m_type7 arg7, m_type8 arg8, m_type9 arg9) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); \
}
#define PASS10(m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6, m_type7, m_type8, m_type9, m_type10) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6, m_type7 arg7, m_type8 arg8, m_type9 arg9, m_type10 arg10) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10); \
}
#define PASS11(m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6, m_type7, m_type8, m_type9, m_type10, m_type11) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6, m_type7 arg7, m_type8 arg8, m_type9 arg9, m_type10 arg10, m_type11 arg11) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11); \
}
#define PASS12(m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6, m_type7, m_type8, m_type9, m_type10, m_type11, m_type12) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6, m_type7 arg7, m_type8 arg8, m_type9 arg9, m_type10 arg10, m_type11 arg11, m_type12 arg12) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12); \
}
#define PASS13(m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6, m_type7, m_type8, m_type9, m_type10, m_type11, m_type12, m_type13) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6, m_type7 arg7, m_type8 arg8, m_type9 arg9, m_type10 arg10, m_type11 arg11, m_type12 arg12, m_type13 arg13) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13); \
}
#define PASS14(m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6, m_type7, m_type8, m_type9, m_type10, m_type11, m_type12, m_type13, m_type14) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6, m_type7 arg7, m_type8 arg8, m_type9 arg9, m_type10 arg10, m_type11 arg11, m_type12 arg12, m_type13 arg13, m_type14 arg14) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14); \
}
#define PASS15(m_name, m_type1, m_type2, m_type3, m_type4, m_type5, m_type6, m_type7, m_type8, m_type9, m_type10, m_type11, m_type12, m_type13, m_type14, m_type15) \
void m_name(m_type1 arg1, m_type2 arg2, m_type3 arg3, m_type4 arg4, m_type5 arg5, m_type6 arg6, m_type7 arg7, m_type8 arg8, m_type9 arg9, m_type10 arg10, m_type11 arg11, m_type12 arg12, m_type13 arg13, m_type14 arg14, m_type15 arg15) { \
PASSBASE->m_name(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15); \
}
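// Illustrative usage sketch (hypothetical Impl/Wrapper names, not part of the
// engine): the including file defines PASSBASE to name the wrapped object and
// then declares pass-through methods with the macros above.
#if 0
class Impl {
public:
	void set_active(bool p_active);
	float compute(int p_a, int p_b, int p_c) const;
};

class Wrapper {
	Impl *impl = nullptr;

#define PASSBASE impl
	PASS1(set_active, bool)
	// Expands to: void set_active(bool arg1) { impl->set_active(arg1); }
	PASS3RC(float, compute, int, int, int)
	// Expands to: float compute(int arg1, int arg2, int arg3) const { return impl->compute(arg1, arg2, arg3); }
#undef PASSBASE
};
#endif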


@@ -0,0 +1,212 @@
/**************************************************************************/
/* pooled_list.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
// Simple template to provide a pool with O(1) allocate and free.
// The freelist could alternatively be a linked list placed within the unused elements
// to use less memory; however, a separate freelist is probably more cache friendly.
// NOTE: Take great care when using this with non-POD types. Construction and destruction
// are done in the LocalVector, NOT as part of the pool. So requesting a new item does not guarantee
// a constructor is run, and freeing does not guarantee a destructor is run.
// You should generally clear an item explicitly after a request, as it may contain 'leftovers'.
// This is by design for fastest use in the BVH. If you want a more general pool
// that does call constructors / destructors on request / free, it should probably be
// a separate template.
// The zero_on_first_request feature is optional and is useful for e.g. pools of handles,
// which may use a ref count that we want initialized to zero the first time a handle is created,
// but left alone on subsequent allocations (as it will typically be incremented).
// Note that there is no function to compact the pool - this would
// invalidate any existing pool IDs held externally.
// Compaction could be done, but it would rely on a more complex method
// of preferentially handing out lower IDs from the freelist first.
#include "core/templates/local_vector.h"
template <typename T, typename U = uint32_t, bool force_trivial = false, bool zero_on_first_request = false>
class PooledList {
LocalVector<T, U> list;
LocalVector<U, U> freelist;
// not all list members are necessarily used
U _used_size;
public:
PooledList() {
_used_size = 0;
}
// Use with care: in most cases you should make sure to
// free all elements first (i.e. _used_size would be zero),
// although it can also be used without this as an optimization
// in some cases.
void clear() {
list.clear();
freelist.clear();
_used_size = 0;
}
uint64_t estimate_memory_use() const {
return ((uint64_t)list.size() * sizeof(T)) + ((uint64_t)freelist.size() * sizeof(U));
}
const T &operator[](U p_index) const {
return list[p_index];
}
T &operator[](U p_index) {
return list[p_index];
}
// To be explicit in a pool there is a distinction
// between the number of elements that are currently
// in use, and the number of elements that have been reserved.
// Using size() would be vague.
U used_size() const { return _used_size; }
U reserved_size() const { return list.size(); }
T *request(U &r_id) {
_used_size++;
if (freelist.size()) {
// pop from freelist
int new_size = freelist.size() - 1;
r_id = freelist[new_size];
freelist.resize_uninitialized(new_size);
return &list[r_id];
}
r_id = list.size();
if constexpr (force_trivial || std::is_trivially_constructible_v<T>) {
list.resize_uninitialized(r_id + 1);
} else {
list.resize_initialized(r_id + 1);
}
static_assert((!zero_on_first_request) || (__is_pod(T)), "zero_on_first_request requires trivial type");
if constexpr (zero_on_first_request && __is_pod(T)) {
list[r_id] = {};
}
return &list[r_id];
}
void free(const U &p_id) {
// should not be on free list already
ERR_FAIL_UNSIGNED_INDEX(p_id, list.size());
freelist.push_back(p_id);
ERR_FAIL_COND_MSG(!_used_size, "_used_size has become out of sync, have you double freed an item?");
_used_size--;
}
};
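// Minimal usage sketch (illustrative only, not part of the engine): request()
// hands back a pointer and an ID, the caller initializes the element itself
// (the pool does not construct on request), and free() returns the slot to
// the freelist in O(1).
#if 0
inline void pooled_list_example() {
	PooledList<uint32_t> pool;

	uint32_t id = 0;
	uint32_t *item = pool.request(id);
	*item = 123; // Caller initializes; the slot may contain leftovers.

	// used_size() is now 1, reserved_size() is at least 1.

	pool.free(id); // The ID may be handed out again by a later request().
}
#endif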
// a pooled list which automatically keeps a list of the active members
template <typename T, typename U = uint32_t, bool force_trivial = false, bool zero_on_first_request = false>
class TrackedPooledList {
public:
U pool_used_size() const { return _pool.used_size(); }
U pool_reserved_size() const { return _pool.reserved_size(); }
U active_size() const { return _active_list.size(); }
// use with care, see the earlier notes in the PooledList clear()
void clear() {
_pool.clear();
_active_list.clear();
_active_map.clear();
}
U get_active_id(U p_index) const {
return _active_list[p_index];
}
const T &get_active(U p_index) const {
return _pool[get_active_id(p_index)];
}
T &get_active(U p_index) {
return _pool[get_active_id(p_index)];
}
const T &operator[](U p_index) const {
return _pool[p_index];
}
T &operator[](U p_index) {
return _pool[p_index];
}
T *request(U &r_id) {
T *item = _pool.request(r_id);
// add to the active list
U active_list_id = _active_list.size();
_active_list.push_back(r_id);
// expand the active map (this should be in sync with the pool list)
if (_pool.used_size() > _active_map.size()) {
_active_map.resize_uninitialized(_pool.used_size());
}
// store in the active map
_active_map[r_id] = active_list_id;
return item;
}
void free(const U &p_id) {
_pool.free(p_id);
// remove from the active list.
U list_id = _active_map[p_id];
// invalidate the _active_map entry to help detect bugs (only in debug?)
_active_map[p_id] = -1;
_active_list.remove_unordered(list_id);
// keep the replacement in sync with the correct list Id
if (list_id < _active_list.size()) {
// which pool id has been replaced in the active list
U replacement_id = _active_list[list_id];
// keep that replacement's map entry up to date with its new position
_active_map[replacement_id] = list_id;
}
}
const LocalVector<U, U> &get_active_list() const { return _active_list; }
private:
PooledList<T, U, force_trivial, zero_on_first_request> _pool;
LocalVector<U, U> _active_map;
LocalVector<U, U> _active_list;
};
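// Minimal usage sketch (illustrative only, not part of the engine): the tracked
// variant additionally records which pool IDs are active, so iteration can skip
// free slots entirely.
#if 0
inline void tracked_pooled_list_example() {
	TrackedPooledList<uint32_t> pool;

	uint32_t id_a = 0, id_b = 0;
	*pool.request(id_a) = 1;
	*pool.request(id_b) = 2;

	// Iterate only the active members (order is not guaranteed).
	for (uint32_t i = 0; i < pool.active_size(); i++) {
		pool.get_active(i) += 10;
	}

	pool.free(id_a); // Unordered remove from the active list.
}
#endif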

core/templates/rb_map.h Normal file

@@ -0,0 +1,778 @@
/**************************************************************************/
/* rb_map.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/error/error_macros.h"
#include "core/os/memory.h"
#include "core/templates/pair.h"
#include <initializer_list>
// based on the very nice implementation of rb-trees by:
// https://web.archive.org/web/20120507164830/https://web.mit.edu/~emin/www/source_code/red_black_tree/index.html
template <typename K, typename V, typename C = Comparator<K>, typename A = DefaultAllocator>
class RBMap {
enum Color {
RED,
BLACK
};
struct _Data;
public:
class Element {
private:
friend class RBMap<K, V, C, A>;
int color = RED;
Element *right = nullptr;
Element *left = nullptr;
Element *parent = nullptr;
Element *_next = nullptr;
Element *_prev = nullptr;
KeyValue<K, V> _data;
public:
KeyValue<K, V> &key_value() { return _data; }
const KeyValue<K, V> &key_value() const { return _data; }
const Element *next() const {
return _next;
}
Element *next() {
return _next;
}
const Element *prev() const {
return _prev;
}
Element *prev() {
return _prev;
}
const K &key() const {
return _data.key;
}
V &value() {
return _data.value;
}
const V &value() const {
return _data.value;
}
V &get() {
return _data.value;
}
const V &get() const {
return _data.value;
}
Element(const KeyValue<K, V> &p_data) :
_data(p_data) {}
};
typedef KeyValue<K, V> ValueType;
struct Iterator {
friend class RBMap<K, V, C, A>;
_FORCE_INLINE_ KeyValue<K, V> &operator*() const {
return E->key_value();
}
_FORCE_INLINE_ KeyValue<K, V> *operator->() const { return &E->key_value(); }
_FORCE_INLINE_ Iterator &operator++() {
E = E->next();
return *this;
}
_FORCE_INLINE_ Iterator &operator--() {
E = E->prev();
return *this;
}
_FORCE_INLINE_ bool operator==(const Iterator &p_it) const { return E == p_it.E; }
_FORCE_INLINE_ bool operator!=(const Iterator &p_it) const { return E != p_it.E; }
explicit operator bool() const {
return E != nullptr;
}
Iterator &operator=(const Iterator &p_it) {
E = p_it.E;
return *this;
}
Iterator(Element *p_E) { E = p_E; }
Iterator() {}
Iterator(const Iterator &p_it) { E = p_it.E; }
private:
Element *E = nullptr;
};
struct ConstIterator {
_FORCE_INLINE_ const KeyValue<K, V> &operator*() const {
return E->key_value();
}
_FORCE_INLINE_ const KeyValue<K, V> *operator->() const { return &E->key_value(); }
_FORCE_INLINE_ ConstIterator &operator++() {
E = E->next();
return *this;
}
_FORCE_INLINE_ ConstIterator &operator--() {
E = E->prev();
return *this;
}
_FORCE_INLINE_ bool operator==(const ConstIterator &p_it) const { return E == p_it.E; }
_FORCE_INLINE_ bool operator!=(const ConstIterator &p_it) const { return E != p_it.E; }
explicit operator bool() const {
return E != nullptr;
}
ConstIterator &operator=(const ConstIterator &p_it) {
E = p_it.E;
return *this;
}
ConstIterator(const Element *p_E) { E = p_E; }
ConstIterator() {}
ConstIterator(const ConstIterator &p_it) { E = p_it.E; }
private:
const Element *E = nullptr;
};
_FORCE_INLINE_ Iterator begin() {
return Iterator(front());
}
_FORCE_INLINE_ Iterator end() {
return Iterator(nullptr);
}
#if 0
//to use when replacing find()
_FORCE_INLINE_ Iterator find(const K &p_key) {
return Iterator(find(p_key));
}
#endif
_FORCE_INLINE_ void remove(const Iterator &p_iter) {
return erase(p_iter.E);
}
_FORCE_INLINE_ ConstIterator begin() const {
return ConstIterator(front());
}
_FORCE_INLINE_ ConstIterator end() const {
return ConstIterator(nullptr);
}
#if 0
//to use when replacing find()
_FORCE_INLINE_ ConstIterator find(const K &p_key) const {
return ConstIterator(find(p_key));
}
#endif
private:
struct _Data {
Element *_root = nullptr;
Element *_nil = nullptr;
int size_cache = 0;
_FORCE_INLINE_ _Data() {
#ifdef GLOBALNIL_DISABLED
_nil = memnew_allocator(Element, A);
_nil->parent = _nil->left = _nil->right = _nil;
_nil->color = BLACK;
#else
_nil = (Element *)&_GlobalNilClass::_nil;
#endif
}
void _create_root() {
_root = memnew_allocator(Element(KeyValue<K, V>(K(), V())), A);
_root->parent = _root->left = _root->right = _nil;
_root->color = BLACK;
}
void _free_root() {
if (_root) {
memdelete_allocator<Element, A>(_root);
_root = nullptr;
}
}
~_Data() {
_free_root();
#ifdef GLOBALNIL_DISABLED
memdelete_allocator<Element, A>(_nil);
#endif
}
};
_Data _data;
inline void _set_color(Element *p_node, int p_color) {
ERR_FAIL_COND(p_node == _data._nil && p_color == RED);
p_node->color = p_color;
}
inline void _rotate_left(Element *p_node) {
Element *r = p_node->right;
p_node->right = r->left;
if (r->left != _data._nil) {
r->left->parent = p_node;
}
r->parent = p_node->parent;
if (p_node == p_node->parent->left) {
p_node->parent->left = r;
} else {
p_node->parent->right = r;
}
r->left = p_node;
p_node->parent = r;
}
inline void _rotate_right(Element *p_node) {
Element *l = p_node->left;
p_node->left = l->right;
if (l->right != _data._nil) {
l->right->parent = p_node;
}
l->parent = p_node->parent;
if (p_node == p_node->parent->right) {
p_node->parent->right = l;
} else {
p_node->parent->left = l;
}
l->right = p_node;
p_node->parent = l;
}
inline Element *_successor(Element *p_node) const {
Element *node = p_node;
if (node->right != _data._nil) {
node = node->right;
while (node->left != _data._nil) { /* returns the minimum of the right subtree of node */
node = node->left;
}
return node;
} else {
while (node == node->parent->right) {
node = node->parent;
}
if (node->parent == _data._root) {
return nullptr; // No successor, as p_node = last node
}
return node->parent;
}
}
inline Element *_predecessor(Element *p_node) const {
Element *node = p_node;
if (node->left != _data._nil) {
node = node->left;
while (node->right != _data._nil) { /* returns the maximum of the left subtree of node */
node = node->right;
}
return node;
} else {
while (node == node->parent->left) {
node = node->parent;
}
if (node == _data._root) {
return nullptr; // No predecessor, as p_node = first node
}
return node->parent;
}
}
Element *_find(const K &p_key) const {
Element *node = _data._root->left;
C less;
while (node != _data._nil) {
if (less(p_key, node->_data.key)) {
node = node->left;
} else if (less(node->_data.key, p_key)) {
node = node->right;
} else {
return node; // found
}
}
return nullptr;
}
Element *_find_closest(const K &p_key) const {
Element *node = _data._root->left;
Element *prev = nullptr;
C less;
while (node != _data._nil) {
prev = node;
if (less(p_key, node->_data.key)) {
node = node->left;
} else if (less(node->_data.key, p_key)) {
node = node->right;
} else {
return node; // found
}
}
if (prev == nullptr) {
return nullptr; // tree empty
}
if (less(p_key, prev->_data.key)) {
prev = prev->_prev;
}
return prev;
}
void _insert_rb_fix(Element *p_new_node) {
Element *node = p_new_node;
Element *nparent = node->parent;
Element *ngrand_parent = nullptr;
while (nparent->color == RED) {
ngrand_parent = nparent->parent;
if (nparent == ngrand_parent->left) {
if (ngrand_parent->right->color == RED) {
_set_color(nparent, BLACK);
_set_color(ngrand_parent->right, BLACK);
_set_color(ngrand_parent, RED);
node = ngrand_parent;
nparent = node->parent;
} else {
if (node == nparent->right) {
_rotate_left(nparent);
node = nparent;
nparent = node->parent;
}
_set_color(nparent, BLACK);
_set_color(ngrand_parent, RED);
_rotate_right(ngrand_parent);
}
} else {
if (ngrand_parent->left->color == RED) {
_set_color(nparent, BLACK);
_set_color(ngrand_parent->left, BLACK);
_set_color(ngrand_parent, RED);
node = ngrand_parent;
nparent = node->parent;
} else {
if (node == nparent->left) {
_rotate_right(nparent);
node = nparent;
nparent = node->parent;
}
_set_color(nparent, BLACK);
_set_color(ngrand_parent, RED);
_rotate_left(ngrand_parent);
}
}
}
_set_color(_data._root->left, BLACK);
}
Element *_insert(const K &p_key, const V &p_value) {
Element *new_parent = _data._root;
Element *node = _data._root->left;
C less;
while (node != _data._nil) {
new_parent = node;
if (less(p_key, node->_data.key)) {
node = node->left;
} else if (less(node->_data.key, p_key)) {
node = node->right;
} else {
node->_data.value = p_value;
return node; // Return existing node with new value
}
}
typedef KeyValue<K, V> KV;
Element *new_node = memnew_allocator(Element(KV(p_key, p_value)), A);
new_node->parent = new_parent;
new_node->right = _data._nil;
new_node->left = _data._nil;
//new_node->data=_data;
if (new_parent == _data._root || less(p_key, new_parent->_data.key)) {
new_parent->left = new_node;
} else {
new_parent->right = new_node;
}
new_node->_next = _successor(new_node);
new_node->_prev = _predecessor(new_node);
if (new_node->_next) {
new_node->_next->_prev = new_node;
}
if (new_node->_prev) {
new_node->_prev->_next = new_node;
}
_data.size_cache++;
_insert_rb_fix(new_node);
return new_node;
}
void _erase_fix_rb(Element *p_node) {
Element *root = _data._root->left;
Element *node = _data._nil;
Element *sibling = p_node;
Element *parent = sibling->parent;
while (node != root) { // If red node found, will exit at a break
if (sibling->color == RED) {
_set_color(sibling, BLACK);
_set_color(parent, RED);
if (sibling == parent->right) {
sibling = sibling->left;
_rotate_left(parent);
} else {
sibling = sibling->right;
_rotate_right(parent);
}
}
if ((sibling->left->color == BLACK) && (sibling->right->color == BLACK)) {
_set_color(sibling, RED);
if (parent->color == RED) {
_set_color(parent, BLACK);
break;
} else { // loop: haven't found any red nodes yet
node = parent;
parent = node->parent;
sibling = (node == parent->left) ? parent->right : parent->left;
}
} else {
if (sibling == parent->right) {
if (sibling->right->color == BLACK) {
_set_color(sibling->left, BLACK);
_set_color(sibling, RED);
_rotate_right(sibling);
sibling = sibling->parent;
}
_set_color(sibling, parent->color);
_set_color(parent, BLACK);
_set_color(sibling->right, BLACK);
_rotate_left(parent);
break;
} else {
if (sibling->left->color == BLACK) {
_set_color(sibling->right, BLACK);
_set_color(sibling, RED);
_rotate_left(sibling);
sibling = sibling->parent;
}
_set_color(sibling, parent->color);
_set_color(parent, BLACK);
_set_color(sibling->left, BLACK);
_rotate_right(parent);
break;
}
}
}
ERR_FAIL_COND(_data._nil->color != BLACK);
}
void _erase(Element *p_node) {
Element *rp = ((p_node->left == _data._nil) || (p_node->right == _data._nil)) ? p_node : p_node->_next;
Element *node = (rp->left == _data._nil) ? rp->right : rp->left;
Element *sibling = nullptr;
if (rp == rp->parent->left) {
rp->parent->left = node;
sibling = rp->parent->right;
} else {
rp->parent->right = node;
sibling = rp->parent->left;
}
if (node->color == RED) {
node->parent = rp->parent;
_set_color(node, BLACK);
} else if (rp->color == BLACK && rp->parent != _data._root) {
_erase_fix_rb(sibling);
}
if (rp != p_node) {
ERR_FAIL_COND(rp == _data._nil);
rp->left = p_node->left;
rp->right = p_node->right;
rp->parent = p_node->parent;
rp->color = p_node->color;
if (p_node->left != _data._nil) {
p_node->left->parent = rp;
}
if (p_node->right != _data._nil) {
p_node->right->parent = rp;
}
if (p_node == p_node->parent->left) {
p_node->parent->left = rp;
} else {
p_node->parent->right = rp;
}
}
if (p_node->_next) {
p_node->_next->_prev = p_node->_prev;
}
if (p_node->_prev) {
p_node->_prev->_next = p_node->_next;
}
memdelete_allocator<Element, A>(p_node);
_data.size_cache--;
ERR_FAIL_COND(_data._nil->color == RED);
}
void _calculate_depth(Element *p_element, int &max_d, int d) const {
if (p_element == _data._nil) {
return;
}
_calculate_depth(p_element->left, max_d, d + 1);
_calculate_depth(p_element->right, max_d, d + 1);
if (d > max_d) {
max_d = d;
}
}
void _cleanup_tree(Element *p_element) {
if (p_element == _data._nil) {
return;
}
_cleanup_tree(p_element->left);
_cleanup_tree(p_element->right);
memdelete_allocator<Element, A>(p_element);
}
void _copy_from(const RBMap &p_map) {
clear();
// not the fastest way, but safest to write.
for (Element *I = p_map.front(); I; I = I->next()) {
insert(I->key(), I->value());
}
}
public:
const Element *find(const K &p_key) const {
if (!_data._root) {
return nullptr;
}
const Element *res = _find(p_key);
return res;
}
Element *find(const K &p_key) {
if (!_data._root) {
return nullptr;
}
Element *res = _find(p_key);
return res;
}
const Element *find_closest(const K &p_key) const {
if (!_data._root) {
return nullptr;
}
const Element *res = _find_closest(p_key);
return res;
}
Element *find_closest(const K &p_key) {
if (!_data._root) {
return nullptr;
}
Element *res = _find_closest(p_key);
return res;
}
bool has(const K &p_key) const {
return find(p_key) != nullptr;
}
Element *insert(const K &p_key, const V &p_value) {
if (!_data._root) {
_data._create_root();
}
return _insert(p_key, p_value);
}
void erase(Element *p_element) {
if (!_data._root || !p_element) {
return;
}
_erase(p_element);
if (_data.size_cache == 0 && _data._root) {
_data._free_root();
}
}
bool erase(const K &p_key) {
if (!_data._root) {
return false;
}
Element *e = find(p_key);
if (!e) {
return false;
}
_erase(e);
if (_data.size_cache == 0 && _data._root) {
_data._free_root();
}
return true;
}
const V &operator[](const K &p_key) const {
CRASH_COND(!_data._root);
const Element *e = find(p_key);
CRASH_COND(!e);
return e->_data.value;
}
V &operator[](const K &p_key) {
if (!_data._root) {
_data._create_root();
}
Element *e = find(p_key);
if (!e) {
e = insert(p_key, V());
}
return e->_data.value;
}
Element *front() const {
if (!_data._root) {
return nullptr;
}
Element *e = _data._root->left;
if (e == _data._nil) {
return nullptr;
}
while (e->left != _data._nil) {
e = e->left;
}
return e;
}
Element *back() const {
if (!_data._root) {
return nullptr;
}
Element *e = _data._root->left;
if (e == _data._nil) {
return nullptr;
}
while (e->right != _data._nil) {
e = e->right;
}
return e;
}
inline bool is_empty() const {
return _data.size_cache == 0;
}
inline int size() const {
return _data.size_cache;
}
int calculate_depth() const {
// used for debug mostly
if (!_data._root) {
return 0;
}
int max_d = 0;
_calculate_depth(_data._root->left, max_d, 0);
return max_d;
}
void clear() {
if (!_data._root) {
return;
}
_cleanup_tree(_data._root->left);
_data._root->left = _data._nil;
_data.size_cache = 0;
_data._free_root();
}
void operator=(const RBMap &p_map) {
_copy_from(p_map);
}
RBMap(const RBMap &p_map) {
_copy_from(p_map);
}
RBMap(std::initializer_list<KeyValue<K, V>> p_init) {
for (const KeyValue<K, V> &E : p_init) {
insert(E.key, E.value);
}
}
_FORCE_INLINE_ RBMap() {}
~RBMap() {
clear();
}
};
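// Minimal usage sketch (illustrative only, not part of the engine): RBMap keeps
// its keys ordered by the comparator, so iteration visits entries in ascending
// key order.
#if 0
inline void rb_map_example() {
	RBMap<int, const char *> map;
	map.insert(3, "three");
	map.insert(1, "one");
	map[2] = "two"; // operator[] inserts a default value if the key is missing.

	int key_sum = 0;
	for (const KeyValue<int, const char *> &kv : map) {
		key_sum += kv.key; // Visited in ascending key order: 1, 2, 3.
	}

	RBMap<int, const char *>::Element *e = map.find(2);
	if (e) {
		map.erase(e); // Erase by element; erase(2) by key also works.
	}
}
#endif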

core/templates/rb_set.h Normal file

@@ -0,0 +1,716 @@
/**************************************************************************/
/* rb_set.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/os/memory.h"
#include "core/typedefs.h"
#include <initializer_list>
// based on the very nice implementation of rb-trees by:
// https://web.archive.org/web/20120507164830/https://web.mit.edu/~emin/www/source_code/red_black_tree/index.html
template <typename T, typename C = Comparator<T>, typename A = DefaultAllocator>
class RBSet {
enum Color {
RED,
BLACK
};
struct _Data;
public:
class Element {
private:
friend class RBSet<T, C, A>;
int color = RED;
Element *right = nullptr;
Element *left = nullptr;
Element *parent = nullptr;
Element *_next = nullptr;
Element *_prev = nullptr;
T value;
//_Data *data;
public:
const Element *next() const {
return _next;
}
Element *next() {
return _next;
}
const Element *prev() const {
return _prev;
}
Element *prev() {
return _prev;
}
T &get() {
return value;
}
const T &get() const {
return value;
}
Element() {}
};
typedef T ValueType;
struct Iterator {
_FORCE_INLINE_ T &operator*() const {
return E->get();
}
_FORCE_INLINE_ T *operator->() const { return &E->get(); }
_FORCE_INLINE_ Iterator &operator++() {
E = E->next();
return *this;
}
_FORCE_INLINE_ Iterator &operator--() {
E = E->prev();
return *this;
}
_FORCE_INLINE_ bool operator==(const Iterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const Iterator &b) const { return E != b.E; }
explicit operator bool() const { return E != nullptr; }
Iterator(Element *p_E) { E = p_E; }
Iterator() {}
Iterator(const Iterator &p_it) { E = p_it.E; }
private:
Element *E = nullptr;
};
struct ConstIterator {
_FORCE_INLINE_ const T &operator*() const {
return E->get();
}
_FORCE_INLINE_ const T *operator->() const { return &E->get(); }
_FORCE_INLINE_ ConstIterator &operator++() {
E = E->next();
return *this;
}
_FORCE_INLINE_ ConstIterator &operator--() {
E = E->prev();
return *this;
}
_FORCE_INLINE_ bool operator==(const ConstIterator &b) const { return E == b.E; }
_FORCE_INLINE_ bool operator!=(const ConstIterator &b) const { return E != b.E; }
_FORCE_INLINE_ ConstIterator(const Element *p_E) { E = p_E; }
_FORCE_INLINE_ ConstIterator() {}
_FORCE_INLINE_ ConstIterator(const ConstIterator &p_it) { E = p_it.E; }
explicit operator bool() const { return E != nullptr; }
private:
const Element *E = nullptr;
};
_FORCE_INLINE_ Iterator begin() {
return Iterator(front());
}
_FORCE_INLINE_ Iterator end() {
return Iterator(nullptr);
}
#if 0
//to use when replacing find()
_FORCE_INLINE_ Iterator find(const K &p_key) {
return Iterator(find(p_key));
}
#endif
_FORCE_INLINE_ ConstIterator begin() const {
return ConstIterator(front());
}
_FORCE_INLINE_ ConstIterator end() const {
return ConstIterator(nullptr);
}
#if 0
//to use when replacing find()
_FORCE_INLINE_ ConstIterator find(const K &p_key) const {
return ConstIterator(find(p_key));
}
#endif
private:
struct _Data {
Element *_root = nullptr;
Element *_nil = nullptr;
int size_cache = 0;
_FORCE_INLINE_ _Data() {
#ifdef GLOBALNIL_DISABLED
_nil = memnew_allocator(Element, A);
_nil->parent = _nil->left = _nil->right = _nil;
_nil->color = BLACK;
#else
_nil = (Element *)&_GlobalNilClass::_nil;
#endif
}
void _create_root() {
_root = memnew_allocator(Element, A);
_root->parent = _root->left = _root->right = _nil;
_root->color = BLACK;
}
void _free_root() {
if (_root) {
memdelete_allocator<Element, A>(_root);
_root = nullptr;
}
}
~_Data() {
_free_root();
#ifdef GLOBALNIL_DISABLED
memdelete_allocator<Element, A>(_nil);
#endif
}
};
_Data _data;
inline void _set_color(Element *p_node, int p_color) {
ERR_FAIL_COND(p_node == _data._nil && p_color == RED);
p_node->color = p_color;
}
inline void _rotate_left(Element *p_node) {
Element *r = p_node->right;
p_node->right = r->left;
if (r->left != _data._nil) {
r->left->parent = p_node;
}
r->parent = p_node->parent;
if (p_node == p_node->parent->left) {
p_node->parent->left = r;
} else {
p_node->parent->right = r;
}
r->left = p_node;
p_node->parent = r;
}
inline void _rotate_right(Element *p_node) {
Element *l = p_node->left;
p_node->left = l->right;
if (l->right != _data._nil) {
l->right->parent = p_node;
}
l->parent = p_node->parent;
if (p_node == p_node->parent->right) {
p_node->parent->right = l;
} else {
p_node->parent->left = l;
}
l->right = p_node;
p_node->parent = l;
}
inline Element *_successor(Element *p_node) const {
Element *node = p_node;
if (node->right != _data._nil) {
node = node->right;
while (node->left != _data._nil) { /* returns the minimum of the right subtree of node */
node = node->left;
}
return node;
} else {
while (node == node->parent->right) {
node = node->parent;
}
if (node->parent == _data._root) {
return nullptr; // No successor, as p_node = last node
}
return node->parent;
}
}
inline Element *_predecessor(Element *p_node) const {
Element *node = p_node;
if (node->left != _data._nil) {
node = node->left;
while (node->right != _data._nil) { /* returns the maximum of the left subtree of node */
node = node->right;
}
return node;
} else {
while (node == node->parent->left) {
node = node->parent;
}
if (node == _data._root) {
return nullptr; // No predecessor, as p_node = first node.
}
return node->parent;
}
}
Element *_find(const T &p_value) const {
Element *node = _data._root->left;
C less;
while (node != _data._nil) {
if (less(p_value, node->value)) {
node = node->left;
} else if (less(node->value, p_value)) {
node = node->right;
} else {
return node; // found
}
}
return nullptr;
}
Element *_lower_bound(const T &p_value) const {
Element *node = _data._root->left;
Element *prev = nullptr;
C less;
while (node != _data._nil) {
prev = node;
if (less(p_value, node->value)) {
node = node->left;
} else if (less(node->value, p_value)) {
node = node->right;
} else {
return node; // found
}
}
if (prev == nullptr) {
return nullptr; // tree empty
}
if (less(prev->value, p_value)) {
prev = prev->_next;
}
return prev;
}
void _insert_rb_fix(Element *p_new_node) {
Element *node = p_new_node;
Element *nparent = node->parent;
Element *ngrand_parent = nullptr;
while (nparent->color == RED) {
ngrand_parent = nparent->parent;
if (nparent == ngrand_parent->left) {
if (ngrand_parent->right->color == RED) {
_set_color(nparent, BLACK);
_set_color(ngrand_parent->right, BLACK);
_set_color(ngrand_parent, RED);
node = ngrand_parent;
nparent = node->parent;
} else {
if (node == nparent->right) {
_rotate_left(nparent);
node = nparent;
nparent = node->parent;
}
_set_color(nparent, BLACK);
_set_color(ngrand_parent, RED);
_rotate_right(ngrand_parent);
}
} else {
if (ngrand_parent->left->color == RED) {
_set_color(nparent, BLACK);
_set_color(ngrand_parent->left, BLACK);
_set_color(ngrand_parent, RED);
node = ngrand_parent;
nparent = node->parent;
} else {
if (node == nparent->left) {
_rotate_right(nparent);
node = nparent;
nparent = node->parent;
}
_set_color(nparent, BLACK);
_set_color(ngrand_parent, RED);
_rotate_left(ngrand_parent);
}
}
}
_set_color(_data._root->left, BLACK);
}
Element *_insert(const T &p_value) {
Element *new_parent = _data._root;
Element *node = _data._root->left;
C less;
while (node != _data._nil) {
new_parent = node;
if (less(p_value, node->value)) {
node = node->left;
} else if (less(node->value, p_value)) {
node = node->right;
} else {
return node; // Return existing node
}
}
Element *new_node = memnew_allocator(Element, A);
new_node->parent = new_parent;
new_node->right = _data._nil;
new_node->left = _data._nil;
new_node->value = p_value;
//new_node->data=_data;
if (new_parent == _data._root || less(p_value, new_parent->value)) {
new_parent->left = new_node;
} else {
new_parent->right = new_node;
}
new_node->_next = _successor(new_node);
new_node->_prev = _predecessor(new_node);
if (new_node->_next) {
new_node->_next->_prev = new_node;
}
if (new_node->_prev) {
new_node->_prev->_next = new_node;
}
_data.size_cache++;
_insert_rb_fix(new_node);
return new_node;
}
void _erase_fix_rb(Element *p_node) {
Element *root = _data._root->left;
Element *node = _data._nil;
Element *sibling = p_node;
Element *parent = sibling->parent;
while (node != root) { // If red node found, will exit at a break
if (sibling->color == RED) {
_set_color(sibling, BLACK);
_set_color(parent, RED);
if (sibling == parent->right) {
sibling = sibling->left;
_rotate_left(parent);
} else {
sibling = sibling->right;
_rotate_right(parent);
}
}
if ((sibling->left->color == BLACK) && (sibling->right->color == BLACK)) {
_set_color(sibling, RED);
if (parent->color == RED) {
_set_color(parent, BLACK);
break;
} else { // loop: haven't found any red nodes yet
node = parent;
parent = node->parent;
sibling = (node == parent->left) ? parent->right : parent->left;
}
} else {
if (sibling == parent->right) {
if (sibling->right->color == BLACK) {
_set_color(sibling->left, BLACK);
_set_color(sibling, RED);
_rotate_right(sibling);
sibling = sibling->parent;
}
_set_color(sibling, parent->color);
_set_color(parent, BLACK);
_set_color(sibling->right, BLACK);
_rotate_left(parent);
break;
} else {
if (sibling->left->color == BLACK) {
_set_color(sibling->right, BLACK);
_set_color(sibling, RED);
_rotate_left(sibling);
sibling = sibling->parent;
}
_set_color(sibling, parent->color);
_set_color(parent, BLACK);
_set_color(sibling->left, BLACK);
_rotate_right(parent);
break;
}
}
}
ERR_FAIL_COND(_data._nil->color != BLACK);
}
void _erase(Element *p_node) {
Element *rp = ((p_node->left == _data._nil) || (p_node->right == _data._nil)) ? p_node : p_node->_next;
Element *node = (rp->left == _data._nil) ? rp->right : rp->left;
Element *sibling = nullptr;
if (rp == rp->parent->left) {
rp->parent->left = node;
sibling = rp->parent->right;
} else {
rp->parent->right = node;
sibling = rp->parent->left;
}
if (node->color == RED) {
node->parent = rp->parent;
_set_color(node, BLACK);
} else if (rp->color == BLACK && rp->parent != _data._root) {
_erase_fix_rb(sibling);
}
if (rp != p_node) {
ERR_FAIL_COND(rp == _data._nil);
rp->left = p_node->left;
rp->right = p_node->right;
rp->parent = p_node->parent;
rp->color = p_node->color;
if (p_node->left != _data._nil) {
p_node->left->parent = rp;
}
if (p_node->right != _data._nil) {
p_node->right->parent = rp;
}
if (p_node == p_node->parent->left) {
p_node->parent->left = rp;
} else {
p_node->parent->right = rp;
}
}
if (p_node->_next) {
p_node->_next->_prev = p_node->_prev;
}
if (p_node->_prev) {
p_node->_prev->_next = p_node->_next;
}
memdelete_allocator<Element, A>(p_node);
_data.size_cache--;
ERR_FAIL_COND(_data._nil->color == RED);
}
void _calculate_depth(Element *p_element, int &max_d, int d) const {
if (p_element == _data._nil) {
return;
}
_calculate_depth(p_element->left, max_d, d + 1);
_calculate_depth(p_element->right, max_d, d + 1);
if (d > max_d) {
max_d = d;
}
}
void _cleanup_tree(Element *p_element) {
if (p_element == _data._nil) {
return;
}
_cleanup_tree(p_element->left);
_cleanup_tree(p_element->right);
memdelete_allocator<Element, A>(p_element);
}
void _copy_from(const RBSet &p_set) {
clear();
// not the fastest way, but safest to write.
for (Element *I = p_set.front(); I; I = I->next()) {
insert(I->get());
}
}
public:
const Element *find(const T &p_value) const {
if (!_data._root) {
return nullptr;
}
const Element *res = _find(p_value);
return res;
}
Element *find(const T &p_value) {
if (!_data._root) {
return nullptr;
}
Element *res = _find(p_value);
return res;
}
Element *lower_bound(const T &p_value) const {
if (!_data._root) {
return nullptr;
}
return _lower_bound(p_value);
}
bool has(const T &p_value) const {
return find(p_value) != nullptr;
}
Element *insert(const T &p_value) {
if (!_data._root) {
_data._create_root();
}
return _insert(p_value);
}
void erase(Element *p_element) {
if (!_data._root || !p_element) {
return;
}
_erase(p_element);
if (_data.size_cache == 0 && _data._root) {
_data._free_root();
}
}
bool erase(const T &p_value) {
if (!_data._root) {
return false;
}
Element *e = find(p_value);
if (!e) {
return false;
}
_erase(e);
if (_data.size_cache == 0 && _data._root) {
_data._free_root();
}
return true;
}
Element *front() const {
if (!_data._root) {
return nullptr;
}
Element *e = _data._root->left;
if (e == _data._nil) {
return nullptr;
}
while (e->left != _data._nil) {
e = e->left;
}
return e;
}
Element *back() const {
if (!_data._root) {
return nullptr;
}
Element *e = _data._root->left;
if (e == _data._nil) {
return nullptr;
}
while (e->right != _data._nil) {
e = e->right;
}
return e;
}
inline bool is_empty() const {
return _data.size_cache == 0;
}
inline int size() const {
return _data.size_cache;
}
int calculate_depth() const {
// used for debug mostly
if (!_data._root) {
return 0;
}
int max_d = 0;
_calculate_depth(_data._root->left, max_d, 0);
return max_d;
}
void clear() {
if (!_data._root) {
return;
}
_cleanup_tree(_data._root->left);
_data._root->left = _data._nil;
_data.size_cache = 0;
_data._free_root();
}
void operator=(const RBSet &p_set) {
_copy_from(p_set);
}
RBSet(const RBSet &p_set) {
_copy_from(p_set);
}
RBSet(std::initializer_list<T> p_init) {
for (const T &E : p_init) {
insert(E);
}
}
_FORCE_INLINE_ RBSet() {}
~RBSet() {
clear();
}
};
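// Minimal usage sketch (illustrative only, not part of the engine): RBSet stores
// unique values in comparator order; lower_bound() returns the first element
// that is not less than the query.
#if 0
inline void rb_set_example() {
	RBSet<int> set;
	set.insert(10);
	set.insert(5);
	set.insert(10); // Duplicate: the existing element is returned, size() stays 2.

	RBSet<int>::Element *e = set.lower_bound(7); // First value >= 7, i.e. 10.
	if (e) {
		set.erase(e);
	}
}
#endif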

core/templates/rid.h Normal file

@@ -0,0 +1,76 @@
/**************************************************************************/
/* rid.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/typedefs.h"
class RID_AllocBase;
class RID {
friend class RID_AllocBase;
uint64_t _id = 0;
public:
_ALWAYS_INLINE_ bool operator==(const RID &p_rid) const {
return _id == p_rid._id;
}
_ALWAYS_INLINE_ bool operator<(const RID &p_rid) const {
return _id < p_rid._id;
}
_ALWAYS_INLINE_ bool operator<=(const RID &p_rid) const {
return _id <= p_rid._id;
}
_ALWAYS_INLINE_ bool operator>(const RID &p_rid) const {
return _id > p_rid._id;
}
_ALWAYS_INLINE_ bool operator>=(const RID &p_rid) const {
return _id >= p_rid._id;
}
_ALWAYS_INLINE_ bool operator!=(const RID &p_rid) const {
return _id != p_rid._id;
}
_ALWAYS_INLINE_ bool is_valid() const { return _id != 0; }
_ALWAYS_INLINE_ bool is_null() const { return _id == 0; }
_ALWAYS_INLINE_ uint32_t get_local_index() const { return _id & 0xFFFFFFFF; }
static _ALWAYS_INLINE_ RID from_uint64(uint64_t p_id) {
RID _rid;
_rid._id = p_id;
return _rid;
}
_ALWAYS_INLINE_ uint64_t get_id() const { return _id; }
_ALWAYS_INLINE_ RID() {}
};
template <>
struct is_zero_constructible<RID> : std::true_type {};
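// Illustrative note (the packing below matches how RID_Alloc in rid_owner.h
// builds IDs): the low 32 bits are the slot index and the high 32 bits are a
// validator, so get_local_index() recovers the slot without the validator.
#if 0
inline void rid_layout_example() {
	const uint64_t validator = 0x12345678;
	const uint64_t index = 42;
	RID rid = RID::from_uint64((validator << 32) | index);

	// rid.get_local_index() == 42 and (rid.get_id() >> 32) == 0x12345678.
	// A default-constructed RID has an id of 0, so is_null() is true for it.
}
#endif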


@@ -0,0 +1,33 @@
/**************************************************************************/
/* rid_owner.cpp */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#include "rid_owner.h"
SafeNumeric<uint64_t> RID_AllocBase::base_id{ 1 };

core/templates/rid_owner.h Normal file

@@ -0,0 +1,578 @@
/**************************************************************************/
/* rid_owner.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/os/memory.h"
#include "core/os/mutex.h"
#include "core/string/print_string.h"
#include "core/templates/local_vector.h"
#include "core/templates/rid.h"
#include "core/templates/safe_refcount.h"
#include <cstdio>
#include <typeinfo> // IWYU pragma: keep // Used in macro.
#ifdef SANITIZERS_ENABLED
#ifdef __has_feature
#if __has_feature(thread_sanitizer)
#define TSAN_ENABLED
#endif
#elif defined(__SANITIZE_THREAD__)
#define TSAN_ENABLED
#endif
#endif
#ifdef TSAN_ENABLED
#include <sanitizer/tsan_interface.h>
#endif
// The following macros would need to be implemented somehow
// for purely weakly ordered architectures. There's a test case
// ("[RID_Owner] Thread safety") with potential to catch issues
// on such architectures if these primitives fail to be implemented.
// For now, they are just markers for needs that may arise.
#define WEAK_MEMORY_ORDER 0
#if WEAK_MEMORY_ORDER
// Ideally, we'd have implementations that collaborate with the
// sync mechanism used (e.g., the mutex) so instead of some full
// memory barriers being issued, some acquire-release on the
// primitive itself. However, these implementations will at least
// provide correctness.
#define SYNC_ACQUIRE std::atomic_thread_fence(std::memory_order_acquire);
#define SYNC_RELEASE std::atomic_thread_fence(std::memory_order_release);
#else
// Compiler barriers are enough in this case.
#define SYNC_ACQUIRE std::atomic_signal_fence(std::memory_order_acquire);
#define SYNC_RELEASE std::atomic_signal_fence(std::memory_order_release);
#endif
class RID_AllocBase {
static SafeNumeric<uint64_t> base_id;
protected:
static RID _make_from_id(uint64_t p_id) {
RID rid;
rid._id = p_id;
return rid;
}
static RID _gen_rid() {
return _make_from_id(_gen_id());
}
friend struct VariantUtilityFunctions;
static uint64_t _gen_id() {
return base_id.increment();
}
public:
virtual ~RID_AllocBase() {}
};
template <typename T, bool THREAD_SAFE = false>
class RID_Alloc : public RID_AllocBase {
struct Chunk {
T data;
uint32_t validator;
};
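// Validator values (the high 32 bits of a RID): 0xFFFFFFFF marks a free slot,
// the top bit (0x80000000) marks a slot that was allocated but not yet
// initialized, and the low 31 bits carry the validator that must match the
// RID being looked up.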
Chunk **chunks = nullptr;
uint32_t **free_list_chunks = nullptr;
uint32_t elements_in_chunk;
uint32_t max_alloc = 0;
uint32_t alloc_count = 0;
uint32_t chunk_limit = 0;
const char *description = nullptr;
mutable Mutex mutex;
_FORCE_INLINE_ RID _allocate_rid() {
if constexpr (THREAD_SAFE) {
mutex.lock();
}
if (alloc_count == max_alloc) {
//allocate a new chunk
uint32_t chunk_count = alloc_count == 0 ? 0 : (max_alloc / elements_in_chunk);
if (THREAD_SAFE && chunk_count == chunk_limit) {
mutex.unlock();
if (description != nullptr) {
ERR_FAIL_V_MSG(RID(), vformat("Element limit for RID of type '%s' reached.", String(description)));
} else {
ERR_FAIL_V_MSG(RID(), "Element limit reached.");
}
}
//grow chunks
if constexpr (!THREAD_SAFE) {
chunks = (Chunk **)memrealloc(chunks, sizeof(Chunk *) * (chunk_count + 1));
}
chunks[chunk_count] = (Chunk *)memalloc(sizeof(Chunk) * elements_in_chunk); //but don't initialize
//grow free lists
if constexpr (!THREAD_SAFE) {
free_list_chunks = (uint32_t **)memrealloc(free_list_chunks, sizeof(uint32_t *) * (chunk_count + 1));
}
free_list_chunks[chunk_count] = (uint32_t *)memalloc(sizeof(uint32_t) * elements_in_chunk);
//initialize
for (uint32_t i = 0; i < elements_in_chunk; i++) {
// Don't initialize chunk.
chunks[chunk_count][i].validator = 0xFFFFFFFF;
free_list_chunks[chunk_count][i] = alloc_count + i;
}
if constexpr (THREAD_SAFE) {
// Store atomically to avoid data race with the load in get_or_null().
((std::atomic<uint32_t> *)&max_alloc)->store(max_alloc + elements_in_chunk, std::memory_order_relaxed);
} else {
max_alloc += elements_in_chunk;
}
}
uint32_t free_index = free_list_chunks[alloc_count / elements_in_chunk][alloc_count % elements_in_chunk];
uint32_t free_chunk = free_index / elements_in_chunk;
uint32_t free_element = free_index % elements_in_chunk;
uint32_t validator = 1 + (uint32_t)(_gen_id() % 0x7FFFFFFF);
uint64_t id = validator;
id <<= 32;
id |= free_index;
chunks[free_chunk][free_element].validator = validator;
chunks[free_chunk][free_element].validator |= 0x80000000; //mark uninitialized bit
alloc_count++;
if constexpr (THREAD_SAFE) {
mutex.unlock();
}
return _make_from_id(id);
}
public:
RID make_rid() {
RID rid = _allocate_rid();
initialize_rid(rid);
return rid;
}
RID make_rid(const T &p_value) {
RID rid = _allocate_rid();
initialize_rid(rid, p_value);
return rid;
}
//allocate but don't initialize, use initialize_rid afterwards
RID allocate_rid() {
return _allocate_rid();
}
_FORCE_INLINE_ T *get_or_null(const RID &p_rid, bool p_initialize = false) {
if (p_rid == RID()) {
return nullptr;
}
if constexpr (THREAD_SAFE) {
SYNC_ACQUIRE;
}
uint64_t id = p_rid.get_id();
uint32_t idx = uint32_t(id & 0xFFFFFFFF);
uint32_t ma;
if constexpr (THREAD_SAFE) { // Read atomically to avoid data race with the store in _allocate_rid().
ma = ((std::atomic<uint32_t> *)&max_alloc)->load(std::memory_order_relaxed);
} else {
ma = max_alloc;
}
if (unlikely(idx >= ma)) {
return nullptr;
}
uint32_t idx_chunk = idx / elements_in_chunk;
uint32_t idx_element = idx % elements_in_chunk;
uint32_t validator = uint32_t(id >> 32);
if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
__tsan_acquire(&chunks[idx_chunk]); // We know not a race in practice.
__tsan_acquire(&chunks[idx_chunk][idx_element]); // We know not a race in practice.
#endif
}
Chunk &c = chunks[idx_chunk][idx_element];
if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
__tsan_release(&chunks[idx_chunk]);
__tsan_release(&chunks[idx_chunk][idx_element]);
__tsan_acquire(&c.validator); // We know not a race in practice.
#endif
}
if (unlikely(p_initialize)) {
if (unlikely(!(c.validator & 0x80000000))) {
ERR_FAIL_V_MSG(nullptr, "Initializing already initialized RID");
}
if (unlikely((c.validator & 0x7FFFFFFF) != validator)) {
ERR_FAIL_V_MSG(nullptr, "Attempting to initialize the wrong RID");
}
c.validator &= 0x7FFFFFFF; //initialized
} else if (unlikely(c.validator != validator)) {
if ((c.validator & 0x80000000) && c.validator != 0xFFFFFFFF) {
ERR_FAIL_V_MSG(nullptr, "Attempting to use an uninitialized RID");
}
return nullptr;
}
if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
__tsan_release(&c.validator);
#endif
}
T *ptr = &c.data;
return ptr;
}
void initialize_rid(RID p_rid) {
T *mem = get_or_null(p_rid, true);
ERR_FAIL_NULL(mem);
if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
__tsan_acquire(mem); // We know not a race in practice.
#endif
}
memnew_placement(mem, T);
if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
__tsan_release(mem);
#endif
SYNC_RELEASE;
}
}
void initialize_rid(RID p_rid, const T &p_value) {
T *mem = get_or_null(p_rid, true);
ERR_FAIL_NULL(mem);
if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
__tsan_acquire(mem); // We know not a race in practice.
#endif
}
memnew_placement(mem, T(p_value));
if constexpr (THREAD_SAFE) {
#ifdef TSAN_ENABLED
__tsan_release(mem);
#endif
SYNC_RELEASE;
}
}
_FORCE_INLINE_ bool owns(const RID &p_rid) const {
if constexpr (THREAD_SAFE) {
mutex.lock();
}
uint64_t id = p_rid.get_id();
uint32_t idx = uint32_t(id & 0xFFFFFFFF);
if (unlikely(idx >= max_alloc)) {
if constexpr (THREAD_SAFE) {
mutex.unlock();
}
return false;
}
uint32_t idx_chunk = idx / elements_in_chunk;
uint32_t idx_element = idx % elements_in_chunk;
uint32_t validator = uint32_t(id >> 32);
bool owned = (chunks[idx_chunk][idx_element].validator & 0x7FFFFFFF) == validator;
if constexpr (THREAD_SAFE) {
mutex.unlock();
}
return owned;
}
_FORCE_INLINE_ void free(const RID &p_rid) {
if constexpr (THREAD_SAFE) {
mutex.lock();
}
uint64_t id = p_rid.get_id();
uint32_t idx = uint32_t(id & 0xFFFFFFFF);
if (unlikely(idx >= max_alloc)) {
if constexpr (THREAD_SAFE) {
mutex.unlock();
}
ERR_FAIL();
}
uint32_t idx_chunk = idx / elements_in_chunk;
uint32_t idx_element = idx % elements_in_chunk;
uint32_t validator = uint32_t(id >> 32);
if (unlikely(chunks[idx_chunk][idx_element].validator & 0x80000000)) {
if constexpr (THREAD_SAFE) {
mutex.unlock();
}
ERR_FAIL_MSG("Attempted to free an uninitialized or invalid RID");
} else if (unlikely(chunks[idx_chunk][idx_element].validator != validator)) {
if constexpr (THREAD_SAFE) {
mutex.unlock();
}
ERR_FAIL();
}
chunks[idx_chunk][idx_element].data.~T();
chunks[idx_chunk][idx_element].validator = 0xFFFFFFFF; // go invalid
alloc_count--;
free_list_chunks[alloc_count / elements_in_chunk][alloc_count % elements_in_chunk] = idx;
if constexpr (THREAD_SAFE) {
mutex.unlock();
}
}
_FORCE_INLINE_ uint32_t get_rid_count() const {
return alloc_count;
}
LocalVector<RID> get_owned_list() const {
LocalVector<RID> owned;
if constexpr (THREAD_SAFE) {
mutex.lock();
}
for (size_t i = 0; i < max_alloc; i++) {
uint64_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator;
if (validator != 0xFFFFFFFF) {
owned.push_back(_make_from_id((validator << 32) | i));
}
}
if constexpr (THREAD_SAFE) {
mutex.unlock();
}
return owned;
}
//used for fast iteration over the elements or RIDs
void fill_owned_buffer(RID *p_rid_buffer) const {
if constexpr (THREAD_SAFE) {
mutex.lock();
}
uint32_t idx = 0;
for (size_t i = 0; i < max_alloc; i++) {
uint64_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator;
if (validator != 0xFFFFFFFF) {
p_rid_buffer[idx] = _make_from_id((validator << 32) | i);
idx++;
}
}
if constexpr (THREAD_SAFE) {
mutex.unlock();
}
}
void set_description(const char *p_description) {
description = p_description;
}
RID_Alloc(uint32_t p_target_chunk_byte_size = 65536, uint32_t p_maximum_number_of_elements = 262144) {
elements_in_chunk = sizeof(T) > p_target_chunk_byte_size ? 1 : (p_target_chunk_byte_size / sizeof(T));
if constexpr (THREAD_SAFE) {
chunk_limit = (p_maximum_number_of_elements / elements_in_chunk) + 1;
chunks = (Chunk **)memalloc(sizeof(Chunk *) * chunk_limit);
free_list_chunks = (uint32_t **)memalloc(sizeof(uint32_t *) * chunk_limit);
SYNC_RELEASE;
}
}
~RID_Alloc() {
if constexpr (THREAD_SAFE) {
SYNC_ACQUIRE;
}
if (alloc_count) {
print_error(vformat("ERROR: %d RID allocations of type '%s' were leaked at exit.",
alloc_count, description ? description : typeid(T).name()));
for (size_t i = 0; i < max_alloc; i++) {
uint32_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator;
if (validator & 0x80000000) {
continue; //uninitialized
}
if (validator != 0xFFFFFFFF) {
chunks[i / elements_in_chunk][i % elements_in_chunk].data.~T();
}
}
}
uint32_t chunk_count = max_alloc / elements_in_chunk;
for (uint32_t i = 0; i < chunk_count; i++) {
memfree(chunks[i]);
memfree(free_list_chunks[i]);
}
if (chunks) {
memfree(chunks);
memfree(free_list_chunks);
}
}
};
template <typename T, bool THREAD_SAFE = false>
class RID_PtrOwner {
RID_Alloc<T *, THREAD_SAFE> alloc;
public:
_FORCE_INLINE_ RID make_rid(T *p_ptr) {
return alloc.make_rid(p_ptr);
}
_FORCE_INLINE_ RID allocate_rid() {
return alloc.allocate_rid();
}
_FORCE_INLINE_ void initialize_rid(RID p_rid, T *p_ptr) {
alloc.initialize_rid(p_rid, p_ptr);
}
_FORCE_INLINE_ T *get_or_null(const RID &p_rid) {
T **ptr = alloc.get_or_null(p_rid);
if (unlikely(!ptr)) {
return nullptr;
}
return *ptr;
}
_FORCE_INLINE_ void replace(const RID &p_rid, T *p_new_ptr) {
T **ptr = alloc.get_or_null(p_rid);
ERR_FAIL_NULL(ptr);
*ptr = p_new_ptr;
}
_FORCE_INLINE_ bool owns(const RID &p_rid) const {
return alloc.owns(p_rid);
}
_FORCE_INLINE_ void free(const RID &p_rid) {
alloc.free(p_rid);
}
_FORCE_INLINE_ uint32_t get_rid_count() const {
return alloc.get_rid_count();
}
_FORCE_INLINE_ LocalVector<RID> get_owned_list() const {
return alloc.get_owned_list();
}
void fill_owned_buffer(RID *p_rid_buffer) const {
alloc.fill_owned_buffer(p_rid_buffer);
}
void set_description(const char *p_description) {
alloc.set_description(p_description);
}
RID_PtrOwner(uint32_t p_target_chunk_byte_size = 65536, uint32_t p_maximum_number_of_elements = 262144) :
alloc(p_target_chunk_byte_size, p_maximum_number_of_elements) {}
};
template <typename T, bool THREAD_SAFE = false>
class RID_Owner {
RID_Alloc<T, THREAD_SAFE> alloc;
public:
_FORCE_INLINE_ RID make_rid() {
return alloc.make_rid();
}
_FORCE_INLINE_ RID make_rid(const T &p_ptr) {
return alloc.make_rid(p_ptr);
}
_FORCE_INLINE_ RID allocate_rid() {
return alloc.allocate_rid();
}
_FORCE_INLINE_ void initialize_rid(RID p_rid) {
alloc.initialize_rid(p_rid);
}
_FORCE_INLINE_ void initialize_rid(RID p_rid, const T &p_ptr) {
alloc.initialize_rid(p_rid, p_ptr);
}
_FORCE_INLINE_ T *get_or_null(const RID &p_rid) {
return alloc.get_or_null(p_rid);
}
_FORCE_INLINE_ bool owns(const RID &p_rid) const {
return alloc.owns(p_rid);
}
_FORCE_INLINE_ void free(const RID &p_rid) {
alloc.free(p_rid);
}
_FORCE_INLINE_ uint32_t get_rid_count() const {
return alloc.get_rid_count();
}
_FORCE_INLINE_ LocalVector<RID> get_owned_list() const {
return alloc.get_owned_list();
}
void fill_owned_buffer(RID *p_rid_buffer) const {
alloc.fill_owned_buffer(p_rid_buffer);
}
void set_description(const char *p_description) {
alloc.set_description(p_description);
}
RID_Owner(uint32_t p_target_chunk_byte_size = 65536, uint32_t p_maximum_number_of_elements = 262144) :
alloc(p_target_chunk_byte_size, p_maximum_number_of_elements) {}
};
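// A minimal usage sketch for illustration only; `ExampleResource` and
// `example_rid_owner_usage` are hypothetical names, everything else is the API
// declared above. RID_Owner stores the values in place, while RID_PtrOwner stores
// externally owned pointers and returns that pointer from get_or_null().
struct ExampleResource {
	int value = 0;
};

inline void example_rid_owner_usage() {
	static RID_Owner<ExampleResource> owner;

	ExampleResource res;
	res.value = 42;
	RID rid = owner.make_rid(res); // Allocates a slot; the validator lives in the high 32 bits of the id.

	ExampleResource *ptr = owner.get_or_null(rid); // Returns nullptr for stale or foreign RIDs.
	if (ptr && owner.owns(rid)) {
		ptr->value += 1;
		owner.free(rid); // Destroys the stored value and invalidates the RID.
	}
}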

217
core/templates/ring_buffer.h Normal file
View File

@@ -0,0 +1,217 @@
/**************************************************************************/
/* ring_buffer.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/templates/local_vector.h"
template <typename T>
class RingBuffer {
LocalVector<T> data;
int read_pos = 0;
int write_pos = 0;
int size_mask;
inline int inc(int &p_var, int p_size) const {
int ret = p_var;
p_var += p_size;
p_var = p_var & size_mask;
return ret;
}
public:
T read() {
ERR_FAIL_COND_V(space_left() < 1, T());
return data.ptr()[inc(read_pos, 1)];
}
int read(T *p_buf, int p_size, bool p_advance = true) {
int left = data_left();
p_size = MIN(left, p_size);
int pos = read_pos;
int to_read = p_size;
int dst = 0;
while (to_read) {
int end = pos + to_read;
end = MIN(end, size());
int total = end - pos;
const T *read = data.ptr();
for (int i = 0; i < total; i++) {
p_buf[dst++] = read[pos + i];
}
to_read -= total;
pos = 0;
}
if (p_advance) {
inc(read_pos, p_size);
}
return p_size;
}
int copy(T *p_buf, int p_offset, int p_size) const {
int left = data_left();
if ((p_offset + p_size) > left) {
p_size -= left - p_offset;
if (p_size <= 0) {
return 0;
}
}
p_size = MIN(left, p_size);
int pos = read_pos;
inc(pos, p_offset);
int to_read = p_size;
int dst = 0;
while (to_read) {
int end = pos + to_read;
end = MIN(end, size());
int total = end - pos;
for (int i = 0; i < total; i++) {
p_buf[dst++] = data[pos + i];
}
to_read -= total;
pos = 0;
}
return p_size;
}
int find(const T &t, int p_offset, int p_max_size) const {
int left = data_left();
if ((p_offset + p_max_size) > left) {
p_max_size -= left - p_offset;
if (p_max_size <= 0) {
return 0;
}
}
p_max_size = MIN(left, p_max_size);
int pos = read_pos;
inc(pos, p_offset);
int to_read = p_max_size;
while (to_read) {
int end = pos + to_read;
end = MIN(end, size());
int total = end - pos;
for (int i = 0; i < total; i++) {
if (data[pos + i] == t) {
return i + (p_max_size - to_read);
}
}
to_read -= total;
pos = 0;
}
return -1;
}
inline int advance_read(int p_n) {
p_n = MIN(p_n, data_left());
inc(read_pos, p_n);
return p_n;
}
inline int decrease_write(int p_n) {
p_n = MIN(p_n, data_left());
inc(write_pos, size_mask + 1 - p_n);
return p_n;
}
Error write(const T &p_v) {
ERR_FAIL_COND_V(space_left() < 1, FAILED);
data[inc(write_pos, 1)] = p_v;
return OK;
}
int write(const T *p_buf, int p_size) {
int left = space_left();
p_size = MIN(left, p_size);
int pos = write_pos;
int to_write = p_size;
int src = 0;
while (to_write) {
int end = pos + to_write;
end = MIN(end, size());
int total = end - pos;
for (int i = 0; i < total; i++) {
data[pos + i] = p_buf[src++];
}
to_write -= total;
pos = 0;
}
inc(write_pos, p_size);
return p_size;
}
inline int space_left() const {
int left = read_pos - write_pos;
if (left < 0) {
return size() + left - 1;
}
if (left == 0) {
return size() - 1;
}
return left - 1;
}
inline int data_left() const {
return size() - space_left() - 1;
}
inline int size() const {
return data.size();
}
inline void clear() {
read_pos = 0;
write_pos = 0;
}
void resize(int p_power) {
int old_size = size();
int new_size = 1 << p_power;
int mask = new_size - 1;
data.resize(int64_t(1) << int64_t(p_power));
if (old_size < new_size && read_pos > write_pos) {
for (int i = 0; i < write_pos; i++) {
data[(old_size + i) & mask] = data[i];
}
write_pos = (old_size + write_pos) & mask;
} else {
read_pos = read_pos & mask;
write_pos = write_pos & mask;
}
size_mask = mask;
}
RingBuffer(int p_power = 0) {
resize(p_power);
}
~RingBuffer() {}
};
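// A minimal usage sketch for illustration only; `example_ring_buffer_usage` is a
// hypothetical name, the rest is the API above. The capacity is 1 << p_power elements,
// and one slot is always kept unused so that space_left() can distinguish a full
// buffer from an empty one.
inline void example_ring_buffer_usage() {
	RingBuffer<uint8_t> rb(4); // 16 slots, 15 of them usable.

	const uint8_t in[4] = { 1, 2, 3, 4 };
	rb.write(in, 4); // Returns how many elements were actually written.

	uint8_t out[4];
	int read_count = rb.read(out, 4); // Copies up to 4 elements and advances the read position.
	(void)read_count;
}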

244
core/templates/safe_list.h Normal file
View File

@@ -0,0 +1,244 @@
/**************************************************************************/
/* safe_list.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/os/memory.h"
#include "core/typedefs.h"
#include <atomic>
#include <functional>
#include <initializer_list>
// Design goals for these classes:
// - Accessing this list with an iterator will never result in a use-after free,
// even if the element being accessed has been logically removed from the list on
// another thread.
// - Logical deletion from the list will not result in deallocation at that time,
// instead the node will be deallocated at a later time when it is safe to do so.
// - No blocking synchronization primitives will be used.
// This is used in very specific areas of the engine where it's critical that these guarantees are held.
template <typename T, typename A = DefaultAllocator>
class SafeList {
struct SafeListNode {
std::atomic<SafeListNode *> next = nullptr;
// If the node is logically deleted, this pointer will typically point
// to the previous list item in time that was also logically deleted.
std::atomic<SafeListNode *> graveyard_next = nullptr;
std::function<void(T)> deletion_fn = [](T t) { return; };
T val;
};
static_assert(std::atomic<T>::is_always_lock_free);
std::atomic<SafeListNode *> head = nullptr;
std::atomic<SafeListNode *> graveyard_head = nullptr;
std::atomic_uint active_iterator_count = 0;
public:
class Iterator {
friend class SafeList;
SafeListNode *cursor = nullptr;
SafeList *list = nullptr;
Iterator(SafeListNode *p_cursor, SafeList *p_list) :
cursor(p_cursor), list(p_list) {
list->active_iterator_count++;
}
public:
Iterator(const Iterator &p_other) :
cursor(p_other.cursor), list(p_other.list) {
list->active_iterator_count++;
}
~Iterator() {
list->active_iterator_count--;
}
public:
T &operator*() {
return cursor->val;
}
Iterator &operator++() {
cursor = cursor->next;
return *this;
}
// These two operators are mostly useful for comparisons to nullptr.
bool operator==(const void *p_other) const {
return cursor == p_other;
}
bool operator!=(const void *p_other) const {
return cursor != p_other;
}
// These two allow easy range-based for loops.
bool operator==(const Iterator &p_other) const {
return cursor == p_other.cursor;
}
bool operator!=(const Iterator &p_other) const {
return cursor != p_other.cursor;
}
};
public:
// Calling this will cause an allocation.
void insert(T p_value) {
SafeListNode *new_node = memnew_allocator(SafeListNode, A);
new_node->val = p_value;
SafeListNode *expected_head = nullptr;
do {
expected_head = head.load();
new_node->next.store(expected_head);
} while (!head.compare_exchange_strong(/* expected= */ expected_head, /* new= */ new_node));
}
Iterator find(T p_value) {
for (Iterator it = begin(); it != end(); ++it) {
if (*it == p_value) {
return it;
}
}
return end();
}
void erase(T p_value, std::function<void(T)> p_deletion_fn) {
Iterator tmp = find(p_value);
erase(tmp, p_deletion_fn);
}
void erase(T p_value) {
Iterator tmp = find(p_value);
erase(tmp, [](T t) { return; });
}
void erase(Iterator &p_iterator, std::function<void(T)> p_deletion_fn) {
p_iterator.cursor->deletion_fn = p_deletion_fn;
erase(p_iterator);
}
void erase(Iterator &p_iterator) {
if (find(p_iterator.cursor->val) == nullptr) {
// Not in the list, nothing to do.
return;
}
// First, remove the node from the list.
while (true) {
Iterator prev = begin();
SafeListNode *expected_head = prev.cursor;
for (; prev != end(); ++prev) {
if (prev.cursor && prev.cursor->next == p_iterator.cursor) {
break;
}
}
if (prev != end()) {
// There exists a node before this.
prev.cursor->next.store(p_iterator.cursor->next.load());
// Done.
break;
} else {
if (head.compare_exchange_strong(/* expected= */ expected_head, /* new= */ p_iterator.cursor->next.load())) {
// Successfully reassigned the head pointer before another thread changed it to something else.
break;
}
// Fall through upon failure, try again.
}
}
// Then queue it for deletion by putting it in the node graveyard.
// Don't touch `next` because an iterator might still be pointing at this node.
SafeListNode *expected_head = nullptr;
do {
expected_head = graveyard_head.load();
p_iterator.cursor->graveyard_next.store(expected_head);
} while (!graveyard_head.compare_exchange_strong(/* expected= */ expected_head, /* new= */ p_iterator.cursor));
}
Iterator begin() {
return Iterator(head.load(), this);
}
Iterator end() {
return Iterator(nullptr, this);
}
// Calling this will cause zero to many deallocations.
bool maybe_cleanup() {
SafeListNode *cursor = nullptr;
SafeListNode *new_graveyard_head = nullptr;
do {
// The access order here is theoretically important.
cursor = graveyard_head.load();
if (active_iterator_count.load() != 0) {
// It's not safe to clean up with an active iterator, because that iterator
// could be pointing to an element that we want to delete.
return false;
}
// Any iterator created after this point will never point to a deleted node.
// Swap it out with the current graveyard head.
} while (!graveyard_head.compare_exchange_strong(/* expected= */ cursor, /* new= */ new_graveyard_head));
// Our graveyard list is now unreachable by any active iterators,
// detached from the main graveyard head and ready for deletion.
while (cursor) {
SafeListNode *tmp = cursor;
cursor = cursor->graveyard_next;
tmp->deletion_fn(tmp->val);
memdelete_allocator<SafeListNode, A>(tmp);
}
return true;
}
_FORCE_INLINE_ SafeList() {}
_FORCE_INLINE_ SafeList(std::initializer_list<T> p_init) {
for (const T &E : p_init) {
insert(E);
}
}
~SafeList() {
#ifdef DEBUG_ENABLED
if (!maybe_cleanup()) {
ERR_PRINT("There are still iterators around when destructing a SafeList. Memory will be leaked. This is a bug.");
}
#else
maybe_cleanup();
#endif
}
};
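// A minimal usage sketch for illustration only; `example_safe_list_usage` is a
// hypothetical name. erase() only unlinks the node; the memory is reclaimed later by
// maybe_cleanup(), and only while no iterators are alive.
inline void example_safe_list_usage() {
	SafeList<int> list;
	list.insert(1);
	list.insert(2);

	for (SafeList<int>::Iterator it = list.begin(); it != list.end(); ++it) {
		if (*it == 1) {
			list.erase(it); // Moves the node to the graveyard; `it` can still be advanced safely.
		}
	}

	list.maybe_cleanup(); // Frees graveyard nodes now that no iterator is active.
}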

223
core/templates/safe_refcount.h Normal file
View File

@@ -0,0 +1,223 @@
/**************************************************************************/
/* safe_refcount.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/typedefs.h"
#ifdef DEV_ENABLED
#include "core/error/error_macros.h"
#endif
#include <atomic>
#include <type_traits> // IWYU pragma: keep // Used in macro.
// Design goals for these classes:
// - No automatic conversions or arithmetic operators,
// to keep explicit the use of atomics everywhere.
// - Using acquire-release semantics, even to set the first value.
// The first value may be set relaxedly in many cases, but adding the distinction
// between relaxed and unrelaxed operation to the interface would make it needlessly
// flexible. There's negligible waste in having release semantics for the initial
// value and, as an important benefit, you can be sure the value is properly synchronized
// even with threads that are already running.
// These are used in very specific areas of the engine where it's critical that these guarantees are held.
#define SAFE_NUMERIC_TYPE_PUN_GUARANTEES(m_type) \
static_assert(sizeof(SafeNumeric<m_type>) == sizeof(m_type)); \
static_assert(alignof(SafeNumeric<m_type>) == alignof(m_type)); \
static_assert(std::is_trivially_destructible_v<std::atomic<m_type>>);
#define SAFE_FLAG_TYPE_PUN_GUARANTEES \
static_assert(sizeof(SafeFlag) == sizeof(bool)); \
static_assert(alignof(SafeFlag) == alignof(bool));
template <typename T>
class SafeNumeric {
std::atomic<T> value;
static_assert(std::atomic<T>::is_always_lock_free);
public:
_ALWAYS_INLINE_ void set(T p_value) {
value.store(p_value, std::memory_order_release);
}
_ALWAYS_INLINE_ T get() const {
return value.load(std::memory_order_acquire);
}
_ALWAYS_INLINE_ T increment() {
return value.fetch_add(1, std::memory_order_acq_rel) + 1;
}
// Returns the original value instead of the new one
_ALWAYS_INLINE_ T postincrement() {
return value.fetch_add(1, std::memory_order_acq_rel);
}
_ALWAYS_INLINE_ T decrement() {
return value.fetch_sub(1, std::memory_order_acq_rel) - 1;
}
// Returns the original value instead of the new one
_ALWAYS_INLINE_ T postdecrement() {
return value.fetch_sub(1, std::memory_order_acq_rel);
}
_ALWAYS_INLINE_ T add(T p_value) {
return value.fetch_add(p_value, std::memory_order_acq_rel) + p_value;
}
// Returns the original value instead of the new one
_ALWAYS_INLINE_ T postadd(T p_value) {
return value.fetch_add(p_value, std::memory_order_acq_rel);
}
_ALWAYS_INLINE_ T sub(T p_value) {
return value.fetch_sub(p_value, std::memory_order_acq_rel) - p_value;
}
_ALWAYS_INLINE_ T bit_or(T p_value) {
return value.fetch_or(p_value, std::memory_order_acq_rel);
}
_ALWAYS_INLINE_ T bit_and(T p_value) {
return value.fetch_and(p_value, std::memory_order_acq_rel);
}
_ALWAYS_INLINE_ T bit_xor(T p_value) {
return value.fetch_xor(p_value, std::memory_order_acq_rel);
}
// Returns the original value instead of the new one
_ALWAYS_INLINE_ T postsub(T p_value) {
return value.fetch_sub(p_value, std::memory_order_acq_rel);
}
_ALWAYS_INLINE_ T exchange_if_greater(T p_value) {
while (true) {
T tmp = value.load(std::memory_order_acquire);
if (tmp >= p_value) {
return tmp; // already greater, or equal
}
if (value.compare_exchange_weak(tmp, p_value, std::memory_order_acq_rel)) {
return p_value;
}
}
}
_ALWAYS_INLINE_ T conditional_increment() {
while (true) {
T c = value.load(std::memory_order_acquire);
if (c == 0) {
return 0;
}
if (value.compare_exchange_weak(c, c + 1, std::memory_order_acq_rel)) {
return c + 1;
}
}
}
_ALWAYS_INLINE_ explicit SafeNumeric(T p_value = static_cast<T>(0)) {
set(p_value);
}
};
class SafeFlag {
std::atomic_bool flag;
static_assert(std::atomic_bool::is_always_lock_free);
public:
_ALWAYS_INLINE_ bool is_set() const {
return flag.load(std::memory_order_acquire);
}
_ALWAYS_INLINE_ void set() {
flag.store(true, std::memory_order_release);
}
_ALWAYS_INLINE_ void clear() {
flag.store(false, std::memory_order_release);
}
_ALWAYS_INLINE_ void set_to(bool p_value) {
flag.store(p_value, std::memory_order_release);
}
_ALWAYS_INLINE_ explicit SafeFlag(bool p_value = false) {
set_to(p_value);
}
};
class SafeRefCount {
SafeNumeric<uint32_t> count;
#ifdef DEV_ENABLED
_ALWAYS_INLINE_ void _check_unref_safety() {
// This won't catch every misuse, but it's better than nothing.
CRASH_COND_MSG(count.get() == 0,
"Trying to unreference a SafeRefCount which is already zero is wrong and a symptom of it being misused.\n"
"Upon a SafeRefCount reaching zero any object whose lifetime is tied to it, as well as the ref count itself, must be destroyed.\n"
"Moreover, to guarantee that, no multiple threads should be racing to do the final unreferencing to zero.");
}
#endif
public:
_ALWAYS_INLINE_ bool ref() { // true on success
return count.conditional_increment() != 0;
}
_ALWAYS_INLINE_ uint32_t refval() { // non-zero on success
return count.conditional_increment();
}
_ALWAYS_INLINE_ bool unref() { // true if must be disposed of
#ifdef DEV_ENABLED
_check_unref_safety();
#endif
return count.decrement() == 0;
}
_ALWAYS_INLINE_ uint32_t unrefval() { // 0 if must be disposed of
#ifdef DEV_ENABLED
_check_unref_safety();
#endif
return count.decrement();
}
_ALWAYS_INLINE_ uint32_t get() const {
return count.get();
}
_ALWAYS_INLINE_ void init(uint32_t p_value = 1) {
count.set(p_value);
}
};
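// A minimal usage sketch for illustration only; `example_safe_refcount_usage` is a
// hypothetical name. SafeNumeric wraps std::atomic with acquire/release semantics, and
// SafeRefCount::ref() refuses to revive a count that has already reached zero.
inline void example_safe_refcount_usage() {
	SafeNumeric<uint32_t> highest;
	highest.exchange_if_greater(10); // Stores 10 only if it is greater than the current value.

	SafeRefCount rc;
	rc.init(); // Count starts at 1.
	if (rc.ref()) { // Count is now 2; ref() returns false once the count has hit 0.
		if (rc.unref()) {
			// Last reference released: the owning object must be disposed of here.
		}
	}
}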

171
core/templates/self_list.h Normal file
View File

@@ -0,0 +1,171 @@
/**************************************************************************/
/* self_list.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/error/error_macros.h"
#include "core/templates/sort_list.h"
#include "core/typedefs.h"
template <typename T>
class SelfList {
public:
class List {
SelfList<T> *_first = nullptr;
SelfList<T> *_last = nullptr;
public:
void add(SelfList<T> *p_elem) {
ERR_FAIL_COND(p_elem->_root);
p_elem->_root = this;
p_elem->_next = _first;
p_elem->_prev = nullptr;
if (_first) {
_first->_prev = p_elem;
} else {
_last = p_elem;
}
_first = p_elem;
}
void add_last(SelfList<T> *p_elem) {
ERR_FAIL_COND(p_elem->_root);
p_elem->_root = this;
p_elem->_next = nullptr;
p_elem->_prev = _last;
if (_last) {
_last->_next = p_elem;
} else {
_first = p_elem;
}
_last = p_elem;
}
void remove(SelfList<T> *p_elem) {
ERR_FAIL_COND(p_elem->_root != this);
if (p_elem->_next) {
p_elem->_next->_prev = p_elem->_prev;
}
if (p_elem->_prev) {
p_elem->_prev->_next = p_elem->_next;
}
if (_first == p_elem) {
_first = p_elem->_next;
}
if (_last == p_elem) {
_last = p_elem->_prev;
}
p_elem->_next = nullptr;
p_elem->_prev = nullptr;
p_elem->_root = nullptr;
}
void clear() {
while (_first) {
remove(_first);
}
}
void sort() {
sort_custom<Comparator<T>>();
}
template <typename C>
void sort_custom() {
if (_first == _last) {
return;
}
struct PtrComparator {
C compare;
_FORCE_INLINE_ bool operator()(const T *p_a, const T *p_b) const { return compare(*p_a, *p_b); }
};
using Element = SelfList<T>;
SortList<Element, T *, &Element::_self, &Element::_prev, &Element::_next, PtrComparator> sorter;
sorter.sort(_first, _last);
}
_FORCE_INLINE_ SelfList<T> *first() { return _first; }
_FORCE_INLINE_ const SelfList<T> *first() const { return _first; }
// Forbid copying, which has broken behavior.
void operator=(const List &) = delete;
_FORCE_INLINE_ List() {}
_FORCE_INLINE_ ~List() {
// A self list must be empty on destruction.
DEV_ASSERT(_first == nullptr);
}
};
private:
List *_root = nullptr;
T *_self = nullptr;
SelfList<T> *_next = nullptr;
SelfList<T> *_prev = nullptr;
public:
_FORCE_INLINE_ bool in_list() const { return _root; }
_FORCE_INLINE_ void remove_from_list() {
if (_root) {
_root->remove(this);
}
}
_FORCE_INLINE_ SelfList<T> *next() { return _next; }
_FORCE_INLINE_ SelfList<T> *prev() { return _prev; }
_FORCE_INLINE_ const SelfList<T> *next() const { return _next; }
_FORCE_INLINE_ const SelfList<T> *prev() const { return _prev; }
_FORCE_INLINE_ T *self() const { return _self; }
// Forbid copying, which has broken behavior.
void operator=(const SelfList<T> &) = delete;
_FORCE_INLINE_ SelfList(T *p_self) {
_self = p_self;
}
_FORCE_INLINE_ ~SelfList() {
if (_root) {
_root->remove(this);
}
}
};
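// A minimal usage sketch for illustration only; `ExampleItem` and
// `example_self_list_usage` are hypothetical names. The element embeds its own list
// node, so adding and removing never allocates.
struct ExampleItem {
	SelfList<ExampleItem> list_element;
	int value = 0;
	ExampleItem() :
			list_element(this) {}
};

inline void example_self_list_usage() {
	SelfList<ExampleItem>::List dirty_list;

	ExampleItem item;
	dirty_list.add(&item.list_element);

	for (SelfList<ExampleItem> *e = dirty_list.first(); e; e = e->next()) {
		e->self()->value += 1; // self() returns the owning ExampleItem.
	}

	item.list_element.remove_from_list(); // The list must be empty before it is destroyed.
}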

36
core/templates/simple_type.h Normal file
View File

@@ -0,0 +1,36 @@
/**************************************************************************/
/* simple_type.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include <type_traits>
template <typename T>
using GetSimpleTypeT = typename std::remove_cv_t<std::remove_reference_t<T>>;

315
core/templates/sort_array.h Normal file
View File

@@ -0,0 +1,315 @@
/**************************************************************************/
/* sort_array.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/error/error_macros.h"
#include "core/typedefs.h"
#define ERR_BAD_COMPARE(cond) \
if (unlikely(cond)) { \
ERR_PRINT("bad comparison function; sorting will be broken"); \
break; \
}
#ifdef DEBUG_ENABLED
#define SORT_ARRAY_VALIDATE_ENABLED true
#else
#define SORT_ARRAY_VALIDATE_ENABLED false
#endif
template <typename T, typename Comparator = Comparator<T>, bool Validate = SORT_ARRAY_VALIDATE_ENABLED>
class SortArray {
enum {
INTROSORT_THRESHOLD = 16
};
public:
Comparator compare;
inline int64_t median_of_3_index(const T *p_ptr, int64_t a_index, int64_t b_index, int64_t c_index) const {
const T &a = p_ptr[a_index];
const T &b = p_ptr[b_index];
const T &c = p_ptr[c_index];
if (compare(a, b)) {
if (compare(b, c)) {
return b_index;
} else if (compare(a, c)) {
return c_index;
} else {
return a_index;
}
} else if (compare(a, c)) {
return a_index;
} else if (compare(b, c)) {
return c_index;
} else {
return b_index;
}
}
inline int64_t bitlog(int64_t n) const {
int64_t k;
for (k = 0; n != 1; n >>= 1) {
++k;
}
return k;
}
/* Heap / Heapsort functions */
inline void push_heap(int64_t p_first, int64_t p_hole_idx, int64_t p_top_index, T p_value, T *p_array) const {
int64_t parent = (p_hole_idx - 1) / 2;
while (p_hole_idx > p_top_index && compare(p_array[p_first + parent], p_value)) {
p_array[p_first + p_hole_idx] = p_array[p_first + parent];
p_hole_idx = parent;
parent = (p_hole_idx - 1) / 2;
}
p_array[p_first + p_hole_idx] = p_value;
}
inline void pop_heap(int64_t p_first, int64_t p_last, int64_t p_result, T p_value, T *p_array) const {
p_array[p_result] = p_array[p_first];
adjust_heap(p_first, 0, p_last - p_first, p_value, p_array);
}
inline void pop_heap(int64_t p_first, int64_t p_last, T *p_array) const {
pop_heap(p_first, p_last - 1, p_last - 1, p_array[p_last - 1], p_array);
}
inline void adjust_heap(int64_t p_first, int64_t p_hole_idx, int64_t p_len, T p_value, T *p_array) const {
int64_t top_index = p_hole_idx;
int64_t second_child = 2 * p_hole_idx + 2;
while (second_child < p_len) {
if (compare(p_array[p_first + second_child], p_array[p_first + (second_child - 1)])) {
second_child--;
}
p_array[p_first + p_hole_idx] = p_array[p_first + second_child];
p_hole_idx = second_child;
second_child = 2 * (second_child + 1);
}
if (second_child == p_len) {
p_array[p_first + p_hole_idx] = p_array[p_first + (second_child - 1)];
p_hole_idx = second_child - 1;
}
push_heap(p_first, p_hole_idx, top_index, p_value, p_array);
}
inline void sort_heap(int64_t p_first, int64_t p_last, T *p_array) const {
while (p_last - p_first > 1) {
pop_heap(p_first, p_last--, p_array);
}
}
inline void make_heap(int64_t p_first, int64_t p_last, T *p_array) const {
if (p_last - p_first < 2) {
return;
}
int64_t len = p_last - p_first;
int64_t parent = (len - 2) / 2;
while (true) {
adjust_heap(p_first, parent, len, p_array[p_first + parent], p_array);
if (parent == 0) {
return;
}
parent--;
}
}
inline void partial_sort(int64_t p_first, int64_t p_last, int64_t p_middle, T *p_array) const {
make_heap(p_first, p_middle, p_array);
for (int64_t i = p_middle; i < p_last; i++) {
if (compare(p_array[i], p_array[p_first])) {
pop_heap(p_first, p_middle, i, p_array[i], p_array);
}
}
sort_heap(p_first, p_middle, p_array);
}
inline void partial_select(int64_t p_first, int64_t p_last, int64_t p_middle, T *p_array) const {
make_heap(p_first, p_middle, p_array);
for (int64_t i = p_middle; i < p_last; i++) {
if (compare(p_array[i], p_array[p_first])) {
pop_heap(p_first, p_middle, i, p_array[i], p_array);
}
}
}
inline int64_t partitioner(int64_t p_first, int64_t p_last, int64_t p_pivot, T *p_array) const {
const int64_t unmodified_first = p_first;
const int64_t unmodified_last = p_last;
const T *pivot_element_location = &p_array[p_pivot];
while (true) {
while (p_first != p_pivot && compare(p_array[p_first], *pivot_element_location)) {
if constexpr (Validate) {
ERR_BAD_COMPARE(p_first == unmodified_last - 1);
}
p_first++;
}
p_last--;
while (p_last != p_pivot && compare(*pivot_element_location, p_array[p_last])) {
if constexpr (Validate) {
ERR_BAD_COMPARE(p_last == unmodified_first);
}
p_last--;
}
if (p_first >= p_last) {
return p_first;
}
if (pivot_element_location == &p_array[p_first]) {
pivot_element_location = &p_array[p_last];
} else if (pivot_element_location == &p_array[p_last]) {
pivot_element_location = &p_array[p_first];
}
SWAP(p_array[p_first], p_array[p_last]);
p_first++;
}
}
inline void introsort(int64_t p_first, int64_t p_last, T *p_array, int64_t p_max_depth) const {
while (p_last - p_first > INTROSORT_THRESHOLD) {
if (p_max_depth == 0) {
partial_sort(p_first, p_last, p_last, p_array);
return;
}
p_max_depth--;
int64_t cut = partitioner(
p_first,
p_last,
median_of_3_index(p_array, p_first, p_first + (p_last - p_first) / 2, p_last - 1),
p_array);
introsort(cut, p_last, p_array, p_max_depth);
p_last = cut;
}
}
inline void introselect(int64_t p_first, int64_t p_nth, int64_t p_last, T *p_array, int64_t p_max_depth) const {
while (p_last - p_first > 3) {
if (p_max_depth == 0) {
partial_select(p_first, p_nth + 1, p_last, p_array);
SWAP(p_first, p_nth);
return;
}
p_max_depth--;
int64_t cut = partitioner(
p_first,
p_last,
median_of_3_index(p_array, p_first, p_first + (p_last - p_first) / 2, p_last - 1),
p_array);
if (cut <= p_nth) {
p_first = cut;
} else {
p_last = cut;
}
}
insertion_sort(p_first, p_last, p_array);
}
inline void unguarded_linear_insert(int64_t p_last, T p_value, T *p_array) const {
int64_t next = p_last - 1;
while (compare(p_value, p_array[next])) {
if constexpr (Validate) {
ERR_BAD_COMPARE(next == 0);
}
p_array[p_last] = p_array[next];
p_last = next;
next--;
}
p_array[p_last] = p_value;
}
inline void linear_insert(int64_t p_first, int64_t p_last, T *p_array) const {
T val = p_array[p_last];
if (compare(val, p_array[p_first])) {
for (int64_t i = p_last; i > p_first; i--) {
p_array[i] = p_array[i - 1];
}
p_array[p_first] = val;
} else {
unguarded_linear_insert(p_last, val, p_array);
}
}
inline void insertion_sort(int64_t p_first, int64_t p_last, T *p_array) const {
if (p_first == p_last) {
return;
}
for (int64_t i = p_first + 1; i != p_last; i++) {
linear_insert(p_first, i, p_array);
}
}
inline void unguarded_insertion_sort(int64_t p_first, int64_t p_last, T *p_array) const {
for (int64_t i = p_first; i != p_last; i++) {
unguarded_linear_insert(i, p_array[i], p_array);
}
}
inline void final_insertion_sort(int64_t p_first, int64_t p_last, T *p_array) const {
if (p_last - p_first > INTROSORT_THRESHOLD) {
insertion_sort(p_first, p_first + INTROSORT_THRESHOLD, p_array);
unguarded_insertion_sort(p_first + INTROSORT_THRESHOLD, p_last, p_array);
} else {
insertion_sort(p_first, p_last, p_array);
}
}
inline void sort_range(int64_t p_first, int64_t p_last, T *p_array) const {
if (p_first != p_last) {
introsort(p_first, p_last, p_array, bitlog(p_last - p_first) * 2);
final_insertion_sort(p_first, p_last, p_array);
}
}
inline void sort(T *p_array, int64_t p_len) const {
sort_range(0, p_len, p_array);
}
inline void nth_element(int64_t p_first, int64_t p_last, int64_t p_nth, T *p_array) const {
if (p_first == p_last || p_nth == p_last) {
return;
}
introselect(p_first, p_nth, p_last, p_array, bitlog(p_last - p_first) * 2);
}
};
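// A minimal usage sketch for illustration only; `example_sort_array_usage` is a
// hypothetical name. SortArray is the introsort backend behind Vector::sort_custom()
// and can also be used directly on raw buffers.
inline void example_sort_array_usage() {
	int values[] = { 5, 1, 4, 2, 3 };

	SortArray<int> sorter; // The default Comparator<int> sorts in ascending order.
	sorter.sort(values, 5);

	sorter.nth_element(0, 5, 2, values); // Leaves the 3rd smallest element at index 2.
}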

148
core/templates/sort_list.h Normal file
View File

@@ -0,0 +1,148 @@
/**************************************************************************/
/* sort_list.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/typedefs.h"
template <typename Element, typename T, T Element::*value, Element *Element::*prev, Element *Element::*next, typename Comparator = Comparator<T>>
class SortList {
public:
Comparator compare;
void sort(Element *&r_head, Element *&r_tail) {
Element *sorted_until;
if (_is_sorted(r_head, r_tail, sorted_until)) {
return;
}
// In case we're sorting only part of a larger list.
Element *head_prev = r_head->*prev;
r_head->*prev = nullptr;
Element *tail_next = r_tail->*next;
r_tail->*next = nullptr;
// Sort unsorted section and merge.
Element *head2 = sorted_until->*next;
_split(sorted_until, head2);
_merge_sort(head2, r_tail);
_merge(r_head, sorted_until, head2, r_tail, r_head, r_tail);
// Reconnect to larger list if needed.
if (head_prev) {
_connect(head_prev, r_head);
}
if (tail_next) {
_connect(r_tail, tail_next);
}
}
private:
bool _is_sorted(Element *p_head, Element *p_tail, Element *&r_sorted_until) {
r_sorted_until = p_head;
while (r_sorted_until != p_tail) {
if (compare(r_sorted_until->*next->*value, r_sorted_until->*value)) {
return false;
}
r_sorted_until = r_sorted_until->*next;
}
return true;
}
void _merge_sort(Element *&r_head, Element *&r_tail) {
if (r_head == r_tail) {
return;
}
Element *tail1 = _get_mid(r_head);
Element *head2 = tail1->*next;
_split(tail1, head2);
_merge_sort(r_head, tail1);
_merge_sort(head2, r_tail);
_merge(r_head, tail1, head2, r_tail, r_head, r_tail);
}
void _merge(
Element *p_head1, Element *p_tail1,
Element *p_head2, Element *p_tail2,
Element *&r_head, Element *&r_tail) {
if (compare(p_head2->*value, p_head1->*value)) {
r_head = p_head2;
p_head2 = p_head2->*next;
} else {
r_head = p_head1;
p_head1 = p_head1->*next;
}
Element *curr = r_head;
while (p_head1 && p_head2) {
if (compare(p_head2->*value, p_head1->*value)) {
_connect(curr, p_head2);
p_head2 = p_head2->*next;
} else {
_connect(curr, p_head1);
p_head1 = p_head1->*next;
}
curr = curr->*next;
}
if (p_head1) {
_connect(curr, p_head1);
r_tail = p_tail1;
} else {
_connect(curr, p_head2);
r_tail = p_tail2;
}
}
Element *_get_mid(Element *p_head) {
Element *end = p_head;
Element *mid = p_head;
while (end->*next && end->*next->*next) {
end = end->*next->*next;
mid = mid->*next;
}
return mid;
}
_FORCE_INLINE_ void _connect(Element *p_a, Element *p_b) {
p_a->*next = p_b;
p_b->*prev = p_a;
}
_FORCE_INLINE_ void _split(Element *p_a, Element *p_b) {
p_a->*next = nullptr;
p_b->*prev = nullptr;
}
};
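// A minimal usage sketch for illustration only; `ExampleNode` and
// `example_sort_list_usage` are hypothetical names. SelfList<T>::List::sort_custom()
// (see self_list.h) instantiates this template the same way, passing its own member
// pointers.
struct ExampleNode {
	int value = 0;
	ExampleNode *prev = nullptr;
	ExampleNode *next = nullptr;
};

inline void example_sort_list_usage() {
	ExampleNode a, b;
	a.value = 2;
	b.value = 1;
	a.next = &b;
	b.prev = &a;

	ExampleNode *head = &a;
	ExampleNode *tail = &b;

	SortList<ExampleNode, int, &ExampleNode::value, &ExampleNode::prev, &ExampleNode::next> sorter;
	sorter.sort(head, tail); // Relinks the nodes in place; head is now &b (1), tail is &a (2).
}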

170
core/templates/span.h Normal file
View File

@@ -0,0 +1,170 @@
/**************************************************************************/
/* span.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/error/error_macros.h"
#include "core/typedefs.h"
// Equivalent of std::span.
// Represents a view into a contiguous memory space.
// DISCLAIMER: This data type does not own the underlying buffer. DO NOT STORE IT.
// Additionally, for the lifetime of the Span, do not resize the buffer, and do not insert or remove elements from it.
// Failure to respect this may lead to crashes or undefined behavior.
template <typename T>
class Span {
const T *_ptr = nullptr;
uint64_t _len = 0;
public:
static constexpr bool is_string = std::disjunction_v<
std::is_same<T, char>,
std::is_same<T, char16_t>,
std::is_same<T, char32_t>,
std::is_same<T, wchar_t>>;
_FORCE_INLINE_ constexpr Span() = default;
_FORCE_INLINE_ Span(const T *p_ptr, uint64_t p_len) :
_ptr(p_ptr), _len(p_len) {
#ifdef DEBUG_ENABLED
// TODO In c++20, make this check run only in non-consteval, and make this constructor constexpr.
if (_ptr == nullptr && _len > 0) {
ERR_PRINT("Internal bug, please report: Span was created from nullptr with size > 0. Recovering by using size = 0.");
_len = 0;
}
#endif
}
// Allows creating Span directly from C arrays and string literals.
template <size_t N>
_FORCE_INLINE_ constexpr Span(const T (&p_array)[N]) :
_ptr(p_array), _len(N) {
if constexpr (is_string) {
// Cut off the \0 terminator implicitly added to string literals.
if (N > 0 && p_array[N - 1] == '\0') {
_len--;
}
}
}
_FORCE_INLINE_ constexpr uint64_t size() const { return _len; }
_FORCE_INLINE_ constexpr bool is_empty() const { return _len == 0; }
_FORCE_INLINE_ constexpr const T *ptr() const { return _ptr; }
// NOTE: Span subscripts sanity check the bounds to avoid undefined behavior.
// This is slower than direct buffer access and can prevent autovectorization.
// If the bounds are known, use ptr() subscript instead.
_FORCE_INLINE_ constexpr const T &operator[](uint64_t p_idx) const {
CRASH_COND(p_idx >= _len);
return _ptr[p_idx];
}
_FORCE_INLINE_ constexpr const T *begin() const { return _ptr; }
_FORCE_INLINE_ constexpr const T *end() const { return _ptr + _len; }
template <typename T1>
_FORCE_INLINE_ constexpr Span<T1> reinterpret() const {
return Span<T1>(reinterpret_cast<const T1 *>(_ptr), _len * sizeof(T) / sizeof(T1));
}
// Algorithms.
constexpr int64_t find(const T &p_val, uint64_t p_from = 0) const;
constexpr int64_t rfind(const T &p_val, uint64_t p_from) const;
_FORCE_INLINE_ constexpr int64_t rfind(const T &p_val) const { return rfind(p_val, size() - 1); }
constexpr uint64_t count(const T &p_val) const;
/// Find the index of the given value using binary search.
/// Note: Assumes that elements in the span are sorted. Otherwise, use find() instead.
template <typename Comparator = Comparator<T>>
constexpr uint64_t bisect(const T &p_value, bool p_before, Comparator compare = Comparator()) const;
};
template <typename T>
constexpr int64_t Span<T>::find(const T &p_val, uint64_t p_from) const {
for (uint64_t i = p_from; i < size(); i++) {
if (ptr()[i] == p_val) {
return i;
}
}
return -1;
}
template <typename T>
constexpr int64_t Span<T>::rfind(const T &p_val, uint64_t p_from) const {
for (int64_t i = p_from; i >= 0; i--) {
if (ptr()[i] == p_val) {
return i;
}
}
return -1;
}
template <typename T>
constexpr uint64_t Span<T>::count(const T &p_val) const {
uint64_t amount = 0;
for (uint64_t i = 0; i < size(); i++) {
if (ptr()[i] == p_val) {
amount++;
}
}
return amount;
}
template <typename T>
template <typename Comparator>
constexpr uint64_t Span<T>::bisect(const T &p_value, bool p_before, Comparator compare) const {
uint64_t lo = 0;
uint64_t hi = size();
if (p_before) {
while (lo < hi) {
const uint64_t mid = (lo + hi) / 2;
if (compare(ptr()[mid], p_value)) {
lo = mid + 1;
} else {
hi = mid;
}
}
} else {
while (lo < hi) {
const uint64_t mid = (lo + hi) / 2;
if (compare(p_value, ptr()[mid])) {
hi = mid;
} else {
lo = mid + 1;
}
}
}
return lo;
}
// Zero-constructing Span initializes _ptr and _len to 0 (and thus empty).
template <typename T>
struct is_zero_constructible<Span<T>> : std::true_type {};
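// A minimal usage sketch for illustration only; `example_span_usage` is a hypothetical
// name. A Span never owns its buffer: the caller must keep the data alive and unresized
// for the Span's whole lifetime.
inline void example_span_usage() {
	const int values[] = { 1, 3, 5, 7 };
	Span<int> span(values, 4);

	int64_t idx = span.find(5); // 2, or -1 if the value is not present.
	uint64_t pos = span.bisect(4, true); // First index whose value is not less than 4 (here 2); requires sorted data.
	(void)idx;
	(void)pos;

	Span<char> hello("hello"); // String literals drop the trailing '\0', so size() == 5.
	(void)hello;
}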

122
core/templates/tuple.h Normal file
View File

@@ -0,0 +1,122 @@
/**************************************************************************/
/* tuple.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
// Simple recursive Tuple type that has no runtime overhead.
//
// The compile-time recursion works as follows:
// Assume the following: Tuple<int, float> my_tuple(42, 3.14f);
// This expands to a class hierarchy that inherits from the previous step.
// So in this case this leads to:
// - struct Tuple<int> : Tuple<float> <--- This contains the int value.
// - struct Tuple<float> <--- This contains the float value.
// where each of the classes has a single field of the type for that step in the
// recursion. So: float value; int value; etc.
//
// This works by splitting up the parameter pack for each step in the recursion minus the first.
// So the first step creates the "T value" from the first template parameter.
// Any further template arguments end up in "Rest", which we then use to instantiate a new
// tuple, but now minus the first argument. To write this all out:
//
// Tuple<int, float>
// step 1: Tuple T = int, Rest = float. Results in a Tuple<int> : Tuple<float>
// step 2: Tuple T = float, no Rest. Results in a Tuple<float>
//
// tuple_get<I> works through a similar recursion, using the inheritance chain to walk to the right node.
// In order to tuple_get<1>(my_tuple), from the example tuple above:
//
// 1. We want tuple_get<1> to return the float, which is one level "up" from Tuple<int> : Tuple<float>,
// (the real type of the Tuple "root").
// 2. Since index 1 > 0, it casts the tuple to its parent type (Tuple<float>). This works because
// we cast to Tuple<Rest...> which in this case is just float.
// 3. Now we're looking for index 0 in Tuple<float>, which directly returns its value field. Note
// how get<0> is a template specialization.
//
// At compile time, this gets fully resolved. The compiler sees get<1>(my_tuple) and:
// 1. Creates TupleGet<1, Tuple<int, float>>::tuple_get which contains the cast to Tuple<float>.
// 2. Creates TupleGet<0, Tuple<float>>::tuple_get which directly returns the value.
// 3. The compiler will then simply optimize all of this nonsense away and return the float directly.
#include "core/typedefs.h"
template <typename... Types>
struct Tuple;
template <>
struct Tuple<> {};
template <typename T, typename... Rest>
struct Tuple<T, Rest...> : Tuple<Rest...> {
T value;
Tuple() = default;
template <typename F, typename... R>
_FORCE_INLINE_ Tuple(F &&f, R &&...rest) :
Tuple<Rest...>(std::forward<R>(rest)...),
value(std::forward<F>(f)) {}
};
// Tuple is zero-constructible if and only if all constrained types are zero-constructible.
template <typename... Types>
struct is_zero_constructible<Tuple<Types...>> : std::conjunction<is_zero_constructible<Types>...> {};
template <size_t I, typename Tuple>
struct TupleGet;
template <typename First, typename... Rest>
struct TupleGet<0, Tuple<First, Rest...>> {
_FORCE_INLINE_ static First &tuple_get(Tuple<First, Rest...> &t) {
return t.value;
}
};
// The rationale for using auto here is that the alternative is writing a
// helper struct to create an otherwise useless type. We would have to write
// a second recursive template chain like: TupleGetType<I, Tuple<First, Rest...>>::type
// just to recover the type in the most baroque way possible.
template <size_t I, typename First, typename... Rest>
struct TupleGet<I, Tuple<First, Rest...>> {
_FORCE_INLINE_ static auto &tuple_get(Tuple<First, Rest...> &t) {
return TupleGet<I - 1, Tuple<Rest...>>::tuple_get(static_cast<Tuple<Rest...> &>(t));
}
};
template <size_t I, typename... Types>
_FORCE_INLINE_ auto &tuple_get(Tuple<Types...> &t) {
return TupleGet<I, Tuple<Types...>>::tuple_get(t);
}
template <size_t I, typename... Types>
_FORCE_INLINE_ const auto &tuple_get(const Tuple<Types...> &t) {
return TupleGet<I, Tuple<Types...>>::tuple_get(t);
}
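// A minimal usage sketch for illustration only; `example_tuple_usage` is a hypothetical
// name, mirroring the Tuple<int, float> walkthrough in the comment above.
inline void example_tuple_usage() {
	Tuple<int, float> my_tuple(42, 3.14f);

	int &i = tuple_get<0>(my_tuple); // 42, stored in the outermost Tuple<int, float> node.
	float &f = tuple_get<1>(my_tuple); // 3.14f, reached by casting down to the Tuple<float> base.

	i += 1;
	f *= 2.0f;
}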

373
core/templates/vector.h Normal file
View File

@@ -0,0 +1,373 @@
/**************************************************************************/
/* vector.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
/**
* @class Vector
* Vector container. Simple copy-on-write container.
*
* LocalVector is an alternative available for internal use when COW is not
* required.
*/
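/**
 * A minimal usage sketch (illustrative only, using the API declared below):
 *
 *     Vector<int> a;
 *     a.push_back(1);
 *     Vector<int> b = a; // `b` shares `a`'s buffer.
 *     b.write[0] = 2;    // The first write makes a private copy; `a` is unchanged.
 */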
#include "core/error/error_macros.h"
#include "core/templates/cowdata.h"
#include "core/templates/sort_array.h"
#include <initializer_list>
template <typename T>
class Vector;
template <typename T>
class VectorWriteProxy {
public:
_FORCE_INLINE_ T &operator[](typename CowData<T>::Size p_index) {
CRASH_BAD_INDEX(p_index, ((Vector<T> *)(this))->_cowdata.size());
return ((Vector<T> *)(this))->_cowdata.ptrw()[p_index];
}
};
template <typename T>
class Vector {
friend class VectorWriteProxy<T>;
public:
VectorWriteProxy<T> write;
typedef typename CowData<T>::Size Size;
private:
CowData<T> _cowdata;
public:
// Must take a copy instead of a reference (see GH-31736).
bool push_back(T p_elem);
_FORCE_INLINE_ bool append(const T &p_elem) { return push_back(p_elem); } //alias
void fill(T p_elem);
void remove_at(Size p_index) { _cowdata.remove_at(p_index); }
_FORCE_INLINE_ bool erase(const T &p_val) {
Size idx = find(p_val);
if (idx >= 0) {
remove_at(idx);
return true;
}
return false;
}
void reverse();
_FORCE_INLINE_ T *ptrw() { return _cowdata.ptrw(); }
_FORCE_INLINE_ const T *ptr() const { return _cowdata.ptr(); }
_FORCE_INLINE_ Size size() const { return _cowdata.size(); }
_FORCE_INLINE_ operator Span<T>() const { return _cowdata.span(); }
_FORCE_INLINE_ Span<T> span() const { return _cowdata.span(); }
_FORCE_INLINE_ void clear() { _cowdata.clear(); }
_FORCE_INLINE_ bool is_empty() const { return _cowdata.is_empty(); }
_FORCE_INLINE_ T get(Size p_index) { return _cowdata.get(p_index); }
_FORCE_INLINE_ const T &get(Size p_index) const { return _cowdata.get(p_index); }
_FORCE_INLINE_ void set(Size p_index, const T &p_elem) { _cowdata.set(p_index, p_elem); }
/// Resize the vector.
/// Elements are initialized (or not) depending on what the default C++ behavior for this type is.
_FORCE_INLINE_ Error resize(Size p_size) {
return _cowdata.template resize<!std::is_trivially_constructible_v<T>>(p_size);
}
/// Resize and set all values to 0 / false / nullptr.
/// This is only available for zero constructible types.
_FORCE_INLINE_ Error resize_initialized(Size p_size) {
return _cowdata.template resize<true>(p_size);
}
/// Resize without initializing the new elements; their contents are undefined until written.
/// This is only available for trivially destructible types (otherwise, an uninitialized resize might be UB).
_FORCE_INLINE_ Error resize_uninitialized(Size p_size) {
// resize() statically asserts that T is compatible, no need to do it ourselves.
return _cowdata.template resize<false>(p_size);
}
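// Illustrative example (buffer names are placeholders): for a trivially
// destructible element type whose contents are overwritten immediately,
// resize_uninitialized() skips the zero-fill that resize_initialized() would
// perform:
//     Vector<uint8_t> buf;
//     buf.resize_uninitialized(p_len); // Contents undefined until written.
//     memcpy(buf.ptrw(), p_src, p_len); // Caller fills every byte.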
_FORCE_INLINE_ const T &operator[](Size p_index) const { return _cowdata.get(p_index); }
// Must take a copy instead of a reference (see GH-31736).
Error insert(Size p_pos, T p_val) { return _cowdata.insert(p_pos, p_val); }
Size find(const T &p_val, Size p_from = 0) const {
if (p_from < 0) {
p_from = size() + p_from;
}
if (p_from < 0 || p_from >= size()) {
return -1;
}
return span().find(p_val, p_from);
}
Size rfind(const T &p_val, Size p_from = -1) const {
if (p_from < 0) {
p_from = size() + p_from;
}
if (p_from < 0 || p_from >= size()) {
return -1;
}
return span().rfind(p_val, p_from);
}
Size count(const T &p_val) const { return span().count(p_val); }
// Must take a copy instead of a reference (see GH-31736).
void append_array(Vector<T> p_other);
_FORCE_INLINE_ bool has(const T &p_val) const { return find(p_val) != -1; }
void sort() {
sort_custom<Comparator<T>>();
}
template <typename Comparator, bool Validate = SORT_ARRAY_VALIDATE_ENABLED, typename... Args>
void sort_custom(Args &&...args) {
Size len = _cowdata.size();
if (len == 0) {
return;
}
T *data = ptrw();
SortArray<T, Comparator, Validate> sorter{ args... };
sorter.sort(data, len);
}
Size bsearch(const T &p_value, bool p_before) {
return bsearch_custom<Comparator<T>>(p_value, p_before);
}
template <typename Comparator, typename Value, typename... Args>
Size bsearch_custom(const Value &p_value, bool p_before, Args &&...args) {
return span().bisect(p_value, p_before, Comparator{ args... });
}
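// Illustrative note, assuming bisect() follows the usual lower/upper bound
// convention: on a sorted { 1, 3, 5 }, bsearch(3, true) returns index 1 (first
// position not less than 3) and bsearch(3, false) returns index 2 (first
// position greater than 3); both are insertion points that keep the data sorted.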
Vector<T> duplicate() {
return *this;
}
void ordered_insert(const T &p_val) {
Size i;
for (i = 0; i < _cowdata.size(); i++) {
if (p_val < operator[](i)) {
break;
}
}
insert(i, p_val);
}
void operator=(const Vector &p_from) { _cowdata = p_from._cowdata; }
void operator=(Vector &&p_from) { _cowdata = std::move(p_from._cowdata); }
Vector<uint8_t> to_byte_array() const {
Vector<uint8_t> ret;
if (is_empty()) {
return ret;
}
size_t alloc_size = size() * sizeof(T);
ret.resize(alloc_size);
if (alloc_size) {
memcpy(ret.ptrw(), ptr(), alloc_size);
}
return ret;
}
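// Illustrative example: a Vector<int32_t> with 4 elements becomes a
// Vector<uint8_t> with 16 elements holding the raw in-memory bytes of the
// data (native endianness).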
Vector<T> slice(Size p_begin, Size p_end = CowData<T>::MAX_INT) const {
Vector<T> result;
const Size s = size();
Size begin = CLAMP(p_begin, -s, s);
if (begin < 0) {
begin += s;
}
Size end = CLAMP(p_end, -s, s);
if (end < 0) {
end += s;
}
ERR_FAIL_COND_V(begin > end, result);
Size result_size = end - begin;
result.resize(result_size);
const T *const r = ptr();
T *const w = result.ptrw();
for (Size i = 0; i < result_size; ++i) {
w[i] = r[begin + i];
}
return result;
}
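// Illustrative example: with { 1, 2, 3, 4 }, slice(1) yields { 2, 3, 4 } and
// slice(1, -1) yields { 2, 3 }; negative bounds count back from the end, as
// handled above.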
bool operator==(const Vector<T> &p_arr) const {
Size s = size();
if (s != p_arr.size()) {
return false;
}
for (Size i = 0; i < s; i++) {
if (operator[](i) != p_arr[i]) {
return false;
}
}
return true;
}
bool operator!=(const Vector<T> &p_arr) const {
Size s = size();
if (s != p_arr.size()) {
return true;
}
for (Size i = 0; i < s; i++) {
if (operator[](i) != p_arr[i]) {
return true;
}
}
return false;
}
struct Iterator {
_FORCE_INLINE_ T &operator*() const {
return *elem_ptr;
}
_FORCE_INLINE_ T *operator->() const { return elem_ptr; }
_FORCE_INLINE_ Iterator &operator++() {
elem_ptr++;
return *this;
}
_FORCE_INLINE_ Iterator &operator--() {
elem_ptr--;
return *this;
}
_FORCE_INLINE_ bool operator==(const Iterator &b) const { return elem_ptr == b.elem_ptr; }
_FORCE_INLINE_ bool operator!=(const Iterator &b) const { return elem_ptr != b.elem_ptr; }
Iterator(T *p_ptr) { elem_ptr = p_ptr; }
Iterator() {}
Iterator(const Iterator &p_it) { elem_ptr = p_it.elem_ptr; }
private:
T *elem_ptr = nullptr;
};
struct ConstIterator {
_FORCE_INLINE_ const T &operator*() const {
return *elem_ptr;
}
_FORCE_INLINE_ const T *operator->() const { return elem_ptr; }
_FORCE_INLINE_ ConstIterator &operator++() {
elem_ptr++;
return *this;
}
_FORCE_INLINE_ ConstIterator &operator--() {
elem_ptr--;
return *this;
}
_FORCE_INLINE_ bool operator==(const ConstIterator &b) const { return elem_ptr == b.elem_ptr; }
_FORCE_INLINE_ bool operator!=(const ConstIterator &b) const { return elem_ptr != b.elem_ptr; }
ConstIterator(const T *p_ptr) { elem_ptr = p_ptr; }
ConstIterator() {}
ConstIterator(const ConstIterator &p_it) { elem_ptr = p_it.elem_ptr; }
private:
const T *elem_ptr = nullptr;
};
_FORCE_INLINE_ Iterator begin() {
return Iterator(ptrw());
}
_FORCE_INLINE_ Iterator end() {
return Iterator(ptrw() + size());
}
_FORCE_INLINE_ ConstIterator begin() const {
return ConstIterator(ptr());
}
_FORCE_INLINE_ ConstIterator end() const {
return ConstIterator(ptr() + size());
}
_FORCE_INLINE_ Vector() {}
_FORCE_INLINE_ Vector(std::initializer_list<T> p_init) :
_cowdata(p_init) {}
_FORCE_INLINE_ Vector(const Vector &p_from) = default;
_FORCE_INLINE_ Vector(Vector &&p_from) = default;
_FORCE_INLINE_ ~Vector() {}
};
template <typename T>
void Vector<T>::reverse() {
T *p = ptrw();
for (Size i = 0; i < size() / 2; i++) {
SWAP(p[i], p[size() - i - 1]);
}
}
template <typename T>
void Vector<T>::append_array(Vector<T> p_other) {
const Size ds = p_other.size();
if (ds == 0) {
return;
}
const Size bs = size();
resize(bs + ds);
T *p = ptrw();
for (Size i = 0; i < ds; ++i) {
p[bs + i] = p_other[i];
}
}
template <typename T>
bool Vector<T>::push_back(T p_elem) {
Error err = resize(size() + 1);
ERR_FAIL_COND_V(err, true);
set(size() - 1, p_elem);
return false;
}
template <typename T>
void Vector<T>::fill(T p_elem) {
T *p = ptrw();
for (Size i = 0; i < size(); i++) {
p[i] = p_elem;
}
}
// Zero-constructing Vector initializes CowData.ptr() to nullptr and thus empty.
template <typename T>
struct is_zero_constructible<Vector<T>> : std::true_type {};
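// Illustrative usage sketch (not part of the header above; the function name
// and values are placeholders). It demonstrates the copy-on-write behavior
// described in the class comment: copies share one buffer until one of them
// is written to.
[[maybe_unused]] static void example_vector_cow_usage() {
	Vector<int> a;
	a.push_back(1);
	a.push_back(2);

	Vector<int> b = a; // Cheap copy: a and b share the same buffer.
	b.write[0] = 10; // First write detaches b into its own buffer.

	CRASH_COND(a[0] != 1); // a is unchanged by the write to b.
	CRASH_COND(b[0] != 10);

	// Read-only queries operate on the shared data without copying.
	CRASH_COND(a.find(2) != 1);
	CRASH_COND(a.slice(1) != Vector<int>({ 2 }));
}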

109
core/templates/vset.h Normal file
View File

@@ -0,0 +1,109 @@
/**************************************************************************/
/* vset.h */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#pragma once
#include "core/templates/vector.h"
#include "core/typedefs.h"
template <typename T>
class VSet {
Vector<T> _data;
protected:
_FORCE_INLINE_ int _find(const T &p_val, bool &r_exact) const {
r_exact = false;
if (_data.is_empty()) {
return 0;
}
int64_t pos = _data.span().bisect(p_val, true);
if (pos < _data.size() && !(p_val < _data[pos]) && !(_data[pos] < p_val)) {
r_exact = true;
}
return pos;
}
_FORCE_INLINE_ int _find_exact(const T &p_val) const {
if (_data.is_empty()) {
return -1;
}
int64_t pos = _data.span().bisect(p_val, true);
if (pos < _data.size() && !(p_val < _data[pos]) && !(_data[pos] < p_val)) {
return pos;
}
return -1;
}
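// Note: equality above is expressed as !(a < b) && !(b < a), so element types
// only need to provide operator<, mirroring standard ordered containers.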
public:
void insert(const T &p_val) {
bool exact;
int pos = _find(p_val, exact);
if (exact) {
return;
}
_data.insert(pos, p_val);
}
bool has(const T &p_val) const {
return _find_exact(p_val) != -1;
}
void erase(const T &p_val) {
int pos = _find_exact(p_val);
if (pos < 0) {
return;
}
_data.remove_at(pos);
}
int find(const T &p_val) const {
return _find_exact(p_val);
}
_FORCE_INLINE_ bool is_empty() const { return _data.is_empty(); }
_FORCE_INLINE_ int size() const { return _data.size(); }
inline T &operator[](int p_index) {
return _data.write[p_index];
}
inline const T &operator[](int p_index) const {
return _data[p_index];
}
_FORCE_INLINE_ VSet() {}
_FORCE_INLINE_ VSet(std::initializer_list<T> p_init) :
_data(p_init) {}
};
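// Illustrative usage sketch (not part of the header above; the function name
// and values are placeholders). VSet keeps its backing Vector sorted and
// deduplicated: insert() binary-searches for the position, so lookups are
// O(log n) while insertions are O(n) due to element shifting.
[[maybe_unused]] static void example_vset_usage() {
	VSet<int> set;
	set.insert(3);
	set.insert(1);
	set.insert(3); // Duplicate: ignored, size stays 2.

	CRASH_COND(set.size() != 2);
	CRASH_COND(set[0] != 1 || set[1] != 3); // Kept in ascending order.
	CRASH_COND(!set.has(3));
	CRASH_COND(set.find(1) != 0);

	set.erase(1);
	CRASH_COND(set.size() != 1);
}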