initial commit, 4.5 stable
Some checks failed
🔗 GHA / 📊 Static checks (push) Has been cancelled
🔗 GHA / 🤖 Android (push) Has been cancelled
🔗 GHA / 🍏 iOS (push) Has been cancelled
🔗 GHA / 🐧 Linux (push) Has been cancelled
🔗 GHA / 🍎 macOS (push) Has been cancelled
🔗 GHA / 🏁 Windows (push) Has been cancelled
🔗 GHA / 🌐 Web (push) Has been cancelled

This commit is contained in:
2025-09-16 20:46:46 -04:00
commit 9d30169a8d
13378 changed files with 7050105 additions and 0 deletions

View File

@@ -0,0 +1,137 @@
#!/usr/bin/env python3
# Script used to dump char ranges for specific properties from
# the Unicode Character Database to the `char_range.inc` file.
# NOTE: This script is deliberately not integrated into the build system;
# you should run it manually whenever you want to update the data.
import os
import sys
from typing import Final, List, Tuple
from urllib.request import urlopen

if __name__ == "__main__":
    # Make the repository root importable so `methods` can be found.
    sys.path.insert(1, os.path.join(os.path.dirname(__file__), "../../"))

from methods import generate_copyright_header

# Derived Core Properties data file for Unicode 16.0.0.
URL: Final[str] = "https://www.unicode.org/Public/16.0.0/ucd/DerivedCoreProperties.txt"

# Accumulators of (start, end) codepoint ranges, one list per Unicode property.
xid_start: List[Tuple[int, int]] = []
xid_continue: List[Tuple[int, int]] = []
uppercase_letter: List[Tuple[int, int]] = []
lowercase_letter: List[Tuple[int, int]] = []
unicode_letter: List[Tuple[int, int]] = []
def merge_ranges(ranges: List[Tuple[int, int]]) -> None:
    """Coalesce touching ranges in-place.

    `ranges` must be sorted by start value; consecutive entries whose bounds
    are contiguous (previous end + 1 == next start) are fused into a single
    range. The list is mutated and nothing is returned.
    """
    if len(ranges) < 2:
        return
    merged: List[Tuple[int, int]] = [ranges[0]]
    for start, end in ranges[1:]:
        prev_start, prev_end = merged[-1]
        if prev_end + 1 == start:
            # Contiguous with the previous range: extend it instead of appending.
            merged[-1] = (prev_start, end)
        else:
            merged.append((start, end))
    ranges[:] = merged
def parse_unicode_data() -> None:
    """Download DerivedCoreProperties.txt and fill the module-level property lists.

    Each data line has the form `RANGE ; PROPERTY # comment`; ranges are either
    a single hex codepoint or `START..END`. After collection, the lists are
    sorted/merged so `make_array` can emit compact tables.
    """
    lines: List[str] = [line.decode("utf-8") for line in urlopen(URL)]
    for line in lines:
        # Skip comment and blank lines.
        if line.startswith("#") or not line.strip():
            continue
        split_line: List[str] = line.split(";")
        char_range: str = split_line[0].strip()
        # Drop the trailing `# ...` comment after the property name.
        char_property: str = split_line[1].strip().split("#")[0].strip()
        range_start: str = char_range
        range_end: str = char_range
        if ".." in char_range:
            range_start, range_end = char_range.split("..")
        range_tuple: Tuple[int, int] = (int(range_start, 16), int(range_end, 16))
        if char_property == "XID_Start":
            xid_start.append(range_tuple)
        elif char_property == "XID_Continue":
            xid_continue.append(range_tuple)
        elif char_property == "Uppercase":
            uppercase_letter.append(range_tuple)
        elif char_property == "Lowercase":
            lowercase_letter.append(range_tuple)
        elif char_property == "Alphabetic":
            unicode_letter.append(range_tuple)

    # Underscore technically isn't in XID_Start, but for our purposes it's included.
    xid_start.append((0x005F, 0x005F))
    # Re-sort since the underscore entry was appended out of order.
    xid_start.sort(key=lambda x: x[0])

    merge_ranges(xid_start)
    merge_ranges(xid_continue)
    merge_ranges(uppercase_letter)
    merge_ranges(lowercase_letter)
    merge_ranges(unicode_letter)
def make_array(array_name: str, range_list: List[Tuple[int, int]]) -> str:
    """Render one `CharRange` C array literal for the given codepoint ranges."""
    rows = "".join(f"\t{{ 0x{lo:x}, 0x{hi:x} }},\n" for lo, hi in range_list)
    return f"\n\nconstexpr inline CharRange {array_name}[] = {{\n{rows}}};"
def generate_char_range_inc() -> None:
    """Fetch/merge the Unicode data and write `core/string/char_range.inc`."""
    parse_unicode_data()
    source: str = generate_copyright_header("char_range.inc")
    source += f"""
// This file was generated using the `misc/scripts/char_range_fetch.py` script.
#pragma once
#include "core/typedefs.h"
// Unicode Derived Core Properties
// Source: {URL}
struct CharRange {{
\tchar32_t start;
\tchar32_t end;
}};"""
    source += make_array("xid_start", xid_start)
    source += make_array("xid_continue", xid_continue)
    source += make_array("uppercase_letter", uppercase_letter)
    source += make_array("lowercase_letter", lowercase_letter)
    source += make_array("unicode_letter", unicode_letter)
    source += "\n"
    # Path is relative to this script: misc/scripts/ -> repo root -> core/string/.
    char_range_path: str = os.path.join(os.path.dirname(__file__), "../../core/string/char_range.inc")
    with open(char_range_path, "w", newline="\n") as f:
        f.write(source)
    print("`char_range.inc` generated successfully.")


if __name__ == "__main__":
    generate_char_range_inc()

View File

@@ -0,0 +1,67 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Scan a CI execution log (path given as argv[1]) for sanitizer reports,
# crashes, leaks and failed assertions; exits with a distinct non-zero code
# (50-55) per failure class, or 0 when only tolerated warnings are found.
import sys

if len(sys.argv) < 2:
    print("ERROR: You must run program with file name as argument.")
    sys.exit(50)

fname = sys.argv[1]
with open(fname.strip(), "r", encoding="utf-8") as fileread:
    file_contents = fileread.read()

# If find "ERROR: AddressSanitizer:", then happens invalid read or write
# This is critical bug, so we need to fix this as fast as possible
if file_contents.find("ERROR: AddressSanitizer:") != -1:
    print("FATAL ERROR: An incorrectly used memory was found.")
    sys.exit(51)

# There is also possible, that program crashed with or without backtrace.
if (
    file_contents.find("Program crashed with signal") != -1
    or file_contents.find("Dumping the backtrace") != -1
    or file_contents.find("Segmentation fault (core dumped)") != -1
    or file_contents.find("Aborted (core dumped)") != -1
    or file_contents.find("terminate called without an active exception") != -1
):
    print("FATAL ERROR: Godot has been crashed.")
    sys.exit(52)

# Finding memory leaks in Godot is quite difficult, because we need to take into
# account leaks also in external libraries. They are usually provided without
# debugging symbols, so the leak report from it usually has only 2/3 lines,
# so searching for 5 element - "#4 0x" - should correctly detect the vast
# majority of memory leaks
if file_contents.find("ERROR: LeakSanitizer:") != -1:
    if file_contents.find("#4 0x") != -1:
        print("ERROR: Memory leak was found")
        sys.exit(53)

# It may happen that Godot detects leaking nodes/resources and removes them, so
# this possibility should also be handled as a potential error, even if
# LeakSanitizer doesn't report anything
if file_contents.find("ObjectDB instances leaked at exit") != -1:
    print("ERROR: Memory leak was found")
    sys.exit(54)

# In test project may be put several assert functions which will control if
# project is executed with right parameters etc. which normally will not stop
# execution of project
if file_contents.find("Assertion failed") != -1:
    print("ERROR: Assertion failed in project, check execution log for more info")
    sys.exit(55)

# For now Godot leaks a lot of rendering stuff so for now we just show info
# about it and this needs to be re-enabled after fixing this memory leaks.
if file_contents.find("were leaked") != -1 or file_contents.find("were never freed") != -1:
    print("WARNING: Memory leak was found")

sys.exit(0)

View File

@@ -0,0 +1,96 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Rewrite the Godot copyright header at the top of each file passed on the
# command line, substituting `$filename` with the file's own name while
# keeping the box-comment column alignment intact.
import os
import sys

header = """\
/**************************************************************************/
/*  $filename                                                             */
/**************************************************************************/
/*                         This file is part of:                          */
/*                             GODOT ENGINE                               */
/*                        https://godotengine.org                         */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/
"""

if len(sys.argv) < 2:
    print("Invalid usage of copyright_headers.py, it should be called with a path to one or multiple files.")
    sys.exit(1)

for f in sys.argv[1:]:
    fname = f

    # Handle replacing $filename with actual filename and keep alignment
    fsingle = os.path.basename(fname.strip())
    rep_fl = "$filename"
    rep_fi = fsingle
    len_fl = len(rep_fl)
    len_fi = len(rep_fi)
    # Pad with spaces to keep alignment
    if len_fi < len_fl:
        for x in range(len_fl - len_fi):
            rep_fi += " "
    elif len_fl < len_fi:
        for x in range(len_fi - len_fl):
            rep_fl += " "
    # Prefer the padded replacement (keeps the closing `*/` aligned); fall
    # back to a plain substitution if the padded token isn't found.
    if header.find(rep_fl) != -1:
        text = header.replace(rep_fl, rep_fi)
    else:
        text = header.replace("$filename", fsingle)
    text += "\n"

    # We now have the proper header, so we want to ignore the one in the original file
    # and potentially empty lines and badly formatted lines, while keeping comments that
    # come after the header, and then keep everything non-header unchanged.
    # To do so, we skip empty lines that may be at the top in a first pass.
    # In a second pass, we skip all consecutive comment lines starting with "/*",
    # then we can append the rest (step 2).
    with open(fname.strip(), "r", encoding="utf-8") as fileread:
        line = fileread.readline()
        header_done = False

        while line.strip() == "" and line != "":  # Skip empty lines at the top
            line = fileread.readline()

        if line.find("/**********") == -1:  # Godot header starts this way
            # Maybe starting with a non-Godot comment, abort header magic
            header_done = True

        while not header_done:  # Handle header now
            if line.find("/*") != 0:  # No more starting with a comment
                header_done = True
                if line.strip() != "":
                    text += line
            line = fileread.readline()

        while line != "":  # Dump everything until EOF
            text += line
            line = fileread.readline()

    # Write
    with open(fname.strip(), "w", encoding="utf-8", newline="\n") as filewrite:
        filewrite.write(text)

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Run `dotnet format` over the C# projects that own the files passed on the
# command line, formatting only those files.
import glob
import os
import sys

if len(sys.argv) < 2:
    print("Invalid usage of dotnet_format.py, it should be called with a path to one or multiple files.")
    sys.exit(1)

# Create dummy generated files, if needed.
for path in [
    "modules/mono/SdkPackageVersions.props",
]:
    if os.path.exists(path):
        continue
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "w", encoding="utf-8", newline="\n") as f:
        f.write("<Project />")

# Avoid importing GeneratedIncludes.props.
os.environ["GodotSkipGenerated"] = "true"

# Match all the input files to their respective C# project.
# Maps each .csproj directory to a space-separated list of the input files
# living under it (commonpath check).
projects = {
    path: " ".join([f for f in sys.argv[1:] if os.path.commonpath([f, path]) == path])
    for path in [os.path.dirname(f) for f in glob.glob("**/*.csproj", recursive=True)]
}

# Run dotnet format on all projects with more than 0 modified files.
for path, files in projects.items():
    if files:
        os.system(f"dotnet format {path} --include {files}")

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Normalize trailing whitespace, line endings and BOMs for the given files.
# CRLF (+ BOM for .csproj/.sln) is enforced for Visual Studio artifacts;
# everything else gets LF without BOM. Exits 1 if any file can't be decoded.
import sys

if len(sys.argv) < 2:
    print("Invalid usage of file_format.py, it should be called with a path to one or multiple files.")
    sys.exit(1)

# UTF-8 byte order mark.
BOM = b"\xef\xbb\xbf"

changed = []
invalid = []

for file in sys.argv[1:]:
    try:
        with open(file, "rt", encoding="utf-8") as f:
            original = f.read()
    except UnicodeDecodeError:
        # Not valid UTF-8: needs manual attention, don't touch it.
        invalid.append(file)
        continue

    if original == "":
        continue

    EOL = "\r\n" if file.endswith((".csproj", ".sln", ".bat")) or file.startswith("misc/msvs") else "\n"
    WANTS_BOM = file.endswith((".csproj", ".sln"))

    # Strip trailing whitespace on every line and enforce exactly one final EOL.
    revamp = EOL.join([line.rstrip("\n\r\t ") for line in original.splitlines(True)]).rstrip(EOL) + EOL

    new_raw = revamp.encode(encoding="utf-8")
    if not WANTS_BOM and new_raw.startswith(BOM):
        new_raw = new_raw[len(BOM) :]
    elif WANTS_BOM and not new_raw.startswith(BOM):
        new_raw = BOM + new_raw

    # Compare raw bytes so EOL/BOM differences are detected.
    with open(file, "rb") as f:
        old_raw = f.read()

    if old_raw != new_raw:
        changed.append(file)
        with open(file, "wb") as f:
            f.write(new_raw)

if changed:
    for file in changed:
        print(f"FIXED: {file}")
if invalid:
    for file in invalid:
        print(f"REQUIRES MANUAL CHANGES: {file}")
    sys.exit(1)

View File

@@ -0,0 +1,26 @@
# Verify that no tracked/present file in the repository is matched by
# .gitignore; exits 1 and prints the offenders otherwise.
set -uo pipefail
shopt -s globstar

echo -e ".gitignore validation..."

# Get a list of files that exist in the repo but are ignored.
# The --verbose flag also includes files un-ignored via ! prefixes.
# We filter those out with a somewhat awkward `awk` directive.
# (Explanation: Split each line by : delimiters,
# see if the actual gitignore line shown in the third field starts with !,
# if it doesn't, print it.)
# ignorecase for the sake of Windows users.
output=$(git -c core.ignorecase=true check-ignore --verbose --no-index **/* | \
    awk -F ':' '{ if ($3 !~ /^!/) print $0 }')

# Then we take this result and return success if it's empty.
if [ -z "$output" ]; then
    exit 0
else
    # And print the result if it isn't.
    echo "$output"
    exit 1
fi

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Convert legacy `#ifndef/#define/#endif` header guards to `#pragma once`
# and insert a missing guard directly below the Godot copyright header.
# Prints FIXED/REQUIRES MANUAL CHANGES per file; exits 1 if any file is invalid.
import sys

if len(sys.argv) < 2:
    print("Invalid usage of header_guards.py, it should be called with a path to one or multiple files.")
    sys.exit(1)

changed = []
invalid = []

for file in sys.argv[1:]:
    header_start = -1
    header_end = -1

    with open(file.strip(), "rt", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Locate the end of the Godot copyright header (if present); the guard is
    # expected right after it.
    for idx, line in enumerate(lines):
        sline = line.strip()

        if header_start < 0:
            if sline == "":  # Skip empty lines at the top.
                continue

            if sline.startswith("/**********"):  # Godot header starts this way.
                header_start = idx
            else:
                header_end = 0  # There is no Godot header.
                break
        else:
            if not sline.startswith(("*", "/*")):  # Not in the Godot header anymore.
                header_end = idx + 1  # The guard should be two lines below the Godot header.
                break

    if (HEADER_CHECK_OFFSET := header_end) < 0 or HEADER_CHECK_OFFSET >= len(lines):
        invalid.append(file)
        continue

    # Already modern: nothing to do.
    if lines[HEADER_CHECK_OFFSET].startswith("#pragma once"):
        continue

    # Might be using legacy header guards.
    HEADER_BEGIN_OFFSET = HEADER_CHECK_OFFSET + 1
    HEADER_END_OFFSET = len(lines) - 1

    if HEADER_BEGIN_OFFSET >= HEADER_END_OFFSET:
        invalid.append(file)
        continue

    if (
        lines[HEADER_CHECK_OFFSET].startswith("#ifndef")
        and lines[HEADER_BEGIN_OFFSET].startswith("#define")
        and lines[HEADER_END_OFFSET].startswith("#endif")
    ):
        # Replace the `#ifndef`/`#define` pair with `#pragma once` and drop
        # the trailing `#endif` line.
        lines[HEADER_CHECK_OFFSET] = "#pragma once"
        lines[HEADER_BEGIN_OFFSET] = "\n"
        lines.pop()
        with open(file, "wt", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
        changed.append(file)
        continue

    # Verify `#pragma once` doesn't exist at invalid location.
    misplaced = False
    for line in lines:
        if line.startswith("#pragma once"):
            misplaced = True
            break

    if misplaced:
        invalid.append(file)
        continue

    # Assume that we're simply missing a guard entirely.
    lines.insert(HEADER_CHECK_OFFSET, "#pragma once\n\n")
    with open(file, "wt", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
    changed.append(file)

if changed:
    for file in changed:
        print(f"FIXED: {file}")
if invalid:
    for file in invalid:
        print(f"REQUIRES MANUAL CHANGES: {file}")
    sys.exit(1)

View File

@@ -0,0 +1,146 @@
#!/usr/bin/env python
# Download and install the Direct3D 12 build dependencies (Mesa NIR,
# WinPixEventRuntime, DirectX 12 Agility SDK) into the local deps folder.
if __name__ != "__main__":
    raise SystemExit(f'Utility script "{__file__}" should not be used as a module!')

import argparse
import os
import shutil
import subprocess
import sys
import urllib.request

# Make the repository root importable so `misc.utility.color` can be found.
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../"))

from misc.utility.color import Ansi, color_print

parser = argparse.ArgumentParser(description="Install D3D12 dependencies for Windows platforms.")
parser.add_argument(
    "--mingw_prefix",
    default=os.getenv("MINGW_PREFIX", ""),
    help="Explicitly specify a path containing the MinGW bin folder.",
)
args = parser.parse_args()

# Base Godot dependencies path
# If cross-compiling (no LOCALAPPDATA), we install in `bin`
deps_folder = os.getenv("LOCALAPPDATA")
if deps_folder:
    deps_folder = os.path.join(deps_folder, "Godot", "build_deps")
else:
    deps_folder = os.path.join("bin", "build_deps")

# Mesa NIR
# Check for latest version: https://github.com/godotengine/godot-nir-static/releases/latest
mesa_version = "23.1.9-1"

# WinPixEventRuntime
# Check for latest version: https://www.nuget.org/api/v2/package/WinPixEventRuntime (check downloaded filename)
pix_version = "1.0.240308001"
pix_archive = os.path.join(deps_folder, f"WinPixEventRuntime_{pix_version}.nupkg")
pix_folder = os.path.join(deps_folder, "pix")

# DirectX 12 Agility SDK
# Check for latest version: https://www.nuget.org/api/v2/package/Microsoft.Direct3D.D3D12 (check downloaded filename)
# After updating this, remember to change the default value of the `rendering/rendering_device/d3d12/agility_sdk_version`
# project setting to match the minor version (e.g. for `1.613.3`, it should be `613`).
agility_sdk_version = "1.613.3"
agility_sdk_archive = os.path.join(deps_folder, f"Agility_SDK_{agility_sdk_version}.nupkg")
agility_sdk_folder = os.path.join(deps_folder, "agility_sdk")

# Create dependencies folder
if not os.path.exists(deps_folder):
    os.makedirs(deps_folder)

# Mesa NIR
color_print(f"{Ansi.BOLD}[1/3] Mesa NIR")
# One prebuilt static archive per supported architecture/toolchain combo.
for arch in [
    "arm64-llvm",
    "arm64-msvc",
    "x86_32-gcc",
    "x86_32-llvm",
    "x86_32-msvc",
    "x86_64-gcc",
    "x86_64-llvm",
    "x86_64-msvc",
]:
    mesa_filename = "godot-nir-static-" + arch + "-release.zip"
    mesa_archive = os.path.join(deps_folder, mesa_filename)
    mesa_folder = os.path.join(deps_folder, "mesa-" + arch)
    # Remove any stale partial download before fetching.
    if os.path.isfile(mesa_archive):
        os.remove(mesa_archive)
    print(f"Downloading Mesa NIR {mesa_filename} ...")
    urllib.request.urlretrieve(
        f"https://github.com/godotengine/godot-nir-static/releases/download/{mesa_version}/{mesa_filename}",
        mesa_archive,
    )
    if os.path.exists(mesa_folder):
        print(f"Removing existing local Mesa NIR installation in {mesa_folder} ...")
        shutil.rmtree(mesa_folder)
    print(f"Extracting Mesa NIR {mesa_filename} to {mesa_folder} ...")
    shutil.unpack_archive(mesa_archive, mesa_folder)
    os.remove(mesa_archive)
print("Mesa NIR installed successfully.\n")

# WinPixEventRuntime
# MinGW needs DLLs converted with dlltool.
# We rely on finding gendef/dlltool to detect if we have MinGW.
# Check existence of needed tools for generating mingw library.
pathstr = os.environ.get("PATH", "")
if args.mingw_prefix:
    pathstr = os.path.join(args.mingw_prefix, "bin") + os.pathsep + pathstr
gendef = shutil.which("x86_64-w64-mingw32-gendef", path=pathstr) or shutil.which("gendef", path=pathstr) or ""
dlltool = shutil.which("x86_64-w64-mingw32-dlltool", path=pathstr) or shutil.which("dlltool", path=pathstr) or ""
has_mingw = gendef != "" and dlltool != ""

color_print(f"{Ansi.BOLD}[2/3] WinPixEventRuntime")
if os.path.isfile(pix_archive):
    os.remove(pix_archive)
print(f"Downloading WinPixEventRuntime {pix_version} ...")
urllib.request.urlretrieve(f"https://www.nuget.org/api/v2/package/WinPixEventRuntime/{pix_version}", pix_archive)
if os.path.exists(pix_folder):
    print(f"Removing existing local WinPixEventRuntime installation in {pix_folder} ...")
    shutil.rmtree(pix_folder)
print(f"Extracting WinPixEventRuntime {pix_version} to {pix_folder} ...")
shutil.unpack_archive(pix_archive, pix_folder, "zip")
os.remove(pix_archive)
if has_mingw:
    print("Adapting WinPixEventRuntime to also support MinGW alongside MSVC.")
    cwd = os.getcwd()
    os.chdir(pix_folder)
    # Generate MinGW import libraries for both the x64 and ARM64 DLLs.
    subprocess.run([gendef, "./bin/x64/WinPixEventRuntime.dll"])
    subprocess.run(
        [dlltool]
        + "--machine i386:x86-64 --no-leading-underscore -d WinPixEventRuntime.def -D WinPixEventRuntime.dll -l ./bin/x64/libWinPixEventRuntime.a".split()
    )
    subprocess.run([gendef, "./bin/ARM64/WinPixEventRuntime.dll"])
    subprocess.run(
        [dlltool]
        + "--machine arm64 --no-leading-underscore -d WinPixEventRuntime.def -D WinPixEventRuntime.dll -l ./bin/ARM64/libWinPixEventRuntime.a".split()
    )
    os.chdir(cwd)
else:
    print(
        'MinGW support requires "dlltool" and "gendef" dependencies, so only MSVC support is provided for WinPixEventRuntime. Did you forget to provide a `--mingw_prefix`?'
    )
print(f"WinPixEventRuntime {pix_version} installed successfully.\n")

# DirectX 12 Agility SDK
color_print(f"{Ansi.BOLD}[3/3] DirectX 12 Agility SDK")
if os.path.isfile(agility_sdk_archive):
    os.remove(agility_sdk_archive)
print(f"Downloading DirectX 12 Agility SDK {agility_sdk_version} ...")
urllib.request.urlretrieve(
    f"https://www.nuget.org/api/v2/package/Microsoft.Direct3D.D3D12/{agility_sdk_version}", agility_sdk_archive
)
if os.path.exists(agility_sdk_folder):
    print(f"Removing existing local DirectX 12 Agility SDK installation in {agility_sdk_folder} ...")
    shutil.rmtree(agility_sdk_folder)
print(f"Extracting DirectX 12 Agility SDK {agility_sdk_version} to {agility_sdk_folder} ...")
shutil.unpack_archive(agility_sdk_archive, agility_sdk_folder, "zip")
os.remove(agility_sdk_archive)
print(f"DirectX 12 Agility SDK {agility_sdk_version} installed successfully.\n")

# Complete message
color_print(f'{Ansi.GREEN}All Direct3D 12 SDK components were installed to "{deps_folder}" successfully!')
color_print(f'{Ansi.GREEN}You can now build Godot with Direct3D 12 support enabled by running "scons d3d12=yes".')

View File

@@ -0,0 +1,44 @@
#!/usr/bin/env sh
# Install (or update to) the latest LunarG Vulkan SDK on macOS.
# NOTE(review): `set -o pipefail` and the $'\n\t' IFS literal are bash
# extensions, not POSIX sh — confirm the target `sh` accepts them.
set -euo pipefail
IFS=$'\n\t'

new_ver_full=''

# Check currently installed and latest available Vulkan SDK versions.
if command -v jq 2>&1 >/dev/null; then
    curl -L "https://sdk.lunarg.com/sdk/download/latest/mac/config.json" -o /tmp/vulkan-sdk.json
    new_ver_full=`jq -r '.version' /tmp/vulkan-sdk.json`
    # Turn the dotted version into a single comparable integer.
    new_ver=`echo "$new_ver_full" | awk -F. '{ printf("%d%02d%04d%02d\n", $1,$2,$3,$4); }';`
    rm -f /tmp/vulkan-sdk.json
    # Compare against every SDK version already installed under ~/VulkanSDK.
    for f in $HOME/VulkanSDK/*; do
        if [ -d "$f" ]; then
            f=`echo "${f##*/}" | awk -F. '{ printf("%d%02d%04d%02d\n", $1,$2,$3,$4); }';`
            if [ $f -ge $new_ver ]; then
                echo 'Latest or newer Vulkan SDK is already installed. Skipping installation.'
                exit 0
            fi
        fi
    done
else
    echo 'Error: Could not find 'jq' command. Is jq installed? Try running "brew install jq" or "port install jq" and rerunning this script.'
    exit 1
fi

# Download and install the Vulkan SDK.
curl -L "https://sdk.lunarg.com/sdk/download/latest/mac/vulkan-sdk.zip" -o /tmp/vulkan-sdk.zip
unzip /tmp/vulkan-sdk.zip -d /tmp

if [ -d "/tmp/vulkansdk-macOS-$new_ver_full.app" ]; then
    # Run the bundled installer non-interactively.
    /tmp/vulkansdk-macOS-$new_ver_full.app/Contents/MacOS/vulkansdk-macOS-$new_ver_full --accept-licenses --default-answer --confirm-command install
    rm -rf /tmp/vulkansdk-macOS-$new_ver_full.app
else
    echo "Couldn't install the Vulkan SDK, the unzipped contents may no longer match what this script expects."
    exit 1
fi

rm -f /tmp/vulkan-sdk.zip

echo 'Vulkan SDK installed successfully! You can now build Godot by running "scons".'

View File

@@ -0,0 +1,26 @@
#!/usr/bin/env bash
# Generate .ico, .icns and .zip set of icons for Steam
# Requires ImageMagick (`convert`), `zip`, `icotool` and `png2icns` in PATH.

# Make icons with transparent backgrounds and all sizes
for s in 16 24 32 48 64 128 256 512 1024; do
  convert -resize ${s}x$s -antialias \
          -background transparent \
          ../../icon.svg icon$s.png
done

# 16px tga file for library
convert icon16.png icon16.tga

# zip for Linux
zip godot-icons.zip icon*.png

# ico for Windows
# Not including biggest ones or it blows up in size
icotool -c -o godot-icon.ico icon{16,24,32,48,64,128,256}.png

# icns for macOS
# Only some sizes: https://iconhandbook.co.uk/reference/chart/osx/
png2icns godot-icon.icns icon{16,32,128,256,512,1024}.png

# Clean up the intermediate PNGs.
rm -f icon*.png

View File

@@ -0,0 +1,66 @@
#!/bin/sh
# Build a source tarball of the repository, embedding .git/HEAD so the build
# system can still derive GODOT_VERSION_HASH from the extracted sources.

if [ ! -e "version.py" ]; then
    echo "This script should be ran from the root folder of the Godot repository."
    exit 1
fi

while getopts "h?sv:g:" opt; do
    case "$opt" in
    h|\?)
        echo "Usage: $0 [OPTIONS...]"
        echo
        echo "  -s script friendly file name (godot.tar.gz)"
        echo "  -v godot version for file name (e.g. 4.0-stable)"
        echo "  -g git treeish to archive (e.g. master)"
        echo
        exit 1
        ;;
    s)
        script_friendly_name=1
        ;;
    v)
        godot_version=$OPTARG
        ;;
    g)
        git_treeish=$OPTARG
        ;;
    esac
done

# Resolve the revision to archive (defaults to the current HEAD).
if [ ! -z "$git_treeish" ]; then
    HEAD=$(git rev-parse $git_treeish)
else
    HEAD=$(git rev-parse HEAD)
fi

# Choose the folder/tarball base name from the flags given.
if [ ! -z "$script_friendly_name" ]; then
    NAME=godot
else
    if [ ! -z "$godot_version" ]; then
        NAME=godot-$godot_version
    else
        NAME=godot-$HEAD
    fi
fi

CURDIR=$(pwd)
TMPDIR=$(mktemp -d -t godot-XXXXXX)

echo "Generating tarball for revision $HEAD with folder name '$NAME'."
echo
echo "The tarball will be written to the parent folder:"
echo "    $(dirname $CURDIR)/$NAME.tar.gz"

git archive $HEAD --prefix=$NAME/ -o $TMPDIR/$NAME.tar

# Adding custom .git/HEAD to tarball so that we can generate GODOT_VERSION_HASH.
cd $TMPDIR
mkdir -p $NAME/.git
echo $HEAD > $NAME/.git/HEAD
tar -uf $NAME.tar $NAME

cd $CURDIR
gzip -c $TMPDIR/$NAME.tar > ../$NAME.tar.gz
rm -rf $TMPDIR

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env python3
# Prune CI cache files: delete every entry listed in `redundant.txt`, then any
# cache file whose access time is older than the given cutoff timestamp.
import argparse
import glob
import os

if __name__ != "__main__":
    raise ImportError(f"{__name__} should not be used as a module.")


def main():
    """Delete stale cache files; returns the number of failures (used as exit code)."""
    parser = argparse.ArgumentParser(description="Cleanup old cache files")
    parser.add_argument("timestamp", type=int, help="Unix timestamp cutoff")
    parser.add_argument("directory", help="Path to cache directory")
    args = parser.parse_args()

    ret = 0

    # TODO: Convert to non-hardcoded path
    if os.path.exists("redundant.txt"):
        with open("redundant.txt") as redundant:
            # One path per line; blank-stripped before checking.
            for item in map(str.strip, redundant):
                if os.path.isfile(item):
                    try:
                        os.remove(item)
                    except OSError:
                        print(f'Failed to handle "{item}"; skipping.')
                        ret += 1

    # Cache entries live two levels deep: <directory>/<bucket>/<entry>.
    for file in glob.glob(os.path.join(args.directory, "*", "*")):
        try:
            if os.path.getatime(file) < args.timestamp:
                os.remove(file)
        except OSError:
            print(f'Failed to handle "{file}"; skipping.')
            ret += 1

    return ret


try:
    raise SystemExit(main())
except KeyboardInterrupt:
    # Restore the default SIGINT handler and re-deliver the signal so the
    # process exits with the conventional interrupt status.
    import signal

    signal.signal(signal.SIGINT, signal.SIG_DFL)
    os.kill(os.getpid(), signal.SIGINT)

117
misc/scripts/ucaps_fetch.py Normal file
View File

@@ -0,0 +1,117 @@
#!/usr/bin/env python3
# Script used to dump case mappings from
# the Unicode Character Database to the `ucaps.h` file.
# NOTE: This script is deliberately not integrated into the build system;
# you should run it manually whenever you want to update the data.
import os
import sys
from typing import Final, List, Tuple
from urllib.request import urlopen

if __name__ == "__main__":
    # Make the repository root importable so `methods` can be found.
    sys.path.insert(1, os.path.join(os.path.dirname(__file__), "../../"))

from methods import generate_copyright_header

# UnicodeData.txt for Unicode 16.0.0 (one semicolon-separated record per codepoint).
URL: Final[str] = "https://www.unicode.org/Public/16.0.0/ucd/UnicodeData.txt"

# (codepoint, mapped codepoint) pairs, both formatted as "0xXXXX" hex strings.
lower_to_upper: List[Tuple[str, str]] = []
upper_to_lower: List[Tuple[str, str]] = []
def parse_unicode_data() -> None:
    """Download UnicodeData.txt and collect the simple case mappings.

    Field 12 of each record is the uppercase mapping and field 13 the
    lowercase mapping; empty fields mean the codepoint has no mapping.
    """
    lines: List[str] = [line.decode("utf-8") for line in urlopen(URL)]
    for line in lines:
        split_line: List[str] = line.split(";")
        code_value: str = split_line[0].strip()
        uppercase_mapping: str = split_line[12].strip()
        lowercase_mapping: str = split_line[13].strip()
        if uppercase_mapping:
            lower_to_upper.append((f"0x{code_value}", f"0x{uppercase_mapping}"))
        if lowercase_mapping:
            upper_to_lower.append((f"0x{code_value}", f"0x{lowercase_mapping}"))
def make_cap_table(table_name: str, len_name: str, table: List[Tuple[str, str]]) -> str:
    """Render a two-column C mapping table named `table_name`, sized by the `len_name` macro."""
    entries = "".join(f"\t{{ {src}, {dst} }},\n" for src, dst in table)
    return f"static const int {table_name}[{len_name}][2] = {{\n{entries}}};\n\n"
def generate_ucaps_fetch() -> None:
    """Download the Unicode data and write `core/string/ucaps.h`."""
    parse_unicode_data()
    source: str = generate_copyright_header("ucaps.h")
    source += f"""
#pragma once
// This file was generated using the `misc/scripts/ucaps_fetch.py` script.
#define LTU_LEN {len(lower_to_upper)}
#define UTL_LEN {len(upper_to_lower)}\n\n"""
    source += make_cap_table("caps_table", "LTU_LEN", lower_to_upper)
    source += make_cap_table("reverse_caps_table", "UTL_LEN", upper_to_lower)
    # Emit C binary-search helpers over the generated tables; a character with
    # no mapping is returned unchanged.
    source += """static int _find_upper(int ch) {
\tint low = 0;
\tint high = LTU_LEN - 1;
\tint middle;
\twhile (low <= high) {
\t\tmiddle = (low + high) / 2;
\t\tif (ch < caps_table[middle][0]) {
\t\t\thigh = middle - 1; // Search low end of array.
\t\t} else if (caps_table[middle][0] < ch) {
\t\t\tlow = middle + 1; // Search high end of array.
\t\t} else {
\t\t\treturn caps_table[middle][1];
\t\t}
\t}
\treturn ch;
}
static int _find_lower(int ch) {
\tint low = 0;
\tint high = UTL_LEN - 1;
\tint middle;
\twhile (low <= high) {
\t\tmiddle = (low + high) / 2;
\t\tif (ch < reverse_caps_table[middle][0]) {
\t\t\thigh = middle - 1; // Search low end of array.
\t\t} else if (reverse_caps_table[middle][0] < ch) {
\t\t\tlow = middle + 1; // Search high end of array.
\t\t} else {
\t\t\treturn reverse_caps_table[middle][1];
\t\t}
\t}
\treturn ch;
}
"""
    # Path is relative to this script: misc/scripts/ -> repo root -> core/string/.
    ucaps_path: str = os.path.join(os.path.dirname(__file__), "../../core/string/ucaps.h")
    with open(ucaps_path, "w", newline="\n") as f:
        f.write(source)
    print("`ucaps.h` generated successfully.")


if __name__ == "__main__":
    generate_ucaps_fetch()

View File

@@ -0,0 +1,99 @@
#!/usr/bin/env python3
# Script used to dump char ranges from
# the Unicode Character Database to the `unicode_ranges.inc` file.
# NOTE: This script is deliberately not integrated into the build system;
# you should run it manually whenever you want to update the data.
import os
import sys
from typing import Final, List, Set, Tuple
from urllib.request import urlopen

if __name__ == "__main__":
    # Make the repository root importable so `methods` can be found.
    sys.path.insert(1, os.path.join(os.path.dirname(__file__), "../../"))

from methods import generate_copyright_header

# Unicode block definitions for Unicode 16.0.0.
URL: Final[str] = "https://www.unicode.org/Public/16.0.0/ucd/Blocks.txt"

# Collected (start, end, block name) entries; bounds are "0xXXXX" hex strings.
ranges: List[Tuple[str, str, str]] = []

# Blocks excluded from the generated table (surrogates, specials, format controls...).
exclude_blocks: Set[str] = {
    "High Surrogates",
    "High Private Use Surrogates",
    "Low Surrogates",
    "Variation Selectors",
    "Specials",
    "Egyptian Hieroglyph Format Controls",
    "Tags",
    "Variation Selectors Supplement",
}
def parse_unicode_data() -> None:
    """Download Blocks.txt and collect every `START..END; Block Name` entry,
    skipping comments, blanks and the blocks listed in `exclude_blocks`."""
    lines: List[str] = [line.decode("utf-8") for line in urlopen(URL)]
    for line in lines:
        if line.startswith("#") or not line.strip():
            continue
        split_line: List[str] = line.split(";")
        char_range: str = split_line[0].strip()
        block: str = split_line[1].strip()
        if block in exclude_blocks:
            continue
        range_start, range_end = char_range.split("..")
        ranges.append((f"0x{range_start}", f"0x{range_end}", block))
def make_array(array_name: str, ranges: List[Tuple[str, str, str]]) -> str:
    """Render the `UniRange` C array, terminated by a 0x10FFFF sentinel entry."""
    body = "".join(f'\t{{ {lo}, {hi}, U"{name}" }},\n' for lo, hi, name in ranges)
    sentinel = "\t{ 0x10FFFF, 0x10FFFF, String() }\n};\n\n"
    return f"static UniRange {array_name}[] = {{\n" + body + sentinel
def generate_unicode_ranges_inc() -> None:
    """Download the Unicode block data and write `editor/import/unicode_ranges.inc`."""
    parse_unicode_data()
    source: str = generate_copyright_header("unicode_ranges.inc")
    source += f"""
// This file was generated using the `misc/scripts/unicode_ranges_fetch.py` script.
#ifndef UNICODE_RANGES_INC
#define UNICODE_RANGES_INC
// Unicode Character Blocks
// Source: {URL}
struct UniRange {{
\tint32_t start;
\tint32_t end;
\tString name;
}};\n\n"""
    source += make_array("unicode_ranges", ranges)
    source += "#endif // UNICODE_RANGES_INC\n"
    # Path is relative to this script: misc/scripts/ -> repo root -> editor/import/.
    unicode_ranges_path: str = os.path.join(os.path.dirname(__file__), "../../editor/import/unicode_ranges.inc")
    with open(unicode_ranges_path, "w", newline="\n") as f:
        f.write(source)
    print("`unicode_ranges.inc` generated successfully.")


if __name__ == "__main__":
    generate_unicode_ranges_inc()

View File

@@ -0,0 +1,86 @@
#!/bin/bash
set -o pipefail

# Validate the given Godot binary's extension API against the reference
# `extension_api.json` of each tag that has an `.expected` file.
if [ ! -f "version.py" ]; then
    echo "Warning: This script is intended to be run from the root of the Godot repository."
    echo "Some of the paths checks may not work as intended from a different folder."
fi

if [ $# != 1 ]; then
    # Fixed: was `@0`, which printed a literal "@0" instead of the script name.
    echo "Usage: $0 <path-to-godot-executable>"
    exit 1
fi

# Directory holding the per-tag `.expected` error lists (sibling of this script).
api_validation_dir="$( dirname -- "$( dirname -- "${BASH_SOURCE[0]//\.\//}" )" )/extension_api_validation/"

has_problems=0
warn_extra=0
reference_tag=""
expected_errors=""
# Print a message either as plain text (local runs) or as a GitHub Actions
# annotation (CI, detected via $GITHUB_OUTPUT).
# Arguments: title, body, annotation type (warning/error), related file.
make_annotation()
{
    local title=$1
    local body=$2
    local type=$3
    local file=$4
    if [[ "$GITHUB_OUTPUT" == "" ]]; then
        echo "$title"
        echo "$body"
    else
        # GHA annotations are single-line; encode newlines as %0A.
        body="$(awk 1 ORS='%0A' - <<<"$body")"
        echo "::$type file=$file,title=$title ::$body"
    fi
}
# Accumulate the expected validation errors of an `.expected` file into
# $expected_errors and set $reference_tag / $warn_extra.
# A file named `<tag>_<base>.expected` (two underscore-separated parts) chains
# to the matching `<base>*.expected` file recursively; chained files keep
# warn_extra=0 so "no longer occurs" messages are suppressed for them.
get_expected_output()
{
    local parts=()
    IFS='_' read -ra parts <<< "$(basename -s .expected "$1")"
    if [[ "${#parts[@]}" == "2" ]]; then
        cat "$1" >> "$expected_errors"
        get_expected_output "$(find "$api_validation_dir" -name "${parts[1]}*.expected")"
        reference_tag="${parts[0]}"
        warn_extra=0
    else
        cat "$1" >> "$expected_errors"
        reference_tag="${parts[0]}"
        warn_extra=1
    fi
}
# For every reference `.expected` file, download that tag's extension_api.json,
# run the engine's validator against it, and diff the produced errors against
# the expected ones.
while read -r file; do
    reference_file="$(mktemp)"
    validate="$(mktemp)"
    validation_output="$(mktemp)"
    allowed_errors="$(mktemp)"
    expected_errors="$(mktemp)"

    get_expected_output "$file"

    # Download the reference extension_api.json
    wget -nv --retry-on-http-error=503 --tries=5 --timeout=60 -cO "$reference_file" "https://raw.githubusercontent.com/godotengine/godot-cpp/godot-$reference_tag/gdextension/extension_api.json" || has_problems=1

    # Validate the current API against the reference
    "$1" --headless --validate-extension-api "$reference_file" 2>&1 | tee "$validate" | awk '!/^Validate extension JSON:/' - || true

    # Collect the expected and actual validation errors
    awk '/^Validate extension JSON:/' - < "$validate" | sort > "$validation_output"
    awk '/^Validate extension JSON:/' - < "$expected_errors" | sort > "$allowed_errors"

    # Differences between the expected and actual errors
    new_validation_error="$(comm -23 "$validation_output" "$allowed_errors")"
    obsolete_validation_error="$(comm -13 "$validation_output" "$allowed_errors")"

    # Expected errors that no longer occur are informational only.
    if [ -n "$obsolete_validation_error" ] && [ "$warn_extra" = "1" ]; then
        #make_annotation "The following validation errors no longer occur (compared to $reference_tag):" "$obsolete_validation_error" warning "$file"
        echo "The following validation errors no longer occur (compared to $reference_tag):"
        echo "$obsolete_validation_error"
    fi
    # New, unexpected errors break compatibility and fail the run.
    if [ -n "$new_validation_error" ]; then
        make_annotation "Compatibility to $reference_tag is broken in the following ways:" "$new_validation_error" error "$file"
        has_problems=1
    fi

    rm -f "$reference_file" "$validate" "$validation_output" "$allowed_errors" "$expected_errors"
done <<< "$(find "$api_validation_dir" -name "*.expected")"

exit $has_problems