feat: move modules and the engine into a submodule

This commit is contained in:
Jan van der Weide 2025-04-12 18:40:44 +02:00
parent dfb5e645cd
commit c33d2130cc
5136 changed files with 225275 additions and 64485 deletions

View file

@ -0,0 +1,135 @@
#!/usr/bin/env python3
# Script used to dump char ranges for specific properties from
# the Unicode Character Database to the `char_range.inc` file.
# NOTE: This script is deliberately not integrated into the build system;
# you should run it manually whenever you want to update the data.
import os
import sys
from typing import Final, List, Tuple
from urllib.request import urlopen
if __name__ == "__main__":
    # When run directly, make the repo root importable so the shared build
    # helpers module (`methods`) can be found two directories up.
    sys.path.insert(1, os.path.join(os.path.dirname(__file__), "../../"))

from methods import generate_copyright_header

# Unicode 16.0.0 Derived Core Properties database (plain text, `;`-separated).
URL: Final[str] = "https://www.unicode.org/Public/16.0.0/ucd/DerivedCoreProperties.txt"

# Accumulators of (first, last) codepoint ranges, filled by `parse_unicode_data()`
# and later emitted as C++ arrays by `make_array()`.
xid_start: List[Tuple[int, int]] = []
xid_continue: List[Tuple[int, int]] = []
uppercase_letter: List[Tuple[int, int]] = []
lowercase_letter: List[Tuple[int, int]] = []
unicode_letter: List[Tuple[int, int]] = []
def merge_ranges(ranges: List[Tuple[int, int]]) -> None:
    """Collapse directly adjacent (start, end) ranges in-place.

    Two consecutive entries are fused whenever the second one starts exactly
    one codepoint after the first one ends. The list is rebuilt in-place so
    callers holding a reference to it see the merged result.
    """
    if len(ranges) < 2:
        return
    merged: List[Tuple[int, int]] = []
    start, end = ranges[0]
    for next_start, next_end in ranges[1:]:
        if next_start == end + 1:
            # Contiguous with the running range: just extend it.
            end = next_end
        else:
            merged.append((start, end))
            start, end = next_start, next_end
    merged.append((start, end))
    ranges[:] = merged
def parse_unicode_data() -> None:
    """Download the UCD property file and fill the module-level range lists.

    Each data line has the form `RANGE ; PROPERTY # comment`, where RANGE is
    either a single hex codepoint or `START..END`. Only the five properties
    this script cares about are collected; everything else is ignored.
    """
    buckets = {
        "XID_Start": xid_start,
        "XID_Continue": xid_continue,
        "Uppercase": uppercase_letter,
        "Lowercase": lowercase_letter,
        "Alphabetic": unicode_letter,
    }
    for raw in urlopen(URL):
        text: str = raw.decode("utf-8")
        if text.startswith("#") or not text.strip():
            continue
        fields = text.split(";")
        char_range: str = fields[0].strip()
        char_property: str = fields[1].strip().split("#")[0].strip()
        if ".." in char_range:
            lo, hi = char_range.split("..")
        else:
            lo = hi = char_range
        range_tuple: Tuple[int, int] = (int(lo, 16), int(hi, 16))
        target = buckets.get(char_property)
        if target is not None:
            target.append(range_tuple)
    # Underscore technically isn't in XID_Start, but for our purposes it's included.
    xid_start.append((0x005F, 0x005F))
    xid_start.sort(key=lambda x: x[0])
    merge_ranges(xid_start)
    merge_ranges(xid_continue)
    merge_ranges(uppercase_letter)
    merge_ranges(lowercase_letter)
    merge_ranges(unicode_letter)
def make_array(array_name: str, range_list: List[Tuple[int, int]]) -> str:
    """Render a list of (start, end) ranges as a C++ `CharRange` array literal."""
    rows = [f"\t{{ 0x{lo:x}, 0x{hi:x} }},\n" for lo, hi in range_list]
    return f"constexpr inline CharRange {array_name}[] = {{\n" + "".join(rows) + "};\n\n"
def generate_char_range_inc() -> None:
    """Fetch the Unicode data and (re)write `core/string/char_range.inc`."""
    parse_unicode_data()
    output: str = generate_copyright_header("char_range.inc")
    output += f"""
// This file was generated using the `misc/scripts/char_range_fetch.py` script.
#pragma once
#include "core/typedefs.h"
// Unicode Derived Core Properties
// Source: {URL}
struct CharRange {{
\tchar32_t start;
\tchar32_t end;
}};\n\n"""
    # Emit one C++ array per collected property, in a fixed order.
    for array_name, ranges in (
        ("xid_start", xid_start),
        ("xid_continue", xid_continue),
        ("uppercase_letter", uppercase_letter),
        ("lowercase_letter", lowercase_letter),
        ("unicode_letter", unicode_letter),
    ):
        output += make_array(array_name, ranges)
    char_range_path: str = os.path.join(os.path.dirname(__file__), "../../core/string/char_range.inc")
    # Force LF newlines so the generated file is identical on every platform.
    with open(char_range_path, "w", newline="\n") as f:
        f.write(output)
    print("`char_range.inc` generated successfully.")
# Entry point: regenerate the data file when the script is run directly.
if __name__ == "__main__":
    generate_char_range_inc()

View file

@ -2,7 +2,6 @@
# -*- coding: utf-8 -*-
import sys
from pathlib import Path
if len(sys.argv) < 2:
print("Invalid usage of header_guards.py, it should be called with a path to one or multiple files.")
@ -13,7 +12,7 @@ invalid = []
for file in sys.argv[1:]:
header_start = -1
HEADER_CHECK_OFFSET = -1
header_end = -1
with open(file.strip(), "rt", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
@ -28,17 +27,21 @@ for file in sys.argv[1:]:
if sline.startswith("/**********"): # Godot header starts this way.
header_start = idx
else:
HEADER_CHECK_OFFSET = 0 # There is no Godot header.
header_end = 0 # There is no Godot header.
break
else:
if not sline.startswith("*") and not sline.startswith("/*"): # Not in the Godot header anymore.
HEADER_CHECK_OFFSET = idx + 1 # The include should be two lines below the Godot header.
if not sline.startswith(("*", "/*")): # Not in the Godot header anymore.
header_end = idx + 1 # The guard should be two lines below the Godot header.
break
if HEADER_CHECK_OFFSET < 0:
if (HEADER_CHECK_OFFSET := header_end) < 0 or HEADER_CHECK_OFFSET >= len(lines):
invalid.append(file)
continue
if lines[HEADER_CHECK_OFFSET].startswith("#pragma once"):
continue
# Might be using legacy header guards.
HEADER_BEGIN_OFFSET = HEADER_CHECK_OFFSET + 1
HEADER_END_OFFSET = len(lines) - 1
@ -46,124 +49,35 @@ for file in sys.argv[1:]:
invalid.append(file)
continue
split = file.split("/") # Already in posix-format.
prefix = ""
if split[0] == "modules" and split[-1] == "register_types.h":
prefix = f"{split[1]}_" # Name of module.
elif split[0] == "platform" and (file.endswith("api/api.h") or "/export/" in file):
prefix = f"{split[1]}_" # Name of platform.
elif file.startswith("modules/mono/utils") and "mono" not in split[-1]:
prefix = "MONO_"
elif file == "servers/rendering/storage/utilities.h":
prefix = "RENDERER_"
suffix = ""
if "dummy" in file and "dummy" not in split[-1]:
suffix = "_DUMMY"
elif "gles3" in file and "gles3" not in split[-1]:
suffix = "_GLES3"
elif "renderer_rd" in file and "rd" not in split[-1]:
suffix = "_RD"
elif split[-1] == "ustring.h":
suffix = "_GODOT"
name = (f"{prefix}{Path(file).stem}{suffix}{Path(file).suffix}".upper()
.replace(".", "_").replace("-", "_").replace(" ", "_")) # fmt: skip
HEADER_CHECK = f"#ifndef {name}\n"
HEADER_BEGIN = f"#define {name}\n"
HEADER_END = f"#endif // {name}\n"
if (
lines[HEADER_CHECK_OFFSET] == HEADER_CHECK
and lines[HEADER_BEGIN_OFFSET] == HEADER_BEGIN
and lines[HEADER_END_OFFSET] == HEADER_END
):
continue
# Guards might exist but with the wrong names.
if (
lines[HEADER_CHECK_OFFSET].startswith("#ifndef")
and lines[HEADER_BEGIN_OFFSET].startswith("#define")
and lines[HEADER_END_OFFSET].startswith("#endif")
):
lines[HEADER_CHECK_OFFSET] = HEADER_CHECK
lines[HEADER_BEGIN_OFFSET] = HEADER_BEGIN
lines[HEADER_END_OFFSET] = HEADER_END
lines[HEADER_CHECK_OFFSET] = "#pragma once"
lines[HEADER_BEGIN_OFFSET] = "\n"
lines.pop()
with open(file, "wt", encoding="utf-8", newline="\n") as f:
f.writelines(lines)
changed.append(file)
continue
header_check = -1
header_begin = -1
header_end = -1
pragma_once = -1
objc = False
for idx, line in enumerate(lines):
if line.startswith("// #import"): # Some dummy obj-c files only have commented out import lines.
objc = True
break
if not line.startswith("#"):
continue
elif line.startswith("#ifndef") and header_check == -1:
header_check = idx
elif line.startswith("#define") and header_begin == -1:
header_begin = idx
elif line.startswith("#endif") and header_end == -1:
header_end = idx
elif line.startswith("#pragma once"):
pragma_once = idx
break
elif line.startswith("#import"):
objc = True
# Verify `#pragma once` doesn't exist at invalid location.
misplaced = False
for line in lines:
if line.startswith("#pragma once"):
misplaced = True
break
if objc:
if misplaced:
invalid.append(file)
continue
if pragma_once != -1:
lines.pop(pragma_once)
lines.insert(HEADER_CHECK_OFFSET, HEADER_CHECK)
lines.insert(HEADER_BEGIN_OFFSET, HEADER_BEGIN)
lines.append("\n")
lines.append(HEADER_END)
with open(file, "wt", encoding="utf-8", newline="\n") as f:
f.writelines(lines)
changed.append(file)
continue
if header_check == -1 and header_begin == -1 and header_end == -1:
# Guards simply didn't exist
lines.insert(HEADER_CHECK_OFFSET, HEADER_CHECK)
lines.insert(HEADER_BEGIN_OFFSET, HEADER_BEGIN)
lines.append("\n")
lines.append(HEADER_END)
with open(file, "wt", encoding="utf-8", newline="\n") as f:
f.writelines(lines)
changed.append(file)
continue
if header_check != -1 and header_begin != -1 and header_end != -1:
# All prepends "found", see if we can salvage this.
if header_check == header_begin - 1 and header_begin < header_end:
lines.pop(header_check)
lines.pop(header_begin - 1)
lines.pop(header_end - 2)
if lines[header_end - 3] == "\n":
lines.pop(header_end - 3)
lines.insert(HEADER_CHECK_OFFSET, HEADER_CHECK)
lines.insert(HEADER_BEGIN_OFFSET, HEADER_BEGIN)
lines.append("\n")
lines.append(HEADER_END)
with open(file, "wt", encoding="utf-8", newline="\n") as f:
f.writelines(lines)
changed.append(file)
continue
invalid.append(file)
# Assume that we're simply missing a guard entirely.
lines.insert(HEADER_CHECK_OFFSET, "#pragma once\n\n")
with open(file, "wt", encoding="utf-8", newline="\n") as f:
f.writelines(lines)
changed.append(file)
if changed:
for file in changed:

View file

@ -8,7 +8,7 @@ import urllib.request
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../"))
from misc.utility.color import Ansi
from misc.utility.color import Ansi, color_print
# Base Godot dependencies path
# If cross-compiling (no LOCALAPPDATA), we install in `bin`
@ -42,7 +42,7 @@ if not os.path.exists(deps_folder):
os.makedirs(deps_folder)
# Mesa NIR
print(f"{Ansi.BOLD}[1/3] Mesa NIR{Ansi.RESET}")
color_print(f"{Ansi.BOLD}[1/3] Mesa NIR")
if os.path.isfile(mesa_archive):
os.remove(mesa_archive)
print(f"Downloading Mesa NIR {mesa_filename} ...")
@ -69,7 +69,7 @@ if dlltool == "":
dlltool = shutil.which("x86_64-w64-mingw32-dlltool") or ""
has_mingw = gendef != "" and dlltool != ""
print(f"{Ansi.BOLD}[2/3] WinPixEventRuntime{Ansi.RESET}")
color_print(f"{Ansi.BOLD}[2/3] WinPixEventRuntime")
if os.path.isfile(pix_archive):
os.remove(pix_archive)
print(f"Downloading WinPixEventRuntime {pix_version} ...")
@ -100,7 +100,7 @@ else:
print(f"WinPixEventRuntime {pix_version} installed successfully.\n")
# DirectX 12 Agility SDK
print(f"{Ansi.BOLD}[3/3] DirectX 12 Agility SDK{Ansi.RESET}")
color_print(f"{Ansi.BOLD}[3/3] DirectX 12 Agility SDK")
if os.path.isfile(agility_sdk_archive):
os.remove(agility_sdk_archive)
print(f"Downloading DirectX 12 Agility SDK {agility_sdk_version} ...")
@ -116,5 +116,5 @@ os.remove(agility_sdk_archive)
print(f"DirectX 12 Agility SDK {agility_sdk_version} installed successfully.\n")
# Complete message
print(f'{Ansi.GREEN}All Direct3D 12 SDK components were installed to "{deps_folder}" successfully!{Ansi.RESET}')
print(f'{Ansi.GREEN}You can now build Godot with Direct3D 12 support enabled by running "scons d3d12=yes".{Ansi.RESET}')
color_print(f'{Ansi.GREEN}All Direct3D 12 SDK components were installed to "{deps_folder}" successfully!')
color_print(f'{Ansi.GREEN}You can now build Godot with Direct3D 12 support enabled by running "scons d3d12=yes".')

View file

@ -54,7 +54,7 @@ echo " $(dirname $CURDIR)/$NAME.tar.gz"
git archive $HEAD --prefix=$NAME/ -o $TMPDIR/$NAME.tar
# Adding custom .git/HEAD to tarball so that we can generate VERSION_HASH.
# Adding custom .git/HEAD to tarball so that we can generate GODOT_VERSION_HASH.
cd $TMPDIR
mkdir -p $NAME/.git
echo $HEAD > $NAME/.git/HEAD

View file

@ -0,0 +1,47 @@
#!/usr/bin/env python3
import argparse
import glob
import os
# This is a command-line tool, not a library: refuse to be imported.
if __name__ != "__main__":
    raise ImportError(f"{__name__} should not be used as a module.")
def main():
    """Delete stale cache files, returning the number of failures.

    Two passes: first remove every path listed in `redundant.txt` (if the
    file exists), then remove every cache entry whose last-access time is
    older than the given cutoff timestamp. Individual failures are reported
    and counted rather than aborting the run.
    """
    parser = argparse.ArgumentParser(description="Cleanup old cache files")
    parser.add_argument("timestamp", type=int, help="Unix timestamp cutoff")
    parser.add_argument("directory", help="Path to cache directory")
    args = parser.parse_args()

    failures = 0

    # TODO: Convert to non-hardcoded path
    if os.path.exists("redundant.txt"):
        with open("redundant.txt") as redundant:
            for item in (line.strip() for line in redundant):
                if not os.path.isfile(item):
                    continue
                try:
                    os.remove(item)
                except OSError:
                    print(f'Failed to handle "{item}"; skipping.')
                    failures += 1

    # Cache entries live two levels below the root: <directory>/<bucket>/<file>.
    for cache_file in glob.glob(os.path.join(args.directory, "*", "*")):
        try:
            if os.path.getatime(cache_file) < args.timestamp:
                os.remove(cache_file)
        except OSError:
            print(f'Failed to handle "{cache_file}"; skipping.')
            failures += 1

    return failures
try:
    # Exit with the failure count as the process status code.
    raise SystemExit(main())
except KeyboardInterrupt:
    # On Ctrl+C, restore the default SIGINT handler and re-deliver the signal
    # so the process terminates with the conventional interrupted-by-signal
    # status instead of a Python traceback.
    import signal

    signal.signal(signal.SIGINT, signal.SIG_DFL)
    os.kill(os.getpid(), signal.SIGINT)

View file

@ -55,8 +55,7 @@ def generate_ucaps_fetch() -> None:
source: str = generate_copyright_header("ucaps.h")
source += f"""
#ifndef UCAPS_H
#define UCAPS_H
#pragma once
// This file was generated using the `misc/scripts/ucaps_fetch.py` script.
@ -105,8 +104,6 @@ static int _find_lower(int ch) {
\treturn ch;
}
#endif // UCAPS_H
"""
ucaps_path: str = os.path.join(os.path.dirname(__file__), "../../core/string/ucaps.h")