First Commit

This commit is contained in:
2025-11-18 14:18:26 -07:00
parent 33eb6e3707
commit 27277ec342
6106 changed files with 3571167 additions and 0 deletions

38
3rdparty/3rdparty.props vendored Normal file
View File

@@ -0,0 +1,38 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Shared MSBuild property sheet imported by every 3rdparty project in the
     solution. Centralises output/intermediate directories, warning policy and
     instruction-set / Clang-specific compiler options. -->
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <PropertyGroup>
    <_ProjectFileVersion>10.0.30128.1</_ProjectFileVersion>
    <!-- Route libraries and object files into a shared build tree keyed by
         platform and configuration so projects never collide. -->
    <OutDir>$(SolutionDir)build\3rdparty\lib-$(PlatformName)-$(Configuration)\</OutDir>
    <IntDir>$(SolutionDir)build\3rdparty\obj-$(ProjectName)-$(PlatformName)-$(Configuration)\</IntDir>
    <ExtensionsToDeleteOnClean>*.bsc;*.idb;*.sbr;*.res;*.pch;*.pdb;*.obj;*.tlb;*.tli;*.tlh;*.tmp;*.rsp;*.pgc;*.pgd;*.meta;$(TargetPath);$(ExtensionsToDeleteOnClean)</ExtensionsToDeleteOnClean>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <AdditionalIncludeDirectories>$(ProjectDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <PreprocessorDefinitions>__WIN32__;WIN32;_WINDOWS;_CRT_SECURE_NO_WARNINGS;_CRT_SECURE_NO_DEPRECATE;_HAS_EXCEPTIONS=0;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <FunctionLevelLinking>true</FunctionLevelLinking>
      <RuntimeTypeInfo>false</RuntimeTypeInfo>
      <ExceptionHandling>false</ExceptionHandling>
      <!-- Third-party code is built with warnings disabled; it is not ours to fix. -->
      <WarningLevel>TurnOffAllWarnings</WarningLevel>
      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
      <CompileAs>Default</CompileAs>
      <LanguageStandard>stdcpp20</LanguageStandard>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
      <MinimalRebuild>false</MinimalRebuild>
      <ConformanceMode>true</ConformanceMode>
      <!-- MSVC automatically adds __AVX__ and __AVX2__ appropriately -->
      <PreprocessorDefinitions>_M_X86;__SSE4_1__;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <!-- Configuration names encode the toolchain/ISA (e.g. "Release AVX2 Clang");
           the String.Contains checks below key off those substrings. -->
      <EnableEnhancedInstructionSet Condition="!$(Configuration.Contains(AVX2)) Or $(Configuration.Contains(Clang))">NotSet</EnableEnhancedInstructionSet>
      <EnableEnhancedInstructionSet Condition="$(Configuration.Contains(AVX2)) And !$(Configuration.Contains(Clang))">AdvancedVectorExtensions2</EnableEnhancedInstructionSet>
      <!-- clang-cl takes GCC-style -march switches instead of /arch. -->
      <AdditionalOptions Condition="'$(Platform)'=='x64' And $(Configuration.Contains(Clang)) And !$(Configuration.Contains(AVX2))"> -march=nehalem %(AdditionalOptions)</AdditionalOptions>
      <AdditionalOptions Condition="'$(Platform)'=='x64' And $(Configuration.Contains(Clang)) And $(Configuration.Contains(AVX2))"> -march=haswell %(AdditionalOptions)</AdditionalOptions>
      <AdditionalOptions Condition="'$(Platform)'=='ARM64' And $(Configuration.Contains(Clang))"> -march=armv8.4-a %(AdditionalOptions)</AdditionalOptions>
      <AdditionalOptions Condition="!$(Configuration.Contains(Clang))">%(AdditionalOptions) /Zc:externConstexpr /Zc:__cplusplus /Zo /utf-8</AdditionalOptions>
      <!-- Force ThinLTO for Release builds, MSVC doesn't seem to do it otherwise. -->
      <!-- Also due to include order, needs to be set here, rather than in CodeGen_Release.props -->
      <AdditionalOptions Condition="$(Configuration.Contains(Clang)) And $(Configuration.Contains(Release))"> -flto=thin %(AdditionalOptions)</AdditionalOptions>
    </ClCompile>
  </ItemDefinitionGroup>
</Project>

18
3rdparty/DefaultProjectRootDir.props vendored Normal file
View File

@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Defines the ProjectRootDir / SvnRootDir user macros used by 3rdparty
     projects, and surfaces them in the VS property-pages macro list. -->
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <PropertyGroup Label="UserMacros">
    <ProjectRootDir>$(ProjectDir)</ProjectRootDir>
    <!-- Two levels up from the project directory, i.e. the repository root. -->
    <SvnRootDir>$(ProjectRootDir)\..\..</SvnRootDir>
  </PropertyGroup>
  <PropertyGroup>
    <_ProjectFileVersion>10.0.30128.1</_ProjectFileVersion>
  </PropertyGroup>
  <ItemGroup>
    <!-- BuildMacro items make the macros visible in the IDE's macro listing. -->
    <BuildMacro Include="ProjectRootDir">
      <Value>$(ProjectRootDir)</Value>
    </BuildMacro>
    <BuildMacro Include="SvnRootDir">
      <Value>$(SvnRootDir)</Value>
    </BuildMacro>
  </ItemGroup>
</Project>

41
3rdparty/ccc/CMakeLists.txt vendored Normal file
View File

@@ -0,0 +1,41 @@
# Chaos Compiler Collection (ccc): static library for reading PS2 symbol tables.
cmake_minimum_required(VERSION 3.14)
project(ccc LANGUAGES CXX)

add_library(ccc STATIC
  src/ccc/ast.cpp
  src/ccc/ast.h
  src/ccc/elf.cpp
  src/ccc/elf.h
  src/ccc/elf_symtab.cpp
  src/ccc/elf_symtab.h
  src/ccc/importer_flags.cpp
  src/ccc/importer_flags.h
  src/ccc/mdebug_analysis.cpp
  src/ccc/mdebug_analysis.h
  src/ccc/mdebug_importer.cpp
  src/ccc/mdebug_importer.h
  src/ccc/mdebug_section.cpp
  src/ccc/mdebug_section.h
  src/ccc/mdebug_symbols.cpp
  src/ccc/mdebug_symbols.h
  src/ccc/sndll.cpp
  src/ccc/sndll.h
  src/ccc/stabs.cpp
  src/ccc/stabs.h
  src/ccc/stabs_to_ast.cpp
  src/ccc/stabs_to_ast.h
  src/ccc/symbol_database.cpp
  src/ccc/symbol_database.h
  src/ccc/symbol_file.cpp
  src/ccc/symbol_file.h
  src/ccc/symbol_table.cpp
  src/ccc/symbol_table.h
  src/ccc/util.cpp
  src/ccc/util.h
)

# Namespaced alias so consumers can link ccc::ccc and get a hard
# configure-time error on typos instead of a silent linker failure.
add_library(ccc::ccc ALIAS ccc)

# Require C++20 on the target itself rather than mutating the global
# CMAKE_CXX_STANDARD / CMAKE_CXX_STANDARD_REQUIRED / CMAKE_CXX_EXTENSIONS
# variables, which would leak into sibling targets when this directory is
# pulled in via add_subdirectory(). PUBLIC because the headers are C++20.
target_compile_features(ccc PUBLIC cxx_std_20)
set_target_properties(ccc PROPERTIES CXX_EXTENSIONS OFF)

# Consumers include headers as "ccc/...", so export the src directory.
target_include_directories(ccc PUBLIC src)

37
3rdparty/ccc/README.md vendored Normal file
View File

@@ -0,0 +1,37 @@
# Chaos Compiler Collection
This code was originally developed in the following repository and was copied
into PCSX2 by the author:
- [https://github.com/chaoticgd/ccc](https://github.com/chaoticgd/ccc)
The upstream repository includes additional resources that are not present in the PCSX2 repository.
## Documentation
### DWARF (.debug) Section
- [DWARF Debugging Information Format](https://dwarfstd.org/doc/dwarf_1_1_0.pdf)
### MIPS Debug (.mdebug) Section
- [Third Eye Software and the MIPS symbol table (Peter Rowell)](http://datahedron.com/mips.html)
- [MIPS Mdebug Debugging Information (David Anderson, 1996)](https://www.prevanders.net/Mdebug.ps)
- MIPS Assembly Language Programmer's Guide, Symbol Table Chapter (Silicon Graphics, 1992)
- Tru64 UNIX Object File and Symbol Table Format Specification, Symbol Table Chapter
- `mdebugread.c` from gdb (reading)
- `ecoff.c` from gas (writing)
- `include/coff/sym.h` from binutils (headers)
### MIPS EABI
- [MIPS EABI](https://sourceware.org/legacy-ml/binutils/2003-06/msg00436.html)
### STABS
- [The "stabs" representation of debugging information (Julia Menapace, Jim Kingdon, and David MacKenzie, 1992-???)](https://sourceware.org/gdb/onlinedocs/stabs.html)
- `stabs.c` from binutils (reading)
- `stabsread.c` from gdb (reading)
- `dbxread.c` from gdb (reading)
- `dbxout.c` from gcc (writing)
- `stab.def` from gcc (symbol codes)

75
3rdparty/ccc/ccc.vcxproj vendored Normal file
View File

@@ -0,0 +1,75 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Visual Studio project: builds the Chaos Compiler Collection (ccc) as a
     static library. Mirrors the source list in 3rdparty/ccc/CMakeLists.txt. -->
<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <Import Project="$(SolutionDir)common\vsprops\BaseProjectConfig.props" />
  <Import Project="$(SolutionDir)common\vsprops\WinSDK.props" />
  <PropertyGroup Label="Globals">
    <ProjectGuid>{2589F8CE-EA77-4B73-911E-64074569795B}</ProjectGuid>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
  <PropertyGroup Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <!-- Configuration names containing "Clang" select the clang-cl toolset. -->
    <PlatformToolset Condition="!$(Configuration.Contains(Clang))">$(DefaultPlatformToolset)</PlatformToolset>
    <PlatformToolset Condition="$(Configuration.Contains(Clang))">ClangCL</PlatformToolset>
    <CharacterSet>MultiByte</CharacterSet>
    <WholeProgramOptimization Condition="$(Configuration.Contains(Release))">true</WholeProgramOptimization>
    <UseDebugLibraries Condition="$(Configuration.Contains(Debug))">true</UseDebugLibraries>
    <UseDebugLibraries Condition="!$(Configuration.Contains(Debug))">false</UseDebugLibraries>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings" />
  <ImportGroup Label="PropertySheets">
    <!-- Shared 3rdparty defaults plus the per-configuration codegen sheets. -->
    <Import Project="..\DefaultProjectRootDir.props" />
    <Import Project="..\3rdparty.props" />
    <Import Condition="$(Configuration.Contains(Debug))" Project="..\..\common\vsprops\CodeGen_Debug.props" />
    <Import Condition="$(Configuration.Contains(Devel))" Project="..\..\common\vsprops\CodeGen_Devel.props" />
    <Import Condition="$(Configuration.Contains(Release))" Project="..\..\common\vsprops\CodeGen_Release.props" />
    <Import Condition="!$(Configuration.Contains(Release))" Project="..\..\common\vsprops\IncrementalLinking.props" />
  </ImportGroup>
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup>
    <CodeAnalysisRuleSet>AllRules.ruleset</CodeAnalysisRuleSet>
  </PropertyGroup>
  <ItemGroup>
    <ClInclude Include="src\ccc\ast.h" />
    <ClInclude Include="src\ccc\elf.h" />
    <ClInclude Include="src\ccc\elf_symtab.h" />
    <ClInclude Include="src\ccc\importer_flags.h" />
    <ClInclude Include="src\ccc\mdebug_analysis.h" />
    <ClInclude Include="src\ccc\mdebug_importer.h" />
    <ClInclude Include="src\ccc\mdebug_section.h" />
    <ClInclude Include="src\ccc\mdebug_symbols.h" />
    <ClInclude Include="src\ccc\sndll.h" />
    <ClInclude Include="src\ccc\stabs.h" />
    <ClInclude Include="src\ccc\stabs_to_ast.h" />
    <ClInclude Include="src\ccc\symbol_database.h" />
    <ClInclude Include="src\ccc\symbol_file.h" />
    <ClInclude Include="src\ccc\symbol_table.h" />
    <ClInclude Include="src\ccc\util.h" />
  </ItemGroup>
  <ItemGroup>
    <ClCompile Include="src\ccc\ast.cpp" />
    <ClCompile Include="src\ccc\elf.cpp" />
    <ClCompile Include="src\ccc\elf_symtab.cpp" />
    <ClCompile Include="src\ccc\importer_flags.cpp" />
    <ClCompile Include="src\ccc\mdebug_analysis.cpp" />
    <ClCompile Include="src\ccc\mdebug_importer.cpp" />
    <ClCompile Include="src\ccc\mdebug_section.cpp" />
    <ClCompile Include="src\ccc\mdebug_symbols.cpp" />
    <ClCompile Include="src\ccc\sndll.cpp" />
    <ClCompile Include="src\ccc\stabs.cpp" />
    <ClCompile Include="src\ccc\stabs_to_ast.cpp" />
    <ClCompile Include="src\ccc\symbol_database.cpp" />
    <ClCompile Include="src\ccc\symbol_file.cpp" />
    <ClCompile Include="src\ccc\symbol_table.cpp" />
    <ClCompile Include="src\ccc\util.cpp" />
  </ItemGroup>
  <ItemDefinitionGroup>
    <ClCompile>
      <WarningLevel>TurnOffAllWarnings</WarningLevel>
      <AdditionalIncludeDirectories>$(ProjectDir)src;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
      <LanguageStandard>stdcpp20</LanguageStandard>
    </ClCompile>
  </ItemDefinitionGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
  <ImportGroup Label="ExtensionTargets" />
</Project>

111
3rdparty/ccc/ccc.vcxproj.filters vendored Normal file
View File

@@ -0,0 +1,111 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Solution Explorer grouping for ccc.vcxproj: assigns each file to the
     standard "Source Files" / "Header Files" virtual folders. Purely an IDE
     presentation file; it has no effect on the build. -->
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup>
    <Filter Include="Source Files">
      <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
      <Extensions>cpp;c;cc;cxx;c++;cppm;ixx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
    </Filter>
    <Filter Include="Header Files">
      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
      <Extensions>h;hh;hpp;hxx;h++;hm;inl;inc;ipp;xsd</Extensions>
    </Filter>
    <Filter Include="Resource Files">
      <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
      <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
    </Filter>
  </ItemGroup>
  <!-- Headers shown under "Header Files". -->
  <ItemGroup>
    <ClInclude Include="src\ccc\ast.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\elf.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\elf_symtab.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\importer_flags.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\mdebug_analysis.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\mdebug_importer.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\mdebug_section.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\mdebug_symbols.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\sndll.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\stabs.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\stabs_to_ast.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\symbol_database.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\symbol_file.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\symbol_table.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="src\ccc\util.h">
      <Filter>Header Files</Filter>
    </ClInclude>
  </ItemGroup>
  <!-- Translation units shown under "Source Files". -->
  <ItemGroup>
    <ClCompile Include="src\ccc\ast.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\elf.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\elf_symtab.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\importer_flags.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\mdebug_analysis.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\mdebug_importer.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\mdebug_section.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\mdebug_symbols.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\sndll.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\stabs.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\stabs_to_ast.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\symbol_database.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\symbol_file.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\symbol_table.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="src\ccc\util.cpp">
      <Filter>Source Files</Filter>
    </ClCompile>
  </ItemGroup>
</Project>

562
3rdparty/ccc/src/ccc/ast.cpp vendored Normal file
View File

@@ -0,0 +1,562 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "ast.h"
#include "importer_flags.h"
#include "symbol_database.h"
namespace ccc::ast {
static bool compare_nodes_and_merge(
CompareResult& dest, const Node& node_lhs, const Node& node_rhs, const SymbolDatabase* database);
static bool try_to_match_wobbly_typedefs(
const Node& node_lhs, const Node& node_rhs, const SymbolDatabase& database);
// Record the access specifier on this node, unless the importer was
// configured (via the NO_ACCESS_SPECIFIERS flag) to discard them.
void Node::set_access_specifier(AccessSpecifier specifier, u32 importer_flags)
{
	if(importer_flags & NO_ACCESS_SPECIFIERS) {
		return;
	}
	access_specifier = specifier;
}
// Follow a chain of type names to the underlying physical type, stopping
// after max_depth hops, at the first non-TYPE_NAME node, or at a name that
// cannot be resolved. Returns the final node plus the last data type symbol
// that was traversed (nullptr if no hop succeeded).
std::pair<Node*, DataType*> Node::physical_type(SymbolDatabase& database, s32 max_depth)
{
	Node* type = this;
	DataType* symbol = nullptr;
	s32 depth = 0;
	while(depth < max_depth && type->descriptor == TYPE_NAME) {
		DataType* data_type = database.data_types.symbol_from_handle(type->as<TypeName>().data_type_handle);
		if(!data_type || !data_type->type()) {
			break;
		}
		symbol = data_type;
		type = data_type->type();
		depth++;
	}
	return std::pair(type, symbol);
}
std::pair<const Node*, const DataType*> Node::physical_type(const SymbolDatabase& database, s32 max_depth) const
{
return const_cast<Node*>(this)->physical_type(const_cast<SymbolDatabase&>(database), max_depth);
}
// Human-readable name for a member function modifier. Returns an empty
// string for values outside the enum.
const char* member_function_modifier_to_string(MemberFunctionModifier modifier)
{
	switch(modifier) {
		case MemberFunctionModifier::NONE: return "none";
		case MemberFunctionModifier::STATIC: return "static";
		case MemberFunctionModifier::VIRTUAL: return "virtual";
	}
	return "";
}
// Recursively collect the fields of this struct/union and of all of its base
// classes into a single flat list, in base-classes-first order. base_offset
// accumulates the byte offset of each base class so every FlatField carries
// an offset relative to the outermost object. Returns false when either
// budget (max_fields entries, max_depth recursion levels) is exhausted,
// leaving output partially filled.
bool StructOrUnion::flatten_fields(
	std::vector<FlatField>& output,
	const DataType* symbol,
	const SymbolDatabase& database,
	bool skip_statics,
	s32 base_offset,
	s32 max_fields,
	s32 max_depth) const
{
	if(max_depth == 0) {
		return false;
	}
	// Flatten inherited fields first.
	for(const std::unique_ptr<Node>& type_name : base_classes) {
		if(type_name->descriptor != TYPE_NAME) {
			continue;
		}
		s32 new_base_offset = base_offset + type_name->offset_bytes;
		DataTypeHandle handle = type_name->as<TypeName>().data_type_handle;
		const DataType* base_class_symbol = database.data_types.symbol_from_handle(handle);
		// Skip base classes that can't be resolved to a struct/union type.
		if(!base_class_symbol || !base_class_symbol->type() || base_class_symbol->type()->descriptor != STRUCT_OR_UNION) {
			continue;
		}
		const StructOrUnion& base_class = base_class_symbol->type()->as<StructOrUnion>();
		if(!base_class.flatten_fields(output, base_class_symbol, database, skip_statics, new_base_offset, max_fields, max_depth - 1)) {
			return false;
		}
	}
	// Then append this type's own fields.
	for(const std::unique_ptr<Node>& field : fields) {
		if(skip_statics && field->storage_class == STORAGE_CLASS_STATIC) {
			continue;
		}
		if((s32) output.size() >= max_fields) {
			return false;
		}
		FlatField& flat = output.emplace_back();
		flat.node = field.get();
		flat.symbol = symbol;
		flat.base_offset = base_offset;
	}
	return true;
}
// Human-readable name for how a type name node came into existence. Returns
// an empty string for values outside the enum.
const char* type_name_source_to_string(TypeNameSource source)
{
	switch(source) {
		case TypeNameSource::REFERENCE: return "reference";
		case TypeNameSource::CROSS_REFERENCE: return "cross_reference";
		case TypeNameSource::UNNAMED_THIS: return "this";
	}
	return "";
}
// Human-readable keyword for a forward-declared type. Returns an empty
// string for values outside the enum.
const char* forward_declared_type_to_string(ForwardDeclaredType type)
{
	switch(type) {
		case ForwardDeclaredType::STRUCT: return "struct";
		case ForwardDeclaredType::UNION: return "union";
		case ForwardDeclaredType::ENUM: return "enum";
	}
	return "";
}
// Return the resolved data type handle, or an empty (invalid) handle when
// this type name is only a forward declaration.
DataTypeHandle TypeName::data_type_handle_unless_forward_declared() const
{
	return is_forward_declared ? DataTypeHandle() : data_type_handle;
}
// Structurally compare two AST nodes for type deduplication. Returns a
// CompareResult whose type says whether the nodes match (and, if so, which
// side should be preferred) and whose fail_reason records the first
// difference found. When check_intrusive_fields is false, per-field
// properties such as name and offset are skipped so that a type can be
// compared against a typedef'd occurrence of itself (see
// try_to_match_wobbly_typedefs). database may be null; it is only used to
// resolve typedef chains in the recursive comparisons.
CompareResult compare_nodes(
	const Node& node_lhs, const Node& node_rhs, const SymbolDatabase* database, bool check_intrusive_fields)
{
	CompareResult result = CompareResultType::MATCHES_NO_SWAP;
	if(node_lhs.descriptor != node_rhs.descriptor) {
		return CompareFailReason::DESCRIPTOR;
	}
	if(check_intrusive_fields) {
		if(node_lhs.storage_class != node_rhs.storage_class) {
			// In some cases we can determine that a type was typedef'd for C
			// translation units, but not for C++ translation units, so we need
			// to add a special case for that here.
			if(node_lhs.storage_class == STORAGE_CLASS_TYPEDEF && node_rhs.storage_class == STORAGE_CLASS_NONE) {
				result = CompareResultType::MATCHES_FAVOUR_LHS;
			} else if(node_lhs.storage_class == STORAGE_CLASS_NONE && node_rhs.storage_class == STORAGE_CLASS_TYPEDEF) {
				result = CompareResultType::MATCHES_FAVOUR_RHS;
			} else {
				return CompareFailReason::STORAGE_CLASS;
			}
		}
		// Vtable pointers and constructors can sometimes contain type numbers
		// that are different between translation units, so we don't want to
		// compare them.
		bool is_vtable_pointer = node_lhs.is_vtable_pointer && node_rhs.is_vtable_pointer;
		bool is_numbered_constructor = node_lhs.name.starts_with("$_") && node_rhs.name.starts_with("$_");
		if(node_lhs.name != node_rhs.name && !is_vtable_pointer && !is_numbered_constructor) {
			return CompareFailReason::NAME;
		}
		if(node_lhs.offset_bytes != node_rhs.offset_bytes) {
			return CompareFailReason::RELATIVE_OFFSET_BYTES;
		}
		if(node_lhs.size_bits != node_rhs.size_bits) {
			return CompareFailReason::SIZE_BITS;
		}
		if(node_lhs.is_const != node_rhs.is_const) {
			return CompareFailReason::CONSTNESS;
		}
	}
	// Per-descriptor comparison. Both nodes are known to share the same
	// descriptor at this point. Child nodes are compared with
	// compare_nodes_and_merge, which folds their results into `result` and
	// returns true as soon as a difference is found.
	switch(node_lhs.descriptor) {
		case ARRAY: {
			const auto [lhs, rhs] = Node::as<Array>(node_lhs, node_rhs);
			if(compare_nodes_and_merge(result, *lhs.element_type.get(), *rhs.element_type.get(), database)) {
				return result;
			}
			if(lhs.element_count != rhs.element_count) {
				return CompareFailReason::ARRAY_ELEMENT_COUNT;
			}
			break;
		}
		case BITFIELD: {
			const auto [lhs, rhs] = Node::as<BitField>(node_lhs, node_rhs);
			if(lhs.bitfield_offset_bits != rhs.bitfield_offset_bits) {
				return CompareFailReason::BITFIELD_OFFSET_BITS;
			}
			if(compare_nodes_and_merge(result, *lhs.underlying_type.get(), *rhs.underlying_type.get(), database)) {
				return result;
			}
			break;
		}
		case BUILTIN: {
			const auto [lhs, rhs] = Node::as<BuiltIn>(node_lhs, node_rhs);
			if(lhs.bclass != rhs.bclass) {
				return CompareFailReason::BUILTIN_CLASS;
			}
			break;
		}
		case ENUM: {
			const auto [lhs, rhs] = Node::as<Enum>(node_lhs, node_rhs);
			if(lhs.constants != rhs.constants) {
				return CompareFailReason::ENUM_CONSTANTS;
			}
			break;
		}
		case ERROR_NODE: {
			// Error nodes carry no payload worth comparing.
			break;
		}
		case FUNCTION: {
			const auto [lhs, rhs] = Node::as<Function>(node_lhs, node_rhs);
			if(lhs.return_type.has_value() != rhs.return_type.has_value()) {
				return CompareFailReason::FUNCTION_RETURN_TYPE_HAS_VALUE;
			}
			if(lhs.return_type.has_value()) {
				if(compare_nodes_and_merge(result, *lhs.return_type->get(), *rhs.return_type->get(), database)) {
					return result;
				}
			}
			if(lhs.parameters.has_value() && rhs.parameters.has_value()) {
				if(lhs.parameters->size() != rhs.parameters->size()) {
					return CompareFailReason::FUNCTION_PARAMAETER_COUNT;
				}
				for(size_t i = 0; i < lhs.parameters->size(); i++) {
					if(compare_nodes_and_merge(result, *(*lhs.parameters)[i].get(), *(*rhs.parameters)[i].get(), database)) {
						return result;
					}
				}
			} else if(lhs.parameters.has_value() != rhs.parameters.has_value()) {
				return CompareFailReason::FUNCTION_PARAMETERS_HAS_VALUE;
			}
			if(lhs.modifier != rhs.modifier) {
				return CompareFailReason::FUNCTION_MODIFIER;
			}
			break;
		}
		case POINTER_OR_REFERENCE: {
			const auto [lhs, rhs] = Node::as<PointerOrReference>(node_lhs, node_rhs);
			if(lhs.is_pointer != rhs.is_pointer) {
				return CompareFailReason::DESCRIPTOR;
			}
			if(compare_nodes_and_merge(result, *lhs.value_type.get(), *rhs.value_type.get(), database)) {
				return result;
			}
			break;
		}
		case POINTER_TO_DATA_MEMBER: {
			const auto [lhs, rhs] = Node::as<PointerToDataMember>(node_lhs, node_rhs);
			if(compare_nodes_and_merge(result, *lhs.class_type.get(), *rhs.class_type.get(), database)) {
				return result;
			}
			if(compare_nodes_and_merge(result, *lhs.member_type.get(), *rhs.member_type.get(), database)) {
				return result;
			}
			break;
		}
		case STRUCT_OR_UNION: {
			const auto [lhs, rhs] = Node::as<StructOrUnion>(node_lhs, node_rhs);
			if(lhs.is_struct != rhs.is_struct) {
				return CompareFailReason::DESCRIPTOR;
			}
			if(lhs.base_classes.size() != rhs.base_classes.size()) {
				return CompareFailReason::BASE_CLASS_COUNT;
			}
			for(size_t i = 0; i < lhs.base_classes.size(); i++) {
				if(compare_nodes_and_merge(result, *lhs.base_classes[i].get(), *rhs.base_classes[i].get(), database)) {
					return result;
				}
			}
			if(lhs.fields.size() != rhs.fields.size()) {
				return CompareFailReason::FIELDS_SIZE;
			}
			for(size_t i = 0; i < lhs.fields.size(); i++) {
				if(compare_nodes_and_merge(result, *lhs.fields[i].get(), *rhs.fields[i].get(), database)) {
					return result;
				}
			}
			if(lhs.member_functions.size() != rhs.member_functions.size()) {
				return CompareFailReason::MEMBER_FUNCTION_COUNT;
			}
			for(size_t i = 0; i < lhs.member_functions.size(); i++) {
				if(compare_nodes_and_merge(result, *lhs.member_functions[i].get(), *rhs.member_functions[i].get(), database)) {
					return result;
				}
			}
			break;
		}
		case TYPE_NAME: {
			const auto [lhs, rhs] = Node::as<TypeName>(node_lhs, node_rhs);
			// Don't check the source so that REFERENCE and CROSS_REFERENCE are
			// treated as the same.
			if(lhs.data_type_handle != rhs.data_type_handle) {
				return CompareFailReason::TYPE_NAME;
			}
			const TypeName::UnresolvedStabs* lhs_unresolved_stabs = lhs.unresolved_stabs.get();
			const TypeName::UnresolvedStabs* rhs_unresolved_stabs = rhs.unresolved_stabs.get();
			if(lhs_unresolved_stabs && rhs_unresolved_stabs) {
				if(lhs_unresolved_stabs->type_name != rhs_unresolved_stabs->type_name) {
					return CompareFailReason::TYPE_NAME;
				}
			} else if(lhs_unresolved_stabs || rhs_unresolved_stabs) {
				// Only one side is unresolved, so they cannot be the same name.
				return CompareFailReason::TYPE_NAME;
			}
			break;
		}
	}
	return result;
}
// Compare two child nodes and fold the outcome into dest. If the plain
// comparison differs but one side turns out to be a typedef of the other
// (see try_to_match_wobbly_typedefs), the result is downgraded to a match
// favouring the non-typedef'd side. Returns true iff the merged result is
// DIFFERS, which lets callers bail out early.
static bool compare_nodes_and_merge(
	CompareResult& dest, const Node& node_lhs, const Node& node_rhs, const SymbolDatabase* database)
{
	CompareResult result = compare_nodes(node_lhs, node_rhs, database, true);
	if(database) {
		if(result.type == CompareResultType::DIFFERS && try_to_match_wobbly_typedefs(node_lhs, node_rhs, *database)) {
			result.type = CompareResultType::MATCHES_FAVOUR_LHS;
		} else if(result.type == CompareResultType::DIFFERS && try_to_match_wobbly_typedefs(node_rhs, node_lhs, *database)) {
			result.type = CompareResultType::MATCHES_FAVOUR_RHS;
		}
	}
	// Merge the child result into the accumulated result. The branches are
	// ordered so that stronger verdicts (DIFFERS, then MATCHES_CONFUSED)
	// dominate weaker ones; do not reorder them.
	if(dest.type != result.type) {
		if(dest.type == CompareResultType::DIFFERS || result.type == CompareResultType::DIFFERS) {
			// If any of the inner types differ, the outer type does too.
			dest.type = CompareResultType::DIFFERS;
		} else if(dest.type == CompareResultType::MATCHES_CONFUSED || result.type == CompareResultType::MATCHES_CONFUSED) {
			// Propagate confusion.
			dest.type = CompareResultType::MATCHES_CONFUSED;
		} else if(dest.type == CompareResultType::MATCHES_FAVOUR_LHS && result.type == CompareResultType::MATCHES_FAVOUR_RHS) {
			// One of the results favours the LHS node and the other favours the
			// RHS node so we are confused.
			dest.type = CompareResultType::MATCHES_CONFUSED;
		} else if(dest.type == CompareResultType::MATCHES_FAVOUR_RHS && result.type == CompareResultType::MATCHES_FAVOUR_LHS) {
			// One of the results favours the LHS node and the other favours the
			// RHS node so we are confused.
			dest.type = CompareResultType::MATCHES_CONFUSED;
		} else if(dest.type == CompareResultType::MATCHES_FAVOUR_LHS || result.type == CompareResultType::MATCHES_FAVOUR_LHS) {
			// One of the results favours the LHS node and the other is neutral
			// so go with the LHS node.
			dest.type = CompareResultType::MATCHES_FAVOUR_LHS;
		} else if(dest.type == CompareResultType::MATCHES_FAVOUR_RHS || result.type == CompareResultType::MATCHES_FAVOUR_RHS) {
			// One of the results favours the RHS node and the other is neutral
			// so go with the RHS node.
			dest.type = CompareResultType::MATCHES_FAVOUR_RHS;
		}
	}
	// Keep the first failure reason that was recorded.
	if(dest.fail_reason == CompareFailReason::NONE) {
		dest.fail_reason = result.fail_reason;
	}
	return dest.type == CompareResultType::DIFFERS;
}
// Check whether type_name_node is an unresolved STABS type name that, once
// resolved through its source file's type-number table, refers to a type
// structurally equal to raw_node. Returns true when that is the case.
static bool try_to_match_wobbly_typedefs(
	const Node& type_name_node, const Node& raw_node, const SymbolDatabase& database)
{
	// Detect if one side has a typedef when the other just has the plain type.
	// This was previously a common reason why type deduplication would fail.
	if(type_name_node.descriptor != TYPE_NAME) {
		return false;
	}
	const TypeName& type_name = type_name_node.as<TypeName>();
	if(const TypeName::UnresolvedStabs* unresolved_stabs = type_name.unresolved_stabs.get()) {
		// Bail out if we don't know which file or type number to look up.
		if(unresolved_stabs->referenced_file_handle == (u32) -1 || !unresolved_stabs->stabs_type_number.valid()) {
			return false;
		}
		const SourceFile* source_file =
			database.source_files.symbol_from_handle(unresolved_stabs->referenced_file_handle);
		CCC_ASSERT(source_file);
		auto handle = source_file->stabs_type_number_to_handle.find(unresolved_stabs->stabs_type_number);
		if(handle != source_file->stabs_type_number_to_handle.end()) {
			const DataType* referenced_type = database.data_types.symbol_from_handle(handle->second);
			CCC_ASSERT(referenced_type && referenced_type->type());
			// Don't compare 'intrusive' fields e.g. the offset.
			CompareResult new_result = compare_nodes(*referenced_type->type(), raw_node, &database, false);
			if(new_result.type != CompareResultType::DIFFERS) {
				return true;
			}
		}
	}
	return false;
}
// Human-readable description of why two nodes failed to compare equal, for
// diagnostics. Returns an empty string for values outside the enum.
const char* compare_fail_reason_to_string(CompareFailReason reason)
{
	switch(reason) {
		case CompareFailReason::NONE: return "error";
		case CompareFailReason::DESCRIPTOR: return "descriptor";
		case CompareFailReason::STORAGE_CLASS: return "storage class";
		case CompareFailReason::NAME: return "name";
		case CompareFailReason::RELATIVE_OFFSET_BYTES: return "relative offset";
		case CompareFailReason::ABSOLUTE_OFFSET_BYTES: return "absolute offset";
		case CompareFailReason::BITFIELD_OFFSET_BITS: return "bitfield offset";
		case CompareFailReason::SIZE_BITS: return "size";
		case CompareFailReason::CONSTNESS: return "constness";
		case CompareFailReason::ARRAY_ELEMENT_COUNT: return "array element count";
		case CompareFailReason::BUILTIN_CLASS: return "builtin class";
		case CompareFailReason::FUNCTION_RETURN_TYPE_HAS_VALUE: return "function return type has value";
		// Note the enumerator's name is misspelled upstream; the message it
		// maps to is spelled correctly here.
		case CompareFailReason::FUNCTION_PARAMAETER_COUNT: return "function parameter count";
		case CompareFailReason::FUNCTION_PARAMETERS_HAS_VALUE: return "function parameter";
		case CompareFailReason::FUNCTION_MODIFIER: return "function modifier";
		case CompareFailReason::ENUM_CONSTANTS: return "enum constant";
		case CompareFailReason::BASE_CLASS_COUNT: return "base class count";
		case CompareFailReason::FIELDS_SIZE: return "fields size";
		case CompareFailReason::MEMBER_FUNCTION_COUNT: return "member function count";
		case CompareFailReason::VTABLE_GLOBAL: return "vtable global";
		case CompareFailReason::TYPE_NAME: return "type name";
		case CompareFailReason::VARIABLE_CLASS: return "variable class";
		case CompareFailReason::VARIABLE_TYPE: return "variable type";
		case CompareFailReason::VARIABLE_STORAGE: return "variable storage";
		case CompareFailReason::VARIABLE_BLOCK: return "variable block";
	}
	return "";
}
// Human-readable name for a node's type, distinguishing pointer vs reference
// and struct vs union using the node's payload. Returns an empty string for
// descriptors outside the enum.
const char* node_type_to_string(const Node& node)
{
	switch(node.descriptor) {
		case ARRAY: return "array";
		case BITFIELD: return "bitfield";
		case BUILTIN: return "builtin";
		case ENUM: return "enum";
		case ERROR_NODE: return "error";
		case FUNCTION: return "function";
		case POINTER_OR_REFERENCE: {
			const PointerOrReference& pointer_or_reference = node.as<PointerOrReference>();
			if(pointer_or_reference.is_pointer) {
				return "pointer";
			} else {
				return "reference";
			}
		}
		case POINTER_TO_DATA_MEMBER: return "pointer_to_data_member";
		case STRUCT_OR_UNION: {
			const StructOrUnion& struct_or_union = node.as<StructOrUnion>();
			if(struct_or_union.is_struct) {
				return "struct";
			} else {
				return "union";
			}
		}
		case TYPE_NAME: return "type_name";
	}
	return "";
}
// Human-readable keyword for a storage class. Returns an empty string for
// values outside the enum.
const char* storage_class_to_string(StorageClass storage_class)
{
	switch(storage_class) {
		case STORAGE_CLASS_NONE: return "none";
		case STORAGE_CLASS_TYPEDEF: return "typedef";
		case STORAGE_CLASS_EXTERN: return "extern";
		case STORAGE_CLASS_STATIC: return "static";
		case STORAGE_CLASS_AUTO: return "auto";
		case STORAGE_CLASS_REGISTER: return "register";
	}
	return "";
}
// Human-readable keyword for an access specifier. Returns an empty string
// for values outside the enum.
const char* access_specifier_to_string(AccessSpecifier specifier)
{
	switch(specifier) {
		case AS_PUBLIC: return "public";
		case AS_PROTECTED: return "protected";
		case AS_PRIVATE: return "private";
	}
	return "";
}
// Human-readable description of a built-in type class. Returns an empty
// string for values outside the enum.
const char* builtin_class_to_string(BuiltInClass bclass)
{
	switch(bclass) {
		case BuiltInClass::VOID_TYPE: return "void";
		case BuiltInClass::UNSIGNED_8: return "8-bit unsigned integer";
		case BuiltInClass::SIGNED_8: return "8-bit signed integer";
		case BuiltInClass::UNQUALIFIED_8: return "8-bit integer";
		case BuiltInClass::BOOL_8: return "8-bit boolean";
		case BuiltInClass::UNSIGNED_16: return "16-bit unsigned integer";
		case BuiltInClass::SIGNED_16: return "16-bit signed integer";
		case BuiltInClass::UNSIGNED_32: return "32-bit unsigned integer";
		case BuiltInClass::SIGNED_32: return "32-bit signed integer";
		case BuiltInClass::FLOAT_32: return "32-bit floating point";
		case BuiltInClass::UNSIGNED_64: return "64-bit unsigned integer";
		case BuiltInClass::SIGNED_64: return "64-bit signed integer";
		case BuiltInClass::FLOAT_64: return "64-bit floating point";
		case BuiltInClass::UNSIGNED_128: return "128-bit unsigned integer";
		case BuiltInClass::SIGNED_128: return "128-bit signed integer";
		case BuiltInClass::UNQUALIFIED_128: return "128-bit integer";
		case BuiltInClass::FLOAT_128: return "128-bit floating point";
	}
	return "";
}
// Size in bytes of a built-in type class. Returns 0 for void and for values
// outside the enum.
s32 builtin_class_size(BuiltInClass bclass)
{
	switch(bclass) {
		case BuiltInClass::VOID_TYPE:
			return 0;
		case BuiltInClass::UNSIGNED_8:
		case BuiltInClass::SIGNED_8:
		case BuiltInClass::UNQUALIFIED_8:
		case BuiltInClass::BOOL_8:
			return 1;
		case BuiltInClass::UNSIGNED_16:
		case BuiltInClass::SIGNED_16:
			return 2;
		case BuiltInClass::UNSIGNED_32:
		case BuiltInClass::SIGNED_32:
		case BuiltInClass::FLOAT_32:
			return 4;
		case BuiltInClass::UNSIGNED_64:
		case BuiltInClass::SIGNED_64:
		case BuiltInClass::FLOAT_64:
			return 8;
		case BuiltInClass::UNSIGNED_128:
		case BuiltInClass::SIGNED_128:
		case BuiltInClass::UNQUALIFIED_128:
		case BuiltInClass::FLOAT_128:
			return 16;
	}
	return 0;
}
}

377
3rdparty/ccc/src/ccc/ast.h vendored Normal file
View File

@@ -0,0 +1,377 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include "symbol_database.h"
namespace ccc::ast {
// Discriminator for the concrete subtype of an ast::Node. Each enumerator
// corresponds to one Node subclass (see the "To add a new type of node"
// checklist below).
enum NodeDescriptor : u8 {
	ARRAY,
	BITFIELD,
	BUILTIN,
	ENUM,
	ERROR_NODE,
	FUNCTION,
	POINTER_OR_REFERENCE,
	POINTER_TO_DATA_MEMBER,
	STRUCT_OR_UNION,
	TYPE_NAME
};
// C++ member access level. Values are explicit because they are packed into
// the 2-bit Node::access_specifier bitfield.
enum AccessSpecifier {
	AS_PUBLIC = 0,
	AS_PROTECTED = 1,
	AS_PRIVATE = 2
};
// To add a new type of node:
// 1. Add it to the NodeDescriptor enum.
// 2. Create a struct for it.
// 3. Add support for it in for_each_node.
// 4. Add support for it in compute_size_bytes_recursive.
// 5. Add support for it in compare_nodes.
// 6. Add support for it in node_type_to_string.
// 7. Add support for it in CppPrinter::ast_node.
// 8. Add support for it in write_json.
// 9. Add support for it in refine_node.

// Base class for all AST nodes. The concrete type is identified by the
// descriptor field and accessed via as<T>().
struct Node {
	const NodeDescriptor descriptor;
	// Flags and small fields packed into bitfields.
	u8 is_const : 1 = false;
	u8 is_volatile : 1 = false;
	u8 is_virtual_base_class : 1 = false;
	u8 is_vtable_pointer : 1 = false;
	u8 is_constructor_or_destructor : 1 = false;
	u8 is_special_member_function : 1 = false;
	u8 is_operator_member_function : 1 = false;
	u8 cannot_compute_size : 1 = false;
	u8 storage_class : 4 = STORAGE_CLASS_NONE;
	u8 access_specifier : 2 = AS_PUBLIC;
	s32 size_bytes = -1;
	// If the name isn't populated for a given node, the name from the last
	// ancestor to have one should be used i.e. when processing the tree you
	// should pass the name down.
	std::string name;
	s32 offset_bytes = -1; // Offset relative to start of last inline struct/union.
	s32 size_bits = -1; // Size stored in the .mdebug symbol table, may not be set.
	
	Node(NodeDescriptor d) : descriptor(d) {}
	Node(const Node& rhs) = default;
	virtual ~Node() {}
	
	// Downcast to a concrete node type. Asserts that the descriptor matches.
	template <typename SubType>
	SubType& as() {
		CCC_ASSERT(descriptor == SubType::DESCRIPTOR);
		return *static_cast<SubType*>(this);
	}
	
	// Const overload of the downcast above.
	template <typename SubType>
	const SubType& as() const {
		CCC_ASSERT(descriptor == SubType::DESCRIPTOR);
		return *static_cast<const SubType*>(this);
	}
	
	// Downcast two nodes at once. Asserts that both descriptors match.
	template <typename SubType>
	static std::pair<const SubType&, const SubType&> as(const Node& lhs, const Node& rhs) {
		CCC_ASSERT(lhs.descriptor == SubType::DESCRIPTOR && rhs.descriptor == SubType::DESCRIPTOR);
		return std::pair<const SubType&, const SubType&>(static_cast<const SubType&>(lhs), static_cast<const SubType&>(rhs));
	}
	
	void set_access_specifier(AccessSpecifier specifier, u32 importer_flags);
	
	// If this node is a type name, repeatedly resolve it to the type it's
	// referencing, otherwise return (this, nullptr).
	std::pair<Node*, DataType*> physical_type(SymbolDatabase& database, s32 max_depth = 100);
	std::pair<const Node*, const DataType*> physical_type(const SymbolDatabase& database, s32 max_depth = 100) const;
};
// An array type, storing the element type and the element count.
struct Array : Node {
	std::unique_ptr<Node> element_type;
	s32 element_count = -1;
	
	Array() : Node(DESCRIPTOR) {}
	static const constexpr NodeDescriptor DESCRIPTOR = ARRAY;
};

// A bitfield member of a struct or union.
struct BitField : Node {
	s32 bitfield_offset_bits = -1; // Offset relative to the last byte (not the position of the underlying type!).
	std::unique_ptr<Node> underlying_type;
	
	BitField() : Node(DESCRIPTOR) {}
	static const constexpr NodeDescriptor DESCRIPTOR = BITFIELD;
};

// The machine representation of a built-in type. See builtin_class_size and
// builtin_class_to_string.
enum class BuiltInClass {
	VOID_TYPE,
	UNSIGNED_8, SIGNED_8, UNQUALIFIED_8, BOOL_8,
	UNSIGNED_16, SIGNED_16,
	UNSIGNED_32, SIGNED_32, FLOAT_32,
	UNSIGNED_64, SIGNED_64, FLOAT_64,
	UNSIGNED_128, SIGNED_128, UNQUALIFIED_128, FLOAT_128
};

// A built-in type such as an integer or floating point type.
struct BuiltIn : Node {
	BuiltInClass bclass = BuiltInClass::VOID_TYPE;
	
	BuiltIn() : Node(DESCRIPTOR) {}
	static const constexpr NodeDescriptor DESCRIPTOR = BUILTIN;
};
// An enum type. The constants are stored as (value, name) pairs.
struct Enum : Node {
	std::vector<std::pair<s32, std::string>> constants;
	
	Enum() : Node(DESCRIPTOR) {}
	static const constexpr NodeDescriptor DESCRIPTOR = ENUM;
};

// An error node, storing an error message.
struct Error : Node {
	std::string message;
	
	Error() : Node(ERROR_NODE) {}
	static const constexpr NodeDescriptor DESCRIPTOR = ERROR_NODE;
};

// Whether a member function is static, virtual, or neither.
enum class MemberFunctionModifier {
	NONE,
	STATIC,
	VIRTUAL
};

const char* member_function_modifier_to_string(MemberFunctionModifier modifier);

// A function type, including member-function-specific fields (modifier,
// vtable index, definition handle).
struct Function : Node {
	std::optional<std::unique_ptr<Node>> return_type;
	std::optional<std::vector<std::unique_ptr<Node>>> parameters;
	MemberFunctionModifier modifier = MemberFunctionModifier::NONE;
	s32 vtable_index = -1;
	FunctionHandle definition_handle; // Filled in by fill_in_pointers_to_member_function_definitions.
	
	Function() : Node(DESCRIPTOR) {}
	static const constexpr NodeDescriptor DESCRIPTOR = FUNCTION;
};

// A pointer or C++ reference type.
struct PointerOrReference : Node {
	bool is_pointer = true;
	std::unique_ptr<Node> value_type;
	
	PointerOrReference() : Node(DESCRIPTOR) {}
	static const constexpr NodeDescriptor DESCRIPTOR = POINTER_OR_REFERENCE;
};

// A C++ pointer to data member type.
struct PointerToDataMember : Node {
	std::unique_ptr<Node> class_type;
	std::unique_ptr<Node> member_type;
	
	PointerToDataMember() : Node(DESCRIPTOR) {}
	static const constexpr NodeDescriptor DESCRIPTOR = POINTER_TO_DATA_MEMBER;
};
// A struct or union type, including its base classes, fields and member
// functions.
struct StructOrUnion : Node {
	bool is_struct = true;
	std::vector<std::unique_ptr<Node>> base_classes;
	std::vector<std::unique_ptr<Node>> fields;
	std::vector<std::unique_ptr<Node>> member_functions;
	
	StructOrUnion() : Node(DESCRIPTOR) {}
	static const constexpr NodeDescriptor DESCRIPTOR = STRUCT_OR_UNION;
	
	// A single entry in the flattened field list built by flatten_fields.
	struct FlatField {
		// The field itself.
		const Node* node;
		// The symbol that owns the node.
		const DataType* symbol;
		// Offset of the innermost enclosing base class in the object.
		s32 base_offset = 0;
	};
	
	// Generate a flat list of all the fields in this class as well as all the
	// base classes recursively, but only until the max_fields or max_depth
	// limits are reached. Return true if all the fields were enumerated.
	bool flatten_fields(
		std::vector<FlatField>& output,
		const DataType* symbol,
		const SymbolDatabase& database,
		bool skip_statics,
		s32 base_offset = 0,
		s32 max_fields = 100000,
		s32 max_depth = 100) const;
};
// Where a type name came from in the symbol table data.
enum class TypeNameSource : u8 {
	REFERENCE, // A STABS type reference.
	CROSS_REFERENCE, // A STABS cross reference.
	UNNAMED_THIS // A this parameter (or return type) referencing an unnamed type.
};

const char* type_name_source_to_string(TypeNameSource source);

// The kind of type a forward declaration refers to.
enum class ForwardDeclaredType {
	STRUCT,
	UNION,
	ENUM // Should be illegal but STABS supports cross references to enums so it's here.
};

const char* forward_declared_type_to_string(ForwardDeclaredType type);

// A reference to another type by name.
struct TypeName : Node {
	DataTypeHandle data_type_handle;
	TypeNameSource source = TypeNameSource::REFERENCE;
	bool is_forward_declared = false;
	
	DataTypeHandle data_type_handle_unless_forward_declared() const;
	
	// STABS information kept around while the reference has not yet been
	// resolved to a data type handle.
	struct UnresolvedStabs {
		std::string type_name;
		SourceFileHandle referenced_file_handle;
		StabsTypeNumber stabs_type_number;
		std::optional<ForwardDeclaredType> type;
	};
	
	std::unique_ptr<UnresolvedStabs> unresolved_stabs;
	
	TypeName() : Node(DESCRIPTOR) {}
	static const constexpr NodeDescriptor DESCRIPTOR = TYPE_NAME;
};
// Overall result of comparing two AST nodes with compare_nodes.
enum class CompareResultType {
	MATCHES_NO_SWAP, // Both lhs and rhs are identical.
	MATCHES_CONFUSED, // Both lhs and rhs are almost identical, and we don't know which is better.
	MATCHES_FAVOUR_LHS, // Both lhs and rhs are almost identical, but lhs is better.
	MATCHES_FAVOUR_RHS, // Both lhs and rhs are almost identical, but rhs is better.
	DIFFERS, // The two nodes differ substantially.
};

// The field that was found to differ when two nodes were compared.
enum class CompareFailReason {
	NONE,
	DESCRIPTOR,
	STORAGE_CLASS,
	NAME,
	RELATIVE_OFFSET_BYTES,
	ABSOLUTE_OFFSET_BYTES,
	BITFIELD_OFFSET_BITS,
	SIZE_BITS,
	CONSTNESS,
	ARRAY_ELEMENT_COUNT,
	BUILTIN_CLASS,
	FUNCTION_RETURN_TYPE_HAS_VALUE,
	FUNCTION_PARAMAETER_COUNT,
	FUNCTION_PARAMETERS_HAS_VALUE,
	FUNCTION_MODIFIER,
	ENUM_CONSTANTS,
	BASE_CLASS_COUNT,
	FIELDS_SIZE,
	MEMBER_FUNCTION_COUNT,
	VTABLE_GLOBAL,
	TYPE_NAME,
	VARIABLE_CLASS,
	VARIABLE_TYPE,
	VARIABLE_STORAGE,
	VARIABLE_BLOCK
};

// The outcome of a comparison: the result type plus the reason for a
// mismatch (NONE if the nodes matched).
struct CompareResult {
	CompareResult(CompareResultType type) : type(type), fail_reason(CompareFailReason::NONE) {}
	CompareResult(CompareFailReason reason) : type(CompareResultType::DIFFERS), fail_reason(reason) {}
	
	CompareResultType type;
	CompareFailReason fail_reason;
};

// Compare two AST nodes and their children recursively. This will only check
// fields that will be equal for two versions of the same type from different
// translation units.
CompareResult compare_nodes(const Node& lhs, const Node& rhs, const SymbolDatabase* database, bool check_intrusive_fields);

const char* compare_fail_reason_to_string(CompareFailReason reason);
const char* node_type_to_string(const Node& node);
const char* storage_class_to_string(StorageClass storage_class);
const char* access_specifier_to_string(AccessSpecifier specifier);
const char* builtin_class_to_string(BuiltInClass bclass);
s32 builtin_class_size(BuiltInClass bclass);
// Whether for_each_node visits a node before or after its children.
enum TraversalOrder {
	PREORDER_TRAVERSAL,
	POSTORDER_TRAVERSAL
};

// Return value of a preorder callback: whether to descend into children.
enum ExplorationMode {
	EXPLORE_CHILDREN,
	DONT_EXPLORE_CHILDREN
};
template <typename ThisNode, typename Callback>
void for_each_node(ThisNode& node, TraversalOrder order, Callback callback)
{
if(order == PREORDER_TRAVERSAL && callback(node) == DONT_EXPLORE_CHILDREN) {
return;
}
switch(node.descriptor) {
case ARRAY: {
auto& array = node.template as<Array>();
for_each_node(*array.element_type.get(), order, callback);
break;
}
case BITFIELD: {
auto& bitfield = node.template as<BitField>();
for_each_node(*bitfield.underlying_type.get(), order, callback);
break;
}
case BUILTIN: {
break;
}
case ENUM: {
break;
}
case ERROR_NODE: {
break;
}
case FUNCTION: {
auto& func = node.template as<Function>();
if(func.return_type.has_value()) {
for_each_node(*func.return_type->get(), order, callback);
}
if(func.parameters.has_value()) {
for(auto& child : *func.parameters) {
for_each_node(*child.get(), order, callback);
}
}
break;
}
case POINTER_OR_REFERENCE: {
auto& pointer_or_reference = node.template as<PointerOrReference>();
for_each_node(*pointer_or_reference.value_type.get(), order, callback);
break;
}
case POINTER_TO_DATA_MEMBER: {
auto& pointer = node.template as<PointerToDataMember>();
for_each_node(*pointer.class_type.get(), order, callback);
for_each_node(*pointer.member_type.get(), order, callback);
break;
}
case STRUCT_OR_UNION: {
auto& struct_or_union = node.template as<StructOrUnion>();
for(auto& child : struct_or_union.base_classes) {
for_each_node(*child.get(), order, callback);
}
for(auto& child : struct_or_union.fields) {
for_each_node(*child.get(), order, callback);
}
for(auto& child : struct_or_union.member_functions) {
for_each_node(*child.get(), order, callback);
}
break;
}
case TYPE_NAME: {
break;
}
}
if(order == POSTORDER_TRAVERSAL) {
callback(node);
}
}
}

128
3rdparty/ccc/src/ccc/elf.cpp vendored Normal file
View File

@@ -0,0 +1,128 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "elf.h"
namespace ccc {
// Parse the ELF ident header, file header, section headers and program
// headers from the given image, which is moved into the returned ElfFile.
Result<ElfFile> ElfFile::parse(std::vector<u8> image)
{
	ElfFile elf;
	elf.image = std::move(image);
	
	// Validate the ident header: magic number and 32-bit class.
	const ElfIdentHeader* ident = get_unaligned<ElfIdentHeader>(elf.image, 0);
	CCC_CHECK(ident, "ELF ident header out of range.");
	CCC_CHECK(ident->magic == CCC_FOURCC("\x7f\x45\x4c\x46"), "Not an ELF file.");
	CCC_CHECK(ident->e_class == ElfIdentClass::B32, "Wrong ELF class (not 32 bit).");
	
	const ElfFileHeader* header = get_unaligned<ElfFileHeader>(elf.image, sizeof(ElfIdentHeader));
	CCC_CHECK(header, "ELF file header out of range.");
	elf.file_header = *header;
	
	// The header of the section that contains the other sections' names.
	const ElfSectionHeader* shstr_section_header =
		get_unaligned<ElfSectionHeader>(elf.image, header->shoff + header->shstrndx * sizeof(ElfSectionHeader));
	CCC_CHECK(shstr_section_header, "ELF section name header out of range.");
	
	// Read the section headers and look up their names.
	for(u32 i = 0; i < header->shnum; i++) {
		u64 header_offset = header->shoff + i * sizeof(ElfSectionHeader);
		const ElfSectionHeader* section_header = get_unaligned<ElfSectionHeader>(elf.image, header_offset);
		CCC_CHECK(section_header, "ELF section header out of range.");
		
		std::optional<std::string_view> name = get_string(elf.image, shstr_section_header->offset + section_header->name);
		CCC_CHECK(name.has_value(), "ELF section name out of range.");
		
		ElfSection& section = elf.sections.emplace_back();
		section.name = *name;
		section.header = *section_header;
	}
	
	// Read the program headers.
	for(u32 i = 0; i < header->phnum; i++) {
		u64 header_offset = header->phoff + i * sizeof(ElfProgramHeader);
		const ElfProgramHeader* program_header = get_unaligned<ElfProgramHeader>(elf.image, header_offset);
		CCC_CHECK(program_header, "ELF program header out of range.");
		
		elf.segments.emplace_back(*program_header);
	}
	
	return elf;
}
// Create a Section symbol in the database for every section in this ELF
// file, recording its address (if any) and its size.
Result<void> ElfFile::create_section_symbols(
	SymbolDatabase& database, const SymbolGroup& group) const
{
	for(const ElfSection& elf_section : sections) {
		Address section_address = Address::non_zero(elf_section.header.addr);
		Result<Section*> section_symbol = database.sections.create_symbol(
			elf_section.name, section_address, group.source, group.module_symbol);
		CCC_RETURN_IF_ERROR(section_symbol);
		(*section_symbol)->set_size(elf_section.header.size);
	}
	return Result<void>();
}
// Find the first section with the given name, or return nullptr if no
// section with that name exists.
const ElfSection* ElfFile::lookup_section(const char* name) const
{
	for(size_t i = 0; i < sections.size(); i++) {
		if(sections[i].name == name) {
			return &sections[i];
		}
	}
	return nullptr;
}
// Translate a file offset into a virtual address using the program headers.
// Returns std::nullopt if no loaded segment covers the offset.
std::optional<u32> ElfFile::file_offset_to_virtual_address(u32 file_offset) const
{
	for(const ElfProgramHeader& segment : segments) {
		bool inside_segment =
			file_offset >= segment.offset &&
			file_offset < segment.offset + segment.filesz;
		if(inside_segment) {
			return segment.vaddr + (file_offset - segment.offset);
		}
	}
	return std::nullopt;
}
// Find the program header for the segment that contains the entry point. If
// multiple segments contain it, the last one wins.
const ElfProgramHeader* ElfFile::entry_point_segment() const
{
	const ccc::ElfProgramHeader* result = nullptr;
	for(const ccc::ElfProgramHeader& candidate : segments) {
		bool contains_entry_point =
			file_header.entry >= candidate.vaddr &&
			file_header.entry < candidate.vaddr + candidate.filesz;
		if(contains_entry_point) {
			result = &candidate;
		}
	}
	return result;
}
// Retrieve a block of data in an ELF file given its address and size.
// Returns std::nullopt if the range wraps around, or isn't fully contained
// within a single loaded segment and the file image.
std::optional<std::span<const u8>> ElfFile::get_virtual(u32 address, u32 size) const
{
	u32 end_address = address + size;
	// If end_address < address the addition above overflowed.
	if(end_address >= address) {
		for(const ElfProgramHeader& segment : segments) {
			if(address >= segment.vaddr && end_address <= segment.vaddr + segment.filesz) {
				size_t begin_offset = segment.offset + (address - segment.vaddr);
				size_t end_offset = begin_offset + size;
				// Also make sure the segment's data is inside the image.
				if(begin_offset <= image.size() && end_offset <= image.size()) {
					return std::span<const u8>(image.data() + begin_offset, image.data() + end_offset);
				}
			}
		}
	}
	return std::nullopt;
}
// Copy a block of data in an ELF file to the destination buffer given its
// address and size. Returns false if the range couldn't be read.
bool ElfFile::copy_virtual(u8* dest, u32 address, u32 size) const
{
	std::optional<std::span<const u8>> source = get_virtual(address, size);
	if(!source.has_value()) {
		return false;
	}
	memcpy(dest, source->data(), size);
	return true;
}
}

160
3rdparty/ccc/src/ccc/elf.h vendored Normal file
View File

@@ -0,0 +1,160 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once

#include <cstdint>
#include <cstring>

#include "symbol_database.h"
namespace ccc {
// ELF class: whether the file uses the 32-bit or 64-bit format.
enum class ElfIdentClass : u8 {
	B32 = 0x1,
	B64 = 0x2
};

// The e_ident bytes at the very start of an ELF file.
CCC_PACKED_STRUCT(ElfIdentHeader,
	/* 0x0 */ u32 magic; // 7f 45 4c 46
	/* 0x4 */ ElfIdentClass e_class;
	/* 0x5 */ u8 endianess;
	/* 0x6 */ u8 version;
	/* 0x7 */ u8 os_abi;
	/* 0x8 */ u8 abi_version;
	/* 0x9 */ u8 pad[7];
)
// ELF object file types (the type field of the file header).
enum class ElfFileType : u16 {
	NONE = 0x00,
	REL = 0x01,
	EXEC = 0x02,
	DYN = 0x03,
	CORE = 0x04,
	LOOS = 0xfe00,
	HIOS = 0xfeff,
	LOPROC = 0xff00,
	HIPROC = 0xffff
};

// Target machine architectures. Only MIPS is needed here.
enum class ElfMachine : u16 {
	MIPS = 0x08
};

// The 32-bit ELF file header, excluding the e_ident bytes (see
// ElfIdentHeader). Offsets are relative to the start of the file.
CCC_PACKED_STRUCT(ElfFileHeader,
	/* 0x10 */ ElfFileType type;
	/* 0x12 */ ElfMachine machine;
	/* 0x14 */ u32 version;
	/* 0x18 */ u32 entry;
	/* 0x1c */ u32 phoff;
	/* 0x20 */ u32 shoff;
	/* 0x24 */ u32 flags;
	/* 0x28 */ u16 ehsize;
	/* 0x2a */ u16 phentsize;
	/* 0x2c */ u16 phnum;
	/* 0x2e */ u16 shentsize;
	/* 0x30 */ u16 shnum;
	/* 0x32 */ u16 shstrndx;
)
// ELF section types (the type field of the section header).
enum class ElfSectionType : u32 {
	NULL_SECTION = 0x0,
	PROGBITS = 0x1,
	SYMTAB = 0x2,
	STRTAB = 0x3,
	RELA = 0x4,
	HASH = 0x5,
	DYNAMIC = 0x6,
	NOTE = 0x7,
	NOBITS = 0x8,
	REL = 0x9,
	SHLIB = 0xa,
	DYNSYM = 0xb,
	INIT_ARRAY = 0xe,
	FINI_ARRAY = 0xf,
	PREINIT_ARRAY = 0x10,
	GROUP = 0x11,
	SYMTAB_SHNDX = 0x12,
	NUM = 0x13,
	LOOS = 0x60000000,
	MIPS_DEBUG = 0x70000005
};

// A 32-bit ELF section header.
CCC_PACKED_STRUCT(ElfSectionHeader,
	/* 0x00 */ u32 name;
	/* 0x04 */ ElfSectionType type;
	/* 0x08 */ u32 flags;
	/* 0x0c */ u32 addr;
	/* 0x10 */ u32 offset;
	/* 0x14 */ u32 size;
	/* 0x18 */ u32 link;
	/* 0x1c */ u32 info;
	/* 0x20 */ u32 addralign;
	/* 0x24 */ u32 entsize;
)

// A section header together with its name, looked up from the section name
// string table during parsing.
struct ElfSection {
	std::string name;
	ElfSectionHeader header;
};
// A 32-bit ELF program (segment) header.
CCC_PACKED_STRUCT(ElfProgramHeader,
	/* 0x00 */ u32 type;
	/* 0x04 */ u32 offset;
	/* 0x08 */ u32 vaddr;
	/* 0x0c */ u32 paddr;
	/* 0x10 */ u32 filesz;
	/* 0x14 */ u32 memsz;
	/* 0x18 */ u32 flags;
	/* 0x1c */ u32 align;
)
// A parsed 32-bit ELF file: the raw image plus decoded headers.
struct ElfFile {
	ElfFileHeader file_header;
	std::vector<u8> image;
	std::vector<ElfSection> sections;
	std::vector<ElfProgramHeader> segments;
	
	// Parse the ELF file header, section headers and program headers.
	static Result<ElfFile> parse(std::vector<u8> image);
	
	// Create a section object for each section header in the ELF file.
	Result<void> create_section_symbols(SymbolDatabase& database, const SymbolGroup& group) const;
	
	const ElfSection* lookup_section(const char* name) const;
	std::optional<u32> file_offset_to_virtual_address(u32 file_offset) const;
	
	// Find the program header for the segment that contains the entry point.
	const ElfProgramHeader* entry_point_segment() const;
	
	// Retrieve a block of data in an ELF file given its address and size.
	std::optional<std::span<const u8>> get_virtual(u32 address, u32 size) const;
	
	// Copy a block of data in an ELF file to the destination buffer given its
	// address and size.
	bool copy_virtual(u8* dest, u32 address, u32 size) const;
	
	// Retrieve an object of type T from an ELF file given its address.
	template <typename T>
	std::optional<T> get_object_virtual(u32 address) const
	{
		std::optional<std::span<const u8>> result = get_virtual(address, sizeof(T));
		if(!result.has_value()) {
			return std::nullopt;
		}
		// Copy instead of dereferencing a cast pointer so that reads of
		// types with alignment > 1 from unaligned addresses are well-defined.
		T object;
		memcpy(&object, result->data(), sizeof(T));
		return object;
	}
	
	// Retrieve an array of objects of type T from an ELF file given its
	// address and element count.
	template <typename T>
	std::optional<std::span<const T>> get_array_virtual(u32 address, u32 element_count) const
	{
		// Guard against the byte count overflowing the u32 size parameter of
		// get_virtual, which would silently truncate the requested range.
		if(element_count > UINT32_MAX / sizeof(T)) {
			return std::nullopt;
		}
		std::optional<std::span<const u8>> result = get_virtual(address, (u32) (element_count * sizeof(T)));
		if(!result.has_value()) {
			return std::nullopt;
		}
		return std::span<const T>(
			reinterpret_cast<const T*>(result->data()),
			reinterpret_cast<const T*>(result->data() + result->size()));
	}
};
}

214
3rdparty/ccc/src/ccc/elf_symtab.cpp vendored Normal file
View File

@@ -0,0 +1,214 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "elf_symtab.h"
#include "importer_flags.h"
namespace ccc::elf {
// Symbol binding, stored in the upper four bits of the info field.
enum class SymbolBind : u8 {
	LOCAL = 0,
	GLOBAL = 1,
	WEAK = 2,
	NUM = 3,
	GNU_UNIQUE = 10
};

// Symbol type, stored in the lower four bits of the info field.
enum class SymbolType : u8 {
	NOTYPE = 0,
	OBJECT = 1,
	FUNC = 2,
	SECTION = 3,
	FILE = 4,
	COMMON = 5,
	TLS = 6,
	NUM = 7,
	GNU_IFUNC = 10
};

// Symbol visibility, stored in the lower two bits of the other field.
enum class SymbolVisibility {
	DEFAULT = 0,
	INTERNAL = 1,
	HIDDEN = 2,
	PROTECTED = 3
};

// A 32-bit ELF symbol table entry.
CCC_PACKED_STRUCT(Symbol,
	/* 0x0 */ u32 name; // Offset into the string table.
	/* 0x4 */ u32 value;
	/* 0x8 */ u32 size;
	/* 0xc */ u8 info;
	/* 0xd */ u8 other;
	/* 0xe */ u16 shndx;
	
	SymbolType type() const { return (SymbolType) (info & 0xf); }
	SymbolBind bind() const { return (SymbolBind) (info >> 4); }
	SymbolVisibility visibility() const { return (SymbolVisibility) (other & 0x3); }
)

static const char* symbol_bind_to_string(SymbolBind bind);
static const char* symbol_type_to_string(SymbolType type);
static const char* symbol_visibility_to_string(SymbolVisibility visibility);
// Import symbols from an ELF .symtab section into the symbol database,
// creating labels, global variables, functions or source files depending on
// the type of each symbol.
Result<void> import_symbols(
	SymbolDatabase& database,
	const SymbolGroup& group,
	std::span<const u8> symtab,
	std::span<const u8> strtab,
	u32 importer_flags,
	DemanglerFunctions demangler)
{
	for(u32 i = 0; i < symtab.size() / sizeof(Symbol); i++) {
		const Symbol* symbol = get_unaligned<Symbol>(symtab, i * sizeof(Symbol));
		CCC_ASSERT(symbol);
		
		// A value of zero is treated as an invalid address.
		Address address;
		if(symbol->value != 0) {
			address = symbol->value;
		}
		
		if(!address.valid() || symbol->visibility() != SymbolVisibility::DEFAULT) {
			continue;
		}
		
		// Skip symbols at addresses that already have a function or variable
		// symbol, unless deduplication has been disabled.
		if(!(importer_flags & DONT_DEDUPLICATE_SYMBOLS)) {
			if(database.functions.first_handle_from_starting_address(address).valid()) {
				continue;
			}
			if(database.global_variables.first_handle_from_starting_address(address).valid()) {
				continue;
			}
			if(database.local_variables.first_handle_from_starting_address(address).valid()) {
				continue;
			}
		}
		
		std::optional<std::string_view> string_view = get_string(strtab, symbol->name);
		CCC_CHECK(string_view.has_value(), "Symbol string out of range.");
		std::string string(*string_view);
		
		switch(symbol->type()) {
			case SymbolType::NOTYPE: {
				Result<Label*> label = database.labels.create_symbol(
					std::move(string), group.source, group.module_symbol, address, importer_flags, demangler);
				CCC_RETURN_IF_ERROR(label);
				
				// These symbols get emitted at the same addresses as functions
				// and aren't extremely useful, so we want to mark them to
				// prevent them from possibly being used as function names.
				(*label)->is_junk =
					(*label)->name() == "__gnu_compiled_c" ||
					(*label)->name() == "__gnu_compiled_cplusplus" ||
					(*label)->name() == "gcc2_compiled.";
				break;
			}
			case SymbolType::OBJECT: {
				// Objects with a non-zero size become global variables,
				// zero-sized objects become labels.
				if(symbol->size != 0) {
					Result<GlobalVariable*> global_variable = database.global_variables.create_symbol(
						std::move(string), group.source, group.module_symbol, address, importer_flags, demangler);
					CCC_RETURN_IF_ERROR(global_variable);
					
					if(*global_variable) {
						(*global_variable)->set_size(symbol->size);
					}
				} else {
					Result<Label*> label = database.labels.create_symbol(
						std::move(string), group.source, group.module_symbol, address, importer_flags, demangler);
					CCC_RETURN_IF_ERROR(label);
				}
				break;
			}
			case SymbolType::FUNC: {
				Result<Function*> function = database.functions.create_symbol(
					std::move(string), group.source, group.module_symbol, address, importer_flags, demangler);
				CCC_RETURN_IF_ERROR(function);
				
				if(*function) {
					(*function)->set_size(symbol->size);
				}
				break;
			}
			case SymbolType::FILE: {
				Result<SourceFile*> source_file = database.source_files.create_symbol(
					std::move(string), group.source, group.module_symbol);
				CCC_RETURN_IF_ERROR(source_file);
				break;
			}
			default: {}
		}
	}
	return Result<void>();
}
// Print a listing of the given ELF symbol table to the output stream, one
// line per symbol.
Result<void> print_symbol_table(FILE* out, std::span<const u8> symtab, std::span<const u8> strtab)
{
	fprintf(out, "ELF SYMBOLS:\n");
	fprintf(out, " Num: Value Size Type Bind Vis Ndx Name\n");
	for(u32 i = 0; i < symtab.size() / sizeof(Symbol); i++) {
		const Symbol* symbol = get_unaligned<Symbol>(symtab, i * sizeof(Symbol));
		CCC_ASSERT(symbol);
		
		const char* type = symbol_type_to_string(symbol->type());
		const char* bind = symbol_bind_to_string(symbol->bind());
		const char* visibility = symbol_visibility_to_string(symbol->visibility());
		
		std::optional<std::string_view> string = get_string(strtab, symbol->name);
		CCC_CHECK(string.has_value(), "Symbol string out of range.");
		
		// Print the name with an explicit length instead of assuming the
		// string view's underlying buffer is null terminated, which
		// std::string_view does not guarantee.
		fprintf(out, "%6u: %08x %5u %-7s %-7s %-7s %3u %.*s\n",
			i, symbol->value, symbol->size, type, bind, visibility, symbol->shndx,
			(int) string->size(), string->data());
	}
	return Result<void>();
}
// Convert a symbol binding to a human-readable name.
static const char* symbol_bind_to_string(SymbolBind bind)
{
	if(bind == SymbolBind::LOCAL) return "LOCAL";
	if(bind == SymbolBind::GLOBAL) return "GLOBAL";
	if(bind == SymbolBind::WEAK) return "WEAK";
	if(bind == SymbolBind::NUM) return "NUM";
	if(bind == SymbolBind::GNU_UNIQUE) return "GNU_UNIQUE";
	return "ERROR";
}
// Convert a symbol type to a human-readable name.
static const char* symbol_type_to_string(SymbolType type)
{
	if(type == SymbolType::NOTYPE) return "NOTYPE";
	if(type == SymbolType::OBJECT) return "OBJECT";
	if(type == SymbolType::FUNC) return "FUNC";
	if(type == SymbolType::SECTION) return "SECTION";
	if(type == SymbolType::FILE) return "FILE";
	if(type == SymbolType::COMMON) return "COMMON";
	if(type == SymbolType::TLS) return "TLS";
	if(type == SymbolType::NUM) return "NUM";
	if(type == SymbolType::GNU_IFUNC) return "GNU_IFUNC";
	return "ERROR";
}
// Convert a symbol visibility to a human-readable name.
static const char* symbol_visibility_to_string(SymbolVisibility visibility)
{
	if(visibility == SymbolVisibility::DEFAULT) return "DEFAULT";
	if(visibility == SymbolVisibility::INTERNAL) return "INTERNAL";
	if(visibility == SymbolVisibility::HIDDEN) return "HIDDEN";
	if(visibility == SymbolVisibility::PROTECTED) return "PROTECTED";
	return "ERROR";
}
}

20
3rdparty/ccc/src/ccc/elf_symtab.h vendored Normal file
View File

@@ -0,0 +1,20 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include "symbol_database.h"
namespace ccc::elf {

// Import symbols from an ELF symbol table (.symtab section contents plus
// its associated string table) into the given symbol database.
Result<void> import_symbols(
	SymbolDatabase& database,
	const SymbolGroup& group,
	std::span<const u8> symtab,
	std::span<const u8> strtab,
	u32 importer_flags,
	DemanglerFunctions demangler);

// Print a listing of an ELF symbol table to the given output stream.
Result<void> print_symbol_table(FILE* out, std::span<const u8> symtab, std::span<const u8> strtab);

}

95
3rdparty/ccc/src/ccc/importer_flags.cpp vendored Normal file
View File

@@ -0,0 +1,95 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "importer_flags.h"
namespace ccc {
// Table of all the importer flags that can be passed on the command line,
// with their help text (one string per output line). Help lines must not
// begin with a space, otherwise print_importer_flags_help produces
// misaligned output.
const std::vector<ImporterFlagInfo> IMPORTER_FLAGS = {
	{DEMANGLE_PARAMETERS, "--demangle-parameters", {
		"Include parameters in demangled function names."
	}},
	{DEMANGLE_RETURN_TYPE, "--demangle-return-type", {
		"Include return types at the end of demangled",
		"function names if they're available."
	}},
	{DONT_DEDUPLICATE_SYMBOLS, "--dont-deduplicate-symbols", {
		"Do not deduplicate matching symbols from",
		"different symbol tables. This options has no",
		"effect on data types."
	}},
	{DONT_DEDUPLICATE_TYPES, "--dont-deduplicate-types", {
		"Do not deduplicate data types from different",
		"translation units."
	}},
	{DONT_DEMANGLE_NAMES, "--dont-demangle-names", {
		"Do not demangle function names, global variable",
		"names, or overloaded operator names."
	}},
	{INCLUDE_GENERATED_MEMBER_FUNCTIONS, "--include-generated-functions", {
		"Output member functions that were likely",
		"automatically generated by the compiler."
	}},
	{NO_ACCESS_SPECIFIERS, "--no-access-specifiers", {
		"Do not print access specifiers."
	}},
	{NO_MEMBER_FUNCTIONS, "--no-member-functions", {
		"Do not print member functions."
	}},
	{NO_OPTIMIZED_OUT_FUNCTIONS, "--no-optimized-out-functions", {
		"Discard functions that were optimized out."
	}},
	{STRICT_PARSING, "--strict", {
		"Make more types of errors fatal."
	}},
	{TYPEDEF_ALL_ENUMS, "--typedef-all-enums", {
		"Force all emitted C++ enums to be defined using",
		"a typedef. With STABS, it is not always possible",
		"to determine if an enum was like this in the",
		"original source code, so this option should be",
		"useful for reverse engineering C projects."
	}},
	{TYPEDEF_ALL_STRUCTS, "--typedef-all-structs", {
		"Force all emitted C++ structure types to be",
		"defined using a typedef."
	}},
	{TYPEDEF_ALL_UNIONS, "--typedef-all-unions", {
		"Force all emitted C++ union types to be defined",
		"using a typedef."
	}},
	{UNIQUE_FUNCTIONS, "--unique-functions", {
		"If multiple identical .mdebug function symbols",
		"are present, find the one that seems to have",
		"actually been included in the linked binary, and",
		"remove the addresses from all the rest. Using",
		"this importer flag in combination with",
		"--no-optimized-out-functions will remove these",
		"duplicate function symbols entirely."
	}}
};
// Map a command line argument to its importer flag, or return
// NO_IMPORTER_FLAGS if the argument isn't recognised.
u32 parse_importer_flag(const char* argument)
{
	for(const ImporterFlagInfo& info : IMPORTER_FLAGS) {
		bool matches = strcmp(info.argument, argument) == 0;
		if(matches) {
			return info.flag;
		}
	}
	return NO_IMPORTER_FLAGS;
}
// Print help text for all the importer flags to the given output stream,
// with continuation lines indented to line up under the first.
void print_importer_flags_help(FILE* out)
{
	for(const ImporterFlagInfo& info : IMPORTER_FLAGS) {
		fprintf(out, "\n");
		fprintf(out, " %-29s ", info.argument);
		bool first_line = true;
		for(const char* help_line : info.help_text) {
			if(!first_line) {
				fprintf(out, " ");
			}
			fprintf(out, "%s\n", help_line);
			first_line = false;
		}
	}
}
}

39
3rdparty/ccc/src/ccc/importer_flags.h vendored Normal file
View File

@@ -0,0 +1,39 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include "util.h"
namespace ccc {
// Flags that modify the behaviour of the symbol table importers. They can
// be combined with bitwise OR and passed around as a u32.
enum ImporterFlags {
	NO_IMPORTER_FLAGS = 0,
	DEMANGLE_PARAMETERS = (1 << 0),
	DEMANGLE_RETURN_TYPE = (1 << 1),
	DONT_DEDUPLICATE_SYMBOLS = (1 << 2),
	DONT_DEDUPLICATE_TYPES = (1 << 3),
	DONT_DEMANGLE_NAMES = (1 << 4),
	INCLUDE_GENERATED_MEMBER_FUNCTIONS = (1 << 5),
	NO_ACCESS_SPECIFIERS = (1 << 6),
	NO_MEMBER_FUNCTIONS = (1 << 7),
	NO_OPTIMIZED_OUT_FUNCTIONS = (1 << 8),
	STRICT_PARSING = (1 << 9),
	TYPEDEF_ALL_ENUMS = (1 << 10),
	TYPEDEF_ALL_STRUCTS = (1 << 11),
	TYPEDEF_ALL_UNIONS = (1 << 12),
	UNIQUE_FUNCTIONS = (1 << 13)
};

// Metadata for a single importer flag: its value, the command line argument
// that enables it, and the lines of help text describing it.
struct ImporterFlagInfo {
	ImporterFlags flag;
	const char* argument;
	std::vector<const char*> help_text;
};

// Table of all the importer flags, used for command line parsing and help.
extern const std::vector<ImporterFlagInfo> IMPORTER_FLAGS;

// Map a command line argument to its flag, or return NO_IMPORTER_FLAGS.
u32 parse_importer_flag(const char* argument);

// Print help text for all the importer flags to the given output stream.
void print_importer_flags_help(FILE* out);
}

349
3rdparty/ccc/src/ccc/mdebug_analysis.cpp vendored Normal file
View File

@@ -0,0 +1,349 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "mdebug_analysis.h"
#include "stabs_to_ast.h"
namespace ccc::mdebug {
// Called for the STABS magic marker symbol. Nothing needs to be done for it.
Result<void> LocalSymbolTableAnalyser::stab_magic(const char* magic)
{
	return Result<void>();
}
// Called for a source file symbol. If no relative path has been recorded
// yet, fall back to the command line path of the source file.
Result<void> LocalSymbolTableAnalyser::source_file(const char* path, Address text_address)
{
	if(m_next_relative_path.empty()) {
		m_next_relative_path = m_source_file.command_line_path;
	}
	return Result<void>();
}
// Called for each data type symbol. Converts the parsed STABS type to an
// AST node and registers it in the symbol database.
Result<void> LocalSymbolTableAnalyser::data_type(const ParsedSymbol& symbol)
{
	Result<std::unique_ptr<ast::Node>> node = stabs_type_to_ast(
		*symbol.name_colon_type.type.get(), nullptr, m_stabs_to_ast_state, 0, false, false);
	CCC_RETURN_IF_ERROR(node);
	if(symbol.is_typedef && (*node)->descriptor == ast::STRUCT_OR_UNION) {
		ast::StructOrUnion& struct_or_union = (*node)->as<ast::StructOrUnion>();
		const std::string& name = symbol.name_colon_type.name;
		StabsTypeNumber type_number = symbol.name_colon_type.type->type_number;
		fix_recursively_emitted_structures(struct_or_union, name, type_number, m_stabs_to_ast_state.file_handle);
	}
	// Determine whether the type should be emitted as a typedef. The
	// TYPEDEF_ALL_* importer flags force this for enums/structs/unions.
	bool is_struct = (*node)->descriptor == ast::STRUCT_OR_UNION && (*node)->as<ast::StructOrUnion>().is_struct;
	bool force_typedef =
		((m_context.importer_flags & TYPEDEF_ALL_ENUMS) && (*node)->descriptor == ast::ENUM) ||
		((m_context.importer_flags & TYPEDEF_ALL_STRUCTS) && (*node)->descriptor == ast::STRUCT_OR_UNION && is_struct) ||
		((m_context.importer_flags & TYPEDEF_ALL_UNIONS) && (*node)->descriptor == ast::STRUCT_OR_UNION && !is_struct);
	// A name consisting of a single space is mapped to the empty string
	// (presumably indicating an unnamed type — verify against the parser).
	(*node)->name = (symbol.name_colon_type.name == " ") ? "" : symbol.name_colon_type.name;
	if(symbol.is_typedef || force_typedef) {
		(*node)->storage_class = STORAGE_CLASS_TYPEDEF;
	}
	const char* name = (*node)->name.c_str();
	StabsTypeNumber number = symbol.name_colon_type.type->type_number;
	if(m_context.importer_flags & DONT_DEDUPLICATE_TYPES) {
		// Create a new data type symbol unconditionally.
		Result<DataType*> data_type = m_database.data_types.create_symbol(
			name, m_context.group.source, m_context.group.module_symbol);
		CCC_RETURN_IF_ERROR(data_type);
		m_source_file.stabs_type_number_to_handle[number] = (*data_type)->handle();
		(*data_type)->set_type(std::move(*node));
		(*data_type)->files = {m_source_file.handle()};
	} else {
		// Let the database deduplicate the type against existing ones.
		Result<ccc::DataType*> type = m_database.create_data_type_if_unique(
			std::move(*node), number, name, m_source_file, m_context.group);
		CCC_RETURN_IF_ERROR(type);
	}
	return Result<void>();
}
// Called for each global variable symbol. Creates a GlobalVariable in the
// database, converts its STABS type to an AST node and records its storage
// location.
Result<void> LocalSymbolTableAnalyser::global_variable(
	const char* mangled_name, Address address, const StabsType& type, bool is_static, GlobalStorageLocation location)
{
	Result<GlobalVariable*> global = m_database.global_variables.create_symbol(
		mangled_name, m_context.group.source, m_context.group.module_symbol, address, m_context.importer_flags, m_context.demangler);
	CCC_RETURN_IF_ERROR(global);
	CCC_ASSERT(*global);
	m_global_variables.emplace_back((*global)->handle());
	Result<std::unique_ptr<ast::Node>> node = stabs_type_to_ast(type, nullptr, m_stabs_to_ast_state, 0, true, false);
	CCC_RETURN_IF_ERROR(node);
	if(is_static) {
		(*global)->storage_class = STORAGE_CLASS_STATIC;
	}
	(*global)->set_type(std::move(*node));
	(*global)->storage.location = location;
	return Result<void>();
}
// Called for an include/sub source file symbol. Paths encountered inside a
// function body are recorded against that function, otherwise the path is
// remembered for later.
Result<void> LocalSymbolTableAnalyser::sub_source_file(const char* path, Address text_address)
{
	bool inside_function = m_current_function && m_state == IN_FUNCTION_BEGINNING;
	if(!inside_function) {
		m_next_relative_path = path;
		return Result<void>();
	}
	Function::SubSourceFile& sub_source = m_current_function->sub_source_files.emplace_back();
	sub_source.address = text_address;
	sub_source.relative_path = path;
	return Result<void>();
}
// Called for a PROC symbol. Creates a new function symbol unless one with a
// matching name is already in progress, then records its storage class and
// stack frame size.
Result<void> LocalSymbolTableAnalyser::procedure(
	const char* mangled_name, Address address, const ProcedureDescriptor* procedure_descriptor, bool is_static)
{
	bool same_function = m_current_function
		&& strcmp(mangled_name, m_current_function->mangled_name().c_str()) == 0;
	if(!same_function) {
		Result<void> result = create_function(mangled_name, address);
		CCC_RETURN_IF_ERROR(result);
	}
	if(is_static) {
		m_current_function->storage_class = STORAGE_CLASS_STATIC;
	}
	if(procedure_descriptor) {
		m_current_function->stack_frame_size = procedure_descriptor->frame_size;
	}
	return Result<void>();
}
// Handles a LABEL symbol. Labels beginning with '$' (the $LM<N> line number
// markers) are recorded as line number pairs on the current function; other
// labels, and labels outside of a function, are ignored.
Result<void> LocalSymbolTableAnalyser::label(const char* label, Address address, s32 line_number)
{
	bool is_line_number_marker = label[0] == '$';
	if(m_current_function && address.valid() && is_line_number_marker) {
		Function::LineNumberPair& pair = m_current_function->line_numbers.emplace_back();
		pair.address = address;
		pair.line_number = line_number;
	}
	return Result<void>();
}
// Handles an END TEXT symbol, which marks the end of a function's text and
// carries its size.
Result<void> LocalSymbolTableAnalyser::text_end(const char* name, s32 function_size)
{
	if(m_state != IN_FUNCTION_BEGINNING) {
		return Result<void>();
	}
	CCC_CHECK(m_current_function, "END TEXT symbol outside of function.");
	m_current_function->set_size(function_size);
	m_state = IN_FUNCTION_END;
	return Result<void>();
}
// Handles a FUN stab. Either creates a new function symbol, or, if a symbol
// with the same mangled name already exists (e.g. from a preceding PROC
// symbol), updates its address if necessary. The return type is converted
// from STABS to an AST node and attached to the function.
Result<void> LocalSymbolTableAnalyser::function(const char* mangled_name, const StabsType& return_type, Address address)
{
	if(!m_current_function || strcmp(mangled_name, m_current_function->mangled_name().c_str()) != 0) {
		Result<void> result = create_function(mangled_name, address);
		CCC_RETURN_IF_ERROR(result);
	} else {
		// For MTV Music Maker 2, the addresses for static functions stored in
		// the PROC symbols are relative to the translation unit, while the
		// addresses stored in the FUN symbol are absolute. This is the only
		// game I've found that seems to have this problem, but since in all
		// other cases it seems all these addresses are all absolute, I may as
		// well add in a hack here to deal with it.
		bool no_module_base_address = m_context.group.module_symbol && m_context.group.module_symbol->address().get_or_zero() == 0;
		bool new_address_greater = address.valid() && address > m_current_function->address();
		if(no_module_base_address && new_address_greater) {
			// Prefer the (larger, presumably absolute) FUN address.
			m_database.functions.move_symbol(m_current_function->handle(), address);
		}
	}
	Result<std::unique_ptr<ast::Node>> node = stabs_type_to_ast(return_type, nullptr, m_stabs_to_ast_state, 0, true, true);
	CCC_RETURN_IF_ERROR(node);
	m_current_function->set_type(std::move(*node));
	return Result<void>();
}
// Finishes off the current function: flushes the accumulated parameter and
// local variable handles into it, then resets all the per-function state.
Result<void> LocalSymbolTableAnalyser::function_end()
{
	if(m_current_function) {
		m_current_function->set_parameter_variables(std::move(m_current_parameter_variables), m_database);
		m_current_function->set_local_variables(std::move(m_current_local_variables), m_database);
	}
	// Reset everything so the next function starts from a clean slate.
	m_current_function = nullptr;
	m_current_parameter_variables = {};
	m_current_local_variables = {};
	m_blocks.clear();
	m_pending_local_variables.clear();
	m_state = NOT_IN_FUNCTION;
	return Result<void>();
}
// Handles a parameter stab. Creates a parameter variable symbol for the
// current function, converts its type, and records where it is stored: on
// the stack ('value' is a stack pointer offset) or in a register ('value' is
// a dbx register number).
Result<void> LocalSymbolTableAnalyser::parameter(
	const char* name, const StabsType& type, bool is_stack, s32 value, bool is_by_reference)
{
	CCC_CHECK(m_current_function, "Parameter symbol before first func/proc symbol.");
	Result<ParameterVariable*> result = m_database.parameter_variables.create_symbol(
		name, m_context.group.source, m_context.group.module_symbol);
	CCC_RETURN_IF_ERROR(result);
	ParameterVariable& parameter = **result;
	m_current_parameter_variables.emplace_back(parameter.handle());
	Result<std::unique_ptr<ast::Node>> type_node = stabs_type_to_ast(type, nullptr, m_stabs_to_ast_state, 0, true, true);
	CCC_RETURN_IF_ERROR(type_node);
	parameter.set_type(std::move(*type_node));
	if(is_stack) {
		StackStorage& storage = parameter.storage.emplace<StackStorage>();
		storage.stack_pointer_offset = value;
	} else {
		RegisterStorage& storage = parameter.storage.emplace<RegisterStorage>();
		storage.dbx_register_number = value;
		storage.is_by_reference = is_by_reference;
	}
	return Result<void>();
}
// Handles a local variable stab. Creates a local variable symbol, converts
// its type, and fills in the storage information based on the symbol
// descriptor: static locals live in global storage (with an address),
// register variables in a register, and plain locals on the stack.
Result<void> LocalSymbolTableAnalyser::local_variable(
	const char* name, const StabsType& type, u32 value, StabsSymbolDescriptor desc, SymbolClass sclass)
{
	// Local variable symbols outside of a function are silently ignored.
	if(!m_current_function) {
		return Result<void>();
	}
	// Only static locals have a meaningful address; for the other descriptors
	// 'value' is a register number or a stack offset instead.
	Address address = (desc == StabsSymbolDescriptor::STATIC_LOCAL_VARIABLE) ? value : Address();
	Result<LocalVariable*> local_variable = m_database.local_variables.create_symbol(
		name, address, m_context.group.source, m_context.group.module_symbol);
	CCC_RETURN_IF_ERROR(local_variable);
	m_current_local_variables.emplace_back((*local_variable)->handle());
	// Also queue the variable up so the next LBRAC symbol can set the low end
	// of its live range.
	m_pending_local_variables.emplace_back((*local_variable)->handle());
	Result<std::unique_ptr<ast::Node>> node = stabs_type_to_ast(type, nullptr, m_stabs_to_ast_state, 0, true, false);
	CCC_RETURN_IF_ERROR(node);
	if(desc == StabsSymbolDescriptor::STATIC_LOCAL_VARIABLE) {
		GlobalStorage& global_storage = (*local_variable)->storage.emplace<GlobalStorage>();
		std::optional<GlobalStorageLocation> location_opt =
			symbol_class_to_global_variable_location(sclass);
		CCC_CHECK(location_opt.has_value(),
			"Invalid static local variable location %s.",
			symbol_class(sclass));
		global_storage.location = *location_opt;
		(*node)->storage_class = STORAGE_CLASS_STATIC;
	} else if(desc == StabsSymbolDescriptor::REGISTER_VARIABLE) {
		RegisterStorage& register_storage = (*local_variable)->storage.emplace<RegisterStorage>();
		register_storage.dbx_register_number = (s32) value;
	} else if(desc == StabsSymbolDescriptor::LOCAL_VARIABLE) {
		StackStorage& stack_storage = (*local_variable)->storage.emplace<StackStorage>();
		stack_storage.stack_pointer_offset = (s32) value;
	} else {
		return CCC_FAILURE("LocalSymbolTableAnalyser::local_variable() called with bad symbol descriptor.");
	}
	(*local_variable)->set_type(std::move(*node));
	return Result<void>();
}
// Handles an LBRAC symbol, which opens a lexical block. All the local
// variables seen since the previous block boundary start being live at the
// given offset (relative to the start of the source file).
Result<void> LocalSymbolTableAnalyser::lbrac(s32 begin_offset)
{
	for(LocalVariableHandle handle : m_pending_local_variables) {
		LocalVariable* local_variable = m_database.local_variables.symbol_from_handle(handle);
		if(local_variable) {
			local_variable->live_range.low = m_source_file.address().value + begin_offset;
		}
	}
	// The pending variables now belong to this block, which will be closed by
	// a matching RBRAC symbol.
	m_blocks.emplace_back(std::move(m_pending_local_variables));
	m_pending_local_variables = {};
	return Result<void>();
}
// Handles an RBRAC symbol, which closes the innermost open lexical block.
// All the variables in that block stop being live at the given offset
// (relative to the start of the source file).
Result<void> LocalSymbolTableAnalyser::rbrac(s32 end_offset)
{
	CCC_CHECK(!m_blocks.empty(), "RBRAC symbol without a matching LBRAC symbol.");
	for(LocalVariableHandle handle : m_blocks.back()) {
		LocalVariable* local_variable = m_database.local_variables.symbol_from_handle(handle);
		if(local_variable) {
			local_variable->live_range.high = m_source_file.address().value + end_offset;
		}
	}
	m_blocks.pop_back();
	return Result<void>();
}
// Called once all the symbols for a translation unit have been processed.
// Finishes off the last function and hands the collected function and global
// variable handles over to the source file symbol.
Result<void> LocalSymbolTableAnalyser::finish()
{
	// A well-formed symbol table never ends part way through a function.
	CCC_CHECK(m_state != IN_FUNCTION_BEGINNING,
		"Unexpected end of symbol table for '%s'.", m_source_file.name().c_str());
	if(m_current_function) {
		Result<void> end_result = function_end();
		CCC_RETURN_IF_ERROR(end_result);
	}
	m_source_file.set_functions(std::move(m_functions), m_database);
	m_source_file.set_global_variables(std::move(m_global_variables), m_database);
	return Result<void>();
}
// Creates a new function symbol and makes it the current function, first
// finishing off the previous function if there is one.
Result<void> LocalSymbolTableAnalyser::create_function(const char* mangled_name, Address address)
{
	if(m_current_function) {
		Result<void> result = function_end();
		CCC_RETURN_IF_ERROR(result);
	}
	Result<Function*> function = m_database.functions.create_symbol(
		mangled_name, m_context.group.source, m_context.group.module_symbol, address, m_context.importer_flags, m_context.demangler);
	CCC_RETURN_IF_ERROR(function);
	CCC_ASSERT(*function);
	m_current_function = *function;
	m_functions.emplace_back(m_current_function->handle());
	m_state = IN_FUNCTION_BEGINNING;
	// Apply a relative path remembered from an earlier sub source file symbol.
	// NOTE(review): the left-hand side of this comparison reads the freshly
	// created function's relative_path; it looks like it may have been meant
	// to compare m_next_relative_path against command_line_path instead --
	// confirm against the sub_source_file handling before changing.
	if(!m_next_relative_path.empty() && m_current_function->relative_path != m_source_file.command_line_path) {
		m_current_function->relative_path = m_next_relative_path;
	}
	return Result<void>();
}
// Maps an mdebug symbol class to the equivalent global storage location.
// Returns std::nullopt for symbol classes with no equivalent location.
std::optional<GlobalStorageLocation> symbol_class_to_global_variable_location(SymbolClass symbol_class)
{
	switch(symbol_class) {
		case SymbolClass::NIL: return GlobalStorageLocation::NIL;
		case SymbolClass::DATA: return GlobalStorageLocation::DATA;
		case SymbolClass::BSS: return GlobalStorageLocation::BSS;
		case SymbolClass::ABS: return GlobalStorageLocation::ABS;
		case SymbolClass::SDATA: return GlobalStorageLocation::SDATA;
		case SymbolClass::SBSS: return GlobalStorageLocation::SBSS;
		case SymbolClass::RDATA: return GlobalStorageLocation::RDATA;
		case SymbolClass::COMMON: return GlobalStorageLocation::COMMON;
		case SymbolClass::SCOMMON: return GlobalStorageLocation::SCOMMON;
		case SymbolClass::SUNDEFINED: return GlobalStorageLocation::SUNDEFINED;
		default: return std::nullopt;
	}
}
}

99
3rdparty/ccc/src/ccc/mdebug_analysis.h vendored Normal file
View File

@@ -0,0 +1,99 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include "importer_flags.h"
#include "mdebug_section.h"
#include "mdebug_symbols.h"
#include "stabs.h"
#include "stabs_to_ast.h"
#include "symbol_database.h"
namespace ccc::mdebug {
// Bundle of unchanging state that is passed down through the import passes.
struct AnalysisContext {
	const mdebug::SymbolTableReader* reader = nullptr;
	// Function symbols from the external symbol table, keyed by their value
	// (used as an address by the importer).
	const std::map<u32, const mdebug::Symbol*>* external_functions = nullptr;
	// Global symbols from the external symbol table, keyed by name.
	const std::map<std::string, const mdebug::Symbol*>* external_globals = nullptr;
	SymbolGroup group;
	u32 importer_flags = NO_IMPORTER_FLAGS;
	DemanglerFunctions demangler;
};
// Processes the symbols of a single translation unit from the local symbol
// table, populating the symbol database with the result.
class LocalSymbolTableAnalyser {
public:
	LocalSymbolTableAnalyser(SymbolDatabase& database, const StabsToAstState& stabs_to_ast_state, const AnalysisContext& context, SourceFile& source_file)
		: m_database(database)
		, m_context(context)
		, m_stabs_to_ast_state(stabs_to_ast_state)
		, m_source_file(source_file) {}
	// Functions for processing individual symbols.
	//
	// In most cases these symbols will appear in the following order:
	//   PROC TEXT
	//   ... line numbers ... ($LM<N>)
	//   END TEXT
	//   LABEL TEXT FUN
	//   ... parameters ...
	//   ... blocks ... (... local variables ... LBRAC ... subblocks ... RBRAC)
	//   NIL NIL FUN
	//
	// For some compiler versions the symbols can appear in this order:
	//   LABEL TEXT FUN
	//   ... parameters ...
	//   first line number ($LM1)
	//   PROC TEXT
	//   ... line numbers ... ($LM<N>)
	//   END TEXT
	//   ... blocks ... (... local variables ... LBRAC ... subblocks ... RBRAC)
	Result<void> stab_magic(const char* magic);
	Result<void> source_file(const char* path, Address text_address);
	Result<void> data_type(const ParsedSymbol& symbol);
	Result<void> global_variable(
		const char* mangled_name, Address address, const StabsType& type, bool is_static, GlobalStorageLocation location);
	Result<void> sub_source_file(const char* name, Address text_address);
	Result<void> procedure(
		const char* mangled_name, Address address, const ProcedureDescriptor* procedure_descriptor, bool is_static);
	Result<void> label(const char* label, Address address, s32 line_number);
	Result<void> text_end(const char* name, s32 function_size);
	Result<void> function(const char* mangled_name, const StabsType& return_type, Address address);
	Result<void> function_end();
	Result<void> parameter(
		const char* name, const StabsType& type, bool is_stack, s32 value, bool is_by_reference);
	Result<void> local_variable(
		const char* name, const StabsType& type, u32 value, StabsSymbolDescriptor desc, SymbolClass sclass);
	Result<void> lbrac(s32 begin_offset);
	Result<void> rbrac(s32 end_offset);
	// Called once all symbols have been processed; flushes state into the
	// source file symbol.
	Result<void> finish();
	Result<void> create_function(const char* mangled_name, Address address);
protected:
	// Tracks where we are relative to the current function's symbols.
	enum AnalysisState {
		NOT_IN_FUNCTION,
		IN_FUNCTION_BEGINNING,
		IN_FUNCTION_END
	};
	SymbolDatabase& m_database;
	const AnalysisContext& m_context;
	const StabsToAstState& m_stabs_to_ast_state;
	AnalysisState m_state = NOT_IN_FUNCTION;
	SourceFile& m_source_file;
	// Handles collected for the whole translation unit, handed over in finish().
	std::vector<FunctionHandle> m_functions;
	std::vector<GlobalVariableHandle> m_global_variables;
	// State accumulated for the function currently being processed.
	Function* m_current_function = nullptr;
	std::vector<ParameterVariableHandle> m_current_parameter_variables;
	std::vector<LocalVariableHandle> m_current_local_variables;
	// Stack of open LBRAC blocks, each holding the variables declared in it.
	std::vector<std::vector<LocalVariableHandle>> m_blocks;
	// Local variables seen since the last block boundary, waiting for an LBRAC.
	std::vector<LocalVariableHandle> m_pending_local_variables;
	// Relative path from a sub source file symbol seen before its function.
	std::string m_next_relative_path;
};
std::optional<GlobalStorageLocation> symbol_class_to_global_variable_location(SymbolClass symbol_class);
};

668
3rdparty/ccc/src/ccc/mdebug_importer.cpp vendored Normal file
View File

@@ -0,0 +1,668 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "mdebug_importer.h"
namespace ccc::mdebug {
static Result<void> resolve_type_names(
SymbolDatabase& database, const SymbolGroup& group, u32 importer_flags);
static Result<void> resolve_type_name(
ast::TypeName& type_name,
SymbolDatabase& database,
const SymbolGroup& group,
u32 importer_flags);
static void compute_size_bytes(ast::Node& node, SymbolDatabase& database);
static void detect_duplicate_functions(SymbolDatabase& database, const SymbolGroup& group);
static void detect_fake_functions(SymbolDatabase& database, const std::map<u32, const mdebug::Symbol*>& external_functions, const SymbolGroup& group);
static void destroy_optimized_out_functions(
SymbolDatabase& database, const SymbolGroup& group);
// Top-level entry point for importing a .mdebug symbol table. Initialises a
// reader over the section at the given offset in the ELF data, parses the
// external symbol table, then imports all the translation units one by one.
// 'interrupt', if provided, lets a caller cancel the operation between files.
Result<void> import_symbol_table(
	SymbolDatabase& database,
	std::span<const u8> elf,
	s32 section_offset,
	const SymbolGroup& group,
	u32 importer_flags,
	const DemanglerFunctions& demangler,
	const std::atomic_bool* interrupt)
{
	SymbolTableReader reader;
	Result<void> reader_result = reader.init(elf, section_offset);
	CCC_RETURN_IF_ERROR(reader_result);
	Result<std::vector<mdebug::Symbol>> external_symbols = reader.parse_external_symbols();
	CCC_RETURN_IF_ERROR(external_symbols);
	// The addresses of the global variables aren't present in the local symbol
	// table, so here we extract them from the external table. In addition, for
	// some games we need to cross reference the function symbols in the local
	// symbol table with the entries in the external symbol table.
	std::map<u32, const mdebug::Symbol*> external_functions;
	std::map<std::string, const mdebug::Symbol*> external_globals;
	for(const mdebug::Symbol& external : *external_symbols) {
		if(external.symbol_type == mdebug::SymbolType::PROC) {
			external_functions[external.value] = &external;
		}
		if(external.symbol_type == mdebug::SymbolType::GLOBAL
			&& (external.symbol_class != mdebug::SymbolClass::UNDEFINED)) {
			external_globals[external.string] = &external;
		}
	}
	// Bundle together some unchanging state to pass to import_files.
	AnalysisContext context;
	context.reader = &reader;
	context.external_functions = &external_functions;
	context.external_globals = &external_globals;
	context.group = group;
	context.importer_flags = importer_flags;
	context.demangler = demangler;
	Result<void> result = import_files(database, context, interrupt);
	CCC_RETURN_IF_ERROR(result);
	return Result<void>();
}
// Imports every translation unit in the symbol table, then runs the analysis
// passes that need a whole-database view: type name resolution, size
// computation and propagation, and duplicate/fake/optimized-out function
// detection (each gated on the relevant importer flag or context field).
Result<void> import_files(SymbolDatabase& database, const AnalysisContext& context, const std::atomic_bool* interrupt)
{
	Result<s32> file_count = context.reader->file_count();
	CCC_RETURN_IF_ERROR(file_count);
	for(s32 i = 0; i < *file_count; i++) {
		if(interrupt && *interrupt) {
			return CCC_FAILURE("Operation interrupted by user.");
		}
		Result<mdebug::File> file = context.reader->parse_file(i);
		CCC_RETURN_IF_ERROR(file);
		Result<void> result = import_file(database, *file, context);
		CCC_RETURN_IF_ERROR(result);
	}
	// The files field may be modified by further analysis passes, so we
	// need to save this information here.
	for(DataType& data_type : database.data_types) {
		if(context.group.is_in_group(data_type) && data_type.files.size() == 1) {
			data_type.only_defined_in_single_translation_unit = true;
		}
	}
	// Lookup data types and store data type handles in type names.
	Result<void> type_name_result = resolve_type_names(database, context.group, context.importer_flags);
	CCC_RETURN_IF_ERROR(type_name_result);
	// Compute the size in bytes of all the AST nodes.
	database.for_each_symbol([&](ccc::Symbol& symbol) {
		if(context.group.is_in_group(symbol) && symbol.type()) {
			compute_size_bytes(*symbol.type(), database);
		}
	});
	// Propagate the size information to the global variable symbols.
	for(GlobalVariable& global_variable : database.global_variables) {
		if(global_variable.type() && global_variable.type()->size_bytes > -1) {
			global_variable.set_size((u32) global_variable.type()->size_bytes);
		}
	}
	// Propagate the size information to the static local variable symbols.
	for(LocalVariable& local_variable : database.local_variables) {
		bool is_static_local = std::holds_alternative<GlobalStorage>(local_variable.storage);
		if(is_static_local && local_variable.type() && local_variable.type()->size_bytes > -1) {
			local_variable.set_size((u32) local_variable.type()->size_bytes);
		}
	}
	// Some games (e.g. Jet X2O) have multiple function symbols across different
	// translation units with the same name and address.
	if(context.importer_flags & UNIQUE_FUNCTIONS) {
		detect_duplicate_functions(database, context.group);
	}
	// If multiple functions appear at the same address, discard the addresses
	// of all of them except the real one.
	if(context.external_functions) {
		detect_fake_functions(database, *context.external_functions, context.group);
	}
	// Remove functions with no address. If there are any such functions, this
	// will invalidate all pointers to symbols.
	if(context.importer_flags & NO_OPTIMIZED_OUT_FUNCTIONS) {
		destroy_optimized_out_functions(database, context.group);
	}
	return Result<void>();
}
// Imports a single translation unit: parses its stab strings, creates a
// source file symbol, then feeds each parsed symbol into a
// LocalSymbolTableAnalyser which builds the function/variable/type symbols.
Result<void> import_file(SymbolDatabase& database, const mdebug::File& input, const AnalysisContext& context)
{
	// Parse the stab strings into a data structure that's vaguely
	// one-to-one with the text-based representation.
	u32 importer_flags_for_this_file = context.importer_flags;
	Result<std::vector<ParsedSymbol>> symbols = parse_symbols(input.symbols, importer_flags_for_this_file);
	CCC_RETURN_IF_ERROR(symbols);
	// In stabs, types can be referenced by their number from other stabs,
	// so here we build a map of type numbers to the parsed types.
	std::map<StabsTypeNumber, const StabsType*> stabs_types;
	for(const ParsedSymbol& symbol : *symbols) {
		if(symbol.type == ParsedSymbolType::NAME_COLON_TYPE) {
			symbol.name_colon_type.type->enumerate_numbered_types(stabs_types);
		}
	}
	Result<SourceFile*> source_file = database.source_files.create_symbol(
		input.full_path, input.address, context.group.source, context.group.module_symbol);
	CCC_RETURN_IF_ERROR(source_file);
	(*source_file)->working_dir = input.working_dir;
	(*source_file)->command_line_path = input.command_line_path;
	// Sometimes the INFO symbols contain information about what toolchain
	// version was used for building the executable.
	for(const mdebug::Symbol& symbol : input.symbols) {
		if(symbol.symbol_class == mdebug::SymbolClass::INFO && strcmp(symbol.string, "@stabs") != 0) {
			(*source_file)->toolchain_version_info.emplace(symbol.string);
		}
	}
	StabsToAstState stabs_to_ast_state;
	stabs_to_ast_state.file_handle = (*source_file)->handle().value;
	stabs_to_ast_state.stabs_types = &stabs_types;
	stabs_to_ast_state.importer_flags = importer_flags_for_this_file;
	stabs_to_ast_state.demangler = context.demangler;
	// Convert the parsed stabs symbols to a more standard C AST.
	LocalSymbolTableAnalyser analyser(database, stabs_to_ast_state, context, **source_file);
	for(const ParsedSymbol& symbol : *symbols) {
		if(symbol.duplicate) {
			continue;
		}
		switch(symbol.type) {
			case ParsedSymbolType::NAME_COLON_TYPE: {
				// Dispatch on the stab's symbol descriptor: functions,
				// parameters, local variables, globals and data types.
				switch(symbol.name_colon_type.descriptor) {
					case StabsSymbolDescriptor::LOCAL_FUNCTION:
					case StabsSymbolDescriptor::GLOBAL_FUNCTION: {
						const char* name = symbol.name_colon_type.name.c_str();
						const StabsType& type = *symbol.name_colon_type.type.get();
						Result<void> result = analyser.function(name, type, symbol.raw->value);
						CCC_RETURN_IF_ERROR(result);
						break;
					}
					case StabsSymbolDescriptor::REFERENCE_PARAMETER_A:
					case StabsSymbolDescriptor::REGISTER_PARAMETER:
					case StabsSymbolDescriptor::VALUE_PARAMETER:
					case StabsSymbolDescriptor::REFERENCE_PARAMETER_V: {
						const char* name = symbol.name_colon_type.name.c_str();
						const StabsType& type = *symbol.name_colon_type.type.get();
						bool is_stack_variable = symbol.name_colon_type.descriptor == StabsSymbolDescriptor::VALUE_PARAMETER;
						bool is_by_reference = symbol.name_colon_type.descriptor == StabsSymbolDescriptor::REFERENCE_PARAMETER_A
							|| symbol.name_colon_type.descriptor == StabsSymbolDescriptor::REFERENCE_PARAMETER_V;
						Result<void> result = analyser.parameter(name, type, is_stack_variable, symbol.raw->value, is_by_reference);
						CCC_RETURN_IF_ERROR(result);
						break;
					}
					case StabsSymbolDescriptor::REGISTER_VARIABLE:
					case StabsSymbolDescriptor::LOCAL_VARIABLE:
					case StabsSymbolDescriptor::STATIC_LOCAL_VARIABLE: {
						const char* name = symbol.name_colon_type.name.c_str();
						const StabsType& type = *symbol.name_colon_type.type.get();
						Result<void> result = analyser.local_variable(
							name, type, symbol.raw->value, symbol.name_colon_type.descriptor, symbol.raw->symbol_class);
						CCC_RETURN_IF_ERROR(result);
						break;
					}
					case StabsSymbolDescriptor::GLOBAL_VARIABLE:
					case StabsSymbolDescriptor::STATIC_GLOBAL_VARIABLE: {
						const char* name = symbol.name_colon_type.name.c_str();
						// -1 (UINT32_MAX) acts as a "no address" sentinel.
						u32 address = -1;
						std::optional<GlobalStorageLocation> location =
							symbol_class_to_global_variable_location(symbol.raw->symbol_class);
						if(symbol.name_colon_type.descriptor == StabsSymbolDescriptor::GLOBAL_VARIABLE) {
							// The address for non-static global variables is
							// only stored in the external symbol table (and
							// the ELF symbol table), so we pull that
							// information in here.
							if(context.external_globals) {
								auto global_symbol = context.external_globals->find(symbol.name_colon_type.name);
								if(global_symbol != context.external_globals->end()) {
									address = (u32) global_symbol->second->value;
									location = symbol_class_to_global_variable_location(global_symbol->second->symbol_class);
								}
							}
						} else {
							// And for static global variables it's just stored
							// in the local symbol table.
							address = (u32) symbol.raw->value;
						}
						CCC_CHECK(location.has_value(), "Invalid global variable location.")
						const StabsType& type = *symbol.name_colon_type.type.get();
						bool is_static = symbol.name_colon_type.descriptor == StabsSymbolDescriptor::STATIC_GLOBAL_VARIABLE;
						Result<void> result = analyser.global_variable(name, address, type, is_static, *location);
						CCC_RETURN_IF_ERROR(result);
						break;
					}
					case StabsSymbolDescriptor::TYPE_NAME:
					case StabsSymbolDescriptor::ENUM_STRUCT_OR_TYPE_TAG: {
						Result<void> result = analyser.data_type(symbol);
						CCC_RETURN_IF_ERROR(result);
						break;
					}
				}
				break;
			}
			case ParsedSymbolType::SOURCE_FILE: {
				Result<void> result = analyser.source_file(symbol.raw->string, symbol.raw->value);
				CCC_RETURN_IF_ERROR(result);
				break;
			}
			case ParsedSymbolType::SUB_SOURCE_FILE: {
				Result<void> result = analyser.sub_source_file(symbol.raw->string, symbol.raw->value);
				CCC_RETURN_IF_ERROR(result);
				break;
			}
			case ParsedSymbolType::LBRAC: {
				Result<void> result = analyser.lbrac(symbol.raw->value);
				CCC_RETURN_IF_ERROR(result);
				break;
			}
			case ParsedSymbolType::RBRAC: {
				Result<void> result = analyser.rbrac(symbol.raw->value);
				CCC_RETURN_IF_ERROR(result);
				break;
			}
			case ParsedSymbolType::FUNCTION_END: {
				Result<void> result = analyser.function_end();
				CCC_RETURN_IF_ERROR(result);
				break;
			}
			case ParsedSymbolType::NON_STABS: {
				// Raw (non-stabs) mdebug symbols: procedure starts/ends,
				// labels, and END markers for the text section.
				if(symbol.raw->symbol_class == mdebug::SymbolClass::TEXT) {
					if(symbol.raw->symbol_type == mdebug::SymbolType::PROC) {
						Result<void> result = analyser.procedure(symbol.raw->string, symbol.raw->value, symbol.raw->procedure_descriptor, false);
						CCC_RETURN_IF_ERROR(result);
					} else if(symbol.raw->symbol_type == mdebug::SymbolType::STATICPROC) {
						Result<void> result = analyser.procedure(symbol.raw->string, symbol.raw->value, symbol.raw->procedure_descriptor, true);
						CCC_RETURN_IF_ERROR(result);
					} else if(symbol.raw->symbol_type == mdebug::SymbolType::LABEL) {
						Result<void> result = analyser.label(symbol.raw->string, symbol.raw->value, symbol.raw->index);
						CCC_RETURN_IF_ERROR(result);
					} else if(symbol.raw->symbol_type == mdebug::SymbolType::END) {
						Result<void> result = analyser.text_end(symbol.raw->string, symbol.raw->value);
						CCC_RETURN_IF_ERROR(result);
					}
				}
				break;
			}
		}
	}
	Result<void> result = analyser.finish();
	CCC_RETURN_IF_ERROR(result);
	return Result<void>();
}
// Walks the AST of every symbol in the given group and resolves each type
// name node. If any resolutions fail, one of the errors (the most recent) is
// propagated after the walk completes.
static Result<void> resolve_type_names(
	SymbolDatabase& database, const SymbolGroup& group, u32 importer_flags)
{
	Result<void> deferred_error;
	database.for_each_symbol([&](ccc::Symbol& symbol) {
		if(!group.is_in_group(symbol) || !symbol.type()) {
			return;
		}
		ast::for_each_node(*symbol.type(), ast::PREORDER_TRAVERSAL, [&](ast::Node& node) {
			if(node.descriptor == ast::TYPE_NAME) {
				Result<void> node_result = resolve_type_name(node.as<ast::TypeName>(), database, group, importer_flags);
				if(!node_result.success()) {
					deferred_error = std::move(node_result);
				}
			}
			return ast::EXPLORE_CHILDREN;
		});
	});
	return deferred_error;
}
// Resolves a single unresolved type name node, trying progressively weaker
// strategies: lookup by STABS type number, lookup by name (marking the
// result as forward declared), then creating a new forward declared type.
// If everything fails it either errors out (STRICT_PARSING) or just warns.
static Result<void> resolve_type_name(
	ast::TypeName& type_name,
	SymbolDatabase& database,
	const SymbolGroup& group,
	u32 importer_flags)
{
	// Already resolved (or never unresolved) nodes need no work.
	ast::TypeName::UnresolvedStabs* unresolved_stabs = type_name.unresolved_stabs.get();
	if(!unresolved_stabs) {
		return Result<void>();
	}
	// Lookup the type by its STABS type number. This path ensures that the
	// correct type is found even if multiple types have the same name.
	if(unresolved_stabs->referenced_file_handle != (u32) -1 && unresolved_stabs->stabs_type_number.valid()) {
		const SourceFile* source_file = database.source_files.symbol_from_handle(unresolved_stabs->referenced_file_handle);
		CCC_ASSERT(source_file);
		auto handle = source_file->stabs_type_number_to_handle.find(unresolved_stabs->stabs_type_number);
		if(handle != source_file->stabs_type_number_to_handle.end()) {
			type_name.data_type_handle = handle->second.value;
			type_name.is_forward_declared = false;
			type_name.unresolved_stabs.reset();
			return Result<void>();
		}
	}
	// Looking up the type by its STABS type number failed, so look for it by
	// its name instead. This happens when a type is forward declared but not
	// defined in a given translation unit.
	if(!unresolved_stabs->type_name.empty()) {
		for(auto& name_handle : database.data_types.handles_from_name(unresolved_stabs->type_name)) {
			DataType* data_type = database.data_types.symbol_from_handle(name_handle.second);
			if(data_type && group.is_in_group(*data_type)) {
				type_name.data_type_handle = name_handle.second.value;
				type_name.is_forward_declared = true;
				type_name.unresolved_stabs.reset();
				return Result<void>();
			}
		}
	}
	// If this branch is taken it means the type name was probably from an
	// automatically generated member function of a nested struct trying to
	// reference the struct (for the this parameter). We shouldn't create a
	// forward declared type in this case.
	if(type_name.source == ast::TypeNameSource::UNNAMED_THIS) {
		return Result<void>();
	}
	// Type lookup failed. This happens when a type is forward declared in a
	// translation unit with symbols but is not defined in one. We haven't
	// already created a forward declared type, so we create one now.
	std::unique_ptr<ast::Node> forward_declared_node;
	if(unresolved_stabs->type.has_value()) {
		switch(*unresolved_stabs->type) {
			case ast::ForwardDeclaredType::STRUCT: {
				std::unique_ptr<ast::StructOrUnion> node = std::make_unique<ast::StructOrUnion>();
				node->is_struct = true;
				forward_declared_node = std::move(node);
				break;
			}
			case ast::ForwardDeclaredType::UNION: {
				std::unique_ptr<ast::StructOrUnion> node = std::make_unique<ast::StructOrUnion>();
				node->is_struct = false;
				forward_declared_node = std::move(node);
				break;
			}
			case ast::ForwardDeclaredType::ENUM: {
				std::unique_ptr<ast::Enum> node = std::make_unique<ast::Enum>();
				forward_declared_node = std::move(node);
				break;
			}
		}
	}
	if(forward_declared_node) {
		Result<DataType*> forward_declared_type = database.data_types.create_symbol(
			unresolved_stabs->type_name, group.source, group.module_symbol);
		CCC_RETURN_IF_ERROR(forward_declared_type);
		(*forward_declared_type)->set_type(std::move(forward_declared_node));
		(*forward_declared_type)->not_defined_in_any_translation_unit = true;
		type_name.data_type_handle = (*forward_declared_type)->handle().value;
		type_name.is_forward_declared = true;
		type_name.unresolved_stabs.reset();
		return Result<void>();
	}
	// Nothing worked: report the failure, fatally only under STRICT_PARSING.
	const char* error_message = "Unresolved %s type name '%s' with STABS type number (%d,%d).";
	if(importer_flags & STRICT_PARSING) {
		return CCC_FAILURE(error_message,
			ast::type_name_source_to_string(type_name.source),
			type_name.unresolved_stabs->type_name.c_str(),
			type_name.unresolved_stabs->stabs_type_number.file,
			type_name.unresolved_stabs->stabs_type_number.type);
	} else {
		CCC_WARN(error_message,
			ast::type_name_source_to_string(type_name.source),
			type_name.unresolved_stabs->type_name.c_str(),
			type_name.unresolved_stabs->stabs_type_number.file,
			type_name.unresolved_stabs->stabs_type_number.type);
	}
	return Result<void>();
}
// Fills in the size in bytes of the given AST node and all of its children.
// A postorder traversal is used so children are sized before their parents.
// 'cannot_compute_size' is set before attempting each node and cleared again
// on success, which guards against infinite recursion through type names.
static void compute_size_bytes(ast::Node& node, SymbolDatabase& database)
{
	for_each_node(node, ast::POSTORDER_TRAVERSAL, [&](ast::Node& node) {
		// Skip nodes that have already been processed.
		if(node.size_bytes > -1 || node.cannot_compute_size) {
			return ast::EXPLORE_CHILDREN;
		}
		// Can't compute size recursively.
		node.cannot_compute_size = true;
		switch(node.descriptor) {
			case ast::ARRAY: {
				ast::Array& array = node.as<ast::Array>();
				if(array.element_type->size_bytes > -1) {
					array.size_bytes = array.element_type->size_bytes * array.element_count;
				}
				break;
			}
			case ast::BITFIELD: {
				break;
			}
			case ast::BUILTIN: {
				ast::BuiltIn& built_in = node.as<ast::BuiltIn>();
				built_in.size_bytes = builtin_class_size(built_in.bclass);
				break;
			}
			case ast::FUNCTION: {
				break;
			}
			case ast::ENUM: {
				node.size_bytes = 4;
				break;
			}
			case ast::ERROR_NODE: {
				break;
			}
			case ast::STRUCT_OR_UNION: {
				node.size_bytes = node.size_bits / 8;
				break;
			}
			case ast::POINTER_OR_REFERENCE: {
				// Pointers are 4 bytes on this target.
				node.size_bytes = 4;
				break;
			}
			case ast::POINTER_TO_DATA_MEMBER: {
				break;
			}
			case ast::TYPE_NAME: {
				// Pull the size from the referenced data type, computing it
				// first if it hasn't been sized yet (and isn't in progress).
				ast::TypeName& type_name = node.as<ast::TypeName>();
				DataType* resolved_type = database.data_types.symbol_from_handle(type_name.data_type_handle_unless_forward_declared());
				if(resolved_type) {
					ast::Node* resolved_node = resolved_type->type();
					CCC_ASSERT(resolved_node);
					if(resolved_node->size_bytes < 0 && !resolved_node->cannot_compute_size) {
						compute_size_bytes(*resolved_node, database);
					}
					type_name.size_bytes = resolved_node->size_bytes;
				}
				break;
			}
		}
		if(node.size_bytes > -1) {
			node.cannot_compute_size = false;
		}
		return ast::EXPLORE_CHILDREN;
	});
}
// Some games have multiple function symbols across different translation
// units with the same name and address. Here we pick the best candidate for
// each such address and discard the addresses of the rest.
static void detect_duplicate_functions(SymbolDatabase& database, const SymbolGroup& group)
{
	std::vector<FunctionHandle> duplicate_functions;
	for(Function& test_function : database.functions) {
		// Only consider functions from this group that have a valid address.
		// Fixed: this guard previously used &&, which let functions with an
		// invalid address (or from another group) fall through; || matches
		// the equivalent guard in detect_fake_functions.
		if(!test_function.address().valid() || !group.is_in_group(test_function)) {
			continue;
		}
		// Find cases where there are two or more functions at the same address.
		auto functions_with_same_address = database.functions.handles_from_starting_address(test_function.address());
		if(functions_with_same_address.begin() == functions_with_same_address.end()) {
			continue;
		}
		if(++functions_with_same_address.begin() == functions_with_same_address.end()) {
			continue;
		}
		// Try to figure out the address of the translation unit which the
		// version of the function that actually ended up in the linked binary
		// comes from. We can't just check which source file the symbol comes
		// from because it may be present in multiple.
		u32 source_file_address = UINT32_MAX;
		for(SourceFile& source_file : database.source_files) {
			if(source_file.address() < test_function.address()) {
				source_file_address = std::min(source_file.address().value, source_file_address);
			}
		}
		if(source_file_address == UINT32_MAX) {
			continue;
		}
		// Remove the addresses from all the matching symbols from other
		// translation units, keeping only the candidate closest to the chosen
		// translation unit address.
		FunctionHandle best_handle;
		u32 best_offset = UINT32_MAX;
		for(const auto& [address, handle] : functions_with_same_address) {
			ccc::Function* function = database.functions.symbol_from_handle(handle);
			if(!function || !group.is_in_group(*function) || function->mangled_name() != test_function.mangled_name()) {
				continue;
			}
			if(address - source_file_address < best_offset) {
				if(best_handle.valid()) {
					duplicate_functions.emplace_back(best_handle);
				}
				best_handle = function->handle();
				best_offset = address - source_file_address;
			} else {
				duplicate_functions.emplace_back(function->handle());
			}
		}
		for(FunctionHandle duplicate_function : duplicate_functions) {
			database.functions.move_symbol(duplicate_function, Address());
		}
		duplicate_functions.clear();
	}
}
// Discard addresses of function symbols that appear to be fake: when several
// functions share one address, any whose mangled name does not match the
// external symbol table entry for that address has its address cleared.
static void detect_fake_functions(SymbolDatabase& database, const std::map<u32, const mdebug::Symbol*>& external_functions, const SymbolGroup& group)
{
	// Find cases where multiple fake function symbols were emitted for a given
	// address and cross-reference with the external symbol table to try and
	// find which one is the real one.
	s32 fake_function_count = 0;
	for(Function& function : database.functions) {
		if(!function.address().valid() || !group.is_in_group(function)) {
			continue;
		}
		// Find cases where there are two or more functions at the same address.
		auto functions_with_same_address = database.functions.handles_from_starting_address(function.address());
		if(functions_with_same_address.begin() == functions_with_same_address.end()) {
			continue;
		}
		// Incrementing a copy of begin() is a cheap way to test "two or more"
		// without computing the full size of the range.
		if(++functions_with_same_address.begin() == functions_with_same_address.end()) {
			continue;
		}
		auto external_function = external_functions.find(function.address().value);
		// If no external symbol exists at this address, or its name disagrees,
		// assume the address on this function symbol is bogus and clear it.
		if(external_function == external_functions.end() || strcmp(function.mangled_name().c_str(), external_function->second->string) != 0) {
			database.functions.move_symbol(function.handle(), Address());
			// Rate-limit the warnings: print at most 10 individual messages
			// plus one summary line.
			if(fake_function_count < 10) {
				CCC_WARN("Discarding address of function symbol '%s' as it is probably incorrect.", function.mangled_name().c_str());
			} else if(fake_function_count == 10) {
				CCC_WARN("Discarding more addresses of function symbols.");
			}
			fake_function_count++;
		}
	}
}
// Destroy all functions in the given group that have no valid address, i.e.
// functions that were present in the symbol table but did not make it into
// the linked binary.
static void destroy_optimized_out_functions(
	SymbolDatabase& database, const SymbolGroup& group)
{
	// First pass: flag every address-less function in this group.
	bool any_marked = false;
	for(Function& function : database.functions) {
		bool optimized_out = group.is_in_group(function) && !function.address().valid();
		if(optimized_out) {
			function.mark_for_destruction();
			any_marked = true;
		}
	}
	if(!any_marked) {
		return;
	}
	// This will invalidate all pointers to symbols in the database.
	database.destroy_marked_symbols();
}
// Heuristically link member function declarations inside struct/union types
// to the standalone function symbols that define them, by splitting the
// function's qualified name into a type name and a member name.
void fill_in_pointers_to_member_function_definitions(SymbolDatabase& database)
{
	// Fill in pointers from member function declaration to corresponding definitions.
	for(Function& function : database.functions) {
		const std::string& qualified_name = function.name();
		// NOTE(review): find_last_of("::") searches for any character in the
		// set, so this finds the last ':' — which coincides with the end of
		// the last "::" separator, since both characters are ':'.
		std::string::size_type name_separator_pos = qualified_name.find_last_of("::");
		if(name_separator_pos == std::string::npos || name_separator_pos < 2) {
			continue;
		}
		// Everything after the last "::" is the unqualified member name.
		std::string function_name = qualified_name.substr(name_separator_pos + 1);
		// This won't work for some template types.
		std::string::size_type type_separator_pos = qualified_name.find_last_of("::", name_separator_pos - 2);
		std::string type_name;
		if(type_separator_pos != std::string::npos) {
			// The type name is the component between the last two "::".
			type_name = qualified_name.substr(type_separator_pos + 1, name_separator_pos - type_separator_pos - 2);
		} else {
			// Only one "::": the type name is everything before it.
			type_name = qualified_name.substr(0, name_separator_pos - 1);
		}
		// There may be multiple data types with the same name (e.g. from
		// different translation units), so check all of them.
		for(const auto& name_handle : database.data_types.handles_from_name(type_name)) {
			DataType* data_type = database.data_types.symbol_from_handle(name_handle.second);
			if(!data_type || !data_type->type() || data_type->type()->descriptor != ast::STRUCT_OR_UNION) {
				continue;
			}
			ast::StructOrUnion& struct_or_union = data_type->type()->as<ast::StructOrUnion>();
			for(std::unique_ptr<ast::Node>& declaration : struct_or_union.member_functions) {
				if(declaration->name == function_name) {
					// Record the link in both directions: the declaration
					// stores the definition's handle, and the function is
					// flagged as (probably) being a member function.
					declaration->as<ast::Function>().definition_handle = function.handle().value;
					function.is_member_function_ish = true;
					break;
				}
			}
			if(function.is_member_function_ish) {
				break;
			}
		}
	}
}
}

31
3rdparty/ccc/src/ccc/mdebug_importer.h vendored Normal file
View File

@@ -0,0 +1,31 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include <atomic>
#include "mdebug_analysis.h"
#include "mdebug_section.h"
#include "symbol_database.h"
namespace ccc::mdebug {

// Perform all the main analysis passes on the mdebug symbol table and convert
// it to a set of C++ ASTs.
Result<void> import_symbol_table(
	SymbolDatabase& database,
	std::span<const u8> elf,      // The raw ELF image containing the .mdebug section.
	s32 section_offset,           // File offset of the .mdebug section within the image.
	const SymbolGroup& group,
	u32 importer_flags,
	const DemanglerFunctions& demangler,
	const std::atomic_bool* interrupt); // Optional cooperative cancellation flag.

// Import every file descriptor in the symbol table into the database.
Result<void> import_files(SymbolDatabase& database, const AnalysisContext& context, const std::atomic_bool* interrupt);

// Import a single translation unit's worth of symbols into the database.
Result<void> import_file(SymbolDatabase& database, const mdebug::File& input, const AnalysisContext& context);

// Try to add pointers from member function declarations to their definitions
// using a heuristic.
void fill_in_pointers_to_member_function_definitions(SymbolDatabase& database);

}

474
3rdparty/ccc/src/ccc/mdebug_section.cpp vendored Normal file
View File

@@ -0,0 +1,474 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "mdebug_section.h"
namespace ccc::mdebug {
// MIPS debug symbol table headers.
// See include/coff/sym.h from GNU binutils for more information.
// The HDRR structure at the start of the .mdebug section. All offsets are
// absolute file offsets (possibly corrupted by section moves — see
// get_corruption_fixing_fudge_offset below).
CCC_PACKED_STRUCT(SymbolicHeader,
	/* 0x00 */ s16 magic; // Expected to be 0x7009 (checked in init).
	/* 0x02 */ s16 version_stamp;
	/* 0x04 */ s32 line_number_count;
	/* 0x08 */ s32 line_numbers_size_bytes;
	/* 0x0c */ s32 line_numbers_offset;
	/* 0x10 */ s32 dense_numbers_count;
	/* 0x14 */ s32 dense_numbers_offset;
	/* 0x18 */ s32 procedure_descriptor_count;
	/* 0x1c */ s32 procedure_descriptors_offset;
	/* 0x20 */ s32 local_symbol_count;
	/* 0x24 */ s32 local_symbols_offset;
	/* 0x28 */ s32 optimization_symbols_count;
	/* 0x2c */ s32 optimization_symbols_offset;
	/* 0x30 */ s32 auxiliary_symbol_count;
	/* 0x34 */ s32 auxiliary_symbols_offset;
	/* 0x38 */ s32 local_strings_size_bytes;
	/* 0x3c */ s32 local_strings_offset;
	/* 0x40 */ s32 external_strings_size_bytes;
	/* 0x44 */ s32 external_strings_offset;
	/* 0x48 */ s32 file_descriptor_count;
	/* 0x4c */ s32 file_descriptors_offset;
	/* 0x50 */ s32 relative_file_descriptor_count;
	/* 0x54 */ s32 relative_file_descriptors_offset;
	/* 0x58 */ s32 external_symbols_count;
	/* 0x5c */ s32 external_symbols_offset;
)

// One entry per translation unit (FDR in binutils terminology).
CCC_PACKED_STRUCT(FileDescriptor,
	/* 0x00 */ u32 address;
	/* 0x04 */ s32 file_path_string_offset; // Relative to strings_offset.
	/* 0x08 */ s32 strings_offset;          // Base of this file's string table.
	/* 0x0c */ s32 cb_ss;
	/* 0x10 */ s32 isym_base;               // Index of first local symbol.
	/* 0x14 */ s32 symbol_count;
	/* 0x18 */ s32 line_number_entry_index_base;
	/* 0x1c */ s32 cline;
	/* 0x20 */ s32 optimization_entry_index_base;
	/* 0x24 */ s32 copt;
	/* 0x28 */ u16 ipd_first;               // Index of first procedure descriptor.
	/* 0x2a */ u16 procedure_descriptor_count;
	/* 0x2c */ s32 iaux_base;
	/* 0x30 */ s32 caux;
	/* 0x34 */ s32 rfd_base;
	/* 0x38 */ s32 crfd;
	/* 0x3c */ u32 lang : 5;
	/* 0x3c */ u32 f_merge : 1;
	/* 0x3c */ u32 f_readin : 1;
	/* 0x3c */ u32 f_big_endian : 1; // Must be 0 (checked in parse_file).
	/* 0x3c */ u32 reserved_1 : 22;
	/* 0x40 */ s32 line_number_offset;
	/* 0x44 */ s32 cb_line;
)
static_assert(sizeof(FileDescriptor) == 0x48);

// On-disk layout of a single local symbol (SYMR).
CCC_PACKED_STRUCT(SymbolHeader,
	/* 0x0 */ u32 iss;   // String table offset of the symbol name.
	/* 0x4 */ u32 value;
	/* 0x8 */ u32 st : 6;  // SymbolType
	/* 0x8 */ u32 sc : 5;  // SymbolClass
	/* 0x8 */ u32 reserved : 1;
	/* 0x8 */ u32 index : 20;
)
static_assert(sizeof(SymbolHeader) == 0xc);

// On-disk layout of an external symbol (EXTR): flags + owning file + symbol.
CCC_PACKED_STRUCT(ExternalSymbolHeader,
	/* 0x0 */ u16 flags;
	/* 0x2 */ s16 ifd;
	/* 0x4 */ SymbolHeader symbol;
)
static_assert(sizeof(ExternalSymbolHeader) == 0x10);

static void print_symbol(FILE* out, const Symbol& symbol);
static void print_procedure_descriptor(FILE* out, const ProcedureDescriptor& procedure_descriptor);
static Result<s32> get_corruption_fixing_fudge_offset(s32 section_offset, const SymbolicHeader& hdrr);
static Result<Symbol> get_symbol(const SymbolHeader& header, std::span<const u8> elf, s32 strings_offset);
// Validate the symbolic header and prepare the reader for use. Must be called
// (and must succeed) before any other member function.
Result<void> SymbolTableReader::init(std::span<const u8> elf, s32 section_offset)
{
	m_elf = elf;
	m_section_offset = section_offset;

	m_hdrr = get_unaligned<SymbolicHeader>(m_elf, m_section_offset);
	CCC_CHECK(m_hdrr != nullptr, "MIPS debug section header out of bounds.");
	CCC_CHECK(m_hdrr->magic == 0x7009, "Invalid symbolic header.");

	// Work out the correction to apply to all file offsets in case the
	// section was moved without its contents being rewritten.
	Result<s32> fudge_offset = get_corruption_fixing_fudge_offset(m_section_offset, *m_hdrr);
	CCC_RETURN_IF_ERROR(fudge_offset);
	m_fudge_offset = *fudge_offset;

	m_ready = true;

	return Result<void>();
}
// Number of file descriptors (translation units) in the symbol table.
s32 SymbolTableReader::file_count() const
{
	CCC_ASSERT(m_ready);
	return m_hdrr->file_descriptor_count;
}
// Parse the index'th file descriptor along with all of its local symbols and
// procedure descriptors. Returns an error if any structure is out of bounds.
Result<File> SymbolTableReader::parse_file(s32 index) const
{
	CCC_ASSERT(m_ready);

	File file;

	u64 fd_offset = m_hdrr->file_descriptors_offset + index * sizeof(FileDescriptor);
	const FileDescriptor* fd_header = get_unaligned<FileDescriptor>(m_elf, fd_offset + m_fudge_offset);
	CCC_CHECK(fd_header != nullptr, "MIPS debug file descriptor out of bounds.");
	CCC_CHECK(fd_header->f_big_endian == 0, "Not little endian or bad file descriptor table.");

	file.address = fd_header->address;

	// The path passed to the compiler lives in this file's slice of the
	// local string table.
	s32 rel_raw_path_offset = fd_header->strings_offset + fd_header->file_path_string_offset;
	s32 raw_path_offset = m_hdrr->local_strings_offset + rel_raw_path_offset + m_fudge_offset;
	std::optional<std::string_view> command_line_path = get_string(m_elf, raw_path_offset);
	if(command_line_path.has_value()) {
		file.command_line_path = *command_line_path;
	}

	// Parse local symbols.
	for(s64 j = 0; j < fd_header->symbol_count; j++) {
		u64 rel_symbol_offset = (fd_header->isym_base + j) * sizeof(SymbolHeader);
		u64 symbol_offset = m_hdrr->local_symbols_offset + rel_symbol_offset + m_fudge_offset;
		const SymbolHeader* symbol_header = get_unaligned<SymbolHeader>(m_elf, symbol_offset);
		CCC_CHECK(symbol_header != nullptr, "Symbol header out of bounds.");
		s32 strings_offset = m_hdrr->local_strings_offset + fd_header->strings_offset + m_fudge_offset;
		Result<Symbol> sym = get_symbol(*symbol_header, m_elf, strings_offset);
		CCC_RETURN_IF_ERROR(sym);
		// Detect the gcc working directory: when an N_SO stab names the same
		// string as the file path itself, the preceding N_SO stab holds the
		// working directory.
		bool string_offset_equal = (s32) symbol_header->iss == fd_header->file_path_string_offset;
		if(file.working_dir.empty() && string_offset_equal && sym->is_stabs() && sym->code() == N_SO && file.symbols.size() > 2) {
			const Symbol& working_dir = file.symbols.back();
			if(working_dir.is_stabs() && working_dir.code() == N_SO) {
				file.working_dir = working_dir.string;
			}
		}
		file.symbols.emplace_back(std::move(*sym));
	}

	// Parse procedure descriptors.
	for(s64 i = 0; i < fd_header->procedure_descriptor_count; i++) {
		u64 rel_procedure_offset = (fd_header->ipd_first + i) * sizeof(ProcedureDescriptor);
		u64 procedure_offset = m_hdrr->procedure_descriptors_offset + rel_procedure_offset + m_fudge_offset;
		const ProcedureDescriptor* procedure_descriptor = get_unaligned<ProcedureDescriptor>(m_elf, procedure_offset);
		CCC_CHECK(procedure_descriptor != nullptr, "Procedure descriptor out of bounds.");
		CCC_CHECK(procedure_descriptor->symbol_index < file.symbols.size(), "Symbol index out of bounds.");
		// Attach the descriptor to the symbol it describes. The pointer
		// references the ELF buffer, so it stays valid as long as m_elf does.
		file.symbols[procedure_descriptor->symbol_index].procedure_descriptor = procedure_descriptor;
	}

	file.full_path = merge_paths(file.working_dir, file.command_line_path);

	return file;
}
// Parse the external (linker-visible) symbol table into a flat list.
Result<std::vector<Symbol>> SymbolTableReader::parse_external_symbols() const
{
	CCC_ASSERT(m_ready);

	std::vector<Symbol> external_symbols;
	for(s64 i = 0; i < m_hdrr->external_symbols_count; i++) {
		u64 sym_offset = m_hdrr->external_symbols_offset + i * sizeof(ExternalSymbolHeader);
		const ExternalSymbolHeader* external_header = get_unaligned<ExternalSymbolHeader>(m_elf, sym_offset + m_fudge_offset);
		CCC_CHECK(external_header != nullptr, "External header out of bounds.");
		// External symbols use the global external string table rather than a
		// per-file slice of the local string table.
		Result<Symbol> sym = get_symbol(external_header->symbol, m_elf, m_hdrr->external_strings_offset + m_fudge_offset);
		CCC_RETURN_IF_ERROR(sym);
		external_symbols.emplace_back(std::move(*sym));
	}

	return external_symbols;
}
// Dump the symbolic header (HDRR) as a human-readable table. Sizes are
// derived from counts multiplied by per-entry sizes where the header does not
// store a size directly.
void SymbolTableReader::print_header(FILE* dest) const
{
	CCC_ASSERT(m_ready);

	fprintf(dest, "Symbolic Header, magic = %hx, vstamp = %hx:\n",
		(u16) m_hdrr->magic,
		(u16) m_hdrr->version_stamp);
	fprintf(dest, "\n");
	fprintf(dest, " Offset Size (Bytes) Count\n");
	fprintf(dest, " ------ ------------ -----\n");
	fprintf(dest, " Line Numbers 0x%-8x " "0x%-8x " "%-8d\n",
		(u32) m_hdrr->line_numbers_offset,
		(u32) m_hdrr->line_numbers_size_bytes,
		m_hdrr->line_number_count);
	// Dense number entries are 8 bytes each.
	fprintf(dest, " Dense Numbers 0x%-8x " "0x%-8x " "%-8d\n",
		(u32) m_hdrr->dense_numbers_offset,
		(u32) m_hdrr->dense_numbers_count * 8,
		m_hdrr->dense_numbers_count);
	fprintf(dest, " Procedure Descriptors 0x%-8x " "0x%-8x " "%-8d\n",
		(u32) m_hdrr->procedure_descriptors_offset,
		(u32) m_hdrr->procedure_descriptor_count * (u32) sizeof(ProcedureDescriptor),
		m_hdrr->procedure_descriptor_count);
	fprintf(dest, " Local Symbols 0x%-8x " "0x%-8x " "%-8d\n",
		(u32) m_hdrr->local_symbols_offset,
		(u32) m_hdrr->local_symbol_count * (u32) sizeof(SymbolHeader),
		m_hdrr->local_symbol_count);
	// Optimization symbol entry size is unknown, so no size is printed.
	fprintf(dest, " Optimization Symbols 0x%-8x " "- " "%-8d\n",
		(u32) m_hdrr->optimization_symbols_offset,
		m_hdrr->optimization_symbols_count);
	// Auxiliary symbols are 4 bytes each.
	fprintf(dest, " Auxiliary Symbols 0x%-8x " "0x%-8x " "%-8d\n",
		(u32) m_hdrr->auxiliary_symbols_offset,
		(u32) m_hdrr->auxiliary_symbol_count * 4,
		m_hdrr->auxiliary_symbol_count);
	fprintf(dest, " Local Strings 0x%-8x " "0x%-8x " "-\n",
		(u32) m_hdrr->local_strings_offset,
		(u32) m_hdrr->local_strings_size_bytes);
	fprintf(dest, " External Strings 0x%-8x " "0x%-8x " "-\n",
		(u32) m_hdrr->external_strings_offset,
		(u32) m_hdrr->external_strings_size_bytes);
	fprintf(dest, " File Descriptors 0x%-8x " "0x%-8x " "%-8d\n",
		(u32) m_hdrr->file_descriptors_offset,
		(u32) m_hdrr->file_descriptor_count * (u32) sizeof(FileDescriptor),
		m_hdrr->file_descriptor_count);
	// Relative file descriptor entries are 4 bytes each.
	fprintf(dest, " Relative File Descriptors 0x%-8x " "0x%-8x " "%-8d\n",
		(u32) m_hdrr->relative_file_descriptors_offset,
		(u32) m_hdrr->relative_file_descriptor_count * 4,
		m_hdrr->relative_file_descriptor_count);
	// External symbol entries are 16 bytes each (sizeof(ExternalSymbolHeader)).
	fprintf(dest, " External Symbols 0x%-8x " "0x%-8x " "%-8d\n",
		(u32) m_hdrr->external_symbols_offset,
		(u32) m_hdrr->external_symbols_count * 16,
		m_hdrr->external_symbols_count);
}
// Print symbols from the table, filtered by the three boolean flags: local
// symbols per file, procedure descriptors per symbol, and external symbols.
Result<void> SymbolTableReader::print_symbols(FILE* out, bool print_locals, bool print_procedure_descriptors, bool print_externals) const
{
	if(print_locals || print_procedure_descriptors) {
		s32 count = file_count();
		for(s32 i = 0; i < count; i++) {
			Result<File> file = parse_file(i);
			CCC_RETURN_IF_ERROR(file);

			fprintf(out, "FILE %s:\n", file->command_line_path.c_str());
			for(const Symbol& symbol : file->symbols) {
				// When only procedure descriptors were requested, still print
				// the symbol each descriptor is attached to, for context.
				if(print_locals || symbol.procedure_descriptor) {
					print_symbol(out, symbol);
				}
				if(print_procedure_descriptors && symbol.procedure_descriptor) {
					print_procedure_descriptor(out, *symbol.procedure_descriptor);
				}
			}
		}
	}

	if(print_externals) {
		fprintf(out, "EXTERNAL SYMBOLS:\n");
		Result<std::vector<Symbol>> external_symbols = parse_external_symbols();
		CCC_RETURN_IF_ERROR(external_symbols);
		for(const Symbol& symbol : *external_symbols) {
			print_symbol(out, symbol);
		}
	}

	return Result<void>();
}
// Print one symbol as a single line: value, type, class, stabs code (or raw
// index), and name. Unknown enum values fall back to numeric forms.
static void print_symbol(FILE* out, const Symbol& symbol)
{
	fprintf(out, " %8x ", symbol.value);

	const char* symbol_type_str = symbol_type(symbol.symbol_type);
	if(symbol_type_str) {
		fprintf(out, "%-11s ", symbol_type_str);
	} else {
		fprintf(out, "ST(%7u) ", (u32) symbol.symbol_type);
	}

	const char* symbol_class_str = symbol_class(symbol.symbol_class);
	if(symbol_class_str) {
		fprintf(out, "%-4s ", symbol_class_str);
	} else if ((u32) symbol.symbol_class == 0) {
		// Class NIL: print padding instead of a name.
		fprintf(out, " ");
	} else {
		fprintf(out, "SC(%4u) ", (u32) symbol.symbol_class);
	}

	if(symbol.is_stabs()) {
		fprintf(out, "%-8s ", stabs_code_to_string(symbol.code()));
	} else {
		fprintf(out, "SI(%4u) ", symbol.index);
	}

	fprintf(out, "%s\n", symbol.string);
}
// Print every field of a procedure descriptor, one per line.
static void print_procedure_descriptor(FILE* out, const ProcedureDescriptor& procedure_descriptor)
{
	fprintf(out, " Address 0x%08x\n", procedure_descriptor.address);
	fprintf(out, " Symbol Index %d\n", procedure_descriptor.symbol_index);
	fprintf(out, " Line Number Entry Index %d\n", procedure_descriptor.line_number_entry_index);
	fprintf(out, " Saved Register Mask 0x%08x\n", procedure_descriptor.saved_register_mask);
	fprintf(out, " Saved Register Offset %d\n", procedure_descriptor.saved_register_offset);
	fprintf(out, " Optimization Entry Index %d\n", procedure_descriptor.optimization_entry_index);
	fprintf(out, " Saved Float Register Mask 0x%08x\n", procedure_descriptor.saved_float_register_mask);
	fprintf(out, " Saved Float Register Offset %d\n", procedure_descriptor.saved_float_register_offset);
	fprintf(out, " Frame Size %d\n", procedure_descriptor.frame_size);
	fprintf(out, " Frame Pointer Register %hd\n", procedure_descriptor.frame_pointer_register);
	fprintf(out, " Return PC Register %hd\n", procedure_descriptor.return_pc_register);
	fprintf(out, " Line Number Low %d\n", procedure_descriptor.line_number_low);
	fprintf(out, " Line Number High %d\n", procedure_descriptor.line_number_high);
	fprintf(out, " Line Number Offset %d\n", procedure_descriptor.line_number_offset);
}
// Compute the correction to apply to every file offset stored in the header,
// for the case where the .mdebug section was moved within the file without
// its contents being updated.
static Result<s32> get_corruption_fixing_fudge_offset(s32 section_offset, const SymbolicHeader& hdrr)
{
	// GCC will always put the first part of the symbol table right after the
	// header, so if the header says it's somewhere else we know the section has
	// probably been moved without updating its contents.
	const s32 stored_offsets[] = {
		hdrr.line_numbers_offset,
		hdrr.dense_numbers_offset,
		hdrr.procedure_descriptors_offset,
		hdrr.local_symbols_offset,
		hdrr.optimization_symbols_offset,
		hdrr.auxiliary_symbols_offset,
		hdrr.local_strings_offset,
		hdrr.external_strings_offset,
		hdrr.file_descriptors_offset,
		hdrr.relative_file_descriptors_offset,
		hdrr.external_symbols_offset
	};
	
	// The smallest positive stored offset is where the header claims its
	// first chunk of data lives.
	s32 right_after_header = INT32_MAX;
	for(s32 stored_offset : stored_offsets) {
		if(stored_offset > 0) {
			right_after_header = std::min(stored_offset, right_after_header);
		}
	}
	
	CCC_CHECK(right_after_header >= 0 && right_after_header < INT32_MAX, "Invalid symbolic header.");
	
	// Figure out how much we need to adjust all the file offsets by.
	s32 fudge_offset = section_offset - (right_after_header - sizeof(SymbolicHeader));
	if(fudge_offset != 0) {
		CCC_WARN("The .mdebug section was moved without updating its contents. Adjusting file offsets by %d bytes.", fudge_offset);
	}
	
	return fudge_offset;
}
// Decode one on-disk SymbolHeader into a Symbol, resolving its name against
// the string table at strings_offset.
static Result<Symbol> get_symbol(const SymbolHeader& header, std::span<const u8> elf, s32 strings_offset)
{
	Symbol symbol;

	std::optional<std::string_view> string = get_string(elf, strings_offset + header.iss);
	CCC_CHECK(string.has_value(), "Symbol has invalid string.");
	// NOTE(review): this stores a pointer into the ELF buffer, so the Symbol
	// is only valid for as long as that buffer is — presumably guaranteed by
	// the caller's ownership of the span.
	symbol.string = string->data();

	symbol.value = header.value;
	symbol.symbol_type = (SymbolType) header.st;
	symbol.symbol_class = (SymbolClass) header.sc;
	symbol.index = header.index;

	if(symbol.is_stabs()) {
		// Fail early on stabs codes we don't have a name for.
		CCC_CHECK(stabs_code_to_string(symbol.code()) != nullptr, "Bad stabs symbol code '%x'.", symbol.code());
	}

	return symbol;
}
// Name of a SymbolType value, or nullptr if the value has no known name.
const char* symbol_type(SymbolType type)
{
	switch(type) {
		case SymbolType::NIL: return "NIL";
		case SymbolType::GLOBAL: return "GLOBAL";
		case SymbolType::STATIC: return "STATIC";
		case SymbolType::PARAM: return "PARAM";
		case SymbolType::LOCAL: return "LOCAL";
		case SymbolType::LABEL: return "LABEL";
		case SymbolType::PROC: return "PROC";
		case SymbolType::BLOCK: return "BLOCK";
		case SymbolType::END: return "END";
		case SymbolType::MEMBER: return "MEMBER";
		case SymbolType::TYPEDEF: return "TYPEDEF";
		case SymbolType::FILE_SYMBOL: return "FILE";
		case SymbolType::STATICPROC: return "STATICPROC";
		case SymbolType::CONSTANT: return "CONSTANT";
	}
	return nullptr;
}
// Name of a SymbolClass value, or nullptr if the value has no known name.
const char* symbol_class(SymbolClass symbol_class)
{
	switch(symbol_class) {
		case SymbolClass::NIL: return "NIL";
		case SymbolClass::TEXT: return "TEXT";
		case SymbolClass::DATA: return "DATA";
		case SymbolClass::BSS: return "BSS";
		case SymbolClass::REGISTER: return "REGISTER";
		case SymbolClass::ABS: return "ABS";
		case SymbolClass::UNDEFINED: return "UNDEFINED";
		case SymbolClass::LOCAL: return "LOCAL";
		case SymbolClass::BITS: return "BITS";
		case SymbolClass::DBX: return "DBX";
		case SymbolClass::REG_IMAGE: return "REG_IMAGE";
		case SymbolClass::INFO: return "INFO";
		case SymbolClass::USER_STRUCT: return "USER_STRUCT";
		case SymbolClass::SDATA: return "SDATA";
		case SymbolClass::SBSS: return "SBSS";
		case SymbolClass::RDATA: return "RDATA";
		case SymbolClass::VAR: return "VAR";
		case SymbolClass::COMMON: return "COMMON";
		case SymbolClass::SCOMMON: return "SCOMMON";
		case SymbolClass::VAR_REGISTER: return "VAR_REGISTER";
		case SymbolClass::VARIANT: return "VARIANT";
		case SymbolClass::SUNDEFINED: return "SUNDEFINED";
		case SymbolClass::INIT: return "INIT";
		case SymbolClass::BASED_VAR: return "BASED_VAR";
		case SymbolClass::XDATA: return "XDATA";
		case SymbolClass::PDATA: return "PDATA";
		case SymbolClass::FINI: return "FINI";
		case SymbolClass::NONGP: return "NONGP";
	}
	return nullptr;
}
// Name of a StabsCode value (without the N_ prefix), or nullptr for codes
// that have no known name.
const char* stabs_code_to_string(StabsCode code)
{
	switch(code) {
		case STAB: return "STAB";
		case N_GSYM: return "GSYM";
		case N_FNAME: return "FNAME";
		case N_FUN: return "FUN";
		case N_STSYM: return "STSYM";
		case N_LCSYM: return "LCSYM";
		case N_MAIN: return "MAIN";
		case N_PC: return "PC";
		case N_NSYMS: return "NSYMS";
		case N_NOMAP: return "NOMAP";
		case N_OBJ: return "OBJ";
		case N_OPT: return "OPT";
		case N_RSYM: return "RSYM";
		case N_M2C: return "M2C";
		case N_SLINE: return "SLINE";
		case N_DSLINE: return "DSLINE";
		case N_BSLINE: return "BSLINE";
		case N_EFD: return "EFD";
		case N_EHDECL: return "EHDECL";
		case N_CATCH: return "CATCH";
		case N_SSYM: return "SSYM";
		case N_SO: return "SO";
		case N_LSYM: return "LSYM";
		case N_BINCL: return "BINCL";
		case N_SOL: return "SOL";
		case N_PSYM: return "PSYM";
		case N_EINCL: return "EINCL";
		case N_ENTRY: return "ENTRY";
		case N_LBRAC: return "LBRAC";
		case N_EXCL: return "EXCL";
		case N_SCOPE: return "SCOPE";
		case N_RBRAC: return "RBRAC";
		case N_BCOMM: return "BCOMM";
		case N_ECOMM: return "ECOMM";
		case N_ECOML: return "ECOML";
		case N_NBTEXT: return "NBTEXT";
		case N_NBDATA: return "NBDATA";
		case N_NBBSS: return "NBBSS";
		case N_NBSTS: return "NBSTS";
		case N_NBLCS: return "NBLCS";
		case N_LENG: return "LENG";
	}
	return nullptr;
}
}

176
3rdparty/ccc/src/ccc/mdebug_section.h vendored Normal file
View File

@@ -0,0 +1,176 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include "util.h"
namespace ccc::mdebug {
struct SymbolicHeader;

// Symbol type (the st bitfield of a symbol header).
enum class SymbolType : u32 {
	NIL = 0,
	GLOBAL = 1,
	STATIC = 2,
	PARAM = 3,
	LOCAL = 4,
	LABEL = 5,
	PROC = 6,
	BLOCK = 7,
	END = 8,
	MEMBER = 9,
	TYPEDEF = 10,
	FILE_SYMBOL = 11,
	// Values 12 and 13 are not defined here.
	STATICPROC = 14,
	CONSTANT = 15
};

// Storage class (the sc bitfield of a symbol header).
enum class SymbolClass : u32 {
	NIL = 0,
	TEXT = 1,
	DATA = 2,
	BSS = 3,
	REGISTER = 4,
	ABS = 5,
	UNDEFINED = 6,
	LOCAL = 7,
	BITS = 8,
	DBX = 9,
	REG_IMAGE = 10,
	INFO = 11,
	USER_STRUCT = 12,
	SDATA = 13,
	SBSS = 14,
	RDATA = 15,
	VAR = 16,
	COMMON = 17,
	SCOMMON = 18,
	VAR_REGISTER = 19,
	VARIANT = 20,
	SUNDEFINED = 21,
	INIT = 22,
	BASED_VAR = 23,
	XDATA = 24,
	PDATA = 25,
	FINI = 26,
	NONGP = 27
};

// See stab.def from gcc for documentation on what all these are.
enum StabsCode {
	STAB = 0x00,
	N_GSYM = 0x20,
	N_FNAME = 0x22,
	N_FUN = 0x24,
	N_STSYM = 0x26,
	N_LCSYM = 0x28,
	N_MAIN = 0x2a,
	N_PC = 0x30,
	N_NSYMS = 0x32,
	N_NOMAP = 0x34,
	N_OBJ = 0x38,
	N_OPT = 0x3c,
	N_RSYM = 0x40,
	N_M2C = 0x42,
	N_SLINE = 0x44,
	N_DSLINE = 0x46,
	N_BSLINE = 0x48,
	N_EFD = 0x4a,
	N_EHDECL = 0x50,
	N_CATCH = 0x54,
	N_SSYM = 0x60,
	N_SO = 0x64,
	N_LSYM = 0x80,
	N_BINCL = 0x82,
	N_SOL = 0x84,
	N_PSYM = 0xa0,
	N_EINCL = 0xa2,
	N_ENTRY = 0xa4,
	N_LBRAC = 0xc0,
	N_EXCL = 0xc2,
	N_SCOPE = 0xc4,
	N_RBRAC = 0xe0,
	N_BCOMM = 0xe2,
	N_ECOMM = 0xe4,
	N_ECOML = 0xe8,
	N_NBTEXT = 0xf0,
	N_NBDATA = 0xf2,
	N_NBBSS = 0xf4,
	N_NBSTS = 0xf6,
	N_NBLCS = 0xf8,
	N_LENG = 0xfe
};
// On-disk layout of a procedure descriptor (PDR).
CCC_PACKED_STRUCT(ProcedureDescriptor,
	/* 0x00 */ u32 address;
	/* 0x04 */ u32 symbol_index; // Index into the owning file's symbol list.
	/* 0x08 */ s32 line_number_entry_index;
	/* 0x0c */ s32 saved_register_mask;
	/* 0x10 */ s32 saved_register_offset;
	/* 0x14 */ s32 optimization_entry_index;
	/* 0x18 */ s32 saved_float_register_mask;
	/* 0x1c */ s32 saved_float_register_offset;
	/* 0x20 */ s32 frame_size;
	/* 0x24 */ s16 frame_pointer_register;
	/* 0x26 */ s16 return_pc_register;
	/* 0x28 */ s32 line_number_low;
	/* 0x2c */ s32 line_number_high;
	/* 0x30 */ u32 line_number_offset;
)
static_assert(sizeof(ProcedureDescriptor) == 0x34);

// A decoded symbol. The string pointer references the ELF buffer the symbol
// was parsed from, so its lifetime is tied to that buffer.
struct Symbol {
	u32 value;
	SymbolType symbol_type;
	SymbolClass symbol_class;
	u32 index;
	const char* string;
	const ProcedureDescriptor* procedure_descriptor = nullptr;
	
	// Stabs symbols store 0x8f300 plus the stabs code in the index bitfield.
	bool is_stabs() const {
		return (index & 0xfff00) == 0x8f300;
	}
	
	// Only meaningful when is_stabs() returns true.
	StabsCode code() const {
		return (StabsCode) (index - 0x8f300);
	}
};

// All of the information parsed from a single file descriptor.
struct File {
	std::vector<Symbol> symbols;
	u32 address = 0;
	std::string working_dir; // The working directory of gcc.
	std::string command_line_path; // The source file path passed on the command line to gcc.
	std::string full_path; // The full combined path.
};
// Reader for the .mdebug symbol table embedded in an ELF image. Call init()
// first; every other member asserts that it succeeded.
class SymbolTableReader {
public:
	Result<void> init(std::span<const u8> elf, s32 section_offset);
	
	s32 file_count() const;
	Result<File> parse_file(s32 index) const;
	Result<std::vector<Symbol>> parse_external_symbols() const;
	void print_header(FILE* out) const;
	Result<void> print_symbols(FILE* out, bool print_locals, bool print_procedure_descriptors, bool print_externals) const;
	
protected:
	bool m_ready = false; // Set by a successful init() call.
	std::span<const u8> m_elf;
	s32 m_section_offset;
	// If the .mdebug section was moved without updating its contents all the
	// absolute file offsets stored within will be incorrect by a fixed amount.
	s32 m_fudge_offset;
	const SymbolicHeader* m_hdrr; // Points into m_elf.
};

// Printable names for the enums above; each returns nullptr for unknown values.
const char* symbol_type(SymbolType type);
const char* symbol_class(SymbolClass symbol_class);
const char* stabs_code_to_string(StabsCode code);
}

220
3rdparty/ccc/src/ccc/mdebug_symbols.cpp vendored Normal file
View File

@@ -0,0 +1,220 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "mdebug_symbols.h"
#include "importer_flags.h"
namespace ccc::mdebug {
static void mark_duplicate_symbols(std::vector<ParsedSymbol>& symbols);
// Classify each raw mdebug symbol into a ParsedSymbol, parsing "name:type"
// STABS strings (including multi-part continuations) along the way. May clear
// STRICT_PARSING from importer_flags if a truncated symbol is encountered.
Result<std::vector<ParsedSymbol>> parse_symbols(const std::vector<mdebug::Symbol>& input, u32& importer_flags)
{
	std::vector<ParsedSymbol> output;
	std::string prefix;
	for(const mdebug::Symbol& symbol : input) {
		if(symbol.is_stabs()) {
			switch(symbol.code()) {
				case mdebug::N_GSYM: // Global variable
				case mdebug::N_FUN: // Function
				case mdebug::N_STSYM: // Data section static global variable
				case mdebug::N_LCSYM: // BSS section static global variable
				case mdebug::N_RSYM: // Register variable
				case mdebug::N_LSYM: // Automatic variable or type definition
				case mdebug::N_PSYM: { // Parameter variable
					// Some STABS symbols are split between multiple strings.
					if(symbol.string[0] != '\0') {
						if(symbol.string[strlen(symbol.string) - 1] == '\\') {
							// A trailing backslash marks a continuation:
							// accumulate everything before it and wait for
							// the next piece.
							prefix += std::string(symbol.string, symbol.string + strlen(symbol.string) - 1);
						} else {
							std::string merged_string;
							const char* string;
							if(!prefix.empty()) {
								merged_string = prefix + symbol.string;
								string = merged_string.c_str();
								prefix.clear();
							} else {
								string = symbol.string;
							}
							// parse_stabs_symbol advances this pointer; any
							// characters left over are unparsed trailing data.
							const char* input = string;
							Result<StabsSymbol> parse_result = parse_stabs_symbol(input);
							if(parse_result.success()) {
								if(*input != '\0') {
									if(importer_flags & STRICT_PARSING) {
										return CCC_FAILURE("Unknown data '%s' at the end of the '%s' stab.", input, parse_result->name.c_str());
									} else {
										CCC_WARN("Unknown data '%s' at the end of the '%s' stab.", input, parse_result->name.c_str());
									}
								}
								ParsedSymbol& parsed = output.emplace_back();
								parsed.type = ParsedSymbolType::NAME_COLON_TYPE;
								parsed.raw = &symbol;
								parsed.name_colon_type = std::move(*parse_result);
							} else if(parse_result.error().message == STAB_TRUNCATED_ERROR_MESSAGE) {
								// Symbol truncated due to a GCC bug. Report a
								// warning and try to tolerate further faults
								// caused as a result of this.
								CCC_WARN("%s Symbol string: %s", STAB_TRUNCATED_ERROR_MESSAGE, string);
								importer_flags &= ~STRICT_PARSING;
							} else {
								return CCC_FAILURE("%s Symbol string: %s",
									parse_result.error().message.c_str(), string);
							}
						}
					} else {
						// An empty string must not appear mid-continuation.
						CCC_CHECK(prefix.empty(), "Invalid STABS continuation.");
						// An empty N_FUN stab marks the end of a function.
						if(symbol.code() == mdebug::N_FUN) {
							ParsedSymbol& func_end = output.emplace_back();
							func_end.type = ParsedSymbolType::FUNCTION_END;
							func_end.raw = &symbol;
						}
					}
					break;
				}
				case mdebug::N_SOL: { // Sub-source file
					ParsedSymbol& sub = output.emplace_back();
					sub.type = ParsedSymbolType::SUB_SOURCE_FILE;
					sub.raw = &symbol;
					break;
				}
				case mdebug::N_LBRAC: { // Begin block
					ParsedSymbol& begin_block = output.emplace_back();
					begin_block.type = ParsedSymbolType::LBRAC;
					begin_block.raw = &symbol;
					break;
				}
				case mdebug::N_RBRAC: { // End block
					ParsedSymbol& end_block = output.emplace_back();
					end_block.type = ParsedSymbolType::RBRAC;
					end_block.raw = &symbol;
					break;
				}
				case mdebug::N_SO: { // Source filename
					ParsedSymbol& so_symbol = output.emplace_back();
					so_symbol.type = ParsedSymbolType::SOURCE_FILE;
					so_symbol.raw = &symbol;
					break;
				}
				// Codes that are recognized but intentionally ignored.
				case mdebug::STAB:
				case mdebug::N_OPT:
				case mdebug::N_BINCL:
				case mdebug::N_EINCL: {
					break;
				}
				// Codes we do not handle: warn but keep going.
				case mdebug::N_FNAME:
				case mdebug::N_MAIN:
				case mdebug::N_PC:
				case mdebug::N_NSYMS:
				case mdebug::N_NOMAP:
				case mdebug::N_OBJ:
				case mdebug::N_M2C:
				case mdebug::N_SLINE:
				case mdebug::N_DSLINE:
				case mdebug::N_BSLINE:
				case mdebug::N_EFD:
				case mdebug::N_EHDECL:
				case mdebug::N_CATCH:
				case mdebug::N_SSYM:
				case mdebug::N_ENTRY:
				case mdebug::N_EXCL:
				case mdebug::N_SCOPE:
				case mdebug::N_BCOMM:
				case mdebug::N_ECOMM:
				case mdebug::N_ECOML:
				case mdebug::N_NBTEXT:
				case mdebug::N_NBDATA:
				case mdebug::N_NBBSS:
				case mdebug::N_NBSTS:
				case mdebug::N_NBLCS:
				case mdebug::N_LENG: {
					CCC_WARN("Unhandled N_%s symbol: %s", mdebug::stabs_code_to_string(symbol.code()), symbol.string);
					break;
				}
			}
		} else {
			// Non-stabs symbols are passed through unparsed.
			ParsedSymbol& non_stabs_symbol = output.emplace_back();
			non_stabs_symbol.type = ParsedSymbolType::NON_STABS;
			non_stabs_symbol.raw = &symbol;
		}
	}
	mark_duplicate_symbols(output);
	return output;
}
// Mark redundant type symbols (tag/typedef pairs and type references that
// only rename another type) as duplicates, and propagate names onto unnamed
// referenced types.
static void mark_duplicate_symbols(std::vector<ParsedSymbol>& symbols)
{
	// Build an index from stabs type number to the symbol that defines it.
	std::map<StabsTypeNumber, size_t> stabs_type_number_to_symbol;
	for(size_t i = 0; i < symbols.size(); i++) {
		ParsedSymbol& symbol = symbols[i];
		if(symbol.type == ParsedSymbolType::NAME_COLON_TYPE) {
			StabsType& type = *symbol.name_colon_type.type;
			if(type.type_number.valid() && type.descriptor.has_value()) {
				stabs_type_number_to_symbol.emplace(type.type_number, i);
			}
		}
	}
	
	// Initial typedef classification: 't' symbols that are not enums.
	for(ParsedSymbol& symbol : symbols) {
		symbol.is_typedef =
			symbol.type == ParsedSymbolType::NAME_COLON_TYPE &&
			symbol.name_colon_type.descriptor == StabsSymbolDescriptor::TYPE_NAME &&
			symbol.name_colon_type.type->descriptor != StabsTypeDescriptor::ENUM;
	}
	
	for(size_t i = 0; i < symbols.size(); i++) {
		ParsedSymbol& symbol = symbols[i];
		if(symbol.type != ParsedSymbolType::NAME_COLON_TYPE) {
			continue;
		}
		
		bool is_type =
			symbol.name_colon_type.descriptor == StabsSymbolDescriptor::TYPE_NAME ||
			symbol.name_colon_type.descriptor == StabsSymbolDescriptor::ENUM_STRUCT_OR_TYPE_TAG;
		if(!is_type) {
			continue;
		}
		
		StabsType& type = *symbol.name_colon_type.type;
		
		// Case 1: a bare reference to a type number defined elsewhere under
		// the same name.
		if(!type.descriptor.has_value()) {
			auto referenced_index = stabs_type_number_to_symbol.find(type.type_number);
			if(referenced_index != stabs_type_number_to_symbol.end()) {
				ParsedSymbol& referenced = symbols[referenced_index->second];
				if(referenced.name_colon_type.name == symbol.name_colon_type.name) {
					// symbol: "Struct:T(1,1)=s1;"
					// referenced: "Struct:t(1,1)"
					symbol.duplicate = true;
				}
			}
		}
		
		// Case 2: an explicit TYPE_REFERENCE to another defined type.
		if(type.descriptor.has_value() && type.descriptor == StabsTypeDescriptor::TYPE_REFERENCE) {
			auto referenced_index = stabs_type_number_to_symbol.find(type.as<StabsTypeReferenceType>().type->type_number);
			if(referenced_index != stabs_type_number_to_symbol.end() && referenced_index->second != i) {
				ParsedSymbol& referenced = symbols[referenced_index->second];
				if(referenced.name_colon_type.name == " ") {
					// referenced: " :T(1,1)=e;"
					// symbol: "ErraticEnum:t(1,2)=(1,1)"
					referenced.name_colon_type.name = symbol.name_colon_type.name;
					referenced.is_typedef = true;
					symbol.duplicate = true;
				}
				if(referenced.name_colon_type.name == symbol.name_colon_type.name) {
					// referenced: "NamedTypedefedStruct:T(1,1)=s1;"
					// symbol: "NamedTypedefedStruct:t(1,2)=(1,1)"
					referenced.is_typedef = true;
					symbol.duplicate = true;
				}
			}
		}
	}
}
}

32
3rdparty/ccc/src/ccc/mdebug_symbols.h vendored Normal file
View File

@@ -0,0 +1,32 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include "util.h"
#include "stabs.h"
#include "mdebug_section.h"
namespace ccc::mdebug {
// High-level classification of a parsed .mdebug symbol, determining which
// fields of ParsedSymbol are meaningful. NON_STABS is used for symbols that
// aren't STABS symbols at all.
enum class ParsedSymbolType {
	NAME_COLON_TYPE,
	SOURCE_FILE,
	SUB_SOURCE_FILE,
	LBRAC,
	RBRAC,
	FUNCTION_END,
	NON_STABS
};
// A single symbol from the .mdebug section, after initial parsing.
struct ParsedSymbol {
	ParsedSymbolType type;
	const mdebug::Symbol* raw; // The underlying symbol this was parsed from.
	StabsSymbol name_colon_type; // Only populated when type == NAME_COLON_TYPE.
	bool duplicate = false; // This symbol redundantly redefines a type defined by another symbol.
	bool is_typedef = false; // This symbol should be treated as a typedef.
};
Result<std::vector<ParsedSymbol>> parse_symbols(const std::vector<mdebug::Symbol>& input, u32& importer_flags);
}

193
3rdparty/ccc/src/ccc/sndll.cpp vendored Normal file
View File

@@ -0,0 +1,193 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "sndll.h"
#include "importer_flags.h"
namespace ccc {
// On-disk layout of the part of the SNDLL header shared between versions. The
// u32 fields that reference other data (relocations, symbols, elf_path) are
// either absolute addresses or file offsets depending on how the file was
// loaded — see parse_sndll_file/parse_sndll_common below.
CCC_PACKED_STRUCT(SNDLLHeaderCommon,
	/* 0x00 */ u32 magic;
	/* 0x04 */ u32 relocations;
	/* 0x08 */ u32 relocation_count;
	/* 0x0c */ u32 symbols;
	/* 0x10 */ u32 symbol_count;
	/* 0x14 */ u32 elf_path;
	/* 0x18 */ u32 load_func;
	/* 0x1c */ u32 unload_func;
	/* 0x20 */ u32 unknown_20;
	/* 0x24 */ u32 unknown_24;
	/* 0x28 */ u32 unknown_28;
	/* 0x2c */ u32 file_size;
	/* 0x30 */ u32 unknown_30;
)

// Version 1 header: just the common fields.
CCC_PACKED_STRUCT(SNDLLHeaderV1,
	/* 0x00 */ SNDLLHeaderCommon common;
)

// Version 2 header: the common fields followed by two extra unknown fields.
CCC_PACKED_STRUCT(SNDLLHeaderV2,
	/* 0x00 */ SNDLLHeaderCommon common;
	/* 0x34 */ u32 unknown_34;
	/* 0x38 */ u32 unknown_38;
)

// Relocation table entry. The meaning of the fields has not been worked out.
CCC_PACKED_STRUCT(SNDLLRelocation,
	/* 0x0 */ u32 unknown_0;
	/* 0x4 */ u32 unknown_4;
	/* 0x8 */ u32 unknown_8;
)

// On-disk symbol table entry. 'string' points at the symbol's name.
CCC_PACKED_STRUCT(SNDLLSymbolHeader,
	/* 0x0 */ u32 string;
	/* 0x4 */ u32 value;
	/* 0x8 */ u8 unknown_8;
	/* 0x9 */ u8 unknown_9;
	/* 0xa */ SNDLLSymbolType type;
	/* 0xb */ u8 processed;
)

static Result<SNDLLFile> parse_sndll_common(
	std::span<const u8> image, Address address, SNDLLType type, const SNDLLHeaderCommon& common, SNDLLVersion version);
static const char* sndll_symbol_type_to_string(SNDLLSymbolType type);
// Parse the header of an SNDLL file (or section) and extract the symbol
// table. The magic is "SNR" followed by an ASCII version character.
Result<SNDLLFile> parse_sndll_file(std::span<const u8> image, Address address, SNDLLType type)
{
	std::optional<u32> magic = copy_unaligned<u32>(image, 0);
	CCC_CHECK(magic.has_value(), "Failed to read SNDLL header.");
	// Only the low three bytes are compared; the fourth byte is the version.
	CCC_CHECK((*magic & 0xffffff) == CCC_FOURCC("SNR\00"), "Not a SNDLL %s.", address.valid() ? "section" : "file");
	char version = *magic >> 24;
	switch(version) {
		case '1': {
			const SNDLLHeaderV1* header = get_unaligned<SNDLLHeaderV1>(image, 0);
			CCC_CHECK(header, "File too small to contain SNDLL V1 header.");
			return parse_sndll_common(image, address, type, header->common, SNDLL_V1);
		}
		case '2': {
			const SNDLLHeaderV2* header = get_unaligned<SNDLLHeaderV2>(image, 0);
			CCC_CHECK(header, "File too small to contain SNDLL V2 header.");
			return parse_sndll_common(image, address, type, header->common, SNDLL_V2);
		}
	}
	return CCC_FAILURE("Unknown SNDLL version '%c'.", version);
}
// Read the ELF path and the symbol table out of the SNDLL image. If a valid
// address was provided, the pointer fields in the header are interpreted as
// addresses (and rebased by subtracting it), otherwise as file offsets
// (address.get_or_zero() is then 0).
static Result<SNDLLFile> parse_sndll_common(
	std::span<const u8> image, Address address, SNDLLType type, const SNDLLHeaderCommon& common, SNDLLVersion version)
{
	SNDLLFile sndll;
	sndll.address = address;
	sndll.type = type;
	sndll.version = version;
	
	if(common.elf_path) {
		// NOTE(review): elf_path is not rebased by the load address here,
		// unlike the symbol pointers below — confirm this is intended.
		std::optional<std::string_view> elf_path = get_string(image, common.elf_path);
		CCC_CHECK(elf_path.has_value(), "SNDLL header has invalid ELF path field.");
		sndll.elf_path = *elf_path;
	}
	
	// Sanity check so corrupt files can't make us reserve an absurd amount of
	// memory (caps the symbol table at 32 MiB).
	CCC_CHECK(common.symbol_count < (32 * 1024 * 1024) / sizeof(SNDLLSymbol), "SNDLL symbol count is too high.");
	sndll.symbols.reserve(common.symbol_count);
	
	for(u32 i = 0; i < common.symbol_count; i++) {
		// Translate the symbol pointer into an offset within the image.
		u32 symbol_offset = common.symbols - address.get_or_zero() + i * sizeof(SNDLLSymbolHeader);
		const SNDLLSymbolHeader* symbol_header = get_unaligned<SNDLLSymbolHeader>(image, symbol_offset);
		CCC_CHECK(symbol_header, "SNDLL symbol out of range.");
		
		// Symbols may legitimately have no name (string == 0).
		std::optional<std::string_view> string;
		if(symbol_header->string) {
			string = get_string(image, symbol_header->string - address.get_or_zero());
		}
		
		SNDLLSymbol& symbol = sndll.symbols.emplace_back();
		symbol.type = symbol_header->type;
		symbol.value = symbol_header->value;
		if(string.has_value()) {
			symbol.string = *string;
		}
	}
	
	return sndll;
}
// Create symbols in the database for each symbol in a parsed SNDLL file. A
// function, global variable or label is created depending on which section
// (if any) the symbol's address falls in.
Result<void> import_sndll_symbols(
	SymbolDatabase& database,
	const SNDLLFile& sndll,
	const SymbolGroup& group,
	u32 importer_flags,
	DemanglerFunctions demangler)
{
	for(const SNDLLSymbol& symbol : sndll.symbols) {
		// Skip symbols that carry no useful information.
		if(symbol.value == 0 || symbol.string.empty()) {
			continue;
		}
		
		// Non-absolute symbols in dynamic libraries are relative to the load
		// address of the SNDLL file.
		u32 address = symbol.value;
		if(symbol.type != SNDLL_ABSOLUTE && sndll.type == SNDLLType::DYNAMIC_LIBRARY) {
			address += sndll.address.get_or_zero();
		}
		
		// Unless deduplication is disabled, skip addresses that already have
		// a function or variable associated with them.
		if(!(importer_flags & DONT_DEDUPLICATE_SYMBOLS)) {
			if(database.functions.first_handle_from_starting_address(address).valid()) {
				continue;
			}
			if(database.global_variables.first_handle_from_starting_address(address).valid()) {
				continue;
			}
			if(database.local_variables.first_handle_from_starting_address(address).valid()) {
				continue;
			}
		}
		
		// Pick the symbol kind based on the kind of section the address falls
		// in, falling back to a plain label.
		const Section* section = database.sections.symbol_overlapping_address(address);
		if(section) {
			if(section->contains_code()) {
				Result<Function*> function = database.functions.create_symbol(
					symbol.string, group.source, group.module_symbol, address, importer_flags, demangler);
				CCC_RETURN_IF_ERROR(function);
				continue;
			} else if(section->contains_data()) {
				Result<GlobalVariable*> global_variable = database.global_variables.create_symbol(
					symbol.string, group.source, group.module_symbol, address, importer_flags, demangler);
				CCC_RETURN_IF_ERROR(global_variable);
				continue;
			}
		}
		
		Result<Label*> label = database.labels.create_symbol(
			symbol.string, group.source, group.module_symbol, address, importer_flags, demangler);
		CCC_RETURN_IF_ERROR(label);
	}
	
	return Result<void>();
}
// Dump a human-readable table of all the symbols in an SNDLL file.
void print_sndll_symbols(FILE* out, const SNDLLFile& sndll)
{
	fprintf(out, "SNDLL SYMBOLS:\n");
	for(const SNDLLSymbol& symbol : sndll.symbols) {
		const char* name = symbol.string.empty() ? "(no string)" : symbol.string.c_str();
		fprintf(out, "%8s %08x %s\n", sndll_symbol_type_to_string(symbol.type), symbol.value, name);
	}
}
// Convert a symbol type enum value into a string for display purposes.
static const char* sndll_symbol_type_to_string(SNDLLSymbolType type)
{
	if(type == SNDLL_NIL) return "NIL";
	if(type == SNDLL_EXTERNAL) return "EXTERNAL";
	if(type == SNDLL_RELATIVE) return "RELATIVE";
	if(type == SNDLL_WEAK) return "WEAK";
	if(type == SNDLL_ABSOLUTE) return "ABSOLUTE";
	return "invalid";
}
}

55
3rdparty/ccc/src/ccc/sndll.h vendored Normal file
View File

@@ -0,0 +1,55 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include "symbol_database.h"
namespace ccc {
// Where the SNDLL data came from: an SNDATA section or a standalone dynamic
// library file.
enum class SNDLLType {
	SNDATA_SECTION,
	DYNAMIC_LIBRARY
};

// The SNDLL header version, taken from the last byte of the magic.
enum SNDLLVersion {
	SNDLL_V1,
	SNDLL_V2
};

enum SNDLLSymbolType : u8 {
	SNDLL_NIL = 0, // I think this is just so that the first real symbol has an index of 1.
	SNDLL_EXTERNAL = 1, // Symbol with an empty value, to be filled in from another module.
	SNDLL_RELATIVE = 2, // Global symbol, value is relative to the start of the SNDLL file.
	SNDLL_WEAK = 3, // Weak symbol, value is relative to the start of the SNDLL file.
	SNDLL_ABSOLUTE = 4 // Global symbol, value is an absolute address.
};
// A single symbol parsed from an SNDLL symbol table.
struct SNDLLSymbol {
	SNDLLSymbolType type = SNDLL_NIL;
	u32 value = 0; // Address or offset, depending on the symbol type.
	std::string string; // The symbol's name. May be empty.
};

// A parsed SNDLL file or section, as produced by parse_sndll_file.
struct SNDLLFile {
	Address address; // The load address, if one was provided.
	SNDLLType type;
	SNDLLVersion version;
	std::string elf_path; // Path to the associated ELF, from the header.
	std::vector<SNDLLSymbol> symbols;
};
// If a valid address is passed, the pointers in the header will be treated as
// addresses, otherwise they will be treated as file offsets.
Result<SNDLLFile> parse_sndll_file(std::span<const u8> image, Address address, SNDLLType type);
Result<void> import_sndll_symbols(
SymbolDatabase& database,
const SNDLLFile& sndll,
const SymbolGroup& group,
u32 importer_flags,
DemanglerFunctions demangler);
void print_sndll_symbols(FILE* out, const SNDLLFile& sndll);
}

835
3rdparty/ccc/src/ccc/stabs.cpp vendored Normal file
View File

@@ -0,0 +1,835 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "stabs.h"
namespace ccc {
#define STABS_DEBUG(...) //__VA_ARGS__
#define STABS_DEBUG_PRINTF(...) STABS_DEBUG(printf(__VA_ARGS__);)
static bool validate_symbol_descriptor(StabsSymbolDescriptor descriptor);
static Result<std::unique_ptr<StabsType>> parse_stabs_type(const char*& input);
static Result<std::vector<StabsStructOrUnionType::Field>> parse_field_list(const char*& input);
static Result<std::vector<StabsStructOrUnionType::MemberFunctionSet>> parse_member_functions(const char*& input);
static Result<StabsStructOrUnionType::Visibility> parse_visibility_character(const char*& input);
STABS_DEBUG(static void print_field(const StabsStructOrUnionType::Field& field);)
const char* STAB_TRUNCATED_ERROR_MESSAGE =
"STABS symbol truncated. This was probably caused by a GCC bug. "
"Other symbols from the same translation unit may also be invalid.";
// Parse a single STABS symbol of the form "name:descriptor type", advancing
// the input pointer past the parsed text.
Result<StabsSymbol> parse_stabs_symbol(const char*& input)
{
	STABS_DEBUG_PRINTF("PARSING %s\n", input);
	
	StabsSymbol symbol;
	
	Result<std::string> name = parse_dodgy_stabs_identifier(input, ':');
	CCC_RETURN_IF_ERROR(name);
	symbol.name = *name;
	
	CCC_EXPECT_CHAR(input, ':', "identifier");
	CCC_CHECK(*input != '\0', "Unexpected end of input.");
	// If the descriptor character is omitted and the type number follows the
	// colon directly, the symbol describes a local variable.
	if((*input >= '0' && *input <= '9') || *input == '(') {
		symbol.descriptor = StabsSymbolDescriptor::LOCAL_VARIABLE;
	} else {
		char symbol_descriptor = *(input++);
		CCC_CHECK(symbol_descriptor != '\0', "Failed to parse symbol descriptor.");
		symbol.descriptor = (StabsSymbolDescriptor) symbol_descriptor;
	}
	CCC_CHECK(validate_symbol_descriptor(symbol.descriptor),
		"Invalid symbol descriptor '%c'.",
		(char) symbol.descriptor);
	CCC_CHECK(*input != '\0', "Unexpected end of input.");
	// Skip a stray 't' character if present.
	// NOTE(review): presumably this handles inputs like "name:tt123" — confirm
	// against real symbol tables.
	if(*input == 't') {
		input++;
	}
	
	auto type = parse_top_level_stabs_type(input);
	CCC_RETURN_IF_ERROR(type);
	
	// Handle nested functions. Both parts of the suffix are skipped over.
	bool is_function =
		symbol.descriptor == StabsSymbolDescriptor::LOCAL_FUNCTION ||
		symbol.descriptor == StabsSymbolDescriptor::GLOBAL_FUNCTION;
	if(is_function && input[0] == ',') {
		input++;
		while(*input != ',' && *input != '\0') input++; // enclosing function
		CCC_EXPECT_CHAR(input, ',', "nested function suffix");
		while(*input != ',' && *input != '\0') input++; // function
	}
	
	symbol.type = std::move(*type);
	
	// Make sure that variable names aren't used as type names e.g. the STABS
	// symbol "somevar:P123=*456" may be referenced by the type number 123, but
	// the type name is not "somevar".
	bool is_type = symbol.descriptor == StabsSymbolDescriptor::TYPE_NAME
		|| symbol.descriptor == StabsSymbolDescriptor::ENUM_STRUCT_OR_TYPE_TAG;
	if(is_type) {
		symbol.type->name = symbol.name;
	}
	
	symbol.type->is_typedef = symbol.descriptor == StabsSymbolDescriptor::TYPE_NAME;
	symbol.type->is_root = true;
	
	return symbol;
}
// Check that a symbol descriptor character is one we know how to handle.
static bool validate_symbol_descriptor(StabsSymbolDescriptor descriptor)
{
	switch(descriptor) {
		case StabsSymbolDescriptor::LOCAL_VARIABLE:
		case StabsSymbolDescriptor::REFERENCE_PARAMETER_A:
		case StabsSymbolDescriptor::LOCAL_FUNCTION:
		case StabsSymbolDescriptor::GLOBAL_FUNCTION:
		case StabsSymbolDescriptor::GLOBAL_VARIABLE:
		case StabsSymbolDescriptor::REGISTER_PARAMETER:
		case StabsSymbolDescriptor::VALUE_PARAMETER:
		case StabsSymbolDescriptor::REGISTER_VARIABLE:
		case StabsSymbolDescriptor::STATIC_GLOBAL_VARIABLE:
		case StabsSymbolDescriptor::TYPE_NAME:
		case StabsSymbolDescriptor::ENUM_STRUCT_OR_TYPE_TAG:
		case StabsSymbolDescriptor::STATIC_LOCAL_VARIABLE:
		case StabsSymbolDescriptor::REFERENCE_PARAMETER_V:
			return true;
		default:
			return false;
	}
}
// Parse a STABS type, plus the suffixes that can only appear at the top level
// of a symbol (the first base class suffix and live range information).
Result<std::unique_ptr<StabsType>> parse_top_level_stabs_type(const char*& input)
{
	Result<std::unique_ptr<StabsType>> type = parse_stabs_type(input);
	CCC_RETURN_IF_ERROR(type);
	
	// Handle first base class suffixes.
	if((*type)->descriptor == StabsTypeDescriptor::STRUCT && input[0] == '~' && input[1] == '%') {
		input += 2;
		Result<std::unique_ptr<StabsType>> first_base_class = parse_stabs_type(input);
		CCC_RETURN_IF_ERROR(first_base_class);
		(*type)->as<StabsStructType>().first_base_class = std::move(*first_base_class);
		CCC_EXPECT_CHAR(input, ';', "first base class suffix");
	}
	
	// Handle extra live range information. The values are validated but
	// currently discarded.
	if(input[0] == ';' && input[1] == 'l') {
		input += 2;
		CCC_EXPECT_CHAR(input, '(', "live range suffix");
		CCC_EXPECT_CHAR(input, '#', "live range suffix");
		std::optional<s32> start = parse_number_s32(input);
		CCC_CHECK(start.has_value(), "Failed to parse live range suffix.");
		CCC_EXPECT_CHAR(input, ',', "live range suffix");
		CCC_EXPECT_CHAR(input, '#', "live range suffix");
		std::optional<s32> end = parse_number_s32(input);
		CCC_CHECK(end.has_value(), "Failed to parse live range suffix.");
		CCC_EXPECT_CHAR(input, ')', "live range suffix");
	}
	
	return type;
}
// Parse a single STABS type. The result is either a bare type reference (just
// a type number) or a full definition ("<number>=<descriptor>..."), dispatched
// on the type descriptor character. Fixed: the cross reference error path
// previously printed the uninitialised enum field instead of the character
// that was actually read.
static Result<std::unique_ptr<StabsType>> parse_stabs_type(const char*& input)
{
	StabsTypeNumber type_number;
	
	CCC_CHECK(*input != '\0', "Unexpected end of input.");
	if(*input == '(') {
		// This file has type numbers made up of two pieces: an include file
		// index and a type number.
		input++;
		
		std::optional<s32> file_index = parse_number_s32(input);
		CCC_CHECK(file_index.has_value(), "Failed to parse type number (file index).");
		
		CCC_EXPECT_CHAR(input, ',', "type number");
		
		std::optional<s32> type_index = parse_number_s32(input);
		CCC_CHECK(type_index.has_value(), "Failed to parse type number (type index).");
		
		CCC_EXPECT_CHAR(input, ')', "type number");
		
		type_number.file = *file_index;
		type_number.type = *type_index;
		
		// No '=' means this is just a reference to a previously defined type.
		if(*input != '=') {
			return std::make_unique<StabsType>(type_number);
		}
		input++;
	} else if(*input >= '0' && *input <= '9') {
		// This file has type numbers which are just a single number. This is
		// the more common case for games.
		std::optional<s32> type_index = parse_number_s32(input);
		CCC_CHECK(type_index.has_value(), "Failed to parse type number.");
		type_number.type = *type_index;
		
		if(*input != '=') {
			return std::make_unique<StabsType>(type_number);
		}
		input++;
	}
	
	CCC_CHECK(*input != '\0', "Unexpected end of input.");
	StabsTypeDescriptor descriptor;
	if((*input >= '0' && *input <= '9') || *input == '(') {
		descriptor = StabsTypeDescriptor::TYPE_REFERENCE;
	} else {
		char descriptor_char = *(input++);
		CCC_CHECK(descriptor_char != '\0', "Failed to parse type descriptor.");
		descriptor = (StabsTypeDescriptor) descriptor_char;
	}
	
	std::unique_ptr<StabsType> out_type;
	
	switch(descriptor) {
		case StabsTypeDescriptor::TYPE_REFERENCE: { // 0..9
			auto type_reference = std::make_unique<StabsTypeReferenceType>(type_number);
			auto type = parse_stabs_type(input);
			CCC_RETURN_IF_ERROR(type);
			type_reference->type = std::move(*type);
			out_type = std::move(type_reference);
			break;
		}
		case StabsTypeDescriptor::ARRAY: { // a
			auto array = std::make_unique<StabsArrayType>(type_number);
			auto index_type = parse_stabs_type(input);
			CCC_RETURN_IF_ERROR(index_type);
			array->index_type = std::move(*index_type);
			auto element_type = parse_stabs_type(input);
			CCC_RETURN_IF_ERROR(element_type);
			array->element_type = std::move(*element_type);
			out_type = std::move(array);
			break;
		}
		case StabsTypeDescriptor::ENUM: { // e
			auto enum_type = std::make_unique<StabsEnumType>(type_number);
			STABS_DEBUG_PRINTF("enum {\n");
			// Fields are "name:value," pairs, terminated by a ';'.
			while(*input != ';') {
				std::optional<std::string> name = parse_stabs_identifier(input, ':');
				CCC_CHECK(name.has_value(), "Failed to parse enum field name.");
				CCC_EXPECT_CHAR(input, ':', "enum");
				std::optional<s32> value = parse_number_s32(input);
				CCC_CHECK(value.has_value(), "Failed to parse enum value.");
				enum_type->fields.emplace_back(*value, std::move(*name));
				CCC_EXPECT_CHAR(input, ',', "enum");
			}
			input++;
			STABS_DEBUG_PRINTF("}\n");
			out_type = std::move(enum_type);
			break;
		}
		case StabsTypeDescriptor::FUNCTION: { // f
			auto function = std::make_unique<StabsFunctionType>(type_number);
			auto return_type = parse_stabs_type(input);
			CCC_RETURN_IF_ERROR(return_type);
			function->return_type = std::move(*return_type);
			out_type = std::move(function);
			break;
		}
		case StabsTypeDescriptor::VOLATILE_QUALIFIER: { // B
			auto volatile_qualifier = std::make_unique<StabsVolatileQualifierType>(type_number);
			auto type = parse_stabs_type(input);
			CCC_RETURN_IF_ERROR(type);
			volatile_qualifier->type = std::move(*type);
			out_type = std::move(volatile_qualifier);
			break;
		}
		case StabsTypeDescriptor::CONST_QUALIFIER: { // k
			auto const_qualifier = std::make_unique<StabsConstQualifierType>(type_number);
			auto type = parse_stabs_type(input);
			CCC_RETURN_IF_ERROR(type);
			const_qualifier->type = std::move(*type);
			out_type = std::move(const_qualifier);
			break;
		}
		case StabsTypeDescriptor::RANGE: { // r
			auto range = std::make_unique<StabsRangeType>(type_number);
			auto type = parse_stabs_type(input);
			CCC_RETURN_IF_ERROR(type);
			range->type = std::move(*type);
			CCC_EXPECT_CHAR(input, ';', "range type descriptor");
			// The bounds are stored as strings since they can exceed the range
			// of the numeric parsers.
			std::optional<std::string> low = parse_stabs_identifier(input, ';');
			CCC_CHECK(low.has_value(), "Failed to parse low part of range.");
			CCC_EXPECT_CHAR(input, ';', "low range value");
			std::optional<std::string> high = parse_stabs_identifier(input, ';');
			CCC_CHECK(high.has_value(), "Failed to parse high part of range.");
			CCC_EXPECT_CHAR(input, ';', "high range value");
			range->low = std::move(*low);
			range->high = std::move(*high);
			out_type = std::move(range);
			break;
		}
		case StabsTypeDescriptor::STRUCT: { // s
			auto struct_type = std::make_unique<StabsStructType>(type_number);
			STABS_DEBUG_PRINTF("struct {\n");
			std::optional<s64> struct_size = parse_number_s64(input);
			CCC_CHECK(struct_size.has_value(), "Failed to parse struct size.");
			struct_type->size = *struct_size;
			// An optional '!' introduces the base class list.
			if(*input == '!') {
				input++;
				std::optional<s32> base_class_count = parse_number_s32(input);
				CCC_CHECK(base_class_count.has_value(), "Failed to parse base class count.");
				CCC_EXPECT_CHAR(input, ',', "base class section");
				for(s64 i = 0; i < *base_class_count; i++) {
					StabsStructOrUnionType::BaseClass base_class;
					char is_virtual = *(input++);
					switch(is_virtual) {
						case '0': base_class.is_virtual = false; break;
						case '1': base_class.is_virtual = true; break;
						default: return CCC_FAILURE("Failed to parse base class (virtual character).");
					}
					Result<StabsStructOrUnionType::Visibility> visibility = parse_visibility_character(input);
					CCC_RETURN_IF_ERROR(visibility);
					base_class.visibility = *visibility;
					std::optional<s32> offset = parse_number_s32(input);
					CCC_CHECK(offset.has_value(), "Failed to parse base class offset.");
					base_class.offset = (s32) *offset;
					CCC_EXPECT_CHAR(input, ',', "base class section");
					auto base_class_type = parse_stabs_type(input);
					CCC_RETURN_IF_ERROR(base_class_type);
					base_class.type = std::move(*base_class_type);
					CCC_EXPECT_CHAR(input, ';', "base class section");
					struct_type->base_classes.emplace_back(std::move(base_class));
				}
			}
			auto fields = parse_field_list(input);
			CCC_RETURN_IF_ERROR(fields);
			struct_type->fields = std::move(*fields);
			auto member_functions = parse_member_functions(input);
			CCC_RETURN_IF_ERROR(member_functions);
			struct_type->member_functions = std::move(*member_functions);
			STABS_DEBUG_PRINTF("}\n");
			out_type = std::move(struct_type);
			break;
		}
		case StabsTypeDescriptor::UNION: { // u
			auto union_type = std::make_unique<StabsUnionType>(type_number);
			STABS_DEBUG_PRINTF("union {\n");
			std::optional<s64> union_size = parse_number_s64(input);
			CCC_CHECK(union_size.has_value(), "Failed to parse struct size.");
			union_type->size = *union_size;
			auto fields = parse_field_list(input);
			CCC_RETURN_IF_ERROR(fields);
			union_type->fields = std::move(*fields);
			auto member_functions = parse_member_functions(input);
			CCC_RETURN_IF_ERROR(member_functions);
			union_type->member_functions = std::move(*member_functions);
			STABS_DEBUG_PRINTF("}\n");
			out_type = std::move(union_type);
			break;
		}
		case StabsTypeDescriptor::CROSS_REFERENCE: { // x
			auto cross_reference = std::make_unique<StabsCrossReferenceType>(type_number);
			char cross_reference_type = *(input++);
			CCC_CHECK(cross_reference_type != '\0', "Failed to parse cross reference type.");
			switch(cross_reference_type) {
				case 'e': cross_reference->type = ast::ForwardDeclaredType::ENUM; break;
				case 's': cross_reference->type = ast::ForwardDeclaredType::STRUCT; break;
				case 'u': cross_reference->type = ast::ForwardDeclaredType::UNION; break;
				default:
					// Report the character that was actually read, not the
					// (never assigned) enum field.
					return CCC_FAILURE("Invalid cross reference type '%c'.", cross_reference_type);
			}
			Result<std::string> identifier = parse_dodgy_stabs_identifier(input, ':');
			CCC_RETURN_IF_ERROR(identifier);
			cross_reference->identifier = std::move(*identifier);
			cross_reference->name = cross_reference->identifier;
			CCC_EXPECT_CHAR(input, ':', "cross reference");
			out_type = std::move(cross_reference);
			break;
		}
		case StabsTypeDescriptor::FLOATING_POINT_BUILTIN: { // R
			auto fp_builtin = std::make_unique<StabsFloatingPointBuiltInType>(type_number);
			std::optional<s32> fpclass = parse_number_s32(input);
			CCC_CHECK(fpclass.has_value(), "Failed to parse floating point built-in class.");
			fp_builtin->fpclass = *fpclass;
			CCC_EXPECT_CHAR(input, ';', "floating point builtin");
			std::optional<s32> bytes = parse_number_s32(input);
			CCC_CHECK(bytes.has_value(), "Failed to parse floating point built-in.");
			fp_builtin->bytes = *bytes;
			CCC_EXPECT_CHAR(input, ';', "floating point builtin");
			// The third value is validated but discarded.
			std::optional<s32> value = parse_number_s32(input);
			CCC_CHECK(value.has_value(), "Failed to parse floating point built-in.");
			CCC_EXPECT_CHAR(input, ';', "floating point builtin");
			out_type = std::move(fp_builtin);
			break;
		}
		case StabsTypeDescriptor::METHOD: { // #
			auto method = std::make_unique<StabsMethodType>(type_number);
			if(*input == '#') {
				// Short form: "##return_type;".
				input++;
				auto return_type = parse_stabs_type(input);
				CCC_RETURN_IF_ERROR(return_type);
				method->return_type = std::move(*return_type);
				if(*input == ';') {
					input++;
				}
			} else {
				// Long form: "#class_type,return_type,param,...;".
				auto class_type = parse_stabs_type(input);
				CCC_RETURN_IF_ERROR(class_type);
				method->class_type = std::move(*class_type);
				CCC_EXPECT_CHAR(input, ',', "method");
				auto return_type = parse_stabs_type(input);
				CCC_RETURN_IF_ERROR(return_type);
				method->return_type = std::move(*return_type);
				while(*input != '\0') {
					if(*input == ';') {
						input++;
						break;
					}
					CCC_EXPECT_CHAR(input, ',', "method");
					auto parameter_type = parse_stabs_type(input);
					CCC_RETURN_IF_ERROR(parameter_type);
					method->parameter_types.emplace_back(std::move(*parameter_type));
				}
			}
			out_type = std::move(method);
			break;
		}
		case StabsTypeDescriptor::REFERENCE: { // &
			auto reference = std::make_unique<StabsReferenceType>(type_number);
			auto value_type = parse_stabs_type(input);
			CCC_RETURN_IF_ERROR(value_type);
			reference->value_type = std::move(*value_type);
			out_type = std::move(reference);
			break;
		}
		case StabsTypeDescriptor::POINTER: { // *
			auto pointer = std::make_unique<StabsPointerType>(type_number);
			auto value_type = parse_stabs_type(input);
			CCC_RETURN_IF_ERROR(value_type);
			pointer->value_type = std::move(*value_type);
			out_type = std::move(pointer);
			break;
		}
		case StabsTypeDescriptor::TYPE_ATTRIBUTE: { // @
			// The '@' descriptor is overloaded: followed by a type it denotes
			// a pointer to a non-static data member, otherwise a size
			// attribute wrapping another type.
			if((*input >= '0' && *input <= '9') || *input == '(') {
				auto member_pointer = std::make_unique<StabsPointerToDataMemberType>(type_number);
				auto class_type = parse_stabs_type(input);
				CCC_RETURN_IF_ERROR(class_type);
				member_pointer->class_type = std::move(*class_type);
				CCC_EXPECT_CHAR(input, ',', "pointer to non-static data member");
				auto member_type = parse_stabs_type(input);
				CCC_RETURN_IF_ERROR(member_type);
				member_pointer->member_type = std::move(*member_type);
				out_type = std::move(member_pointer);
			} else {
				auto type_attribute = std::make_unique<StabsSizeTypeAttributeType>(type_number);
				CCC_CHECK(*input == 's', "Weird value following '@' type descriptor.");
				input++;
				std::optional<s64> size_bits = parse_number_s64(input);
				CCC_CHECK(size_bits.has_value(), "Failed to parse type attribute.");
				type_attribute->size_bits = *size_bits;
				CCC_EXPECT_CHAR(input, ';', "type attribute");
				auto type = parse_stabs_type(input);
				CCC_RETURN_IF_ERROR(type);
				type_attribute->type = std::move(*type);
				out_type = std::move(type_attribute);
			}
			break;
		}
		case StabsTypeDescriptor::BUILTIN: { // -
			auto built_in = std::make_unique<StabsBuiltInType>(type_number);
			std::optional<s64> type_id = parse_number_s64(input);
			CCC_CHECK(type_id.has_value(), "Failed to parse built-in.");
			built_in->type_id = *type_id;
			CCC_EXPECT_CHAR(input, ';', "builtin");
			out_type = std::move(built_in);
			break;
		}
		default: {
			return CCC_FAILURE(
				"Invalid type descriptor '%c' (%02x).",
				(u32) descriptor, (u32) descriptor);
		}
	}
	
	return out_type;
}
// Parse the field list of a struct or union type. The list is terminated by a
// ';' character.
static Result<std::vector<StabsStructOrUnionType::Field>> parse_field_list(const char*& input)
{
	std::vector<StabsStructOrUnionType::Field> fields;
	
	while(*input != '\0') {
		if(*input == ';') {
			input++;
			break;
		}
		
		// Remember where this entry started so we can backtrack if it turns
		// out not to be a field at all.
		const char* before_field = input;
		StabsStructOrUnionType::Field field;
		
		Result<std::string> name = parse_dodgy_stabs_identifier(input, ':');
		CCC_RETURN_IF_ERROR(name);
		field.name = std::move(*name);
		CCC_EXPECT_CHAR(input, ':', "identifier");
		
		// An optional '/' prefix encodes the field's visibility.
		if(*input == '/') {
			input++;
			Result<StabsStructOrUnionType::Visibility> visibility = parse_visibility_character(input);
			CCC_RETURN_IF_ERROR(visibility);
			field.visibility = *visibility;
		}
		
		// A second ':' here means this entry is actually the start of the
		// member function list ("name::"), so rewind and stop.
		if(*input == ':') {
			input = before_field;
			break;
		}
		auto type = parse_stabs_type(input);
		CCC_RETURN_IF_ERROR(type);
		field.type = std::move(*type);
		
		if(field.name.size() >= 1 && field.name[0] == '$') {
			// Virtual function table pointers and virtual base class pointers.
			CCC_EXPECT_CHAR(input, ',', "field type");
			std::optional<s32> offset_bits = parse_number_s32(input);
			CCC_CHECK(offset_bits.has_value(), "Failed to parse field offset.");
			field.offset_bits = *offset_bits;
			CCC_EXPECT_CHAR(input, ';', "field offset");
		} else if(*input == ':') {
			// Static fields. An identifier follows instead of an offset and
			// size.
			input++;
			field.is_static = true;
			std::optional<std::string> type_name = parse_stabs_identifier(input, ';');
			CCC_CHECK(type_name.has_value(), "Failed to parse static field type name.");
			field.type_name = std::move(*type_name);
			CCC_EXPECT_CHAR(input, ';', "identifier");
		} else if(*input == ',') {
			// Normal fields, with a bit offset and a size in bits.
			input++;
			std::optional<s32> offset_bits = parse_number_s32(input);
			CCC_CHECK(offset_bits.has_value(), "Failed to parse field offset.");
			field.offset_bits = *offset_bits;
			CCC_EXPECT_CHAR(input, ',', "field offset");
			std::optional<s32> size_bits = parse_number_s32(input);
			CCC_CHECK(size_bits.has_value(), "Failed to parse field size.");
			field.size_bits = *size_bits;
			CCC_EXPECT_CHAR(input, ';', "field size");
		} else {
			return CCC_FAILURE("Expected ':' or ',', got '%c' (%hhx).", *input, *input);
		}
		
		STABS_DEBUG(print_field(field);)
		
		fields.emplace_back(std::move(field));
	}
	
	return fields;
}
// Parse the member function list of a struct or union type. Each entry is a
// name followed by one or more overloads, and the list is terminated by ';'.
static Result<std::vector<StabsStructOrUnionType::MemberFunctionSet>> parse_member_functions(const char*& input)
{
	// Check for if the next character is from an enclosing field list. If this
	// is the case, the next character will be ',' for normal fields and ':' for
	// static fields (see above).
	if(*input == ',' || *input == ':') {
		return std::vector<StabsStructOrUnionType::MemberFunctionSet>();
	}
	
	std::vector<StabsStructOrUnionType::MemberFunctionSet> member_functions;
	
	while(*input != '\0') {
		if(*input == ';') {
			input++;
			break;
		}
		
		StabsStructOrUnionType::MemberFunctionSet member_function_set;
		
		std::optional<std::string> name = parse_stabs_identifier(input, ':');
		CCC_CHECK(name.has_value(), "Failed to parse member function name.");
		member_function_set.name = std::move(*name);
		CCC_EXPECT_CHAR(input, ':', "member function");
		CCC_EXPECT_CHAR(input, ':', "member function");
		
		// Parse the overloads of this member function, terminated by ';'.
		while(*input != '\0') {
			if(*input == ';') {
				input++;
				break;
			}
			
			StabsStructOrUnionType::MemberFunction function;
			
			auto type = parse_stabs_type(input);
			CCC_RETURN_IF_ERROR(type);
			function.type = std::move(*type);
			
			CCC_EXPECT_CHAR(input, ':', "member function");
			// The identifier is parsed to validate the input, but its value
			// is currently discarded.
			std::optional<std::string> identifier = parse_stabs_identifier(input, ';');
			CCC_CHECK(identifier.has_value(), "Invalid member function identifier.");
			CCC_EXPECT_CHAR(input, ';', "member function");
			
			Result<StabsStructOrUnionType::Visibility> visibility = parse_visibility_character(input);
			CCC_RETURN_IF_ERROR(visibility);
			function.visibility = *visibility;
			
			// A single character encodes constness and volatility.
			char modifiers = *(input++);
			CCC_CHECK(modifiers != '\0', "Failed to parse member function modifiers.");
			switch(modifiers) {
				case 'A':
					function.is_const = false;
					function.is_volatile = false;
					break;
				case 'B':
					function.is_const = true;
					function.is_volatile = false;
					break;
				case 'C':
					function.is_const = false;
					function.is_volatile = true;
					break;
				case 'D':
					function.is_const = true;
					function.is_volatile = true;
					break;
				case '?':
				case '.':
					break;
				default:
					return CCC_FAILURE("Invalid member function modifiers.");
			}
			
			char flag = *(input++);
			CCC_CHECK(flag != '\0', "Failed to parse member function type.");
			switch(flag) {
				case '.': { // normal member function
					function.modifier = ast::MemberFunctionModifier::NONE;
					break;
				}
				case '?': { // static member function
					function.modifier = ast::MemberFunctionModifier::STATIC;
					break;
				}
				case '*': { // virtual member function
					std::optional<s32> vtable_index = parse_number_s32(input);
					CCC_CHECK(vtable_index.has_value(), "Failed to parse vtable index.");
					function.vtable_index = *vtable_index;
					CCC_EXPECT_CHAR(input, ';', "virtual member function");
					auto virtual_type = parse_stabs_type(input);
					CCC_RETURN_IF_ERROR(virtual_type);
					function.virtual_type = std::move(*virtual_type);
					CCC_EXPECT_CHAR(input, ';', "virtual member function");
					function.modifier = ast::MemberFunctionModifier::VIRTUAL;
					break;
				}
				default:
					return CCC_FAILURE("Invalid member function type.");
			}
			member_function_set.overloads.emplace_back(std::move(function));
		}
		
		STABS_DEBUG_PRINTF("member func: %s\n", member_function_set.name.c_str());
		
		member_functions.emplace_back(std::move(member_function_set));
	}
	return member_functions;
}
// Consume a single character and map it to a visibility level.
static Result<StabsStructOrUnionType::Visibility> parse_visibility_character(const char*& input)
{
	char c = *(input++);
	if(c == '0') return StabsStructOrUnionType::Visibility::PRIVATE;
	if(c == '1') return StabsStructOrUnionType::Visibility::PROTECTED;
	if(c == '2') return StabsStructOrUnionType::Visibility::PUBLIC;
	if(c == '9') return StabsStructOrUnionType::Visibility::PUBLIC_OPTIMIZED_OUT;
	return CCC_FAILURE("Failed to parse visibility character.");
}
std::optional<s32> parse_number_s32(const char*& input)
{
char* end;
s64 value = strtoll(input, &end, 10);
if(end == input) {
return std::nullopt;
}
input = end;
return (s32) value;
}
std::optional<s64> parse_number_s64(const char*& input)
{
char* end;
s64 value = strtoll(input, &end, 10);
if(end == input) {
return std::nullopt;
}
input = end;
return value;
}
// Scan forward to the terminator character and return everything before it,
// leaving the input pointing at the terminator. Returns std::nullopt if the
// end of the string is reached first (the input is then left at the '\0').
std::optional<std::string> parse_stabs_identifier(const char*& input, char terminator)
{
	const char* start = input;
	while(*input != '\0') {
		if(*input == terminator) {
			return std::string(start, input);
		}
		input++;
	}
	return std::nullopt;
}
// The complexity here is because the input may contain an unescaped namespace
// separator '::' even if the field terminator is supposed to be a colon, as
// well as the raw contents of character literals. See test/ccc/stabs_tests.cpp
// for some examples.
Result<std::string> parse_dodgy_stabs_identifier(const char*& input, char terminator)
{
	const char* begin = input;
	s32 template_depth = 0;
	
	for(; *input != '\0'; input++) {
		// Skip past character literals. Note that backslash escapes are not
		// handled, only the doubled-quote form directly below.
		if(*input == '\'') {
			input++;
			if(*input == '\'') {
				input++; // Handle character literals containing a single quote.
			}
			while(*input != '\'' && *input != '\0') {
				input++;
			}
			if(*input == '\0') {
				break;
			}
			input++;
		}
		// Keep track of the template depth so we know when to expect the
		// terminator character.
		if(*input == '<') {
			template_depth++;
		}
		if(*input == '>') {
			template_depth--;
		}
		// On success the input is left pointing at the terminator character.
		if(*input == terminator && template_depth == 0) {
			return std::string(begin, input);
		}
	}
	
	return CCC_FAILURE(STAB_TRUNCATED_ERROR_MESSAGE);
}
// Debug-only dump of a parsed field: offset and size in bytes, then in bits,
// then the field name. The STABS_DEBUG wrapper presumably compiles this out
// in normal builds (macro defined elsewhere in this file).
STABS_DEBUG(

static void print_field(const StabsStructOrUnionType::Field& field)
{
	printf("\t%04x %04x %04x %04x %s\n", field.offset_bits / 8, field.size_bits / 8, field.offset_bits, field.size_bits, field.name.c_str());
}

)
// Convert a field visibility value to a display string. Returns "none" for
// Visibility::NONE and any unrecognized value.
const char* stabs_field_visibility_to_string(StabsStructOrUnionType::Visibility visibility)
{
	// The default case makes this switch exhaustive, so no trailing return
	// statement is needed (the previous unreachable `return "";` was removed).
	switch(visibility) {
		case StabsStructOrUnionType::Visibility::PRIVATE: return "private";
		case StabsStructOrUnionType::Visibility::PROTECTED: return "protected";
		case StabsStructOrUnionType::Visibility::PUBLIC: return "public";
		case StabsStructOrUnionType::Visibility::PUBLIC_OPTIMIZED_OUT: return "public_optimizedout";
		default: return "none";
	}
}
}

379
3rdparty/ccc/src/ccc/stabs.h vendored Normal file
View File

@@ -0,0 +1,379 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include "ast.h"
#include "util.h"
namespace ccc {
// Symbol descriptor characters from STABS symbol strings. The descriptor
// determines what kind of entity a symbol describes; each enumerator's value
// is the literal character used in the stab.
enum class StabsSymbolDescriptor : u8 {
	LOCAL_VARIABLE = '_',
	REFERENCE_PARAMETER_A = 'a',
	LOCAL_FUNCTION = 'f',
	GLOBAL_FUNCTION = 'F',
	GLOBAL_VARIABLE = 'G',
	REGISTER_PARAMETER = 'P',
	VALUE_PARAMETER = 'p',
	REGISTER_VARIABLE = 'r',
	STATIC_GLOBAL_VARIABLE = 'S',
	TYPE_NAME = 't',
	ENUM_STRUCT_OR_TYPE_TAG = 'T',
	STATIC_LOCAL_VARIABLE = 'V',
	REFERENCE_PARAMETER_V = 'v'
};

struct StabsType;

// A single parsed STABS symbol: its descriptor character, its name, and the
// type tree parsed from the rest of the symbol string.
struct StabsSymbol {
	StabsSymbolDescriptor descriptor;
	std::string name;
	std::unique_ptr<StabsType> type;
};

// Parse a complete STABS symbol string, advancing the cursor as it goes.
Result<StabsSymbol> parse_stabs_symbol(const char*& input);
// Type descriptor characters. Most enumerators map directly to the single
// character used in the STABS string; TYPE_REFERENCE and
// POINTER_TO_DATA_MEMBER use synthetic values (0xef/0xee) because their real
// encodings overlap with other cases.
enum class StabsTypeDescriptor : u8 {
	TYPE_REFERENCE = 0xef, // '0'..'9','('
	ARRAY = 'a',
	ENUM = 'e',
	FUNCTION = 'f',
	CONST_QUALIFIER = 'k',
	RANGE = 'r',
	STRUCT = 's',
	UNION = 'u',
	CROSS_REFERENCE = 'x',
	VOLATILE_QUALIFIER = 'B',
	FLOATING_POINT_BUILTIN = 'R',
	METHOD = '#',
	REFERENCE = '&',
	POINTER = '*',
	TYPE_ATTRIBUTE = '@',
	POINTER_TO_DATA_MEMBER = 0xee, // also '@'
	BUILTIN = '-'
};

struct StabsBaseClass;
struct StabsField;
struct StabsMemberFunctionSet;

// e.g. for "123=*456" 123 would be the type_number, the type descriptor would
// be of type POINTER and StabsPointerType::value_type would point to a type
// with type_number = 456.
// Base class for all parsed STABS type nodes.
struct StabsType {
	StabsTypeNumber type_number;
	// The name field is only populated for root types and cross references.
	std::optional<std::string> name;
	bool is_typedef = false;
	bool is_root = false;
	// Empty if this node is only a reference to a previously defined type
	// (looked up by type_number) rather than a definition.
	std::optional<StabsTypeDescriptor> descriptor;
	
	StabsType(StabsTypeNumber n) : type_number(n) {}
	StabsType(StabsTypeDescriptor d) : descriptor(d) {}
	StabsType(StabsTypeNumber n, StabsTypeDescriptor d) : type_number(n), descriptor(d) {}
	virtual ~StabsType() {}
	
	// Checked downcast: asserts that the descriptor matches the target
	// subclass before casting.
	template <typename SubType>
	SubType& as()
	{
		CCC_ASSERT(descriptor == SubType::DESCRIPTOR);
		return *static_cast<SubType*>(this);
	}
	
	template <typename SubType>
	const SubType& as() const
	{
		CCC_ASSERT(descriptor == SubType::DESCRIPTOR);
		return *static_cast<const SubType*>(this);
	}
	
	// Recursively collect all nodes in this subtree that have both a valid
	// type number and a definition (a descriptor), keyed by type number.
	virtual void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const
	{
		if(type_number.valid() && descriptor.has_value()) {
			output.emplace(type_number, this);
		}
	}
};
// A reference to another type, e.g. "1=2". STABS also encodes void as a type
// that references itself.
struct StabsTypeReferenceType : StabsType {
	std::unique_ptr<StabsType> type;
	
	StabsTypeReferenceType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::TYPE_REFERENCE;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		type->enumerate_numbered_types(output);
	}
};

// An array type, with a separate index type (a range) and element type.
struct StabsArrayType : StabsType {
	std::unique_ptr<StabsType> index_type;
	std::unique_ptr<StabsType> element_type;
	
	StabsArrayType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::ARRAY;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		index_type->enumerate_numbered_types(output);
		element_type->enumerate_numbered_types(output);
	}
};

// An enum type: a list of (value, name) pairs.
struct StabsEnumType : StabsType {
	std::vector<std::pair<s32, std::string>> fields;
	
	StabsEnumType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::ENUM;
};

// A function type. Only the return type is recorded here.
struct StabsFunctionType : StabsType {
	std::unique_ptr<StabsType> return_type;
	
	StabsFunctionType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::FUNCTION;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		return_type->enumerate_numbered_types(output);
	}
};

// A volatile-qualified wrapper around another type.
struct StabsVolatileQualifierType : StabsType {
	std::unique_ptr<StabsType> type;
	
	StabsVolatileQualifierType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::VOLATILE_QUALIFIER;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		type->enumerate_numbered_types(output);
	}
};

// A const-qualified wrapper around another type.
struct StabsConstQualifierType : StabsType {
	std::unique_ptr<StabsType> type;
	
	StabsConstQualifierType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::CONST_QUALIFIER;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		type->enumerate_numbered_types(output);
	}
};

// A range type. The bounds are kept as strings since they may not fit in a
// 64-bit integer (see classify_range in stabs_to_ast.cpp).
struct StabsRangeType : StabsType {
	std::unique_ptr<StabsType> type;
	std::string low;
	std::string high; // Some compilers wrote out a wrapped around value here for zero (or variable?) length arrays.
	
	StabsRangeType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::RANGE;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		type->enumerate_numbered_types(output);
	}
};
// Common representation for struct and union types: base classes, fields and
// member functions.
struct StabsStructOrUnionType : StabsType {
	// Member visibility as written in the stab ('0', '1', '2' or '9').
	enum class Visibility : u8 {
		NONE,
		PRIVATE,
		PROTECTED,
		PUBLIC,
		PUBLIC_OPTIMIZED_OUT
	};
	
	struct BaseClass {
		bool is_virtual;
		Visibility visibility;
		s32 offset = -1;
		std::unique_ptr<StabsType> type;
	};
	
	struct Field {
		std::string name;
		Visibility visibility = Visibility::NONE;
		std::unique_ptr<StabsType> type;
		bool is_static = false;
		s32 offset_bits = 0;
		s32 size_bits = 0;
		std::string type_name;
	};
	
	struct MemberFunction {
		std::unique_ptr<StabsType> type;
		// Only set for virtual member functions.
		std::unique_ptr<StabsType> virtual_type;
		Visibility visibility;
		bool is_const = false;
		bool is_volatile = false;
		ast::MemberFunctionModifier modifier = ast::MemberFunctionModifier::NONE;
		s32 vtable_index = -1;
	};
	
	// All overloads sharing a single member function name.
	struct MemberFunctionSet {
		std::string name;
		std::vector<MemberFunction> overloads;
	};
	
	s64 size = -1;
	std::vector<BaseClass> base_classes;
	std::vector<Field> fields;
	std::vector<MemberFunctionSet> member_functions;
	std::unique_ptr<StabsType> first_base_class;
	
	StabsStructOrUnionType(StabsTypeNumber n, StabsTypeDescriptor d) : StabsType(n, d) {}
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		for(const BaseClass& base_class : base_classes) {
			base_class.type->enumerate_numbered_types(output);
		}
		for(const Field& field : fields) {
			field.type->enumerate_numbered_types(output);
		}
		for(const MemberFunctionSet& member_function_set : member_functions) {
			for(const MemberFunction& member_function : member_function_set.overloads) {
				member_function.type->enumerate_numbered_types(output);
				if(member_function.virtual_type.get()) {
					member_function.virtual_type->enumerate_numbered_types(output);
				}
			}
		}
		if(first_base_class.get()) {
			first_base_class->enumerate_numbered_types(output);
		}
	}
};

struct StabsStructType : StabsStructOrUnionType {
	StabsStructType(StabsTypeNumber n) : StabsStructOrUnionType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::STRUCT;
};

struct StabsUnionType : StabsStructOrUnionType {
	StabsUnionType(StabsTypeNumber n) : StabsStructOrUnionType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::UNION;
};
// A cross reference to a forward-declared struct, union or enum, identified
// by name rather than by type number.
struct StabsCrossReferenceType : StabsType {
	ast::ForwardDeclaredType type;
	std::string identifier;
	
	StabsCrossReferenceType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::CROSS_REFERENCE;
};

// A floating point built-in type, described by a class code and a byte size.
struct StabsFloatingPointBuiltInType : StabsType {
	s32 fpclass = -1;
	s32 bytes = -1;
	
	StabsFloatingPointBuiltInType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::FLOATING_POINT_BUILTIN;
};

// A member function type, including its return type, the class it belongs to
// (if present) and its parameter types.
struct StabsMethodType : StabsType {
	std::unique_ptr<StabsType> return_type;
	std::optional<std::unique_ptr<StabsType>> class_type;
	std::vector<std::unique_ptr<StabsType>> parameter_types;
	
	StabsMethodType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::METHOD;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		return_type->enumerate_numbered_types(output);
		if(class_type.has_value()) {
			(*class_type)->enumerate_numbered_types(output);
		}
		for(const std::unique_ptr<StabsType>& parameter_type : parameter_types) {
			parameter_type->enumerate_numbered_types(output);
		}
	}
};

// A C++ reference type.
struct StabsReferenceType : StabsType {
	std::unique_ptr<StabsType> value_type;
	
	StabsReferenceType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::REFERENCE;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		value_type->enumerate_numbered_types(output);
	}
};

// A pointer type.
struct StabsPointerType : StabsType {
	std::unique_ptr<StabsType> value_type;
	
	StabsPointerType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::POINTER;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		value_type->enumerate_numbered_types(output);
	}
};

// A type attribute that overrides the size (in bits) of the wrapped type.
struct StabsSizeTypeAttributeType : StabsType {
	s64 size_bits = -1;
	std::unique_ptr<StabsType> type;
	
	StabsSizeTypeAttributeType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::TYPE_ATTRIBUTE;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		type->enumerate_numbered_types(output);
	}
};

// A pointer to data member type, e.g. int SomeClass::*.
struct StabsPointerToDataMemberType : StabsType {
	std::unique_ptr<StabsType> class_type;
	std::unique_ptr<StabsType> member_type;
	
	StabsPointerToDataMemberType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::POINTER_TO_DATA_MEMBER;
	
	void enumerate_numbered_types(std::map<StabsTypeNumber, const StabsType*>& output) const override
	{
		StabsType::enumerate_numbered_types(output);
		class_type->enumerate_numbered_types(output);
		member_type->enumerate_numbered_types(output);
	}
};

// A built-in type identified by a numeric ID.
struct StabsBuiltInType : StabsType {
	s64 type_id = -1;
	
	StabsBuiltInType(StabsTypeNumber n) : StabsType(n, DESCRIPTOR) {}
	static const constexpr StabsTypeDescriptor DESCRIPTOR = StabsTypeDescriptor::BUILTIN;
};

// Error message used when a stab string ends before parsing is complete.
extern const char* STAB_TRUNCATED_ERROR_MESSAGE;

// Parse a whole type string into a tree of StabsType nodes.
Result<std::unique_ptr<StabsType>> parse_top_level_stabs_type(const char*& input);

// Low-level parsing helpers, defined in stabs.cpp.
std::optional<s32> parse_number_s32(const char*& input);
std::optional<s64> parse_number_s64(const char*& input);
std::optional<std::string> parse_stabs_identifier(const char*& input, char terminator);
Result<std::string> parse_dodgy_stabs_identifier(const char*& input, char terminator);

const char* stabs_field_visibility_to_string(StabsStructOrUnionType::Visibility visibility);
}

834
3rdparty/ccc/src/ccc/stabs_to_ast.cpp vendored Normal file
View File

@@ -0,0 +1,834 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "stabs_to_ast.h"
#include "importer_flags.h"
#define AST_DEBUG(...) //__VA_ARGS__
#define AST_DEBUG_PRINTF(...) AST_DEBUG(printf(__VA_ARGS__);)
namespace ccc {
// Classification of a member function, produced by check_member_function:
// its (possibly demangled) name and flags describing what kind of member
// function it is.
struct MemberFunctionInfo {
	std::string name;
	bool is_constructor_or_destructor = false;
	bool is_special_member_function = false;
	bool is_operator_member_function = false;
};

// Forward declarations for the static helpers defined later in this file.
static bool is_void_like(const StabsType& type);
static Result<ast::BuiltInClass> classify_range(const StabsRangeType& type);
static Result<std::unique_ptr<ast::Node>> field_to_ast(
	const StabsStructOrUnionType::Field& field,
	const StabsType& enclosing_struct,
	const StabsToAstState& state,
	s32 depth);
static Result<bool> detect_bitfield(const StabsStructOrUnionType::Field& field, const StabsToAstState& state);
static Result<std::vector<std::unique_ptr<ast::Node>>> member_functions_to_ast(
	const StabsStructOrUnionType& type, const StabsToAstState& state, s32 depth);
static MemberFunctionInfo check_member_function(
	const std::string& mangled_name,
	std::string_view type_name_no_template_args,
	const DemanglerFunctions& demangler,
	u32 importer_flags);
// Recursively convert a STABS type tree into an AST node.
//
// enclosing_struct: the struct/union currently being converted, used to
//   detect self-referential types (e.g. 'this' parameters of unnamed types).
// depth: recursion depth, used for debug indentation and to bail out of
//   runaway recursion.
// substitute_type_name: if true, named types are emitted as TypeName
//   references instead of being expanded inline.
// force_substitute: extends substitution to anonymous enclosing types.
Result<std::unique_ptr<ast::Node>> stabs_type_to_ast(
	const StabsType& type,
	const StabsType* enclosing_struct,
	const StabsToAstState& state,
	s32 depth,
	bool substitute_type_name,
	bool force_substitute)
{
	AST_DEBUG_PRINTF("%-*stype desc=%hhx '%c' num=(%d,%d) name=%s\n",
		depth * 4, "",
		type.descriptor.has_value() ? (u8) *type.descriptor : 'X',
		(type.descriptor.has_value() && isprint((u8) *type.descriptor)) ? (u8) *type.descriptor : '!',
		type.type_number.file, type.type_number.type,
		type.name.has_value() ? type.name->c_str() : "");
	
	// Guard against infinite recursion caused by malformed input. In
	// non-strict mode an Error node is produced instead of failing outright.
	if(depth > 200) {
		const char* error_message = "Call depth greater than 200 in stabs_type_to_ast, probably infinite recursion.";
		if(state.importer_flags & STRICT_PARSING) {
			return CCC_FAILURE(error_message);
		} else {
			CCC_WARN(error_message);
			auto error = std::make_unique<ast::Error>();
			error->message = error_message;
			return std::unique_ptr<ast::Node>(std::move(error));
		}
	}
	
	// This makes sure that types are replaced with their type name in cases
	// where that would be more appropriate.
	if(type.name.has_value()) {
		bool try_substitute = depth > 0 && (type.is_root
			|| type.descriptor == StabsTypeDescriptor::RANGE
			|| type.descriptor == StabsTypeDescriptor::BUILTIN);
		// GCC emits anonymous enums with a name of " " since apparently some
		// debuggers can't handle zero-length names.
		bool is_name_empty = type.name == "" || type.name == " ";
		// Cross references will be handled below.
		bool is_cross_reference = type.descriptor == StabsTypeDescriptor::CROSS_REFERENCE;
		bool is_void = is_void_like(type);
		if((substitute_type_name || try_substitute) && !is_name_empty && !is_cross_reference && !is_void) {
			auto type_name = std::make_unique<ast::TypeName>();
			type_name->source = ast::TypeNameSource::REFERENCE;
			type_name->unresolved_stabs = std::make_unique<ast::TypeName::UnresolvedStabs>();
			type_name->unresolved_stabs->type_name = *type.name;
			type_name->unresolved_stabs->referenced_file_handle = state.file_handle;
			type_name->unresolved_stabs->stabs_type_number = type.type_number;
			return std::unique_ptr<ast::Node>(std::move(type_name));
		}
	}
	
	// This prevents infinite recursion when an automatically generated member
	// function references an unnamed type.
	bool can_compare_type_numbers = type.type_number.valid() && enclosing_struct && enclosing_struct->type_number.valid();
	if(force_substitute && can_compare_type_numbers && type.type_number == enclosing_struct->type_number) {
		// It's probably a this parameter (or return type) for an unnamed type.
		auto type_name = std::make_unique<ast::TypeName>();
		type_name->source = ast::TypeNameSource::UNNAMED_THIS;
		type_name->unresolved_stabs = std::make_unique<ast::TypeName::UnresolvedStabs>();
		type_name->unresolved_stabs->type_name = enclosing_struct->name.has_value() ? *enclosing_struct->name : "";
		type_name->unresolved_stabs->referenced_file_handle = state.file_handle;
		type_name->unresolved_stabs->stabs_type_number = type.type_number;
		return std::unique_ptr<ast::Node>(std::move(type_name));
	}
	
	if(!type.descriptor.has_value()) {
		// The definition of the type has been defined previously, so we have to
		// look it up by its type number.
		CCC_CHECK(type.type_number.valid(), "Cannot lookup type (type is anonymous).");
		auto stabs_type = state.stabs_types->find(type.type_number);
		if(stabs_type == state.stabs_types->end()) {
			std::string error_message = "Failed to lookup STABS type by its type number ("
				+ std::to_string(type.type_number.file) + "," + std::to_string(type.type_number.type) + ").";
			if(state.importer_flags & STRICT_PARSING) {
				return CCC_FAILURE("%s", error_message.c_str());
			} else {
				CCC_WARN("%s", error_message.c_str());
				std::unique_ptr<ast::Error> error = std::make_unique<ast::Error>();
				error->message = std::move(error_message);
				return std::unique_ptr<ast::Node>(std::move(error));
			}
		}
		return stabs_type_to_ast(
			*stabs_type->second,
			enclosing_struct,
			state,
			depth + 1,
			substitute_type_name,
			force_substitute);
	}
	
	std::unique_ptr<ast::Node> result;
	
	switch(*type.descriptor) {
		case StabsTypeDescriptor::TYPE_REFERENCE: {
			const auto& stabs_type_ref = type.as<StabsTypeReferenceType>();
			if(!type.type_number.valid() || !stabs_type_ref.type->type_number.valid() || stabs_type_ref.type->type_number != type.type_number) {
				auto node = stabs_type_to_ast(
					*stabs_type_ref.type,
					enclosing_struct,
					state,
					depth + 1,
					substitute_type_name,
					force_substitute);
				CCC_RETURN_IF_ERROR(node);
				result = std::move(*node);
			} else {
				// I still don't know why in STABS void is a reference to
				// itself, maybe because I'm not a philosopher.
				auto builtin = std::make_unique<ast::BuiltIn>();
				builtin->bclass = ast::BuiltInClass::VOID_TYPE;
				result = std::move(builtin);
			}
			break;
		}
		case StabsTypeDescriptor::ARRAY: {
			auto array = std::make_unique<ast::Array>();
			const auto& stabs_array = type.as<StabsArrayType>();
			auto element_node = stabs_type_to_ast(
				*stabs_array.element_type,
				enclosing_struct,
				state,
				depth + 1,
				true,
				force_substitute);
			CCC_RETURN_IF_ERROR(element_node);
			array->element_type = std::move(*element_node);
			// The element count is derived from the bounds of the index type,
			// which must be a zero-based range.
			const StabsRangeType& index = stabs_array.index_type->as<StabsRangeType>();
			char* end = nullptr;
			const char* low = index.low.c_str();
			s64 low_value = strtoll(low, &end, 10);
			CCC_CHECK(end != low, "Failed to parse low part of range as integer.");
			CCC_CHECK(low_value == 0, "Invalid index type for array.");
			const char* high = index.high.c_str();
			s64 high_value = strtoll(high, &end, 10);
			CCC_CHECK(end != high, "Failed to parse high part of range as integer.");
			if(high_value == 4294967295) {
				// Some compilers wrote out a wrapped around value here.
				array->element_count = 0;
			} else {
				array->element_count = (s32) high_value + 1;
			}
			result = std::move(array);
			break;
		}
		case StabsTypeDescriptor::ENUM: {
			auto inline_enum = std::make_unique<ast::Enum>();
			const auto& stabs_enum = type.as<StabsEnumType>();
			inline_enum->constants = stabs_enum.fields;
			result = std::move(inline_enum);
			break;
		}
		case StabsTypeDescriptor::FUNCTION: {
			auto function = std::make_unique<ast::Function>();
			auto node = stabs_type_to_ast(
				*type.as<StabsFunctionType>().return_type,
				enclosing_struct,
				state,
				depth + 1,
				true,
				force_substitute);
			CCC_RETURN_IF_ERROR(node);
			function->return_type = std::move(*node);
			result = std::move(function);
			break;
		}
		case StabsTypeDescriptor::VOLATILE_QUALIFIER: {
			const auto& volatile_qualifier = type.as<StabsVolatileQualifierType>();
			auto node = stabs_type_to_ast(
				*volatile_qualifier.type.get(),
				enclosing_struct,
				state,
				depth + 1,
				substitute_type_name,
				force_substitute);
			CCC_RETURN_IF_ERROR(node);
			result = std::move(*node);
			result->is_volatile = true;
			break;
		}
		case StabsTypeDescriptor::CONST_QUALIFIER: {
			const auto& const_qualifier = type.as<StabsConstQualifierType>();
			auto node = stabs_type_to_ast(
				*const_qualifier.type.get(),
				enclosing_struct,
				state,
				depth + 1,
				substitute_type_name,
				force_substitute);
			// Bug fix: the error check was missing here, so a failed
			// conversion of the inner type would dereference an error Result
			// (the VOLATILE_QUALIFIER case above already had this check).
			CCC_RETURN_IF_ERROR(node);
			result = std::move(*node);
			result->is_const = true;
			break;
		}
		case StabsTypeDescriptor::RANGE: {
			auto builtin = std::make_unique<ast::BuiltIn>();
			Result<ast::BuiltInClass> bclass = classify_range(type.as<StabsRangeType>());
			CCC_RETURN_IF_ERROR(bclass);
			builtin->bclass = *bclass;
			result = std::move(builtin);
			break;
		}
		case StabsTypeDescriptor::STRUCT:
		case StabsTypeDescriptor::UNION: {
			const StabsStructOrUnionType* stabs_struct_or_union;
			if(type.descriptor == StabsTypeDescriptor::STRUCT) {
				stabs_struct_or_union = &type.as<StabsStructType>();
			} else {
				stabs_struct_or_union = &type.as<StabsUnionType>();
			}
			auto struct_or_union = std::make_unique<ast::StructOrUnion>();
			struct_or_union->is_struct = type.descriptor == StabsTypeDescriptor::STRUCT;
			struct_or_union->size_bits = (s32) stabs_struct_or_union->size * 8;
			for(const StabsStructOrUnionType::BaseClass& stabs_base_class : stabs_struct_or_union->base_classes) {
				auto base_class = stabs_type_to_ast(
					*stabs_base_class.type,
					&type,
					state,
					depth + 1,
					true,
					force_substitute);
				CCC_RETURN_IF_ERROR(base_class);
				(*base_class)->offset_bytes = stabs_base_class.offset;
				(*base_class)->set_access_specifier(stabs_field_visibility_to_access_specifier(stabs_base_class.visibility), state.importer_flags);
				if(stabs_base_class.is_virtual) {
					(*base_class)->is_virtual_base_class = true;
				}
				struct_or_union->base_classes.emplace_back(std::move(*base_class));
			}
			AST_DEBUG_PRINTF("%-*s beginfields\n", depth * 4, "");
			for(const StabsStructOrUnionType::Field& field : stabs_struct_or_union->fields) {
				auto node = field_to_ast(field, type, state, depth);
				CCC_RETURN_IF_ERROR(node);
				struct_or_union->fields.emplace_back(std::move(*node));
			}
			AST_DEBUG_PRINTF("%-*s endfields\n", depth * 4, "");
			AST_DEBUG_PRINTF("%-*s beginmemberfuncs\n", depth * 4, "");
			Result<std::vector<std::unique_ptr<ast::Node>>> member_functions =
				member_functions_to_ast(*stabs_struct_or_union, state, depth);
			CCC_RETURN_IF_ERROR(member_functions);
			struct_or_union->member_functions = std::move(*member_functions);
			AST_DEBUG_PRINTF("%-*s endmemberfuncs\n", depth * 4, "");
			result = std::move(struct_or_union);
			break;
		}
		case StabsTypeDescriptor::CROSS_REFERENCE: {
			const auto& cross_reference = type.as<StabsCrossReferenceType>();
			auto type_name = std::make_unique<ast::TypeName>();
			type_name->source = ast::TypeNameSource::CROSS_REFERENCE;
			type_name->unresolved_stabs = std::make_unique<ast::TypeName::UnresolvedStabs>();
			type_name->unresolved_stabs->type_name = cross_reference.identifier;
			type_name->unresolved_stabs->type = cross_reference.type;
			result = std::move(type_name);
			break;
		}
		case ccc::StabsTypeDescriptor::FLOATING_POINT_BUILTIN: {
			const auto& fp_builtin = type.as<StabsFloatingPointBuiltInType>();
			auto builtin = std::make_unique<ast::BuiltIn>();
			switch(fp_builtin.bytes) {
				case 1: builtin->bclass = ast::BuiltInClass::UNSIGNED_8; break;
				case 2: builtin->bclass = ast::BuiltInClass::UNSIGNED_16; break;
				case 4: builtin->bclass = ast::BuiltInClass::UNSIGNED_32; break;
				case 8: builtin->bclass = ast::BuiltInClass::UNSIGNED_64; break;
				case 16: builtin->bclass = ast::BuiltInClass::UNSIGNED_128; break;
				default: builtin->bclass = ast::BuiltInClass::UNSIGNED_8; break;
			}
			result = std::move(builtin);
			break;
		}
		case StabsTypeDescriptor::METHOD: {
			const auto& stabs_method = type.as<StabsMethodType>();
			auto function = std::make_unique<ast::Function>();
			auto return_node = stabs_type_to_ast(
				*stabs_method.return_type.get(),
				enclosing_struct,
				state,
				depth + 1,
				true,
				true);
			CCC_RETURN_IF_ERROR(return_node);
			function->return_type = std::move(*return_node);
			function->parameters.emplace();
			for(const std::unique_ptr<StabsType>& parameter_type : stabs_method.parameter_types) {
				auto parameter_node = stabs_type_to_ast(
					*parameter_type,
					enclosing_struct,
					state,
					depth + 1,
					true,
					true);
				CCC_RETURN_IF_ERROR(parameter_node);
				function->parameters->emplace_back(std::move(*parameter_node));
			}
			result = std::move(function);
			break;
		}
		case StabsTypeDescriptor::POINTER: {
			auto pointer = std::make_unique<ast::PointerOrReference>();
			pointer->is_pointer = true;
			auto value_node = stabs_type_to_ast(
				*type.as<StabsPointerType>().value_type,
				enclosing_struct,
				state,
				depth + 1,
				true,
				force_substitute);
			CCC_RETURN_IF_ERROR(value_node);
			pointer->value_type = std::move(*value_node);
			result = std::move(pointer);
			break;
		}
		case StabsTypeDescriptor::REFERENCE: {
			auto reference = std::make_unique<ast::PointerOrReference>();
			reference->is_pointer = false;
			auto value_node = stabs_type_to_ast(
				*type.as<StabsReferenceType>().value_type,
				enclosing_struct,
				state,
				depth + 1,
				true,
				force_substitute);
			CCC_RETURN_IF_ERROR(value_node);
			reference->value_type = std::move(*value_node);
			result = std::move(reference);
			break;
		}
		case StabsTypeDescriptor::TYPE_ATTRIBUTE: {
			const auto& stabs_type_attribute = type.as<StabsSizeTypeAttributeType>();
			auto node = stabs_type_to_ast(
				*stabs_type_attribute.type,
				enclosing_struct,
				state,
				depth + 1,
				substitute_type_name,
				force_substitute);
			CCC_RETURN_IF_ERROR(node);
			result = std::move(*node);
			result->size_bits = (s32) stabs_type_attribute.size_bits;
			break;
		}
		case StabsTypeDescriptor::POINTER_TO_DATA_MEMBER: {
			const auto& stabs_member_pointer = type.as<StabsPointerToDataMemberType>();
			auto member_pointer = std::make_unique<ast::PointerToDataMember>();
			auto class_node = stabs_type_to_ast(
				*stabs_member_pointer.class_type.get(),
				enclosing_struct,
				state,
				depth + 1,
				true,
				true);
			CCC_RETURN_IF_ERROR(class_node);
			member_pointer->class_type = std::move(*class_node);
			auto member_node = stabs_type_to_ast(
				*stabs_member_pointer.member_type.get(),
				enclosing_struct,
				state,
				depth + 1,
				true,
				true);
			CCC_RETURN_IF_ERROR(member_node);
			member_pointer->member_type = std::move(*member_node);
			result = std::move(member_pointer);
			break;
		}
		case StabsTypeDescriptor::BUILTIN: {
			// The only built-in type ID handled is 16 (bool).
			CCC_CHECK(type.as<StabsBuiltInType>().type_id == 16,
				"Unknown built-in type!");
			auto builtin = std::make_unique<ast::BuiltIn>();
			builtin->bclass = ast::BuiltInClass::BOOL_8;
			result = std::move(builtin);
			break;
		}
	}
	
	CCC_CHECK(result, "Result of stabs_type_to_ast call is nullptr.");
	return result;
}
// Unfortunately, a common case seems to be that various types (most
// commonly __builtin_va_list) are indistinguishable from void or void*, so
// we have to output them as a void built-in.
static bool is_void_like(const StabsType& type)
{
	if(!type.descriptor.has_value()) {
		return false;
	}
	if(*type.descriptor == StabsTypeDescriptor::POINTER) {
		// A pointer is void-like if it ultimately points at something void-like.
		return is_void_like(*type.as<StabsPointerType>().value_type.get());
	}
	if(*type.descriptor == StabsTypeDescriptor::TYPE_REFERENCE) {
		// In STABS, void is encoded as a type that references itself.
		return type.as<StabsTypeReferenceType>().type->type_number == type.type_number;
	}
	return false;
}
// Determine the built-in class (signedness and width) of a range type from
// its low/high bounds. Bounds may be written in decimal or (when they start
// with '0') octal, and may be too large for a 64-bit integer, hence the
// string comparisons first.
static Result<ast::BuiltInClass> classify_range(const StabsRangeType& type)
{
	const char* low = type.low.c_str();
	const char* high = type.high.c_str();
	
	// Handle some special cases and values that are too large to easily store
	// in a 64-bit integer.
	static const struct { const char* low; const char* high; ast::BuiltInClass classification; } strings[] = {
		{"4", "0", ast::BuiltInClass::FLOAT_32},
		{"000000000000000000000000", "001777777777777777777777", ast::BuiltInClass::UNSIGNED_64},
		{"00000000000000000000000000000000000000000000", "00000000000000000000001777777777777777777777", ast::BuiltInClass::UNSIGNED_64},
		{"0000000000000", "01777777777777777777777", ast::BuiltInClass::UNSIGNED_64}, // IOP
		{"0", "18446744073709551615", ast::BuiltInClass::UNSIGNED_64},
		{"001000000000000000000000", "000777777777777777777777", ast::BuiltInClass::SIGNED_64},
		{"00000000000000000000001000000000000000000000", "00000000000000000000000777777777777777777777", ast::BuiltInClass::SIGNED_64},
		{"01000000000000000000000", "0777777777777777777777", ast::BuiltInClass::SIGNED_64}, // IOP
		{"-9223372036854775808", "9223372036854775807", ast::BuiltInClass::SIGNED_64},
		{"8", "0", ast::BuiltInClass::FLOAT_64},
		{"00000000000000000000000000000000000000000000", "03777777777777777777777777777777777777777777", ast::BuiltInClass::UNSIGNED_128},
		{"02000000000000000000000000000000000000000000", "01777777777777777777777777777777777777777777", ast::BuiltInClass::SIGNED_128},
		{"000000000000000000000000", "0377777777777777777777777777777777", ast::BuiltInClass::UNQUALIFIED_128},
		{"16", "0", ast::BuiltInClass::FLOAT_128},
		{"0", "-1", ast::BuiltInClass::UNQUALIFIED_128} // Old homebrew toolchain
	};
	for(const auto& range : strings) {
		if(strcmp(range.low, low) == 0 && strcmp(range.high, high) == 0) {
			return range.classification;
		}
	}
	
	// For smaller values we actually parse the bounds as integers.
	// A leading '0' selects octal, matching how the bounds were written out.
	char* end = nullptr;
	s64 low_value = strtoll(type.low.c_str(), &end, low[0] == '0' ? 8 : 10);
	CCC_CHECK(end != low, "Failed to parse low part of range as integer.");
	s64 high_value = strtoll(type.high.c_str(), &end, high[0] == '0' ? 8 : 10);
	CCC_CHECK(end != high, "Failed to parse high part of range as integer.");
	
	static const struct { s64 low; s64 high; ast::BuiltInClass classification; } integers[] = {
		{0, 255, ast::BuiltInClass::UNSIGNED_8},
		{-128, 127, ast::BuiltInClass::SIGNED_8},
		{0, 127, ast::BuiltInClass::UNQUALIFIED_8},
		{0, 65535, ast::BuiltInClass::UNSIGNED_16},
		{-32768, 32767, ast::BuiltInClass::SIGNED_16},
		{0, 4294967295, ast::BuiltInClass::UNSIGNED_32},
		{-2147483648, 2147483647, ast::BuiltInClass::SIGNED_32},
	};
	for(const auto& range : integers) {
		// The -low_value comparison presumably accepts bounds written out as
		// a negated magnitude -- TODO confirm against real stab output.
		if((range.low == low_value || range.low == -low_value) && range.high == high_value) {
			return range.classification;
		}
	}
	
	return CCC_FAILURE("Failed to classify range.");
}
// Convert a single struct/union field into an AST node. Bitfields become
// BitField nodes; every other field becomes a node of the field's type,
// annotated with the field's name, offset, size and access specifier.
static Result<std::unique_ptr<ast::Node>> field_to_ast(
	const StabsStructOrUnionType::Field& field,
	const StabsType& enclosing_struct,
	const StabsToAstState& state,
	s32 depth)
{
	AST_DEBUG_PRINTF("%-*s field %s\n", depth * 4, "", field.name.c_str());
	
	Result<bool> is_bitfield = detect_bitfield(field, state);
	CCC_RETURN_IF_ERROR(is_bitfield);
	
	if(!*is_bitfield) {
		// Regular (non-bitfield) member.
		Result<std::unique_ptr<ast::Node>> member = stabs_type_to_ast(
			*field.type,
			&enclosing_struct,
			state,
			depth + 1,
			true,
			false);
		CCC_RETURN_IF_ERROR(member);
		(*member)->name = field.name;
		(*member)->offset_bytes = field.offset_bits / 8;
		(*member)->size_bits = field.size_bits;
		(*member)->set_access_specifier(stabs_field_visibility_to_access_specifier(field.visibility), state.importer_flags);
		// Vtable pointers are recognized by the names compilers give them.
		bool looks_like_vtable_pointer =
			field.name.starts_with("$vf") ||
			field.name.starts_with("_vptr$") ||
			field.name.starts_with("_vptr.");
		if(looks_like_vtable_pointer) {
			(*member)->is_vtable_pointer = true;
		}
		if(field.is_static) {
			(*member)->storage_class = STORAGE_CLASS_STATIC;
		}
		return member;
	}
	
	// Bitfield member.
	Result<std::unique_ptr<ast::Node>> underlying = stabs_type_to_ast(
		*field.type,
		&enclosing_struct,
		state,
		depth + 1,
		true,
		false);
	CCC_RETURN_IF_ERROR(underlying);
	std::unique_ptr<ast::BitField> bitfield = std::make_unique<ast::BitField>();
	// A name of a single space means unnamed, presumably the same convention
	// as for anonymous enums.
	bitfield->name = (field.name == " ") ? "" : field.name;
	bitfield->offset_bytes = field.offset_bits / 8;
	bitfield->size_bits = field.size_bits;
	bitfield->underlying_type = std::move(*underlying);
	bitfield->bitfield_offset_bits = field.offset_bits % 8;
	bitfield->set_access_specifier(stabs_field_visibility_to_access_specifier(field.visibility), state.importer_flags);
	return std::unique_ptr<ast::Node>(std::move(bitfield));
}
// Heuristically determine whether a struct/union field is a bitfield by
// comparing its size in bits against the size of its resolved underlying
// type. Returns false (not a bitfield) whenever the underlying type cannot be
// resolved or sized; returns an error only if a range type fails to classify.
static Result<bool> detect_bitfield(const StabsStructOrUnionType::Field& field, const StabsToAstState& state)
{
	// Static fields can't be bitfields.
	if(field.is_static) {
		return false;
	}
	
	// Resolve type references. Follows type number references and strips
	// const/volatile qualifiers, bounded to 50 steps.
	const StabsType* type = field.type.get();
	for(s32 i = 0; i < 50; i++) {
		if(!type->descriptor.has_value()) {
			// A bare type number: look up the referenced type. Bail out on
			// unknown numbers or trivial self-references.
			if(!type->type_number.valid()) {
				return false;
			}
			auto next_type = state.stabs_types->find(type->type_number);
			if(next_type == state.stabs_types->end() || next_type->second == type) {
				return false;
			}
			type = next_type->second;
		} else if(type->descriptor == StabsTypeDescriptor::TYPE_REFERENCE) {
			type = type->as<StabsTypeReferenceType>().type.get();
		} else if(type->descriptor == StabsTypeDescriptor::CONST_QUALIFIER) {
			type = type->as<StabsConstQualifierType>().type.get();
		} else if(type->descriptor == StabsTypeDescriptor::VOLATILE_QUALIFIER) {
			type = type->as<StabsVolatileQualifierType>().type.get();
		} else {
			// Reached a concrete descriptor; stop resolving.
			break;
		}
		// Prevent an infinite loop if there's a cycle (fatal frame).
		if(i == 49) {
			return false;
		}
	}
	
	// Determine the size of the underlying type. The loop above only breaks
	// out once the descriptor has a value, so this dereference is safe.
	s32 underlying_type_size_bits = 0;
	switch(*type->descriptor) {
		case ccc::StabsTypeDescriptor::RANGE: {
			Result<ast::BuiltInClass> bclass = classify_range(type->as<StabsRangeType>());
			CCC_RETURN_IF_ERROR(bclass);
			underlying_type_size_bits = builtin_class_size(*bclass) * 8;
			break;
		}
		case ccc::StabsTypeDescriptor::CROSS_REFERENCE: {
			// Forward declared enums are assumed to be 32 bits; other forward
			// declared types can't be sized.
			if(type->as<StabsCrossReferenceType>().type == ast::ForwardDeclaredType::ENUM) {
				underlying_type_size_bits = 32;
			} else {
				return false;
			}
			break;
		}
		case ccc::StabsTypeDescriptor::TYPE_ATTRIBUTE: {
			// The size attribute states the size explicitly.
			underlying_type_size_bits = (s32) type->as<StabsSizeTypeAttributeType>().size_bits;
			break;
		}
		case ccc::StabsTypeDescriptor::BUILTIN: {
			underlying_type_size_bits = 8; // bool
			break;
		}
		default: {
			return false;
		}
	}
	if(underlying_type_size_bits == 0) {
		return false;
	}
	
	// It's a bitfield if its size differs from that of its underlying type.
	return field.size_bits != underlying_type_size_bits;
}
// Convert all the member functions of a struct/union to AST nodes. Returns an
// empty list if member functions were excluded via the importer flags, or if
// every member function is a special member function (constructor, destructor
// or operator=) and generated member functions weren't explicitly requested.
static Result<std::vector<std::unique_ptr<ast::Node>>> member_functions_to_ast(
	const StabsStructOrUnionType& type, const StabsToAstState& state, s32 depth)
{
	if(state.importer_flags & NO_MEMBER_FUNCTIONS) {
		return std::vector<std::unique_ptr<ast::Node>>();
	}
	
	// Strip template arguments from the type name so constructors/destructors
	// of template instantiations can still be recognised by name.
	std::string_view type_name_no_template_args;
	if(type.name.has_value()) {
		const size_t template_args_begin = type.name->find("<");
		type_name_no_template_args = std::string_view(*type.name).substr(0, template_args_begin);
	}
	
	std::vector<std::unique_ptr<ast::Node>> member_functions;
	bool all_special = true;
	for(const StabsStructOrUnionType::MemberFunctionSet& function_set : type.member_functions) {
		MemberFunctionInfo info = check_member_function(
			function_set.name, type_name_no_template_args, state.demangler, state.importer_flags);
		if(!info.is_special_member_function) {
			all_special = false;
		}
		
		for(const StabsStructOrUnionType::MemberFunction& stabs_func : function_set.overloads) {
			Result<std::unique_ptr<ast::Node>> node = stabs_type_to_ast(
				*stabs_func.type,
				&type,
				state,
				depth + 1,
				true,
				true);
			CCC_RETURN_IF_ERROR(node);
			
			ast::Node& function_node = **node;
			function_node.is_constructor_or_destructor = info.is_constructor_or_destructor;
			function_node.is_special_member_function = info.is_special_member_function;
			function_node.is_operator_member_function = info.is_operator_member_function;
			function_node.name = info.name;
			function_node.set_access_specifier(stabs_field_visibility_to_access_specifier(stabs_func.visibility), state.importer_flags);
			if(function_node.descriptor == ast::FUNCTION) {
				ast::Function& function = function_node.as<ast::Function>();
				function.modifier = stabs_func.modifier;
				function.vtable_index = stabs_func.vtable_index;
			}
			member_functions.emplace_back(std::move(*node));
		}
	}
	
	if(all_special && (state.importer_flags & INCLUDE_GENERATED_MEMBER_FUNCTIONS) == 0) {
		return std::vector<std::unique_ptr<ast::Node>>();
	}
	return member_functions;
}
// Classify a member function by name: determine its demangled name and
// whether it's a constructor, destructor, operator or other special member
// function. type_name_no_template_args is used to recognise named
// constructors/destructors and may be empty.
static MemberFunctionInfo check_member_function(
	const std::string& mangled_name,
	std::string_view type_name_no_template_args,
	const DemanglerFunctions& demangler,
	u32 importer_flags)
{
	MemberFunctionInfo info;
	
	// Some compiler versions output gcc opnames for overloaded operators
	// instead of their proper names, so try the opname demangler first.
	if((importer_flags & DONT_DEMANGLE_NAMES) == 0 && demangler.cplus_demangle_opname) {
		if(char* demangled_name = demangler.cplus_demangle_opname(mangled_name.c_str(), 0)) {
			info.name = demangled_name;
			free(reinterpret_cast<void*>(demangled_name));
		}
	}
	if(info.name.empty()) {
		info.name = mangled_name;
	}
	
	bool is_constructor =
		info.name == "__ct" // Takes a parameter to decide whether or not to construct virtual base classes.
		|| info.name == "__comp_ctor" // Constructs virtual base classes.
		|| info.name == "__base_ctor"; // Does not construct virtual base classes.
	if(!is_constructor && !type_name_no_template_args.empty() && info.name == type_name_no_template_args) {
		is_constructor = true; // Named constructor.
	}
	
	bool is_destructor =
		info.name == "__dt" // Takes parameters to decide whether or not to construct virtual base classes and/or delete the object.
		|| info.name == "__comp_dtor" // Destructs virtual base classes.
		|| info.name == "__base_dtor" // Does not construct virtual base classes.
		|| info.name == "__deleting_dtor"; // Destructs virtual base classes then deletes the entire object.
	if(!is_destructor && info.name.starts_with('~')) {
		is_destructor = std::string_view(info.name).substr(1) == type_name_no_template_args; // Named destructor.
	}
	
	info.is_constructor_or_destructor =
		is_constructor || is_destructor || info.name.starts_with("$_");
	info.is_special_member_function =
		info.is_constructor_or_destructor || info.name == "operator=";
	
	return info;
}
// Replace inline re-emitted copies of a struct/union definition, found in its
// own member function parameter lists, with type name references. name,
// type_number and file_handle identify the outer type being referenced.
void fix_recursively_emitted_structures(
	ast::StructOrUnion& outer_struct, const std::string& name, StabsTypeNumber type_number, SourceFileHandle file_handle)
{
	// This is a rather peculiar case. For some compiler versions, when a struct
	// or a union defined using a typedef is being emitted and it needs to
	// reference itself from a member function parameter, it will emit its
	// entire definition again in the middle of the first definition, although
	// thankfully it won't recurse more than once.
	//
	// The game Sega Soccer Slam is affected by this. See the PeculiarParameter
	// test case in mdebug_importer_tests.cpp for a bare bones example.
	for(std::unique_ptr<ast::Node>& node : outer_struct.member_functions) {
		if(node->descriptor != ast::FUNCTION) {
			continue;
		}
		ast::Function& function = node->as<ast::Function>();
		if(!function.parameters.has_value()) {
			continue;
		}
		// Look for parameters that are pointers or references to an inline
		// struct/union definition.
		for(std::unique_ptr<ast::Node>& parameter : *function.parameters) {
			if(parameter->descriptor != ast::POINTER_OR_REFERENCE) {
				continue;
			}
			ast::PointerOrReference& pointer_or_reference = parameter->as<ast::PointerOrReference>();
			if(pointer_or_reference.value_type->descriptor != ast::STRUCT_OR_UNION) {
				continue;
			}
			ast::StructOrUnion& inner_struct = pointer_or_reference.value_type->as<ast::StructOrUnion>();
			// Since C++ doesn't allow struct definitions in function parameter
			// lists normally, and most of the time the member function
			// parameters aren't even filled in by GCC, this is a really rare
			// case, so here we only bother to do some very basic checks to
			// verify that the inner struct is similar to the outer struct.
			if(inner_struct.base_classes.size() != outer_struct.base_classes.size()) {
				continue;
			}
			if(inner_struct.fields.size() != outer_struct.fields.size()) {
				continue;
			}
			if(inner_struct.member_functions.size() != outer_struct.member_functions.size()) {
				continue;
			}
			// Swap the inline copy of the definition for an unresolved type
			// name reference back to the outer type.
			auto type_name = std::make_unique<ast::TypeName>();
			type_name->source = ast::TypeNameSource::REFERENCE;
			type_name->unresolved_stabs = std::make_unique<ast::TypeName::UnresolvedStabs>();
			type_name->unresolved_stabs->type_name = name;
			type_name->unresolved_stabs->referenced_file_handle = file_handle;
			type_name->unresolved_stabs->stabs_type_number = type_number;
			pointer_or_reference.value_type = std::move(type_name);
		}
	}
}
// Map a STABS field visibility value to the equivalent AST access specifier.
// Anything that isn't explicitly protected or private is treated as public.
ast::AccessSpecifier stabs_field_visibility_to_access_specifier(StabsStructOrUnionType::Visibility visibility)
{
	switch(visibility) {
		case StabsStructOrUnionType::Visibility::PROTECTED:
			return ast::AS_PROTECTED;
		case StabsStructOrUnionType::Visibility::PRIVATE:
			return ast::AS_PRIVATE;
		case StabsStructOrUnionType::Visibility::NONE:
		case StabsStructOrUnionType::Visibility::PUBLIC:
		case StabsStructOrUnionType::Visibility::PUBLIC_OPTIMIZED_OUT:
		default:
			return ast::AS_PUBLIC;
	}
}
}

29
3rdparty/ccc/src/ccc/stabs_to_ast.h vendored Normal file
View File

@@ -0,0 +1,29 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include "ast.h"
#include "stabs.h"
namespace ccc {

// State shared between the STABS type to AST conversion functions.
struct StabsToAstState {
	// Handle of the source file the types being converted belong to.
	u32 file_handle;
	// Lookup table used to resolve STABS type number references.
	std::map<StabsTypeNumber, const StabsType*>* stabs_types;
	// Bitfield of importer flags (e.g. NO_MEMBER_FUNCTIONS, DONT_DEMANGLE_NAMES).
	u32 importer_flags;
	// Optional demangler entry points (e.g. cplus_demangle_opname).
	DemanglerFunctions demangler;
};

// Convert a STABS type to an AST node.
Result<std::unique_ptr<ast::Node>> stabs_type_to_ast(
	const StabsType& type,
	const StabsType* enclosing_struct,
	const StabsToAstState& state,
	s32 depth,
	bool substitute_type_name,
	bool force_substitute);

// Replace inline re-emitted copies of a struct/union definition, found in its
// own member function parameter lists, with type name references.
void fix_recursively_emitted_structures(
	ast::StructOrUnion& outer_struct, const std::string& name, StabsTypeNumber type_number, SourceFileHandle file_handle);

// Map a STABS field visibility value to the equivalent AST access specifier.
ast::AccessSpecifier stabs_field_visibility_to_access_specifier(StabsStructOrUnionType::Visibility visibility);

}

1204
3rdparty/ccc/src/ccc/symbol_database.cpp vendored Normal file

File diff suppressed because it is too large Load Diff

721
3rdparty/ccc/src/ccc/symbol_database.h vendored Normal file
View File

@@ -0,0 +1,721 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include <map>
#include <atomic>
#include <variant>
#include "util.h"
namespace ccc {
// An X macro for all the symbol types.
#define CCC_FOR_EACH_SYMBOL_TYPE_DO_X \
CCC_X(DataType, data_types) \
CCC_X(Function, functions) \
CCC_X(GlobalVariable, global_variables) \
CCC_X(Label, labels) \
CCC_X(LocalVariable, local_variables) \
CCC_X(Module, modules) \
CCC_X(ParameterVariable, parameter_variables) \
CCC_X(Section, sections) \
CCC_X(SourceFile, source_files) \
CCC_X(SymbolSource, symbol_sources)
// An enum for all the symbol types.
// The enumerators are bit flags so that sets of symbol types can be passed
// around as a single mask (see the descriptors parameters on the
// SymbolDatabase lookup functions).
enum SymbolDescriptor {
	DATA_TYPE = 1 << 0,
	FUNCTION = 1 << 1,
	GLOBAL_VARIABLE = 1 << 2,
	LABEL = 1 << 3,
	LOCAL_VARIABLE = 1 << 4,
	MODULE = 1 << 5,
	PARAMETER_VARIABLE = 1 << 6,
	SECTION = 1 << 7,
	SOURCE_FILE = 1 << 8,
	SYMBOL_SOURCE = 1 << 9
};
// Mask covering every SymbolDescriptor bit; used as the default descriptor
// mask for the SymbolDatabase lookup functions.
enum {
	ALL_SYMBOL_TYPES = 0xffff
};
// Forward declare all the different types of symbol objects.
#define CCC_X(SymbolType, symbol_list) class SymbolType;
CCC_FOR_EACH_SYMBOL_TYPE_DO_X
#undef CCC_X
class SymbolDatabase;
// Strongly typed handles for all of the symbol objects. These are here to solve
// the problem of dangling references to symbols.
template <typename SymbolType>
struct SymbolHandle {
	// (u32) -1 is the sentinel for "no symbol".
	u32 value = (u32) -1;
	
	SymbolHandle() {}
	SymbolHandle(u32 v) : value(v) {}
	// A null symbol pointer converts to an invalid handle.
	SymbolHandle(const SymbolType* symbol)
		: value(symbol ? symbol->handle().value : (u32) -1) {}
	
	// Check if this symbol handle has been initialised. Note that this doesn't
	// determine whether or not the symbol it points to has been deleted!
	bool valid() const { return value != (u32) -1; }
	
	friend auto operator<=>(const SymbolHandle& lhs, const SymbolHandle& rhs) = default;
};

// Generate a named handle alias (e.g. FunctionHandle) for each symbol type.
#define CCC_X(SymbolType, symbol_list) using SymbolType##Handle = SymbolHandle<SymbolType>;
CCC_FOR_EACH_SYMBOL_TYPE_DO_X
#undef CCC_X
// Per-symbol-type behaviour flags, stored in each symbol class's static FLAGS
// member. They control which lookup maps a SymbolList maintains and whether
// symbol names are demangled on creation.
enum SymbolFlag {
	NO_SYMBOL_FLAGS = 0,
	WITH_ADDRESS_MAP = 1 << 0,
	WITH_NAME_MAP = 1 << 1,
	NAME_NEEDS_DEMANGLING = 1 << 2
};
// A container class for symbols of a given type that maintains maps of their
// names and addresses depending on the value of SymbolType::FLAGS.
template <typename SymbolType>
class SymbolList {
public:
	// Lookup symbols from their handles using binary search.
	SymbolType* symbol_from_handle(SymbolHandle<SymbolType> handle);
	const SymbolType* symbol_from_handle(SymbolHandle<SymbolType> handle) const;
	
	// Lookup multiple symbols from their handles using binary search.
	std::vector<SymbolType*> symbols_from_handles(const std::vector<SymbolHandle<SymbolType>>& handles);
	std::vector<const SymbolType*> symbols_from_handles(const std::vector<SymbolHandle<SymbolType>>& handles) const;
	std::vector<SymbolType*> optional_symbols_from_handles(const std::optional<std::vector<SymbolHandle<SymbolType>>>& handles);
	std::vector<const SymbolType*> optional_symbols_from_handles(const std::optional<std::vector<SymbolHandle<SymbolType>>>& handles) const;
	
	using Iterator = typename std::vector<SymbolType>::iterator;
	using ConstIterator = typename std::vector<SymbolType>::const_iterator;
	
	// For iterating over all the symbols.
	Iterator begin();
	ConstIterator begin() const;
	Iterator end();
	ConstIterator end() const;
	
	// Multimaps, since multiple symbols may share an address or a name.
	using AddressToHandleMap = std::multimap<u32, SymbolHandle<SymbolType>>;
	using NameToHandleMap = std::multimap<std::string, SymbolHandle<SymbolType>>;
	
	// Lightweight begin/end pair so map ranges can be used with range-for.
	template <typename Iterator>
	class Iterators {
	public:
		Iterators(Iterator b, Iterator e)
			: m_begin(b), m_end(e) {}
		Iterator begin() const { return m_begin; }
		Iterator end() const { return m_end; }
	protected:
		Iterator m_begin;
		Iterator m_end;
	};
	
	using AddressToHandleMapIterators = Iterators<typename AddressToHandleMap::const_iterator>;
	using NameToHandleMapIterators = Iterators<typename NameToHandleMap::const_iterator>;
	
	// Lookup symbols by their address.
	AddressToHandleMapIterators handles_from_starting_address(Address address) const;
	AddressToHandleMapIterators handles_from_address_range(AddressRange range) const;
	SymbolHandle<SymbolType> first_handle_from_starting_address(Address address) const;
	SymbolHandle<SymbolType> first_handle_after_address(Address address) const;
	
	// Lookup symbols by their name.
	NameToHandleMapIterators handles_from_name(const std::string& name) const;
	SymbolHandle<SymbolType> first_handle_from_name(const std::string& name) const;
	
	// Find a symbol with an address range that contains the provided address.
	// For example, to find which function an instruction belongs to.
	SymbolType* symbol_overlapping_address(Address address);
	const SymbolType* symbol_overlapping_address(Address address) const;
	
	// Convert handles to underlying array indices.
	s32 index_from_handle(SymbolHandle<SymbolType> handle) const;
	
	// Index into the underlying array.
	SymbolType& symbol_from_index(s32 index);
	const SymbolType& symbol_from_index(s32 index) const;
	
	// Determine if any symbols are being stored.
	bool empty() const;
	
	// Retrieve the number of symbols stored.
	s32 size() const;
	
	// Create a new symbol. If it's a SymbolSource symbol, source can be left
	// empty, otherwise it has to be valid.
	Result<SymbolType*> create_symbol(
		std::string name, Address address, SymbolSourceHandle source, const Module* module_symbol = nullptr);
	
	// Create a new symbol. Similar to above, but for symbols without addresses.
	Result<SymbolType*> create_symbol(
		std::string name, SymbolSourceHandle source, const Module* module_symbol = nullptr);
	
	// Create a new symbol. Similar to above, but unless DONT_DEMANGLE_NAMES is
	// set, the name of the symbol will be demangled.
	Result<SymbolType*> create_symbol(
		std::string name,
		SymbolSourceHandle source,
		const Module* module_symbol,
		Address address,
		u32 importer_flags,
		DemanglerFunctions demangler);
	
	// Update the address of a symbol without changing its handle.
	bool move_symbol(SymbolHandle<SymbolType> handle, Address new_address);
	
	// Update the name of a symbol without changing its handle.
	bool rename_symbol(SymbolHandle<SymbolType> handle, std::string new_name);
	
	// Move all the symbols from the passed list into this list.
	void merge_from(SymbolList<SymbolType>& list);
	
	// Mark a symbol for destruction. If the correct symbol database pointer is
	// passed, all descendants will also be marked. For example, marking a
	// function will also mark its parameters and local variables.
	bool mark_symbol_for_destruction(SymbolHandle<SymbolType> handle, SymbolDatabase* database);
	
	// Mark all the symbols from a given symbol source for destruction. For
	// example you can use this to free a symbol table without destroying
	// user-defined symbols. The behaviour for marking descendants is the same
	// as destroy_symbol.
	void mark_symbols_from_source_for_destruction(SymbolSourceHandle source, SymbolDatabase* database);
	
	// Mark all the symbols from a given module for destruction. The behaviour
	// for marking descendants is the same as destroy_symbol.
	void mark_symbols_from_module_for_destruction(ModuleHandle module_handle, SymbolDatabase* database);
	
	// Destroy all symbols that have previously been marked for destruction.
	// This invalidates all pointers to symbols in this list.
	void destroy_marked_symbols();
	
	// Destroy all symbols, but don't reset m_next_handle so we don't have to
	// worry about dangling handles.
	void clear();
	
protected:
	// Do a binary search for a handle, and return either its index, or the
	// index where it could be inserted.
	size_t binary_search(SymbolHandle<SymbolType> handle) const;
	
	// Keep the address map in sync with the symbol list.
	void link_address_map(SymbolType& symbol);
	void unlink_address_map(SymbolType& symbol);
	
	// Keep the name map in sync with the symbol list.
	void link_name_map(SymbolType& symbol);
	void unlink_name_map(SymbolType& symbol);
	
	// NOTE(review): presumably kept sorted by handle so binary_search works --
	// confirm in symbol_database.cpp.
	std::vector<SymbolType> m_symbols;
	AddressToHandleMap m_address_to_handle;
	NameToHandleMap m_name_to_handle;
	
	// We share this between symbol lists of the same type so that we can merge
	// them without having to rewrite all the handles.
	static std::atomic<u32> m_next_handle;
};
// Base class for all the symbols.
class Symbol {
	template <typename SymbolType>
	friend class SymbolList;
public:
	// Accessors for the properties common to all symbol types.
	const std::string& name() const { return m_name; }
	u32 raw_handle() const { return m_handle; }
	SymbolSourceHandle source() const { return m_source; }
	ModuleHandle module_handle() const { return m_module; }
	
	Address address() const { return m_address; }
	u32 size() const { return m_size; }
	void set_size(u32 size) { m_size = size; }
	AddressRange address_range() const { return AddressRange(m_address, m_address.get_or_zero() + m_size); }
	
	// The AST node describing this symbol's type (may be null).
	ast::Node* type() { return m_type.get(); }
	const ast::Node* type() const { return m_type.get(); }
	void set_type(std::unique_ptr<ast::Node> type);
	
	// Counter bumped whenever the AST changes; NodeHandle stores a snapshot of
	// it, presumably to detect stale node pointers -- see NodeHandle.
	u32 generation() const { return m_generation; }
	
	// This MUST be called after any AST nodes have been created/deleted/moved.
	// For the set_type function this is done for you.
	void invalidate_node_handles() { m_generation++; }
	
	// Mark a single symbol for destruction, not including its descendants.
	void mark_for_destruction() { m_marked_for_destruction = true; }
	bool is_marked_for_destruction() { return m_marked_for_destruction; }
	
protected:
	// Empty default hooks. Derived symbol types shadow these (e.g.
	// Function::on_destroy, Module::on_create); presumably invoked via
	// SymbolList -- confirm in symbol_database.cpp.
	void on_create() {}
	void on_destroy(SymbolDatabase* database) {}
	
	u32 m_handle = (u32) -1;
	SymbolSourceHandle m_source;
	Address m_address;
	u32 m_size = 0;
	std::string m_name;
	std::unique_ptr<ast::Node> m_type;
	// Packed into a single u32: 31-bit generation counter plus the
	// marked-for-destruction flag.
	u32 m_generation : 31 = 0;
	u32 m_marked_for_destruction : 1 = false;
	ModuleHandle m_module;
};
// Variable storage types. This is different to whether the variable is a
// global, local or parameter. For example local variables can have global
// storage (static locals).
// The program section (or pseudo-section) a globally-stored variable lives
// in. NIL means no/unknown location.
enum GlobalStorageLocation {
	NIL,
	DATA,
	BSS,
	ABS,
	SDATA,
	SBSS,
	RDATA,
	COMMON,
	SCOMMON,
	SUNDEFINED
};
const char* global_storage_location_to_string(GlobalStorageLocation location);
// Storage descriptor for variables that live at a fixed location in a program
// section (globals and static locals).
struct GlobalStorage {
	GlobalStorageLocation location = GlobalStorageLocation::NIL;
	GlobalStorage() {}
	friend auto operator<=>(const GlobalStorage& lhs, const GlobalStorage& rhs) = default;
};
// Storage descriptor for variables that live in a register.
struct RegisterStorage {
	s32 dbx_register_number = -1;
	// Fix: this member was previously left uninitialised, so a
	// default-constructed RegisterStorage held an indeterminate value here,
	// which the defaulted operator<=> below would then read when comparing.
	bool is_by_reference = false;
	RegisterStorage() {}
	friend auto operator<=>(const RegisterStorage& lhs, const RegisterStorage& rhs) = default;
};
// Storage descriptor for variables that live on the stack, at an offset from
// the stack pointer.
struct StackStorage {
	s32 stack_pointer_offset = -1;
	StackStorage() {}
	friend auto operator<=>(const StackStorage& lhs, const StackStorage& rhs) = default;
};
// The hashing algorithm for functions. If you change this algorithm make sure
// to bump the version number for the JSON format so we can know if a hash was
// generated using the new algorithm or not.
class FunctionHash {
public:
	// Fold a single instruction into the hash. Only the top six bits are
	// used, so that the hash remains the same regardless of whether
	// relocations are applied or not.
	void update(u32 instruction)
	{
		const u32 opcode = instruction >> 26;
		m_hash = 31 * m_hash + opcode;
	}
	
	// Retrieve the accumulated hash value.
	u32 get() const
	{
		return m_hash;
	}
	
protected:
	u32 m_hash = 0;
};
// All the different types of symbol objects.
// A C/C++ data type.
class DataType : public Symbol {
	friend SourceFile;
public:
	static constexpr const SymbolDescriptor DESCRIPTOR = DATA_TYPE;
	static constexpr const char* NAME = "Data Type";
	static constexpr const u32 FLAGS = WITH_NAME_MAP;
	
	DataTypeHandle handle() const { return m_handle; }
	
	std::vector<SourceFileHandle> files; // List of files for which a given top-level type is present.
	// NOTE(review): presumably set when deduplication (see
	// SymbolDatabase::create_data_type_if_unique) finds a mismatch -- confirm.
	const char* compare_fail_reason = nullptr;
	
	bool not_defined_in_any_translation_unit : 1 = false;
	bool only_defined_in_single_translation_unit : 1 = false;
};
// A function. The type stored is the return type.
class Function : public Symbol {
	friend SourceFile;
	friend SymbolList<Function>;
public:
	static constexpr const SymbolDescriptor DESCRIPTOR = FUNCTION;
	static constexpr const char* NAME = "Function";
	static constexpr const u32 FLAGS = WITH_ADDRESS_MAP | WITH_NAME_MAP | NAME_NEEDS_DEMANGLING;
	
	FunctionHandle handle() const { return m_handle; }
	SourceFileHandle source_file() const { return m_source_file; }
	
	// Parameters and locals are stored as handles into the database's symbol
	// lists; the setters take the database so both sides can be kept in sync.
	const std::optional<std::vector<ParameterVariableHandle>>& parameter_variables() const;
	void set_parameter_variables(std::optional<std::vector<ParameterVariableHandle>> parameter_variables, SymbolDatabase& database);
	
	const std::optional<std::vector<LocalVariableHandle>>& local_variables() const;
	void set_local_variables(std::optional<std::vector<LocalVariableHandle>> local_variables, SymbolDatabase& database);
	
	const std::string& mangled_name() const;
	void set_mangled_name(std::string mangled);
	
	// A hash of all the opcodes in the function, read from file.
	u32 original_hash() const;
	void set_original_hash(u32 hash);
	
	// A hash of all the opcodes in the function, read from memory.
	u32 current_hash() const;
	void set_current_hash(FunctionHash hash);
	
	// Associates an address inside the function with a source line number.
	struct LineNumberPair {
		Address address;
		s32 line_number;
	};
	
	// Associates an address with the relative path of another source file --
	// presumably where code from that file begins; confirm against importer.
	struct SubSourceFile {
		Address address;
		std::string relative_path;
	};
	
	std::string relative_path;
	StorageClass storage_class; // NOTE(review): not initialised here; presumably filled in by the importer -- confirm.
	s32 stack_frame_size = -1; // -1 indicates the size is unknown.
	std::vector<LineNumberPair> line_numbers;
	std::vector<SubSourceFile> sub_source_files;
	bool is_member_function_ish = false; // Filled in by fill_in_pointers_to_member_function_definitions.
	bool is_no_return = false;
	
protected:
	void on_destroy(SymbolDatabase* database);
	
	SourceFileHandle m_source_file;
	std::optional<std::vector<ParameterVariableHandle>> m_parameter_variables;
	std::optional<std::vector<LocalVariableHandle>> m_local_variables;
	std::string m_mangled_name;
	u32 m_original_hash = 0;
	u32 m_current_hash = 0;
};
// A global variable.
class GlobalVariable : public Symbol {
	friend SourceFile;
public:
	static constexpr const SymbolDescriptor DESCRIPTOR = GLOBAL_VARIABLE;
	static constexpr const char* NAME = "Global Variable";
	static constexpr u32 FLAGS = WITH_ADDRESS_MAP | WITH_NAME_MAP | NAME_NEEDS_DEMANGLING;
	
	GlobalVariableHandle handle() const { return m_handle; }
	SourceFileHandle source_file() const { return m_source_file; };
	
	const std::string& mangled_name() const;
	void set_mangled_name(std::string mangled);
	
	GlobalStorage storage; // Which section the variable lives in.
	StorageClass storage_class; // NOTE(review): not initialised here; presumably filled in by the importer -- confirm.
	
protected:
	SourceFileHandle m_source_file;
	std::string m_mangled_name;
};
// A label. This could be a label defined in assembly, C/C++, or just a symbol
// that we can't automatically determine the type of (e.g. SNDLL symbols).
class Label : public Symbol {
public:
	static constexpr const SymbolDescriptor DESCRIPTOR = LABEL;
	static constexpr const char* NAME = "Label";
	static constexpr u32 FLAGS = WITH_ADDRESS_MAP | WITH_NAME_MAP;
	
	LabelHandle handle() const { return m_handle; }
	
	// Indicates that this label should not be used as a function name.
	bool is_junk = false;
};
// A local variable. This includes static local variables which have global
// storage.
class LocalVariable : public Symbol {
	friend Function;
public:
	static constexpr const SymbolDescriptor DESCRIPTOR = LOCAL_VARIABLE;
	static constexpr const char* NAME = "Local Variable";
	static constexpr u32 FLAGS = WITH_ADDRESS_MAP;
	
	LocalVariableHandle handle() const { return m_handle; }
	FunctionHandle function() const { return m_function; };
	
	// Static locals get GlobalStorage; other locals live in a register or on
	// the stack.
	std::variant<GlobalStorage, RegisterStorage, StackStorage> storage;
	// The range of addresses over which the variable is live.
	AddressRange live_range;
	
protected:
	FunctionHandle m_function;
};
// A program module e.g. an ELF file or an SNDLL file. Every symbol has a module
// field indicating what module the symbol belongs to. This can be used to
// delete all the symbols associated with a given module. Additionally, when a
// valid module pointer is passed to SymbolList<>::create_symbol, the address of
// the symbol will be added to the address of the new symbol.
class Module : public Symbol {
	friend SymbolList<Module>;
public:
	static constexpr const SymbolDescriptor DESCRIPTOR = MODULE;
	static constexpr const char* NAME = "Module";
	static constexpr u32 FLAGS = WITH_NAME_MAP;
	
	ModuleHandle handle() const { return m_handle; }
	
	// These are used for IRX modules.
	bool is_irx = false;
	s32 version_major = -1;
	s32 version_minor = -1;
	
protected:
	void on_create();
};
// A parameter variable.
class ParameterVariable : public Symbol {
	friend Function;
public:
	static constexpr const SymbolDescriptor DESCRIPTOR = PARAMETER_VARIABLE;
	static constexpr const char* NAME = "Parameter Variable";
	static constexpr u32 FLAGS = NO_SYMBOL_FLAGS;
	
	ParameterVariableHandle handle() const { return m_handle; }
	FunctionHandle function() const { return m_function; };
	
	// Parameters are stored either in a register or on the stack.
	std::variant<RegisterStorage, StackStorage> storage;
	
protected:
	FunctionHandle m_function;
};
// An ELF section. These are created from the ELF section headers.
class Section : public Symbol {
public:
	static constexpr const SymbolDescriptor DESCRIPTOR = SECTION;
	static constexpr const char* NAME = "Section";
	static constexpr u32 FLAGS = WITH_ADDRESS_MAP | WITH_NAME_MAP;
	
	SectionHandle handle() const { return m_handle; }
	
	// Check if the section name is ".text".
	bool contains_code() const;
	
	// Check for known data section names.
	bool contains_data() const;
};
// A source file (.c or .cpp file). One of these will be created for every
// translation unit in the program (but only if debugging symbols are present).
class SourceFile : public Symbol {
	friend SymbolList<SourceFile>;
public:
	static constexpr const SymbolDescriptor DESCRIPTOR = SOURCE_FILE;
	static constexpr const char* NAME = "Source File";
	static constexpr u32 FLAGS = WITH_ADDRESS_MAP | WITH_NAME_MAP;
	
	SourceFileHandle handle() const { return m_handle; }
	// The symbol's name doubles as the full path of the file.
	const std::string& full_path() const { return name(); }
	
	// Functions and globals are stored as handles into the database's symbol
	// lists; the setters take the database so both sides can be kept in sync.
	const std::vector<FunctionHandle>& functions() const;
	void set_functions(std::vector<FunctionHandle> functions, SymbolDatabase& database);
	
	const std::vector<GlobalVariableHandle>& global_variables() const;
	void set_global_variables(std::vector<GlobalVariableHandle> global_variables, SymbolDatabase& database);
	
	// Check whether at least half of the functions associated with the source
	// file match their original hash (meaning they haven't been overwritten).
	bool functions_match() const;
	void check_functions_match(const SymbolDatabase& database);
	
	std::string working_dir;
	std::string command_line_path;
	// Maps STABS type numbers from this translation unit to data type symbols.
	std::map<StabsTypeNumber, DataTypeHandle> stabs_type_number_to_handle;
	std::set<std::string> toolchain_version_info;
	
protected:
	void on_destroy(SymbolDatabase* database);
	
	std::vector<FunctionHandle> m_functions;
	std::vector<GlobalVariableHandle> m_global_variables;
	bool m_functions_match = true;
};
// A symbol source. Every symbol has a symbol source field indicating how the
// symbol was created. For example, the symbol table importers will each create
// one of these (if it doesn't already exist).
class SymbolSource : public Symbol {
	friend SymbolList<SymbolSource>;
public:
	static constexpr const SymbolDescriptor DESCRIPTOR = SYMBOL_SOURCE;
	static constexpr const char* NAME = "Symbol Source";
	static constexpr u32 FLAGS = WITH_NAME_MAP;
	
	SymbolSourceHandle handle() const { return m_handle; }
	
protected:
	void on_create();
};
// Bundles together all the information needed to identify if a symbol came from
// a specific symbol table import operation. For example, this is used to make
// sure that we don't reference symbols from another symbol table during the
// import process.
struct SymbolGroup {
	SymbolSourceHandle source;
	Module* module_symbol = nullptr;
	
	// Check whether the given symbol belongs to this source/module pair.
	bool is_in_group(const Symbol& symbol) const;
};
// The symbol database itself. This owns all the symbols.
class SymbolDatabase {
public:
	// One list per symbol type. The declaration order matches
	// CCC_FOR_EACH_SYMBOL_TYPE_DO_X, which determines lookup priority below.
	SymbolList<DataType> data_types;
	SymbolList<Function> functions;
	SymbolList<GlobalVariable> global_variables;
	SymbolList<Label> labels;
	SymbolList<LocalVariable> local_variables;
	SymbolList<Module> modules;
	SymbolList<ParameterVariable> parameter_variables;
	SymbolList<Section> sections;
	SymbolList<SourceFile> source_files;
	SymbolList<SymbolSource> symbol_sources;
	
	// Sum up the symbol counts for each symbol list.
	s32 symbol_count() const;
	
	// Find a symbol of any of the specified types given an address. Symbols of
	// the types specified higher up in the CCC_FOR_EACH_SYMBOL_TYPE_DO_X macro
	// are checked for first.
	const Symbol* symbol_starting_at_address(
		Address address, u32 descriptors = ALL_SYMBOL_TYPES, SymbolDescriptor* descriptor_out = nullptr) const;
	const Symbol* symbol_after_address(
		Address address, u32 descriptors = ALL_SYMBOL_TYPES, SymbolDescriptor* descriptor_out = nullptr) const;
	const Symbol* symbol_overlapping_address(
		Address address, u32 descriptors = ALL_SYMBOL_TYPES, SymbolDescriptor* descriptor_out = nullptr) const;
	
	// Find a symbol of any of the specified types given its name. Symbols of
	// the types specified higher up in the CCC_FOR_EACH_SYMBOL_TYPE_DO_X macro
	// are checked for first.
	const Symbol* symbol_with_name(
		const std::string& name, u32 descriptors = ALL_SYMBOL_TYPES, SymbolDescriptor* descriptor_out = nullptr) const;
	
	// Finds a symbol source object with the given name or creates one if it
	// doesn't already exist.
	Result<SymbolSourceHandle> get_symbol_source(const std::string& name);
	
	// Deduplicate matching data types with the same name. May replace the
	// existing data type with the new one if the new one is better.
	Result<DataType*> create_data_type_if_unique(
		std::unique_ptr<ast::Node> node,
		StabsTypeNumber number,
		const char* name,
		SourceFile& source_file,
		const SymbolGroup& group);
	
	// Move all the symbols in the passed database into this database.
	void merge_from(SymbolDatabase& database);
	
	// Destroy all the symbols from a given symbol source. For example you can
	// use this to free a symbol table without destroying user-defined symbols.
	void destroy_symbols_from_source(SymbolSourceHandle source, bool destroy_descendants);
	
	// Destroy all the symbols from a given module.
	void destroy_symbols_from_module(ModuleHandle module_handle, bool destroy_descendants);
	
	// Destroy all the symbols that have previously been marked for destruction.
	// This invalidates all pointers to symbols in this database.
	void destroy_marked_symbols();
	
	// Destroy all the symbols in the symbol database.
	void clear();
	
	// Invoke the callback for every symbol of every type in the database.
	template <typename Callback>
	void for_each_symbol(Callback callback) {
		// Use indices here to avoid iterator invalidation.
		#define CCC_X(SymbolType, symbol_list) \
			for(s32 i = 0; i < symbol_list.size(); i++) { \
				callback(symbol_list.symbol_from_index(i)); \
			}
		CCC_FOR_EACH_SYMBOL_TYPE_DO_X
		#undef CCC_X
	}
};
// A handle to a symbol of any type.
class MultiSymbolHandle {
public:
	// Create an empty multi symbol handle.
	MultiSymbolHandle();
	
	// Create a multi symbol handle of the specified type.
	template <typename SymbolType>
	MultiSymbolHandle(const SymbolType& symbol);
	MultiSymbolHandle(SymbolDescriptor descriptor, u32 handle);
	
	bool valid() const;
	SymbolDescriptor descriptor() const;
	u32 handle() const;
	
	// Resolve the handle to the symbol it refers to (presumably null when the
	// symbol has been deleted -- confirm in symbol_database.cpp).
	Symbol* lookup_symbol(SymbolDatabase& database);
	const Symbol* lookup_symbol(const SymbolDatabase& database) const;
	
	// Query/mutate the referenced symbol via its owning list.
	bool is_flag_set(SymbolFlag flag) const;
	bool move_symbol(Address new_address, SymbolDatabase& database) const;
	bool rename_symbol(std::string new_name, SymbolDatabase& database) const;
	bool destroy_symbol(SymbolDatabase& database, bool destroy_descendants) const;
	
	friend auto operator<=>(const MultiSymbolHandle& lhs, const MultiSymbolHandle& rhs) = default;
	
protected:
	SymbolDescriptor m_descriptor = DATA_TYPE;
	u32 m_handle = (u32) -1;
};
// A handle to an AST node.
class NodeHandle {
friend SymbolDatabase;
public:
// Create an empty node handle.
NodeHandle();
// Create a node handle that will always allow accesses to its node. You
// should only use this if you know the lifetime of the handle is a subset
// of the lifetime of the node.
NodeHandle(const ast::Node* node);
// Create a node handle pointing to an AST node from a given symbol that
// will prevent accesses to the node if the symbol is deleted.
template <typename SymbolType>
NodeHandle(const SymbolType& symbol, const ast::Node* node);
NodeHandle(SymbolDescriptor descriptor, const Symbol& symbol, const ast::Node* node);
// Whether this handle points at a node.
bool valid() const;
// The symbol this node handle is tied to, if any.
const MultiSymbolHandle& symbol() const;
// Retrieve the node, or null if the owning symbol no longer permits access.
const ast::Node* lookup_node(const SymbolDatabase& database) const;
// Create a handle to a child node that shares this handle's owning symbol.
NodeHandle handle_for_child(const ast::Node* child_node) const;
friend auto operator<=>(const NodeHandle& lhs, const NodeHandle& rhs) = default;
protected:
MultiSymbolHandle m_symbol;
const ast::Node* m_node = nullptr;
// NOTE(review): presumably compared against a generation counter on the
// symbol to detect stale handles -- confirm against SymbolDatabase.
u32 m_generation = 0;
};
}

114
3rdparty/ccc/src/ccc/symbol_file.cpp vendored Normal file
View File

@@ -0,0 +1,114 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "symbol_file.h"
namespace ccc {
// Determine the type of the input file from its magic number and parse it
// into the appropriate SymbolFile subclass (ELF or SNDLL).
Result<std::unique_ptr<SymbolFile>> parse_symbol_file(std::vector<u8> image, std::string file_name)
{
	const std::optional<u32> magic = copy_unaligned<u32>(image, 0);
	CCC_CHECK(magic.has_value(), "File too small.");
	
	if(*magic == CCC_FOURCC("\x7f""ELF")) {
		Result<ElfFile> elf = ElfFile::parse(std::move(image));
		CCC_RETURN_IF_ERROR(elf);
		return std::unique_ptr<SymbolFile>(
			std::make_unique<ElfSymbolFile>(std::move(*elf), std::move(file_name)));
	}
	
	if(*magic == CCC_FOURCC("SNR1") || *magic == CCC_FOURCC("SNR2")) {
		Result<SNDLLFile> sndll = parse_sndll_file(image, Address(), SNDLLType::DYNAMIC_LIBRARY);
		CCC_RETURN_IF_ERROR(sndll);
		return std::unique_ptr<SymbolFile>(
			std::make_unique<SNDLLSymbolFile>(std::make_shared<SNDLLFile>(std::move(*sndll))));
	}
	
	return CCC_FAILURE("Unknown file type.");
}
// Takes ownership of the parsed ELF and remembers the file name for name().
ElfSymbolFile::ElfSymbolFile(ElfFile elf, std::string elf_name)
: m_elf(std::move(elf)), m_name(std::move(elf_name)) {}
// The file name passed to the constructor.
std::string ElfSymbolFile::name() const
{
return m_name;
}
// Collect every symbol table present in this ELF file: the section headers
// pseudo-table, plus one table for each recognised symbol table section.
Result<std::vector<std::unique_ptr<SymbolTable>>> ElfSymbolFile::get_all_symbol_tables() const
{
	std::vector<std::unique_ptr<SymbolTable>> tables;
	tables.emplace_back(std::make_unique<ElfSectionHeadersSymbolTable>(m_elf));
	
	for(const SymbolTableFormatInfo& info : SYMBOL_TABLE_FORMATS) {
		const ElfSection* section = m_elf.lookup_section(info.section_name);
		if(!section) {
			continue;
		}
		Result<std::unique_ptr<SymbolTable>> table = create_elf_symbol_table(*section, m_elf, info.format);
		CCC_RETURN_IF_ERROR(table);
		// create_elf_symbol_table may return a null pointer; skip those.
		if(*table) {
			tables.emplace_back(std::move(*table));
		}
	}
	
	return tables;
}
// Create symbol tables only for the explicitly requested sections. Fails if
// any of the named sections are missing from the ELF.
Result<std::vector<std::unique_ptr<SymbolTable>>> ElfSymbolFile::get_symbol_tables_from_sections(
	const std::vector<SymbolTableLocation>& sections) const
{
	std::vector<std::unique_ptr<SymbolTable>> tables;
	for(const SymbolTableLocation& location : sections) {
		const char* section_name = location.section_name.c_str();
		const ElfSection* section = m_elf.lookup_section(section_name);
		CCC_CHECK(section, "No '%s' section.", section_name);
		Result<std::unique_ptr<SymbolTable>> table = create_elf_symbol_table(*section, m_elf, location.format);
		CCC_RETURN_IF_ERROR(table);
		// A null result means the section was present but unusable; skip it.
		if(*table) {
			tables.emplace_back(std::move(*table));
		}
	}
	return tables;
}
// Read-only access to the underlying parsed ELF file.
const ElfFile& ElfSymbolFile::elf() const
{
return m_elf;
}
// Takes shared ownership of the parsed SNDLL file.
SNDLLSymbolFile::SNDLLSymbolFile(std::shared_ptr<SNDLLFile> sndll)
: m_sndll(std::move(sndll)) {}
// The ELF path stored in the SNDLL file itself.
std::string SNDLLSymbolFile::name() const
{
return m_sndll->elf_path;
}
// An SNDLL file contains exactly one symbol table.
Result<std::vector<std::unique_ptr<SymbolTable>>> SNDLLSymbolFile::get_all_symbol_tables() const
{
std::vector<std::unique_ptr<SymbolTable>> symbol_tables;
symbol_tables.emplace_back(std::make_unique<SNDLLSymbolTable>(m_sndll));
return symbol_tables;
}
// Section-based lookup never applies to SNDLL files, so this always fails.
Result<std::vector<std::unique_ptr<SymbolTable>>> SNDLLSymbolFile::get_symbol_tables_from_sections(
const std::vector<SymbolTableLocation>& sections) const
{
return CCC_FAILURE("An SNDLL file is not composed of sections.");
}
}

62
3rdparty/ccc/src/ccc/symbol_file.h vendored Normal file
View File

@@ -0,0 +1,62 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include "elf.h"
#include "sndll.h"
#include "symbol_table.h"
namespace ccc {
// Identifies a symbol table by the ELF section it lives in and its format.
struct SymbolTableLocation {
std::string section_name;
SymbolTableFormat format;
};
// Abstract interface over a file containing symbol tables (see ElfSymbolFile
// and SNDLLSymbolFile below).
class SymbolFile {
public:
virtual ~SymbolFile() {}
// A name identifying this file, suitable for display.
virtual std::string name() const = 0;
// Enumerate all the symbol tables contained in the file.
virtual Result<std::vector<std::unique_ptr<SymbolTable>>> get_all_symbol_tables() const = 0;
// Enumerate only the symbol tables at the requested locations.
virtual Result<std::vector<std::unique_ptr<SymbolTable>>> get_symbol_tables_from_sections(
const std::vector<SymbolTableLocation>& sections) const = 0;
};
// Determine the type of the input file and parse it.
Result<std::unique_ptr<SymbolFile>> parse_symbol_file(std::vector<u8> image, std::string file_name);
// A SymbolFile backed by a parsed ELF image.
class ElfSymbolFile : public SymbolFile {
public:
ElfSymbolFile(ElfFile elf, std::string elf_name);
std::string name() const override;
Result<std::vector<std::unique_ptr<SymbolTable>>> get_all_symbol_tables() const override;
Result<std::vector<std::unique_ptr<SymbolTable>>> get_symbol_tables_from_sections(
const std::vector<SymbolTableLocation>& sections) const override;
// Direct access to the underlying ELF file.
const ElfFile& elf() const;
protected:
ElfFile m_elf;
std::string m_name;
};
// A SymbolFile backed by a parsed SNDLL dynamic library.
class SNDLLSymbolFile : public SymbolFile {
public:
SNDLLSymbolFile(std::shared_ptr<SNDLLFile> sndll);
std::string name() const override;
Result<std::vector<std::unique_ptr<SymbolTable>>> get_all_symbol_tables() const override;
// Always fails: SNDLL files are not composed of sections.
Result<std::vector<std::unique_ptr<SymbolTable>>> get_symbol_tables_from_sections(
const std::vector<SymbolTableLocation>& sections) const override;
protected:
std::shared_ptr<SNDLLFile> m_sndll;
};
}

285
3rdparty/ccc/src/ccc/symbol_table.cpp vendored Normal file
View File

@@ -0,0 +1,285 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "symbol_table.h"
#include "elf.h"
#include "elf_symtab.h"
#include "mdebug_importer.h"
#include "mdebug_section.h"
#include "sndll.h"
namespace ccc {
// All the supported symbol table formats, with their command-line names and
// the ELF sections they live in. Order matters: sorted from best to worst.
const std::vector<SymbolTableFormatInfo> SYMBOL_TABLE_FORMATS = {
{MDEBUG, "mdebug", ".mdebug"}, // The infamous Third Eye symbol table.
{SYMTAB, "symtab", ".symtab"}, // The standard ELF symbol table.
{SNDLL, "sndll", ".sndata"} // The SNDLL symbol table.
};
// Find the format info entry matching the given format enum value, or null
// if there is no such entry.
const SymbolTableFormatInfo* symbol_table_format_from_enum(SymbolTableFormat format)
{
	for(const SymbolTableFormatInfo& info : SYMBOL_TABLE_FORMATS) {
		if(info.format == format) {
			return &info;
		}
	}
	return nullptr;
}
// Find the format info entry matching the given format name string, or null
// if there is no such entry.
const SymbolTableFormatInfo* symbol_table_format_from_name(const char* format_name)
{
	for(const SymbolTableFormatInfo& info : SYMBOL_TABLE_FORMATS) {
		if(strcmp(info.format_name, format_name) == 0) {
			return &info;
		}
	}
	return nullptr;
}
// Find the format info entry whose ELF section name matches the given name,
// or null if there is no such entry.
const SymbolTableFormatInfo* symbol_table_format_from_section(const char* section_name)
{
	for(const SymbolTableFormatInfo& info : SYMBOL_TABLE_FORMATS) {
		if(strcmp(info.section_name, section_name) == 0) {
			return &info;
		}
	}
	return nullptr;
}
// *****************************************************************************
// Wrap an ELF section in the appropriate SymbolTable subclass, validating
// that all the data referenced lies within the file image. The returned
// pointer may be null (e.g. for a skipped SNDLL section).
Result<std::unique_ptr<SymbolTable>> create_elf_symbol_table(
const ElfSection& section, const ElfFile& elf, SymbolTableFormat format)
{
std::unique_ptr<SymbolTable> symbol_table;
switch(format) {
case MDEBUG: {
symbol_table = std::make_unique<MdebugSymbolTable>(elf.image, (s32) section.header.offset);
break;
}
case SYMTAB: {
// Bounds-check the .symtab section and the string table section that
// its link field points at before taking spans over them.
CCC_CHECK(section.header.offset + section.header.size <= elf.image.size(),
"Section '%s' out of range.", section.name.c_str());
std::span<const u8> data = std::span(elf.image).subspan(section.header.offset, section.header.size);
CCC_CHECK(section.header.link != 0, "Section '%s' has no linked string table.", section.name.c_str());
CCC_CHECK(section.header.link < elf.sections.size(),
"Section '%s' has out of range link field.", section.name.c_str());
const ElfSection& linked_section = elf.sections[section.header.link];
CCC_CHECK(linked_section.header.offset + linked_section.header.size <= elf.image.size(),
"Linked section '%s' out of range.", linked_section.name.c_str());
std::span<const u8> linked_data = std::span(elf.image).subspan(
linked_section.header.offset, linked_section.header.size);
symbol_table = std::make_unique<SymtabSymbolTable>(data, linked_data);
break;
}
case SNDLL: {
CCC_CHECK(section.header.offset + section.header.size <= elf.image.size(),
"Section '%s' out of range.", section.name.c_str());
std::span<const u8> data = std::span(elf.image).subspan(section.header.offset, section.header.size);
// An unusable .sndata section only warrants a warning, not a failure;
// the null symbol_table is returned in that case.
if(data.size() >= 4 && data[0] != '\0') {
Result<SNDLLFile> file = parse_sndll_file(data, Address::non_zero(section.header.addr), SNDLLType::SNDATA_SECTION);
CCC_RETURN_IF_ERROR(file);
symbol_table = std::make_unique<SNDLLSymbolTable>(std::make_shared<SNDLLFile>(std::move(*file)));
} else {
CCC_WARN("Invalid SNDLL section.");
}
break;
}
}
return symbol_table;
}
// Import all the passed symbol tables into the database under a single newly
// created module. On failure, any symbols already created for the module are
// destroyed before the error is returned.
Result<ModuleHandle> import_symbol_tables(
SymbolDatabase& database,
const std::vector<std::unique_ptr<SymbolTable>>& symbol_tables,
std::string module_name,
Address base_address,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt)
{
Result<SymbolSourceHandle> module_source = database.get_symbol_source("Symbol Table Importer");
CCC_RETURN_IF_ERROR(module_source);
Result<Module*> module_symbol = database.modules.create_symbol(
std::move(module_name), base_address, *module_source, nullptr);
CCC_RETURN_IF_ERROR(module_symbol);
ModuleHandle module_handle = (*module_symbol)->handle();
for(const std::unique_ptr<SymbolTable>& symbol_table : symbol_tables) {
// Find a symbol source object with the right name, or create one if one
// doesn't already exist.
Result<SymbolSourceHandle> source = database.get_symbol_source(symbol_table->name());
if(!source.success()) {
// Roll back the partial import before propagating the error.
database.destroy_symbols_from_module(module_handle, false);
return source;
}
// Import the symbol table.
SymbolGroup group;
group.source = *source;
group.module_symbol = database.modules.symbol_from_handle(module_handle);
Result<void> result = symbol_table->import(
database, group, importer_flags, demangler, interrupt);
if(!result.success()) {
// Roll back the partial import before propagating the error.
database.destroy_symbols_from_module(module_handle, false);
return result;
}
}
return module_handle;
}
// *****************************************************************************
// image is the entire ELF image; section_offset is the offset of the
// .mdebug section within that image.
MdebugSymbolTable::MdebugSymbolTable(std::span<const u8> image, s32 section_offset)
: m_image(image), m_section_offset(section_offset) {}
const char* MdebugSymbolTable::name() const
{
return "MIPS Debug Symbol Table";
}
// Delegates to the mdebug importer, which parses the section and populates
// the database.
Result<void> MdebugSymbolTable::import(
SymbolDatabase& database,
const SymbolGroup& group,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt) const
{
return mdebug::import_symbol_table(
database, m_image, m_section_offset, group, importer_flags, demangler, interrupt);
}
// Parse just the .mdebug header and print its fields.
Result<void> MdebugSymbolTable::print_headers(FILE* out) const
{
mdebug::SymbolTableReader reader;
Result<void> reader_result = reader.init(m_image, m_section_offset);
CCC_RETURN_IF_ERROR(reader_result);
reader.print_header(out);
return Result<void>();
}
// Print the symbols in the table. The SymbolPrintFlags bits in flags select
// which groups of symbols are printed.
Result<void> MdebugSymbolTable::print_symbols(FILE* out, u32 flags) const
{
mdebug::SymbolTableReader reader;
Result<void> reader_result = reader.init(m_image, m_section_offset);
CCC_RETURN_IF_ERROR(reader_result);
Result<void> print_result = reader.print_symbols(
out, flags & PRINT_LOCALS, flags & PRINT_PROCEDURE_DESCRIPTORS, flags & PRINT_EXTERNALS);
CCC_RETURN_IF_ERROR(print_result);
return Result<void>();
}
// *****************************************************************************
// symtab is the raw .symtab section data, strtab the linked string table.
SymtabSymbolTable::SymtabSymbolTable(std::span<const u8> symtab, std::span<const u8> strtab)
: m_symtab(symtab), m_strtab(strtab) {}
const char* SymtabSymbolTable::name() const
{
return "ELF Symbol Table";
}
// Delegates to the ELF symtab importer. Note that the interrupt flag is not
// used for this format.
Result<void> SymtabSymbolTable::import(
SymbolDatabase& database,
const SymbolGroup& group,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt) const
{
return elf::import_symbols(database, group, m_symtab, m_strtab, importer_flags, demangler);
}
// The .symtab format has no header structure to print.
Result<void> SymtabSymbolTable::print_headers(FILE* out) const
{
return Result<void>();
}
// Print every entry in the symbol table. The flags parameter is ignored for
// this format.
Result<void> SymtabSymbolTable::print_symbols(FILE* out, u32 flags) const
{
Result<void> symbtab_result = elf::print_symbol_table(out, m_symtab, m_strtab);
CCC_RETURN_IF_ERROR(symbtab_result);
return Result<void>();
}
// *****************************************************************************
// Takes shared ownership of the parsed SNDLL file.
SNDLLSymbolTable::SNDLLSymbolTable(std::shared_ptr<SNDLLFile> sndll)
: m_sndll(std::move(sndll)) {}
const char* SNDLLSymbolTable::name() const
{
return "SNDLL Symbol Table";
}
// Delegates to the SNDLL importer. The interrupt flag is not used for this
// format.
Result<void> SNDLLSymbolTable::import(
SymbolDatabase& database,
const SymbolGroup& group,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt) const
{
return import_sndll_symbols(database, *m_sndll, group, importer_flags, demangler);
}
// The SNDLL format has no header structure to print.
Result<void> SNDLLSymbolTable::print_headers(FILE* out) const
{
return Result<void>();
}
// Print every symbol in the SNDLL file. The flags parameter is ignored for
// this format.
Result<void> SNDLLSymbolTable::print_symbols(FILE* out, u32 flags) const
{
print_sndll_symbols(out, *m_sndll);
return Result<void>();
}
// *****************************************************************************
// Holds a reference to the ELF file; the ELF must outlive this object.
ElfSectionHeadersSymbolTable::ElfSectionHeadersSymbolTable(const ElfFile& elf)
: m_elf(elf) {}
const char* ElfSectionHeadersSymbolTable::name() const
{
return "ELF Section Headers";
}
// Creates section symbols in the database from the ELF section headers.
Result<void> ElfSectionHeadersSymbolTable::import(
SymbolDatabase& database,
const SymbolGroup& group,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt) const
{
return m_elf.create_section_symbols(database, group);
}
// Section headers have nothing extra to print; these are no-ops.
Result<void> ElfSectionHeadersSymbolTable::print_headers(FILE* out) const
{
return Result<void>();
}
Result<void> ElfSectionHeadersSymbolTable::print_symbols(FILE* out, u32 flags) const
{
return Result<void>();
}
}

164
3rdparty/ccc/src/ccc/symbol_table.h vendored Normal file
View File

@@ -0,0 +1,164 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include <atomic>
#include "symbol_database.h"
namespace ccc {
// The format of a symbol table that may be present in a given file.
enum SymbolTableFormat {
MDEBUG = 0, // The infamous Third Eye symbol table
SYMTAB = 1, // Standard ELF symbol table
SNDLL = 2 // SNDLL section
};
// Static metadata describing a symbol table format: its enum value, its
// human-readable name, and the ELF section it is stored in.
struct SymbolTableFormatInfo {
SymbolTableFormat format;
const char* format_name;
const char* section_name;
};
// All the supported symbol table formats, sorted from best to worst.
extern const std::vector<SymbolTableFormatInfo> SYMBOL_TABLE_FORMATS;
// Lookup helpers over SYMBOL_TABLE_FORMATS. Each returns null on no match.
const SymbolTableFormatInfo* symbol_table_format_from_enum(SymbolTableFormat format);
const SymbolTableFormatInfo* symbol_table_format_from_name(const char* format_name);
const SymbolTableFormatInfo* symbol_table_format_from_section(const char* section_name);
// Bit flags for SymbolTable::print_symbols.
enum SymbolPrintFlags {
PRINT_LOCALS = 1 << 0,
PRINT_PROCEDURE_DESCRIPTORS = 1 << 1,
PRINT_EXTERNALS = 1 << 2
};
// Abstract interface over a single symbol table of any format.
class SymbolTable {
public:
virtual ~SymbolTable() {}
// A descriptive name for this type of symbol table.
virtual const char* name() const = 0;
// Imports this symbol table into the passed database.
virtual Result<void> import(
SymbolDatabase& database,
const SymbolGroup& group,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt) const = 0;
// Print out all the fields in the header structure if one exists.
virtual Result<void> print_headers(FILE* out) const = 0;
// Print out all the symbols in the symbol table. For .mdebug symbol tables
// the symbols are split between those that are local to a specific
// translation unit and those that are external, which is what the
// PRINT_LOCALS and PRINT_EXTERNALS bits of the flags parameter control.
virtual Result<void> print_symbols(FILE* out, u32 flags) const = 0;
};
// Forward declarations so this header doesn't need to pull in the ELF types.
struct ElfSection;
struct ElfFile;
// Create a symbol table from an ELF section. The return value may be null.
Result<std::unique_ptr<SymbolTable>> create_elf_symbol_table(
const ElfSection& section, const ElfFile& elf, SymbolTableFormat format);
// Utility function to call import_symbol_table on all the passed symbol tables
// and to generate a module handle. The symbols are grouped under a single
// newly created module, whose handle is returned.
// NOTE(review): interrupt presumably lets another thread cancel a long import
// -- confirm against the importer implementations.
Result<ModuleHandle> import_symbol_tables(
SymbolDatabase& database,
const std::vector<std::unique_ptr<SymbolTable>>& symbol_tables,
std::string module_name,
Address base_address,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt);
// A .mdebug (Third Eye) symbol table embedded in an ELF image.
class MdebugSymbolTable : public SymbolTable {
public:
// image: the whole ELF image; section_offset: offset of the .mdebug section.
MdebugSymbolTable(std::span<const u8> image, s32 section_offset);
const char* name() const override;
Result<void> import(
SymbolDatabase& database,
const SymbolGroup& group,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt) const override;
Result<void> print_headers(FILE* out) const override;
Result<void> print_symbols(FILE* out, u32 flags) const override;
protected:
std::span<const u8> m_image;
s32 m_section_offset;
};
// A standard ELF .symtab symbol table plus its linked string table.
class SymtabSymbolTable : public SymbolTable {
public:
SymtabSymbolTable(std::span<const u8> symtab, std::span<const u8> strtab);
const char* name() const override;
Result<void> import(
SymbolDatabase& database,
const SymbolGroup& group,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt) const override;
Result<void> print_headers(FILE* out) const override;
Result<void> print_symbols(FILE* out, u32 flags) const override;
protected:
std::span<const u8> m_symtab;
std::span<const u8> m_strtab;
};
struct SNDLLFile;
// The symbol table of an SNDLL file (either a standalone dynamic library or
// an .sndata section).
class SNDLLSymbolTable : public SymbolTable {
public:
SNDLLSymbolTable(std::shared_ptr<SNDLLFile> sndll);
const char* name() const override;
Result<void> import(
SymbolDatabase& database,
const SymbolGroup& group,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt) const override;
Result<void> print_headers(FILE* out) const override;
Result<void> print_symbols(FILE* out, u32 flags) const override;
protected:
std::shared_ptr<SNDLLFile> m_sndll;
};
// A pseudo symbol table that creates section symbols from the ELF section
// headers. The referenced ELF file must outlive this object.
class ElfSectionHeadersSymbolTable : public SymbolTable {
public:
ElfSectionHeadersSymbolTable(const ElfFile& elf);
const char* name() const override;
Result<void> import(
SymbolDatabase& database,
const SymbolGroup& group,
u32 importer_flags,
DemanglerFunctions demangler,
const std::atomic_bool* interrupt) const override;
Result<void> print_headers(FILE* out) const override;
Result<void> print_symbols(FILE* out, u32 flags) const override;
protected:
const ElfFile& m_elf;
};
}

176
3rdparty/ccc/src/ccc/util.cpp vendored Normal file
View File

@@ -0,0 +1,176 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#include "util.h"
namespace ccc {
static CustomErrorCallback custom_error_callback = nullptr;
// Build an Error value from printf-style arguments, recording the CCC source
// location it was raised from. Used by the CCC_* error macros.
Error format_error(const char* source_file, int source_line, const char* format, ...)
{
	char message[4096];
	
	va_list args;
	va_start(args, format);
	int length = vsnprintf(message, sizeof(message), format, args);
	va_end(args);
	
	// vsnprintf returns a negative value on an output error.
	if(length < 0) {
		strncpy(message, "Failed to generate error message.", sizeof(message));
	}
	
	Error error;
	error.message = message;
	error.source_file = source_file;
	error.source_line = source_line;
	return error;
}
// Print an error, or forward it to the registered callback if one is set.
void report_error(const Error& error)
{
if(custom_error_callback) {
custom_error_callback(error, ERROR_LEVEL_ERROR);
} else {
fprintf(stderr, "[%s:%d] " CCC_ANSI_COLOUR_RED "error:" CCC_ANSI_COLOUR_OFF " %s\n",
error.source_file, error.source_line, error.message.c_str());
}
}
// Print a warning, or forward it to the registered callback if one is set.
void report_warning(const Error& warning)
{
if(custom_error_callback) {
custom_error_callback(warning, ERROR_LEVEL_WARNING);
} else {
fprintf(stderr, "[%s:%d] " CCC_ANSI_COLOUR_MAGENTA "warning:" CCC_ANSI_COLOUR_OFF " %s\n",
warning.source_file, warning.source_line, warning.message.c_str());
}
}
// Install a callback that replaces the default stderr reporting above. Pass
// nullptr to restore the default behaviour.
void set_custom_error_callback(CustomErrorCallback callback)
{
custom_error_callback = callback;
}
// Return a view of the null-terminated string beginning at the given offset,
// or std::nullopt if no terminator occurs before the end of the buffer. The
// view does not include the terminator.
std::optional<std::string_view> get_string(std::span<const u8> bytes, u64 offset)
{
	u64 end = offset;
	while(end < bytes.size()) {
		if(bytes[end] == '\0') {
			const char* begin = reinterpret_cast<const char*>(&bytes[offset]);
			return std::string_view(begin, end - offset);
		}
		end++;
	}
	return std::nullopt;
}
// Join a base directory and a path from the symbol table, then normalise the
// result. If path is already absolute (or base is empty) it is used alone.
std::string merge_paths(const std::string& base, const std::string& path)
{
	// Try to figure out if we're dealing with a Windows path or a UNIX path,
	// guessing from the base when it's available and from the path otherwise.
	const char* sample = base.empty() ? path.c_str() : base.c_str();
	bool is_windows_path = guess_is_windows_path(sample);
	
	// If path is the entire path, we don't need to append base onto the
	// front, so check for that now.
	bool is_absolute_unix = (path.size() >= 1) && (path[0] == '/' || path[0] == '\\');
	bool is_absolute_windows = (path.size() >= 3) && path[1] == ':' && (path[2] == '/' || path[2] == '\\');
	if(base.empty() || is_absolute_unix || is_absolute_windows) {
		return normalise_path(path.c_str(), is_windows_path);
	}
	
	return normalise_path((base + "/" + path).c_str(), is_windows_path);
}
// Normalise a path: collapse repeated separators, resolve "." and ".."
// components where possible, uppercase any drive letter, and emit a single
// consistent separator style chosen by the second parameter.
std::string normalise_path(const char* input, bool use_backslashes_as_path_separators)
{
bool is_absolute = false;
std::optional<char> drive_letter;
std::vector<std::string> parts;
// Parse the beginning of the path.
if(*input == '/' || *input == '\\') { // UNIX path, drive relative Windows path or UNC Windows path.
is_absolute = true;
} else if(isalpha(*input) && input[1] == ':' && (input[2] == '/' || input[2] == '\\')) { // Absolute Windows path.
is_absolute = true;
drive_letter = toupper(*input);
input += 2;
} else {
// Relative path: seed an empty first component for the loop below.
parts.emplace_back();
}
// Parse the rest of the path. A run of separators starts a new component.
while(*input != 0) {
if(*input == '/' || *input == '\\') {
while(*input == '/' || *input == '\\') input++;
parts.emplace_back();
} else {
parts.back() += *(input++);
}
}
// Remove "." and ".." parts. A ".." only cancels a preceding component that
// is itself not "..", so leading ".."s are preserved.
for(s32 i = 0; i < (s32) parts.size(); i++) {
if(parts[i] == ".") {
parts.erase(parts.begin() + i);
i--;
} else if(parts[i] == ".." && i > 0 && parts[i - 1] != "..") {
parts.erase(parts.begin() + i);
parts.erase(parts.begin() + i - 1);
i -= 2;
}
}
// Output the path in a normal form.
std::string output;
if(is_absolute) {
if(drive_letter.has_value()) {
output += *drive_letter;
output += ":";
}
output += use_backslashes_as_path_separators ? '\\' : '/';
}
for(size_t i = 0; i < parts.size(); i++) {
output += parts[i];
if(i != parts.size() - 1) {
output += use_backslashes_as_path_separators ? '\\' : '/';
}
}
return output;
}
// Guess whether a path uses Windows conventions by looking at the first
// separator character that appears: '\\' means Windows, '/' means UNIX. If
// the path contains no separator at all we default to UNIX (false).
bool guess_is_windows_path(const char* path)
{
	for(size_t i = 0; path[i] != 0; i++) {
		if(path[i] == '\\') {
			return true;
		}
		if(path[i] == '/') {
			return false;
		}
	}
	return false;
}
// Return the file name component of a path e.g. "a/b.c" becomes "b.c". Both
// '/' and '\\' count as separators. If the path contains no separator, or
// ends with one, the whole path is returned unchanged.
std::string extract_file_name(const std::string& path)
{
	// find_last_of with both separator characters yields the position of the
	// last occurrence of either, which is the start of the file name part.
	std::string::size_type separator = path.find_last_of("/\\");
	bool has_separator = separator != std::string::npos;
	bool has_file_name = has_separator && separator + 1 != path.size();
	if(has_file_name) {
		return path.substr(separator + 1);
	}
	return path;
}
}

341
3rdparty/ccc/src/ccc/util.h vendored Normal file
View File

@@ -0,0 +1,341 @@
// This file is part of the Chaos Compiler Collection.
// SPDX-License-Identifier: MIT
#pragma once
#include <set>
#include <span>
#include <cstdio>
#include <vector>
#include <memory>
#include <string>
#include <cstdint>
#include <cstdarg>
#include <cstdlib>
#include <cstring>
#include <optional>
namespace ccc {
using u8 = unsigned char;
using u16 = uint16_t;
using u32 = uint32_t;
using u64 = uint64_t;
using s8 = signed char;
using s16 = int16_t;
using s32 = int32_t;
using s64 = int64_t;
#ifdef _WIN32
#define CCC_ANSI_COLOUR_OFF ""
#define CCC_ANSI_COLOUR_RED ""
#define CCC_ANSI_COLOUR_MAGENTA ""
#define CCC_ANSI_COLOUR_GRAY ""
#else
#define CCC_ANSI_COLOUR_OFF "\033[0m"
#define CCC_ANSI_COLOUR_RED "\033[31m"
#define CCC_ANSI_COLOUR_MAGENTA "\033[35m"
#define CCC_ANSI_COLOUR_GRAY "\033[90m"
#endif
// An error message plus the CCC source location that raised it.
struct Error {
std::string message;
const char* source_file;
s32 source_line;
};
// Severity passed to a custom error callback.
enum ErrorLevel {
ERROR_LEVEL_ERROR,
ERROR_LEVEL_WARNING
};
typedef void (*CustomErrorCallback)(const Error& error, ErrorLevel level);
// Build an Error from printf-style arguments (used by the CCC_* macros).
Error format_error(const char* source_file, int source_line, const char* format, ...);
// Print to stderr, or forward to the custom callback if one is set.
void report_error(const Error& error);
void report_warning(const Error& warning);
void set_custom_error_callback(CustomErrorCallback callback);
// Report a formatted error and terminate the process.
#define CCC_FATAL(...) \
{ \
ccc::Error error = ccc::format_error(__FILE__, __LINE__, __VA_ARGS__); \
ccc::report_error(error); \
exit(1); \
}
// If the condition is false, report a formatted error and terminate.
#define CCC_CHECK_FATAL(condition, ...) \
if(!(condition)) { \
ccc::Error error = ccc::format_error(__FILE__, __LINE__, __VA_ARGS__); \
ccc::report_error(error); \
exit(1); \
}
// If the condition is false, report a formatted error and abort (producing a
// core dump / debugger break rather than a clean exit).
#define CCC_ABORT_IF_FALSE(condition, ...) \
if(!(condition)) { \
ccc::Error error = ccc::format_error(__FILE__, __LINE__, __VA_ARGS__); \
ccc::report_error(error); \
abort(); \
}
// Assertion that stays enabled in all build configurations.
#define CCC_ASSERT(condition) \
CCC_ABORT_IF_FALSE(condition, #condition)
// The main error handling construct in CCC. This class is used to bundle
// together a return value and a pointer to error information, so that errors
// can be propagated up the stack.
template <typename Value>
class [[nodiscard]] Result {
template <typename OtherValue>
friend class Result;
protected:
// Exactly one of these is meaningful: m_value when m_error is null,
// otherwise m_error describes the failure.
Value m_value;
std::unique_ptr<Error> m_error;
Result() {}
public:
// Implicitly construct a successful result from a value.
Result(Value value) : m_value(std::move(value)), m_error(nullptr) {}
// Used to propagate errors up the call stack. Only valid for results that
// hold an error (enforced by the assert below).
template <typename OtherValue>
Result(Result<OtherValue>&& rhs)
{
CCC_ASSERT(rhs.m_error != nullptr);
m_error = std::move(rhs.m_error);
}
// Construct a failed result holding the given error.
static Result<Value> failure(Error error)
{
Result<Value> result;
result.m_error = std::make_unique<Error>(std::move(error));
return result;
}
// True if this result holds a value rather than an error.
bool success() const
{
return m_error == nullptr;
}
// The stored error. Asserts if this result is successful.
const Error& error() const
{
CCC_ASSERT(m_error != nullptr);
return *m_error;
}
// Access the stored value. All of these assert on a failed result.
Value& operator*()
{
CCC_ASSERT(m_error == nullptr);
return m_value;
}
const Value& operator*() const
{
CCC_ASSERT(m_error == nullptr);
return m_value;
}
Value* operator->()
{
CCC_ASSERT(m_error == nullptr);
return &m_value;
}
const Value* operator->() const
{
CCC_ASSERT(m_error == nullptr);
return &m_value;
}
};
// A result carrying no value. Derives from Result<int> (with a dummy value
// of 0) so the machinery above can be reused.
template <>
class [[nodiscard]] Result<void> : public Result<int> {
public:
Result() : Result<int>(0) {}
// Used to propagate errors up the call stack.
template <typename OtherValue>
Result(Result<OtherValue>&& rhs)
{
CCC_ASSERT(rhs.m_error != nullptr);
m_error = std::move(rhs.m_error);
}
};
// Construct a failed Result from printf-style arguments. The Result<int> is
// converted to the caller's Result type by the error-propagating constructor.
#define CCC_FAILURE(...) ccc::Result<int>::failure(ccc::format_error(__FILE__, __LINE__, __VA_ARGS__))
// Return a failure from the current function if the condition is false.
#define CCC_CHECK(condition, ...) \
if(!(condition)) { \
return CCC_FAILURE(__VA_ARGS__); \
}
// Consume one character from input, failing if it isn't the expected one.
#define CCC_EXPECT_CHAR(input, c, context) \
CCC_CHECK(*(input++) == c, \
"Expected '%c' in %s, got '%c' (%02hhx)", \
c, context, *(input - 1), *(input - 1))
// Propagate a failed Result to the caller.
#define CCC_RETURN_IF_ERROR(result) \
if(!(result).success()) { \
return (result); \
}
// Report a failed Result and terminate the process.
#define CCC_EXIT_IF_ERROR(result) \
if(!(result).success()) { \
ccc::report_error((result).error()); \
exit(1); \
}
// Fail the current Google Test case if the Result holds an error.
#define CCC_GTEST_FAIL_IF_ERROR(result) \
if(!(result).success()) { \
FAIL() << (result).error().message; \
}
// Implementation helper for CCC_WARN below.
template <typename... Args>
void warn_impl(const char* source_file, int source_line, const char* format, Args... args)
{
Error warning = format_error(source_file, source_line, format, args...);
report_warning(warning);
}
// Report a formatted warning without affecting control flow.
#define CCC_WARN(...) \
ccc::warn_impl(__FILE__, __LINE__, __VA_ARGS__)
// Declare a struct with no padding between its members, portably across MSVC
// and GCC/Clang.
#ifdef _MSC_VER
#define CCC_PACKED_STRUCT(name, ...) \
__pragma(pack(push, 1)) struct name { __VA_ARGS__ } __pragma(pack(pop));
#else
#define CCC_PACKED_STRUCT(name, ...) \
struct __attribute__((__packed__)) name { __VA_ARGS__ };
#endif
// Reinterpret sizeof(T) bytes at the given offset as a T. Returns null if the
// range is out of bounds or the offset is misaligned for T.
template <typename T>
const T* get_aligned(std::span<const u8> bytes, u64 offset)
{
if(offset > bytes.size() || bytes.size() - offset < sizeof(T) || offset % alignof(T) != 0) {
return nullptr;
}
return reinterpret_cast<const T*>(&bytes[offset]);
}
// Like get_aligned, but without the alignment requirement. The returned
// pointer may be misaligned; only use it with types that tolerate that.
template <typename T>
const T* get_unaligned(std::span<const u8> bytes, u64 offset)
{
if(offset > bytes.size() || bytes.size() - offset < sizeof(T)) {
return nullptr;
}
return reinterpret_cast<const T*>(&bytes[offset]);
}
// Copy sizeof(T) bytes at the given offset out into a T value, avoiding any
// alignment issues. Returns nullopt if the range is out of bounds.
template <typename T>
std::optional<T> copy_unaligned(std::span<const u8> bytes, u64 offset)
{
if(offset > bytes.size() || bytes.size() - offset < sizeof(T)) {
return std::nullopt;
}
T value;
memcpy(&value, &bytes[offset], sizeof(T));
return value;
}
// Returns a view of the null-terminated string at the given offset, or
// nullopt if no terminator is found before the end of the buffer.
std::optional<std::string_view> get_string(std::span<const u8> bytes, u64 offset);
// Expand a container into the begin/end iterator pair many algorithms take.
#define CCC_BEGIN_END(x) (x).begin(), (x).end()
#define CCC_ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
// Pack a four character string literal into a little-endian u32 magic number.
#define CCC_FOURCC(string) ((string)[0] | (string)[1] << 8 | (string)[2] << 16 | (string)[3] << 24)
struct Address {
u32 value = (u32) -1;
Address() {}
Address(u32 v) : value(v) {}
bool valid() const
{
return value != (u32) -1;
}
u32 get_or_zero() const
{
if(valid()) {
return value;
} else {
return 0;
}
}
Address add_base_address(Address base_address) const
{
if(valid()) {
return base_address.get_or_zero() + value;
} else {
return Address();
}
}
static Address non_zero(u32 address)
{
Address result;
if(address != 0) {
result = address;
}
return result;
}
friend auto operator<=>(const Address& lhs, const Address& rhs) = default;
};
// An inclusive pair of addresses. A single address constructs a degenerate
// range where low == high.
struct AddressRange {
Address low;
Address high;
AddressRange() {}
AddressRange(Address address) : low(address), high(address) {}
AddressRange(Address l, Address h) : low(l), high(h) {}
friend auto operator<=>(const AddressRange& lhs, const AddressRange& rhs) = default;
};
// These functions are to be used only for source file paths present in the
// symbol table, since we want them to be handled consistently across different
// platforms, which with std::filesystem::path doesn't seem to be possible.
std::string merge_paths(const std::string& base, const std::string& path);
std::string normalise_path(const char* input, bool use_backslashes_as_path_separators);
bool guess_is_windows_path(const char* path);
std::string extract_file_name(const std::string& path);
namespace ast { struct Node; }
// These are used to reference STABS types from other types within a single
// translation unit. For most games these will just be a single number, the type
// number. In some cases, for example with the homebrew SDK, type numbers are a
// pair of two numbers surrounded by round brackets e.g. (1,23) where the first
// number is the index of the include file to use (includes are listed for each
// translation unit separately), and the second number is the type number.
struct StabsTypeNumber {
s32 file = -1; // Include file index, or -1 when the single-number form is used.
s32 type = -1; // Type number; -1 means unset.
friend auto operator<=>(const StabsTypeNumber& lhs, const StabsTypeNumber& rhs) = default;
bool valid() const { return type > -1; }
};
// STABS storage classes for symbols.
enum StorageClass {
STORAGE_CLASS_NONE = 0,
STORAGE_CLASS_TYPEDEF = 1,
STORAGE_CLASS_EXTERN = 2,
STORAGE_CLASS_STATIC = 3,
STORAGE_CLASS_AUTO = 4,
STORAGE_CLASS_REGISTER = 5
};
// Function pointers for the GNU demangler functions, so we can build CCC as a
// library without linking against the demangler. Either pointer may be null.
struct DemanglerFunctions {
char* (*cplus_demangle)(const char *mangled, int options) = nullptr;
char* (*cplus_demangle_opname)(const char *opname, int options) = nullptr;
};
}

21
3rdparty/cpuinfo/.gitignore vendored Normal file
View File

@@ -0,0 +1,21 @@
# Ninja files
build.ninja
# Build objects and artifacts
deps/
build/
bin/
lib/
libs/
obj/
*.pyc
*.pyo
# System files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

925
3rdparty/cpuinfo/CMakeLists.txt vendored Normal file
View File

@@ -0,0 +1,925 @@
CMAKE_MINIMUM_REQUIRED(VERSION 3.5 FATAL_ERROR)
# ---[ Setup project
# C-only library by default; CXX is enabled later, on demand, when tests or
# benchmarks are requested.
PROJECT(
cpuinfo
LANGUAGES C
)
# ---[ Options.
# Tri-state selectors: "default" defers to CMake's own behaviour (e.g.
# BUILD_SHARED_LIBS for the library type); "static"/"shared" force a choice.
SET(CPUINFO_LIBRARY_TYPE "default" CACHE STRING "Type of cpuinfo library (shared, static, or default) to build")
SET_PROPERTY(CACHE CPUINFO_LIBRARY_TYPE PROPERTY STRINGS default static shared)
SET(CPUINFO_RUNTIME_TYPE "default" CACHE STRING "Type of runtime library (shared, static, or default) to use")
SET_PROPERTY(CACHE CPUINFO_RUNTIME_TYPE PROPERTY STRINGS default static shared)
SET(CPUINFO_LOG_LEVEL "default" CACHE STRING "Minimum logging level (info with lower severity will be ignored)")
SET_PROPERTY(CACHE CPUINFO_LOG_LEVEL PROPERTY STRINGS default debug info warning error fatal none)
# Logging to stdio defaults off on Android builds only.
IF(ANDROID)
OPTION(CPUINFO_LOG_TO_STDIO "Log errors, warnings, and information to stdout/stderr" OFF)
ELSE()
OPTION(CPUINFO_LOG_TO_STDIO "Log errors, warnings, and information to stdout/stderr" ON)
ENDIF()
OPTION(CPUINFO_BUILD_TOOLS "Build command-line tools" OFF)
OPTION(CPUINFO_BUILD_UNIT_TESTS "Build cpuinfo unit tests" OFF)
OPTION(CPUINFO_BUILD_MOCK_TESTS "Build cpuinfo mock tests" OFF)
OPTION(CPUINFO_BUILD_BENCHMARKS "Build cpuinfo micro-benchmarks" OFF)
OPTION(CPUINFO_BUILD_PKG_CONFIG "Build pkg-config manifest" OFF)
# USE_SYSTEM_LIBS is a master switch that sets the default for the two
# per-dependency options below.
OPTION(USE_SYSTEM_LIBS "Use system libraries instead of downloading and building them" OFF)
OPTION(USE_SYSTEM_GOOGLEBENCHMARK "Use system Google Benchmark library instead of downloading and building it" ${USE_SYSTEM_LIBS})
OPTION(USE_SYSTEM_GOOGLETEST "Use system Google Test library instead of downloading and building it" ${USE_SYSTEM_LIBS})
# ---[ CMake options
INCLUDE(GNUInstallDirs)
IF(CPUINFO_BUILD_UNIT_TESTS OR CPUINFO_BUILD_MOCK_TESTS)
ENABLE_TESTING()
ENDIF()
# Configure ${target} to compile as strict C99 (no GNU extensions).
# Declared as a FUNCTION rather than a MACRO: it only sets target properties
# (which are global), so it needs no caller-scope writes, and function scoping
# avoids macro argument text-substitution pitfalls.
FUNCTION(CPUINFO_TARGET_ENABLE_C99 target)
SET_TARGET_PROPERTIES(${target} PROPERTIES
C_STANDARD 99
C_EXTENSIONS NO)
ENDFUNCTION()
# Configure ${target} to compile as strict C++ (no GNU extensions).
# NOTE(review): despite the historical "CXX11" name (kept so existing call
# sites keep working), this sets the C++ standard to 14.
FUNCTION(CPUINFO_TARGET_ENABLE_CXX11 target)
SET_TARGET_PROPERTIES(${target} PROPERTIES
CXX_STANDARD 14
CXX_EXTENSIONS NO)
ENDFUNCTION()
# Select the MSVC runtime library (/MD[d] or /MT[d]) for ${target} when
# CPUINFO_RUNTIME_TYPE requests a specific one; "default" leaves the
# toolchain's choice untouched. No-op for non-MSVC toolchains.
FUNCTION(CPUINFO_TARGET_RUNTIME_LIBRARY target)
IF(MSVC AND NOT CPUINFO_RUNTIME_TYPE STREQUAL "default")
IF(CPUINFO_RUNTIME_TYPE STREQUAL "shared")
TARGET_COMPILE_OPTIONS(${target} PRIVATE
"/MD$<$<CONFIG:Debug>:d>")
ELSEIF(CPUINFO_RUNTIME_TYPE STREQUAL "static")
TARGET_COMPILE_OPTIONS(${target} PRIVATE
"/MT$<$<CONFIG:Debug>:d>")
ENDIF()
ENDIF()
ENDFUNCTION()
# -- [ Determine whether building for Apple's desktop or mobile OSes
IF(CMAKE_SYSTEM_NAME MATCHES "^(Darwin|iOS|tvOS|watchOS)$")
SET(IS_APPLE_OS TRUE)
ELSE()
SET(IS_APPLE_OS FALSE)
ENDIF()
# -- [ Determine target processor
# CPUINFO_TARGET_PROCESSOR is the normalised architecture name used by the
# checks below; it starts as CMAKE_SYSTEM_PROCESSOR and is overridden for the
# special cases that follow.
SET(CPUINFO_TARGET_PROCESSOR "${CMAKE_SYSTEM_PROCESSOR}")
# FreeBSD reports "amd64"; canonicalise to "AMD64" so the x86 regexes match.
IF(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND CPUINFO_TARGET_PROCESSOR STREQUAL "amd64")
SET(CPUINFO_TARGET_PROCESSOR "AMD64")
ENDIF()
# On Apple platforms CMAKE_OSX_ARCHITECTURES wins; under Visual Studio the
# platform name (Win32/x64/ARM64/ARM64EC...) determines the architecture.
IF(IS_APPLE_OS AND CMAKE_OSX_ARCHITECTURES MATCHES "^(x86_64|arm64.*)$")
SET(CPUINFO_TARGET_PROCESSOR "${CMAKE_OSX_ARCHITECTURES}")
ELSEIF(CMAKE_GENERATOR MATCHES "^Visual Studio " AND CMAKE_VS_PLATFORM_NAME)
IF(CMAKE_VS_PLATFORM_NAME STREQUAL "Win32")
SET(CPUINFO_TARGET_PROCESSOR "x86")
ELSEIF(CMAKE_VS_PLATFORM_NAME STREQUAL "x64")
SET(CPUINFO_TARGET_PROCESSOR "x86_64")
ELSEIF(CMAKE_VS_PLATFORM_NAME STREQUAL "ARM64")
SET(CPUINFO_TARGET_PROCESSOR "arm64")
ELSEIF(CMAKE_VS_PLATFORM_NAME MATCHES "^(ARM64EC|arm64ec|ARM64E|arm64e)")
SET(CPUINFO_TARGET_PROCESSOR "arm64")
ELSE()
MESSAGE(FATAL_ERROR "Unsupported Visual Studio architecture \"${CMAKE_VS_PLATFORM_NAME}\"")
ENDIF()
ENDIF()
# ---[ Build flags
# Assume the platform is supported until one of the checks below proves
# otherwise; unsupported platforms still build, but initialization fails.
SET(CPUINFO_SUPPORTED_PLATFORM TRUE)
IF(NOT CMAKE_SYSTEM_PROCESSOR)
IF(NOT IOS)
MESSAGE(WARNING
"Target processor architecture is not specified. "
"cpuinfo will compile, but cpuinfo_initialize() will always fail.")
SET(CPUINFO_SUPPORTED_PLATFORM FALSE)
ENDIF()
ELSEIF(NOT CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?|armv[5-8].*|aarch64|arm64.*|ARM64.*|riscv(32|64))$")
MESSAGE(WARNING
"Target processor architecture \"${CPUINFO_TARGET_PROCESSOR}\" is not supported in cpuinfo. "
"cpuinfo will compile, but cpuinfo_initialize() will always fail.")
SET(CPUINFO_SUPPORTED_PLATFORM FALSE)
ENDIF()
# Operating-system support check; mirrors the processor check above.
IF(NOT CMAKE_SYSTEM_NAME)
MESSAGE(WARNING
"Target operating system is not specified. "
"cpuinfo will compile, but cpuinfo_initialize() will always fail.")
SET(CPUINFO_SUPPORTED_PLATFORM FALSE)
ELSEIF(NOT CMAKE_SYSTEM_NAME MATCHES "^(Windows|WindowsStore|CYGWIN|MSYS|Darwin|Linux|Android|FreeBSD)$")
# Quoted expansion: prevents IF() from re-dereferencing the value under
# CMP0054 should it ever match a variable name.
IF("${CMAKE_VERSION}" VERSION_GREATER_EQUAL "3.14" AND NOT IS_APPLE_OS)
MESSAGE(WARNING
"Target operating system \"${CMAKE_SYSTEM_NAME}\" is not supported in cpuinfo. "
"cpuinfo will compile, but cpuinfo_initialize() will always fail.")
SET(CPUINFO_SUPPORTED_PLATFORM FALSE)
ENDIF()
ENDIF()
# C++ is only needed for the test/benchmark executables; enable it lazily.
IF(CPUINFO_SUPPORTED_PLATFORM)
IF(CPUINFO_BUILD_MOCK_TESTS OR CPUINFO_BUILD_UNIT_TESTS OR CPUINFO_BUILD_BENCHMARKS)
ENABLE_LANGUAGE(CXX)
ENDIF()
ENDIF()
# ---[ Download deps
# Where downloaded third-party sources and their build trees are placed.
# NOTE(review): these use CMAKE_SOURCE_DIR/CMAKE_BINARY_DIR (top of the whole
# build), not PROJECT_SOURCE_DIR — when cpuinfo is embedded as a subproject the
# deps land in the superproject's tree; confirm that is intended.
SET(CONFU_DEPENDENCIES_SOURCE_DIR ${CMAKE_SOURCE_DIR}/deps
CACHE PATH "Confu-style dependencies source directory")
SET(CONFU_DEPENDENCIES_BINARY_DIR ${CMAKE_BINARY_DIR}/deps
CACHE PATH "Confu-style dependencies binary directory")
# Google Test is needed for mock/unit tests: use the system copy, a
# user-provided source dir, or download it at configure time.
IF(CPUINFO_SUPPORTED_PLATFORM AND (CPUINFO_BUILD_MOCK_TESTS OR CPUINFO_BUILD_UNIT_TESTS))
IF(USE_SYSTEM_GOOGLETEST)
FIND_PACKAGE(GTest REQUIRED)
ELSEIF(NOT DEFINED GOOGLETEST_SOURCE_DIR)
MESSAGE(STATUS "Downloading Google Test to ${CONFU_DEPENDENCIES_SOURCE_DIR}/googletest (define GOOGLETEST_SOURCE_DIR to avoid it)")
CONFIGURE_FILE(cmake/DownloadGoogleTest.cmake "${CONFU_DEPENDENCIES_BINARY_DIR}/googletest-download/CMakeLists.txt")
# Fail fast if the download sub-build cannot be configured or built;
# previously these exit codes were ignored and the failure surfaced later
# as a confusing error in ADD_SUBDIRECTORY.
EXECUTE_PROCESS(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${CONFU_DEPENDENCIES_BINARY_DIR}/googletest-download"
RESULT_VARIABLE GOOGLETEST_CONFIGURE_RESULT)
IF(NOT GOOGLETEST_CONFIGURE_RESULT EQUAL 0)
MESSAGE(FATAL_ERROR "Failed to configure Google Test download (exit code ${GOOGLETEST_CONFIGURE_RESULT})")
ENDIF()
EXECUTE_PROCESS(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${CONFU_DEPENDENCIES_BINARY_DIR}/googletest-download"
RESULT_VARIABLE GOOGLETEST_BUILD_RESULT)
IF(NOT GOOGLETEST_BUILD_RESULT EQUAL 0)
MESSAGE(FATAL_ERROR "Failed to download Google Test (exit code ${GOOGLETEST_BUILD_RESULT})")
ENDIF()
SET(GOOGLETEST_SOURCE_DIR "${CONFU_DEPENDENCIES_SOURCE_DIR}/googletest" CACHE STRING "Google Test source directory")
ENDIF()
ENDIF()
# Google Benchmark is needed for micro-benchmarks: use the system copy, a
# user-provided source dir, or download it at configure time.
IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_BENCHMARKS)
IF(USE_SYSTEM_GOOGLEBENCHMARK)
FIND_PACKAGE(benchmark REQUIRED)
ELSEIF(NOT DEFINED GOOGLEBENCHMARK_SOURCE_DIR)
MESSAGE(STATUS "Downloading Google Benchmark to ${CONFU_DEPENDENCIES_SOURCE_DIR}/googlebenchmark (define GOOGLEBENCHMARK_SOURCE_DIR to avoid it)")
CONFIGURE_FILE(cmake/DownloadGoogleBenchmark.cmake "${CONFU_DEPENDENCIES_BINARY_DIR}/googlebenchmark-download/CMakeLists.txt")
# Fail fast if the download sub-build cannot be configured or built;
# previously these exit codes were ignored and the failure surfaced later.
EXECUTE_PROCESS(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
WORKING_DIRECTORY "${CONFU_DEPENDENCIES_BINARY_DIR}/googlebenchmark-download"
RESULT_VARIABLE GOOGLEBENCHMARK_CONFIGURE_RESULT)
IF(NOT GOOGLEBENCHMARK_CONFIGURE_RESULT EQUAL 0)
MESSAGE(FATAL_ERROR "Failed to configure Google Benchmark download (exit code ${GOOGLEBENCHMARK_CONFIGURE_RESULT})")
ENDIF()
EXECUTE_PROCESS(COMMAND "${CMAKE_COMMAND}" --build .
WORKING_DIRECTORY "${CONFU_DEPENDENCIES_BINARY_DIR}/googlebenchmark-download"
RESULT_VARIABLE GOOGLEBENCHMARK_BUILD_RESULT)
IF(NOT GOOGLEBENCHMARK_BUILD_RESULT EQUAL 0)
MESSAGE(FATAL_ERROR "Failed to download Google Benchmark (exit code ${GOOGLEBENCHMARK_BUILD_RESULT})")
ENDIF()
SET(GOOGLEBENCHMARK_SOURCE_DIR "${CONFU_DEPENDENCIES_SOURCE_DIR}/googlebenchmark" CACHE STRING "Google Benchmark source directory")
ENDIF()
ENDIF()
# ---[ cpuinfo library
# Base sources shared by every configuration; architecture- and OS-specific
# sources are appended below according to the detected target.
SET(CPUINFO_SRCS src/api.c src/cache.c src/init.c src/log.c)
IF(CPUINFO_SUPPORTED_PLATFORM)
# x86/x86_64 (all OSes except Emscripten).
IF(NOT CMAKE_SYSTEM_NAME STREQUAL "Emscripten" AND (CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$" OR IOS_ARCH MATCHES "^(i386|x86_64)$"))
LIST(APPEND CPUINFO_SRCS
src/x86/init.c
src/x86/info.c
src/x86/vendor.c
src/x86/uarch.c
src/x86/name.c
src/x86/topology.c
src/x86/isa.c
src/x86/cache/init.c
src/x86/cache/descriptor.c
src/x86/cache/deterministic.c)
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android")
LIST(APPEND CPUINFO_SRCS
src/x86/linux/init.c
src/x86/linux/cpuinfo.c)
ELSEIF(IS_APPLE_OS)
LIST(APPEND CPUINFO_SRCS src/x86/mach/init.c)
ELSEIF(CMAKE_SYSTEM_NAME MATCHES "^(Windows|WindowsStore|CYGWIN|MSYS)$")
LIST(APPEND CPUINFO_SRCS src/x86/windows/init.c)
ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
LIST(APPEND CPUINFO_SRCS src/x86/freebsd/init.c)
ENDIF()
# Windows-on-ARM64.
ELSEIF(CMAKE_SYSTEM_NAME MATCHES "^Windows" AND CPUINFO_TARGET_PROCESSOR MATCHES "^(ARM64|arm64)$")
LIST(APPEND CPUINFO_SRCS
src/arm/windows/init-by-logical-sys-info.c
src/arm/windows/init.c)
# 32- and 64-bit ARM on the remaining OSes.
ELSEIF(CPUINFO_TARGET_PROCESSOR MATCHES "^(armv[5-8].*|aarch64|arm64.*)$" OR IOS_ARCH MATCHES "^(armv7.*|arm64.*)$")
LIST(APPEND CPUINFO_SRCS
src/arm/uarch.c
src/arm/cache.c)
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android")
LIST(APPEND CPUINFO_SRCS
src/arm/linux/init.c
src/arm/linux/cpuinfo.c
src/arm/linux/clusters.c
src/arm/linux/chipset.c
src/arm/linux/midr.c
src/arm/linux/hwcap.c)
IF(CMAKE_SYSTEM_PROCESSOR MATCHES "^armv[5-8]")
LIST(APPEND CPUINFO_SRCS src/arm/linux/aarch32-isa.c)
# NOTE(review): -marm forces ARM (not Thumb) mode for this one file on
# armeabi — presumably required by its code; confirm against the source.
IF(CMAKE_SYSTEM_NAME STREQUAL "Android" AND ANDROID_ABI STREQUAL "armeabi")
SET_SOURCE_FILES_PROPERTIES(src/arm/linux/aarch32-isa.c PROPERTIES COMPILE_FLAGS -marm)
ENDIF()
ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)$")
LIST(APPEND CPUINFO_SRCS src/arm/linux/aarch64-isa.c)
ENDIF()
ELSEIF(IS_APPLE_OS AND CPUINFO_TARGET_PROCESSOR MATCHES "arm64.*")
LIST(APPEND CPUINFO_SRCS src/arm/mach/init.c)
ENDIF()
IF(CMAKE_SYSTEM_NAME STREQUAL "Android")
LIST(APPEND CPUINFO_SRCS
src/arm/android/properties.c)
ENDIF()
# RISC-V (Linux only has platform-specific sources).
ELSEIF(CPUINFO_TARGET_PROCESSOR MATCHES "^(riscv(32|64))$")
LIST(APPEND CPUINFO_SRCS
src/riscv/uarch.c)
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux")
LIST(APPEND CPUINFO_SRCS
src/riscv/linux/init.c
src/riscv/linux/riscv-hw.c
src/riscv/linux/riscv-isa.c)
ENDIF()
ENDIF()
IF(CMAKE_SYSTEM_NAME STREQUAL "Emscripten")
LIST(APPEND CPUINFO_SRCS
src/emscripten/init.c)
ENDIF()
# OS-level helpers (procfs/sysfs parsing on Linux, Mach/FreeBSD topology).
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android")
LIST(APPEND CPUINFO_SRCS
src/linux/smallfile.c
src/linux/multiline.c
src/linux/cpulist.c
src/linux/processors.c)
ELSEIF(IS_APPLE_OS)
LIST(APPEND CPUINFO_SRCS src/mach/topology.c)
ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
LIST(APPEND CPUINFO_SRCS src/freebsd/topology.c)
ENDIF()
# A threads library is required on these OSes; prefer pthreads.
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android" OR CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
SET(CMAKE_THREAD_PREFER_PTHREAD TRUE)
SET(THREADS_PREFER_PTHREAD_FLAG TRUE)
FIND_PACKAGE(Threads REQUIRED)
ENDIF()
ENDIF()
# Map the requested CPUINFO_LIBRARY_TYPE onto an ADD_LIBRARY keyword:
# "default" leaves the keyword out (so BUILD_SHARED_LIBS decides),
# "shared"/"static" force the corresponding library kind.
IF(CPUINFO_LIBRARY_TYPE STREQUAL "default")
SET(CPUINFO_LIBRARY_KIND)
ELSEIF(CPUINFO_LIBRARY_TYPE STREQUAL "shared")
SET(CPUINFO_LIBRARY_KIND SHARED)
ELSEIF(CPUINFO_LIBRARY_TYPE STREQUAL "static")
SET(CPUINFO_LIBRARY_KIND STATIC)
ELSE()
MESSAGE(FATAL_ERROR "Unsupported library type ${CPUINFO_LIBRARY_TYPE}")
ENDIF()
ADD_LIBRARY(cpuinfo ${CPUINFO_LIBRARY_KIND} ${CPUINFO_SRCS})
# Internal static variant of the library (used by tests/tools); always quiet.
ADD_LIBRARY(cpuinfo_internals STATIC ${CPUINFO_SRCS})
CPUINFO_TARGET_ENABLE_C99(cpuinfo)
CPUINFO_TARGET_ENABLE_C99(cpuinfo_internals)
CPUINFO_TARGET_RUNTIME_LIBRARY(cpuinfo)
IF(CMAKE_SYSTEM_NAME MATCHES "^(Windows|WindowsStore|CYGWIN|MSYS)$")
# Target Windows 7+ API
TARGET_COMPILE_DEFINITIONS(cpuinfo PRIVATE _WIN32_WINNT=0x0601 _CRT_SECURE_NO_WARNINGS)
TARGET_COMPILE_DEFINITIONS(cpuinfo_internals PRIVATE _WIN32_WINNT=0x0601 _CRT_SECURE_NO_WARNINGS)
# Explicitly link Kernel32 for UWP build
# (case of IF/ENDIF normalized to match the rest of this file.)
IF(CMAKE_SYSTEM_NAME STREQUAL "WindowsStore")
TARGET_LINK_LIBRARIES(cpuinfo PUBLIC Kernel32)
ENDIF()
ENDIF()
# Android system log library, only when stdio logging is disabled.
IF(ANDROID AND NOT CPUINFO_LOG_TO_STDIO)
TARGET_LINK_LIBRARIES(cpuinfo PRIVATE "log")
ENDIF()
SET_TARGET_PROPERTIES(cpuinfo PROPERTIES PUBLIC_HEADER include/cpuinfo.h)
# Public headers: build-tree path for in-tree consumers, install path for
# installed packages; src/ stays private.
TARGET_INCLUDE_DIRECTORIES(cpuinfo BEFORE PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include> $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
TARGET_INCLUDE_DIRECTORIES(cpuinfo BEFORE PRIVATE src)
TARGET_INCLUDE_DIRECTORIES(cpuinfo_internals BEFORE PUBLIC include src)
TARGET_COMPILE_DEFINITIONS(cpuinfo PRIVATE "CPUINFO_LOG_TO_STDIO=$<BOOL:${CPUINFO_LOG_TO_STDIO}>")
# Translate the textual CPUINFO_LOG_LEVEL option into its numeric value,
# then apply it through a single compile definition.
IF(CPUINFO_LOG_LEVEL STREQUAL "default")
# default logging level: error (subject to change)
SET(CPUINFO_LOG_LEVEL_VALUE 2)
ELSEIF(CPUINFO_LOG_LEVEL STREQUAL "debug")
SET(CPUINFO_LOG_LEVEL_VALUE 5)
ELSEIF(CPUINFO_LOG_LEVEL STREQUAL "info")
SET(CPUINFO_LOG_LEVEL_VALUE 4)
ELSEIF(CPUINFO_LOG_LEVEL STREQUAL "warning")
SET(CPUINFO_LOG_LEVEL_VALUE 3)
ELSEIF(CPUINFO_LOG_LEVEL STREQUAL "error")
SET(CPUINFO_LOG_LEVEL_VALUE 2)
ELSEIF(CPUINFO_LOG_LEVEL STREQUAL "fatal")
SET(CPUINFO_LOG_LEVEL_VALUE 1)
ELSEIF(CPUINFO_LOG_LEVEL STREQUAL "none")
SET(CPUINFO_LOG_LEVEL_VALUE 0)
ELSE()
MESSAGE(FATAL_ERROR "Unsupported logging level ${CPUINFO_LOG_LEVEL}")
ENDIF()
TARGET_COMPILE_DEFINITIONS(cpuinfo PRIVATE "CPUINFO_LOG_LEVEL=${CPUINFO_LOG_LEVEL_VALUE}")
# The internals library is always silent and logs (if at all) to stdio.
TARGET_COMPILE_DEFINITIONS(cpuinfo_internals PRIVATE "CPUINFO_LOG_LEVEL=0")
TARGET_COMPILE_DEFINITIONS(cpuinfo_internals PRIVATE "CPUINFO_LOG_TO_STDIO=1")
# Consumers get CPUINFO_SUPPORTED_PLATFORM as a compile-time flag.
IF(CPUINFO_SUPPORTED_PLATFORM)
TARGET_COMPILE_DEFINITIONS(cpuinfo INTERFACE CPUINFO_SUPPORTED_PLATFORM=1)
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android")
# NOTE(review): modern CMake would link Threads::Threads rather than
# ${CMAKE_THREAD_LIBS_INIT}; left as-is to avoid changing emitted flags.
TARGET_LINK_LIBRARIES(cpuinfo PUBLIC ${CMAKE_THREAD_LIBS_INIT})
TARGET_LINK_LIBRARIES(cpuinfo_internals PUBLIC ${CMAKE_THREAD_LIBS_INIT})
TARGET_COMPILE_DEFINITIONS(cpuinfo PRIVATE _GNU_SOURCE=1)
TARGET_COMPILE_DEFINITIONS(cpuinfo_internals PRIVATE _GNU_SOURCE=1)
ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
TARGET_LINK_LIBRARIES(cpuinfo PUBLIC ${CMAKE_THREAD_LIBS_INIT})
TARGET_LINK_LIBRARIES(cpuinfo_internals PUBLIC ${CMAKE_THREAD_LIBS_INIT})
ENDIF()
ELSE()
TARGET_COMPILE_DEFINITIONS(cpuinfo INTERFACE CPUINFO_SUPPORTED_PLATFORM=0)
ENDIF()
# Namespaced alias so in-tree consumers can link cpuinfo::cpuinfo.
ADD_LIBRARY(${PROJECT_NAME}::cpuinfo ALIAS cpuinfo)
# support find_package(cpuinfo CONFIG)
INCLUDE(CMakePackageConfigHelpers)
GET_FILENAME_COMPONENT(CONFIG_FILE_PATH ${CMAKE_CURRENT_BINARY_DIR}/cpuinfo-config.cmake ABSOLUTE)
CONFIGURE_PACKAGE_CONFIG_FILE(
cmake/cpuinfo-config.cmake.in ${CONFIG_FILE_PATH}
INSTALL_DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME})
INSTALL(FILES ${CONFIG_FILE_PATH}
DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME}) # cpuinfo_DIR ${prefix}/share/cpuinfo
# Install the library, its public header, and the export set that
# provides the imported cpuinfo::cpuinfo target.
INSTALL(TARGETS cpuinfo
EXPORT cpuinfo-targets
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
INSTALL(EXPORT cpuinfo-targets
NAMESPACE ${PROJECT_NAME}:: # IMPORTED cpuinfo::cpuinfo
DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${PROJECT_NAME})
# ---[ cpuinfo micro-benchmarks
IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_BENCHMARKS)
# ---[ Build google benchmark
IF(NOT TARGET benchmark AND NOT USE_SYSTEM_GOOGLEBENCHMARK)
SET(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "")
ADD_SUBDIRECTORY(
"${GOOGLEBENCHMARK_SOURCE_DIR}"
"${CONFU_DEPENDENCIES_BINARY_DIR}/googlebenchmark")
ENDIF()
# Explicit PRIVATE keyword (matches the mock tests below; the keyword-less
# TARGET_LINK_LIBRARIES signature has legacy semantics).
IF(CMAKE_SYSTEM_NAME MATCHES "^(Linux|Android)$")
ADD_EXECUTABLE(get-current-bench bench/get-current.cc)
TARGET_LINK_LIBRARIES(get-current-bench PRIVATE cpuinfo benchmark)
ENDIF()
ADD_EXECUTABLE(init-bench bench/init.cc)
TARGET_LINK_LIBRARIES(init-bench PRIVATE cpuinfo benchmark)
ENDIF()
IF(CPUINFO_SUPPORTED_PLATFORM)
IF(CPUINFO_BUILD_MOCK_TESTS OR CPUINFO_BUILD_UNIT_TESTS)
# ---[ Build google test
IF(NOT TARGET gtest AND NOT USE_SYSTEM_GOOGLETEST)
# Make gtest use the shared CRT whenever we are not forcing a static
# runtime. NOTE(review): CACHE ... FORCE stomps any user-provided value.
IF(MSVC AND NOT CPUINFO_RUNTIME_TYPE STREQUAL "static")
SET(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
ENDIF()
ADD_SUBDIRECTORY(
"${GOOGLETEST_SOURCE_DIR}"
"${CONFU_DEPENDENCIES_BINARY_DIR}/googletest")
ENDIF()
ENDIF()
ENDIF()
# ---[ cpuinfo mock library and mock tests
# (The enclosing IF below is closed further down the file, after all mock
# test registrations.)
IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_MOCK_TESTS)
# The mock library is the regular sources plus mock cpuid/file backends.
SET(CPUINFO_MOCK_SRCS "${CPUINFO_SRCS}")
IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
LIST(APPEND CPUINFO_MOCK_SRCS src/x86/mockcpuid.c)
ENDIF()
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android")
LIST(APPEND CPUINFO_MOCK_SRCS src/linux/mockfile.c)
ENDIF()
ADD_LIBRARY(cpuinfo_mock STATIC ${CPUINFO_MOCK_SRCS})
CPUINFO_TARGET_ENABLE_C99(cpuinfo_mock)
CPUINFO_TARGET_RUNTIME_LIBRARY(cpuinfo_mock)
SET_TARGET_PROPERTIES(cpuinfo_mock PROPERTIES PUBLIC_HEADER include/cpuinfo.h)
TARGET_INCLUDE_DIRECTORIES(cpuinfo_mock BEFORE PUBLIC include)
TARGET_INCLUDE_DIRECTORIES(cpuinfo_mock BEFORE PRIVATE src)
# CPUINFO_MOCK is PUBLIC: test sources must see the mocked declarations too.
TARGET_COMPILE_DEFINITIONS(cpuinfo_mock PUBLIC "CPUINFO_MOCK=1")
# Mock builds log everything, to stdio.
TARGET_COMPILE_DEFINITIONS(cpuinfo_mock PRIVATE "CPUINFO_LOG_LEVEL=5")
TARGET_COMPILE_DEFINITIONS(cpuinfo_mock PRIVATE "CPUINFO_LOG_TO_STDIO=1")
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android")
TARGET_LINK_LIBRARIES(cpuinfo_mock PUBLIC ${CMAKE_THREAD_LIBS_INIT})
TARGET_COMPILE_DEFINITIONS(cpuinfo_mock PRIVATE _GNU_SOURCE=1)
ENDIF()
IF(CMAKE_SYSTEM_NAME STREQUAL "Android" AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv5te|armv7-a)$")
# 32-bit Android mock tests: each name below yields one executable
# (<name>-test from test/mock/<name>.cc) and one CTest registration,
# identical in shape for every device dump.
FOREACH(CPUINFO_MOCK_TEST_NAME IN ITEMS
atm7029b-tablet
blu-r1-hd
galaxy-a3-2016-eu
galaxy-a8-2016-duos
galaxy-grand-prime-value-edition
galaxy-j1-2016
galaxy-j5
galaxy-j7-prime
galaxy-j7-tmobile
galaxy-j7-uae
galaxy-s3-us
galaxy-s4-us
galaxy-s5-global
galaxy-s5-us
galaxy-tab-3-7.0
galaxy-tab-3-lite
galaxy-win-duos
huawei-ascend-p7
huawei-honor-6
lenovo-a6600-plus
lenovo-vibe-x2
lg-k10-eu
lg-optimus-g-pro
moto-e-gen1
moto-g-gen1
moto-g-gen2
moto-g-gen3
moto-g-gen4
moto-g-gen5
nexus-s
nexus4
nexus6
nexus10
padcod-10.1
xiaomi-redmi-2a
xperia-sl)
ADD_EXECUTABLE(${CPUINFO_MOCK_TEST_NAME}-test test/mock/${CPUINFO_MOCK_TEST_NAME}.cc)
TARGET_INCLUDE_DIRECTORIES(${CPUINFO_MOCK_TEST_NAME}-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(${CPUINFO_MOCK_TEST_NAME}-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME ${CPUINFO_MOCK_TEST_NAME}-test COMMAND ${CPUINFO_MOCK_TEST_NAME}-test)
ENDFOREACH()
ENDIF()
IF(CMAKE_SYSTEM_NAME STREQUAL "Android" AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv5te|armv7-a|aarch64)$")
ADD_EXECUTABLE(alcatel-revvl-test test/mock/alcatel-revvl.cc)
TARGET_INCLUDE_DIRECTORIES(alcatel-revvl-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(alcatel-revvl-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME alcatel-revvl-test COMMAND alcatel-revvl-test)
ADD_EXECUTABLE(galaxy-a8-2018-test test/mock/galaxy-a8-2018.cc)
TARGET_INCLUDE_DIRECTORIES(galaxy-a8-2018-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(galaxy-a8-2018-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME galaxy-a8-2018-test COMMAND galaxy-a8-2018-test)
ADD_EXECUTABLE(galaxy-c9-pro-test test/mock/galaxy-c9-pro.cc)
TARGET_INCLUDE_DIRECTORIES(galaxy-c9-pro-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(galaxy-c9-pro-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME galaxy-c9-pro-test COMMAND galaxy-c9-pro-test)
ADD_EXECUTABLE(galaxy-s6-test test/mock/galaxy-s6.cc)
TARGET_INCLUDE_DIRECTORIES(galaxy-s6-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(galaxy-s6-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME galaxy-s6-test COMMAND galaxy-s6-test)
ADD_EXECUTABLE(galaxy-s7-us-test test/mock/galaxy-s7-us.cc)
TARGET_INCLUDE_DIRECTORIES(galaxy-s7-us-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(galaxy-s7-us-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME galaxy-s7-us-test COMMAND galaxy-s7-us-test)
ADD_EXECUTABLE(galaxy-s7-global-test test/mock/galaxy-s7-global.cc)
TARGET_INCLUDE_DIRECTORIES(galaxy-s7-global-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(galaxy-s7-global-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME galaxy-s7-global-test COMMAND galaxy-s7-global-test)
ADD_EXECUTABLE(galaxy-s8-us-test test/mock/galaxy-s8-us.cc)
TARGET_INCLUDE_DIRECTORIES(galaxy-s8-us-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(galaxy-s8-us-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME galaxy-s8-us-test COMMAND galaxy-s8-us-test)
ADD_EXECUTABLE(galaxy-s8-global-test test/mock/galaxy-s8-global.cc)
TARGET_INCLUDE_DIRECTORIES(galaxy-s8-global-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(galaxy-s8-global-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME galaxy-s8-global-test COMMAND galaxy-s8-global-test)
ADD_EXECUTABLE(galaxy-s9-us-test test/mock/galaxy-s9-us.cc)
TARGET_INCLUDE_DIRECTORIES(galaxy-s9-us-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(galaxy-s9-us-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME galaxy-s9-us-test COMMAND galaxy-s9-us-test)
ADD_EXECUTABLE(galaxy-s9-global-test test/mock/galaxy-s9-global.cc)
TARGET_INCLUDE_DIRECTORIES(galaxy-s9-global-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(galaxy-s9-global-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME galaxy-s9-global-test COMMAND galaxy-s9-global-test)
ADD_EXECUTABLE(huawei-mate-8-test test/mock/huawei-mate-8.cc)
TARGET_INCLUDE_DIRECTORIES(huawei-mate-8-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(huawei-mate-8-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME huawei-mate-8-test COMMAND huawei-mate-8-test)
ADD_EXECUTABLE(huawei-mate-9-test test/mock/huawei-mate-9.cc)
TARGET_INCLUDE_DIRECTORIES(huawei-mate-9-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(huawei-mate-9-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME huawei-mate-9-test COMMAND huawei-mate-9-test)
ADD_EXECUTABLE(huawei-mate-10-test test/mock/huawei-mate-10.cc)
TARGET_INCLUDE_DIRECTORIES(huawei-mate-10-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(huawei-mate-10-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME huawei-mate-10-test COMMAND huawei-mate-10-test)
ADD_EXECUTABLE(huawei-mate-20-test test/mock/huawei-mate-20.cc)
TARGET_INCLUDE_DIRECTORIES(huawei-mate-20-test BEFORE PRIVATE test/mock)
TARGET_LINK_LIBRARIES(huawei-mate-20-test PRIVATE cpuinfo_mock gtest)
ADD_TEST(NAME huawei-mate-20-test COMMAND huawei-mate-20-test)
# Device mock tests (ARM/ARM64 Android section): one test executable per
# device data dump in test/mock/, linked against the mock cpuinfo library
# and gtest. Every registration follows the exact same four-command
# pattern, so generate the targets from a list instead of repeating the
# boilerplate 24 times. Target names, source paths, link libraries, and
# CTest registrations are identical to the expanded form.
FOREACH(MOCK_TEST_NAME
    huawei-p8-lite
    huawei-p9-lite
    huawei-p20-pro
    iconia-one-10
    meizu-pro-6
    meizu-pro-6s
    meizu-pro-7-plus
    nexus5x
    nexus6p
    nexus9
    oneplus-3t
    oneplus-5
    oneplus-5t
    oppo-a37
    oppo-r9
    oppo-r15
    pixel
    pixel-c
    pixel-xl
    pixel-2-xl
    xiaomi-mi-5c
    xiaomi-redmi-note-3
    xiaomi-redmi-note-4
    xperia-c4-dual)
  ADD_EXECUTABLE(${MOCK_TEST_NAME}-test test/mock/${MOCK_TEST_NAME}.cc)
  # BEFORE: the mock headers must shadow any same-named regular headers.
  TARGET_INCLUDE_DIRECTORIES(${MOCK_TEST_NAME}-test BEFORE PRIVATE test/mock)
  TARGET_LINK_LIBRARIES(${MOCK_TEST_NAME}-test PRIVATE cpuinfo_mock gtest)
  ADD_TEST(NAME ${MOCK_TEST_NAME}-test COMMAND ${MOCK_TEST_NAME}-test)
ENDFOREACH()
ENDIF()
IF(CMAKE_SYSTEM_NAME STREQUAL "Android" AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(i686|x86_64)$")
# Device mock tests for x86/x86-64 Android devices. All six registrations
# follow the same four-command pattern as the ARM section above, so generate
# them from a list. Target names, sources, link libraries, and CTest
# registrations are identical to the expanded form.
FOREACH(MOCK_TEST_NAME
    alldocube-iwork8
    leagoo-t5c
    memo-pad-7
    zenfone-c
    zenfone-2
    zenfone-2e)
  ADD_EXECUTABLE(${MOCK_TEST_NAME}-test test/mock/${MOCK_TEST_NAME}.cc)
  # BEFORE: the mock headers must shadow any same-named regular headers.
  TARGET_INCLUDE_DIRECTORIES(${MOCK_TEST_NAME}-test BEFORE PRIVATE test/mock)
  TARGET_LINK_LIBRARIES(${MOCK_TEST_NAME}-test PRIVATE cpuinfo_mock gtest)
  ADD_TEST(NAME ${MOCK_TEST_NAME}-test COMMAND ${MOCK_TEST_NAME}-test)
ENDFOREACH()
ENDIF()
ENDIF()
# ---[ cpuinfo unit tests
IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_UNIT_TESTS)
# init-test links the public cpuinfo library (not the mocks) together with
# gtest/gtest_main. CPUINFO_TARGET_ENABLE_CXX11 and
# CPUINFO_TARGET_RUNTIME_LIBRARY are project helpers defined elsewhere in
# this file; presumably they set the language standard and MSVC runtime —
# confirm at their definitions.
ADD_EXECUTABLE(init-test test/init.cc)
CPUINFO_TARGET_ENABLE_CXX11(init-test)
CPUINFO_TARGET_RUNTIME_LIBRARY(init-test)
TARGET_LINK_LIBRARIES(init-test PRIVATE cpuinfo gtest gtest_main)
ADD_TEST(NAME init-test COMMAND init-test)
# get-current-test is built only on Linux/Android.
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" OR CMAKE_SYSTEM_NAME STREQUAL "Android")
ADD_EXECUTABLE(get-current-test test/get-current.cc)
CPUINFO_TARGET_ENABLE_CXX11(get-current-test)
CPUINFO_TARGET_RUNTIME_LIBRARY(get-current-test)
TARGET_LINK_LIBRARIES(get-current-test PRIVATE cpuinfo gtest gtest_main)
ADD_TEST(NAME get-current-test COMMAND get-current-test)
ENDIF()
# x86/x86-64 only: brand-string-test links cpuinfo_internals (not the public
# library), giving it access to non-exported symbols.
IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
ADD_EXECUTABLE(brand-string-test test/name/brand-string.cc)
CPUINFO_TARGET_ENABLE_CXX11(brand-string-test)
CPUINFO_TARGET_RUNTIME_LIBRARY(brand-string-test)
TARGET_LINK_LIBRARIES(brand-string-test PRIVATE cpuinfo_internals gtest gtest_main)
ADD_TEST(NAME brand-string-test COMMAND brand-string-test)
ENDIF()
# ARM/ARM64 Android only: chipset-name and cache-heuristics tests.
IF(CMAKE_SYSTEM_NAME STREQUAL "Android" AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv[5-8].*|aarch64)$")
# Small C static library shared by the chipset-test sources below; it in
# turn links cpuinfo_internals.
ADD_LIBRARY(android_properties_interface STATIC test/name/android-properties-interface.c)
CPUINFO_TARGET_ENABLE_C99(android_properties_interface)
CPUINFO_TARGET_RUNTIME_LIBRARY(android_properties_interface)
TARGET_LINK_LIBRARIES(android_properties_interface PRIVATE cpuinfo_internals)
# chipset-test bundles one source per Android property / cpuinfo field used
# for chipset-name decoding.
ADD_EXECUTABLE(chipset-test
test/name/proc-cpuinfo-hardware.cc
test/name/ro-product-board.cc
test/name/ro-board-platform.cc
test/name/ro-mediatek-platform.cc
test/name/ro-arch.cc
test/name/ro-chipname.cc
test/name/android-properties.cc)
CPUINFO_TARGET_ENABLE_CXX11(chipset-test)
CPUINFO_TARGET_RUNTIME_LIBRARY(chipset-test)
TARGET_LINK_LIBRARIES(chipset-test PRIVATE android_properties_interface gtest gtest_main)
ADD_TEST(NAME chipset-test COMMAND chipset-test)
ADD_EXECUTABLE(cache-test test/arm-cache.cc)
CPUINFO_TARGET_ENABLE_CXX11(cache-test)
CPUINFO_TARGET_RUNTIME_LIBRARY(cache-test)
# Needed so C++ sees the C99 stdint limit/constant macros.
TARGET_COMPILE_DEFINITIONS(cache-test PRIVATE __STDC_LIMIT_MACROS=1 __STDC_CONSTANT_MACROS=1)
TARGET_LINK_LIBRARIES(cache-test PRIVATE cpuinfo_internals gtest gtest_main)
ADD_TEST(NAME cache-test COMMAND cache-test)
ENDIF()
ENDIF()
# ---[ Helper and debug tools
IF(CPUINFO_SUPPORTED_PLATFORM AND CPUINFO_BUILD_TOOLS)
# Three portable command-line tools, each linked against the public cpuinfo
# library and installed into the binary directory.
ADD_EXECUTABLE(isa-info tools/isa-info.c)
CPUINFO_TARGET_ENABLE_C99(isa-info)
CPUINFO_TARGET_RUNTIME_LIBRARY(isa-info)
TARGET_LINK_LIBRARIES(isa-info PRIVATE cpuinfo)
INSTALL(TARGETS isa-info RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
ADD_EXECUTABLE(cpu-info tools/cpu-info.c)
CPUINFO_TARGET_ENABLE_C99(cpu-info)
CPUINFO_TARGET_RUNTIME_LIBRARY(cpu-info)
TARGET_LINK_LIBRARIES(cpu-info PRIVATE cpuinfo)
INSTALL(TARGETS cpu-info RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
ADD_EXECUTABLE(cache-info tools/cache-info.c)
CPUINFO_TARGET_ENABLE_C99(cache-info)
CPUINFO_TARGET_RUNTIME_LIBRARY(cache-info)
TARGET_LINK_LIBRARIES(cache-info PRIVATE cpuinfo)
INSTALL(TARGETS cache-info RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
# ARM/ARM64 Linux/Android only debug dumpers (not installed).
IF(CMAKE_SYSTEM_NAME MATCHES "^(Android|Linux)$" AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv[5-8].*|aarch64)$")
# auxv-dump needs the dynamic loader library (portable via CMAKE_DL_LIBS).
ADD_EXECUTABLE(auxv-dump tools/auxv-dump.c)
CPUINFO_TARGET_ENABLE_C99(auxv-dump)
CPUINFO_TARGET_RUNTIME_LIBRARY(auxv-dump)
TARGET_LINK_LIBRARIES(auxv-dump PRIVATE ${CMAKE_DL_LIBS} cpuinfo)
# NOTE(review): cpuinfo-dump links no libraries — presumably a standalone
# /proc/cpuinfo reader; confirm tools/cpuinfo-dump.c has no cpuinfo
# dependency.
ADD_EXECUTABLE(cpuinfo-dump tools/cpuinfo-dump.c)
CPUINFO_TARGET_ENABLE_C99(cpuinfo-dump)
CPUINFO_TARGET_RUNTIME_LIBRARY(cpuinfo-dump)
ENDIF()
# x86/x86-64 only: cpuid-dump compiles against the library's internal
# headers (src, include) directly instead of linking the library.
IF(CPUINFO_TARGET_PROCESSOR MATCHES "^(i[3-6]86|AMD64|x86(_64)?)$")
ADD_EXECUTABLE(cpuid-dump tools/cpuid-dump.c)
CPUINFO_TARGET_ENABLE_C99(cpuid-dump)
CPUINFO_TARGET_RUNTIME_LIBRARY(cpuid-dump)
TARGET_INCLUDE_DIRECTORIES(cpuid-dump BEFORE PRIVATE src)
TARGET_INCLUDE_DIRECTORIES(cpuid-dump BEFORE PRIVATE include)
INSTALL(TARGETS cpuid-dump RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
ENDIF()
ENDIF()
# ---[ pkg-config manifest (adapted from the JsonCpp build scripts)
IF(CPUINFO_BUILD_PKG_CONFIG)
  # JOIN_PATHS(<out-var> <segment>...)
  # Joins path segments with "/". An absolute segment resets the result to
  # itself (discarding what came before); empty segments are skipped. The
  # joined path is returned through <out-var> in the caller's scope.
  FUNCTION(JOIN_PATHS result_variable head_segment)
    SET(accumulated "${head_segment}")
    FOREACH(segment IN LISTS ARGN)
      IF(NOT ("${segment}" STREQUAL ""))
        IF(IS_ABSOLUTE "${segment}")
          SET(accumulated "${segment}")
        ELSE()
          SET(accumulated "${accumulated}/${segment}")
        ENDIF()
      ENDIF()
    ENDFOREACH()
    SET(${result_variable} "${accumulated}" PARENT_SCOPE)
  ENDFUNCTION()
  # The escaped \${exec_prefix}/\${prefix} stay literal so pkg-config can
  # resolve them at query time; only the install subdirectories are baked in.
  JOIN_PATHS(libdir_for_pc_file "\${exec_prefix}" "${CMAKE_INSTALL_LIBDIR}")
  JOIN_PATHS(includedir_for_pc_file "\${prefix}" "${CMAKE_INSTALL_INCLUDEDIR}")
  # @ONLY: expand @VAR@ references in the template but leave ${...} intact.
  CONFIGURE_FILE("libcpuinfo.pc.in" "libcpuinfo.pc" @ONLY)
  INSTALL(
    FILES "${CMAKE_CURRENT_BINARY_DIR}/libcpuinfo.pc"
    DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
ENDIF()

27
3rdparty/cpuinfo/LICENSE vendored Normal file
View File

@@ -0,0 +1,27 @@
Copyright (c) 2019 Google LLC
Copyright (c) 2017-2018 Facebook Inc.
Copyright (C) 2012-2017 Georgia Institute of Technology
Copyright (C) 2010-2012 Marat Dukhan
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

318
3rdparty/cpuinfo/README.md vendored Normal file
View File

@@ -0,0 +1,318 @@
# CPU INFOrmation library
[![BSD (2 clause) License](https://img.shields.io/badge/License-BSD%202--Clause%20%22Simplified%22%20License-blue.svg)](https://github.com/pytorch/cpuinfo/blob/master/LICENSE)
[![Linux/Mac build status](https://img.shields.io/travis/pytorch/cpuinfo.svg)](https://travis-ci.org/pytorch/cpuinfo)
[![Windows build status](https://ci.appveyor.com/api/projects/status/g5khy9nr0xm458t7/branch/master?svg=true)](https://ci.appveyor.com/project/MaratDukhan/cpuinfo/branch/master)
cpuinfo is a library to detect essential information about the host CPU for performance optimization.
## Features
- **Cross-platform** availability:
- Linux, Windows, macOS, Android, and iOS operating systems
- x86, x86-64, ARM, and ARM64 architectures
- Modern **C/C++ interface**
- Thread-safe
- No memory allocation after initialization
- No exceptions thrown
- Detection of **supported instruction sets**, up to AVX512 (x86) and ARMv8.3 extensions
- Detection of SoC and core information:
- **Processor (SoC) name**
- Vendor and **microarchitecture** for each CPU core
- ID (**MIDR** on ARM, **CPUID** leaf 1 EAX value on x86) for each CPU core
- Detection of **cache information**:
- Cache type (instruction/data/unified), size and line size
- Cache associativity
- Cores and logical processors (hyper-threads) sharing the cache
- Detection of **topology information** (relative between logical processors, cores, and processor packages)
- Well-tested **production-quality** code:
- 60+ mock tests based on data from real devices
- Includes work-arounds for common bugs in hardware and OS kernels
- Supports systems with heterogeneous cores, such as **big.LITTLE** and Max.Med.Min
- Permissive **open-source** license (Simplified BSD)
## Examples
Log processor name:
```c
cpuinfo_initialize();
printf("Running on %s CPU\n", cpuinfo_get_package(0)->name);
```
Detect if target is a 32-bit or 64-bit ARM system:
```c
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
    /* 32-bit or 64-bit ARM-specific code here */
#endif
```
Check if the host CPU supports ARM NEON
```c
cpuinfo_initialize();
if (cpuinfo_has_arm_neon()) {
neon_implementation(arguments);
}
```
Check if the host CPU supports x86 AVX
```c
cpuinfo_initialize();
if (cpuinfo_has_x86_avx()) {
avx_implementation(arguments);
}
```
Check if the thread runs on a Cortex-A53 core
```c
cpuinfo_initialize();
switch (cpuinfo_get_current_core()->uarch) {
case cpuinfo_uarch_cortex_a53:
cortex_a53_implementation(arguments);
break;
default:
generic_implementation(arguments);
break;
}
```
Get the size of level 1 data cache on the fastest core in the processor (e.g. big core in big.LITTLE ARM systems):
```c
cpuinfo_initialize();
const size_t l1_size = cpuinfo_get_processor(0)->cache.l1d->size;
```
Pin thread to cores sharing L2 cache with the current core (Linux or Android)
```c
cpuinfo_initialize();
cpu_set_t cpu_set;
CPU_ZERO(&cpu_set);
const struct cpuinfo_cache* current_l2 = cpuinfo_get_current_processor()->cache.l2;
for (uint32_t i = 0; i < current_l2->processor_count; i++) {
CPU_SET(cpuinfo_get_processor(current_l2->processor_start + i)->linux_id, &cpu_set);
}
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpu_set);
```
## Use via pkg-config
If you would like to provide your project's build environment with the necessary compiler and linker flags in a portable manner, the library by default when built enables `CPUINFO_BUILD_PKG_CONFIG` and will generate a [pkg-config](https://www.freedesktop.org/wiki/Software/pkg-config/) manifest (_libcpuinfo.pc_). Here are several examples of how to use it:
### Command Line
If you used your distro's package manager to install the library, you can verify that it is available to your build environment like so:
```console
$ pkg-config --cflags --libs libcpuinfo
-I/usr/include/x86_64-linux-gnu/ -L/lib/x86_64-linux-gnu/ -lcpuinfo
```
If you have installed the library from source into a non-standard prefix, pkg-config may need help finding it:
```console
$ PKG_CONFIG_PATH="/home/me/projects/cpuinfo/prefix/lib/pkgconfig/:$PKG_CONFIG_PATH" pkg-config --cflags --libs libcpuinfo
-I/home/me/projects/cpuinfo/prefix/include -L/home/me/projects/cpuinfo/prefix/lib -lcpuinfo
```
### GNU Autotools
To [use](https://autotools.io/pkgconfig/pkg_check_modules.html) with the GNU Autotools include the following snippet in your project's `configure.ac`:
```makefile
# CPU INFOrmation library...
PKG_CHECK_MODULES(
[libcpuinfo], [libcpuinfo], [],
[AC_MSG_ERROR([libcpuinfo missing...])])
YOURPROJECT_CXXFLAGS="$YOURPROJECT_CXXFLAGS $libcpuinfo_CFLAGS"
YOURPROJECT_LIBS="$YOURPROJECT_LIBS $libcpuinfo_LIBS"
```
### Meson
To use with Meson you just need to add `dependency('libcpuinfo')` as a dependency for your executable.
```meson
project(
'MyCpuInfoProject',
'cpp',
meson_version: '>=0.55.0'
)
executable(
'MyCpuInfoExecutable',
sources: 'main.cpp',
dependencies: dependency('libcpuinfo')
)
```
### Bazel
This project can be built using [Bazel](https://bazel.build/install).
You can also use this library as a dependency to your Bazel project. Add to the `WORKSPACE` file:
```python
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
git_repository(
name = "org_pytorch_cpuinfo",
branch = "master",
remote = "https://github.com/Vertexwahn/cpuinfo.git",
)
```
And to your `BUILD` file:
```python
cc_binary(
name = "cpuinfo_test",
srcs = [
# ...
],
deps = [
"@org_pytorch_cpuinfo//:cpuinfo",
],
)
```
### CMake
To use with CMake use the [FindPkgConfig](https://cmake.org/cmake/help/latest/module/FindPkgConfig.html) module. Here is an example:
```cmake
cmake_minimum_required(VERSION 3.6)
project("MyCpuInfoProject")
find_package(PkgConfig)
pkg_check_modules(CpuInfo REQUIRED IMPORTED_TARGET libcpuinfo)
add_executable(${PROJECT_NAME} main.cpp)
target_link_libraries(${PROJECT_NAME} PkgConfig::CpuInfo)
```
### Makefile
To use within a vanilla makefile, you can call pkg-config directly to supply compiler and linker flags using shell substitution.
```makefile
CFLAGS=-g3 -Wall -Wextra -Werror ...
LDFLAGS=-lfoo ...
...
CFLAGS+= $(pkg-config --cflags libcpuinfo)
LDFLAGS+= $(pkg-config --libs libcpuinfo)
```
## Exposed information
- [x] Processor (SoC) name
- [x] Microarchitecture
- [x] Usable instruction sets
- [ ] CPU frequency
- [x] Cache
- [x] Size
- [x] Associativity
- [x] Line size
- [x] Number of partitions
- [x] Flags (unified, inclusive, complex hash function)
- [x] Topology (logical processors that share this cache level)
- [ ] TLB
- [ ] Number of entries
- [ ] Associativity
- [ ] Covered page types (instruction, data)
- [ ] Covered page sizes
- [x] Topology information
- [x] Logical processors
- [x] Cores
- [x] Packages (sockets)
## Supported environments:
- [x] Android
- [x] x86 ABI
- [x] x86_64 ABI
- [x] armeabi ABI
- [x] armeabiv7-a ABI
- [x] arm64-v8a ABI
- [ ] ~~mips ABI~~
- [ ] ~~mips64 ABI~~
- [x] Linux
- [x] x86
- [x] x86-64
- [x] 32-bit ARM (ARMv5T and later)
- [x] ARM64
- [ ] PowerPC64
- [x] iOS
- [x] x86 (iPhone simulator)
- [x] x86-64 (iPhone simulator)
- [x] ARMv7
- [x] ARM64
- [x] macOS
- [x] x86
- [x] x86-64
- [x] ARM64 (Apple silicon)
- [x] Windows
- [x] x86
- [x] x86-64
- [x] arm64
## Methods
- Processor (SoC) name detection
- [x] Using CPUID leaves 0x80000002&ndash;0x80000004 on x86/x86-64
- [x] Using `/proc/cpuinfo` on ARM
- [x] Using `ro.chipname`, `ro.board.platform`, `ro.product.board`, `ro.mediatek.platform`, `ro.arch` properties (Android)
- [ ] Using kernel log (`dmesg`) on ARM Linux
- [x] Using Windows registry on ARM64 Windows
- Vendor and microarchitecture detection
- [x] Intel-designed x86/x86-64 cores (up to Sunny Cove, Goldmont Plus, and Knights Mill)
- [x] AMD-designed x86/x86-64 cores (up to Puma/Jaguar and Zen 2)
- [ ] VIA-designed x86/x86-64 cores
- [ ] Other x86 cores (DM&P, RDC, Transmeta, Cyrix, Rise)
- [x] ARM-designed ARM cores (up to Cortex-A55, Cortex-A77, and Neoverse E1/V1/N2/V2)
- [x] Qualcomm-designed ARM cores (Scorpion, Krait, and Kryo)
- [x] Nvidia-designed ARM cores (Denver and Carmel)
- [x] Samsung-designed ARM cores (Exynos)
- [x] Intel-designed ARM cores (XScale up to 3rd-gen)
- [x] Apple-designed ARM cores (up to Lightning and Thunder)
- [x] Cavium-designed ARM cores (ThunderX)
- [x] AppliedMicro-designed ARM cores (X-Gene)
- Instruction set detection
- [x] Using CPUID (x86/x86-64)
- [x] Using `/proc/cpuinfo` on 32-bit ARM EABI (Linux)
- [x] Using microarchitecture heuristics on (32-bit ARM)
- [x] Using `FPSID` and `WCID` registers (32-bit ARM)
- [x] Using `getauxval` (Linux/ARM)
- [x] Using `/proc/self/auxv` (Android/ARM)
- [ ] Using instruction probing on ARM (Linux)
- [ ] Using CPUID registers on ARM64 (Linux)
- [x] Using IsProcessorFeaturePresent on ARM64 Windows
- Cache detection
- [x] Using CPUID leaf 0x00000002 (x86/x86-64)
- [x] Using CPUID leaf 0x00000004 (non-AMD x86/x86-64)
- [ ] Using CPUID leaves 0x80000005-0x80000006 (AMD x86/x86-64)
- [x] Using CPUID leaf 0x8000001D (AMD x86/x86-64)
- [x] Using `/proc/cpuinfo` (Linux/pre-ARMv7)
- [x] Using microarchitecture heuristics (ARM)
- [x] Using chipset name (ARM)
- [x] Using `sysctlbyname` (Mach)
- [x] Using sysfs `topology` directories (ARM/Linux)
- [ ] Using sysfs `cache` directories (Linux)
- [x] Using `GetLogicalProcessorInformationEx` on ARM64 Windows
- TLB detection
- [x] Using CPUID leaf 0x00000002 (x86/x86-64)
- [ ] Using CPUID leaves 0x80000005-0x80000006 and 0x80000019 (AMD x86/x86-64)
- [x] Using microarchitecture heuristics (ARM)
- Topology detection
- [x] Using CPUID leaf 0x00000001 on x86/x86-64 (legacy APIC ID)
- [x] Using CPUID leaf 0x0000000B on x86/x86-64 (Intel APIC ID)
- [ ] Using CPUID leaf 0x8000001E on x86/x86-64 (AMD APIC ID)
- [x] Using `/proc/cpuinfo` (Linux)
- [x] Using `host_info` (Mach)
- [x] Using `GetLogicalProcessorInformationEx` (Windows)
- [x] Using sysfs (Linux)
- [x] Using chipset name (ARM/Linux)

View File

@@ -0,0 +1,15 @@
# Download-only helper project: fetches the Google Benchmark source archive
# at configure time. Configure/build/install/test steps are all disabled;
# presumably the parent build consumes the unpacked source tree via
# ADD_SUBDIRECTORY — confirm at the call site that writes this script.
# NOTE(review): VERSION 2.8.12 triggers a deprecation warning on modern
# CMake; consider raising the minimum when the toolchain baseline allows.
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12 FATAL_ERROR)
PROJECT(googlebenchmark-download NONE)
INCLUDE(ExternalProject)
ExternalProject_Add(googlebenchmark
	URL https://github.com/google/benchmark/archive/v1.6.1.zip
	# Pinned archive hash: download fails if the upstream file changes.
	URL_HASH SHA256=367e963b8620080aff8c831e24751852cffd1f74ea40f25d9cc1b667a9dd5e45
	SOURCE_DIR "${CONFU_DEPENDENCIES_SOURCE_DIR}/googlebenchmark"
	BINARY_DIR "${CONFU_DEPENDENCIES_BINARY_DIR}/googlebenchmark"
	CONFIGURE_COMMAND ""
	BUILD_COMMAND ""
	INSTALL_COMMAND ""
	TEST_COMMAND ""
)

View File

@@ -0,0 +1,15 @@
# Download-only helper project: fetches the GoogleTest source archive at
# configure time. Configure/build/install/test steps are all disabled;
# presumably the parent build consumes the unpacked source tree via
# ADD_SUBDIRECTORY — confirm at the call site that writes this script.
# NOTE(review): VERSION 2.8.12 triggers a deprecation warning on modern
# CMake; consider raising the minimum when the toolchain baseline allows.
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12 FATAL_ERROR)
PROJECT(googletest-download NONE)
INCLUDE(ExternalProject)
ExternalProject_Add(googletest
	URL https://github.com/google/googletest/archive/release-1.11.0.zip
	# Pinned archive hash: download fails if the upstream file changes.
	URL_HASH SHA256=353571c2440176ded91c2de6d6cd88ddd41401d14692ec1f99e35d013feda55a
	SOURCE_DIR "${CONFU_DEPENDENCIES_SOURCE_DIR}/googletest"
	BINARY_DIR "${CONFU_DEPENDENCIES_BINARY_DIR}/googletest"
	CONFIGURE_COMMAND ""
	BUILD_COMMAND ""
	INSTALL_COMMAND ""
	TEST_COMMAND ""
)

View File

@@ -0,0 +1,12 @@
# Package configuration template for find_package(cpuinfo): CMake expands
# @PACKAGE_INIT@ and @PROJECT_NAME@ when generating cpuinfo-config.cmake.
@PACKAGE_INIT@
# Locate the directory this config file was installed into.
get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
# Pull in any sibling cpuinfo-config-*.cmake fragments installed next to us.
file(GLOB CONFIG_FILES "${_DIR}/cpuinfo-config-*.cmake")
foreach(f ${CONFIG_FILES})
include(${f})
endforeach()
# ${_DIR}/cpuinfo-targets-*.cmake will be included here
include("${_DIR}/cpuinfo-targets.cmake")
check_required_components(@PROJECT_NAME@)

119
3rdparty/cpuinfo/cpuinfo.vcxproj vendored Normal file
View File

@@ -0,0 +1,119 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)common\vsprops\BaseProjectConfig.props" />
<Import Project="$(SolutionDir)common\vsprops\WinSDK.props" />
<PropertyGroup Label="Globals">
<ProjectGuid>{7E183337-A7E9-460C-9D3D-568BC9F9BCC1}</ProjectGuid>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<PlatformToolset Condition="!$(Configuration.Contains(Clang))">$(DefaultPlatformToolset)</PlatformToolset>
<PlatformToolset Condition="$(Configuration.Contains(Clang))">ClangCL</PlatformToolset>
<CharacterSet>MultiByte</CharacterSet>
<WholeProgramOptimization Condition="$(Configuration.Contains(Release))">true</WholeProgramOptimization>
<UseDebugLibraries Condition="$(Configuration.Contains(Debug))">true</UseDebugLibraries>
<UseDebugLibraries Condition="!$(Configuration.Contains(Debug))">false</UseDebugLibraries>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings" />
<ImportGroup Label="PropertySheets">
<Import Project="..\DefaultProjectRootDir.props" />
<Import Project="..\3rdparty.props" />
<Import Condition="$(Configuration.Contains(Debug))" Project="..\..\common\vsprops\CodeGen_Debug.props" />
<Import Condition="$(Configuration.Contains(Devel))" Project="..\..\common\vsprops\CodeGen_Devel.props" />
<Import Condition="$(Configuration.Contains(Release))" Project="..\..\common\vsprops\CodeGen_Release.props" />
<Import Condition="!$(Configuration.Contains(Release))" Project="..\..\common\vsprops\IncrementalLinking.props" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<CodeAnalysisRuleSet>AllRules.ruleset</CodeAnalysisRuleSet>
</PropertyGroup>
<ItemGroup>
<ClCompile Include="src\arm\cache.c">
<ExcludedFromBuild Condition="'$(Platform)'!='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\arm\uarch.c">
<ExcludedFromBuild Condition="'$(Platform)'!='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\arm\windows\init-by-logical-sys-info.c">
<ExcludedFromBuild Condition="'$(Platform)'!='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\arm\windows\init.c">
<ExcludedFromBuild Condition="'$(Platform)'!='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="deps\clog\src\clog.c" />
<ClCompile Include="src\api.c" />
<ClCompile Include="src\cache.c" />
<ClCompile Include="src\init.c" />
<ClCompile Include="src\x86\cache\descriptor.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\x86\cache\deterministic.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\x86\cache\init.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\x86\info.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\x86\init.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\x86\isa.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\x86\name.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\x86\topology.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\x86\uarch.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\x86\vendor.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="src\x86\windows\init.c">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="src\arm\api.h">
<ExcludedFromBuild Condition="'$(Platform)'!='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="src\arm\midr.h">
<ExcludedFromBuild Condition="'$(Platform)'!='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="src\arm\windows\windows-arm-init.h">
<ExcludedFromBuild Condition="'$(Platform)'!='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="deps\clog\include\clog.h" />
<ClInclude Include="include\cpuinfo.h" />
<ClInclude Include="src\cpuinfo\common.h" />
<ClInclude Include="src\cpuinfo\internal-api.h" />
<ClInclude Include="src\cpuinfo\log.h" />
<ClInclude Include="src\cpuinfo\utils.h" />
<ClInclude Include="src\x86\api.h">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="src\x86\cpuid.h">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="src\x86\windows\api.h">
<ExcludedFromBuild Condition="'$(Platform)'!='x64'">true</ExcludedFromBuild>
</ClInclude>
</ItemGroup>
<ItemDefinitionGroup>
<ClCompile>
<PreprocessorDefinitions>CPUINFO_LOG_LEVEL=0;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<WarningLevel>TurnOffAllWarnings</WarningLevel>
<AdditionalIncludeDirectories>$(ProjectDir)include;$(ProjectDir)src;$(ProjectDir)deps\clog\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<ObjectFileName>$(IntDir)%(RelativeDir)</ObjectFileName>
</ClCompile>
</ItemDefinitionGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets" />
</Project>

115
3rdparty/cpuinfo/cpuinfo.vcxproj.filters vendored Normal file
View File

@@ -0,0 +1,115 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="x86">
<UniqueIdentifier>{8fc9f543-ff04-48fb-ae1a-7c575a8aed13}</UniqueIdentifier>
</Filter>
<Filter Include="x86\windows">
<UniqueIdentifier>{0b540baa-aafb-4e51-8cbf-b7e7c00d9a4d}</UniqueIdentifier>
</Filter>
<Filter Include="x86\descriptor">
<UniqueIdentifier>{53ef3c40-8e03-46d1-aeb3-6446c40469da}</UniqueIdentifier>
</Filter>
<Filter Include="cpuinfo">
<UniqueIdentifier>{26002d26-399a-41bb-93cb-42fb9be21c1f}</UniqueIdentifier>
</Filter>
<Filter Include="clog">
<UniqueIdentifier>{7f0aba4c-ca06-4a7b-aed1-4f1e6976e839}</UniqueIdentifier>
</Filter>
<Filter Include="arm">
<UniqueIdentifier>{ac4549d3-f60f-4e60-bf43-86d1c253cf3f}</UniqueIdentifier>
</Filter>
<Filter Include="arm\windows">
<UniqueIdentifier>{41fcb23a-e77b-4b5c-8238-e9b92bf1f3c6}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="src\x86\isa.c">
<Filter>x86</Filter>
</ClCompile>
<ClCompile Include="src\x86\name.c">
<Filter>x86</Filter>
</ClCompile>
<ClCompile Include="src\x86\topology.c">
<Filter>x86</Filter>
</ClCompile>
<ClCompile Include="src\x86\uarch.c">
<Filter>x86</Filter>
</ClCompile>
<ClCompile Include="src\x86\vendor.c">
<Filter>x86</Filter>
</ClCompile>
<ClCompile Include="src\x86\info.c">
<Filter>x86</Filter>
</ClCompile>
<ClCompile Include="src\x86\init.c">
<Filter>x86</Filter>
</ClCompile>
<ClCompile Include="src\x86\windows\init.c">
<Filter>x86\windows</Filter>
</ClCompile>
<ClCompile Include="src\x86\cache\deterministic.c">
<Filter>x86\descriptor</Filter>
</ClCompile>
<ClCompile Include="src\x86\cache\init.c">
<Filter>x86\descriptor</Filter>
</ClCompile>
<ClCompile Include="src\x86\cache\descriptor.c">
<Filter>x86\descriptor</Filter>
</ClCompile>
<ClCompile Include="src\api.c" />
<ClCompile Include="src\cache.c" />
<ClCompile Include="src\init.c" />
<ClCompile Include="deps\clog\src\clog.c">
<Filter>clog</Filter>
</ClCompile>
<ClCompile Include="src\arm\cache.c">
<Filter>arm</Filter>
</ClCompile>
<ClCompile Include="src\arm\uarch.c">
<Filter>arm</Filter>
</ClCompile>
<ClCompile Include="src\arm\windows\init.c">
<Filter>arm\windows</Filter>
</ClCompile>
<ClCompile Include="src\arm\windows\init-by-logical-sys-info.c">
<Filter>arm\windows</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="src\x86\api.h">
<Filter>x86</Filter>
</ClInclude>
<ClInclude Include="src\x86\cpuid.h">
<Filter>x86</Filter>
</ClInclude>
<ClInclude Include="src\x86\windows\api.h">
<Filter>x86\windows</Filter>
</ClInclude>
<ClInclude Include="src\cpuinfo\internal-api.h">
<Filter>cpuinfo</Filter>
</ClInclude>
<ClInclude Include="src\cpuinfo\log.h">
<Filter>cpuinfo</Filter>
</ClInclude>
<ClInclude Include="src\cpuinfo\utils.h">
<Filter>cpuinfo</Filter>
</ClInclude>
<ClInclude Include="src\cpuinfo\common.h">
<Filter>cpuinfo</Filter>
</ClInclude>
<ClInclude Include="include\cpuinfo.h" />
<ClInclude Include="deps\clog\include\clog.h">
<Filter>clog</Filter>
</ClInclude>
<ClInclude Include="src\arm\api.h">
<Filter>arm</Filter>
</ClInclude>
<ClInclude Include="src\arm\midr.h">
<Filter>arm</Filter>
</ClInclude>
<ClInclude Include="src\arm\windows\windows-arm-init.h">
<Filter>arm\windows</Filter>
</ClInclude>
</ItemGroup>
</Project>

76
3rdparty/cpuinfo/include/cpuinfo-mock.h vendored Normal file
View File

@@ -0,0 +1,76 @@
/*
 * Test-only mocking interface for cpuinfo: lets tests substitute canned
 * CPUID results, an in-memory filesystem, and Android system properties
 * for the real platform queries. Only usable when CPUINFO_MOCK is defined.
 */
#pragma once
#ifndef CPUINFO_MOCK_H
#define CPUINFO_MOCK_H
#include <stddef.h>
#include <stdint.h>
#include <cpuinfo.h>
#if defined(__linux__)
#include <sys/types.h>
#endif
/* Guard: refuse inclusion in non-mock (production) builds. */
#if !defined(CPUINFO_MOCK) || !(CPUINFO_MOCK)
#error This header is intended only for test use
#endif
#ifdef __cplusplus
extern "C" {
#endif
#if CPUINFO_ARCH_ARM
/* Override the values reported for the FPSID and WCID coprocessor registers. */
void CPUINFO_ABI cpuinfo_set_fpsid(uint32_t fpsid);
void CPUINFO_ABI cpuinfo_set_wcid(uint32_t wcid);
#endif /* CPUINFO_ARCH_ARM */
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
/* One canned CPUID response: matched on (input_eax, input_ecx); the four
 * output registers are returned to the caller. */
struct cpuinfo_mock_cpuid {
uint32_t input_eax;
uint32_t input_ecx;
uint32_t eax;
uint32_t ebx;
uint32_t ecx;
uint32_t edx;
};
/* Install a table of `entries` canned CPUID responses. */
void CPUINFO_ABI cpuinfo_mock_set_cpuid(struct cpuinfo_mock_cpuid* dump, size_t entries);
/* Query the mocked CPUID (leaf-only and leaf+subleaf variants). */
void CPUINFO_ABI cpuinfo_mock_get_cpuid(uint32_t eax, uint32_t regs[4]);
void CPUINFO_ABI cpuinfo_mock_get_cpuidex(uint32_t eax, uint32_t ecx, uint32_t regs[4]);
#endif /* CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64 */
/* An in-memory file for the mocked filesystem: path, content buffer of
 * `size` bytes, and the current read offset. */
struct cpuinfo_mock_file {
const char* path;
size_t size;
const char* content;
size_t offset;
};
/* A single key/value pair for mocked Android system properties. */
struct cpuinfo_mock_property {
const char* key;
const char* value;
};
#if defined(__linux__)
/* Install the mock file table and the open/close/read shims that serve
 * reads from it instead of the real filesystem. */
void CPUINFO_ABI cpuinfo_mock_filesystem(struct cpuinfo_mock_file* files);
int CPUINFO_ABI cpuinfo_mock_open(const char* path, int oflag);
int CPUINFO_ABI cpuinfo_mock_close(int fd);
ssize_t CPUINFO_ABI cpuinfo_mock_read(int fd, void* buffer, size_t capacity);
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
/* Override the Linux HWCAP auxiliary-vector value. */
void CPUINFO_ABI cpuinfo_set_hwcap(uint32_t hwcap);
#endif
#if CPUINFO_ARCH_ARM
/* Override the Linux HWCAP2 auxiliary-vector value (32-bit ARM only). */
void CPUINFO_ABI cpuinfo_set_hwcap2(uint64_t hwcap2);
#endif
#endif
#if defined(__ANDROID__)
/* Override Android system properties and the reported GL renderer string. */
void CPUINFO_ABI cpuinfo_mock_android_properties(struct cpuinfo_mock_property* properties);
void CPUINFO_ABI cpuinfo_mock_gl_renderer(const char* renderer);
#endif
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* CPUINFO_MOCK_H */

2321
3rdparty/cpuinfo/include/cpuinfo.h vendored Normal file

File diff suppressed because it is too large Load Diff

416
3rdparty/cpuinfo/src/api.c vendored Normal file
View File

@@ -0,0 +1,416 @@
#include <stdbool.h>
#include <stddef.h>
#include <cpuinfo.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
#ifdef __linux__
#include <linux/api.h>
#include <sys/syscall.h>
#include <unistd.h>
#if !defined(__NR_getcpu)
#include <asm-generic/unistd.h>
#endif
#endif
/* Global state shared by all accessors in this file; populated by the
 * platform-specific initialization code and guarded by cpuinfo_is_initialized. */
bool cpuinfo_is_initialized = false;
/* Flat topology arrays, valid only after initialization. */
struct cpuinfo_processor* cpuinfo_processors = NULL;
struct cpuinfo_core* cpuinfo_cores = NULL;
struct cpuinfo_cluster* cpuinfo_clusters = NULL;
struct cpuinfo_package* cpuinfo_packages = NULL;
/* One cache-descriptor array per cache level (1i, 1d, 2, 3, 4). */
struct cpuinfo_cache* cpuinfo_cache[cpuinfo_cache_level_max] = {NULL};
uint32_t cpuinfo_processors_count = 0;
uint32_t cpuinfo_cores_count = 0;
uint32_t cpuinfo_clusters_count = 0;
uint32_t cpuinfo_packages_count = 0;
uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max] = {0};
uint32_t cpuinfo_max_cache_size = 0;
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
/* Architectures with potentially heterogeneous cores keep one record per uarch. */
struct cpuinfo_uarch_info* cpuinfo_uarchs = NULL;
uint32_t cpuinfo_uarchs_count = 0;
#else
/* Other architectures expose exactly one global uarch record. */
struct cpuinfo_uarch_info cpuinfo_global_uarch = {cpuinfo_uarch_unknown};
#endif
#ifdef __linux__
/* Maps from Linux logical CPU numbers (as reported by getcpu) to entries. */
uint32_t cpuinfo_linux_cpu_max = 0;
const struct cpuinfo_processor** cpuinfo_linux_cpu_to_processor_map = NULL;
const struct cpuinfo_core** cpuinfo_linux_cpu_to_core_map = NULL;
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
const uint32_t* cpuinfo_linux_cpu_to_uarch_index_map = NULL;
#endif
#endif
/* Returns the array of logical processors; logs a fatal error when called
 * before cpuinfo is initialized. */
const struct cpuinfo_processor* cpuinfo_get_processors(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "processors");
}
return cpuinfo_processors;
}
/*
 * Returns the array of physical cores; logs a fatal error when called
 * before cpuinfo is initialized.
 */
const struct cpuinfo_core* cpuinfo_get_cores(void) {
	if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
		/* Fixed: diagnostic previously said "core" (the indexed accessor's
		 * name) instead of this function's "cores" suffix. */
		cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "cores");
	}
	return cpuinfo_cores;
}
/* Returns the array of core clusters; fatal error before initialization. */
const struct cpuinfo_cluster* cpuinfo_get_clusters(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "clusters");
}
return cpuinfo_clusters;
}
/* Returns the array of physical packages; fatal error before initialization. */
const struct cpuinfo_package* cpuinfo_get_packages(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "packages");
}
return cpuinfo_packages;
}
/*
 * Returns microarchitecture info records: the per-uarch array on
 * ARM/ARM64/RISC-V builds, or the single global record elsewhere.
 * Logs a fatal error when called before cpuinfo is initialized.
 *
 * Fixed: declared with `(void)` — the previous empty `()` was an
 * unspecified-arguments declaration in C, inconsistent with every
 * other accessor in this file.
 * NOTE(review): the init check here is not wrapped in CPUINFO_UNLIKELY,
 * unlike the sibling accessors — consider aligning for consistency.
 */
const struct cpuinfo_uarch_info* cpuinfo_get_uarchs(void) {
	if (!cpuinfo_is_initialized) {
		cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "uarchs");
	}
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
	return cpuinfo_uarchs;
#else
	return &cpuinfo_global_uarch;
#endif
}
/* Returns the processor at `index`, or NULL when the index is out of range;
 * fatal error before initialization. */
const struct cpuinfo_processor* cpuinfo_get_processor(uint32_t index) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "processor");
}
if CPUINFO_UNLIKELY (index >= cpuinfo_processors_count) {
return NULL;
}
return &cpuinfo_processors[index];
}
/* Returns the core at `index`, or NULL when out of range. */
const struct cpuinfo_core* cpuinfo_get_core(uint32_t index) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "core");
}
if CPUINFO_UNLIKELY (index >= cpuinfo_cores_count) {
return NULL;
}
return &cpuinfo_cores[index];
}
/* Returns the cluster at `index`, or NULL when out of range. */
const struct cpuinfo_cluster* cpuinfo_get_cluster(uint32_t index) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "cluster");
}
if CPUINFO_UNLIKELY (index >= cpuinfo_clusters_count) {
return NULL;
}
return &cpuinfo_clusters[index];
}
/* Returns the package at `index`, or NULL when out of range. */
const struct cpuinfo_package* cpuinfo_get_package(uint32_t index) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "package");
}
if CPUINFO_UNLIKELY (index >= cpuinfo_packages_count) {
return NULL;
}
return &cpuinfo_packages[index];
}
/* Returns the uarch info record at `index`, or NULL when out of range.
 * On builds without per-uarch tracking only index 0 is valid.
 * NOTE(review): the init check is not wrapped in CPUINFO_UNLIKELY, unlike
 * the other accessors in this file — consider aligning for consistency. */
const struct cpuinfo_uarch_info* cpuinfo_get_uarch(uint32_t index) {
if (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "uarch");
}
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
if CPUINFO_UNLIKELY (index >= cpuinfo_uarchs_count) {
return NULL;
}
return &cpuinfo_uarchs[index];
#else
if CPUINFO_UNLIKELY (index != 0) {
return NULL;
}
return &cpuinfo_global_uarch;
#endif
}
/* Element counts for the corresponding cpuinfo_get_* arrays; each logs a
 * fatal error when called before cpuinfo is initialized. */
uint32_t cpuinfo_get_processors_count(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "processors_count");
}
return cpuinfo_processors_count;
}
uint32_t cpuinfo_get_cores_count(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "cores_count");
}
return cpuinfo_cores_count;
}
uint32_t cpuinfo_get_clusters_count(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "clusters_count");
}
return cpuinfo_clusters_count;
}
uint32_t cpuinfo_get_packages_count(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "packages_count");
}
return cpuinfo_packages_count;
}
/* Number of uarch info records: the tracked count on ARM/ARM64/RISC-V
 * builds, always 1 elsewhere.
 * NOTE(review): the init check is not wrapped in CPUINFO_UNLIKELY, unlike
 * the other accessors in this file — consider aligning for consistency. */
uint32_t cpuinfo_get_uarchs_count(void) {
if (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "uarchs_count");
}
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
return cpuinfo_uarchs_count;
#else
return 1;
#endif
}
/* Per-level cache arrays (L1 instruction, L1 data, L2, L3, L4); each logs a
 * fatal error when called before cpuinfo is initialized. */
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_caches(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1i_caches");
}
return cpuinfo_cache[cpuinfo_cache_level_1i];
}
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_caches(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1d_caches");
}
return cpuinfo_cache[cpuinfo_cache_level_1d];
}
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_caches(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l2_caches");
}
return cpuinfo_cache[cpuinfo_cache_level_2];
}
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_caches(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l3_caches");
}
return cpuinfo_cache[cpuinfo_cache_level_3];
}
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_caches(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l4_caches");
}
return cpuinfo_cache[cpuinfo_cache_level_4];
}
/* Indexed per-level cache accessors; return NULL when `index` exceeds the
 * level's cache count, fatal error before initialization. */
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1i_cache(uint32_t index) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1i_cache");
}
if CPUINFO_UNLIKELY (index >= cpuinfo_cache_count[cpuinfo_cache_level_1i]) {
return NULL;
}
return &cpuinfo_cache[cpuinfo_cache_level_1i][index];
}
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l1d_cache(uint32_t index) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1d_cache");
}
if CPUINFO_UNLIKELY (index >= cpuinfo_cache_count[cpuinfo_cache_level_1d]) {
return NULL;
}
return &cpuinfo_cache[cpuinfo_cache_level_1d][index];
}
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l2_cache(uint32_t index) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l2_cache");
}
if CPUINFO_UNLIKELY (index >= cpuinfo_cache_count[cpuinfo_cache_level_2]) {
return NULL;
}
return &cpuinfo_cache[cpuinfo_cache_level_2][index];
}
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l3_cache(uint32_t index) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l3_cache");
}
if CPUINFO_UNLIKELY (index >= cpuinfo_cache_count[cpuinfo_cache_level_3]) {
return NULL;
}
return &cpuinfo_cache[cpuinfo_cache_level_3][index];
}
const struct cpuinfo_cache* CPUINFO_ABI cpuinfo_get_l4_cache(uint32_t index) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l4_cache");
}
if CPUINFO_UNLIKELY (index >= cpuinfo_cache_count[cpuinfo_cache_level_4]) {
return NULL;
}
return &cpuinfo_cache[cpuinfo_cache_level_4][index];
}
/* Per-level cache counts and the maximum cache size recorded during
 * initialization; each logs a fatal error before initialization. */
uint32_t CPUINFO_ABI cpuinfo_get_l1i_caches_count(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1i_caches_count");
}
return cpuinfo_cache_count[cpuinfo_cache_level_1i];
}
uint32_t CPUINFO_ABI cpuinfo_get_l1d_caches_count(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l1d_caches_count");
}
return cpuinfo_cache_count[cpuinfo_cache_level_1d];
}
uint32_t CPUINFO_ABI cpuinfo_get_l2_caches_count(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l2_caches_count");
}
return cpuinfo_cache_count[cpuinfo_cache_level_2];
}
uint32_t CPUINFO_ABI cpuinfo_get_l3_caches_count(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l3_caches_count");
}
return cpuinfo_cache_count[cpuinfo_cache_level_3];
}
uint32_t CPUINFO_ABI cpuinfo_get_l4_caches_count(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "l4_caches_count");
}
return cpuinfo_cache_count[cpuinfo_cache_level_4];
}
uint32_t CPUINFO_ABI cpuinfo_get_max_cache_size(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "max_cache_size");
}
return cpuinfo_max_cache_size;
}
/*
 * Returns the cpuinfo_processor for the CPU the calling thread is currently
 * running on, or NULL when it cannot be determined. Only implemented on
 * Linux (via the getcpu syscall); other platforms always return NULL.
 *
 * Fixed: the failure paths returned the literal `0` for a pointer type;
 * use NULL for consistency with the rest of the file.
 */
const struct cpuinfo_processor* CPUINFO_ABI cpuinfo_get_current_processor(void) {
	if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
		cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "current_processor");
	}
#ifdef __linux__
	/* Initializing this variable silences a MemorySanitizer error. */
	unsigned cpu = 0;
	if CPUINFO_UNLIKELY (syscall(__NR_getcpu, &cpu, NULL, NULL) != 0) {
		return NULL;
	}
	if CPUINFO_UNLIKELY ((uint32_t)cpu >= cpuinfo_linux_cpu_max) {
		return NULL;
	}
	return cpuinfo_linux_cpu_to_processor_map[cpu];
#else
	return NULL;
#endif
}
/*
 * Returns the cpuinfo_core for the CPU the calling thread is currently
 * running on, or NULL when it cannot be determined. Only implemented on
 * Linux (via the getcpu syscall); other platforms always return NULL.
 *
 * Fixed: the failure paths returned the literal `0` for a pointer type;
 * use NULL for consistency with the rest of the file.
 */
const struct cpuinfo_core* CPUINFO_ABI cpuinfo_get_current_core(void) {
	if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
		cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "current_core");
	}
#ifdef __linux__
	/* Initializing this variable silences a MemorySanitizer error. */
	unsigned cpu = 0;
	if CPUINFO_UNLIKELY (syscall(__NR_getcpu, &cpu, NULL, NULL) != 0) {
		return NULL;
	}
	if CPUINFO_UNLIKELY ((uint32_t)cpu >= cpuinfo_linux_cpu_max) {
		return NULL;
	}
	return cpuinfo_linux_cpu_to_core_map[cpu];
#else
	return NULL;
#endif
}
/* Returns the index (into the uarch info array) of the microarchitecture of
 * the CPU the calling thread currently runs on; 0 on failure or on
 * platforms without heterogeneous-core tracking. */
uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index(void) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal("cpuinfo_get_%s called before cpuinfo is initialized", "current_uarch_index");
}
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
#ifdef __linux__
if (cpuinfo_linux_cpu_to_uarch_index_map == NULL) {
/* Special case: avoid syscall on systems with only a single
* type of cores
*/
return 0;
}
/* General case */
/* Initializing this variable silences a MemorySanitizer error. */
unsigned cpu = 0;
if CPUINFO_UNLIKELY (syscall(__NR_getcpu, &cpu, NULL, NULL) != 0) {
return 0;
}
if CPUINFO_UNLIKELY ((uint32_t)cpu >= cpuinfo_linux_cpu_max) {
return 0;
}
return cpuinfo_linux_cpu_to_uarch_index_map[cpu];
#else
/* Fallback: pretend to be on the big core. */
return 0;
#endif
#else
/* Only ARM/ARM64/RISCV processors may include cores of different types
* in the same package. */
return 0;
#endif
}
/* Like cpuinfo_get_current_uarch_index(), but returns the caller-supplied
 * `default_uarch_index` when the current CPU cannot be determined on Linux.
 * Still returns 0 on single-uarch systems and on non-heterogeneous builds. */
uint32_t CPUINFO_ABI cpuinfo_get_current_uarch_index_with_default(uint32_t default_uarch_index) {
if CPUINFO_UNLIKELY (!cpuinfo_is_initialized) {
cpuinfo_log_fatal(
"cpuinfo_get_%s called before cpuinfo is initialized", "current_uarch_index_with_default");
}
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
#ifdef __linux__
if (cpuinfo_linux_cpu_to_uarch_index_map == NULL) {
/* Special case: avoid syscall on systems with only a single
* type of cores
*/
return 0;
}
/* General case */
/* Initializing this variable silences a MemorySanitizer error. */
unsigned cpu = 0;
if CPUINFO_UNLIKELY (syscall(__NR_getcpu, &cpu, NULL, NULL) != 0) {
return default_uarch_index;
}
if CPUINFO_UNLIKELY ((uint32_t)cpu >= cpuinfo_linux_cpu_max) {
return default_uarch_index;
}
return cpuinfo_linux_cpu_to_uarch_index_map[cpu];
#else
/* Fallback: no API to query current core, use default uarch index. */
return default_uarch_index;
#endif
#else
/* Only ARM/ARM64/RISCV processors may include cores of different types
* in the same package. */
return 0;
#endif
}

20
3rdparty/cpuinfo/src/arm/android/api.h vendored Normal file
View File

@@ -0,0 +1,20 @@
#pragma once
/* Android-specific chipset detection API: enumerates the sources from which
 * a chipset identity can be parsed, and declares the property reader. */
#include <arm/api.h>
#include <arm/linux/api.h>
#include <cpuinfo.h>
#include <cpuinfo/common.h>
/* Sources of chipset identification strings on Android.
 * NOTE(review): whether enumeration order implies matching priority is not
 * visible here — confirm in the chipset decoder. */
enum cpuinfo_android_chipset_property {
cpuinfo_android_chipset_property_proc_cpuinfo_hardware = 0,
cpuinfo_android_chipset_property_ro_product_board,
cpuinfo_android_chipset_property_ro_board_platform,
cpuinfo_android_chipset_property_ro_mediatek_platform,
cpuinfo_android_chipset_property_ro_arch,
cpuinfo_android_chipset_property_ro_chipname,
cpuinfo_android_chipset_property_ro_hardware_chipname,
cpuinfo_android_chipset_property_max,
};
/* Reads the ro.* Android system properties into `properties`. */
CPUINFO_INTERNAL void cpuinfo_arm_android_parse_properties(
struct cpuinfo_android_properties properties[restrict static 1]);

View File

@@ -0,0 +1,66 @@
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/system_properties.h>
#include <arm/android/api.h>
#include <arm/linux/api.h>
#include <cpuinfo/log.h>
#include <linux/api.h>
#if CPUINFO_MOCK
#include <cpuinfo-mock.h>
/* Table of mocked properties; NULL until a test installs one. */
static struct cpuinfo_mock_property* cpuinfo_mock_properties = NULL;
/* Install the mock property table used instead of __system_property_get. */
void CPUINFO_ABI cpuinfo_mock_android_properties(struct cpuinfo_mock_property* properties) {
cpuinfo_log_info("Android properties mocking enabled");
cpuinfo_mock_properties = properties;
}
/* Mocked property lookup: linear scan of a table terminated by a NULL key.
 * Returns the (capped) value length, or 0 with an empty output string when
 * the key is absent or no table is installed.
 * NOTE(review): strncpy does not null-terminate when the value is exactly
 * CPUINFO_BUILD_PROP_VALUE_MAX bytes long — confirm the constant reserves
 * room for the terminator. */
static int cpuinfo_android_property_get(const char* key, char* value) {
if (cpuinfo_mock_properties != NULL) {
for (const struct cpuinfo_mock_property* prop = cpuinfo_mock_properties; prop->key != NULL; prop++) {
if (strncmp(key, prop->key, CPUINFO_BUILD_PROP_NAME_MAX) == 0) {
strncpy(value, prop->value, CPUINFO_BUILD_PROP_VALUE_MAX);
return (int)strnlen(prop->value, CPUINFO_BUILD_PROP_VALUE_MAX);
}
}
}
*value = '\0';
return 0;
}
#else
/* Real implementation: defer to the Android libc system-property API. */
static inline int cpuinfo_android_property_get(const char* key, char* value) {
return __system_property_get(key, value);
}
#endif
/*
 * Reads the Android system properties used for chipset detection
 * (ro.product.board, ro.board.platform, ro.mediatek.platform, ro.arch,
 * ro.chipname, ro.hardware.chipname) into the corresponding fields of
 * `properties`, logging each value that was read at debug level.
 */
void cpuinfo_arm_android_parse_properties(struct cpuinfo_android_properties properties[restrict static 1]) {
const int ro_product_board_length =
cpuinfo_android_property_get("ro.product.board", properties->ro_product_board);
cpuinfo_log_debug("read ro.product.board = \"%.*s\"", ro_product_board_length, properties->ro_product_board);
const int ro_board_platform_length =
cpuinfo_android_property_get("ro.board.platform", properties->ro_board_platform);
cpuinfo_log_debug("read ro.board.platform = \"%.*s\"", ro_board_platform_length, properties->ro_board_platform);
const int ro_mediatek_platform_length =
cpuinfo_android_property_get("ro.mediatek.platform", properties->ro_mediatek_platform);
cpuinfo_log_debug(
"read ro.mediatek.platform = \"%.*s\"", ro_mediatek_platform_length, properties->ro_mediatek_platform);
const int ro_arch_length = cpuinfo_android_property_get("ro.arch", properties->ro_arch);
cpuinfo_log_debug("read ro.arch = \"%.*s\"", ro_arch_length, properties->ro_arch);
const int ro_chipname_length = cpuinfo_android_property_get("ro.chipname", properties->ro_chipname);
cpuinfo_log_debug("read ro.chipname = \"%.*s\"", ro_chipname_length, properties->ro_chipname);
const int ro_hardware_chipname_length =
cpuinfo_android_property_get("ro.hardware.chipname", properties->ro_hardware_chipname);
cpuinfo_log_debug(
"read ro.hardware.chipname = \"%.*s\"", ro_hardware_chipname_length, properties->ro_hardware_chipname);
}

133
3rdparty/cpuinfo/src/arm/api.h vendored Normal file
View File

@@ -0,0 +1,133 @@
#pragma once
/* Internal ARM-specific API: chipset identity (vendor/series/model/suffix)
 * and MIDR-based microarchitecture / cache decoding. */
#ifdef _MSC_VER
#define RESTRICT_STATIC /* nothing for MSVC */
#else
#define RESTRICT_STATIC restrict static
#endif
#include <stdbool.h>
#include <stdint.h>
#include <cpuinfo.h>
#include <cpuinfo/common.h>
/* SoC vendors recognized by the chipset parser. */
enum cpuinfo_arm_chipset_vendor {
cpuinfo_arm_chipset_vendor_unknown = 0,
cpuinfo_arm_chipset_vendor_qualcomm,
cpuinfo_arm_chipset_vendor_mediatek,
cpuinfo_arm_chipset_vendor_samsung,
cpuinfo_arm_chipset_vendor_hisilicon,
cpuinfo_arm_chipset_vendor_actions,
cpuinfo_arm_chipset_vendor_allwinner,
cpuinfo_arm_chipset_vendor_amlogic,
cpuinfo_arm_chipset_vendor_broadcom,
cpuinfo_arm_chipset_vendor_lg,
cpuinfo_arm_chipset_vendor_leadcore,
cpuinfo_arm_chipset_vendor_marvell,
cpuinfo_arm_chipset_vendor_mstar,
cpuinfo_arm_chipset_vendor_novathor,
cpuinfo_arm_chipset_vendor_nvidia,
cpuinfo_arm_chipset_vendor_pinecone,
cpuinfo_arm_chipset_vendor_renesas,
cpuinfo_arm_chipset_vendor_rockchip,
cpuinfo_arm_chipset_vendor_spreadtrum,
cpuinfo_arm_chipset_vendor_telechips,
cpuinfo_arm_chipset_vendor_texas_instruments,
cpuinfo_arm_chipset_vendor_unisoc,
cpuinfo_arm_chipset_vendor_wondermedia,
cpuinfo_arm_chipset_vendor_max,
};
/* Chipset model-number series (e.g. Qualcomm MSM, Samsung Exynos). */
enum cpuinfo_arm_chipset_series {
cpuinfo_arm_chipset_series_unknown = 0,
cpuinfo_arm_chipset_series_qualcomm_qsd,
cpuinfo_arm_chipset_series_qualcomm_msm,
cpuinfo_arm_chipset_series_qualcomm_apq,
cpuinfo_arm_chipset_series_qualcomm_snapdragon,
cpuinfo_arm_chipset_series_mediatek_mt,
cpuinfo_arm_chipset_series_samsung_exynos,
cpuinfo_arm_chipset_series_hisilicon_k3v,
cpuinfo_arm_chipset_series_hisilicon_hi,
cpuinfo_arm_chipset_series_hisilicon_kirin,
cpuinfo_arm_chipset_series_actions_atm,
cpuinfo_arm_chipset_series_allwinner_a,
cpuinfo_arm_chipset_series_amlogic_aml,
cpuinfo_arm_chipset_series_amlogic_s,
cpuinfo_arm_chipset_series_broadcom_bcm,
cpuinfo_arm_chipset_series_lg_nuclun,
cpuinfo_arm_chipset_series_leadcore_lc,
cpuinfo_arm_chipset_series_marvell_pxa,
cpuinfo_arm_chipset_series_mstar_6a,
cpuinfo_arm_chipset_series_novathor_u,
cpuinfo_arm_chipset_series_nvidia_tegra_t,
cpuinfo_arm_chipset_series_nvidia_tegra_ap,
cpuinfo_arm_chipset_series_nvidia_tegra_sl,
cpuinfo_arm_chipset_series_pinecone_surge_s,
cpuinfo_arm_chipset_series_renesas_mp,
cpuinfo_arm_chipset_series_rockchip_rk,
cpuinfo_arm_chipset_series_spreadtrum_sc,
cpuinfo_arm_chipset_series_telechips_tcc,
cpuinfo_arm_chipset_series_texas_instruments_omap,
cpuinfo_arm_chipset_series_unisoc_t,
cpuinfo_arm_chipset_series_unisoc_ums,
cpuinfo_arm_chipset_series_wondermedia_wm,
cpuinfo_arm_chipset_series_max,
};
/* Maximum size of the chipset suffix buffer. */
#define CPUINFO_ARM_CHIPSET_SUFFIX_MAX 8
/* Decoded chipset identity: vendor + series + numeric model + suffix. */
struct cpuinfo_arm_chipset {
enum cpuinfo_arm_chipset_vendor vendor;
enum cpuinfo_arm_chipset_series series;
uint32_t model;
char suffix[CPUINFO_ARM_CHIPSET_SUFFIX_MAX];
};
#define CPUINFO_ARM_CHIPSET_NAME_MAX CPUINFO_PACKAGE_NAME_MAX
#ifndef __cplusplus
/* Formats the chipset as a human-readable name into `name`. */
CPUINFO_INTERNAL void cpuinfo_arm_chipset_to_string(
const struct cpuinfo_arm_chipset chipset[RESTRICT_STATIC 1],
char name[RESTRICT_STATIC CPUINFO_ARM_CHIPSET_NAME_MAX]);
/* Adjusts a parsed chipset in place, given the core count and the maximum
 * of the per-CPU maximum frequencies. */
CPUINFO_INTERNAL void cpuinfo_arm_fixup_chipset(
struct cpuinfo_arm_chipset chipset[RESTRICT_STATIC 1],
uint32_t cores,
uint32_t max_cpu_freq_max);
/* Decodes CPU vendor and microarchitecture from the MIDR register value. */
CPUINFO_INTERNAL void cpuinfo_arm_decode_vendor_uarch(
uint32_t midr,
#if CPUINFO_ARCH_ARM
bool has_vfpv4,
#endif
enum cpuinfo_vendor vendor[RESTRICT_STATIC 1],
enum cpuinfo_uarch uarch[RESTRICT_STATIC 1]);
/* Fills in L1I/L1D/L2/L3 cache parameters for the given uarch and cluster. */
CPUINFO_INTERNAL void cpuinfo_arm_decode_cache(
enum cpuinfo_uarch uarch,
uint32_t cluster_cores,
uint32_t midr,
const struct cpuinfo_arm_chipset chipset[RESTRICT_STATIC 1],
uint32_t cluster_id,
uint32_t arch_version,
struct cpuinfo_cache l1i[RESTRICT_STATIC 1],
struct cpuinfo_cache l1d[RESTRICT_STATIC 1],
struct cpuinfo_cache l2[RESTRICT_STATIC 1],
struct cpuinfo_cache l3[RESTRICT_STATIC 1]);
/* Largest cache size reachable from the given processor. */
CPUINFO_INTERNAL uint32_t
cpuinfo_arm_compute_max_cache_size(const struct cpuinfo_processor processor[RESTRICT_STATIC 1]);
#else /* defined(__cplusplus) */
/* C++ translation units see the declaration without restrict/static array
 * qualifiers, which are C-only syntax. */
CPUINFO_INTERNAL void cpuinfo_arm_decode_cache(
enum cpuinfo_uarch uarch,
uint32_t cluster_cores,
uint32_t midr,
const struct cpuinfo_arm_chipset chipset[1],
uint32_t cluster_id,
uint32_t arch_version,
struct cpuinfo_cache l1i[1],
struct cpuinfo_cache l1d[1],
struct cpuinfo_cache l2[1],
struct cpuinfo_cache l3[1]);
#endif

1786
3rdparty/cpuinfo/src/arm/cache.c vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,364 @@
#include <stdint.h>
#if CPUINFO_MOCK
#include <cpuinfo-mock.h>
#endif
#include <arm/linux/api.h>
#include <arm/linux/cp.h>
#include <arm/midr.h>
#include <cpuinfo/log.h>
#if CPUINFO_MOCK
/* Mocked values of ARM coprocessor ID registers, settable by tests.
 * NOTE(review): cpuinfo_arm_mvfr0 has no setter in this section — confirm
 * whether one is provided elsewhere or the variable is vestigial. */
uint32_t cpuinfo_arm_fpsid = 0;
uint32_t cpuinfo_arm_mvfr0 = 0;
uint32_t cpuinfo_arm_wcid = 0;
/* Override the value reported for the FPSID register. */
void cpuinfo_set_fpsid(uint32_t fpsid) {
cpuinfo_arm_fpsid = fpsid;
}
/* Override the value reported for the WCID register. */
void cpuinfo_set_wcid(uint32_t wcid) {
cpuinfo_arm_wcid = wcid;
}
#endif
void cpuinfo_arm_linux_decode_isa_from_proc_cpuinfo(
uint32_t features,
uint64_t features2,
uint32_t midr,
uint32_t architecture_version,
uint32_t architecture_flags,
const struct cpuinfo_arm_chipset chipset[restrict static 1],
struct cpuinfo_arm_isa isa[restrict static 1]) {
if (architecture_version < 8) {
const uint32_t armv8_features2_mask = CPUINFO_ARM_LINUX_FEATURE2_AES |
CPUINFO_ARM_LINUX_FEATURE2_PMULL | CPUINFO_ARM_LINUX_FEATURE2_SHA1 |
CPUINFO_ARM_LINUX_FEATURE2_SHA2 | CPUINFO_ARM_LINUX_FEATURE2_CRC32;
if (features2 & armv8_features2_mask) {
architecture_version = 8;
}
}
if (architecture_version >= 8) {
/*
* ARMv7 code running on ARMv8: IDIV, VFP, NEON are always
* supported, but may be not reported in /proc/cpuinfo features.
*/
isa->armv5e = true;
isa->armv6 = true;
isa->armv6k = true;
isa->armv7 = true;
isa->armv7mp = true;
isa->armv8 = true;
isa->thumb = true;
isa->thumb2 = true;
isa->idiv = true;
isa->vfpv3 = true;
isa->d32 = true;
isa->fp16 = true;
isa->fma = true;
isa->neon = true;
/*
* NEON FP16 compute extension and VQRDMLAH/VQRDMLSH
* instructions are not indicated in /proc/cpuinfo. Use a
* MIDR-based heuristic to whitelist processors known to support
* it:
* - Processors with Cortex-A55 cores
* - Processors with Cortex-A75 cores
* - Processors with Cortex-A76 cores
* - Processors with Cortex-A77 cores
* - Processors with Cortex-A78 cores
* - Processors with Cortex-A510 cores
* - Processors with Cortex-A710 cores
* - Processors with Cortex-A715 cores
* - Processors with Cortex-X1 cores
* - Processors with Cortex-X2 cores
* - Processors with Cortex-X3 cores
* - Processors with Exynos M4 cores
* - Processors with Exynos M5 cores
* - Neoverse N1 cores
* - Neoverse N2 cores
* - Neoverse V1 cores
* - Neoverse V2 cores
*/
if (chipset->series == cpuinfo_arm_chipset_series_samsung_exynos && chipset->model == 9810) {
/* Only little cores of Exynos 9810 support FP16 & RDM
*/
cpuinfo_log_warning(
"FP16 arithmetics and RDM disabled: only little cores in Exynos 9810 support these extensions");
} else {
switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) {
case UINT32_C(0x4100D050): /* Cortex-A55 */
case UINT32_C(0x4100D0A0): /* Cortex-A75 */
case UINT32_C(0x4100D0B0): /* Cortex-A76 */
case UINT32_C(0x4100D0C0): /* Neoverse N1 */
case UINT32_C(0x4100D0D0): /* Cortex-A77 */
case UINT32_C(0x4100D0E0): /* Cortex-A76AE */
case UINT32_C(0x4100D400): /* Neoverse V1 */
case UINT32_C(0x4100D410): /* Cortex-A78 */
case UINT32_C(0x4100D440): /* Cortex-X1 */
case UINT32_C(0x4100D460): /* Cortex-A510 */
case UINT32_C(0x4100D470): /* Cortex-A710 */
case UINT32_C(0x4100D480): /* Cortex-X2 */
case UINT32_C(0x4100D490): /* Neoverse N2 */
case UINT32_C(0x4100D4D0): /* Cortex-A715 */
case UINT32_C(0x4100D4E0): /* Cortex-X3 */
case UINT32_C(0x4100D4F0): /* Neoverse V2 */
case UINT32_C(0x4800D400): /* Cortex-A76
(HiSilicon) */
case UINT32_C(0x51008020): /* Kryo 385 Gold
(Cortex-A75) */
case UINT32_C(0x51008030): /* Kryo 385 Silver
(Cortex-A55) */
case UINT32_C(0x51008040): /* Kryo 485 Gold
(Cortex-A76) */
case UINT32_C(0x51008050): /* Kryo 485 Silver
(Cortex-A55) */
case UINT32_C(0x53000030): /* Exynos M4 */
case UINT32_C(0x53000040): /* Exynos M5 */
isa->fp16arith = true;
isa->rdm = true;
break;
}
}
/*
* NEON VDOT instructions are not indicated in /proc/cpuinfo.
* Use a MIDR-based heuristic to whitelist processors known to
* support it:
* - Processors with Cortex-A76 cores
* - Processors with Cortex-A77 cores
* - Processors with Cortex-A78 cores
* - Processors with Cortex-A510 cores
* - Processors with Cortex-A710 cores
* - Processors with Cortex-A715 cores
* - Processors with Cortex-X1 cores
* - Processors with Cortex-X2 cores
* - Processors with Cortex-X3 cores
* - Processors with Exynos M4 cores
* - Processors with Exynos M5 cores
* - Neoverse N1 cores
* - Neoverse N2 cores
* - Neoverse V1 cores
* - Neoverse V2 cores
*/
if (chipset->series == cpuinfo_arm_chipset_series_spreadtrum_sc && chipset->model == 9863) {
cpuinfo_log_warning(
"VDOT instructions disabled: cause occasional SIGILL on Spreadtrum SC9863A");
} else if (chipset->series == cpuinfo_arm_chipset_series_unisoc_t && chipset->model == 310) {
cpuinfo_log_warning("VDOT instructions disabled: cause occasional SIGILL on Unisoc T310");
} else if (chipset->series == cpuinfo_arm_chipset_series_unisoc_ums && chipset->model == 312) {
cpuinfo_log_warning("VDOT instructions disabled: cause occasional SIGILL on Unisoc UMS312");
} else {
switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) {
case UINT32_C(0x4100D0B0): /* Cortex-A76 */
case UINT32_C(0x4100D0C0): /* Neoverse N1 */
case UINT32_C(0x4100D0D0): /* Cortex-A77 */
case UINT32_C(0x4100D0E0): /* Cortex-A76AE */
case UINT32_C(0x4100D400): /* Neoverse V1 */
case UINT32_C(0x4100D410): /* Cortex-A78 */
case UINT32_C(0x4100D440): /* Cortex-X1 */
case UINT32_C(0x4100D460): /* Cortex-A510 */
case UINT32_C(0x4100D470): /* Cortex-A710 */
case UINT32_C(0x4100D480): /* Cortex-X2 */
case UINT32_C(0x4100D490): /* Neoverse N2 */
case UINT32_C(0x4100D4D0): /* Cortex-A715 */
case UINT32_C(0x4100D4E0): /* Cortex-X3 */
case UINT32_C(0x4100D4F0): /* Neoverse V2 */
case UINT32_C(0x4800D400): /* Cortex-A76
(HiSilicon) */
case UINT32_C(0x51008040): /* Kryo 485 Gold
(Cortex-A76) */
case UINT32_C(0x51008050): /* Kryo 485 Silver
(Cortex-A55) */
case UINT32_C(0x53000030): /* Exynos M4 */
case UINT32_C(0x53000040): /* Exynos M5 */
isa->dot = true;
break;
case UINT32_C(0x4100D050): /* Cortex A55: revision 1
or later only */
isa->dot = !!(midr_get_variant(midr) >= 1);
break;
case UINT32_C(0x4100D0A0): /* Cortex A75: revision 2
or later only */
isa->dot = !!(midr_get_variant(midr) >= 2);
break;
}
}
} else {
/* ARMv7 or lower: use feature flags to detect optional features
*/
/*
* ARM11 (ARM 1136/1156/1176/11 MPCore) processors can report v7
* architecture even though they support only ARMv6 instruction
* set.
*/
if (architecture_version == 7 && midr_is_arm11(midr)) {
cpuinfo_log_warning(
"kernel-reported architecture ARMv7 ignored due to mismatch with processor microarchitecture (ARM11)");
architecture_version = 6;
}
if (architecture_version < 7) {
const uint32_t armv7_features_mask = CPUINFO_ARM_LINUX_FEATURE_VFPV3 |
CPUINFO_ARM_LINUX_FEATURE_VFPV3D16 | CPUINFO_ARM_LINUX_FEATURE_VFPD32 |
CPUINFO_ARM_LINUX_FEATURE_VFPV4 | CPUINFO_ARM_LINUX_FEATURE_NEON |
CPUINFO_ARM_LINUX_FEATURE_IDIVT | CPUINFO_ARM_LINUX_FEATURE_IDIVA;
if (features & armv7_features_mask) {
architecture_version = 7;
}
}
if ((architecture_version >= 6) || (features & CPUINFO_ARM_LINUX_FEATURE_EDSP) ||
(architecture_flags & CPUINFO_ARM_LINUX_ARCH_E)) {
isa->armv5e = true;
}
if (architecture_version >= 6) {
isa->armv6 = true;
}
if (architecture_version >= 7) {
isa->armv6k = true;
isa->armv7 = true;
/*
* ARMv7 MP extension (PLDW instruction) is not
* indicated in /proc/cpuinfo. Use heuristic list of
* supporting processors:
* - Processors supporting UDIV/SDIV instructions
* ("idiva" + "idivt" features in /proc/cpuinfo)
* - Cortex-A5
* - Cortex-A9
* - Dual-Core Scorpion
* - Krait (supports UDIV/SDIV, but kernels may not
* report it in /proc/cpuinfo)
*
* TODO: check single-core Qualcomm Scorpion.
*/
switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) {
case UINT32_C(0x4100C050): /* Cortex-A5 */
case UINT32_C(0x4100C090): /* Cortex-A9 */
case UINT32_C(0x510002D0): /* Scorpion (dual-core) */
case UINT32_C(0x510004D0): /* Krait (dual-core) */
case UINT32_C(0x510006F0): /* Krait (quad-core) */
isa->armv7mp = true;
break;
default:
/* In practice IDIV instruction implies
* ARMv7+MP ISA */
isa->armv7mp = (features & CPUINFO_ARM_LINUX_FEATURE_IDIV) ==
CPUINFO_ARM_LINUX_FEATURE_IDIV;
break;
}
}
if (features & CPUINFO_ARM_LINUX_FEATURE_IWMMXT) {
#if !defined(__ARM_ARCH_8A__) && !(defined(__ARM_ARCH) && (__ARM_ARCH >= 8))
const uint32_t wcid = read_wcid();
cpuinfo_log_debug("WCID = 0x%08" PRIx32, wcid);
const uint32_t coprocessor_type = (wcid >> 8) & UINT32_C(0xFF);
if (coprocessor_type >= 0x10) {
isa->wmmx = true;
if (coprocessor_type >= 0x20) {
isa->wmmx2 = true;
}
} else {
cpuinfo_log_warning(
"WMMX ISA disabled: OS reported iwmmxt feature, "
"but WCID coprocessor type 0x%" PRIx32 " indicates no WMMX support",
coprocessor_type);
}
#else
cpuinfo_log_warning(
"WMMX ISA disabled: OS reported iwmmxt feature, "
"but there is no iWMMXt coprocessor");
#endif
}
if ((features & CPUINFO_ARM_LINUX_FEATURE_THUMB) || (architecture_flags & CPUINFO_ARM_LINUX_ARCH_T)) {
isa->thumb = true;
/*
* There is no separate feature flag for Thumb 2.
* All ARMv7 processors and ARM 1156 support Thumb 2.
*/
if (architecture_version >= 7 || midr_is_arm1156(midr)) {
isa->thumb2 = true;
}
}
if (features & CPUINFO_ARM_LINUX_FEATURE_THUMBEE) {
isa->thumbee = true;
}
if ((features & CPUINFO_ARM_LINUX_FEATURE_JAVA) || (architecture_flags & CPUINFO_ARM_LINUX_ARCH_J)) {
isa->jazelle = true;
}
/* Qualcomm Krait may have buggy kernel configuration that
* doesn't report IDIV */
if ((features & CPUINFO_ARM_LINUX_FEATURE_IDIV) == CPUINFO_ARM_LINUX_FEATURE_IDIV ||
midr_is_krait(midr)) {
isa->idiv = true;
}
const uint32_t vfp_mask = CPUINFO_ARM_LINUX_FEATURE_VFP | CPUINFO_ARM_LINUX_FEATURE_VFPV3 |
CPUINFO_ARM_LINUX_FEATURE_VFPV3D16 | CPUINFO_ARM_LINUX_FEATURE_VFPD32 |
CPUINFO_ARM_LINUX_FEATURE_VFPV4 | CPUINFO_ARM_LINUX_FEATURE_NEON;
if (features & vfp_mask) {
const uint32_t vfpv3_mask = CPUINFO_ARM_LINUX_FEATURE_VFPV3 |
CPUINFO_ARM_LINUX_FEATURE_VFPV3D16 | CPUINFO_ARM_LINUX_FEATURE_VFPD32 |
CPUINFO_ARM_LINUX_FEATURE_VFPV4 | CPUINFO_ARM_LINUX_FEATURE_NEON;
if ((architecture_version >= 7) || (features & vfpv3_mask)) {
isa->vfpv3 = true;
const uint32_t d32_mask =
CPUINFO_ARM_LINUX_FEATURE_VFPD32 | CPUINFO_ARM_LINUX_FEATURE_NEON;
if (features & d32_mask) {
isa->d32 = true;
}
} else {
#if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_8A__) || defined(__ARM_ARCH) && (__ARM_ARCH >= 7)
isa->vfpv3 = true;
#else
const uint32_t fpsid = read_fpsid();
cpuinfo_log_debug("FPSID = 0x%08" PRIx32, fpsid);
const uint32_t subarchitecture = (fpsid >> 16) & UINT32_C(0x7F);
if (subarchitecture >= 0x01) {
isa->vfpv2 = true;
}
#endif
}
}
if (features & CPUINFO_ARM_LINUX_FEATURE_NEON) {
isa->neon = true;
}
/*
* There is no separate feature flag for FP16 support.
* VFPv4 implies VFPv3-FP16 support (and in practice, NEON-HP as
* well). Additionally, ARM Cortex-A9 and Qualcomm Scorpion
* support FP16.
*/
if ((features & CPUINFO_ARM_LINUX_FEATURE_VFPV4) || midr_is_cortex_a9(midr) || midr_is_scorpion(midr)) {
isa->fp16 = true;
}
if (features & CPUINFO_ARM_LINUX_FEATURE_VFPV4) {
isa->fma = true;
}
}
if (features2 & CPUINFO_ARM_LINUX_FEATURE2_AES) {
isa->aes = true;
}
if (features2 & CPUINFO_ARM_LINUX_FEATURE2_PMULL) {
isa->pmull = true;
}
if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SHA1) {
isa->sha1 = true;
}
if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SHA2) {
isa->sha2 = true;
}
if (features2 & CPUINFO_ARM_LINUX_FEATURE2_CRC32) {
isa->crc32 = true;
}
}

View File

@@ -0,0 +1,194 @@
#include <stdint.h>
#include <arm/linux/api.h>
#include <cpuinfo/log.h>
#include <sys/prctl.h>
/*
 * Decodes the AArch64 ISA extensions from Linux hwcap bitmasks and the MIDR
 * register value, and sets the corresponding flags in the isa structure.
 *
 * @param features  - HWCAP bitmask (CPUINFO_ARM_LINUX_FEATURE_* bits).
 * @param features2 - HWCAP2 bitmask (CPUINFO_ARM_LINUX_FEATURE2_* bits).
 * @param midr      - Main ID Register value, used for MIDR-based whitelists
 *                    on cores whose kernels under-report capabilities.
 * @param chipset   - detected chipset, used to blacklist Exynos 9810.
 * @param[out] isa  - decoded ISA feature structure to populate.
 *
 * Fix: removed a duplicated CPUINFO_ARM_LINUX_FEATURE_JSCVT check (the same
 * if-block appeared twice in a row).
 */
void cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo(
	uint32_t features,
	uint64_t features2,
	uint32_t midr,
	const struct cpuinfo_arm_chipset chipset[restrict static 1],
	struct cpuinfo_arm_isa isa[restrict static 1]) {
	if (features & CPUINFO_ARM_LINUX_FEATURE_AES) {
		isa->aes = true;
	}
	if (features & CPUINFO_ARM_LINUX_FEATURE_PMULL) {
		isa->pmull = true;
	}
	if (features & CPUINFO_ARM_LINUX_FEATURE_SHA1) {
		isa->sha1 = true;
	}
	if (features & CPUINFO_ARM_LINUX_FEATURE_SHA2) {
		isa->sha2 = true;
	}
	if (features & CPUINFO_ARM_LINUX_FEATURE_CRC32) {
		isa->crc32 = true;
	}
	if (features & CPUINFO_ARM_LINUX_FEATURE_ATOMICS) {
		isa->atomics = true;
	}
	/*
	 * Some phones ship with an old kernel configuration that doesn't report
	 * NEON FP16 compute extension and SQRDMLAH/SQRDMLSH/UQRDMLAH/UQRDMLSH
	 * instructions. Use a MIDR-based heuristic to whitelist processors
	 * known to support it:
	 * - Processors with Cortex-A55 cores
	 * - Processors with Cortex-A65 cores
	 * - Processors with Cortex-A75 cores
	 * - Processors with Cortex-A76 cores
	 * - Processors with Cortex-A77 cores
	 * - Processors with Exynos M4 cores
	 * - Processors with Exynos M5 cores
	 * - Neoverse N1 cores
	 * - Neoverse V1 cores
	 * - Neoverse N2 cores
	 * - Neoverse V2 cores
	 */
	if (chipset->series == cpuinfo_arm_chipset_series_samsung_exynos && chipset->model == 9810) {
		/* Exynos 9810 reports that it supports FP16 compute, but in
		 * fact only little cores do */
		cpuinfo_log_warning(
			"FP16 arithmetics and RDM disabled: only little cores in Exynos 9810 support these extensions");
	} else {
		const uint32_t fp16arith_mask = CPUINFO_ARM_LINUX_FEATURE_FPHP | CPUINFO_ARM_LINUX_FEATURE_ASIMDHP;
		switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) {
			case UINT32_C(0x4100D050): /* Cortex-A55 */
			case UINT32_C(0x4100D060): /* Cortex-A65 */
			case UINT32_C(0x4100D0A0): /* Cortex-A75 */
			case UINT32_C(0x4100D0B0): /* Cortex-A76 */
			case UINT32_C(0x4100D0C0): /* Neoverse N1 */
			case UINT32_C(0x4100D0D0): /* Cortex-A77 */
			case UINT32_C(0x4100D0E0): /* Cortex-A76AE */
			case UINT32_C(0x4100D400): /* Neoverse V1 */
			case UINT32_C(0x4100D490): /* Neoverse N2 */
			case UINT32_C(0x4100D4F0): /* Neoverse V2 */
			case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */
			case UINT32_C(0x51008020): /* Kryo 385 Gold (Cortex-A75) */
			case UINT32_C(0x51008030): /* Kryo 385 Silver (Cortex-A55) */
			case UINT32_C(0x51008040): /* Kryo 485 Gold (Cortex-A76) */
			case UINT32_C(0x51008050): /* Kryo 485 Silver (Cortex-A55) */
			case UINT32_C(0x53000030): /* Exynos M4 */
			case UINT32_C(0x53000040): /* Exynos M5 */
				isa->fp16arith = true;
				isa->rdm = true;
				break;
			default:
				/* FP16 arithmetic requires both scalar (FPHP) and
				 * SIMD (ASIMDHP) halves of the extension. */
				if ((features & fp16arith_mask) == fp16arith_mask) {
					isa->fp16arith = true;
				} else if (features & CPUINFO_ARM_LINUX_FEATURE_FPHP) {
					cpuinfo_log_warning(
						"FP16 arithmetics disabled: detected support only for scalar operations");
				} else if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDHP) {
					cpuinfo_log_warning(
						"FP16 arithmetics disabled: detected support only for SIMD operations");
				}
				if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDRDM) {
					isa->rdm = true;
				}
				break;
		}
	}
	if (features2 & CPUINFO_ARM_LINUX_FEATURE2_I8MM) {
		isa->i8mm = true;
	}
	/*
	 * Many phones ship with an old kernel configuration that doesn't report
	 * UDOT/SDOT instructions. Use a MIDR-based heuristic to whitelist
	 * processors known to support it.
	 */
	switch (midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK)) {
		case UINT32_C(0x4100D060): /* Cortex-A65 */
		case UINT32_C(0x4100D0B0): /* Cortex-A76 */
		case UINT32_C(0x4100D0C0): /* Neoverse N1 */
		case UINT32_C(0x4100D0D0): /* Cortex-A77 */
		case UINT32_C(0x4100D0E0): /* Cortex-A76AE */
		case UINT32_C(0x4100D400): /* Neoverse V1 */
		case UINT32_C(0x4100D490): /* Neoverse N2 */
		case UINT32_C(0x4100D4A0): /* Neoverse E1 */
		case UINT32_C(0x4100D4F0): /* Neoverse V2 */
		case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */
		case UINT32_C(0x51008040): /* Kryo 485 Gold (Cortex-A76) */
		case UINT32_C(0x51008050): /* Kryo 485 Silver (Cortex-A55) */
		case UINT32_C(0x53000030): /* Exynos-M4 */
		case UINT32_C(0x53000040): /* Exynos-M5 */
			isa->dot = true;
			break;
		case UINT32_C(0x4100D050): /* Cortex A55: revision 1 or later only */
			isa->dot = !!(midr_get_variant(midr) >= 1);
			break;
		case UINT32_C(0x4100D0A0): /* Cortex A75: revision 2 or later only */
			isa->dot = !!(midr_get_variant(midr) >= 2);
			break;
		default:
			if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDDP) {
				isa->dot = true;
			}
			break;
	}
	if (features & CPUINFO_ARM_LINUX_FEATURE_JSCVT) {
		isa->jscvt = true;
	}
	if (features & CPUINFO_ARM_LINUX_FEATURE_FCMA) {
		isa->fcma = true;
	}
	if (features & CPUINFO_ARM_LINUX_FEATURE_SVE) {
		isa->sve = true;
	}
	if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SVE2) {
		isa->sve2 = true;
	}
	if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SME) {
		isa->sme = true;
	}
	if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SME2) {
		isa->sme2 = true;
	}
	if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SME2P1) {
		isa->sme2p1 = true;
	}
	if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SME_I16I32) {
		isa->sme_i16i32 = true;
	}
	if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SME_BI32I32) {
		isa->sme_bi32i32 = true;
	}
	if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SME_B16B16) {
		isa->sme_b16b16 = true;
	}
	if (features2 & CPUINFO_ARM_LINUX_FEATURE2_SME_F16F16) {
		isa->sme_f16f16 = true;
	}
	// SVEBF16 is set iff SVE and BF16 are both supported, but the SVEBF16
	// feature flag was added in Linux kernel before the BF16 feature flag,
	// so we check for either.
	if (features2 & (CPUINFO_ARM_LINUX_FEATURE2_BF16 | CPUINFO_ARM_LINUX_FEATURE2_SVEBF16)) {
		isa->bf16 = true;
	}
	if (features & CPUINFO_ARM_LINUX_FEATURE_ASIMDFHM) {
		isa->fhm = true;
	}
/* Fallback definitions for old kernel headers that predate SVE prctl support. */
#ifndef PR_SVE_GET_VL
#define PR_SVE_GET_VL 51
#endif
#ifndef PR_SVE_VL_LEN_MASK
#define PR_SVE_VL_LEN_MASK 0xffff
#endif
	int ret = prctl(PR_SVE_GET_VL);
	if (ret < 0) {
		cpuinfo_log_warning("No SVE support on this machine");
		isa->svelen = 0; // Assume no SVE support if the call fails
	} else {
		// Mask out the SVE vector length bits
		isa->svelen = ret & PR_SVE_VL_LEN_MASK;
	}
}

392
3rdparty/cpuinfo/src/arm/linux/api.h vendored Normal file
View File

@@ -0,0 +1,392 @@
#pragma once
#include <stdbool.h>
#include <stdint.h>
#include <arm/api.h>
#include <arm/midr.h>
#include <cpuinfo.h>
#include <cpuinfo/common.h>
#include <linux/api.h>
/* No hard limit in the kernel, maximum length observed on non-rogue kernels is
* 64 */
#define CPUINFO_HARDWARE_VALUE_MAX 64
/* No hard limit in the kernel, maximum length on Raspberry Pi is 8. Add 1
* symbol to detect overly large revision strings */
#define CPUINFO_REVISION_VALUE_MAX 9
#ifdef __ANDROID__
/* As per include/sys/system_properties.h in Android NDK */
#define CPUINFO_BUILD_PROP_NAME_MAX 32
#define CPUINFO_BUILD_PROP_VALUE_MAX 92
/*
 * Raw string values of Android system properties (and the "Hardware" line of
 * /proc/cpuinfo) used for chipset detection; each ro_* field presumably holds
 * the value of the correspondingly-named Android build property (e.g.
 * ro.product.board) — confirm against the property readers in the Android
 * port.
 */
struct cpuinfo_android_properties {
	char proc_cpuinfo_hardware[CPUINFO_HARDWARE_VALUE_MAX];
	char ro_product_board[CPUINFO_BUILD_PROP_VALUE_MAX];
	char ro_board_platform[CPUINFO_BUILD_PROP_VALUE_MAX];
	char ro_mediatek_platform[CPUINFO_BUILD_PROP_VALUE_MAX];
	char ro_arch[CPUINFO_BUILD_PROP_VALUE_MAX];
	char ro_chipname[CPUINFO_BUILD_PROP_VALUE_MAX];
	char ro_hardware_chipname[CPUINFO_BUILD_PROP_VALUE_MAX];
};
#endif
#define CPUINFO_ARM_LINUX_ARCH_T UINT32_C(0x00000001)
#define CPUINFO_ARM_LINUX_ARCH_E UINT32_C(0x00000002)
#define CPUINFO_ARM_LINUX_ARCH_J UINT32_C(0x00000004)
#define CPUINFO_ARM_LINUX_ARCH_TE UINT32_C(0x00000003)
#define CPUINFO_ARM_LINUX_ARCH_TEJ UINT32_C(0x00000007)
/*
 * Instruction (i_*) and data (d_*) cache parameters as reported in
 * /proc/cpuinfo: size, associativity, line length, and number of sets
 * (units are as reported by the kernel — confirm in the parser).
 */
struct cpuinfo_arm_linux_proc_cpuinfo_cache {
	uint32_t i_size;
	uint32_t i_assoc;
	uint32_t i_line_length;
	uint32_t i_sets;
	uint32_t d_size;
	uint32_t d_assoc;
	uint32_t d_line_length;
	uint32_t d_sets;
};
#if CPUINFO_ARCH_ARM
/* arch/arm/include/uapi/asm/hwcap.h */
#define CPUINFO_ARM_LINUX_FEATURE_SWP UINT32_C(0x00000001)
#define CPUINFO_ARM_LINUX_FEATURE_HALF UINT32_C(0x00000002)
#define CPUINFO_ARM_LINUX_FEATURE_THUMB UINT32_C(0x00000004)
#define CPUINFO_ARM_LINUX_FEATURE_26BIT UINT32_C(0x00000008)
#define CPUINFO_ARM_LINUX_FEATURE_FASTMULT UINT32_C(0x00000010)
#define CPUINFO_ARM_LINUX_FEATURE_FPA UINT32_C(0x00000020)
#define CPUINFO_ARM_LINUX_FEATURE_VFP UINT32_C(0x00000040)
#define CPUINFO_ARM_LINUX_FEATURE_EDSP UINT32_C(0x00000080)
#define CPUINFO_ARM_LINUX_FEATURE_JAVA UINT32_C(0x00000100)
#define CPUINFO_ARM_LINUX_FEATURE_IWMMXT UINT32_C(0x00000200)
#define CPUINFO_ARM_LINUX_FEATURE_CRUNCH UINT32_C(0x00000400)
#define CPUINFO_ARM_LINUX_FEATURE_THUMBEE UINT32_C(0x00000800)
#define CPUINFO_ARM_LINUX_FEATURE_NEON UINT32_C(0x00001000)
#define CPUINFO_ARM_LINUX_FEATURE_VFPV3 UINT32_C(0x00002000)
#define CPUINFO_ARM_LINUX_FEATURE_VFPV3D16 \
UINT32_C(0x00004000) /* Also set for VFPv4 with 16 double-precision \
registers */
#define CPUINFO_ARM_LINUX_FEATURE_TLS UINT32_C(0x00008000)
#define CPUINFO_ARM_LINUX_FEATURE_VFPV4 UINT32_C(0x00010000)
#define CPUINFO_ARM_LINUX_FEATURE_IDIVA UINT32_C(0x00020000)
#define CPUINFO_ARM_LINUX_FEATURE_IDIVT UINT32_C(0x00040000)
#define CPUINFO_ARM_LINUX_FEATURE_IDIV UINT32_C(0x00060000)
#define CPUINFO_ARM_LINUX_FEATURE_VFPD32 UINT32_C(0x00080000)
#define CPUINFO_ARM_LINUX_FEATURE_LPAE UINT32_C(0x00100000)
#define CPUINFO_ARM_LINUX_FEATURE_EVTSTRM UINT32_C(0x00200000)
#define CPUINFO_ARM_LINUX_FEATURE2_AES UINT32_C(0x00000001)
#define CPUINFO_ARM_LINUX_FEATURE2_PMULL UINT32_C(0x00000002)
#define CPUINFO_ARM_LINUX_FEATURE2_SHA1 UINT32_C(0x00000004)
#define CPUINFO_ARM_LINUX_FEATURE2_SHA2 UINT32_C(0x00000008)
#define CPUINFO_ARM_LINUX_FEATURE2_CRC32 UINT32_C(0x00000010)
#elif CPUINFO_ARCH_ARM64
/* arch/arm64/include/uapi/asm/hwcap.h */
#define CPUINFO_ARM_LINUX_FEATURE_FP UINT32_C(0x00000001)
#define CPUINFO_ARM_LINUX_FEATURE_ASIMD UINT32_C(0x00000002)
#define CPUINFO_ARM_LINUX_FEATURE_EVTSTRM UINT32_C(0x00000004)
#define CPUINFO_ARM_LINUX_FEATURE_AES UINT32_C(0x00000008)
#define CPUINFO_ARM_LINUX_FEATURE_PMULL UINT32_C(0x00000010)
#define CPUINFO_ARM_LINUX_FEATURE_SHA1 UINT32_C(0x00000020)
#define CPUINFO_ARM_LINUX_FEATURE_SHA2 UINT32_C(0x00000040)
#define CPUINFO_ARM_LINUX_FEATURE_CRC32 UINT32_C(0x00000080)
#define CPUINFO_ARM_LINUX_FEATURE_ATOMICS UINT32_C(0x00000100)
#define CPUINFO_ARM_LINUX_FEATURE_FPHP UINT32_C(0x00000200)
#define CPUINFO_ARM_LINUX_FEATURE_ASIMDHP UINT32_C(0x00000400)
#define CPUINFO_ARM_LINUX_FEATURE_CPUID UINT32_C(0x00000800)
#define CPUINFO_ARM_LINUX_FEATURE_ASIMDRDM UINT32_C(0x00001000)
#define CPUINFO_ARM_LINUX_FEATURE_JSCVT UINT32_C(0x00002000)
#define CPUINFO_ARM_LINUX_FEATURE_FCMA UINT32_C(0x00004000)
#define CPUINFO_ARM_LINUX_FEATURE_LRCPC UINT32_C(0x00008000)
#define CPUINFO_ARM_LINUX_FEATURE_DCPOP UINT32_C(0x00010000)
#define CPUINFO_ARM_LINUX_FEATURE_SHA3 UINT32_C(0x00020000)
#define CPUINFO_ARM_LINUX_FEATURE_SM3 UINT32_C(0x00040000)
#define CPUINFO_ARM_LINUX_FEATURE_SM4 UINT32_C(0x00080000)
#define CPUINFO_ARM_LINUX_FEATURE_ASIMDDP UINT32_C(0x00100000)
#define CPUINFO_ARM_LINUX_FEATURE_SHA512 UINT32_C(0x00200000)
#define CPUINFO_ARM_LINUX_FEATURE_SVE UINT32_C(0x00400000)
#define CPUINFO_ARM_LINUX_FEATURE_ASIMDFHM UINT32_C(0x00800000)
#define CPUINFO_ARM_LINUX_FEATURE_DIT UINT32_C(0x01000000)
#define CPUINFO_ARM_LINUX_FEATURE_USCAT UINT32_C(0x02000000)
#define CPUINFO_ARM_LINUX_FEATURE_ILRCPC UINT32_C(0x04000000)
#define CPUINFO_ARM_LINUX_FEATURE_FLAGM UINT32_C(0x08000000)
#define CPUINFO_ARM_LINUX_FEATURE_SSBS UINT32_C(0x10000000)
#define CPUINFO_ARM_LINUX_FEATURE_SB UINT32_C(0x20000000)
#define CPUINFO_ARM_LINUX_FEATURE_PACA UINT32_C(0x40000000)
#define CPUINFO_ARM_LINUX_FEATURE_PACG UINT32_C(0x80000000)
#define CPUINFO_ARM_LINUX_FEATURE2_DCPODP UINT32_C(0x00000001)
#define CPUINFO_ARM_LINUX_FEATURE2_SVE2 UINT32_C(0x00000002)
#define CPUINFO_ARM_LINUX_FEATURE2_SVEAES UINT32_C(0x00000004)
#define CPUINFO_ARM_LINUX_FEATURE2_SVEPMULL UINT32_C(0x00000008)
#define CPUINFO_ARM_LINUX_FEATURE2_SVEBITPERM UINT32_C(0x00000010)
#define CPUINFO_ARM_LINUX_FEATURE2_SVESHA3 UINT32_C(0x00000020)
#define CPUINFO_ARM_LINUX_FEATURE2_SVESM4 UINT32_C(0x00000040)
#define CPUINFO_ARM_LINUX_FEATURE2_FLAGM2 UINT32_C(0x00000080)
#define CPUINFO_ARM_LINUX_FEATURE2_FRINT UINT32_C(0x00000100)
#define CPUINFO_ARM_LINUX_FEATURE2_SVEI8MM UINT32_C(0x00000200)
#define CPUINFO_ARM_LINUX_FEATURE2_SVEF32MM UINT32_C(0x00000400)
#define CPUINFO_ARM_LINUX_FEATURE2_SVEF64MM UINT32_C(0x00000800)
#define CPUINFO_ARM_LINUX_FEATURE2_SVEBF16 UINT32_C(0x00001000)
#define CPUINFO_ARM_LINUX_FEATURE2_I8MM UINT32_C(0x00002000)
#define CPUINFO_ARM_LINUX_FEATURE2_BF16 UINT32_C(0x00004000)
#define CPUINFO_ARM_LINUX_FEATURE2_DGH UINT32_C(0x00008000)
#define CPUINFO_ARM_LINUX_FEATURE2_RNG UINT32_C(0x00010000)
#define CPUINFO_ARM_LINUX_FEATURE2_BTI UINT32_C(0x00020000)
#define CPUINFO_ARM_LINUX_FEATURE2_SME UINT32_C(0x00800000)
#define CPUINFO_ARM_LINUX_FEATURE2_SME2 UINT64_C(0x0000002000000000)
#define CPUINFO_ARM_LINUX_FEATURE2_SME2P1 UINT64_C(0x0000004000000000)
#define CPUINFO_ARM_LINUX_FEATURE2_SME_I16I32 UINT64_C(0x0000008000000000)
#define CPUINFO_ARM_LINUX_FEATURE2_SME_BI32I32 UINT64_C(0x0000010000000000)
#define CPUINFO_ARM_LINUX_FEATURE2_SME_B16B16 UINT64_C(0x0000020000000000)
#define CPUINFO_ARM_LINUX_FEATURE2_SME_F16F16 UINT64_C(0x0000040000000000)
#endif
#define CPUINFO_ARM_LINUX_VALID_ARCHITECTURE UINT32_C(0x00010000)
#define CPUINFO_ARM_LINUX_VALID_IMPLEMENTER UINT32_C(0x00020000)
#define CPUINFO_ARM_LINUX_VALID_VARIANT UINT32_C(0x00040000)
#define CPUINFO_ARM_LINUX_VALID_PART UINT32_C(0x00080000)
#define CPUINFO_ARM_LINUX_VALID_REVISION UINT32_C(0x00100000)
#define CPUINFO_ARM_LINUX_VALID_PROCESSOR UINT32_C(0x00200000)
#define CPUINFO_ARM_LINUX_VALID_FEATURES UINT32_C(0x00400000)
#if CPUINFO_ARCH_ARM
#define CPUINFO_ARM_LINUX_VALID_ICACHE_SIZE UINT32_C(0x01000000)
#define CPUINFO_ARM_LINUX_VALID_ICACHE_SETS UINT32_C(0x02000000)
#define CPUINFO_ARM_LINUX_VALID_ICACHE_WAYS UINT32_C(0x04000000)
#define CPUINFO_ARM_LINUX_VALID_ICACHE_LINE UINT32_C(0x08000000)
#define CPUINFO_ARM_LINUX_VALID_DCACHE_SIZE UINT32_C(0x10000000)
#define CPUINFO_ARM_LINUX_VALID_DCACHE_SETS UINT32_C(0x20000000)
#define CPUINFO_ARM_LINUX_VALID_DCACHE_WAYS UINT32_C(0x40000000)
#define CPUINFO_ARM_LINUX_VALID_DCACHE_LINE UINT32_C(0x80000000)
#endif
#define CPUINFO_ARM_LINUX_VALID_INFO UINT32_C(0x007F0000)
#define CPUINFO_ARM_LINUX_VALID_MIDR UINT32_C(0x003F0000)
#if CPUINFO_ARCH_ARM
#define CPUINFO_ARM_LINUX_VALID_ICACHE UINT32_C(0x0F000000)
#define CPUINFO_ARM_LINUX_VALID_DCACHE UINT32_C(0xF0000000)
#define CPUINFO_ARM_LINUX_VALID_CACHE_LINE UINT32_C(0x88000000)
#endif
/* Per-logical-processor information collected from /proc/cpuinfo and sysfs. */
struct cpuinfo_arm_linux_processor {
	/* ARM architecture version (e.g. 7 for ARMv7); validity indicated by
	 * CPUINFO_ARM_LINUX_VALID_ARCHITECTURE in the flags field. */
	uint32_t architecture_version;
#if CPUINFO_ARCH_ARM
	/* CPUINFO_ARM_LINUX_ARCH_* bits (T/E/J architecture suffixes). */
	uint32_t architecture_flags;
	/* Cache geometry reported in /proc/cpuinfo (AArch32 kernels only). */
	struct cpuinfo_arm_linux_proc_cpuinfo_cache proc_cpuinfo_cache;
#endif
	/* CPUINFO_ARM_LINUX_FEATURE_* bits (kernel HWCAP). */
	uint32_t features;
	/* CPUINFO_ARM_LINUX_FEATURE2_* bits (kernel HWCAP2). */
	uint64_t features2;
	/**
	 * Main ID Register value.
	 */
	uint32_t midr;
	/* CPU vendor and microarchitecture decoded from MIDR/chipset. */
	enum cpuinfo_vendor vendor;
	enum cpuinfo_uarch uarch;
	/* Index of this processor's uarch; presumably an index into the
	 * cpuinfo_linux_cpu_to_uarch_index_map table — confirm at use sites. */
	uint32_t uarch_index;
	/**
	 * ID of the physical package which includes this logical processor.
	 * The value is parsed from
	 * /sys/devices/system/cpu/cpu<N>/topology/physical_package_id
	 */
	uint32_t package_id;
	/**
	 * Minimum processor ID on the package which includes this logical
	 * processor. This value can serve as an ID for the cluster of logical
	 * processors: it is the same for all logical processors on the same
	 * package.
	 */
	uint32_t package_leader_id;
	/**
	 * Number of logical processors in the package.
	 */
	uint32_t package_processor_count;
	/**
	 * Maximum frequency, in kHZ.
	 * The value is parsed from
	 * /sys/devices/system/cpu/cpu<N>/cpufreq/cpuinfo_max_freq If failed to
	 * read or parse the file, the value is 0.
	 */
	uint32_t max_frequency;
	/**
	 * Minimum frequency, in kHZ.
	 * The value is parsed from
	 * /sys/devices/system/cpu/cpu<N>/cpufreq/cpuinfo_min_freq If failed to
	 * read or parse the file, the value is 0.
	 */
	uint32_t min_frequency;
	/** Linux processor ID */
	uint32_t system_processor_id;
	/* Bitmask of CPUINFO_LINUX_FLAG_* and CPUINFO_ARM_LINUX_VALID_* bits
	 * indicating which of the above fields hold valid data. */
	uint32_t flags;
};
/* Range of logical processor IDs forming one cluster of cores
 * (presumably inclusive at both ends — confirm at use sites). */
struct cpuinfo_arm_linux_cluster {
	uint32_t processor_id_min;
	uint32_t processor_id_max;
};
/* Returns true if the two processors do belong to the same cluster */
/*
 * Returns true if the two processors are judged to belong to the same cluster.
 *
 * Comparison only uses properties that are valid (per the flags bitmask) on
 * BOTH processors: max frequency, min frequency, and the full MIDR value.
 * A frequency mismatch is decisive (returns false immediately). Matching
 * MIDRs imply the same cluster, except for Cortex-A53 cores, where identical
 * MIDRs are common across big.LITTLE clusters, so both frequencies must also
 * match.
 *
 * Fix: use logical && instead of bitwise & on the boolean frequency-match
 * flags in the Cortex-A53 branch, consistent with the final return statement.
 */
static inline bool cpuinfo_arm_linux_processor_equals(
	struct cpuinfo_arm_linux_processor processor_i[restrict static 1],
	struct cpuinfo_arm_linux_processor processor_j[restrict static 1]) {
	const uint32_t joint_flags = processor_i->flags & processor_j->flags;
	bool same_max_frequency = false;
	if (joint_flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
		if (processor_i->max_frequency != processor_j->max_frequency) {
			return false;
		} else {
			same_max_frequency = true;
		}
	}
	bool same_min_frequency = false;
	if (joint_flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
		if (processor_i->min_frequency != processor_j->min_frequency) {
			return false;
		} else {
			same_min_frequency = true;
		}
	}
	if ((joint_flags & CPUINFO_ARM_LINUX_VALID_MIDR) == CPUINFO_ARM_LINUX_VALID_MIDR) {
		if (processor_i->midr == processor_j->midr) {
			if (midr_is_cortex_a53(processor_i->midr)) {
				return same_min_frequency && same_max_frequency;
			} else {
				return true;
			}
		}
	}
	return same_max_frequency && same_min_frequency;
}
/* Returns true if the two processors certainly don't belong to the same cluster
*/
/*
 * Returns true if the two processors certainly do NOT belong to the same
 * cluster: some property that is valid (per the flags bitmask) on both
 * processors — max frequency, min frequency, or the full MIDR value —
 * differs between them. Returns false when no known property rules out a
 * shared cluster.
 */
static inline bool cpuinfo_arm_linux_processor_not_equals(
	struct cpuinfo_arm_linux_processor processor_i[restrict static 1],
	struct cpuinfo_arm_linux_processor processor_j[restrict static 1]) {
	const uint32_t shared_flags = processor_i->flags & processor_j->flags;
	const bool max_frequency_differs = (shared_flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) != 0 &&
		processor_i->max_frequency != processor_j->max_frequency;
	const bool min_frequency_differs = (shared_flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) != 0 &&
		processor_i->min_frequency != processor_j->min_frequency;
	const bool midr_differs = (shared_flags & CPUINFO_ARM_LINUX_VALID_MIDR) == CPUINFO_ARM_LINUX_VALID_MIDR &&
		processor_i->midr != processor_j->midr;
	return max_frequency_differs || min_frequency_differs || midr_differs;
}
CPUINFO_INTERNAL bool cpuinfo_arm_linux_parse_proc_cpuinfo(
char hardware[restrict static CPUINFO_HARDWARE_VALUE_MAX],
char revision[restrict static CPUINFO_REVISION_VALUE_MAX],
uint32_t max_processors_count,
struct cpuinfo_arm_linux_processor processors[restrict static max_processors_count]);
#if CPUINFO_ARCH_ARM
CPUINFO_INTERNAL bool cpuinfo_arm_linux_hwcap_from_getauxval(
uint32_t hwcap[restrict static 1],
uint64_t hwcap2[restrict static 1]);
CPUINFO_INTERNAL bool cpuinfo_arm_linux_hwcap_from_procfs(
uint32_t hwcap[restrict static 1],
uint64_t hwcap2[restrict static 1]);
CPUINFO_INTERNAL void cpuinfo_arm_linux_decode_isa_from_proc_cpuinfo(
uint32_t features,
uint64_t features2,
uint32_t midr,
uint32_t architecture_version,
uint32_t architecture_flags,
const struct cpuinfo_arm_chipset chipset[restrict static 1],
struct cpuinfo_arm_isa isa[restrict static 1]);
#elif CPUINFO_ARCH_ARM64
CPUINFO_INTERNAL void cpuinfo_arm_linux_hwcap_from_getauxval(
uint32_t hwcap[restrict static 1],
uint64_t hwcap2[restrict static 1]);
CPUINFO_INTERNAL void cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo(
uint32_t features,
uint64_t features2,
uint32_t midr,
const struct cpuinfo_arm_chipset chipset[restrict static 1],
struct cpuinfo_arm_isa isa[restrict static 1]);
#endif
#if defined(__ANDROID__)
CPUINFO_INTERNAL struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset(
const struct cpuinfo_android_properties properties[restrict static 1],
uint32_t cores,
uint32_t max_cpu_freq_max);
#else
CPUINFO_INTERNAL struct cpuinfo_arm_chipset cpuinfo_arm_linux_decode_chipset(
const char hardware[restrict static CPUINFO_HARDWARE_VALUE_MAX],
const char revision[restrict static CPUINFO_REVISION_VALUE_MAX],
uint32_t cores,
uint32_t max_cpu_freq_max);
#endif
CPUINFO_INTERNAL struct cpuinfo_arm_chipset cpuinfo_arm_linux_decode_chipset_from_proc_cpuinfo_hardware(
const char proc_cpuinfo_hardware[restrict static CPUINFO_HARDWARE_VALUE_MAX],
uint32_t cores,
uint32_t max_cpu_freq_max,
bool is_tegra);
#ifdef __ANDROID__
CPUINFO_INTERNAL struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_product_board(
const char ro_product_board[restrict static CPUINFO_BUILD_PROP_VALUE_MAX],
uint32_t cores,
uint32_t max_cpu_freq_max);
CPUINFO_INTERNAL struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_board_platform(
const char ro_board_platform[restrict static CPUINFO_BUILD_PROP_VALUE_MAX],
uint32_t cores,
uint32_t max_cpu_freq_max);
CPUINFO_INTERNAL struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_mediatek_platform(
const char ro_mediatek_platform[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]);
CPUINFO_INTERNAL struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_arch(
const char ro_arch[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]);
CPUINFO_INTERNAL struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_chipname(
const char ro_chipname[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]);
CPUINFO_INTERNAL struct cpuinfo_arm_chipset cpuinfo_arm_android_decode_chipset_from_ro_hardware_chipname(
const char ro_hardware_chipname[restrict static CPUINFO_BUILD_PROP_VALUE_MAX]);
#else
CPUINFO_INTERNAL struct cpuinfo_arm_chipset cpuinfo_arm_linux_decode_chipset_from_proc_cpuinfo_revision(
const char proc_cpuinfo_revision[restrict static CPUINFO_REVISION_VALUE_MAX]);
#endif
CPUINFO_INTERNAL bool cpuinfo_arm_linux_detect_core_clusters_by_heuristic(
uint32_t usable_processors,
uint32_t max_processors,
struct cpuinfo_arm_linux_processor processors[restrict static max_processors]);
CPUINFO_INTERNAL void cpuinfo_arm_linux_detect_core_clusters_by_sequential_scan(
uint32_t max_processors,
struct cpuinfo_arm_linux_processor processors[restrict static max_processors]);
CPUINFO_INTERNAL void cpuinfo_arm_linux_count_cluster_processors(
uint32_t max_processors,
struct cpuinfo_arm_linux_processor processors[restrict static max_processors]);
CPUINFO_INTERNAL uint32_t cpuinfo_arm_linux_detect_cluster_midr(
const struct cpuinfo_arm_chipset chipset[restrict static 1],
uint32_t max_processors,
uint32_t usable_processors,
struct cpuinfo_arm_linux_processor processors[restrict static max_processors]);
extern CPUINFO_INTERNAL const uint32_t* cpuinfo_linux_cpu_to_uarch_index_map;
extern CPUINFO_INTERNAL uint32_t cpuinfo_linux_cpu_to_uarch_index_map_entries;

4235
3rdparty/cpuinfo/src/arm/linux/chipset.c vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,632 @@
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arm/linux/api.h>
#include <cpuinfo.h>
#if defined(__ANDROID__)
#include <arm/android/api.h>
#endif
#include <arm/api.h>
#include <arm/midr.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
#include <linux/api.h>
/* Returns true when every bit set in mask is also set in bitfield. */
static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) {
	return (mask & ~bitfield) == 0;
}
/*
* Assigns logical processors to clusters of cores using heuristic based on the
* typical configuration of clusters for 5, 6, 8, and 10 cores:
* - 5 cores (ARM32 Android only): 2 clusters of 4+1 cores
* - 6 cores: 2 clusters of 4+2 cores
* - 8 cores: 2 clusters of 4+4 cores
* - 10 cores: 3 clusters of 4+4+2 cores
*
* The function must be called after parsing OS-provided information on core
* clusters. Its purpose is to detect clusters of cores when OS-provided
* information is lacking or incomplete, i.e.
* - Linux kernel is not configured to report information in sysfs topology
* leaf.
* - Linux kernel reports topology information only for online cores, and only
* cores on one cluster are online, e.g.:
* - Exynos 8890 has 8 cores in 4+4 clusters, but only the first cluster of 4
* cores is reported, and cluster configuration of logical processors 4-7 is not
* reported (all remaining processors 4-7 form cluster 1)
* - MT6797 has 10 cores in 4+4+2, but only the first cluster of 4 cores is
* reported, and cluster configuration of logical processors 4-9 is not reported
* (processors 4-7 form cluster 1, and processors 8-9 form cluster 2).
*
* Heuristic assignment of processors to the above pre-defined clusters fails if
* such assignment would contradict information provided by the operating
* system:
* - Any of the OS-reported processor clusters is different than the
* corresponding heuristic cluster.
* - Processors in a heuristic cluster have no OS-provided cluster siblings
* information, but have known and different minimum/maximum frequency.
* - Processors in a heuristic cluster have no OS-provided cluster siblings
* information, but have known and different MIDR components.
*
* If the heuristic assignment of processors to clusters of cores fails, all
* processors' clusters are unchanged.
*
* @param usable_processors - number of processors in the @p processors array
* with CPUINFO_LINUX_FLAG_VALID flags.
* @param max_processors - number of elements in the @p processors array.
* @param[in,out] processors - processor descriptors with pre-parsed POSSIBLE
* and PRESENT flags, minimum/maximum frequency, MIDR information, and core
* cluster (package siblings list) information.
*
* @retval true if the heuristic successfully assigned all processors into
* clusters of cores.
* @retval false if known details about processors contradict the heuristic
* configuration of core clusters.
*/
bool cpuinfo_arm_linux_detect_core_clusters_by_heuristic(
	uint32_t usable_processors,
	uint32_t max_processors,
	struct cpuinfo_arm_linux_processor processors[restrict static max_processors]) {
	/* Heuristic per-cluster core counts for known big.LITTLE-style topologies. */
	uint32_t cluster_processors[3];
	switch (usable_processors) {
		case 10:
			cluster_processors[0] = 4;
			cluster_processors[1] = 4;
			cluster_processors[2] = 2;
			break;
		case 8:
			cluster_processors[0] = 4;
			cluster_processors[1] = 4;
			break;
		case 6:
			cluster_processors[0] = 4;
			cluster_processors[1] = 2;
			break;
#if defined(__ANDROID__) && CPUINFO_ARCH_ARM
		case 5:
			/*
			 * The only processor with 5 cores is Leadcore L1860C
			 * (ARMv7, mobile), but this configuration is not too
			 * unreasonable for a virtualized ARM server.
			 */
			cluster_processors[0] = 4;
			cluster_processors[1] = 1;
			break;
#endif
		default:
			/* No heuristic is known for this processor count. */
			return false;
	}
	/*
	 * Assignment of processors to core clusters is done in two passes:
	 * 1. Verify that the clusters proposed by heuristic are compatible with
	 *    known details about processors.
	 * 2. If verification passed, update core clusters for the processors.
	 */
	uint32_t cluster = 0;
	uint32_t expected_cluster_processors = 0;
	uint32_t cluster_start, cluster_flags, cluster_max_frequency, cluster_min_frequency;
	/* Zero-initialized so the midr_copy_* read-modify-write helpers below
	 * never read an indeterminate value on the first copied MIDR component. */
	uint32_t cluster_midr = 0;
	bool expected_cluster_exists;
	for (uint32_t i = 0; i < max_processors; i++) {
		if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			if (expected_cluster_processors == 0) {
				/* Expect this processor to start a new cluster */
				expected_cluster_exists = !!(processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER);
				if (expected_cluster_exists) {
					if (processors[i].package_leader_id != i) {
						cpuinfo_log_debug(
							"heuristic detection of core clusters failed: "
							"processor %" PRIu32
							" is expected to start a new cluster #%" PRIu32 " with %" PRIu32
							" cores, "
							"but system siblings lists reported it as a sibling of processor %" PRIu32,
							i,
							cluster,
							cluster_processors[cluster],
							processors[i].package_leader_id);
						return false;
					}
				} else {
					/* Start accumulating known details
					 * (frequency/MIDR) for this
					 * heuristic-only cluster. */
					cluster_flags = 0;
				}
				cluster_start = i;
				expected_cluster_processors = cluster_processors[cluster++];
			} else {
				/* Expect this processor to belong to the same
				 * cluster as processor cluster_start */
				if (expected_cluster_exists) {
					/*
					 * The cluster suggested by the
					 * heuristic was already parsed from
					 * system siblings lists. For all
					 * processors we expect in the cluster,
					 * check that:
					 * - They have pre-assigned cluster from
					 *   siblings lists
					 *   (CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER
					 *   flag).
					 * - They were assigned to the same
					 *   cluster based on siblings lists
					 *   (package_leader_id points to the
					 *   first processor in the cluster).
					 */
					if ((processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER) == 0) {
						cpuinfo_log_debug(
							"heuristic detection of core clusters failed: "
							"processor %" PRIu32
							" is expected to belong to the cluster of processor %" PRIu32
							", "
							"but system siblings lists did not report it as a sibling of processor %" PRIu32,
							i,
							cluster_start,
							cluster_start);
						return false;
					}
					if (processors[i].package_leader_id != cluster_start) {
						cpuinfo_log_debug(
							"heuristic detection of core clusters failed: "
							"processor %" PRIu32
							" is expected to belong to the cluster of processor %" PRIu32
							", "
							"but system siblings lists reported it to belong to the cluster of processor %" PRIu32,
							i,
							cluster_start,
							cluster_start);
						return false;
					}
				} else {
					/*
					 * The cluster suggested by the heuristic
					 * was not parsed from system siblings
					 * lists. For all processors we expect
					 * in the cluster, check that:
					 * - They have no pre-assigned cluster
					 *   from siblings lists.
					 * - If their min/max CPU frequency is
					 *   known, it is the same.
					 * - If any part of their MIDR
					 *   (Implementer, Variant, Part,
					 *   Revision) is known, it is the same.
					 */
					if (processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER) {
						cpuinfo_log_debug(
							"heuristic detection of core clusters failed: "
							"processor %" PRIu32
							" is expected to be unassigned to any cluster, "
							"but system siblings lists reported it to belong to the cluster of processor %" PRIu32,
							i,
							processors[i].package_leader_id);
						return false;
					}
					if (processors[i].flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
						if (cluster_flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
							if (cluster_min_frequency != processors[i].min_frequency) {
								cpuinfo_log_debug(
									"heuristic detection of core clusters failed: "
									"minimum frequency of processor %" PRIu32
									" (%" PRIu32
									" KHz) is different than of its expected cluster (%" PRIu32
									" KHz)",
									i,
									processors[i].min_frequency,
									cluster_min_frequency);
								return false;
							}
						} else {
							cluster_min_frequency = processors[i].min_frequency;
							cluster_flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY;
						}
					}
					if (processors[i].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
						if (cluster_flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
							if (cluster_max_frequency != processors[i].max_frequency) {
								cpuinfo_log_debug(
									"heuristic detection of core clusters failed: "
									"maximum frequency of processor %" PRIu32
									" (%" PRIu32
									" KHz) is different than of its expected cluster (%" PRIu32
									" KHz)",
									i,
									processors[i].max_frequency,
									cluster_max_frequency);
								return false;
							}
						} else {
							cluster_max_frequency = processors[i].max_frequency;
							cluster_flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
						}
					}
					if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
						if (cluster_flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
							if ((cluster_midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK) !=
							    (processors[i].midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK)) {
								cpuinfo_log_debug(
									"heuristic detection of core clusters failed: "
									"CPU Implementer of processor %" PRIu32
									" (0x%02" PRIx32
									") is different than of its expected cluster (0x%02" PRIx32
									")",
									i,
									midr_get_implementer(processors[i].midr),
									midr_get_implementer(cluster_midr));
								return false;
							}
						} else {
							cluster_midr =
								midr_copy_implementer(cluster_midr, processors[i].midr);
							cluster_flags |= CPUINFO_ARM_LINUX_VALID_IMPLEMENTER;
						}
					}
					if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
						if (cluster_flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
							if ((cluster_midr & CPUINFO_ARM_MIDR_VARIANT_MASK) !=
							    (processors[i].midr & CPUINFO_ARM_MIDR_VARIANT_MASK)) {
								cpuinfo_log_debug(
									"heuristic detection of core clusters failed: "
									"CPU Variant of processor %" PRIu32
									" (0x%" PRIx32
									") is different than of its expected cluster (0x%" PRIx32
									")",
									i,
									midr_get_variant(processors[i].midr),
									midr_get_variant(cluster_midr));
								return false;
							}
						} else {
							cluster_midr =
								midr_copy_variant(cluster_midr, processors[i].midr);
							cluster_flags |= CPUINFO_ARM_LINUX_VALID_VARIANT;
						}
					}
					if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PART) {
						if (cluster_flags & CPUINFO_ARM_LINUX_VALID_PART) {
							if ((cluster_midr & CPUINFO_ARM_MIDR_PART_MASK) !=
							    (processors[i].midr & CPUINFO_ARM_MIDR_PART_MASK)) {
								cpuinfo_log_debug(
									"heuristic detection of core clusters failed: "
									"CPU Part of processor %" PRIu32
									" (0x%03" PRIx32
									") is different than of its expected cluster (0x%03" PRIx32
									")",
									i,
									midr_get_part(processors[i].midr),
									midr_get_part(cluster_midr));
								return false;
							}
						} else {
							cluster_midr = midr_copy_part(cluster_midr, processors[i].midr);
							cluster_flags |= CPUINFO_ARM_LINUX_VALID_PART;
						}
					}
					if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
						if (cluster_flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
							if ((cluster_midr & CPUINFO_ARM_MIDR_REVISION_MASK) !=
							    (processors[i].midr & CPUINFO_ARM_MIDR_REVISION_MASK)) {
								/* Fixed argument order: processor
								 * revision first, cluster revision
								 * second, matching the format string
								 * and the other MIDR mismatch logs. */
								cpuinfo_log_debug(
									"heuristic detection of core clusters failed: "
									"CPU Revision of processor %" PRIu32
									" (0x%" PRIx32
									") is different than of its expected cluster (0x%" PRIx32
									")",
									i,
									midr_get_revision(processors[i].midr),
									midr_get_revision(cluster_midr));
								return false;
							}
						} else {
							cluster_midr =
								midr_copy_revision(cluster_midr, processors[i].midr);
							cluster_flags |= CPUINFO_ARM_LINUX_VALID_REVISION;
						}
					}
				}
			}
			expected_cluster_processors--;
		}
	}
	/* Verification passed, assign all processors to new clusters */
	cluster = 0;
	expected_cluster_processors = 0;
	for (uint32_t i = 0; i < max_processors; i++) {
		if (bitmask_all(processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			if (expected_cluster_processors == 0) {
				/* Expect this processor to start a new cluster */
				cluster_start = i;
				expected_cluster_processors = cluster_processors[cluster++];
			} else {
				/* Expect this processor to belong to the same
				 * cluster as processor cluster_start */
				if (!(processors[i].flags & CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) {
					cpuinfo_log_debug(
						"assigned processor %" PRIu32 " to cluster of processor %" PRIu32
						" based on heuristic",
						i,
						cluster_start);
				}
				processors[i].package_leader_id = cluster_start;
				processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
			}
			expected_cluster_processors--;
		}
	}
	return true;
}
/*
* Assigns logical processors to clusters of cores in sequential manner:
* - Clusters detected from OS-provided information are unchanged:
* - Processors assigned to these clusters stay assigned to the same clusters
* - No new processors are added to these clusters
* - Processors without pre-assigned cluster are clustered in one sequential
* scan:
* - If known details (min/max frequency, MIDR components) of a processor are
* compatible with a preceding processor, without pre-assigned cluster, the
* processor is assigned to the cluster of the preceding processor.
* - If known details (min/max frequency, MIDR components) of a processor are
* not compatible with a preceding processor, the processor is assigned to a
* newly created cluster.
*
 * The function must be called after parsing OS-provided information on core
 * clusters, and usually is called only if heuristic assignment of processors to
 * clusters (cpuinfo_arm_linux_detect_core_clusters_by_heuristic) failed.
*
* Its purpose is to detect clusters of cores when OS-provided information is
* lacking or incomplete, i.e.
* - Linux kernel is not configured to report information in sysfs topology
* leaf.
* - Linux kernel reports topology information only for online cores, and all
* cores on some of the clusters are offline.
*
* Sequential assignment of processors to clusters always succeeds, and upon
* exit, all usable processors in the
* @p processors array have cluster information.
*
* @param max_processors - number of elements in the @p processors array.
* @param[in,out] processors - processor descriptors with pre-parsed POSSIBLE
* and PRESENT flags, minimum/maximum frequency, MIDR information, and core
* cluster (package siblings list) information.
*
 * Unlike the heuristic pass, sequential assignment cannot fail: this function
 * returns no value, and upon return every usable processor has been assigned
 * to a cluster of cores.
*/
void cpuinfo_arm_linux_detect_core_clusters_by_sequential_scan(
	uint32_t max_processors,
	struct cpuinfo_arm_linux_processor processors[restrict static max_processors]) {
	uint32_t cluster_flags = 0;
	uint32_t cluster_processors = 0;
	uint32_t cluster_start, cluster_max_frequency, cluster_min_frequency;
	/* Zero-initialized so the midr_copy_* read-modify-write helpers below
	 * never read an indeterminate value when the first cluster is created. */
	uint32_t cluster_midr = 0;
	for (uint32_t i = 0; i < max_processors; i++) {
		/* Consider only usable processors that siblings lists did not
		 * already assign to a cluster. */
		if ((processors[i].flags & (CPUINFO_LINUX_FLAG_VALID | CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) ==
		    CPUINFO_LINUX_FLAG_VALID) {
			if (cluster_processors == 0) {
				/* No cluster open yet: this processor starts one. */
				goto new_cluster;
			}
			if (processors[i].flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
				if (cluster_flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
					if (cluster_min_frequency != processors[i].min_frequency) {
						/* Demoted from cpuinfo_log_info for
						 * consistency with the other mismatch
						 * diagnostics in this function. */
						cpuinfo_log_debug(
							"minimum frequency of processor %" PRIu32 " (%" PRIu32
							" KHz) is different than of preceding cluster (%" PRIu32
							" KHz); "
							"processor %" PRIu32 " starts a new cluster",
							i,
							processors[i].min_frequency,
							cluster_min_frequency,
							i);
						goto new_cluster;
					}
				} else {
					cluster_min_frequency = processors[i].min_frequency;
					cluster_flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY;
				}
			}
			if (processors[i].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
				if (cluster_flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
					if (cluster_max_frequency != processors[i].max_frequency) {
						cpuinfo_log_debug(
							"maximum frequency of processor %" PRIu32 " (%" PRIu32
							" KHz) is different than of preceding cluster (%" PRIu32
							" KHz); "
							"processor %" PRIu32 " starts a new cluster",
							i,
							processors[i].max_frequency,
							cluster_max_frequency,
							i);
						goto new_cluster;
					}
				} else {
					cluster_max_frequency = processors[i].max_frequency;
					cluster_flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
				}
			}
			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
				if (cluster_flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
					if ((cluster_midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK) !=
					    (processors[i].midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK)) {
						cpuinfo_log_debug(
							"CPU Implementer of processor %" PRIu32 " (0x%02" PRIx32
							") is different than of preceding cluster (0x%02" PRIx32
							"); "
							"processor %" PRIu32 " starts a new cluster",
							i,
							midr_get_implementer(processors[i].midr),
							midr_get_implementer(cluster_midr),
							i);
						goto new_cluster;
					}
				} else {
					cluster_midr = midr_copy_implementer(cluster_midr, processors[i].midr);
					cluster_flags |= CPUINFO_ARM_LINUX_VALID_IMPLEMENTER;
				}
			}
			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
				if (cluster_flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
					if ((cluster_midr & CPUINFO_ARM_MIDR_VARIANT_MASK) !=
					    (processors[i].midr & CPUINFO_ARM_MIDR_VARIANT_MASK)) {
						/* Message fixed: "preceding cluster"
						 * wording and "; " separator added for
						 * consistency with the other checks. */
						cpuinfo_log_debug(
							"CPU Variant of processor %" PRIu32 " (0x%" PRIx32
							") is different than of preceding cluster (0x%" PRIx32
							"); "
							"processor %" PRIu32 " starts a new cluster",
							i,
							midr_get_variant(processors[i].midr),
							midr_get_variant(cluster_midr),
							i);
						goto new_cluster;
					}
				} else {
					cluster_midr = midr_copy_variant(cluster_midr, processors[i].midr);
					cluster_flags |= CPUINFO_ARM_LINUX_VALID_VARIANT;
				}
			}
			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PART) {
				if (cluster_flags & CPUINFO_ARM_LINUX_VALID_PART) {
					if ((cluster_midr & CPUINFO_ARM_MIDR_PART_MASK) !=
					    (processors[i].midr & CPUINFO_ARM_MIDR_PART_MASK)) {
						cpuinfo_log_debug(
							"CPU Part of processor %" PRIu32 " (0x%03" PRIx32
							") is different than of preceding cluster (0x%03" PRIx32
							"); "
							"processor %" PRIu32 " starts a new cluster",
							i,
							midr_get_part(processors[i].midr),
							midr_get_part(cluster_midr),
							i);
						goto new_cluster;
					}
				} else {
					cluster_midr = midr_copy_part(cluster_midr, processors[i].midr);
					cluster_flags |= CPUINFO_ARM_LINUX_VALID_PART;
				}
			}
			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
				if (cluster_flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
					if ((cluster_midr & CPUINFO_ARM_MIDR_REVISION_MASK) !=
					    (processors[i].midr & CPUINFO_ARM_MIDR_REVISION_MASK)) {
						/* Fixed argument order: processor
						 * revision first, cluster revision
						 * second, matching the format string. */
						cpuinfo_log_debug(
							"CPU Revision of processor %" PRIu32 " (0x%" PRIx32
							") is different than of preceding cluster (0x%" PRIx32
							"); "
							"processor %" PRIu32 " starts a new cluster",
							i,
							midr_get_revision(processors[i].midr),
							midr_get_revision(cluster_midr),
							i);
						goto new_cluster;
					}
				} else {
					cluster_midr = midr_copy_revision(cluster_midr, processors[i].midr);
					cluster_flags |= CPUINFO_ARM_LINUX_VALID_REVISION;
				}
			}
			/* All checks passed, attach processor to the preceding
			 * cluster */
			cluster_processors++;
			processors[i].package_leader_id = cluster_start;
			processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
			cpuinfo_log_debug(
				"assigned processor %" PRIu32 " to preceding cluster of processor %" PRIu32,
				i,
				cluster_start);
			continue;
		new_cluster:
			/* Create a new cluster starting with processor i */
			cluster_start = i;
			processors[i].package_leader_id = i;
			processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
			cluster_processors = 1;
			/* Copy known information from processor to cluster, and
			 * set the flags accordingly */
			cluster_flags = 0;
			if (processors[i].flags & CPUINFO_LINUX_FLAG_MIN_FREQUENCY) {
				cluster_min_frequency = processors[i].min_frequency;
				cluster_flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY;
			}
			if (processors[i].flags & CPUINFO_LINUX_FLAG_MAX_FREQUENCY) {
				cluster_max_frequency = processors[i].max_frequency;
				cluster_flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
			}
			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_IMPLEMENTER) {
				cluster_midr = midr_copy_implementer(cluster_midr, processors[i].midr);
				cluster_flags |= CPUINFO_ARM_LINUX_VALID_IMPLEMENTER;
			}
			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_VARIANT) {
				cluster_midr = midr_copy_variant(cluster_midr, processors[i].midr);
				cluster_flags |= CPUINFO_ARM_LINUX_VALID_VARIANT;
			}
			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_PART) {
				cluster_midr = midr_copy_part(cluster_midr, processors[i].midr);
				cluster_flags |= CPUINFO_ARM_LINUX_VALID_PART;
			}
			if (processors[i].flags & CPUINFO_ARM_LINUX_VALID_REVISION) {
				cluster_midr = midr_copy_revision(cluster_midr, processors[i].midr);
				cluster_flags |= CPUINFO_ARM_LINUX_VALID_REVISION;
			}
		}
	}
}
/*
* Counts the number of logical processors in each core cluster.
* This function should be called after all processors are assigned to core
* clusters.
*
* @param max_processors - number of elements in the @p processors array.
* @param[in,out] processors - processor descriptors with pre-parsed POSSIBLE
* and PRESENT flags, and decoded core cluster (package_leader_id) information.
* The function expects the value of
* processors[i].package_processor_count to be zero. Upon return,
* processors[i].package_processor_count will contain the number of logical
* processors in the respective core cluster.
*/
/*
 * Tallies how many usable logical processors belong to each core cluster and
 * stores the per-cluster total in every member's package_processor_count.
 */
void cpuinfo_arm_linux_count_cluster_processors(
	uint32_t max_processors,
	struct cpuinfo_arm_linux_processor processors[restrict static max_processors]) {
	/* Pass 1: every usable processor bumps the counter on its cluster
	 * leader (package_leader_id). */
	for (uint32_t processor = 0; processor < max_processors; processor++) {
		if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}
		const uint32_t leader = processors[processor].package_leader_id;
		processors[leader].package_processor_count += 1;
	}
	/* Pass 2: broadcast the leader's accumulated total back to every
	 * member of its cluster. */
	for (uint32_t processor = 0; processor < max_processors; processor++) {
		if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}
		const uint32_t leader = processors[processor].package_leader_id;
		processors[processor].package_processor_count = processors[leader].package_processor_count;
	}
}

49
3rdparty/cpuinfo/src/arm/linux/cp.h vendored Normal file
View File

@@ -0,0 +1,49 @@
#include <stdint.h>
#if CPUINFO_MOCK
/* Mock builds: coprocessor register reads are redirected to these
 * test-provided globals instead of executing MRC instructions. */
extern uint32_t cpuinfo_arm_fpsid;
extern uint32_t cpuinfo_arm_mvfr0;
extern uint32_t cpuinfo_arm_wcid;
/* Returns the mocked FPSID value. */
static inline uint32_t read_fpsid(void) {
	return cpuinfo_arm_fpsid;
}
/* Returns the mocked MVFR0 value. */
static inline uint32_t read_mvfr0(void) {
	return cpuinfo_arm_mvfr0;
}
/* Returns the mocked WCID value. */
static inline uint32_t read_wcid(void) {
	return cpuinfo_arm_wcid;
}
#else
#if !defined(__ARM_ARCH_7A__) && !defined(__ARM_ARCH_8A__) && !(defined(__ARM_ARCH) && (__ARM_ARCH >= 7))
/*
 * CoProcessor 10 is inaccessible from user mode since ARMv7,
 * and clang refuses to compile inline assembly when targeting ARMv7+
 */
/* Reads the Floating-Point System ID register (FPSID) via coprocessor 10. */
static inline uint32_t read_fpsid(void) {
	uint32_t fpsid;
	__asm__ __volatile__("MRC p10, 0x7, %[fpsid], cr0, cr0, 0" : [fpsid] "=r"(fpsid));
	return fpsid;
}
/* Reads MVFR0 (VFP feature register 0) via coprocessor 10. */
static inline uint32_t read_mvfr0(void) {
	uint32_t mvfr0;
	__asm__ __volatile__("MRC p10, 0x7, %[mvfr0], cr7, cr0, 0" : [mvfr0] "=r"(mvfr0));
	return mvfr0;
}
#endif
#if !defined(__ARM_ARCH_8A__) && !(defined(__ARM_ARCH) && (__ARM_ARCH >= 8))
/*
 * In ARMv8, AArch32 state supports only conceptual coprocessors CP10, CP11,
 * CP14, and CP15. AArch64 does not support the concept of coprocessors, and
 * clang refuses to compile inline assembly when targeting ARMv8+
 */
/* Reads the WCID register via coprocessor 1 (presumably the iWMMXt
 * Coprocessor ID register — confirm against iWMMXt documentation). */
static inline uint32_t read_wcid(void) {
	uint32_t wcid;
	__asm__ __volatile__("MRC p1, 0, %[wcid], c0, c0" : [wcid] "=r"(wcid));
	return wcid;
}
#endif
#endif

1023
3rdparty/cpuinfo/src/arm/linux/cpuinfo.c vendored Normal file

File diff suppressed because it is too large Load Diff

154
3rdparty/cpuinfo/src/arm/linux/hwcap.c vendored Normal file
View File

@@ -0,0 +1,154 @@
#include <limits.h>
#include <string.h>
#include <dlfcn.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#if CPUINFO_MOCK
#include <cpuinfo-mock.h>
#endif
#include <arm/linux/api.h>
#include <cpuinfo.h>
#include <cpuinfo/log.h>
#if CPUINFO_ARCH_ARM64 || \
CPUINFO_ARCH_ARM && defined(__GLIBC__) && defined(__GLIBC_MINOR__) && \
(__GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 16)
#include <sys/auxv.h>
#else
#define AT_HWCAP 16
#define AT_HWCAP2 26
#endif
#if CPUINFO_MOCK
/* Test-injected AT_HWCAP value served by the hwcap readers in mock builds. */
static uint32_t mock_hwcap = 0;
/* Test hook: overrides the AT_HWCAP value reported by this module. */
void cpuinfo_set_hwcap(uint32_t hwcap) {
	mock_hwcap = hwcap;
}
/* Test-injected AT_HWCAP2 value served by the hwcap readers in mock builds. */
static uint64_t mock_hwcap2 = 0;
/* Test hook: overrides the AT_HWCAP2 value reported by this module. */
void cpuinfo_set_hwcap2(uint64_t hwcap2) {
	mock_hwcap2 = hwcap2;
}
#endif
#if CPUINFO_ARCH_ARM
/* Signature of getauxval(), resolved dynamically on Android libc. */
typedef unsigned long (*getauxval_function_t)(unsigned long);
/*
 * Obtains the AT_HWCAP and AT_HWCAP2 auxiliary-vector values (kernel-reported
 * CPU feature bits) via getauxval.
 *
 * @param[out] hwcap - receives the AT_HWCAP value on success.
 * @param[out] hwcap2 - receives the AT_HWCAP2 value on success.
 * @returns true if both values were obtained; false when getauxval could not
 * be resolved (old Android libc) or is unavailable on this libc.
 */
bool cpuinfo_arm_linux_hwcap_from_getauxval(uint32_t hwcap[restrict static 1], uint64_t hwcap2[restrict static 1]) {
#if CPUINFO_MOCK
	/* Mock builds return the values injected via cpuinfo_set_hwcap/hwcap2. */
	*hwcap = mock_hwcap;
	*hwcap2 = mock_hwcap2;
	return true;
#elif defined(__ANDROID__)
	/* Android: dynamically check if getauxval is supported */
	void* libc = NULL;
	getauxval_function_t getauxval = NULL;
	/* Clear any stale dlerror() state before probing. */
	dlerror();
	libc = dlopen("libc.so", RTLD_LAZY);
	if (libc == NULL) {
		cpuinfo_log_warning("failed to load libc.so: %s", dlerror());
		goto cleanup;
	}
	getauxval = (getauxval_function_t)dlsym(libc, "getauxval");
	if (getauxval == NULL) {
		cpuinfo_log_info("failed to locate getauxval in libc.so: %s", dlerror());
		goto cleanup;
	}
	*hwcap = getauxval(AT_HWCAP);
	*hwcap2 = getauxval(AT_HWCAP2);
cleanup:
	if (libc != NULL) {
		dlclose(libc);
		libc = NULL;
	}
	/* Success iff getauxval was resolved (both outputs written above). */
	return getauxval != NULL;
#elif defined(__GLIBC__) && defined(__GLIBC_MINOR__) && (__GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 16)
	/* GNU/Linux: getauxval is supported since glibc-2.16 */
	*hwcap = getauxval(AT_HWCAP);
	*hwcap2 = getauxval(AT_HWCAP2);
	return true;
#else
	/* No libc mechanism to query the auxiliary vector here. */
	return false;
#endif
}
#ifdef __ANDROID__
/*
 * Fallback for Android systems without getauxval: parses AT_HWCAP/AT_HWCAP2
 * entries directly from the /proc/self/auxv auxiliary-vector file.
 *
 * @param[out] hwcap - receives the AT_HWCAP value (0 if no entry was found).
 * @param[out] hwcap2 - receives the AT_HWCAP2 value (0 if no entry was found).
 * @returns true if the file was read to the end; false on open/read failure,
 * in which case the output arguments are left unmodified.
 */
bool cpuinfo_arm_linux_hwcap_from_procfs(uint32_t hwcap[restrict static 1], uint64_t hwcap2[restrict static 1]) {
#if CPUINFO_MOCK
	/* Mock builds return the values injected via cpuinfo_set_hwcap/hwcap2. */
	*hwcap = mock_hwcap;
	*hwcap2 = mock_hwcap2;
	return true;
#else
	/* Accumulate into locals so outputs stay untouched on failure. */
	uint64_t hwcaps[2] = {0, 0};
	bool result = false;
	int file = -1;
	file = open("/proc/self/auxv", O_RDONLY);
	if (file == -1) {
		cpuinfo_log_warning("failed to open /proc/self/auxv: %s", strerror(errno));
		goto cleanup;
	}
	ssize_t bytes_read;
	do {
		/* 32-bit auxv entry layout (this path is ARM 32-bit only). */
		Elf32_auxv_t elf_auxv;
		bytes_read = read(file, &elf_auxv, sizeof(Elf32_auxv_t));
		if (bytes_read < 0) {
			cpuinfo_log_warning("failed to read /proc/self/auxv: %s", strerror(errno));
			goto cleanup;
		} else if (bytes_read > 0) {
			if (bytes_read == sizeof(elf_auxv)) {
				switch (elf_auxv.a_type) {
					case AT_HWCAP:
						hwcaps[0] = (uint32_t)elf_auxv.a_un.a_val;
						break;
					case AT_HWCAP2:
						hwcaps[1] = (uint64_t)elf_auxv.a_un.a_val;
						break;
				}
			} else {
				/* Partial entry: file truncated mid-record. */
				cpuinfo_log_warning(
					"failed to read %zu bytes from /proc/self/auxv: %zu bytes available",
					sizeof(elf_auxv),
					(size_t)bytes_read);
				goto cleanup;
			}
		}
	} while (bytes_read == sizeof(Elf32_auxv_t));
	/* Success, commit results */
	*hwcap = hwcaps[0];
	*hwcap2 = hwcaps[1];
	result = true;
cleanup:
	if (file != -1) {
		close(file);
		file = -1;
	}
	return result;
#endif
}
#endif /* __ANDROID__ */
#elif CPUINFO_ARCH_ARM64
/*
 * ARM64 flavor: getauxval is always available on this platform, so this
 * variant cannot fail and returns no status.
 *
 * @param[out] hwcap - receives the AT_HWCAP value.
 * @param[out] hwcap2 - receives the AT_HWCAP2 value.
 */
void cpuinfo_arm_linux_hwcap_from_getauxval(uint32_t hwcap[restrict static 1], uint64_t hwcap2[restrict static 1]) {
#if CPUINFO_MOCK
	/* Mock builds return the values injected via cpuinfo_set_hwcap/hwcap2. */
	*hwcap = mock_hwcap;
	*hwcap2 = mock_hwcap2;
#else
	*hwcap = (uint32_t)getauxval(AT_HWCAP);
	*hwcap2 = (uint64_t)getauxval(AT_HWCAP2);
	return;
#endif
}
#endif

888
3rdparty/cpuinfo/src/arm/linux/init.c vendored Normal file
View File

@@ -0,0 +1,888 @@
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arm/linux/api.h>
#include <cpuinfo.h>
#if defined(__ANDROID__)
#include <arm/android/api.h>
#endif
#include <arm/api.h>
#include <arm/midr.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
#include <linux/api.h>
/* Globally visible ARM ISA feature descriptor; zero-initialized here. */
struct cpuinfo_arm_isa cpuinfo_isa = {0};
/* File-local CPU package descriptor; zero-initialized here. */
static struct cpuinfo_package package = {{0}};
/* Checks that every bit set in mask is also set in bitfield. */
static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) {
	return (~bitfield & mask) == 0;
}
/* Returns the smaller of two unsigned 32-bit values. */
static inline uint32_t min(uint32_t a, uint32_t b) {
	if (b < a) {
		return b;
	}
	return a;
}
/* Three-way comparison of unsigned values: -1 if a < b, 0 if equal, +1 if a > b. */
static inline int cmp(uint32_t a, uint32_t b) {
	if (a == b) {
		return 0;
	}
	return a < b ? -1 : 1;
}
/*
 * Siblings-list callback: merges processor @p processor with the siblings in
 * [siblings_start, siblings_end) into one cluster by propagating the smallest
 * package_leader_id seen so far, and marks every touched processor with
 * CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER. Always returns true.
 */
static bool cluster_siblings_parser(
	uint32_t processor,
	uint32_t siblings_start,
	uint32_t siblings_end,
	struct cpuinfo_arm_linux_processor* processors) {
	/* The processor that owns this siblings list is itself clustered. */
	processors[processor].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
	uint32_t package_leader_id = processors[processor].package_leader_id;
	for (uint32_t sibling = siblings_start; sibling < siblings_end; sibling++) {
		if (!bitmask_all(processors[sibling].flags, CPUINFO_LINUX_FLAG_VALID)) {
			/* Siblings outside the possible/present sets are
			 * reported and skipped. */
			cpuinfo_log_info(
				"invalid processor %" PRIu32 " reported as a sibling for processor %" PRIu32,
				sibling,
				processor);
			continue;
		}
		/* Keep the minimum leader id across siblings seen so far. */
		const uint32_t sibling_package_leader_id = processors[sibling].package_leader_id;
		if (sibling_package_leader_id < package_leader_id) {
			package_leader_id = sibling_package_leader_id;
		}
		/* NOTE(review): this stores the minimum-so-far, so siblings
		 * earlier in the list can retain a larger leader id after one
		 * call — presumably reconciled by invocations for the other
		 * processors; confirm against the caller's iteration. */
		processors[sibling].package_leader_id = package_leader_id;
		processors[sibling].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
	}
	processors[processor].package_leader_id = package_leader_id;
	return true;
}
/*
 * Three-way comparator (qsort-compatible) for cpuinfo_arm_linux_processor
 * records. Ordering, in priority: usable (VALID) processors first; then
 * descending MIDR-based core score; then descending max frequency; then
 * descending package_leader_id; finally ascending system processor id as a
 * deterministic tiebreak.
 */
static int cmp_arm_linux_processor(const void* ptr_a, const void* ptr_b) {
	const struct cpuinfo_arm_linux_processor* processor_a = (const struct cpuinfo_arm_linux_processor*)ptr_a;
	const struct cpuinfo_arm_linux_processor* processor_b = (const struct cpuinfo_arm_linux_processor*)ptr_b;
	/* Move usable processors towards the start of the array */
	const bool usable_a = bitmask_all(processor_a->flags, CPUINFO_LINUX_FLAG_VALID);
	const bool usable_b = bitmask_all(processor_b->flags, CPUINFO_LINUX_FLAG_VALID);
	if (usable_a != usable_b) {
		return (int)usable_b - (int)usable_a;
	}
	/* Compare based on core type (e.g. Cortex-A57 < Cortex-A53) */
	const uint32_t midr_a = processor_a->midr;
	const uint32_t midr_b = processor_b->midr;
	if (midr_a != midr_b) {
		/* Higher score sorts earlier (descending order). */
		const uint32_t score_a = midr_score_core(midr_a);
		const uint32_t score_b = midr_score_core(midr_b);
		if (score_a != score_b) {
			return score_a > score_b ? -1 : 1;
		}
	}
	/* Compare based on core frequency (e.g. 2.0 GHz < 1.2 GHz) */
	const uint32_t frequency_a = processor_a->max_frequency;
	const uint32_t frequency_b = processor_b->max_frequency;
	if (frequency_a != frequency_b) {
		return frequency_a > frequency_b ? -1 : 1;
	}
	/* Compare based on cluster leader id (i.e. cluster 1 < cluster 0) */
	const uint32_t cluster_a = processor_a->package_leader_id;
	const uint32_t cluster_b = processor_b->package_leader_id;
	if (cluster_a != cluster_b) {
		return cluster_a > cluster_b ? -1 : 1;
	}
	/* Compare based on system processor id (i.e. processor 0 < processor 1)
	 */
	const uint32_t id_a = processor_a->system_processor_id;
	const uint32_t id_b = processor_b->system_processor_id;
	return cmp(id_a, id_b);
}
void cpuinfo_arm_linux_init(void) {
struct cpuinfo_arm_linux_processor* arm_linux_processors = NULL;
struct cpuinfo_processor* processors = NULL;
struct cpuinfo_core* cores = NULL;
struct cpuinfo_cluster* clusters = NULL;
struct cpuinfo_uarch_info* uarchs = NULL;
struct cpuinfo_cache* l1i = NULL;
struct cpuinfo_cache* l1d = NULL;
struct cpuinfo_cache* l2 = NULL;
struct cpuinfo_cache* l3 = NULL;
const struct cpuinfo_processor** linux_cpu_to_processor_map = NULL;
const struct cpuinfo_core** linux_cpu_to_core_map = NULL;
uint32_t* linux_cpu_to_uarch_index_map = NULL;
const uint32_t max_processors_count = cpuinfo_linux_get_max_processors_count();
cpuinfo_log_debug("system maximum processors count: %" PRIu32, max_processors_count);
const uint32_t max_possible_processors_count =
1 + cpuinfo_linux_get_max_possible_processor(max_processors_count);
cpuinfo_log_debug("maximum possible processors count: %" PRIu32, max_possible_processors_count);
const uint32_t max_present_processors_count = 1 + cpuinfo_linux_get_max_present_processor(max_processors_count);
cpuinfo_log_debug("maximum present processors count: %" PRIu32, max_present_processors_count);
uint32_t valid_processor_mask = 0;
uint32_t arm_linux_processors_count = max_processors_count;
if (max_present_processors_count != 0) {
arm_linux_processors_count = min(arm_linux_processors_count, max_present_processors_count);
valid_processor_mask = CPUINFO_LINUX_FLAG_PRESENT;
}
if (max_possible_processors_count != 0) {
arm_linux_processors_count = min(arm_linux_processors_count, max_possible_processors_count);
valid_processor_mask |= CPUINFO_LINUX_FLAG_POSSIBLE;
}
if ((max_present_processors_count | max_possible_processors_count) == 0) {
cpuinfo_log_error("failed to parse both lists of possible and present processors");
return;
}
arm_linux_processors = calloc(arm_linux_processors_count, sizeof(struct cpuinfo_arm_linux_processor));
if (arm_linux_processors == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " ARM logical processors",
arm_linux_processors_count * sizeof(struct cpuinfo_arm_linux_processor),
arm_linux_processors_count);
return;
}
if (max_possible_processors_count) {
cpuinfo_linux_detect_possible_processors(
arm_linux_processors_count,
&arm_linux_processors->flags,
sizeof(struct cpuinfo_arm_linux_processor),
CPUINFO_LINUX_FLAG_POSSIBLE);
}
if (max_present_processors_count) {
cpuinfo_linux_detect_present_processors(
arm_linux_processors_count,
&arm_linux_processors->flags,
sizeof(struct cpuinfo_arm_linux_processor),
CPUINFO_LINUX_FLAG_PRESENT);
}
#if defined(__ANDROID__)
struct cpuinfo_android_properties android_properties;
cpuinfo_arm_android_parse_properties(&android_properties);
#else
char proc_cpuinfo_hardware[CPUINFO_HARDWARE_VALUE_MAX];
#endif
char proc_cpuinfo_revision[CPUINFO_REVISION_VALUE_MAX];
if (!cpuinfo_arm_linux_parse_proc_cpuinfo(
#if defined(__ANDROID__)
android_properties.proc_cpuinfo_hardware,
#else
proc_cpuinfo_hardware,
#endif
proc_cpuinfo_revision,
arm_linux_processors_count,
arm_linux_processors)) {
cpuinfo_log_error("failed to parse processor information from /proc/cpuinfo");
return;
}
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (bitmask_all(arm_linux_processors[i].flags, valid_processor_mask)) {
arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_VALID;
cpuinfo_log_debug(
"parsed processor %" PRIu32 " MIDR 0x%08" PRIx32, i, arm_linux_processors[i].midr);
}
}
uint32_t valid_processors = 0, last_midr = 0;
#if CPUINFO_ARCH_ARM
uint32_t last_architecture_version = 0, last_architecture_flags = 0;
#endif
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
arm_linux_processors[i].system_processor_id = i;
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
if (arm_linux_processors[i].flags & CPUINFO_ARM_LINUX_VALID_PROCESSOR) {
/*
* Processor is in possible and present lists,
* and also reported in /proc/cpuinfo. This
* processor is availble for compute.
*/
valid_processors += 1;
} else {
/*
* Processor is in possible and present lists,
* but not reported in /proc/cpuinfo. This is
* fairly common: high-index processors can be
* not reported if they are offline.
*/
cpuinfo_log_info("processor %" PRIu32 " is not listed in /proc/cpuinfo", i);
}
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_MIDR)) {
last_midr = arm_linux_processors[i].midr;
}
#if CPUINFO_ARCH_ARM
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_ARCHITECTURE)) {
last_architecture_version = arm_linux_processors[i].architecture_version;
last_architecture_flags = arm_linux_processors[i].architecture_flags;
}
#endif
} else {
/* Processor reported in /proc/cpuinfo, but not in
* possible and/or present lists: log and ignore */
if (!(arm_linux_processors[i].flags & CPUINFO_ARM_LINUX_VALID_PROCESSOR)) {
cpuinfo_log_warning("invalid processor %" PRIu32 " reported in /proc/cpuinfo", i);
}
}
}
#if defined(__ANDROID__)
const struct cpuinfo_arm_chipset chipset =
cpuinfo_arm_android_decode_chipset(&android_properties, valid_processors, 0);
#else
const struct cpuinfo_arm_chipset chipset =
cpuinfo_arm_linux_decode_chipset(proc_cpuinfo_hardware, proc_cpuinfo_revision, valid_processors, 0);
#endif
#if CPUINFO_ARCH_ARM
uint32_t isa_features = 0;
uint64_t isa_features2 = 0;
#ifdef __ANDROID__
/*
* On Android before API 20, libc.so does not provide getauxval
* function. Thus, we try to dynamically find it, or use two fallback
* mechanisms:
* 1. dlopen libc.so, and try to find getauxval
* 2. Parse /proc/self/auxv procfs file
* 3. Use features reported in /proc/cpuinfo
*/
if (!cpuinfo_arm_linux_hwcap_from_getauxval(&isa_features, &isa_features2)) {
/* getauxval can't be used, fall back to parsing /proc/self/auxv
*/
if (!cpuinfo_arm_linux_hwcap_from_procfs(&isa_features, &isa_features2)) {
/*
* Reading /proc/self/auxv failed, probably due to file
* permissions. Use information from /proc/cpuinfo to
* detect ISA.
*
* If different processors report different ISA
* features, take the intersection.
*/
uint32_t processors_with_features = 0;
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (bitmask_all(
arm_linux_processors[i].flags,
CPUINFO_LINUX_FLAG_VALID | CPUINFO_ARM_LINUX_VALID_FEATURES)) {
if (processors_with_features == 0) {
isa_features = arm_linux_processors[i].features;
isa_features2 = arm_linux_processors[i].features2;
} else {
isa_features &= arm_linux_processors[i].features;
isa_features2 &= arm_linux_processors[i].features2;
}
processors_with_features += 1;
}
}
}
}
#else
/* On GNU/Linux getauxval is always available */
cpuinfo_arm_linux_hwcap_from_getauxval(&isa_features, &isa_features2);
#endif
cpuinfo_arm_linux_decode_isa_from_proc_cpuinfo(
isa_features,
isa_features2,
last_midr,
last_architecture_version,
last_architecture_flags,
&chipset,
&cpuinfo_isa);
#elif CPUINFO_ARCH_ARM64
uint32_t isa_features = 0;
uint64_t isa_features2 = 0;
/* getauxval is always available on ARM64 Android */
cpuinfo_arm_linux_hwcap_from_getauxval(&isa_features, &isa_features2);
cpuinfo_arm64_linux_decode_isa_from_proc_cpuinfo(
isa_features, isa_features2, last_midr, &chipset, &cpuinfo_isa);
#endif
/* Detect min/max frequency and package ID */
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
const uint32_t max_frequency = cpuinfo_linux_get_processor_max_frequency(i);
if (max_frequency != 0) {
arm_linux_processors[i].max_frequency = max_frequency;
arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_MAX_FREQUENCY;
}
const uint32_t min_frequency = cpuinfo_linux_get_processor_min_frequency(i);
if (min_frequency != 0) {
arm_linux_processors[i].min_frequency = min_frequency;
arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_MIN_FREQUENCY;
}
if (cpuinfo_linux_get_processor_package_id(i, &arm_linux_processors[i].package_id)) {
arm_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_PACKAGE_ID;
}
}
}
/* Initialize topology group IDs */
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
arm_linux_processors[i].package_leader_id = i;
}
/* Propagate topology group IDs among siblings */
bool detected_core_siblings_list_node = false;
bool detected_cluster_cpus_list_node = false;
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
continue;
}
if (!bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_PACKAGE_ID)) {
continue;
}
/* Use the cluster_cpus_list topology node if available. If not
* found, cache the result to avoid repeatedly attempting to
* read the non-existent paths.
* */
if (!detected_core_siblings_list_node && !detected_cluster_cpus_list_node) {
if (cpuinfo_linux_detect_cluster_cpus(
arm_linux_processors_count,
i,
(cpuinfo_siblings_callback)cluster_siblings_parser,
arm_linux_processors)) {
detected_cluster_cpus_list_node = true;
continue;
} else {
detected_core_siblings_list_node = true;
}
}
/* The cached result above will guarantee only one of the blocks
* below will execute, with a bias towards cluster_cpus_list.
**/
if (detected_core_siblings_list_node) {
cpuinfo_linux_detect_core_siblings(
arm_linux_processors_count,
i,
(cpuinfo_siblings_callback)cluster_siblings_parser,
arm_linux_processors);
}
if (detected_cluster_cpus_list_node) {
cpuinfo_linux_detect_cluster_cpus(
arm_linux_processors_count,
i,
(cpuinfo_siblings_callback)cluster_siblings_parser,
arm_linux_processors);
}
}
/* Propagate all cluster IDs */
uint32_t clustered_processors = 0;
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (bitmask_all(
arm_linux_processors[i].flags,
CPUINFO_LINUX_FLAG_VALID | CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) {
clustered_processors += 1;
const uint32_t package_leader_id = arm_linux_processors[i].package_leader_id;
if (package_leader_id < i) {
arm_linux_processors[i].package_leader_id =
arm_linux_processors[package_leader_id].package_leader_id;
}
cpuinfo_log_debug(
"processor %" PRIu32 " clustered with processor %" PRIu32
" as inferred from system siblings lists",
i,
arm_linux_processors[i].package_leader_id);
}
}
if (clustered_processors != valid_processors) {
/*
* Topology information about some or all logical processors may
* be unavailable, for the following reasons:
* - Linux kernel is too old, or configured without support for
* topology information in sysfs.
* - Core is offline, and Linux kernel is configured to not
* report topology for offline cores.
*
* In this case, we assign processors to clusters using two
* methods:
* - Try heuristic cluster configurations (e.g. 6-core SoC
* usually has 4+2 big.LITTLE configuration).
* - If heuristic failed, assign processors to core clusters in
* a sequential scan.
*/
if (!cpuinfo_arm_linux_detect_core_clusters_by_heuristic(
valid_processors, arm_linux_processors_count, arm_linux_processors)) {
cpuinfo_arm_linux_detect_core_clusters_by_sequential_scan(
arm_linux_processors_count, arm_linux_processors);
}
}
cpuinfo_arm_linux_count_cluster_processors(arm_linux_processors_count, arm_linux_processors);
const uint32_t cluster_count = cpuinfo_arm_linux_detect_cluster_midr(
&chipset, arm_linux_processors_count, valid_processors, arm_linux_processors);
/* Initialize core vendor, uarch, MIDR, and frequency for every logical
* processor */
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
const uint32_t cluster_leader = arm_linux_processors[i].package_leader_id;
if (cluster_leader == i) {
/* Cluster leader: decode core vendor and uarch
*/
cpuinfo_arm_decode_vendor_uarch(
arm_linux_processors[cluster_leader].midr,
#if CPUINFO_ARCH_ARM
!!(arm_linux_processors[cluster_leader].features &
CPUINFO_ARM_LINUX_FEATURE_VFPV4),
#endif
&arm_linux_processors[cluster_leader].vendor,
&arm_linux_processors[cluster_leader].uarch);
} else {
/* Cluster non-leader: copy vendor, uarch, MIDR,
* and frequency from cluster leader */
arm_linux_processors[i].flags |= arm_linux_processors[cluster_leader].flags &
(CPUINFO_ARM_LINUX_VALID_MIDR | CPUINFO_LINUX_FLAG_MAX_FREQUENCY);
arm_linux_processors[i].midr = arm_linux_processors[cluster_leader].midr;
arm_linux_processors[i].vendor = arm_linux_processors[cluster_leader].vendor;
arm_linux_processors[i].uarch = arm_linux_processors[cluster_leader].uarch;
arm_linux_processors[i].max_frequency =
arm_linux_processors[cluster_leader].max_frequency;
}
}
}
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
cpuinfo_log_debug(
"post-analysis processor %" PRIu32 ": MIDR %08" PRIx32 " frequency %" PRIu32,
i,
arm_linux_processors[i].midr,
arm_linux_processors[i].max_frequency);
}
}
qsort(arm_linux_processors,
arm_linux_processors_count,
sizeof(struct cpuinfo_arm_linux_processor),
cmp_arm_linux_processor);
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
cpuinfo_log_debug(
"post-sort processor %" PRIu32 ": system id %" PRIu32 " MIDR %08" PRIx32
" frequency %" PRIu32,
i,
arm_linux_processors[i].system_processor_id,
arm_linux_processors[i].midr,
arm_linux_processors[i].max_frequency);
}
}
uint32_t uarchs_count = 0;
enum cpuinfo_uarch last_uarch;
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
if (uarchs_count == 0 || arm_linux_processors[i].uarch != last_uarch) {
last_uarch = arm_linux_processors[i].uarch;
uarchs_count += 1;
}
arm_linux_processors[i].uarch_index = uarchs_count - 1;
}
}
/*
* Assumptions:
* - No SMP (i.e. each core supports only one hardware thread).
* - Level 1 instruction and data caches are private to the core
* clusters.
* - Level 2 and level 3 cache is shared between cores in the same
* cluster.
*/
cpuinfo_arm_chipset_to_string(&chipset, package.name);
package.processor_count = valid_processors;
package.core_count = valid_processors;
package.cluster_count = cluster_count;
processors = calloc(valid_processors, sizeof(struct cpuinfo_processor));
if (processors == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " logical processors",
valid_processors * sizeof(struct cpuinfo_processor),
valid_processors);
goto cleanup;
}
cores = calloc(valid_processors, sizeof(struct cpuinfo_core));
if (cores == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " cores",
valid_processors * sizeof(struct cpuinfo_core),
valid_processors);
goto cleanup;
}
clusters = calloc(cluster_count, sizeof(struct cpuinfo_cluster));
if (clusters == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " core clusters",
cluster_count * sizeof(struct cpuinfo_cluster),
cluster_count);
goto cleanup;
}
uarchs = calloc(uarchs_count, sizeof(struct cpuinfo_uarch_info));
if (uarchs == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " microarchitectures",
uarchs_count * sizeof(struct cpuinfo_uarch_info),
uarchs_count);
goto cleanup;
}
linux_cpu_to_processor_map = calloc(arm_linux_processors_count, sizeof(struct cpuinfo_processor*));
if (linux_cpu_to_processor_map == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %" PRIu32 " logical processor mapping entries",
arm_linux_processors_count * sizeof(struct cpuinfo_processor*),
arm_linux_processors_count);
goto cleanup;
}
linux_cpu_to_core_map = calloc(arm_linux_processors_count, sizeof(struct cpuinfo_core*));
if (linux_cpu_to_core_map == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %" PRIu32 " core mapping entries",
arm_linux_processors_count * sizeof(struct cpuinfo_core*),
arm_linux_processors_count);
goto cleanup;
}
if (uarchs_count > 1) {
linux_cpu_to_uarch_index_map = calloc(arm_linux_processors_count, sizeof(uint32_t));
if (linux_cpu_to_uarch_index_map == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %" PRIu32 " uarch index mapping entries",
arm_linux_processors_count * sizeof(uint32_t),
arm_linux_processors_count);
goto cleanup;
}
}
l1i = calloc(valid_processors, sizeof(struct cpuinfo_cache));
if (l1i == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " L1I caches",
valid_processors * sizeof(struct cpuinfo_cache),
valid_processors);
goto cleanup;
}
l1d = calloc(valid_processors, sizeof(struct cpuinfo_cache));
if (l1d == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " L1D caches",
valid_processors * sizeof(struct cpuinfo_cache),
valid_processors);
goto cleanup;
}
uint32_t uarchs_index = 0;
for (uint32_t i = 0; i < arm_linux_processors_count; i++) {
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
if (uarchs_index == 0 || arm_linux_processors[i].uarch != last_uarch) {
last_uarch = arm_linux_processors[i].uarch;
uarchs[uarchs_index] = (struct cpuinfo_uarch_info){
.uarch = arm_linux_processors[i].uarch,
.midr = arm_linux_processors[i].midr,
};
uarchs_index += 1;
}
uarchs[uarchs_index - 1].processor_count += 1;
uarchs[uarchs_index - 1].core_count += 1;
}
}
uint32_t l2_count = 0, l3_count = 0, big_l3_size = 0, cluster_id = UINT32_MAX;
/* Indication whether L3 (if it exists) is shared between all cores */
bool shared_l3 = true;
/* Populate cache information structures in l1i, l1d */
for (uint32_t i = 0; i < valid_processors; i++) {
if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
cluster_id += 1;
clusters[cluster_id] = (struct cpuinfo_cluster){
.processor_start = i,
.processor_count = arm_linux_processors[i].package_processor_count,
.core_start = i,
.core_count = arm_linux_processors[i].package_processor_count,
.cluster_id = cluster_id,
.package = &package,
.vendor = arm_linux_processors[i].vendor,
.uarch = arm_linux_processors[i].uarch,
.midr = arm_linux_processors[i].midr,
};
}
processors[i].smt_id = 0;
processors[i].core = cores + i;
processors[i].cluster = clusters + cluster_id;
processors[i].package = &package;
processors[i].linux_id = (int)arm_linux_processors[i].system_processor_id;
processors[i].cache.l1i = l1i + i;
processors[i].cache.l1d = l1d + i;
linux_cpu_to_processor_map[arm_linux_processors[i].system_processor_id] = &processors[i];
cores[i].processor_start = i;
cores[i].processor_count = 1;
cores[i].core_id = i;
cores[i].cluster = clusters + cluster_id;
cores[i].package = &package;
cores[i].vendor = arm_linux_processors[i].vendor;
cores[i].uarch = arm_linux_processors[i].uarch;
cores[i].midr = arm_linux_processors[i].midr;
linux_cpu_to_core_map[arm_linux_processors[i].system_processor_id] = &cores[i];
if (linux_cpu_to_uarch_index_map != NULL) {
linux_cpu_to_uarch_index_map[arm_linux_processors[i].system_processor_id] =
arm_linux_processors[i].uarch_index;
}
struct cpuinfo_cache temp_l2 = {0}, temp_l3 = {0};
cpuinfo_arm_decode_cache(
arm_linux_processors[i].uarch,
arm_linux_processors[i].package_processor_count,
arm_linux_processors[i].midr,
&chipset,
cluster_id,
arm_linux_processors[i].architecture_version,
&l1i[i],
&l1d[i],
&temp_l2,
&temp_l3);
l1i[i].processor_start = l1d[i].processor_start = i;
l1i[i].processor_count = l1d[i].processor_count = 1;
#if CPUINFO_ARCH_ARM
/* L1I reported in /proc/cpuinfo overrides defaults */
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_ICACHE)) {
l1i[i] = (struct cpuinfo_cache){
.size = arm_linux_processors[i].proc_cpuinfo_cache.i_size,
.associativity = arm_linux_processors[i].proc_cpuinfo_cache.i_assoc,
.sets = arm_linux_processors[i].proc_cpuinfo_cache.i_sets,
.partitions = 1,
.line_size = arm_linux_processors[i].proc_cpuinfo_cache.i_line_length};
}
/* L1D reported in /proc/cpuinfo overrides defaults */
if (bitmask_all(arm_linux_processors[i].flags, CPUINFO_ARM_LINUX_VALID_DCACHE)) {
l1d[i] = (struct cpuinfo_cache){
.size = arm_linux_processors[i].proc_cpuinfo_cache.d_size,
.associativity = arm_linux_processors[i].proc_cpuinfo_cache.d_assoc,
.sets = arm_linux_processors[i].proc_cpuinfo_cache.d_sets,
.partitions = 1,
.line_size = arm_linux_processors[i].proc_cpuinfo_cache.d_line_length};
}
#endif
if (temp_l3.size != 0) {
/*
* Assumptions:
* - L2 is private to each core
* - L3 is shared by cores in the same cluster
* - If cores in different clusters report the same L3,
* it is shared between all cores.
*/
l2_count += 1;
if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
if (cluster_id == 0) {
big_l3_size = temp_l3.size;
l3_count = 1;
} else if (temp_l3.size != big_l3_size) {
/* If some cores have different L3 size,
* L3 is not shared between all cores */
shared_l3 = false;
l3_count += 1;
}
}
} else {
/* If some cores don't have L3 cache, L3 is not shared
* between all cores
*/
shared_l3 = false;
if (temp_l2.size != 0) {
/* Assume L2 is shared by cores in the same
* cluster */
if (arm_linux_processors[i].package_leader_id ==
arm_linux_processors[i].system_processor_id) {
l2_count += 1;
}
}
}
}
if (l2_count != 0) {
l2 = calloc(l2_count, sizeof(struct cpuinfo_cache));
if (l2 == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " L2 caches",
l2_count * sizeof(struct cpuinfo_cache),
l2_count);
goto cleanup;
}
if (l3_count != 0) {
l3 = calloc(l3_count, sizeof(struct cpuinfo_cache));
if (l3 == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " L3 caches",
l3_count * sizeof(struct cpuinfo_cache),
l3_count);
goto cleanup;
}
}
}
cluster_id = UINT32_MAX;
uint32_t l2_index = UINT32_MAX, l3_index = UINT32_MAX;
for (uint32_t i = 0; i < valid_processors; i++) {
if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
cluster_id++;
}
struct cpuinfo_cache dummy_l1i, dummy_l1d, temp_l2 = {0}, temp_l3 = {0};
cpuinfo_arm_decode_cache(
arm_linux_processors[i].uarch,
arm_linux_processors[i].package_processor_count,
arm_linux_processors[i].midr,
&chipset,
cluster_id,
arm_linux_processors[i].architecture_version,
&dummy_l1i,
&dummy_l1d,
&temp_l2,
&temp_l3);
if (temp_l3.size != 0) {
/*
* Assumptions:
* - L2 is private to each core
* - L3 is shared by cores in the same cluster
* - If cores in different clusters report the same L3,
* it is shared between all cores.
*/
l2_index += 1;
l2[l2_index] = (struct cpuinfo_cache){
.size = temp_l2.size,
.associativity = temp_l2.associativity,
.sets = temp_l2.sets,
.partitions = 1,
.line_size = temp_l2.line_size,
.flags = temp_l2.flags,
.processor_start = i,
.processor_count = 1,
};
processors[i].cache.l2 = l2 + l2_index;
if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
l3_index += 1;
if (l3_index < l3_count) {
l3[l3_index] = (struct cpuinfo_cache){
.size = temp_l3.size,
.associativity = temp_l3.associativity,
.sets = temp_l3.sets,
.partitions = 1,
.line_size = temp_l3.line_size,
.flags = temp_l3.flags,
.processor_start = i,
.processor_count = shared_l3
? valid_processors
: arm_linux_processors[i].package_processor_count,
};
}
}
if (shared_l3) {
processors[i].cache.l3 = l3;
} else if (l3_index < l3_count) {
processors[i].cache.l3 = l3 + l3_index;
}
} else if (temp_l2.size != 0) {
/* Assume L2 is shared by cores in the same cluster */
if (arm_linux_processors[i].package_leader_id == arm_linux_processors[i].system_processor_id) {
l2_index += 1;
l2[l2_index] = (struct cpuinfo_cache){
.size = temp_l2.size,
.associativity = temp_l2.associativity,
.sets = temp_l2.sets,
.partitions = 1,
.line_size = temp_l2.line_size,
.flags = temp_l2.flags,
.processor_start = i,
.processor_count = arm_linux_processors[i].package_processor_count,
};
}
processors[i].cache.l2 = l2 + l2_index;
}
}
/* Commit */
cpuinfo_processors = processors;
cpuinfo_cores = cores;
cpuinfo_clusters = clusters;
cpuinfo_packages = &package;
cpuinfo_uarchs = uarchs;
cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
cpuinfo_cache[cpuinfo_cache_level_2] = l2;
cpuinfo_cache[cpuinfo_cache_level_3] = l3;
cpuinfo_processors_count = valid_processors;
cpuinfo_cores_count = valid_processors;
cpuinfo_clusters_count = cluster_count;
cpuinfo_packages_count = 1;
cpuinfo_uarchs_count = uarchs_count;
cpuinfo_cache_count[cpuinfo_cache_level_1i] = valid_processors;
cpuinfo_cache_count[cpuinfo_cache_level_1d] = valid_processors;
cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count;
cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count;
cpuinfo_max_cache_size = cpuinfo_arm_compute_max_cache_size(&processors[0]);
cpuinfo_linux_cpu_max = arm_linux_processors_count;
cpuinfo_linux_cpu_to_processor_map = linux_cpu_to_processor_map;
cpuinfo_linux_cpu_to_core_map = linux_cpu_to_core_map;
cpuinfo_linux_cpu_to_uarch_index_map = linux_cpu_to_uarch_index_map;
__sync_synchronize();
cpuinfo_is_initialized = true;
processors = NULL;
cores = NULL;
clusters = NULL;
uarchs = NULL;
l1i = l1d = l2 = l3 = NULL;
linux_cpu_to_processor_map = NULL;
linux_cpu_to_core_map = NULL;
linux_cpu_to_uarch_index_map = NULL;
cleanup:
free(arm_linux_processors);
free(processors);
free(cores);
free(clusters);
free(uarchs);
free(l1i);
free(l1d);
free(l2);
free(l3);
free(linux_cpu_to_processor_map);
free(linux_cpu_to_core_map);
free(linux_cpu_to_uarch_index_map);
}

1002
3rdparty/cpuinfo/src/arm/linux/midr.c vendored Normal file

File diff suppressed because it is too large Load Diff

692
3rdparty/cpuinfo/src/arm/mach/init.c vendored Normal file
View File

@@ -0,0 +1,692 @@
#include <alloca.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <mach/machine.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <cpuinfo.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
#include <mach/api.h>
/* Polyfill recent CPUFAMILY_ARM_* values for older SDKs.
 * Older Apple SDKs do not declare the newer hw.cpufamily codes in
 * <mach/machine.h>, so define them here when missing. Each name pairs the
 * big and little core clusters of one chip generation (see decode_uarch
 * below for how the pair is split per core index). */
#ifndef CPUFAMILY_ARM_VORTEX_TEMPEST
#define CPUFAMILY_ARM_VORTEX_TEMPEST 0x07D34B9F
#endif
#ifndef CPUFAMILY_ARM_LIGHTNING_THUNDER
#define CPUFAMILY_ARM_LIGHTNING_THUNDER 0x462504D2
#endif
#ifndef CPUFAMILY_ARM_FIRESTORM_ICESTORM
#define CPUFAMILY_ARM_FIRESTORM_ICESTORM 0x1B588BB3
#endif
#ifndef CPUFAMILY_ARM_AVALANCHE_BLIZZARD
#define CPUFAMILY_ARM_AVALANCHE_BLIZZARD 0xDA33D83D
#endif
/*
 * ISA feature flags for the host processor.
 *
 * The ARMv8 crypto extensions (AES, SHA1, SHA2, PMULL) and CRC32 are
 * enabled statically here; the remaining fields start false and are
 * filled in from hw.optional.* sysctls and the hw.cpufamily fallback
 * tables inside cpuinfo_arm_mach_init().
 * NOTE(review): the static flags assume every Apple core this file
 * targets implements these extensions — confirm when adding new targets.
 */
struct cpuinfo_arm_isa cpuinfo_isa = {
	.aes = true,
	.sha1 = true,
	.sha2 = true,
	.pmull = true,
	.crc32 = true,
};
/*
 * Query a 32-bit integer value from the CTL_HW sysctl namespace.
 *
 * type_specifier - second-level identifier under CTL_HW (e.g. HW_CACHELINE).
 * name           - human-readable name of the value, used only for logging.
 *
 * Returns the queried value, or 0 when the lookup fails or the value is
 * not a 32-bit integer.
 */
static uint32_t get_sys_info(int type_specifier, const char* name) {
	size_t size = 0;
	uint32_t result = 0;
	int mib[2] = {CTL_HW, type_specifier};
	/* First call with a NULL buffer only probes the size of the value. */
	if (sysctl(mib, 2, NULL, &size, NULL, 0) != 0) {
		cpuinfo_log_info("sysctl(\"%s\") failed: %s", name, strerror(errno));
	} else if (size == sizeof(uint32_t)) {
		/* Check the fetching call as well: if it fails, fall back to
		 * the 0 default instead of logging a bogus value. */
		if (sysctl(mib, 2, &result, &size, NULL, 0) != 0) {
			cpuinfo_log_info("sysctl(\"%s\") failed: %s", name, strerror(errno));
			return 0;
		}
		cpuinfo_log_debug("%s: %" PRIu32 ", size = %lu", name, result, size);
	} else {
		cpuinfo_log_info("sysctl does not support non-integer lookup for (\"%s\")", name);
	}
	return result;
}
/*
 * Query a 32-bit integer value from sysctl by its dotted string name
 * (e.g. "hw.cpufamily", "hw.optional.arm.FEAT_LSE").
 *
 * Returns the queried value, or 0 when the lookup fails or the value is
 * not a 32-bit integer.
 */
static uint32_t get_sys_info_by_name(const char* type_specifier) {
	size_t size = 0;
	uint32_t result = 0;
	/* First call with a NULL buffer only probes the size of the value. */
	if (sysctlbyname(type_specifier, NULL, &size, NULL, 0) != 0) {
		cpuinfo_log_info("sysctlbyname(\"%s\") failed: %s", type_specifier, strerror(errno));
	} else if (size == sizeof(uint32_t)) {
		/* Check the fetching call as well: if it fails, fall back to
		 * the 0 default instead of logging a bogus value. */
		if (sysctlbyname(type_specifier, &result, &size, NULL, 0) != 0) {
			cpuinfo_log_info("sysctlbyname(\"%s\") failed: %s", type_specifier, strerror(errno));
			return 0;
		}
		cpuinfo_log_debug("%s: %" PRIu32 ", size = %lu", type_specifier, result, size);
	} else {
		cpuinfo_log_info("sysctl does not support non-integer lookup for (\"%s\")", type_specifier);
	}
	return result;
}
/*
 * Map an Apple hw.cpufamily code to a cpuinfo microarchitecture.
 *
 * cpu_family - CPUFAMILY_ARM_* value reported by the hw.cpufamily sysctl.
 * core_index - index of the core being decoded.
 * core_count - total number of cores in the system.
 *
 * Heterogeneous families name both clusters (e.g. Firestorm+Icestorm), so
 * the core index and total count decide which cluster a core belongs to:
 * except for Monsoon/Mistral, the last 4 cores are the little cluster.
 *
 * Returns cpuinfo_uarch_unknown for families not listed here.
 */
static enum cpuinfo_uarch decode_uarch(uint32_t cpu_family, uint32_t core_index, uint32_t core_count) {
	switch (cpu_family) {
		case CPUFAMILY_ARM_CYCLONE:
			return cpuinfo_uarch_cyclone;
		case CPUFAMILY_ARM_TYPHOON:
			return cpuinfo_uarch_typhoon;
		case CPUFAMILY_ARM_TWISTER:
			return cpuinfo_uarch_twister;
		case CPUFAMILY_ARM_HURRICANE:
			return cpuinfo_uarch_hurricane;
		case CPUFAMILY_ARM_MONSOON_MISTRAL:
			/* 2x Monsoon + 4x Mistral cores */
			return core_index < 2 ? cpuinfo_uarch_monsoon : cpuinfo_uarch_mistral;
		case CPUFAMILY_ARM_VORTEX_TEMPEST:
			/* Hexa-core: 2x Vortex + 4x Tempest; Octa-core: 4x
			 * Vortex + 4x Tempest */
			return core_index + 4 < core_count ? cpuinfo_uarch_vortex : cpuinfo_uarch_tempest;
		case CPUFAMILY_ARM_LIGHTNING_THUNDER:
			/* Hexa-core: 2x Lightning + 4x Thunder; Octa-core
			 * (presumed): 4x Lightning + 4x Thunder */
			return core_index + 4 < core_count ? cpuinfo_uarch_lightning : cpuinfo_uarch_thunder;
		case CPUFAMILY_ARM_FIRESTORM_ICESTORM:
			/* Hexa-core: 2x Firestorm + 4x Icestorm; Octa-core: 4x
			 * Firestorm + 4x Icestorm */
			return core_index + 4 < core_count ? cpuinfo_uarch_firestorm : cpuinfo_uarch_icestorm;
		case CPUFAMILY_ARM_AVALANCHE_BLIZZARD:
			/* Hexa-core: 2x Avalanche + 4x Blizzard */
			return core_index + 4 < core_count ? cpuinfo_uarch_avalanche : cpuinfo_uarch_blizzard;
		default:
			/* Use hw.cpusubtype for detection */
			break;
	}
	return cpuinfo_uarch_unknown;
}
/*
 * Copy the CPU brand string reported by the machdep.cpu.brand_string
 * sysctl into package_name, truncating to CPUINFO_PACKAGE_NAME_MAX bytes.
 *
 * Returns true on success, false if either sysctl lookup fails (a warning
 * with the failing errno is logged in that case).
 */
static int read_package_name_from_brand_string(char* package_name) {
	size_t size;
	/* Probe the value size first, then fetch it into a stack buffer. */
	if (sysctlbyname("machdep.cpu.brand_string", NULL, &size, NULL, 0) == 0) {
		char* brand_string = alloca(size);
		if (sysctlbyname("machdep.cpu.brand_string", brand_string, &size, NULL, 0) == 0) {
			cpuinfo_log_debug("machdep.cpu.brand_string: %s", brand_string);
			strlcpy(package_name, brand_string, CPUINFO_PACKAGE_NAME_MAX);
			return true;
		}
	}
	/* Either of the two calls above failed; report the last errno. */
	cpuinfo_log_warning("sysctlbyname(\"machdep.cpu.brand_string\") failed: %s", strerror(errno));
	return false;
}
/*
 * Derive an "Apple A<N>[X]" package name from the hw.machine sysctl
 * (device model strings such as "iPhone11,2" or "iPad7,3").
 *
 * The model string is split into a device name plus major,minor numbers;
 * the device tables below map that triple to a chip generation number and
 * an optional 'X' suffix.
 *
 * Returns true if a name was written into package_name, false if the
 * sysctl failed, the string did not parse, or the device is unknown.
 */
static int decode_package_name_from_hw_machine(char* package_name) {
	size_t size;
	if (sysctlbyname("hw.machine", NULL, &size, NULL, 0) != 0) {
		cpuinfo_log_warning("sysctlbyname(\"hw.machine\") failed: %s", strerror(errno));
		return false;
	}
	char* machine_name = alloca(size);
	if (sysctlbyname("hw.machine", machine_name, &size, NULL, 0) != 0) {
		cpuinfo_log_warning("sysctlbyname(\"hw.machine\") failed: %s", strerror(errno));
		return false;
	}
	cpuinfo_log_debug("hw.machine: %s", machine_name);
	char name[10];
	uint32_t major = 0, minor = 0;
	/* "%9[^,0123456789]" captures the leading alphabetic device name
	 * (at most 9 chars), then the major,minor pair after it. */
	if (sscanf(machine_name, "%9[^,0123456789]%" SCNu32 ",%" SCNu32, name, &major, &minor) != 3) {
		/* NOTE(review): errno is not set by an sscanf match failure,
		 * so strerror(errno) here may report a stale error. */
		cpuinfo_log_warning("parsing \"hw.machine\" failed: %s", strerror(errno));
		return false;
	}
	uint32_t chip_model = 0;
	char suffix = '\0';
	if (strcmp(name, "iPhone") == 0) {
		/*
		 * iPhone 4 and up are supported:
		 * - iPhone 4 [A4]: iPhone3,1, iPhone3,2, iPhone3,3
		 * - iPhone 4S [A5]: iPhone4,1
		 * - iPhone 5 [A6]: iPhone5,1, iPhone5,2
		 * - iPhone 5c [A6]: iPhone5,3, iPhone5,4
		 * - iPhone 5s [A7]: iPhone6,1, iPhone6,2
		 * - iPhone 6 [A8]: iPhone7,2
		 * - iPhone 6 Plus [A8]: iPhone7,1
		 * - iPhone 6s [A9]: iPhone8,1
		 * - iPhone 6s Plus [A9]: iPhone8,2
		 * - iPhone SE [A9]: iPhone8,4
		 * - iPhone 7 [A10]: iPhone9,1, iPhone9,3
		 * - iPhone 7 Plus [A10]: iPhone9,2, iPhone9,4
		 * - iPhone 8 [A11]: iPhone10,1, iPhone10,4
		 * - iPhone 8 Plus [A11]: iPhone10,2, iPhone10,5
		 * - iPhone X [A11]: iPhone10,3, iPhone10,6
		 * - iPhone XS [A12]: iPhone11,2,
		 * - iPhone XS Max [A12]: iPhone11,4, iPhone11,6
		 * - iPhone XR [A12]: iPhone11,8
		 */
		chip_model = major + 1;
	} else if (strcmp(name, "iPad") == 0) {
		switch (major) {
			/* iPad 2 and up are supported */
			case 2:
				/*
				 * iPad 2 [A5]: iPad2,1, iPad2,2, iPad2,3,
				 * iPad2,4 iPad mini [A5]: iPad2,5, iPad2,6,
				 * iPad2,7
				 */
				chip_model = major + 3;
				break;
			case 3:
				/*
				 * iPad 3rd Gen [A5X]: iPad3,1, iPad3,2, iPad3,3
				 * iPad 4th Gen [A6X]: iPad3,4, iPad3,5, iPad3,6
				 */
				chip_model = (minor <= 3) ? 5 : 6;
				suffix = 'X';
				break;
			case 4:
				/*
				 * iPad Air [A7]: iPad4,1, iPad4,2,
				 * iPad4,3 iPad mini Retina [A7]: iPad4,4,
				 * iPad4,5, iPad4,6 iPad mini 3 [A7]:
				 * iPad4,7, iPad4,8, iPad4,9
				 */
				chip_model = major + 3;
				break;
			case 5:
				/*
				 * iPad mini 4 [A8]: iPad5,1, iPad5,2
				 * iPad Air 2 [A8X]: iPad5,3, iPad5,4
				 */
				chip_model = major + 3;
				suffix = (minor <= 2) ? '\0' : 'X';
				break;
			case 6:
				/*
				 * iPad Pro 9.7" [A9X]: iPad6,3, iPad6,4
				 * iPad Pro [A9X]: iPad6,7, iPad6,8
				 * iPad 5th Gen [A9]: iPad6,11, iPad6,12
				 */
				chip_model = major + 3;
				suffix = minor <= 8 ? 'X' : '\0';
				break;
			case 7:
				/*
				 * iPad Pro 12.9" [A10X]: iPad7,1, iPad7,2
				 * iPad Pro 10.5" [A10X]: iPad7,3, iPad7,4
				 * iPad 6th Gen [A10]: iPad7,5, iPad7,6
				 */
				chip_model = major + 3;
				suffix = minor <= 4 ? 'X' : '\0';
				break;
			default:
				cpuinfo_log_info("unknown iPad: %s", machine_name);
				break;
		}
	} else if (strcmp(name, "iPod") == 0) {
		switch (major) {
			case 5:
				chip_model = 5;
				break;
			/* iPod touch (5th Gen) [A5]: iPod5,1 */
			case 7:
				/* iPod touch (6th Gen, 2015) [A8]: iPod7,1 */
				chip_model = 8;
				break;
			default:
				cpuinfo_log_info("unknown iPod: %s", machine_name);
				break;
		}
	} else {
		cpuinfo_log_info("unknown device: %s", machine_name);
	}
	if (chip_model != 0) {
		/* A '\0' suffix terminates the string right after the model
		 * number, so only 'X' variants get a visible suffix. */
		snprintf(package_name, CPUINFO_PACKAGE_NAME_MAX, "Apple A%" PRIu32 "%c", chip_model, suffix);
		return true;
	}
	return false;
}
void cpuinfo_arm_mach_init(void) {
struct cpuinfo_processor* processors = NULL;
struct cpuinfo_core* cores = NULL;
struct cpuinfo_cluster* clusters = NULL;
struct cpuinfo_package* packages = NULL;
struct cpuinfo_uarch_info* uarchs = NULL;
struct cpuinfo_cache* l1i = NULL;
struct cpuinfo_cache* l1d = NULL;
struct cpuinfo_cache* l2 = NULL;
struct cpuinfo_cache* l3 = NULL;
struct cpuinfo_mach_topology mach_topology = cpuinfo_mach_detect_topology();
processors = calloc(mach_topology.threads, sizeof(struct cpuinfo_processor));
if (processors == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " logical processors",
mach_topology.threads * sizeof(struct cpuinfo_processor),
mach_topology.threads);
goto cleanup;
}
cores = calloc(mach_topology.cores, sizeof(struct cpuinfo_core));
if (cores == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " cores",
mach_topology.cores * sizeof(struct cpuinfo_core),
mach_topology.cores);
goto cleanup;
}
packages = calloc(mach_topology.packages, sizeof(struct cpuinfo_package));
if (packages == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " packages",
mach_topology.packages * sizeof(struct cpuinfo_package),
mach_topology.packages);
goto cleanup;
}
const uint32_t threads_per_core = mach_topology.threads / mach_topology.cores;
const uint32_t threads_per_package = mach_topology.threads / mach_topology.packages;
const uint32_t cores_per_package = mach_topology.cores / mach_topology.packages;
for (uint32_t i = 0; i < mach_topology.packages; i++) {
packages[i] = (struct cpuinfo_package){
.processor_start = i * threads_per_package,
.processor_count = threads_per_package,
.core_start = i * cores_per_package,
.core_count = cores_per_package,
};
if (!read_package_name_from_brand_string(packages[i].name))
decode_package_name_from_hw_machine(packages[i].name);
}
const uint32_t cpu_family = get_sys_info_by_name("hw.cpufamily");
/*
* iOS 15 and macOS 12 added sysctls for ARM features, use them where
* possible. Otherwise, fallback to hardcoded set of CPUs with known
* support.
*/
const uint32_t has_feat_lse = get_sys_info_by_name("hw.optional.arm.FEAT_LSE");
if (has_feat_lse != 0) {
cpuinfo_isa.atomics = true;
} else {
// Mandatory in ARMv8.1-A, list only cores released before iOS
// 15 / macOS 12
switch (cpu_family) {
case CPUFAMILY_ARM_MONSOON_MISTRAL:
case CPUFAMILY_ARM_VORTEX_TEMPEST:
case CPUFAMILY_ARM_LIGHTNING_THUNDER:
case CPUFAMILY_ARM_FIRESTORM_ICESTORM:
cpuinfo_isa.atomics = true;
}
}
const uint32_t has_feat_rdm = get_sys_info_by_name("hw.optional.arm.FEAT_RDM");
if (has_feat_rdm != 0) {
cpuinfo_isa.rdm = true;
} else {
// Optional in ARMv8.2-A (implemented in Apple cores),
// list only cores released before iOS 15 / macOS 12
switch (cpu_family) {
case CPUFAMILY_ARM_MONSOON_MISTRAL:
case CPUFAMILY_ARM_VORTEX_TEMPEST:
case CPUFAMILY_ARM_LIGHTNING_THUNDER:
case CPUFAMILY_ARM_FIRESTORM_ICESTORM:
cpuinfo_isa.rdm = true;
}
}
const uint32_t has_feat_fp16 = get_sys_info_by_name("hw.optional.arm.FEAT_FP16");
if (has_feat_fp16 != 0) {
cpuinfo_isa.fp16arith = true;
} else {
// Optional in ARMv8.2-A (implemented in Apple cores),
// list only cores released before iOS 15 / macOS 12
switch (cpu_family) {
case CPUFAMILY_ARM_MONSOON_MISTRAL:
case CPUFAMILY_ARM_VORTEX_TEMPEST:
case CPUFAMILY_ARM_LIGHTNING_THUNDER:
case CPUFAMILY_ARM_FIRESTORM_ICESTORM:
cpuinfo_isa.fp16arith = true;
}
}
const uint32_t has_feat_fhm = get_sys_info_by_name("hw.optional.arm.FEAT_FHM");
if (has_feat_fhm != 0) {
cpuinfo_isa.fhm = true;
} else {
// Prior to iOS 15, use 'hw.optional.armv8_2_fhm'
const uint32_t has_feat_fhm_legacy = get_sys_info_by_name("hw.optional.armv8_2_fhm");
if (has_feat_fhm_legacy != 0) {
cpuinfo_isa.fhm = true;
} else {
// Mandatory in ARMv8.4-A when FP16 arithmetics is
// implemented, list only cores released before iOS 15 /
// macOS 12
switch (cpu_family) {
case CPUFAMILY_ARM_LIGHTNING_THUNDER:
case CPUFAMILY_ARM_FIRESTORM_ICESTORM:
cpuinfo_isa.fhm = true;
}
}
}
const uint32_t has_feat_bf16 = get_sys_info_by_name("hw.optional.arm.FEAT_BF16");
if (has_feat_bf16 != 0) {
cpuinfo_isa.bf16 = true;
}
const uint32_t has_feat_fcma = get_sys_info_by_name("hw.optional.arm.FEAT_FCMA");
if (has_feat_fcma != 0) {
cpuinfo_isa.fcma = true;
} else {
// Mandatory in ARMv8.3-A, list only cores released before iOS
// 15 / macOS 12
switch (cpu_family) {
case CPUFAMILY_ARM_LIGHTNING_THUNDER:
case CPUFAMILY_ARM_FIRESTORM_ICESTORM:
cpuinfo_isa.fcma = true;
}
}
const uint32_t has_feat_jscvt = get_sys_info_by_name("hw.optional.arm.FEAT_JSCVT");
if (has_feat_jscvt != 0) {
cpuinfo_isa.jscvt = true;
} else {
// Mandatory in ARMv8.3-A, list only cores released before iOS
// 15 / macOS 12
switch (cpu_family) {
case CPUFAMILY_ARM_LIGHTNING_THUNDER:
case CPUFAMILY_ARM_FIRESTORM_ICESTORM:
cpuinfo_isa.jscvt = true;
}
}
const uint32_t has_feat_dotprod = get_sys_info_by_name("hw.optional.arm.FEAT_DotProd");
if (has_feat_dotprod != 0) {
cpuinfo_isa.dot = true;
} else {
// Mandatory in ARMv8.4-A, list only cores released before iOS
// 15 / macOS 12
switch (cpu_family) {
case CPUFAMILY_ARM_LIGHTNING_THUNDER:
case CPUFAMILY_ARM_FIRESTORM_ICESTORM:
cpuinfo_isa.dot = true;
}
}
const uint32_t has_feat_i8mm = get_sys_info_by_name("hw.optional.arm.FEAT_I8MM");
if (has_feat_i8mm != 0) {
cpuinfo_isa.i8mm = true;
}
const uint32_t has_feat_sme = get_sys_info_by_name("hw.optional.arm.FEAT_SME");
if (has_feat_sme != 0) {
cpuinfo_isa.sme = true;
}
const uint32_t has_feat_sme2 = get_sys_info_by_name("hw.optional.arm.FEAT_SME2");
if (has_feat_sme2 != 0) {
cpuinfo_isa.sme2 = true;
}
uint32_t num_clusters = 1;
for (uint32_t i = 0; i < mach_topology.cores; i++) {
cores[i] = (struct cpuinfo_core){
.processor_start = i * threads_per_core,
.processor_count = threads_per_core,
.core_id = i % cores_per_package,
.package = packages + i / cores_per_package,
.vendor = cpuinfo_vendor_apple,
.uarch = decode_uarch(cpu_family, i, mach_topology.cores),
};
if (i != 0 && cores[i].uarch != cores[i - 1].uarch) {
num_clusters++;
}
}
for (uint32_t i = 0; i < mach_topology.threads; i++) {
const uint32_t smt_id = i % threads_per_core;
const uint32_t core_id = i / threads_per_core;
const uint32_t package_id = i / threads_per_package;
processors[i].smt_id = smt_id;
processors[i].core = &cores[core_id];
processors[i].package = &packages[package_id];
}
clusters = calloc(num_clusters, sizeof(struct cpuinfo_cluster));
if (clusters == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " clusters",
num_clusters * sizeof(struct cpuinfo_cluster),
num_clusters);
goto cleanup;
}
uarchs = calloc(num_clusters, sizeof(struct cpuinfo_uarch_info));
if (uarchs == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " uarchs",
num_clusters * sizeof(enum cpuinfo_uarch),
num_clusters);
goto cleanup;
}
uint32_t cluster_idx = UINT32_MAX;
for (uint32_t i = 0; i < mach_topology.cores; i++) {
if (i == 0 || cores[i].uarch != cores[i - 1].uarch) {
cluster_idx++;
uarchs[cluster_idx] = (struct cpuinfo_uarch_info){
.uarch = cores[i].uarch,
.processor_count = 1,
.core_count = 1,
};
clusters[cluster_idx] = (struct cpuinfo_cluster){
.processor_start = i * threads_per_core,
.processor_count = 1,
.core_start = i,
.core_count = 1,
.cluster_id = cluster_idx,
.package = cores[i].package,
.vendor = cores[i].vendor,
.uarch = cores[i].uarch,
};
} else {
uarchs[cluster_idx].processor_count++;
uarchs[cluster_idx].core_count++;
clusters[cluster_idx].processor_count++;
clusters[cluster_idx].core_count++;
}
cores[i].cluster = &clusters[cluster_idx];
}
for (uint32_t i = 0; i < mach_topology.threads; i++) {
const uint32_t core_id = i / threads_per_core;
processors[i].cluster = cores[core_id].cluster;
}
for (uint32_t i = 0; i < mach_topology.packages; i++) {
packages[i].cluster_start = 0;
packages[i].cluster_count = num_clusters;
}
const uint32_t cacheline_size = get_sys_info(HW_CACHELINE, "HW_CACHELINE");
const uint32_t l1d_cache_size = get_sys_info(HW_L1DCACHESIZE, "HW_L1DCACHESIZE");
const uint32_t l1i_cache_size = get_sys_info(HW_L1ICACHESIZE, "HW_L1ICACHESIZE");
const uint32_t l2_cache_size = get_sys_info(HW_L2CACHESIZE, "HW_L2CACHESIZE");
const uint32_t l3_cache_size = get_sys_info(HW_L3CACHESIZE, "HW_L3CACHESIZE");
const uint32_t l1_cache_associativity = 4;
const uint32_t l2_cache_associativity = 8;
const uint32_t l3_cache_associativity = 16;
const uint32_t cache_partitions = 1;
const uint32_t cache_flags = 0;
uint32_t threads_per_l1 = 0, l1_count = 0;
if (l1i_cache_size != 0 || l1d_cache_size != 0) {
/* Assume L1 caches are private to each core */
threads_per_l1 = 1;
l1_count = mach_topology.threads / threads_per_l1;
cpuinfo_log_debug("detected %" PRIu32 " L1 caches", l1_count);
}
uint32_t threads_per_l2 = 0, l2_count = 0;
if (l2_cache_size != 0) {
/* Assume L2 cache is shared between all cores */
threads_per_l2 = mach_topology.cores;
l2_count = 1;
cpuinfo_log_debug("detected %" PRIu32 " L2 caches", l2_count);
}
uint32_t threads_per_l3 = 0, l3_count = 0;
if (l3_cache_size != 0) {
/* Assume L3 cache is shared between all cores */
threads_per_l3 = mach_topology.cores;
l3_count = 1;
cpuinfo_log_debug("detected %" PRIu32 " L3 caches", l3_count);
}
if (l1i_cache_size != 0) {
l1i = calloc(l1_count, sizeof(struct cpuinfo_cache));
if (l1i == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " L1I caches",
l1_count * sizeof(struct cpuinfo_cache),
l1_count);
goto cleanup;
}
for (uint32_t c = 0; c < l1_count; c++) {
l1i[c] = (struct cpuinfo_cache){
.size = l1i_cache_size,
.associativity = l1_cache_associativity,
.sets = l1i_cache_size / (l1_cache_associativity * cacheline_size),
.partitions = cache_partitions,
.line_size = cacheline_size,
.flags = cache_flags,
.processor_start = c * threads_per_l1,
.processor_count = threads_per_l1,
};
}
for (uint32_t t = 0; t < mach_topology.threads; t++) {
processors[t].cache.l1i = &l1i[t / threads_per_l1];
}
}
if (l1d_cache_size != 0) {
l1d = calloc(l1_count, sizeof(struct cpuinfo_cache));
if (l1d == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " L1D caches",
l1_count * sizeof(struct cpuinfo_cache),
l1_count);
goto cleanup;
}
for (uint32_t c = 0; c < l1_count; c++) {
l1d[c] = (struct cpuinfo_cache){
.size = l1d_cache_size,
.associativity = l1_cache_associativity,
.sets = l1d_cache_size / (l1_cache_associativity * cacheline_size),
.partitions = cache_partitions,
.line_size = cacheline_size,
.flags = cache_flags,
.processor_start = c * threads_per_l1,
.processor_count = threads_per_l1,
};
}
for (uint32_t t = 0; t < mach_topology.threads; t++) {
processors[t].cache.l1d = &l1d[t / threads_per_l1];
}
}
if (l2_count != 0) {
l2 = calloc(l2_count, sizeof(struct cpuinfo_cache));
if (l2 == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " L2 caches",
l2_count * sizeof(struct cpuinfo_cache),
l2_count);
goto cleanup;
}
for (uint32_t c = 0; c < l2_count; c++) {
l2[c] = (struct cpuinfo_cache){
.size = l2_cache_size,
.associativity = l2_cache_associativity,
.sets = l2_cache_size / (l2_cache_associativity * cacheline_size),
.partitions = cache_partitions,
.line_size = cacheline_size,
.flags = cache_flags,
.processor_start = c * threads_per_l2,
.processor_count = threads_per_l2,
};
}
for (uint32_t t = 0; t < mach_topology.threads; t++) {
processors[t].cache.l2 = &l2[0];
}
}
if (l3_count != 0) {
l3 = calloc(l3_count, sizeof(struct cpuinfo_cache));
if (l3 == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " L3 caches",
l3_count * sizeof(struct cpuinfo_cache),
l3_count);
goto cleanup;
}
for (uint32_t c = 0; c < l3_count; c++) {
l3[c] = (struct cpuinfo_cache){
.size = l3_cache_size,
.associativity = l3_cache_associativity,
.sets = l3_cache_size / (l3_cache_associativity * cacheline_size),
.partitions = cache_partitions,
.line_size = cacheline_size,
.flags = cache_flags,
.processor_start = c * threads_per_l3,
.processor_count = threads_per_l3,
};
}
for (uint32_t t = 0; t < mach_topology.threads; t++) {
processors[t].cache.l3 = &l3[0];
}
}
/* Commit changes */
cpuinfo_processors = processors;
cpuinfo_cores = cores;
cpuinfo_clusters = clusters;
cpuinfo_packages = packages;
cpuinfo_uarchs = uarchs;
cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
cpuinfo_cache[cpuinfo_cache_level_2] = l2;
cpuinfo_cache[cpuinfo_cache_level_3] = l3;
cpuinfo_processors_count = mach_topology.threads;
cpuinfo_cores_count = mach_topology.cores;
cpuinfo_clusters_count = num_clusters;
cpuinfo_packages_count = mach_topology.packages;
cpuinfo_uarchs_count = num_clusters;
cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1_count;
cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1_count;
cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count;
cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count;
cpuinfo_max_cache_size = cpuinfo_compute_max_cache_size(&processors[0]);
__sync_synchronize();
cpuinfo_is_initialized = true;
processors = NULL;
cores = NULL;
clusters = NULL;
packages = NULL;
uarchs = NULL;
l1i = l1d = l2 = l3 = NULL;
cleanup:
free(processors);
free(cores);
free(clusters);
free(packages);
free(uarchs);
free(l1i);
free(l1d);
free(l2);
free(l3);
}

273
3rdparty/cpuinfo/src/arm/midr.h vendored Normal file
View File

@@ -0,0 +1,273 @@
#pragma once
#include <stdint.h>
#define CPUINFO_ARM_MIDR_IMPLEMENTER_MASK UINT32_C(0xFF000000)
#define CPUINFO_ARM_MIDR_VARIANT_MASK UINT32_C(0x00F00000)
#define CPUINFO_ARM_MIDR_ARCHITECTURE_MASK UINT32_C(0x000F0000)
#define CPUINFO_ARM_MIDR_PART_MASK UINT32_C(0x0000FFF0)
#define CPUINFO_ARM_MIDR_REVISION_MASK UINT32_C(0x0000000F)
#define CPUINFO_ARM_MIDR_IMPLEMENTER_OFFSET 24
#define CPUINFO_ARM_MIDR_VARIANT_OFFSET 20
#define CPUINFO_ARM_MIDR_ARCHITECTURE_OFFSET 16
#define CPUINFO_ARM_MIDR_PART_OFFSET 4
#define CPUINFO_ARM_MIDR_REVISION_OFFSET 0
#define CPUINFO_ARM_MIDR_ARM1156 UINT32_C(0x410FB560)
#define CPUINFO_ARM_MIDR_CORTEX_A7 UINT32_C(0x410FC070)
#define CPUINFO_ARM_MIDR_CORTEX_A9 UINT32_C(0x410FC090)
#define CPUINFO_ARM_MIDR_CORTEX_A15 UINT32_C(0x410FC0F0)
#define CPUINFO_ARM_MIDR_CORTEX_A17 UINT32_C(0x410FC0E0)
#define CPUINFO_ARM_MIDR_CORTEX_A35 UINT32_C(0x410FD040)
#define CPUINFO_ARM_MIDR_CORTEX_A53 UINT32_C(0x410FD030)
#define CPUINFO_ARM_MIDR_CORTEX_A55 UINT32_C(0x410FD050)
#define CPUINFO_ARM_MIDR_CORTEX_A57 UINT32_C(0x410FD070)
#define CPUINFO_ARM_MIDR_CORTEX_A72 UINT32_C(0x410FD080)
#define CPUINFO_ARM_MIDR_CORTEX_A73 UINT32_C(0x410FD090)
#define CPUINFO_ARM_MIDR_CORTEX_A75 UINT32_C(0x410FD0A0)
#define CPUINFO_ARM_MIDR_KRYO280_GOLD UINT32_C(0x51AF8001)
#define CPUINFO_ARM_MIDR_KRYO280_SILVER UINT32_C(0x51AF8014)
#define CPUINFO_ARM_MIDR_KRYO385_GOLD UINT32_C(0x518F802D)
#define CPUINFO_ARM_MIDR_KRYO385_SILVER UINT32_C(0x518F803C)
#define CPUINFO_ARM_MIDR_KRYO_SILVER_821 UINT32_C(0x510F2010)
#define CPUINFO_ARM_MIDR_KRYO_GOLD UINT32_C(0x510F2050)
#define CPUINFO_ARM_MIDR_KRYO_SILVER_820 UINT32_C(0x510F2110)
#define CPUINFO_ARM_MIDR_EXYNOS_M1_M2 UINT32_C(0x530F0010)
#define CPUINFO_ARM_MIDR_DENVER2 UINT32_C(0x4E0F0030)
#define CPUINFO_ARM_MIDR_AMPERE_ALTRA UINT32_C(0x413fd0c1)
/* Return a copy of `midr` with the implementer field replaced by `implementer`. */
inline static uint32_t midr_set_implementer(uint32_t midr, uint32_t implementer) {
	const uint32_t field =
		(implementer << CPUINFO_ARM_MIDR_IMPLEMENTER_OFFSET) & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK;
	return (midr & ~CPUINFO_ARM_MIDR_IMPLEMENTER_MASK) | field;
}
/* Return a copy of `midr` with the variant field replaced by `variant`. */
inline static uint32_t midr_set_variant(uint32_t midr, uint32_t variant) {
	const uint32_t field = (variant << CPUINFO_ARM_MIDR_VARIANT_OFFSET) & CPUINFO_ARM_MIDR_VARIANT_MASK;
	return (midr & ~CPUINFO_ARM_MIDR_VARIANT_MASK) | field;
}
/* Return a copy of `midr` with the architecture field replaced by `architecture`. */
inline static uint32_t midr_set_architecture(uint32_t midr, uint32_t architecture) {
	const uint32_t field =
		(architecture << CPUINFO_ARM_MIDR_ARCHITECTURE_OFFSET) & CPUINFO_ARM_MIDR_ARCHITECTURE_MASK;
	return (midr & ~CPUINFO_ARM_MIDR_ARCHITECTURE_MASK) | field;
}
/* Return a copy of `midr` with the part-number field replaced by `part`. */
inline static uint32_t midr_set_part(uint32_t midr, uint32_t part) {
	const uint32_t field = (part << CPUINFO_ARM_MIDR_PART_OFFSET) & CPUINFO_ARM_MIDR_PART_MASK;
	return (midr & ~CPUINFO_ARM_MIDR_PART_MASK) | field;
}
/* Return a copy of `midr` with the revision field replaced by `revision`. */
inline static uint32_t midr_set_revision(uint32_t midr, uint32_t revision) {
	const uint32_t field = (revision << CPUINFO_ARM_MIDR_REVISION_OFFSET) & CPUINFO_ARM_MIDR_REVISION_MASK;
	return (midr & ~CPUINFO_ARM_MIDR_REVISION_MASK) | field;
}
/* Extract the variant field (bits 23:20) of a MIDR value. */
inline static uint32_t midr_get_variant(uint32_t midr) {
	const uint32_t masked = midr & CPUINFO_ARM_MIDR_VARIANT_MASK;
	return masked >> CPUINFO_ARM_MIDR_VARIANT_OFFSET;
}
/* Extract the implementer field (bits 31:24) of a MIDR value. */
inline static uint32_t midr_get_implementer(uint32_t midr) {
	const uint32_t masked = midr & CPUINFO_ARM_MIDR_IMPLEMENTER_MASK;
	return masked >> CPUINFO_ARM_MIDR_IMPLEMENTER_OFFSET;
}
/* Extract the part-number field (bits 15:4) of a MIDR value. */
inline static uint32_t midr_get_part(uint32_t midr) {
	const uint32_t masked = midr & CPUINFO_ARM_MIDR_PART_MASK;
	return masked >> CPUINFO_ARM_MIDR_PART_OFFSET;
}
/* Extract the revision field (bits 3:0) of a MIDR value. */
inline static uint32_t midr_get_revision(uint32_t midr) {
	const uint32_t masked = midr & CPUINFO_ARM_MIDR_REVISION_MASK;
	return masked >> CPUINFO_ARM_MIDR_REVISION_OFFSET;
}
/* Return `midr` with its implementer field taken from `other_midr`. */
inline static uint32_t midr_copy_implementer(uint32_t midr, uint32_t other_midr) {
	const uint32_t mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK;
	return (midr & ~mask) | (other_midr & mask);
}
/* Return `midr` with its variant field taken from `other_midr`. */
inline static uint32_t midr_copy_variant(uint32_t midr, uint32_t other_midr) {
	const uint32_t mask = CPUINFO_ARM_MIDR_VARIANT_MASK;
	return (midr & ~mask) | (other_midr & mask);
}
/* Return `midr` with its architecture field taken from `other_midr`. */
inline static uint32_t midr_copy_architecture(uint32_t midr, uint32_t other_midr) {
	const uint32_t mask = CPUINFO_ARM_MIDR_ARCHITECTURE_MASK;
	return (midr & ~mask) | (other_midr & mask);
}
/* Return `midr` with its part-number field taken from `other_midr`. */
inline static uint32_t midr_copy_part(uint32_t midr, uint32_t other_midr) {
	const uint32_t mask = CPUINFO_ARM_MIDR_PART_MASK;
	return (midr & ~mask) | (other_midr & mask);
}
/* Return `midr` with its revision field taken from `other_midr`. */
inline static uint32_t midr_copy_revision(uint32_t midr, uint32_t other_midr) {
	const uint32_t mask = CPUINFO_ARM_MIDR_REVISION_MASK;
	return (midr & ~mask) | (other_midr & mask);
}
/* True when the implementer + part fields of `midr` match the ARM1156 reference MIDR. */
inline static bool midr_is_arm1156(uint32_t midr) {
	const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	return ((midr ^ CPUINFO_ARM_MIDR_ARM1156) & uarch_mask) == 0;
}
/* True when `midr` reports implementer 'A' (0x41) and a part number in the 0xBxx range
 * (high nibble of the part field == 0xB), as on ARM11-family cores. */
inline static bool midr_is_arm11(uint32_t midr) {
	const uint32_t arm11_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | UINT32_C(0x0000F000);
	return (midr & arm11_mask) == UINT32_C(0x4100B000);
}
/* True when the implementer + part fields of `midr` match the Cortex-A9 reference MIDR. */
inline static bool midr_is_cortex_a9(uint32_t midr) {
	const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	return ((midr ^ CPUINFO_ARM_MIDR_CORTEX_A9) & uarch_mask) == 0;
}
/* True when implementer + part identify a Qualcomm Scorpion core
 * (single-core 0x00F or dual-core 0x02D part). */
inline static bool midr_is_scorpion(uint32_t midr) {
	const uint32_t masked = midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK);
	return masked == UINT32_C(0x510000F0) || masked == UINT32_C(0x510002D0);
}
/* True when implementer + part identify a Qualcomm Krait core
 * (dual-core 0x04D or quad-core 0x06F part). */
inline static bool midr_is_krait(uint32_t midr) {
	const uint32_t masked = midr & (CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK);
	return masked == UINT32_C(0x510004D0) || masked == UINT32_C(0x510006F0);
}
/* True when implementer + part match the Cortex-A53 reference MIDR. */
inline static bool midr_is_cortex_a53(uint32_t midr) {
	const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	return ((midr ^ CPUINFO_ARM_MIDR_CORTEX_A53) & uarch_mask) == 0;
}
/* True when implementer + part match Kryo 260/280 "Silver" (Qualcomm-branded Cortex-A53). */
inline static bool midr_is_qualcomm_cortex_a53_silver(uint32_t midr) {
	const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	return ((midr ^ CPUINFO_ARM_MIDR_KRYO280_SILVER) & uarch_mask) == 0;
}
/* True when implementer + part match Kryo 385 "Silver" (Qualcomm-branded Cortex-A55). */
inline static bool midr_is_qualcomm_cortex_a55_silver(uint32_t midr) {
	const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	return ((midr ^ CPUINFO_ARM_MIDR_KRYO385_SILVER) & uarch_mask) == 0;
}
/* True when implementer + part match Kryo 280 "Gold". */
inline static bool midr_is_kryo280_gold(uint32_t midr) {
	const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	return ((midr ^ CPUINFO_ARM_MIDR_KRYO280_GOLD) & uarch_mask) == 0;
}
/* True when implementer + architecture + part match either Kryo "Silver"
 * variant (Snapdragon 820 or 821). */
inline static bool midr_is_kryo_silver(uint32_t midr) {
	const uint32_t uarch_mask =
		CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_ARCHITECTURE_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	const uint32_t masked = midr & uarch_mask;
	return masked == CPUINFO_ARM_MIDR_KRYO_SILVER_820 || masked == CPUINFO_ARM_MIDR_KRYO_SILVER_821;
}
/* True when implementer + part match Kryo "Gold". */
inline static bool midr_is_kryo_gold(uint32_t midr) {
	const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	return ((midr ^ CPUINFO_ARM_MIDR_KRYO_GOLD) & uarch_mask) == 0;
}
/* True when implementer + part match the Ampere Altra reference MIDR. */
inline static bool midr_is_ampere_altra(uint32_t midr) {
	const uint32_t uarch_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	return ((midr ^ CPUINFO_ARM_MIDR_AMPERE_ALTRA) & uarch_mask) == 0;
}
/*
 * Heuristically rank how "big" a core is in a big.LITTLE system from its MIDR.
 * Higher return values mean a bigger (higher-performance) role; scores are only
 * meaningful relative to each other, for sorting cores within one system.
 * Cores not listed below get the middle score 3, so unknown future cores still
 * order correctly against the known big (>=4) and LITTLE (<=2) cores.
 */
inline static uint32_t midr_score_core(uint32_t midr) {
	/* Compare on implementer + part only; variant/revision don't affect the role. */
	const uint32_t core_mask = CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	switch (midr & core_mask) {
		case UINT32_C(0x53000030): /* Exynos M4 */
		case UINT32_C(0x53000040): /* Exynos M5 */
		case UINT32_C(0x4100D440): /* Cortex-X1 */
		case UINT32_C(0x4100D480): /* Cortex-X2 */
		case UINT32_C(0x4100D4E0): /* Cortex-X3 */
			/* These cores are in big role w.r.t
			 * Cortex-A75/-A76/-A77/-A78/-A710/-A715
			 */
			return 6;
		case UINT32_C(0x4100D080): /* Cortex-A72 */
		case UINT32_C(0x4100D090): /* Cortex-A73 */
		case UINT32_C(0x4100D0A0): /* Cortex-A75 */
		case UINT32_C(0x4100D0B0): /* Cortex-A76 */
		case UINT32_C(0x4100D0D0): /* Cortex-A77 */
		case UINT32_C(0x4100D0E0): /* Cortex-A76AE */
		case UINT32_C(0x4100D410): /* Cortex-A78 */
		case UINT32_C(0x4100D470): /* Cortex-A710 */
		case UINT32_C(0x4100D4D0): /* Cortex-A715 */
		case UINT32_C(0x4800D400): /* Cortex-A76 (HiSilicon) */
		case UINT32_C(0x4E000030): /* Denver 2 */
		case UINT32_C(0x51002050): /* Kryo Gold */
		case UINT32_C(0x51008000): /* Kryo 260 / 280 Gold */
		case UINT32_C(0x51008020): /* Kryo 385 Gold */
		case UINT32_C(0x51008040): /* Kryo 485 Gold / Gold Prime */
		case UINT32_C(0x53000010): /* Exynos M1 and Exynos M2 */
		case UINT32_C(0x53000020): /* Exynos M3 */
#if CPUINFO_ARCH_ARM
		case UINT32_C(0x4100C0F0): /* Cortex-A15 */
		case UINT32_C(0x4100C0E0): /* Cortex-A17 */
		case UINT32_C(0x4100C0D0): /* Rockchip RK3288 cores */
		case UINT32_C(0x4100C0C0): /* Cortex-A12 */
#endif /* CPUINFO_ARCH_ARM */
			/* These cores are always in big role */
			return 5;
		case UINT32_C(0x4100D070): /* Cortex-A57 */
			/* Cortex-A57 can be in LITTLE role w.r.t. Denver 2, or
			 * in big role w.r.t. Cortex-A53 */
			return 4;
#if CPUINFO_ARCH_ARM64
		case UINT32_C(0x4100D060): /* Cortex-A65 */
#endif /* CPUINFO_ARCH_ARM64 */
		case UINT32_C(0x4100D030): /* Cortex-A53 */
		case UINT32_C(0x4100D050): /* Cortex-A55 */
		case UINT32_C(0x4100D460): /* Cortex-A510 */
			/* Cortex-A53 is usually in LITTLE role, but can be in
			 * big role w.r.t. Cortex-A35 */
			return 2;
		case UINT32_C(0x4100D040): /* Cortex-A35 */
#if CPUINFO_ARCH_ARM
		case UINT32_C(0x4100C070): /* Cortex-A7 */
#endif /* CPUINFO_ARCH_ARM */
		case UINT32_C(0x51008050): /* Kryo 485 Silver */
		case UINT32_C(0x51008030): /* Kryo 385 Silver */
		case UINT32_C(0x51008010): /* Kryo 260 / 280 Silver */
		case UINT32_C(0x51002110): /* Kryo Silver (Snapdragon 820) */
		case UINT32_C(0x51002010): /* Kryo Silver (Snapdragon 821) */
			/* These cores are always in LITTLE core */
			return 1;
		default:
			/*
			 * Unknown cores, or cores which do not have big/LITTLE
			 * roles. To be future-proof w.r.t. cores not yet
			 * recognized in cpuinfo, assume position between
			 * Cortex-A57/A72/A73/A75 and Cortex-A53/A55. Then at
			 * least future cores paired with one of these known
			 * cores will be properly scored.
			 */
			return 3;
	}
}
/*
 * Given the MIDR of a known big core, return the reference MIDR of the LITTLE
 * core it is conventionally paired with. Unrecognized MIDRs are returned as-is.
 */
inline static uint32_t midr_little_core_for_big(uint32_t midr) {
	/* Match on implementer + architecture + part; variant/revision are irrelevant. */
	const uint32_t core_mask =
		CPUINFO_ARM_MIDR_IMPLEMENTER_MASK | CPUINFO_ARM_MIDR_ARCHITECTURE_MASK | CPUINFO_ARM_MIDR_PART_MASK;
	const uint32_t big_core = midr & core_mask;
	if (big_core == CPUINFO_ARM_MIDR_CORTEX_A75) {
		return CPUINFO_ARM_MIDR_CORTEX_A55;
	}
	if (big_core == CPUINFO_ARM_MIDR_CORTEX_A73 || big_core == CPUINFO_ARM_MIDR_CORTEX_A72 ||
	    big_core == CPUINFO_ARM_MIDR_CORTEX_A57 || big_core == CPUINFO_ARM_MIDR_EXYNOS_M1_M2) {
		return CPUINFO_ARM_MIDR_CORTEX_A53;
	}
	if (big_core == CPUINFO_ARM_MIDR_CORTEX_A17 || big_core == CPUINFO_ARM_MIDR_CORTEX_A15) {
		return CPUINFO_ARM_MIDR_CORTEX_A7;
	}
	if (big_core == CPUINFO_ARM_MIDR_KRYO280_GOLD) {
		return CPUINFO_ARM_MIDR_KRYO280_SILVER;
	}
	if (big_core == CPUINFO_ARM_MIDR_KRYO_GOLD) {
		return CPUINFO_ARM_MIDR_KRYO_SILVER_820;
	}
	if (big_core == CPUINFO_ARM_MIDR_DENVER2) {
		return CPUINFO_ARM_MIDR_CORTEX_A57;
	}
	/* No known LITTLE companion: echo the input MIDR back. */
	return midr;
}

154
3rdparty/cpuinfo/src/arm/tlb.c vendored Normal file
View File

@@ -0,0 +1,154 @@
switch (uarch) {
case cpuinfo_uarch_cortex_a5:
/*
* Cortex-A5 Technical Reference Manual:
* 6.3.1. Micro TLB
* The first level of caching for the page table information
* is a micro TLB of 10 entries that is implemented on each of
* the instruction and data sides. 6.3.2. Main TLB Misses from
* the instruction and data micro TLBs are handled by a unified
* main TLB. The main TLB is 128-entry two-way set-associative.
*/
break;
case cpuinfo_uarch_cortex_a7:
/*
* Cortex-A7 MPCore Technical Reference Manual:
* 5.3.1. Micro TLB
* The first level of caching for the page table information
* is a micro TLB of 10 entries that is implemented on each of
* the instruction and data sides. 5.3.2. Main TLB Misses from
* the micro TLBs are handled by a unified main TLB. This is a
* 256-entry 2-way set-associative structure. The main TLB
* supports all the VMSAv7 page sizes of 4KB, 64KB, 1MB and 16MB
* in addition to the LPAE page sizes of 2MB and 1G.
*/
break;
case cpuinfo_uarch_cortex_a8:
/*
* Cortex-A8 Technical Reference Manual:
* 6.1. About the MMU
* The MMU features include the following:
* - separate, fully-associative, 32-entry data and
* instruction TLBs
* - TLB entries that support 4KB, 64KB, 1MB, and 16MB pages
*/
break;
case cpuinfo_uarch_cortex_a9:
/*
* ARM CortexA9 Technical Reference Manual:
* 6.2.1 Micro TLB
* The first level of caching for the page table information
* is a micro TLB of 32 entries on the data side, and
* configurable 32 or 64 entries on the instruction side. 6.2.2
* Main TLB The main TLB is implemented as a combination of:
* - A fully-associative, lockable array of four elements.
* - A 2-way associative structure of 2x32, 2x64, 2x128 or
* 2x256 entries.
*/
break;
case cpuinfo_uarch_cortex_a15:
/*
* ARM Cortex-A15 MPCore Processor Technical Reference Manual:
* 5.2.1. L1 instruction TLB
* The L1 instruction TLB is a 32-entry fully-associative
* structure. This TLB caches entries at the 4KB granularity of
* Virtual Address (VA) to Physical Address (PA) mapping only.
* If the page tables map the memory region to a larger
* granularity than 4K, it only allocates one mapping for the
* particular 4K region to which the current access
* corresponds. 5.2.2. L1 data TLB There are two separate
* 32-entry fully-associative TLBs that are used for data loads
* and stores, respectively. Similar to the L1 instruction TLB,
* both of these cache entries at the 4KB granularity of VA to
* PA mappings only. At implementation time, the Cortex-A15
* MPCore processor can be configured with the -l1tlb_1m option,
* to have the L1 data TLB cache entries at both the 4KB and 1MB
* granularity. With this configuration, any translation that
* results in a 1MB or larger page is cached in the L1 data TLB
* as a 1MB entry. Any translation that results in a page
* smaller than 1MB is cached in the L1 data TLB as a 4KB entry.
* By default, all translations are cached in the L1 data TLB as
* a 4KB entry. 5.2.3. L2 TLB Misses from the L1 instruction and
* data TLBs are handled by a unified L2 TLB. This is a
* 512-entry 4-way set-associative structure. The L2 TLB
* supports all the VMSAv7 page sizes of 4K, 64K, 1MB and 16MB
* in addition to the LPAE page sizes of 2MB and 1GB.
*/
break;
case cpuinfo_uarch_cortex_a17:
/*
* ARM Cortex-A17 MPCore Processor Technical Reference Manual:
* 5.2.1. Instruction micro TLB
* The instruction micro TLB is implemented as a 32, 48 or 64
* entry, fully-associative structure. This TLB caches entries
* at the 4KB and 1MB granularity of Virtual Address (VA) to
* Physical Address (PA) mapping only. If the translation tables
* map the memory region to a larger granularity than 4KB or
* 1MB, it only allocates one mapping for the particular 4KB
* region to which the current access corresponds. 5.2.2. Data
* micro TLB The data micro TLB is a 32 entry fully-associative
* TLB that is used for data loads and stores. The cache entries
* have a 4KB and 1MB granularity of VA to PA mappings
* only. 5.2.3. Unified main TLB Misses from the instruction and
* data micro TLBs are handled by a unified main TLB. This is a
* 1024 entry 4-way set-associative structure. The main TLB
* supports all the VMSAv7 page sizes of 4K, 64K, 1MB and 16MB
* in addition to the LPAE page sizes of 2MB and 1GB.
*/
break;
case cpuinfo_uarch_cortex_a35:
/*
* ARM CortexA35 Processor Technical Reference Manual:
* A6.2 TLB Organization
* Micro TLB
* The first level of caching for the translation table
* information is a micro TLB of ten entries that is implemented
* on each of the instruction and data sides. Main TLB A unified
* main TLB handles misses from the micro TLBs. It has a
* 512-entry, 2-way, set-associative structure and supports all
* VMSAv8 block sizes, except 1GB. If it fetches a 1GB block,
* the TLB splits it into 512MB blocks and stores the
* appropriate block for the lookup.
*/
break;
case cpuinfo_uarch_cortex_a53:
/*
* ARM Cortex-A53 MPCore Processor Technical Reference Manual:
* 5.2.1. Micro TLB
* The first level of caching for the translation table
* information is a micro TLB of ten entries that is implemented
* on each of the instruction and data sides. 5.2.2. Main TLB A
* unified main TLB handles misses from the micro TLBs. This is
* a 512-entry, 4-way, set-associative structure. The main TLB
* supports all VMSAv8 block sizes, except 1GB. If a 1GB block
* is fetched, it is split into 512MB blocks and the appropriate
* block for the lookup stored.
*/
break;
case cpuinfo_uarch_cortex_a57:
/*
* ARM® Cortex-A57 MPCore Processor Technical Reference Manual:
* 5.2.1 L1 instruction TLB
* The L1 instruction TLB is a 48-entry fully-associative
* structure. This TLB caches entries of three different page
* sizes, natively 4KB, 64KB, and 1MB, of VA to PA mappings. If
* the page tables map the memory region to a larger granularity
* than 1MB, it only allocates one mapping for the particular
* 1MB region to which the current access corresponds. 5.2.2 L1
* data TLB The L1 data TLB is a 32-entry fully-associative TLB
* that is used for data loads and stores. This TLB caches
* entries of three different page sizes, natively 4KB, 64KB,
* and 1MB, of VA to PA mappings. 5.2.3 L2 TLB Misses from the
* L1 instruction and data TLBs are handled by a unified L2 TLB.
* This is a 1024-entry 4-way set-associative structure. The L2
* TLB supports the page sizes of 4K, 64K, 1MB and 16MB. It also
* supports page sizes of 2MB and 1GB for the long descriptor
* format translation in AArch32 state and in AArch64 state when
* using the 4KB translation granule. In addition, the L2 TLB
* supports the 512MB page map size defined for the AArch64
* translations that use a 64KB translation granule.
*/
break;
}

429
3rdparty/cpuinfo/src/arm/uarch.c vendored Normal file
View File

@@ -0,0 +1,429 @@
#include <stdint.h>
#include <arm/api.h>
#include <arm/midr.h>
#include <cpuinfo/log.h>
/*
 * Decode CPU vendor and microarchitecture from a raw ARM MIDR value.
 *
 * midr      - Main ID Register value (implementer, variant, architecture, part, revision fields).
 * has_vfpv4 - (AArch32 builds only) whether the core reports VFPv4; used to tell
 *             Qualcomm Scorpion apart from Cortex-A5, which share part number 0x00F.
 * vendor    - out: decoded cpuinfo_vendor.
 * uarch     - out: decoded cpuinfo_uarch.
 *
 * Dispatches on the implementer code (an ASCII character), then on the part number.
 * Unknown implementer/part combinations are logged with a warning and the
 * corresponding output is left unwritten — callers are expected to pre-initialize
 * *vendor and *uarch (NOTE(review): presumed from the fallthrough behavior; confirm at call sites).
 * Qualcomm/Huawei parts that are rebranded ARM designs are reported with
 * vendor = cpuinfo_vendor_arm and the underlying ARM uarch.
 */
void cpuinfo_arm_decode_vendor_uarch(
	uint32_t midr,
#if CPUINFO_ARCH_ARM
	bool has_vfpv4,
#endif /* CPUINFO_ARCH_ARM */
	enum cpuinfo_vendor vendor[RESTRICT_STATIC 1],
	enum cpuinfo_uarch uarch[RESTRICT_STATIC 1]) {
	switch (midr_get_implementer(midr)) {
		case 'A': /* implementer 0x41: ARM */
			*vendor = cpuinfo_vendor_arm;
			switch (midr_get_part(midr)) {
#if CPUINFO_ARCH_ARM
				case 0xC05:
					*uarch = cpuinfo_uarch_cortex_a5;
					break;
				case 0xC07:
					*uarch = cpuinfo_uarch_cortex_a7;
					break;
				case 0xC08:
					*uarch = cpuinfo_uarch_cortex_a8;
					break;
				case 0xC09:
					*uarch = cpuinfo_uarch_cortex_a9;
					break;
				case 0xC0C:
					*uarch = cpuinfo_uarch_cortex_a12;
					break;
				case 0xC0E:
					*uarch = cpuinfo_uarch_cortex_a17;
					break;
				case 0xC0D:
					/*
					 * Rockchip RK3288 only.
					 * Core information is ambiguous: some
					 * sources specify Cortex-A12, others -
					 * Cortex-A17. Assume it is Cortex-A12.
					 */
					*uarch = cpuinfo_uarch_cortex_a12;
					break;
				case 0xC0F:
					*uarch = cpuinfo_uarch_cortex_a15;
					break;
#endif /* CPUINFO_ARCH_ARM */
				case 0xD01:
					*uarch = cpuinfo_uarch_cortex_a32;
					break;
				case 0xD03:
					*uarch = cpuinfo_uarch_cortex_a53;
					break;
				case 0xD04:
					*uarch = cpuinfo_uarch_cortex_a35;
					break;
				case 0xD05:
					// Note: use Variant, not Revision,
					// field
					*uarch = (midr & CPUINFO_ARM_MIDR_VARIANT_MASK) == 0
						? cpuinfo_uarch_cortex_a55r0
						: cpuinfo_uarch_cortex_a55;
					break;
				case 0xD06:
					*uarch = cpuinfo_uarch_cortex_a65;
					break;
				case 0xD07:
					*uarch = cpuinfo_uarch_cortex_a57;
					break;
				case 0xD08:
					*uarch = cpuinfo_uarch_cortex_a72;
					break;
				case 0xD09:
					*uarch = cpuinfo_uarch_cortex_a73;
					break;
				case 0xD0A:
					*uarch = cpuinfo_uarch_cortex_a75;
					break;
				case 0xD0B:
					*uarch = cpuinfo_uarch_cortex_a76;
					break;
				case 0xD0C:
					*uarch = cpuinfo_uarch_neoverse_n1;
					break;
				case 0xD0D:
					*uarch = cpuinfo_uarch_cortex_a77;
					break;
				case 0xD0E: /* Cortex-A76AE */
					*uarch = cpuinfo_uarch_cortex_a76;
					break;
				case 0xD40: /* Neoverse V1 */
					*uarch = cpuinfo_uarch_neoverse_v1;
					break;
				case 0xD41: /* Cortex-A78 */
					*uarch = cpuinfo_uarch_cortex_a78;
					break;
				case 0xD44: /* Cortex-X1 */
					*uarch = cpuinfo_uarch_cortex_x1;
					break;
				case 0xD46: /* Cortex-A510 */
					*uarch = cpuinfo_uarch_cortex_a510;
					break;
				case 0xD47: /* Cortex-A710 */
					*uarch = cpuinfo_uarch_cortex_a710;
					break;
				case 0xD48: /* Cortex-X2 */
					*uarch = cpuinfo_uarch_cortex_x2;
					break;
				case 0xD49: /* Neoverse N2 */
					*uarch = cpuinfo_uarch_neoverse_n2;
					break;
#if CPUINFO_ARCH_ARM64
				case 0xD4A:
					*uarch = cpuinfo_uarch_neoverse_e1;
					break;
#endif /* CPUINFO_ARCH_ARM64 */
				case 0xD4D: /* Cortex-A715 */
					*uarch = cpuinfo_uarch_cortex_a715;
					break;
				case 0xD4E: /* Cortex-X3 */
					*uarch = cpuinfo_uarch_cortex_x3;
					break;
				case 0xD4F: /* Neoverse V2 */
					*uarch = cpuinfo_uarch_neoverse_v2;
					break;
				default:
					/* Classic (pre-Cortex) parts encode the family in the top part digit. */
					switch (midr_get_part(midr) >> 8) {
#if CPUINFO_ARCH_ARM
						case 7:
							*uarch = cpuinfo_uarch_arm7;
							break;
						case 9:
							*uarch = cpuinfo_uarch_arm9;
							break;
						case 11:
							*uarch = cpuinfo_uarch_arm11;
							break;
#endif /* CPUINFO_ARCH_ARM */
						default:
							cpuinfo_log_warning(
								"unknown ARM CPU part 0x%03" PRIx32 " ignored",
								midr_get_part(midr));
					}
			}
			break;
		case 'B': /* implementer 0x42: Broadcom */
			*vendor = cpuinfo_vendor_broadcom;
			switch (midr_get_part(midr)) {
				case 0x00F:
					*uarch = cpuinfo_uarch_brahma_b15;
					break;
				case 0x100:
					*uarch = cpuinfo_uarch_brahma_b53;
					break;
#if CPUINFO_ARCH_ARM64
				case 0x516:
					/* Broadcom Vulkan was sold to Cavium
					 * before it reached the market, so we
					 * identify it as Cavium ThunderX2 */
					*vendor = cpuinfo_vendor_cavium;
					*uarch = cpuinfo_uarch_thunderx2;
					break;
#endif /* CPUINFO_ARCH_ARM64 */
				default:
					cpuinfo_log_warning(
						"unknown Broadcom CPU part 0x%03" PRIx32 " ignored",
						midr_get_part(midr));
			}
			break;
#if CPUINFO_ARCH_ARM64
		case 'C': /* implementer 0x43: Cavium */
			*vendor = cpuinfo_vendor_cavium;
			switch (midr_get_part(midr)) {
				case 0x0A0: /* ThunderX */
				case 0x0A1: /* ThunderX 88XX */
				case 0x0A2: /* ThunderX 81XX */
				case 0x0A3: /* ThunderX 83XX */
					*uarch = cpuinfo_uarch_thunderx;
					break;
				case 0x0AF: /* ThunderX2 99XX */
					*uarch = cpuinfo_uarch_thunderx2;
					break;
				default:
					cpuinfo_log_warning(
						"unknown Cavium CPU part 0x%03" PRIx32 " ignored", midr_get_part(midr));
			}
			break;
#endif /* CPUINFO_ARCH_ARM64 */
		case 'H': /* implementer 0x48: Huawei (HiSilicon) */
			*vendor = cpuinfo_vendor_huawei;
			switch (midr_get_part(midr)) {
#if CPUINFO_ARCH_ARM64
				case 0xD01: /* Kunpeng 920 series */
					*uarch = cpuinfo_uarch_taishan_v110;
					break;
#endif /* CPUINFO_ARCH_ARM64 */
				case 0xD40: /* Kirin 980 Big/Medium cores ->
					       Cortex-A76 */
					*vendor = cpuinfo_vendor_arm;
					*uarch = cpuinfo_uarch_cortex_a76;
					break;
				default:
					cpuinfo_log_warning(
						"unknown Huawei CPU part 0x%03" PRIx32 " ignored", midr_get_part(midr));
			}
			break;
#if CPUINFO_ARCH_ARM
		case 'i': /* implementer 0x69: Intel (XScale) */
			*vendor = cpuinfo_vendor_intel;
			switch (midr_get_part(midr) >> 8) {
				case 2: /* PXA 210/25X/26X */
				case 4: /* PXA 27X */
				case 6: /* PXA 3XX */
					*uarch = cpuinfo_uarch_xscale;
					break;
				default:
					cpuinfo_log_warning(
						"unknown Intel CPU part 0x%03" PRIx32 " ignored", midr_get_part(midr));
			}
			break;
#endif /* CPUINFO_ARCH_ARM */
		case 'N': /* implementer 0x4E: Nvidia */
			*vendor = cpuinfo_vendor_nvidia;
			switch (midr_get_part(midr)) {
				case 0x000:
					*uarch = cpuinfo_uarch_denver;
					break;
				case 0x003:
					*uarch = cpuinfo_uarch_denver2;
					break;
				case 0x004:
					*uarch = cpuinfo_uarch_carmel;
					break;
				default:
					cpuinfo_log_warning(
						"unknown Nvidia CPU part 0x%03" PRIx32 " ignored", midr_get_part(midr));
			}
			break;
		case 'P': /* implementer 0x50: Applied Micro */
			*vendor = cpuinfo_vendor_apm;
			switch (midr_get_part(midr)) {
				case 0x000:
					*uarch = cpuinfo_uarch_xgene;
					break;
				default:
					cpuinfo_log_warning(
						"unknown Applied Micro CPU part 0x%03" PRIx32 " ignored",
						midr_get_part(midr));
			}
			break;
		case 'Q': /* implementer 0x51: Qualcomm */
			*vendor = cpuinfo_vendor_qualcomm;
			switch (midr_get_part(midr)) {
#if CPUINFO_ARCH_ARM
				case 0x00F:
					/* Mostly Scorpions, but some Cortex A5
					 * may report this value as well
					 */
					if (has_vfpv4) {
						/* Unlike Scorpion, Cortex-A5
						 * comes with VFPv4 */
						*vendor = cpuinfo_vendor_arm;
						*uarch = cpuinfo_uarch_cortex_a5;
					} else {
						*uarch = cpuinfo_uarch_scorpion;
					}
					break;
				case 0x02D: /* Dual-core Scorpions */
					*uarch = cpuinfo_uarch_scorpion;
					break;
				case 0x04D:
					/*
					 * Dual-core Krait:
					 * - r1p0 -> Krait 200
					 * - r1p4 -> Krait 200
					 * - r2p0 -> Krait 300
					 */
				case 0x06F:
					/*
					 * Quad-core Krait:
					 * - r0p1 -> Krait 200
					 * - r0p2 -> Krait 200
					 * - r1p0 -> Krait 300
					 * - r2p0 -> Krait 400 (Snapdragon 800
					 * MSMxxxx)
					 * - r2p1 -> Krait 400 (Snapdragon 801
					 * MSMxxxxPRO)
					 * - r3p1 -> Krait 450
					 */
					*uarch = cpuinfo_uarch_krait;
					break;
#endif /* CPUINFO_ARCH_ARM */
				case 0x201: /* Qualcomm Snapdragon 821:
					       Low-power Kryo "Silver" */
				case 0x205: /* Qualcomm Snapdragon 820 & 821:
					       High-performance Kryo "Gold" */
				case 0x211: /* Qualcomm Snapdragon 820:
					       Low-power Kryo "Silver" */
					*uarch = cpuinfo_uarch_kryo;
					break;
				case 0x800: /* High-performance Kryo 260 (r10p2)
					       / Kryo 280 (r10p1) "Gold" ->
					       Cortex-A73 */
					*vendor = cpuinfo_vendor_arm;
					*uarch = cpuinfo_uarch_cortex_a73;
					break;
				case 0x801: /* Low-power Kryo 260 / 280 "Silver"
					       -> Cortex-A53 */
					*vendor = cpuinfo_vendor_arm;
					*uarch = cpuinfo_uarch_cortex_a53;
					break;
				case 0x802: /* High-performance Kryo 385 "Gold"
					       -> Cortex-A75 */
					*vendor = cpuinfo_vendor_arm;
					*uarch = cpuinfo_uarch_cortex_a75;
					break;
				case 0x803: /* Low-power Kryo 385 "Silver" ->
					       Cortex-A55r0 */
					*vendor = cpuinfo_vendor_arm;
					*uarch = cpuinfo_uarch_cortex_a55r0;
					break;
				case 0x804: /* High-performance Kryo 485 "Gold"
					       / "Gold Prime" -> Cortex-A76 */
					*vendor = cpuinfo_vendor_arm;
					*uarch = cpuinfo_uarch_cortex_a76;
					break;
				case 0x805: /* Low-performance Kryo 485 "Silver"
					       -> Cortex-A55 */
					*vendor = cpuinfo_vendor_arm;
					*uarch = cpuinfo_uarch_cortex_a55;
					break;
#if CPUINFO_ARCH_ARM64
				case 0x001:
					*uarch = cpuinfo_uarch_oryon;
					break;
				case 0xC00:
					*uarch = cpuinfo_uarch_falkor;
					break;
				case 0xC01:
					*uarch = cpuinfo_uarch_saphira;
					break;
#endif /* CPUINFO_ARCH_ARM64 */
				default:
					cpuinfo_log_warning(
						"unknown Qualcomm CPU part 0x%03" PRIx32 " ignored",
						midr_get_part(midr));
			}
			break;
		case 'S': /* implementer 0x53: Samsung */
			*vendor = cpuinfo_vendor_samsung;
			/* Samsung reuses part numbers across Mongoose generations,
			 * so dispatch on variant + part together. */
			switch (midr & (CPUINFO_ARM_MIDR_VARIANT_MASK | CPUINFO_ARM_MIDR_PART_MASK)) {
				case 0x00100010:
					/*
					 * Exynos 8890 MIDR = 0x531F0011, assume
					 * Exynos M1 has:
					 * - CPU variant 0x1
					 * - CPU part 0x001
					 */
					*uarch = cpuinfo_uarch_exynos_m1;
					break;
				case 0x00400010:
					/*
					 * Exynos 8895 MIDR = 0x534F0010, assume
					 * Exynos M2 has:
					 * - CPU variant 0x4
					 * - CPU part 0x001
					 */
					*uarch = cpuinfo_uarch_exynos_m2;
					break;
				case 0x00100020:
					/*
					 * Exynos 9810 MIDR = 0x531F0020, assume
					 * Exynos M3 has:
					 * - CPU variant 0x1
					 * - CPU part 0x002
					 */
					*uarch = cpuinfo_uarch_exynos_m3;
					break;
				case 0x00100030:
					/*
					 * Exynos 9820 MIDR = 0x531F0030, assume
					 * Exynos M4 has:
					 * - CPU variant 0x1
					 * - CPU part 0x003
					 */
					*uarch = cpuinfo_uarch_exynos_m4;
					break;
				case 0x00100040:
					/*
					 * Exynos 9820 MIDR = 0x531F0040, assume
					 * Exynos M5 has:
					 * - CPU variant 0x1
					 * - CPU part 0x004
					 */
					*uarch = cpuinfo_uarch_exynos_m5;
					break;
				default:
					cpuinfo_log_warning(
						"unknown Samsung CPU variant 0x%01" PRIx32 " part 0x%03" PRIx32
						" ignored",
						midr_get_variant(midr),
						midr_get_part(midr));
			}
			break;
#if CPUINFO_ARCH_ARM
		case 'V': /* implementer 0x56: Marvell */
			*vendor = cpuinfo_vendor_marvell;
			switch (midr_get_part(midr)) {
				case 0x581: /* PJ4 / PJ4B */
				case 0x584: /* PJ4B-MP / PJ4C */
					*uarch = cpuinfo_uarch_pj4;
					break;
				default:
					cpuinfo_log_warning(
						"unknown Marvell CPU part 0x%03" PRIx32 " ignored",
						midr_get_part(midr));
			}
			break;
#endif /* CPUINFO_ARCH_ARM */
		default:
			cpuinfo_log_warning(
				"unknown CPU implementer '%c' (0x%02" PRIx32 ") with CPU part 0x%03" PRIx32 " ignored",
				(char)midr_get_implementer(midr),
				midr_get_implementer(midr),
				midr_get_part(midr));
	}
}

View File

@@ -0,0 +1,912 @@
#include <errno.h>
#include <malloc.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <cpuinfo.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
#include "windows-arm-init.h"
#define MAX_NR_OF_CACHES (cpuinfo_cache_level_max - 1)
/* Call chain:
* cpu_info_init_by_logical_sys_info
* read_packages_for_processors
* read_cores_for_processors
* read_caches_for_processors
* read_all_logical_processor_info_of_relation
* parse_relation_processor_info
* store_package_info_per_processor
* store_core_info_per_processor
* parse_relation_cache_info
* store_cache_info_per_processor
*/
static uint32_t count_logical_processors(const uint32_t max_group_count, uint32_t* global_proc_index_per_group);
static uint32_t read_packages_for_processors(
struct cpuinfo_processor* processors,
const uint32_t number_of_processors,
const uint32_t* global_proc_index_per_group,
const struct woa_chip_info* chip_info);
static uint32_t read_cores_for_processors(
struct cpuinfo_processor* processors,
const uint32_t number_of_processors,
const uint32_t* global_proc_index_per_group,
struct cpuinfo_core* cores,
const struct woa_chip_info* chip_info);
static uint32_t read_caches_for_processors(
struct cpuinfo_processor* processors,
const uint32_t number_of_processors,
struct cpuinfo_cache* caches,
uint32_t* numbers_of_caches,
const uint32_t* global_proc_index_per_group,
const struct woa_chip_info* chip_info);
static uint32_t read_all_logical_processor_info_of_relation(
LOGICAL_PROCESSOR_RELATIONSHIP info_type,
struct cpuinfo_processor* processors,
const uint32_t number_of_processors,
struct cpuinfo_cache* caches,
uint32_t* numbers_of_caches,
struct cpuinfo_core* cores,
const uint32_t* global_proc_index_per_group,
const struct woa_chip_info* chip_info);
static bool parse_relation_processor_info(
struct cpuinfo_processor* processors,
uint32_t nr_of_processors,
const uint32_t* global_proc_index_per_group,
PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info,
const uint32_t info_id,
struct cpuinfo_core* cores,
const struct woa_chip_info* chip_info);
static bool parse_relation_cache_info(
struct cpuinfo_processor* processors,
struct cpuinfo_cache* caches,
uint32_t* numbers_of_caches,
const uint32_t* global_proc_index_per_group,
PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info);
static void store_package_info_per_processor(
struct cpuinfo_processor* processors,
const uint32_t processor_global_index,
const uint32_t package_id,
const uint32_t group_id,
const uint32_t processor_id_in_group);
static void store_core_info_per_processor(
struct cpuinfo_processor* processors,
const uint32_t processor_global_index,
const uint32_t core_id,
PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX core_info,
struct cpuinfo_core* cores,
const struct woa_chip_info* chip_info);
static void store_cache_info_per_processor(
struct cpuinfo_processor* processors,
const uint32_t processor_global_index,
PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info,
struct cpuinfo_cache* current_cache);
static bool connect_packages_cores_clusters_by_processors(
struct cpuinfo_processor* processors,
const uint32_t nr_of_processors,
struct cpuinfo_package* packages,
const uint32_t nr_of_packages,
struct cpuinfo_cluster* clusters,
struct cpuinfo_core* cores,
const uint32_t nr_of_cores,
const struct woa_chip_info* chip_info,
enum cpuinfo_vendor vendor);
static inline uint32_t low_index_from_kaffinity(KAFFINITY kaffinity);
bool cpu_info_init_by_logical_sys_info(const struct woa_chip_info* chip_info, const enum cpuinfo_vendor vendor) {
struct cpuinfo_processor* processors = NULL;
struct cpuinfo_package* packages = NULL;
struct cpuinfo_cluster* clusters = NULL;
struct cpuinfo_core* cores = NULL;
struct cpuinfo_cache* caches = NULL;
struct cpuinfo_uarch_info* uarchs = NULL;
uint32_t nr_of_packages = 0;
uint32_t nr_of_cores = 0;
uint32_t nr_of_all_caches = 0;
uint32_t numbers_of_caches[MAX_NR_OF_CACHES] = {0};
uint32_t nr_of_uarchs = 0;
bool result = false;
HANDLE heap = GetProcessHeap();
/* 1. Count available logical processor groups and processors */
const uint32_t max_group_count = (uint32_t)GetMaximumProcessorGroupCount();
cpuinfo_log_debug("detected %" PRIu32 " processor group(s)", max_group_count);
/* We need to store the absolute processor ID offsets for every groups,
* because
* 1. We can't assume every processor groups include the same number of
* logical processors.
* 2. Every processor groups know its group number and processor IDs
* within the group, but not the global processor IDs.
* 3. We need to list every logical processors by global IDs.
*/
uint32_t* global_proc_index_per_group = (uint32_t*)HeapAlloc(heap, 0, max_group_count * sizeof(uint32_t));
if (global_proc_index_per_group == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " processor groups",
max_group_count * sizeof(struct cpuinfo_processor),
max_group_count);
goto clean_up;
}
uint32_t nr_of_processors = count_logical_processors(max_group_count, global_proc_index_per_group);
processors = HeapAlloc(heap, HEAP_ZERO_MEMORY, nr_of_processors * sizeof(struct cpuinfo_processor));
if (processors == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " logical processors",
nr_of_processors * sizeof(struct cpuinfo_processor),
nr_of_processors);
goto clean_up;
}
/* 2. Read topology information via MSDN API: packages, cores and
* caches*/
nr_of_packages =
read_packages_for_processors(processors, nr_of_processors, global_proc_index_per_group, chip_info);
if (!nr_of_packages) {
cpuinfo_log_error("error in reading package information");
goto clean_up;
}
cpuinfo_log_debug("detected %" PRIu32 " processor package(s)", nr_of_packages);
/* We need the EfficiencyClass to parse uarch from the core information,
* but we need to iterate first to count cores and allocate memory then
* we will iterate again to read and store data to cpuinfo_core
* structures.
*/
nr_of_cores =
read_cores_for_processors(processors, nr_of_processors, global_proc_index_per_group, NULL, chip_info);
if (!nr_of_cores) {
cpuinfo_log_error("error in reading core information");
goto clean_up;
}
cpuinfo_log_debug("detected %" PRIu32 " processor core(s)", nr_of_cores);
/* There is no API to read number of caches, so we need to iterate twice
on caches:
1. Count all type of caches -> allocate memory
2. Read out cache data and store to allocated memory
*/
nr_of_all_caches = read_caches_for_processors(
processors, nr_of_processors, caches, numbers_of_caches, global_proc_index_per_group, chip_info);
if (!nr_of_all_caches) {
cpuinfo_log_error("error in reading cache information");
goto clean_up;
}
cpuinfo_log_debug("detected %" PRIu32 " processor cache(s)", nr_of_all_caches);
/* 3. Allocate memory for package, cluster, core and cache structures */
packages = HeapAlloc(heap, HEAP_ZERO_MEMORY, nr_of_packages * sizeof(struct cpuinfo_package));
if (packages == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " physical packages",
nr_of_packages * sizeof(struct cpuinfo_package),
nr_of_packages);
goto clean_up;
}
/* We don't have cluster information so we explicitly set clusters to
* equal to cores. */
clusters = HeapAlloc(heap, HEAP_ZERO_MEMORY, nr_of_cores * sizeof(struct cpuinfo_cluster));
if (clusters == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " core clusters",
nr_of_cores * sizeof(struct cpuinfo_cluster),
nr_of_cores);
goto clean_up;
}
cores = HeapAlloc(heap, HEAP_ZERO_MEMORY, nr_of_cores * sizeof(struct cpuinfo_core));
if (cores == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " cores",
nr_of_cores * sizeof(struct cpuinfo_core),
nr_of_cores);
goto clean_up;
}
/* We allocate one contiguous cache array for all caches, then use
* offsets per cache type. */
caches = HeapAlloc(heap, HEAP_ZERO_MEMORY, nr_of_all_caches * sizeof(struct cpuinfo_cache));
if (caches == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " caches",
nr_of_all_caches * sizeof(struct cpuinfo_cache),
nr_of_all_caches);
goto clean_up;
}
/* 4.Read missing topology information that can't be saved without
* counted allocate structures in the first round.
*/
nr_of_all_caches = read_caches_for_processors(
processors, nr_of_processors, caches, numbers_of_caches, global_proc_index_per_group, chip_info);
if (!nr_of_all_caches) {
cpuinfo_log_error("error in reading cache information");
goto clean_up;
}
nr_of_cores =
read_cores_for_processors(processors, nr_of_processors, global_proc_index_per_group, cores, chip_info);
if (!nr_of_cores) {
cpuinfo_log_error("error in reading core information");
goto clean_up;
}
/* 5. Now that we read out everything from the system we can, fill the
* package, cluster and core structures respectively.
*/
result = connect_packages_cores_clusters_by_processors(
processors,
nr_of_processors,
packages,
nr_of_packages,
clusters,
cores,
nr_of_cores,
chip_info,
vendor);
if (!result) {
cpuinfo_log_error("error in connecting information");
goto clean_up;
}
/* 6. Count and store uarchs of cores, assuming same uarchs are
* neighbors */
enum cpuinfo_uarch prev_uarch = cpuinfo_uarch_unknown;
for (uint32_t i = 0; i < nr_of_cores; i++) {
if (prev_uarch != cores[i].uarch) {
nr_of_uarchs++;
prev_uarch = cores[i].uarch;
}
}
uarchs = HeapAlloc(heap, HEAP_ZERO_MEMORY, nr_of_uarchs * sizeof(struct cpuinfo_uarch_info));
if (uarchs == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for descriptions of %" PRIu32 " uarchs",
nr_of_uarchs * sizeof(struct cpuinfo_uarch_info),
nr_of_uarchs);
goto clean_up;
}
prev_uarch = cpuinfo_uarch_unknown;
for (uint32_t i = 0, uarch_index = 0; i < nr_of_cores; i++) {
if (prev_uarch != cores[i].uarch) {
if (i != 0) {
uarch_index++;
}
if (uarch_index >= nr_of_uarchs) {
cpuinfo_log_error("more uarchs detected than reported");
}
prev_uarch = cores[i].uarch;
uarchs[uarch_index].uarch = cores[i].uarch;
uarchs[uarch_index].core_count = 1;
uarchs[uarch_index].processor_count = cores[i].processor_count;
} else if (prev_uarch != cpuinfo_uarch_unknown) {
uarchs[uarch_index].core_count++;
uarchs[uarch_index].processor_count += cores[i].processor_count;
}
}
/* 7. Commit changes */
cpuinfo_processors = processors;
cpuinfo_packages = packages;
cpuinfo_clusters = clusters;
cpuinfo_cores = cores;
cpuinfo_uarchs = uarchs;
cpuinfo_processors_count = nr_of_processors;
cpuinfo_packages_count = nr_of_packages;
cpuinfo_clusters_count = nr_of_cores;
cpuinfo_cores_count = nr_of_cores;
cpuinfo_uarchs_count = nr_of_uarchs;
for (uint32_t i = 0; i < MAX_NR_OF_CACHES; i++) {
cpuinfo_cache_count[i] = numbers_of_caches[i];
}
cpuinfo_cache[cpuinfo_cache_level_1i] = caches;
cpuinfo_cache[cpuinfo_cache_level_1d] =
cpuinfo_cache[cpuinfo_cache_level_1i] + cpuinfo_cache_count[cpuinfo_cache_level_1i];
cpuinfo_cache[cpuinfo_cache_level_2] =
cpuinfo_cache[cpuinfo_cache_level_1d] + cpuinfo_cache_count[cpuinfo_cache_level_1d];
cpuinfo_cache[cpuinfo_cache_level_3] =
cpuinfo_cache[cpuinfo_cache_level_2] + cpuinfo_cache_count[cpuinfo_cache_level_2];
cpuinfo_cache[cpuinfo_cache_level_4] =
cpuinfo_cache[cpuinfo_cache_level_3] + cpuinfo_cache_count[cpuinfo_cache_level_3];
cpuinfo_max_cache_size = cpuinfo_compute_max_cache_size(&processors[0]);
result = true;
MemoryBarrier();
processors = NULL;
packages = NULL;
clusters = NULL;
cores = NULL;
caches = NULL;
uarchs = NULL;
clean_up:
/* The propagated pointers, shouldn't be freed, only in case of error
* and unfinished init.
*/
if (processors != NULL) {
HeapFree(heap, 0, processors);
}
if (packages != NULL) {
HeapFree(heap, 0, packages);
}
if (clusters != NULL) {
HeapFree(heap, 0, clusters);
}
if (cores != NULL) {
HeapFree(heap, 0, cores);
}
if (caches != NULL) {
HeapFree(heap, 0, caches);
}
if (uarchs != NULL) {
HeapFree(heap, 0, uarchs);
}
/* Free the locally used temporary pointers */
HeapFree(heap, 0, global_proc_index_per_group);
global_proc_index_per_group = NULL;
return result;
}
static uint32_t count_logical_processors(const uint32_t max_group_count, uint32_t* global_proc_index_per_group) {
uint32_t nr_of_processors = 0;
for (uint32_t i = 0; i < max_group_count; i++) {
uint32_t nr_of_processors_per_group = GetMaximumProcessorCount((WORD)i);
cpuinfo_log_debug(
"detected %" PRIu32 " processor(s) in group %" PRIu32 "", nr_of_processors_per_group, i);
global_proc_index_per_group[i] = nr_of_processors;
nr_of_processors += nr_of_processors_per_group;
}
return nr_of_processors;
}
/*
 * Read physical-package topology for all logical processors.
 * Thin wrapper over read_all_logical_processor_info_of_relation() with
 * RelationProcessorPackage; package parsing needs neither cache nor core
 * output buffers, so those are passed as NULL.
 * Returns the number of packages found, or 0 on error.
 */
static uint32_t read_packages_for_processors(
	struct cpuinfo_processor* processors,
	const uint32_t number_of_processors,
	const uint32_t* global_proc_index_per_group,
	const struct woa_chip_info* chip_info) {
	return read_all_logical_processor_info_of_relation(
		RelationProcessorPackage,
		processors,
		number_of_processors,
		/*caches=*/NULL,
		/*numbers_of_caches=*/NULL,
		/*cores=*/NULL,
		global_proc_index_per_group,
		chip_info);
}
/*
 * Read core topology for all logical processors.
 *
 * Called twice: first with cores == NULL to count cores, then with the
 * allocated cores array to fill it in.
 * Returns the number of cores found, or 0 on error.
 *
 * Fix: the definition now carries `static`, matching its forward declaration
 * above and the internal linkage of every sibling helper in this file.
 */
static uint32_t read_cores_for_processors(
	struct cpuinfo_processor* processors,
	const uint32_t number_of_processors,
	const uint32_t* global_proc_index_per_group,
	struct cpuinfo_core* cores,
	const struct woa_chip_info* chip_info) {
	return read_all_logical_processor_info_of_relation(
		RelationProcessorCore,
		processors,
		number_of_processors,
		NULL,
		NULL,
		cores,
		global_proc_index_per_group,
		chip_info);
}
/*
 * Read cache topology for all logical processors.
 *
 * Called twice: first with caches == NULL to count caches per level
 * (filling numbers_of_caches), then with the allocated caches array to
 * store the per-cache data.
 * Returns the total number of caches found, or 0 on error.
 */
static uint32_t read_caches_for_processors(
	struct cpuinfo_processor* processors,
	const uint32_t number_of_processors,
	struct cpuinfo_cache* caches,
	uint32_t* numbers_of_caches,
	const uint32_t* global_proc_index_per_group,
	const struct woa_chip_info* chip_info) {
	/* On the storing pass, mark every cache's processor_start as "unset"
	 * (UINT32_MAX) so store_cache_info_per_processor() can minimize into
	 * it while walking processors. The per-level counts are contiguous in
	 * the caches array, so a single flat loop over their sum suffices. */
	if (caches != NULL) {
		uint32_t total_caches = 0;
		for (uint32_t level = 0; level < MAX_NR_OF_CACHES; level++) {
			total_caches += numbers_of_caches[level];
		}
		for (uint32_t index = 0; index < total_caches; index++) {
			caches[index].processor_start = UINT32_MAX;
		}
	}
	return read_all_logical_processor_info_of_relation(
		RelationCache,
		processors,
		number_of_processors,
		caches,
		numbers_of_caches,
		NULL,
		global_proc_index_per_group,
		chip_info);
}
/*
 * Query GetLogicalProcessorInformationEx for one relationship type and parse
 * every returned record.
 *
 * info_type - RelationProcessorPackage, RelationProcessorCore or
 *             RelationCache; dispatches to the matching parser.
 * Returns the number of records parsed, or 0 on any error.
 *
 * Fixes vs. previous revision:
 *  - RelationProcessorPackage and RelationProcessorCore had two identical
 *    case bodies; they now share one fall-through case;
 *  - "information information" typo removed from the size-query error log.
 */
static uint32_t read_all_logical_processor_info_of_relation(
	LOGICAL_PROCESSOR_RELATIONSHIP info_type,
	struct cpuinfo_processor* processors,
	const uint32_t number_of_processors,
	struct cpuinfo_cache* caches,
	uint32_t* numbers_of_caches,
	struct cpuinfo_core* cores,
	const uint32_t* global_proc_index_per_group,
	const struct woa_chip_info* chip_info) {
	PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX infos = NULL;
	uint32_t nr_of_structs = 0;
	DWORD info_size = 0;
	bool result = false;
	HANDLE heap = GetProcessHeap();
	/* 1. Query the size of the information structure first. The call is
	 * expected to "fail" with ERROR_INSUFFICIENT_BUFFER and report the
	 * required size in info_size. */
	if (GetLogicalProcessorInformationEx(info_type, NULL, &info_size) == FALSE) {
		const DWORD last_error = GetLastError();
		if (last_error != ERROR_INSUFFICIENT_BUFFER) {
			cpuinfo_log_error(
				"failed to query size of processor %" PRIu32 " information: error %" PRIu32 "",
				(uint32_t)info_type,
				(uint32_t)last_error);
			goto clean_up;
		}
	}
	/* 2. Allocate memory for the information structure */
	infos = HeapAlloc(heap, 0, info_size);
	if (infos == NULL) {
		cpuinfo_log_error(
			"failed to allocate %" PRIu32 " bytes for logical processor information", (uint32_t)info_size);
		goto clean_up;
	}
	/* 3. Read the information structure */
	if (GetLogicalProcessorInformationEx(info_type, infos, &info_size) == FALSE) {
		cpuinfo_log_error(
			"failed to query processor %" PRIu32 " information: error %" PRIu32 "",
			(uint32_t)info_type,
			(uint32_t)GetLastError());
		goto clean_up;
	}
	/* 4. Parse the structure and store relevant data. Records are
	 * variable-sized; advance by info->Size each iteration. */
	PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info_end =
		(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)((uintptr_t)infos + info_size);
	for (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info = infos; info < info_end;
	     info = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)((uintptr_t)info + info->Size)) {
		if (info->Relationship != info_type) {
			cpuinfo_log_warning(
				"unexpected processor info type (%" PRIu32 ") for processor information",
				(uint32_t)info->Relationship);
			continue;
		}
		const uint32_t info_id = nr_of_structs++;
		switch (info_type) {
			/* Packages and cores share the same parsing routine;
			 * it dispatches internally on info->Relationship. */
			case RelationProcessorPackage:
			case RelationProcessorCore:
				result = parse_relation_processor_info(
					processors,
					number_of_processors,
					global_proc_index_per_group,
					info,
					info_id,
					cores,
					chip_info);
				break;
			case RelationCache:
				result = parse_relation_cache_info(
					processors, caches, numbers_of_caches, global_proc_index_per_group, info);
				break;
			default:
				cpuinfo_log_error(
					"unexpected processor info type (%" PRIu32 ") for processor information",
					(uint32_t)info->Relationship);
				result = false;
				break;
		}
		if (!result) {
			nr_of_structs = 0;
			goto clean_up;
		}
	}
clean_up:
	/* 5. Release dynamically allocated info structure. */
	HeapFree(heap, 0, infos);
	infos = NULL;
	return nr_of_structs;
}
/*
 * Parse one RelationProcessorPackage or RelationProcessorCore record and
 * store its data into each logical processor it covers.
 *
 * info_id - sequential index of this record; used as the package id or core
 *           id for every processor whose bit is set in the record's masks.
 * Returns false if a decoded processor index exceeds nr_of_processors.
 */
static bool parse_relation_processor_info(
	struct cpuinfo_processor* processors,
	uint32_t nr_of_processors,
	const uint32_t* global_proc_index_per_group,
	PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info,
	const uint32_t info_id,
	struct cpuinfo_core* cores,
	const struct woa_chip_info* chip_info) {
	for (uint32_t i = 0; i < info->Processor.GroupCount; i++) {
		const uint32_t group_id = info->Processor.GroupMask[i].Group;
		/* Bitmask representing processors in this group belonging to
		 * this package
		 */
		KAFFINITY group_processors_mask = info->Processor.GroupMask[i].Mask;
		/* Visit each set bit: lowest first, cleared after handling. */
		while (group_processors_mask != 0) {
			const uint32_t processor_id_in_group = low_index_from_kaffinity(group_processors_mask);
			/* Translate (group, in-group id) -> global processor index. */
			const uint32_t processor_global_index =
				global_proc_index_per_group[group_id] + processor_id_in_group;
			if (processor_global_index >= nr_of_processors) {
				cpuinfo_log_error("unexpected processor index %" PRIu32 "", processor_global_index);
				return false;
			}
			switch (info->Relationship) {
				case RelationProcessorPackage:
					store_package_info_per_processor(
						processors,
						processor_global_index,
						info_id,
						group_id,
						processor_id_in_group);
					break;
				case RelationProcessorCore:
					store_core_info_per_processor(
						processors, processor_global_index, info_id, info, cores, chip_info);
					break;
				default:
					cpuinfo_log_error(
						"unexpected processor info type (%" PRIu32
						") for processor information",
						(uint32_t)info->Relationship);
					break;
			}
			/* Clear the bits in affinity mask, lower the least set
			 * bit. */
			group_processors_mask &= (group_processors_mask - 1);
		}
	}
	return true;
}
/*
 * Parse one RelationCache record.
 *
 * Two modes:
 *  - caches == NULL: counting pass; only increments numbers_of_caches per
 *    level/type and returns.
 *  - caches != NULL: storing pass; fills the next free slot for the cache's
 *    level/type and links it into every covered logical processor.
 *
 * NOTE: the per-type counters are static, so this function assumes a single
 * initialization pass per process.
 *
 * Fix vs. previous revision: a unified or trace L1 cache (or an unknown
 * level) is not counted in the first pass and left current_cache == NULL in
 * the second pass, which was then dereferenced unconditionally. Such records
 * are now skipped instead of crashing.
 */
static bool parse_relation_cache_info(
	struct cpuinfo_processor* processors,
	struct cpuinfo_cache* caches,
	uint32_t* numbers_of_caches,
	const uint32_t* global_proc_index_per_group,
	PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info) {
	static uint32_t l1i_counter = 0;
	static uint32_t l1d_counter = 0;
	static uint32_t l2_counter = 0;
	static uint32_t l3_counter = 0;
	/* Count cache types for allocation at first. */
	if (caches == NULL) {
		switch (info->Cache.Level) {
			case 1:
				switch (info->Cache.Type) {
					case CacheInstruction:
						numbers_of_caches[cpuinfo_cache_level_1i]++;
						break;
					case CacheData:
						numbers_of_caches[cpuinfo_cache_level_1d]++;
						break;
					case CacheUnified:
						break;
					case CacheTrace:
						break;
					default:
						break;
				}
				break;
			case 2:
				numbers_of_caches[cpuinfo_cache_level_2]++;
				break;
			case 3:
				numbers_of_caches[cpuinfo_cache_level_3]++;
				break;
		}
		return true;
	}
	/* Per-level base pointers into the single contiguous caches array. */
	struct cpuinfo_cache* l1i_base = caches;
	struct cpuinfo_cache* l1d_base = l1i_base + numbers_of_caches[cpuinfo_cache_level_1i];
	struct cpuinfo_cache* l2_base = l1d_base + numbers_of_caches[cpuinfo_cache_level_1d];
	struct cpuinfo_cache* l3_base = l2_base + numbers_of_caches[cpuinfo_cache_level_2];
	cpuinfo_log_debug(
		"info->Cache.GroupCount:%" PRIu32 ", info->Cache.GroupMask:%" PRIu32
		","
		"info->Cache.Level:%" PRIu32 ", info->Cache.Associativity:%" PRIu32
		","
		"info->Cache.LineSize:%" PRIu32
		","
		"info->Cache.CacheSize:%" PRIu32 ", info->Cache.Type:%" PRIu32 "",
		info->Cache.GroupCount,
		(unsigned int)info->Cache.GroupMask.Mask,
		info->Cache.Level,
		info->Cache.Associativity,
		info->Cache.LineSize,
		info->Cache.CacheSize,
		info->Cache.Type);
	struct cpuinfo_cache* current_cache = NULL;
	switch (info->Cache.Level) {
		case 1:
			switch (info->Cache.Type) {
				case CacheInstruction:
					current_cache = l1i_base + l1i_counter;
					l1i_counter++;
					break;
				case CacheData:
					current_cache = l1d_base + l1d_counter;
					l1d_counter++;
					break;
				case CacheUnified:
					break;
				case CacheTrace:
					break;
				default:
					break;
			}
			break;
		case 2:
			current_cache = l2_base + l2_counter;
			l2_counter++;
			break;
		case 3:
			current_cache = l3_base + l3_counter;
			l3_counter++;
			break;
	}
	if (current_cache == NULL) {
		/* Cache types/levels not counted in the first pass (unified or
		 * trace L1, unknown levels) have no slot; skip them. */
		return true;
	}
	current_cache->size = info->Cache.CacheSize;
	current_cache->line_size = info->Cache.LineSize;
	current_cache->associativity = info->Cache.Associativity;
	/* We don't have partition and set information of caches on Windows,
	 * so we set partitions to 1 and calculate the expected sets.
	 */
	current_cache->partitions = 1;
	current_cache->sets = current_cache->size / current_cache->line_size / current_cache->associativity;
	if (info->Cache.Type == CacheUnified) {
		current_cache->flags = CPUINFO_CACHE_UNIFIED;
	}
	for (uint32_t i = 0; i < info->Cache.GroupCount; i++) {
		/* Zero GroupCount is valid, GroupMask still can store bits set.
		 */
		const uint32_t group_id = info->Cache.GroupMasks[i].Group;
		/* Bitmask representing processors in this group belonging to
		 * this package
		 */
		KAFFINITY group_processors_mask = info->Cache.GroupMasks[i].Mask;
		while (group_processors_mask != 0) {
			const uint32_t processor_id_in_group = low_index_from_kaffinity(group_processors_mask);
			const uint32_t processor_global_index =
				global_proc_index_per_group[group_id] + processor_id_in_group;
			store_cache_info_per_processor(processors, processor_global_index, info, current_cache);
			/* Clear the bits in affinity mask, lower the least set
			 * bit. */
			group_processors_mask &= (group_processors_mask - 1);
		}
	}
	return true;
}
/*
 * Record group/package identification for one logical processor.
 *
 * The package array is not allocated yet during the counting phase, so the
 * package index is stashed as an offset from a NULL base pointer; it is
 * rebased to a real pointer later in
 * connect_packages_cores_clusters_by_processors().
 */
static void store_package_info_per_processor(
	struct cpuinfo_processor* processors,
	const uint32_t processor_global_index,
	const uint32_t package_id,
	const uint32_t group_id,
	const uint32_t processor_id_in_group) {
	struct cpuinfo_processor* processor = &processors[processor_global_index];
	processor->windows_group_id = (uint16_t)group_id;
	processor->windows_processor_id = (uint16_t)processor_id_in_group;
	/* Pointer-as-offset trick: NULL + package_id, fixed up later. */
	processor->package = (const struct cpuinfo_package*)NULL + package_id;
}
/*
 * Record core identification and uarch/frequency data for one logical
 * processor. No-op when cores == NULL (the counting pass).
 *
 * core_info is currently unused but kept for interface symmetry with the
 * other store_* helpers.
 *
 * Fixes vs. previous revision:
 *  - the definition now carries `static`, matching its forward declaration;
 *  - the error log used %d for a uint32_t; now uses PRIu32.
 */
static void store_core_info_per_processor(
	struct cpuinfo_processor* processors,
	const uint32_t processor_global_index,
	const uint32_t core_id,
	PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX core_info,
	struct cpuinfo_core* cores,
	const struct woa_chip_info* chip_info) {
	if (cores) {
		processors[processor_global_index].core = cores + core_id;
		cores[core_id].core_id = core_id;
		if (chip_info->uarchs == NULL) {
			cpuinfo_log_error("uarch is NULL for core %" PRIu32, core_id);
			return;
		}
		cores[core_id].uarch = chip_info->uarchs[0].uarch;
		cores[core_id].frequency = chip_info->uarchs[0].frequency;
		/* We don't have cluster information, so we handle it as
		 * fixed 1 to (cluster / cores).
		 * Set the cluster offset ID now, as soon as we have the
		 * cluster base address, we'll set the absolute address.
		 */
		processors[processor_global_index].cluster = (const struct cpuinfo_cluster*)NULL + core_id;
	}
}
/*
 * Link one cache record into a logical processor's cache pointers and update
 * the cache's processor span (start index and count).
 *
 * processor_start was pre-set to UINT32_MAX by read_caches_for_processors(),
 * so the minimum of all covered processor indexes accumulates here.
 */
static void store_cache_info_per_processor(
	struct cpuinfo_processor* processors,
	const uint32_t processor_global_index,
	PSYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX info,
	struct cpuinfo_cache* current_cache) {
	/* Track the lowest processor index sharing this cache. */
	if (current_cache->processor_start > processor_global_index) {
		current_cache->processor_start = processor_global_index;
	}
	current_cache->processor_count++;
	struct cpuinfo_processor* processor = &processors[processor_global_index];
	if (info->Cache.Level == 1) {
		if (info->Cache.Type == CacheInstruction) {
			processor->cache.l1i = current_cache;
		} else if (info->Cache.Type == CacheData) {
			processor->cache.l1d = current_cache;
		}
		/* Unified and trace L1 caches are not linked. */
	} else if (info->Cache.Level == 2) {
		processor->cache.l2 = current_cache;
	} else if (info->Cache.Level == 3) {
		processor->cache.l3 = current_cache;
	}
}
static bool connect_packages_cores_clusters_by_processors(
struct cpuinfo_processor* processors,
const uint32_t nr_of_processors,
struct cpuinfo_package* packages,
const uint32_t nr_of_packages,
struct cpuinfo_cluster* clusters,
struct cpuinfo_core* cores,
const uint32_t nr_of_cores,
const struct woa_chip_info* chip_info,
enum cpuinfo_vendor vendor) {
/* Adjust core and package pointers for all logical processors. */
for (uint32_t i = nr_of_processors; i != 0; i--) {
const uint32_t processor_id = i - 1;
struct cpuinfo_processor* processor = processors + processor_id;
struct cpuinfo_core* core = (struct cpuinfo_core*)processor->core;
/* We stored the offset of pointers when we haven't allocated
* memory for packages and clusters, so now add offsets to base
* addresses.
*/
struct cpuinfo_package* package =
(struct cpuinfo_package*)((uintptr_t)packages + (uintptr_t)processor->package);
if (package < packages || package >= (packages + nr_of_packages)) {
cpuinfo_log_error("invalid package indexing");
return false;
}
processor->package = package;
struct cpuinfo_cluster* cluster =
(struct cpuinfo_cluster*)((uintptr_t)clusters + (uintptr_t)processor->cluster);
if (cluster < clusters || cluster >= (clusters + nr_of_cores)) {
cpuinfo_log_error("invalid cluster indexing");
return false;
}
processor->cluster = cluster;
if (chip_info) {
size_t converted_chars = 0;
if (!WideCharToMultiByte(
CP_UTF8,
WC_ERR_INVALID_CHARS,
chip_info->chip_name_string,
-1,
package->name,
CPUINFO_PACKAGE_NAME_MAX,
NULL,
NULL)) {
cpuinfo_log_error("cpu name character conversion error");
return false;
};
}
/* Set start indexes and counts per packages / clusters / cores
* - going backwards */
/* This can be overwritten by lower-index processors on the same
* package. */
package->processor_start = processor_id;
package->processor_count++;
/* This can be overwritten by lower-index processors on the same
* cluster. */
cluster->processor_start = processor_id;
cluster->processor_count++;
/* This can be overwritten by lower-index processors on the same
* core. */
core->processor_start = processor_id;
core->processor_count++;
}
/* Fill cores */
for (uint32_t i = nr_of_cores; i != 0; i--) {
const uint32_t global_core_id = i - 1;
struct cpuinfo_core* core = cores + global_core_id;
const struct cpuinfo_processor* processor = processors + core->processor_start;
struct cpuinfo_package* package = (struct cpuinfo_package*)processor->package;
struct cpuinfo_cluster* cluster = (struct cpuinfo_cluster*)processor->cluster;
core->package = package;
core->cluster = cluster;
core->vendor = vendor;
/* This can be overwritten by lower-index cores on the same
* cluster/package.
*/
cluster->core_start = global_core_id;
cluster->core_count++;
package->core_start = global_core_id;
package->core_count++;
package->cluster_start = global_core_id;
package->cluster_count = package->core_count;
cluster->package = package;
cluster->vendor = cores[cluster->core_start].vendor;
cluster->uarch = cores[cluster->core_start].uarch;
cluster->frequency = cores[cluster->core_start].frequency;
}
return true;
}
/*
 * Return the bit position of the lowest set bit in kaffinity.
 * Callers guarantee kaffinity != 0 (they loop while the mask is nonzero);
 * _BitScanForward64's return value is therefore ignored.
 */
static inline uint32_t low_index_from_kaffinity(KAFFINITY kaffinity) {
	unsigned long bit_position = 0;
	(void)_BitScanForward64(&bit_position, (unsigned __int64)kaffinity);
	return (uint32_t)bit_position;
}

222
3rdparty/cpuinfo/src/arm/windows/init.c vendored Normal file
View File

@@ -0,0 +1,222 @@
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <cpuinfo.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
#include <arm/api.h>
#include <arm/midr.h>
#include "windows-arm-init.h"
struct cpuinfo_arm_isa cpuinfo_isa;
static void set_cpuinfo_isa_fields(void);
static struct woa_chip_info* get_system_info_from_registry(void);
static struct woa_chip_info woa_chip_unknown = {L"Unknown", {{cpuinfo_vendor_unknown, cpuinfo_uarch_unknown, 0}}};
/*
 * One-time initialization entry point (InitOnceExecuteOnce callback).
 * Detects ISA features, reads chip data from the registry (falling back to
 * woa_chip_unknown), and builds the cpuinfo tables.
 * The init_once/parameter/context arguments are required by the callback
 * signature and unused.
 *
 * Fix vs. previous revision: removed the unused local `vendor`, which was
 * initialized but never read (the vendor passed on comes from chip_info).
 */
BOOL CALLBACK cpuinfo_arm_windows_init(PINIT_ONCE init_once, PVOID parameter, PVOID* context) {
	struct woa_chip_info* chip_info = NULL;
	set_cpuinfo_isa_fields();
	chip_info = get_system_info_from_registry();
	if (chip_info == NULL) {
		chip_info = &woa_chip_unknown;
	}
	cpuinfo_is_initialized = cpu_info_init_by_logical_sys_info(chip_info, chip_info->uarchs[0].vendor);
	return true;
}
/* Static helper functions */
/*
 * Read a REG_SZ value from HKEY_LOCAL_MACHINE\<subkey>\<value>.
 * Uses the standard two-call pattern: query the byte size first, allocate,
 * then read into the buffer.
 * Returns a heap-allocated wide string the caller must HeapFree(), or NULL
 * on any error.
 *
 * Fix vs. previous revision: compare LSTATUS against ERROR_SUCCESS instead
 * of the literal 0, consistent with read_registry_qword()/_dword().
 */
static wchar_t* read_registry(LPCWSTR subkey, LPCWSTR value) {
	DWORD key_type = 0;
	DWORD data_size = 0;
	const DWORD flags = RRF_RT_REG_SZ; /* Only read strings (REG_SZ) */
	wchar_t* text_buffer = NULL;
	LSTATUS result = 0;
	HANDLE heap = GetProcessHeap();
	result = RegGetValueW(
		HKEY_LOCAL_MACHINE,
		subkey,
		value,
		flags,
		&key_type,
		NULL, /* Request buffer size */
		&data_size);
	if (result != ERROR_SUCCESS || data_size == 0) {
		cpuinfo_log_error("Registry entry size read error");
		return NULL;
	}
	text_buffer = HeapAlloc(heap, HEAP_ZERO_MEMORY, data_size);
	if (text_buffer == NULL) {
		cpuinfo_log_error("Registry textbuffer allocation error");
		return NULL;
	}
	result = RegGetValueW(
		HKEY_LOCAL_MACHINE,
		subkey,
		value,
		flags,
		NULL,
		text_buffer, /* Write string in this destination buffer */
		&data_size);
	if (result != ERROR_SUCCESS) {
		cpuinfo_log_error("Registry read error");
		HeapFree(heap, 0, text_buffer);
		return NULL;
	}
	return text_buffer;
}
/*
 * Read a REG_QWORD value from HKEY_LOCAL_MACHINE\<subkey>\<value>.
 * Returns the 64-bit value, or 0 on error (indistinguishable from a stored
 * zero; callers treat 0 as "not available").
 */
static uint64_t read_registry_qword(LPCWSTR subkey, LPCWSTR value) {
	DWORD value_type = 0;
	DWORD value_size = sizeof(uint64_t);
	uint64_t data = 0;
	/* RRF_RT_REG_QWORD restricts the query to QWORD values only. */
	const LSTATUS status =
		RegGetValueW(HKEY_LOCAL_MACHINE, subkey, value, RRF_RT_REG_QWORD, &value_type, &data, &value_size);
	if (status != ERROR_SUCCESS || value_size != sizeof(uint64_t)) {
		cpuinfo_log_error("Registry QWORD read error");
		return 0;
	}
	return data;
}
/*
 * Read a REG_DWORD value from HKEY_LOCAL_MACHINE, widened to uint64_t.
 * Returns 0 when the read fails or the reported size is not exactly 4 bytes;
 * a stored value of 0 is indistinguishable from failure.
 */
static uint64_t read_registry_dword(LPCWSTR subkey, LPCWSTR value) {
	DWORD dword = 0;
	DWORD type = 0;
	DWORD size = sizeof(DWORD);
	const LSTATUS status =
		RegGetValueW(HKEY_LOCAL_MACHINE, subkey, value, RRF_RT_REG_DWORD, &type, &dword, &size);
	if (status != ERROR_SUCCESS || size != sizeof(DWORD)) {
		cpuinfo_log_error("Registry DWORD read error");
		return 0;
	}
	return (uint64_t)dword;
}
/*
 * Duplicate at most n wide characters of src into a NUL-terminated string
 * allocated from the process heap. Returns NULL if allocation fails;
 * otherwise the caller owns the returned buffer.
 */
static wchar_t* wcsndup(const wchar_t* src, size_t n) {
	const size_t length = wcsnlen(src, n);
	wchar_t* copy = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, (length + 1) * sizeof(wchar_t));
	if (copy != NULL) {
		wcsncpy_s(copy, length + 1, src, length);
		copy[length] = L'\0'; /* redundant with HEAP_ZERO_MEMORY, kept for clarity */
	}
	return copy;
}
static struct core_info_by_chip_name get_core_info_from_midr(uint32_t midr, uint64_t frequency) {
struct core_info_by_chip_name info;
enum cpuinfo_vendor vendor;
enum cpuinfo_uarch uarch;
#if CPUINFO_ARCH_ARM
bool has_vfpv4 = false;
cpuinfo_arm_decode_vendor_uarch(midr, has_vfpv4, &vendor, &uarch);
#else
cpuinfo_arm_decode_vendor_uarch(midr, &vendor, &uarch);
#endif
info.vendor = vendor;
info.uarch = uarch;
info.frequency = frequency;
return info;
}
static struct woa_chip_info* get_system_info_from_registry(void) {
wchar_t* text_buffer = NULL;
LPCWSTR cpu0_subkey = L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0";
LPCWSTR chip_name_value = L"ProcessorNameString";
LPCWSTR chip_midr_value = L"CP 4000";
LPCWSTR chip_mhz_value = L"~MHz";
struct woa_chip_info* chip_info = NULL;
/* Read processor model name from registry and find in the hard-coded
* list. */
text_buffer = read_registry(cpu0_subkey, chip_name_value);
if (text_buffer == NULL) {
cpuinfo_log_error("Registry read error for processor name");
return NULL;
}
/*
* https://developer.arm.com/documentation/100442/0100/register-descriptions/aarch32-system-registers/midr--main-id-register
* Regedit for MIDR :
*HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0\CP 4000
*/
uint64_t midr_qword = (uint32_t)read_registry_qword(cpu0_subkey, chip_midr_value);
if (midr_qword == 0) {
cpuinfo_log_error("Registry read error for MIDR value");
return NULL;
}
// MIDR is only 32 bits, so we need to cast it to uint32_t
uint32_t midr_value = (uint32_t)midr_qword;
/* Read the frequency from the registry
* The value is in MHz, so we need to convert it to Hz */
uint64_t frequency_mhz = read_registry_dword(cpu0_subkey, chip_mhz_value);
if (frequency_mhz == 0) {
cpuinfo_log_error("Registry read error for frequency value");
return NULL;
}
// Convert MHz to Hz
uint64_t frequency_hz = frequency_mhz * 1000000;
// Allocate chip_info before using it.
chip_info = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(struct woa_chip_info));
if (chip_info == NULL) {
cpuinfo_log_error("Heap allocation error for chip_info");
return NULL;
}
// set chip_info fields
chip_info->chip_name_string = wcsndup(text_buffer, CPUINFO_PACKAGE_NAME_MAX - 1);
chip_info->uarchs[0] = get_core_info_from_midr(midr_value, frequency_hz);
cpuinfo_log_debug("detected chip model name: %ls", chip_info->chip_name_string);
return chip_info;
}
/*
 * Populate the global cpuinfo_isa feature flags from
 * IsProcessorFeaturePresent() queries. Windows exposes only coarse-grained
 * feature bits, so several cpuinfo flags are derived from one query.
 */
static void set_cpuinfo_isa_fields(void) {
	cpuinfo_isa.atomics = IsProcessorFeaturePresent(PF_ARM_V81_ATOMIC_INSTRUCTIONS_AVAILABLE) != 0;

	const bool dotprod = IsProcessorFeaturePresent(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE) != 0;
	cpuinfo_isa.dot = dotprod;

	/*
	 * Assume that Dot Product support implies FP16 arithmetics and RDM
	 * support. ARM manuals don't guarantee that, but it holds in practice.
	 * (A previous revision switched on system_info.wProcessorLevel with a
	 * special case for 0x803 — Kryo 385 Silver / Snapdragon 850 — but both
	 * branches were identical, so the switch and the GetSystemInfo() query
	 * have been removed.)
	 */
	cpuinfo_isa.fp16arith = dotprod;
	cpuinfo_isa.rdm = dotprod;

	/* Windows API reports all or nothing for cryptographic instructions. */
	const bool crypto = IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE) != 0;
	cpuinfo_isa.aes = crypto;
	cpuinfo_isa.sha1 = crypto;
	cpuinfo_isa.sha2 = crypto;
	cpuinfo_isa.pmull = crypto;

	cpuinfo_isa.crc32 = IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE) != 0;
}

View File

@@ -0,0 +1,21 @@
#pragma once
/* Efficiency class = 0 means little core, while 1 means big core for now. */
#define MAX_WOA_VALID_EFFICIENCY_CLASSES 2
/* Topology information hard-coded by SoC/chip name */
struct core_info_by_chip_name {
	enum cpuinfo_vendor vendor; /* core IP vendor */
	enum cpuinfo_uarch uarch; /* microarchitecture */
	uint64_t frequency; /* Hz */
};
/* SoC/chip info that's currently not readable by logical system information,
 * but can be read from registry.
 */
struct woa_chip_info {
	wchar_t* chip_name_string; /* human-readable processor name, heap-allocated */
	struct core_info_by_chip_name uarchs[MAX_WOA_VALID_EFFICIENCY_CLASSES]; /* indexed by efficiency class */
};
/* Fills the global cpuinfo topology tables from Windows logical-processor
 * information, taking per-efficiency-class core details from chip_info. */
bool cpu_info_init_by_logical_sys_info(const struct woa_chip_info* chip_info, enum cpuinfo_vendor vendor);

17
3rdparty/cpuinfo/src/cache.c vendored Normal file
View File

@@ -0,0 +1,17 @@
#include <stddef.h>
#include <cpuinfo.h>
#include <cpuinfo/internal-api.h>
uint32_t cpuinfo_compute_max_cache_size(const struct cpuinfo_processor* processor) {
if (processor->cache.l4 != NULL) {
return processor->cache.l4->size;
} else if (processor->cache.l3 != NULL) {
return processor->cache.l3->size;
} else if (processor->cache.l2 != NULL) {
return processor->cache.l2->size;
} else if (processor->cache.l1d != NULL) {
return processor->cache.l1d->size;
}
return 0;
}

39
3rdparty/cpuinfo/src/cpuinfo/common.h vendored Normal file
View File

@@ -0,0 +1,39 @@
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
#pragma once
/* Element count of a statically-sized array (0[array] is equivalent to array[0]). */
#define CPUINFO_COUNT_OF(array) (sizeof(array) / sizeof(0 [array]))
/* Branch-prediction hints; plain truthiness tests on compilers without
 * __builtin_expect. */
#if defined(__GNUC__)
#define CPUINFO_LIKELY(condition) (__builtin_expect(!!(condition), 1))
#define CPUINFO_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
#else
#define CPUINFO_LIKELY(condition) (!!(condition))
#define CPUINFO_UNLIKELY(condition) (!!(condition))
#endif
/* CPUINFO_INTERNAL: symbol shared across cpuinfo translation units but
 * hidden from library consumers, where the object format supports it. */
#ifndef CPUINFO_INTERNAL
#if defined(__ELF__)
#define CPUINFO_INTERNAL __attribute__((__visibility__("internal")))
#elif defined(__MACH__)
#define CPUINFO_INTERNAL __attribute__((__visibility__("hidden")))
#else
#define CPUINFO_INTERNAL
#endif
#endif
/* CPUINFO_PRIVATE: symbol with hidden visibility where supported. */
#ifndef CPUINFO_PRIVATE
#if defined(__ELF__)
#define CPUINFO_PRIVATE __attribute__((__visibility__("hidden")))
#elif defined(__MACH__)
#define CPUINFO_PRIVATE __attribute__((__visibility__("hidden")))
#else
#define CPUINFO_PRIVATE
#endif
#endif

View File

@@ -0,0 +1,67 @@
#pragma once
#include <stdbool.h>
#include <stdint.h>
#if defined(_WIN32) || defined(__CYGWIN__)
#include <windows.h>
#endif
#include <cpuinfo.h>
#include <cpuinfo/common.h>
/* Indices into the per-level cache arrays below. */
enum cpuinfo_cache_level {
	cpuinfo_cache_level_1i = 0,
	cpuinfo_cache_level_1d = 1,
	cpuinfo_cache_level_2 = 2,
	cpuinfo_cache_level_3 = 3,
	cpuinfo_cache_level_4 = 4,
	cpuinfo_cache_level_max = 5,
};
/* Global tables populated by the per-platform init functions below. */
extern CPUINFO_INTERNAL bool cpuinfo_is_initialized;
extern CPUINFO_INTERNAL struct cpuinfo_processor* cpuinfo_processors;
extern CPUINFO_INTERNAL struct cpuinfo_core* cpuinfo_cores;
extern CPUINFO_INTERNAL struct cpuinfo_cluster* cpuinfo_clusters;
extern CPUINFO_INTERNAL struct cpuinfo_package* cpuinfo_packages;
extern CPUINFO_INTERNAL struct cpuinfo_cache* cpuinfo_cache[cpuinfo_cache_level_max];
extern CPUINFO_INTERNAL uint32_t cpuinfo_processors_count;
extern CPUINFO_INTERNAL uint32_t cpuinfo_cores_count;
extern CPUINFO_INTERNAL uint32_t cpuinfo_clusters_count;
extern CPUINFO_INTERNAL uint32_t cpuinfo_packages_count;
extern CPUINFO_INTERNAL uint32_t cpuinfo_cache_count[cpuinfo_cache_level_max];
extern CPUINFO_INTERNAL uint32_t cpuinfo_max_cache_size;
/* ARM/RISC-V builds track an array of uarch records; other architectures
 * keep a single global record. */
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64 || CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
extern CPUINFO_INTERNAL struct cpuinfo_uarch_info* cpuinfo_uarchs;
extern CPUINFO_INTERNAL uint32_t cpuinfo_uarchs_count;
#else
extern CPUINFO_INTERNAL struct cpuinfo_uarch_info cpuinfo_global_uarch;
#endif
#ifdef __linux__
extern CPUINFO_INTERNAL uint32_t cpuinfo_linux_cpu_max;
extern CPUINFO_INTERNAL const struct cpuinfo_processor** cpuinfo_linux_cpu_to_processor_map;
extern CPUINFO_INTERNAL const struct cpuinfo_core** cpuinfo_linux_cpu_to_core_map;
#endif
/* Per-platform, per-architecture initialization entry points; exactly one
 * is invoked by cpuinfo_initialize() depending on the build target. */
CPUINFO_PRIVATE void cpuinfo_x86_mach_init(void);
CPUINFO_PRIVATE void cpuinfo_x86_linux_init(void);
CPUINFO_PRIVATE void cpuinfo_x86_freebsd_init(void);
#if defined(_WIN32) || defined(__CYGWIN__)
#if CPUINFO_ARCH_ARM64
CPUINFO_PRIVATE BOOL CALLBACK cpuinfo_arm_windows_init(PINIT_ONCE init_once, PVOID parameter, PVOID* context);
#else
CPUINFO_PRIVATE BOOL CALLBACK cpuinfo_x86_windows_init(PINIT_ONCE init_once, PVOID parameter, PVOID* context);
#endif
#endif
CPUINFO_PRIVATE void cpuinfo_arm_mach_init(void);
CPUINFO_PRIVATE void cpuinfo_arm_linux_init(void);
CPUINFO_PRIVATE void cpuinfo_riscv_linux_init(void);
CPUINFO_PRIVATE void cpuinfo_emscripten_init(void);
/* Largest cache reachable from a logical processor; see src/cache.c. */
CPUINFO_PRIVATE uint32_t cpuinfo_compute_max_cache_size(const struct cpuinfo_processor* processor);
typedef void (*cpuinfo_processor_callback)(uint32_t);

102
3rdparty/cpuinfo/src/cpuinfo/log.h vendored Normal file
View File

@@ -0,0 +1,102 @@
#pragma once
#include <inttypes.h>
#include <stdarg.h>
#include <stdlib.h>
/* The build system must define CPUINFO_LOG_LEVEL to one of the levels below;
 * calls above the configured level compile to empty inline functions. */
#ifndef CPUINFO_LOG_LEVEL
#error "Undefined CPUINFO_LOG_LEVEL"
#endif
#define CPUINFO_LOG_NONE 0
#define CPUINFO_LOG_FATAL 1
#define CPUINFO_LOG_ERROR 2
#define CPUINFO_LOG_WARNING 3
#define CPUINFO_LOG_INFO 4
#define CPUINFO_LOG_DEBUG 5
/* Extra-verbose logging inside the /proc和sysfs parsers; off by default. */
#ifndef CPUINFO_LOG_DEBUG_PARSERS
#define CPUINFO_LOG_DEBUG_PARSERS 0
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* va_list backends; only the ones enabled by CPUINFO_LOG_LEVEL are declared
 * (implementations live elsewhere in the library). */
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_DEBUG
void cpuinfo_vlog_debug(const char* format, va_list args);
#endif
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_INFO
void cpuinfo_vlog_info(const char* format, va_list args);
#endif
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_WARNING
void cpuinfo_vlog_warning(const char* format, va_list args);
#endif
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_ERROR
void cpuinfo_vlog_error(const char* format, va_list args);
#endif
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_FATAL
void cpuinfo_vlog_fatal(const char* format, va_list args);
#endif
#ifdef __cplusplus
} // extern "C"
#endif
/* printf-style format checking where the compiler supports it. */
#ifndef CPUINFO_LOG_ARGUMENTS_FORMAT
#ifdef __GNUC__
#define CPUINFO_LOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2)))
#else
#define CPUINFO_LOG_ARGUMENTS_FORMAT
#endif
#endif
/* Variadic front-ends: forward to the matching vlog backend, or compile to
 * nothing when the level is disabled. */
CPUINFO_LOG_ARGUMENTS_FORMAT inline static void cpuinfo_log_debug(const char* format, ...) {
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_DEBUG
	va_list args;
	va_start(args, format);
	cpuinfo_vlog_debug(format, args);
	va_end(args);
#endif
}
CPUINFO_LOG_ARGUMENTS_FORMAT inline static void cpuinfo_log_info(const char* format, ...) {
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_INFO
	va_list args;
	va_start(args, format);
	cpuinfo_vlog_info(format, args);
	va_end(args);
#endif
}
CPUINFO_LOG_ARGUMENTS_FORMAT inline static void cpuinfo_log_warning(const char* format, ...) {
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_WARNING
	va_list args;
	va_start(args, format);
	cpuinfo_vlog_warning(format, args);
	va_end(args);
#endif
}
CPUINFO_LOG_ARGUMENTS_FORMAT inline static void cpuinfo_log_error(const char* format, ...) {
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_ERROR
	va_list args;
	va_start(args, format);
	cpuinfo_vlog_error(format, args);
	va_end(args);
#endif
}
CPUINFO_LOG_ARGUMENTS_FORMAT inline static void cpuinfo_log_fatal(const char* format, ...) {
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_FATAL
	va_list args;
	va_start(args, format);
	cpuinfo_vlog_fatal(format, args);
	va_end(args);
#endif
	/* Fatal always terminates the process, even when logging is compiled out. */
	abort();
}
21
3rdparty/cpuinfo/src/cpuinfo/utils.h vendored Normal file
View File

@@ -0,0 +1,21 @@
#pragma once
#ifdef _MSC_VER
#include <intrin.h>
#endif
#include <stdint.h>
/*
 * Number of bits needed to index n distinct values, i.e. ceil(log2(n)) for
 * n >= 1; bit_length(1) == 0. Computed as the position of the highest set
 * bit of (n - 1), plus one.
 */
inline static uint32_t bit_length(uint32_t n) {
	const uint32_t mask = n - 1;
	if (mask == 0) {
		return 0;
	}
#ifdef _MSC_VER
	unsigned long msb;
	_BitScanReverse(&msb, mask);
	return (uint32_t)msb + 1;
#else
	return 32 - __builtin_clz(mask);
#endif
}

288
3rdparty/cpuinfo/src/emscripten/init.c vendored Normal file
View File

@@ -0,0 +1,288 @@
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten/threading.h>
#include <cpuinfo.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
/* volatile keeps the subtraction below from being constant-folded; the sign
 * of the resulting NaN is used by the init code as an x86-vs-ARM probe. */
static const volatile float infinity = INFINITY;
/* Single package descriptor shared by every synthesized processor. */
static struct cpuinfo_package static_package = {};
/* Hard-coded 2 MiB L3 cache attached to all processors on the x86 path. */
static struct cpuinfo_cache static_x86_l3 = {
	.size = 2 * 1024 * 1024,
	.associativity = 16,
	.sets = 2048,
	.partitions = 1,
	.line_size = 64,
};
/*
 * Initialize cpuinfo's global tables for Emscripten/WebAssembly builds.
 * The environment exposes only a logical-core count, so the topology
 * (SMT split, big.LITTLE clusters, cache sizes) is synthesized from
 * heuristics. On failure of any allocation, everything is freed and the
 * globals are left untouched (cpuinfo_is_initialized stays false).
 */
void cpuinfo_emscripten_init(void) {
	struct cpuinfo_processor* processors = NULL;
	struct cpuinfo_core* cores = NULL;
	struct cpuinfo_cluster* clusters = NULL;
	struct cpuinfo_cache* l1i = NULL;
	struct cpuinfo_cache* l1d = NULL;
	struct cpuinfo_cache* l2 = NULL;
	/* signbit(inf - inf) probes the sign of the generated NaN; presumably
	 * this distinguishes x86 from ARM hosts — confirm against upstream. */
	const bool is_x86 = signbit(infinity - infinity);
	int logical_cores_count = emscripten_num_logical_cores();
	if (logical_cores_count <= 0) {
		logical_cores_count = 1;
	}
	uint32_t processor_count = (uint32_t)logical_cores_count;
	uint32_t core_count = processor_count;
	uint32_t cluster_count = 1;
	uint32_t big_cluster_core_count = core_count;
	uint32_t processors_per_core = 1;
	if (is_x86) {
		/* Even processor counts are assumed to be 2-way SMT. */
		if (processor_count % 2 == 0) {
			processors_per_core = 2;
			core_count = processor_count / 2;
			big_cluster_core_count = core_count;
		}
	} else {
		/* Assume ARM/ARM64 */
		if (processor_count > 4) {
			/* Assume big.LITTLE architecture */
			cluster_count = 2;
			big_cluster_core_count = processor_count >= 8 ? 4 : 2;
		}
	}
	/* x86: one private L2 per core; ARM: one shared L2 per cluster. */
	uint32_t l2_count = is_x86 ? core_count : cluster_count;
	processors = calloc(processor_count, sizeof(struct cpuinfo_processor));
	if (processors == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %" PRIu32 " logical processors",
			processor_count * sizeof(struct cpuinfo_processor),
			processor_count);
		goto cleanup;
	}
	/* NOTE(review): allocated with processor_count entries though only
	 * core_count are initialized below — confirm this oversizing is intended. */
	cores = calloc(processor_count, sizeof(struct cpuinfo_core));
	if (cores == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %" PRIu32 " cores",
			processor_count * sizeof(struct cpuinfo_core),
			processor_count);
		goto cleanup;
	}
	clusters = calloc(cluster_count, sizeof(struct cpuinfo_cluster));
	if (clusters == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %" PRIu32 " clusters",
			cluster_count * sizeof(struct cpuinfo_cluster),
			cluster_count);
		goto cleanup;
	}
	l1i = calloc(core_count, sizeof(struct cpuinfo_cache));
	if (l1i == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %" PRIu32 " L1I caches",
			core_count * sizeof(struct cpuinfo_cache),
			core_count);
		goto cleanup;
	}
	l1d = calloc(core_count, sizeof(struct cpuinfo_cache));
	if (l1d == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %" PRIu32 " L1D caches",
			core_count * sizeof(struct cpuinfo_cache),
			core_count);
		goto cleanup;
	}
	l2 = calloc(l2_count, sizeof(struct cpuinfo_cache));
	if (l2 == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %" PRIu32 " L2 caches",
			l2_count * sizeof(struct cpuinfo_cache),
			l2_count);
		goto cleanup;
	}
	static_package.processor_count = processor_count;
	static_package.core_count = core_count;
	static_package.cluster_count = cluster_count;
	if (is_x86) {
		strncpy(static_package.name, "x86 vCPU", CPUINFO_PACKAGE_NAME_MAX);
	} else {
		strncpy(static_package.name, "ARM vCPU", CPUINFO_PACKAGE_NAME_MAX);
	}
	/* Wire up processors, cores and per-core caches. Cores at index >=
	 * big_cluster_core_count fall into the second (LITTLE) cluster. */
	for (uint32_t i = 0; i < core_count; i++) {
		for (uint32_t j = 0; j < processors_per_core; j++) {
			processors[i * processors_per_core + j] = (struct cpuinfo_processor){
				.smt_id = j,
				.core = cores + i,
				.cluster = clusters + (uint32_t)(i >= big_cluster_core_count),
				.package = &static_package,
				.cache.l1i = l1i + i,
				.cache.l1d = l1d + i,
				.cache.l2 = is_x86 ? l2 + i : l2 + (uint32_t)(i >= big_cluster_core_count),
				.cache.l3 = is_x86 ? &static_x86_l3 : NULL,
			};
		}
		cores[i] = (struct cpuinfo_core){
			.processor_start = i * processors_per_core,
			.processor_count = processors_per_core,
			.core_id = i,
			.cluster = clusters + (uint32_t)(i >= big_cluster_core_count),
			.package = &static_package,
			.vendor = cpuinfo_vendor_unknown,
			.uarch = cpuinfo_uarch_unknown,
			.frequency = 0,
		};
		l1i[i] = (struct cpuinfo_cache){
			.size = 32 * 1024,
			.associativity = 4,
			.sets = 128,
			.partitions = 1,
			.line_size = 64,
			.processor_start = i * processors_per_core,
			.processor_count = processors_per_core,
		};
		l1d[i] = (struct cpuinfo_cache){
			.size = 32 * 1024,
			.associativity = 4,
			.sets = 128,
			.partitions = 1,
			.line_size = 64,
			.processor_start = i * processors_per_core,
			.processor_count = processors_per_core,
		};
		if (is_x86) {
			l2[i] = (struct cpuinfo_cache){
				.size = 256 * 1024,
				.associativity = 8,
				.sets = 512,
				.partitions = 1,
				.line_size = 64,
				.processor_start = i * processors_per_core,
				.processor_count = processors_per_core,
			};
		}
	}
	if (is_x86) {
		clusters[0] = (struct cpuinfo_cluster){
			.processor_start = 0,
			.processor_count = processor_count,
			.core_start = 0,
			.core_count = core_count,
			.cluster_id = 0,
			.package = &static_package,
			.vendor = cpuinfo_vendor_unknown,
			.uarch = cpuinfo_uarch_unknown,
			.frequency = 0,
		};
		static_x86_l3.processor_count = processor_count;
	} else {
		/* Big cluster: shared 1 MiB L2. */
		clusters[0] = (struct cpuinfo_cluster){
			.processor_start = 0,
			.processor_count = big_cluster_core_count,
			.core_start = 0,
			.core_count = big_cluster_core_count,
			.cluster_id = 0,
			.package = &static_package,
			.vendor = cpuinfo_vendor_unknown,
			.uarch = cpuinfo_uarch_unknown,
			.frequency = 0,
		};
		l2[0] = (struct cpuinfo_cache){
			.size = 1024 * 1024,
			.associativity = 8,
			.sets = 2048,
			.partitions = 1,
			.line_size = 64,
			.processor_start = 0,
			.processor_count = big_cluster_core_count,
		};
		if (cluster_count > 1) {
			/* LITTLE cluster: shared 256 KiB L2. */
			l2[1] = (struct cpuinfo_cache){
				.size = 256 * 1024,
				.associativity = 8,
				.sets = 512,
				.partitions = 1,
				.line_size = 64,
				.processor_start = big_cluster_core_count,
				.processor_count = processor_count - big_cluster_core_count,
			};
			clusters[1] = (struct cpuinfo_cluster){
				.processor_start = big_cluster_core_count,
				.processor_count = processor_count - big_cluster_core_count,
				.core_start = big_cluster_core_count,
				.core_count = processor_count - big_cluster_core_count,
				.cluster_id = 1,
				.package = &static_package,
				.vendor = cpuinfo_vendor_unknown,
				.uarch = cpuinfo_uarch_unknown,
				.frequency = 0,
			};
		}
	}
	/* Commit changes */
	cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
	cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
	cpuinfo_cache[cpuinfo_cache_level_2] = l2;
	if (is_x86) {
		cpuinfo_cache[cpuinfo_cache_level_3] = &static_x86_l3;
	}
	cpuinfo_processors = processors;
	cpuinfo_cores = cores;
	cpuinfo_clusters = clusters;
	cpuinfo_packages = &static_package;
	/* NOTE(review): l1i/l1d arrays hold core_count entries, yet the counts
	 * below report processor_count; likewise cpuinfo_cores_count further
	 * down — verify against upstream cpuinfo before changing. */
	cpuinfo_cache_count[cpuinfo_cache_level_1i] = processor_count;
	cpuinfo_cache_count[cpuinfo_cache_level_1d] = processor_count;
	cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count;
	if (is_x86) {
		cpuinfo_cache_count[cpuinfo_cache_level_3] = 1;
	}
	cpuinfo_global_uarch = (struct cpuinfo_uarch_info){
		.uarch = cpuinfo_uarch_unknown,
		.processor_count = processor_count,
		.core_count = core_count,
	};
	cpuinfo_processors_count = processor_count;
	cpuinfo_cores_count = processor_count;
	cpuinfo_clusters_count = cluster_count;
	cpuinfo_packages_count = 1;
	cpuinfo_max_cache_size = is_x86 ? 128 * 1024 * 1024 : 8 * 1024 * 1024;
	cpuinfo_is_initialized = true;
	/* Ownership transferred to the globals: prevent the cleanup frees. */
	processors = NULL;
	cores = NULL;
	clusters = NULL;
	l1i = l1d = l2 = NULL;
cleanup:
	free(processors);
	free(cores);
	free(clusters);
	free(l1i);
	free(l1d);
	free(l2);
}

12
3rdparty/cpuinfo/src/freebsd/api.h vendored Normal file
View File

@@ -0,0 +1,12 @@
#pragma once
#include <stdint.h>
/* CPU topology as reported by FreeBSD sysctls; all counts are zero when
 * detection fails. */
struct cpuinfo_freebsd_topology {
	uint32_t packages; /* physical packages */
	uint32_t cores; /* cores across all packages */
	uint32_t threads; /* hardware threads (cores * threads_per_core) */
	uint32_t threads_per_core; /* SMT threads per core */
};
/* Detect the topology above via kern.sched/kern.smp sysctls. */
struct cpuinfo_freebsd_topology cpuinfo_freebsd_detect_topology(void);

100
3rdparty/cpuinfo/src/freebsd/topology.c vendored Normal file
View File

@@ -0,0 +1,100 @@
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <cpuinfo/log.h>
#include <freebsd/api.h>
/*
 * Read an integer sysctl by name. Returns the value when it is positive;
 * returns 0 (after logging) when the sysctl call fails or reports a
 * non-positive value.
 */
static int sysctl_int(const char* name) {
	int result = 0;
	size_t result_size = sizeof(result);
	if (sysctlbyname(name, &result, &result_size, NULL, 0) != 0) {
		cpuinfo_log_error("sysctlbyname(\"%s\") failed: %s", name, strerror(errno));
	} else if (result <= 0) {
		cpuinfo_log_error("sysctlbyname(\"%s\") returned invalid value %d %zu", name, result, result_size);
		result = 0;
	}
	return result;
}
/*
 * Read a string sysctl by name into a freshly calloc'd, NUL-terminated
 * buffer. Returns NULL (after logging) on any failure; on success the
 * caller owns the returned buffer.
 */
static char* sysctl_str(const char* name) {
	/* First call: query the length of the value, in bytes. */
	size_t length = 0;
	if (sysctlbyname(name, NULL, &length, NULL, 0) != 0) {
		cpuinfo_log_error("sysctlbyname(\"%s\") failed: %s", name, strerror(errno));
		return NULL;
	} else if (length <= 0) {
		cpuinfo_log_error("sysctlbyname(\"%s\") returned invalid value size %zu", name, length);
		return NULL;
	}
	/* One extra zeroed byte guarantees NUL termination. */
	length += 1;
	char* buffer = calloc(length, 1);
	if (!buffer) {
		cpuinfo_log_error("calloc %zu bytes failed", length);
		return NULL;
	}
	/* Second call: copy the value into the buffer. */
	if (sysctlbyname(name, buffer, &length, NULL, 0) != 0) {
		cpuinfo_log_error("sysctlbyname(\"%s\") failed: %s", name, strerror(errno));
		free(buffer);
		return NULL;
	}
	return buffer;
}
/*
 * Detect the FreeBSD CPU topology. Package count is estimated by counting
 * occurrences of known <group> tags in kern.sched.topology_spec XML; core
 * and thread counts come from kern.smp sysctls. On any failure, returns a
 * struct with packages == 0 (other fields may be partially filled).
 */
struct cpuinfo_freebsd_topology cpuinfo_freebsd_detect_topology(void) {
	struct cpuinfo_freebsd_topology topology = {
		.packages = 0,
		.cores = 0,
		.threads_per_core = 0,
		.threads = 0,
	};
	char* topology_spec = sysctl_str("kern.sched.topology_spec");
	if (!topology_spec) {
		return topology;
	}
	/* Try each known package-group marker in turn; count occurrences of
	 * the first one that matches at all. */
	const char* group_tags[] = {"<group level=\"2\" cache-level=\"0\">", "<group level=\"1\" "};
	for (size_t i = 0; i < sizeof(group_tags) / sizeof(group_tags[0]); i++) {
		const char* group_tag = group_tags[i];
		char* p = strstr(topology_spec, group_tag);
		while (p) {
			topology.packages += 1;
			p++;
			p = strstr(p, group_tag);
		}
		if (topology.packages > 0) {
			break;
		}
	}
	if (topology.packages == 0) {
		cpuinfo_log_error("failed to parse topology_spec: %s", topology_spec);
		free(topology_spec);
		goto fail;
	}
	free(topology_spec);
	topology.cores = sysctl_int("kern.smp.cores");
	if (topology.cores == 0) {
		goto fail;
	}
	if (topology.cores < topology.packages) {
		cpuinfo_log_error("invalid numbers of package and core: %d %d", topology.packages, topology.cores);
		goto fail;
	}
	topology.threads_per_core = sysctl_int("kern.smp.threads_per_core");
	if (topology.threads_per_core == 0) {
		goto fail;
	}
	cpuinfo_log_debug(
		"freebsd topology: packages = %d, cores = %d, "
		"threads_per_core = %d",
		topology.packages,
		topology.cores,
		topology.threads_per_core);
	topology.threads = topology.threads_per_core * topology.cores;
	return topology;
fail:
	/* packages == 0 signals failure to the caller. */
	topology.packages = 0;
	return topology;
}

67
3rdparty/cpuinfo/src/init.c vendored Normal file
View File

@@ -0,0 +1,67 @@
#if defined(_WIN32) || defined(__CYGWIN__)
#include <windows.h>
#elif !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__)
#include <pthread.h>
#endif
#include <cpuinfo.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
#ifdef __APPLE__
#include "TargetConditionals.h"
#endif
/* Once-guard matching the platform's threading primitive: Windows INIT_ONCE,
 * pthread_once where pthreads are available, or a plain flag on
 * single-threaded Emscripten. */
#if defined(_WIN32) || defined(__CYGWIN__)
static INIT_ONCE init_guard = INIT_ONCE_STATIC_INIT;
#elif !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__)
static pthread_once_t init_guard = PTHREAD_ONCE_INIT;
#else
static bool init_guard = false;
#endif
/*
 * Public entry point: run the architecture- and OS-specific detection code
 * exactly once (guarded by init_guard) and report whether initialization
 * succeeded. Safe to call multiple times; unsupported platforms log an
 * error and leave cpuinfo_is_initialized false.
 */
bool CPUINFO_ABI cpuinfo_initialize(void) {
#if CPUINFO_ARCH_X86 || CPUINFO_ARCH_X86_64
#if defined(__MACH__) && defined(__APPLE__)
	pthread_once(&init_guard, &cpuinfo_x86_mach_init);
#elif defined(__FreeBSD__)
	pthread_once(&init_guard, &cpuinfo_x86_freebsd_init);
#elif defined(__linux__)
	pthread_once(&init_guard, &cpuinfo_x86_linux_init);
#elif defined(_WIN32) || defined(__CYGWIN__)
	InitOnceExecuteOnce(&init_guard, &cpuinfo_x86_windows_init, NULL, NULL);
#else
	cpuinfo_log_error("operating system is not supported in cpuinfo");
#endif
#elif CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
#if defined(__linux__)
	pthread_once(&init_guard, &cpuinfo_arm_linux_init);
#elif defined(__MACH__) && defined(__APPLE__)
	pthread_once(&init_guard, &cpuinfo_arm_mach_init);
#elif defined(_WIN32)
	InitOnceExecuteOnce(&init_guard, &cpuinfo_arm_windows_init, NULL, NULL);
#else
	cpuinfo_log_error("operating system is not supported in cpuinfo");
#endif
#elif CPUINFO_ARCH_RISCV32 || CPUINFO_ARCH_RISCV64
#if defined(__linux__)
	pthread_once(&init_guard, &cpuinfo_riscv_linux_init);
#else
	cpuinfo_log_error("operating system is not supported in cpuinfo");
#endif
#elif CPUINFO_ARCH_ASMJS || CPUINFO_ARCH_WASM || CPUINFO_ARCH_WASMSIMD
#if defined(__EMSCRIPTEN_PTHREADS__)
	pthread_once(&init_guard, &cpuinfo_emscripten_init);
#else
	/* Single-threaded Emscripten: a plain boolean flag suffices. */
	if (!init_guard) {
		cpuinfo_emscripten_init();
	}
	init_guard = true;
#endif
#else
	cpuinfo_log_error("processor architecture is not supported in cpuinfo");
#endif
	return cpuinfo_is_initialized;
}
/* No-op: cpuinfo keeps its detected state for the lifetime of the process. */
void CPUINFO_ABI cpuinfo_deinitialize(void) {}

94
3rdparty/cpuinfo/src/linux/api.h vendored Normal file
View File

@@ -0,0 +1,94 @@
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <cpuinfo.h>
#include <cpuinfo/common.h>
/* Per-processor bookkeeping flags used while parsing Linux sysfs/procfs. */
#define CPUINFO_LINUX_FLAG_PRESENT UINT32_C(0x00000001)
#define CPUINFO_LINUX_FLAG_POSSIBLE UINT32_C(0x00000002)
#define CPUINFO_LINUX_FLAG_MAX_FREQUENCY UINT32_C(0x00000004)
#define CPUINFO_LINUX_FLAG_MIN_FREQUENCY UINT32_C(0x00000008)
#define CPUINFO_LINUX_FLAG_SMT_ID UINT32_C(0x00000010)
#define CPUINFO_LINUX_FLAG_CORE_ID UINT32_C(0x00000020)
#define CPUINFO_LINUX_FLAG_PACKAGE_ID UINT32_C(0x00000040)
#define CPUINFO_LINUX_FLAG_APIC_ID UINT32_C(0x00000080)
#define CPUINFO_LINUX_FLAG_SMT_CLUSTER UINT32_C(0x00000100)
#define CPUINFO_LINUX_FLAG_CORE_CLUSTER UINT32_C(0x00000200)
#define CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER UINT32_C(0x00000400)
#define CPUINFO_LINUX_FLAG_PROC_CPUINFO UINT32_C(0x00000800)
#define CPUINFO_LINUX_FLAG_VALID UINT32_C(0x00001000)
#define CPUINFO_LINUX_FLAG_CUR_FREQUENCY UINT32_C(0x00002000)
#define CPUINFO_LINUX_FLAG_CLUSTER_CLUSTER UINT32_C(0x00004000)
/* Callback invoked for each [start, end) CPU range parsed from a cpulist file. */
typedef bool (*cpuinfo_cpulist_callback)(uint32_t, uint32_t, void*);
CPUINFO_INTERNAL bool cpuinfo_linux_parse_cpulist(
	const char* filename,
	cpuinfo_cpulist_callback callback,
	void* context);
/* Callback for single-read sysfs files: (filename, data start, data end, context). */
typedef bool (*cpuinfo_smallfile_callback)(const char*, const char*, const char*, void*);
CPUINFO_INTERNAL bool cpuinfo_linux_parse_small_file(
	const char* filename,
	size_t buffer_size,
	cpuinfo_smallfile_callback,
	void* context);
/* Callback for line-oriented files such as /proc/cpuinfo. */
typedef bool (*cpuinfo_line_callback)(const char*, const char*, void*, uint64_t);
CPUINFO_INTERNAL bool cpuinfo_linux_parse_multiline_file(
	const char* filename,
	size_t buffer_size,
	cpuinfo_line_callback,
	void* context);
/* Processor count/ID and per-processor cpufreq sysfs accessors. */
CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_max_processors_count(void);
CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_max_possible_processor(uint32_t max_processors_count);
CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_max_present_processor(uint32_t max_processors_count);
CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_processor_cur_frequency(uint32_t processor);
CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_processor_min_frequency(uint32_t processor);
CPUINFO_INTERNAL uint32_t cpuinfo_linux_get_processor_max_frequency(uint32_t processor);
CPUINFO_INTERNAL bool cpuinfo_linux_get_processor_package_id(
	uint32_t processor,
	uint32_t package_id[restrict static 1]);
CPUINFO_INTERNAL bool cpuinfo_linux_get_processor_core_id(uint32_t processor, uint32_t core_id[restrict static 1]);
/* Mark possible/present processors by OR-ing a flag into a strided array of
 * per-processor flag words starting at processor0_flags. */
CPUINFO_INTERNAL bool cpuinfo_linux_detect_possible_processors(
	uint32_t max_processors_count,
	uint32_t* processor0_flags,
	uint32_t processor_struct_size,
	uint32_t possible_flag);
CPUINFO_INTERNAL bool cpuinfo_linux_detect_present_processors(
	uint32_t max_processors_count,
	uint32_t* processor0_flags,
	uint32_t processor_struct_size,
	uint32_t present_flag);
/* Sibling-set callbacks: (processor, sibling range start, end, context). */
typedef bool (*cpuinfo_siblings_callback)(uint32_t, uint32_t, uint32_t, void*);
CPUINFO_INTERNAL bool cpuinfo_linux_detect_core_siblings(
	uint32_t max_processors_count,
	uint32_t processor,
	cpuinfo_siblings_callback callback,
	void* context);
CPUINFO_INTERNAL bool cpuinfo_linux_detect_thread_siblings(
	uint32_t max_processors_count,
	uint32_t processor,
	cpuinfo_siblings_callback callback,
	void* context);
CPUINFO_INTERNAL bool cpuinfo_linux_detect_cluster_cpus(
	uint32_t max_processors_count,
	uint32_t processor,
	cpuinfo_siblings_callback callback,
	void* context);
CPUINFO_INTERNAL bool cpuinfo_linux_detect_core_cpus(
	uint32_t max_processors_count,
	uint32_t processor,
	cpuinfo_siblings_callback callback,
	void* context);
CPUINFO_INTERNAL bool cpuinfo_linux_detect_package_cpus(
	uint32_t max_processors_count,
	uint32_t processor,
	cpuinfo_siblings_callback callback,
	void* context);
extern CPUINFO_INTERNAL const struct cpuinfo_processor** cpuinfo_linux_cpu_to_processor_map;
extern CPUINFO_INTERNAL const struct cpuinfo_core** cpuinfo_linux_cpu_to_core_map;

242
3rdparty/cpuinfo/src/linux/cpulist.c vendored Normal file
View File

@@ -0,0 +1,242 @@
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sched.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#if CPUINFO_MOCK
#include <cpuinfo-mock.h>
#endif
#include <cpuinfo/log.h>
#include <linux/api.h>
/*
* Size, in chars, of the on-stack buffer used for parsing cpu lists.
* This is also the limit on the length of a single entry
* (<cpu-number> or <cpu-number-start>-<cpu-number-end>)
* in the cpu list.
*/
#define BUFFER_SIZE 256
/* Locale-independent */
/* Locale-independent whitespace test: space, tab, LF, or CR only. */
inline static bool is_whitespace(char c) {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r';
}
/*
 * Parse an unsigned decimal number starting at `string`, stopping at `end`
 * or at the first non-digit character. Stores the parsed value in
 * *number_ptr (0 when no digit was consumed) and returns the position one
 * past the last digit consumed. Overflow is not detected.
 */
inline static const char* parse_number(const char* string, const char* end, uint32_t number_ptr[restrict static 1]) {
	uint32_t value = 0;
	const char* position = string;
	for (; position != end; position++) {
		/* Unsigned wrap makes every non-digit character compare >= 10. */
		const uint32_t digit = (uint32_t)(*position) - (uint32_t)'0';
		if (digit >= 10) {
			break;
		}
		value = value * UINT32_C(10) + digit;
	}
	*number_ptr = value;
	return position;
}
/*
 * Parse one comma-separated cpulist entry ("N" or "N-M", with optional
 * surrounding whitespace) and invoke the callback with the half-open range
 * [first, last + 1). Returns the callback's result, or false when the entry
 * is malformed (malformed entries are logged and skipped, not fatal).
 */
inline static bool parse_entry(
	const char* entry_start,
	const char* entry_end,
	cpuinfo_cpulist_callback callback,
	void* context) {
	/* Skip whitespace at the beginning of an entry */
	for (; entry_start != entry_end; entry_start++) {
		if (!is_whitespace(*entry_start)) {
			break;
		}
	}
	/* Skip whitespace at the end of an entry */
	for (; entry_end != entry_start; entry_end--) {
		if (!is_whitespace(entry_end[-1])) {
			break;
		}
	}
	const size_t entry_length = (size_t)(entry_end - entry_start);
	if (entry_length == 0) {
		cpuinfo_log_warning("unexpected zero-length cpu list entry ignored");
		return false;
	}
#if CPUINFO_LOG_DEBUG_PARSERS
	cpuinfo_log_debug("parse cpu list entry \"%.*s\" (%zu chars)", (int)entry_length, entry_start, entry_length);
#endif
	uint32_t first_cpu, last_cpu;
	const char* number_end = parse_number(entry_start, entry_end, &first_cpu);
	if (number_end == entry_start) {
		/* Failed to parse the number; ignore the entry */
		cpuinfo_log_warning(
			"invalid character '%c' in the cpu list entry \"%.*s\": entry is ignored",
			entry_start[0],
			(int)entry_length,
			entry_start);
		return false;
	} else if (number_end == entry_end) {
		/* Completely parsed the entry: single CPU number */
#if CPUINFO_LOG_DEBUG_PARSERS
		cpuinfo_log_debug(
			"cpulist: call callback with list_start = %" PRIu32 ", list_end = %" PRIu32,
			first_cpu,
			first_cpu + 1);
#endif
		return callback(first_cpu, first_cpu + 1, context);
	}
	/* Parse the second part of a "first-last" range entry */
	if (*number_end != '-') {
		cpuinfo_log_warning(
			"invalid character '%c' in the cpu list entry \"%.*s\": entry is ignored",
			*number_end,
			(int)entry_length,
			entry_start);
		return false;
	}
	const char* number_start = number_end + 1;
	number_end = parse_number(number_start, entry_end, &last_cpu);
	if (number_end == number_start) {
		/* Failed to parse the second number; ignore the entry */
		cpuinfo_log_warning(
			"invalid character '%c' in the cpu list entry \"%.*s\": entry is ignored",
			*number_start,
			(int)entry_length,
			entry_start);
		return false;
	}
	if (number_end != entry_end) {
		/* Partially parsed the entry; ignore unparsed characters and
		 * continue with the parsed part */
		cpuinfo_log_warning(
			"ignored invalid characters \"%.*s\" at the end of cpu list entry \"%.*s\"",
			(int)(entry_end - number_end),
			number_start,
			(int)entry_length,
			entry_start);
	}
	if (last_cpu < first_cpu) {
		cpuinfo_log_warning(
			"ignored cpu list entry \"%.*s\": invalid range %" PRIu32 "-%" PRIu32,
			(int)entry_length,
			entry_start,
			first_cpu,
			last_cpu);
		return false;
	}
	/* Parsed both parts of the entry; update CPU set */
#if CPUINFO_LOG_DEBUG_PARSERS
	cpuinfo_log_debug(
		"cpulist: call callback with list_start = %" PRIu32 ", list_end = %" PRIu32, first_cpu, last_cpu + 1);
#endif
	return callback(first_cpu, last_cpu + 1, context);
}
/*
 * Reads a Linux cpulist file (e.g. /sys/devices/system/cpu/possible) in
 * BUFFER_SIZE chunks and invokes callback once per comma-separated entry,
 * delegating per-entry parsing to parse_entry. Parsing continues past
 * entries whose parse/callback fails; the final return value is true only
 * if the file was read successfully and every entry succeeded.
 */
bool cpuinfo_linux_parse_cpulist(const char* filename, cpuinfo_cpulist_callback callback, void* context) {
	bool status = true;
	int file = -1;
	char buffer[BUFFER_SIZE];
#if CPUINFO_LOG_DEBUG_PARSERS
	cpuinfo_log_debug("parsing cpu list from file %s", filename);
#endif
#if CPUINFO_MOCK
	file = cpuinfo_mock_open(filename, O_RDONLY);
#else
	file = open(filename, O_RDONLY);
#endif
	if (file == -1) {
		cpuinfo_log_info("failed to open %s: %s", filename, strerror(errno));
		status = false;
		goto cleanup;
	}
	/* Byte offset within the file; only used for error reporting */
	size_t position = 0;
	const char* buffer_end = &buffer[BUFFER_SIZE];
	/* Start of free space in buffer; bytes before it hold a carried-over
	 * partial entry from the previous read */
	char* data_start = buffer;
	ssize_t bytes_read;
	do {
#if CPUINFO_MOCK
		bytes_read = cpuinfo_mock_read(file, data_start, (size_t)(buffer_end - data_start));
#else
		bytes_read = read(file, data_start, (size_t)(buffer_end - data_start));
#endif
		if (bytes_read < 0) {
			cpuinfo_log_info(
				"failed to read file %s at position %zu: %s", filename, position, strerror(errno));
			status = false;
			goto cleanup;
		}
		position += (size_t)bytes_read;
		const char* data_end = data_start + (size_t)bytes_read;
		const char* entry_start = buffer;
		if (bytes_read == 0) {
			/* No more data in the file: process the remaining text
			 * in the buffer as a single entry */
			const char* entry_end = data_end;
			const bool entry_status = parse_entry(entry_start, entry_end, callback, context);
			status &= entry_status;
		} else {
			const char* entry_end;
			do {
				/* Find the end of the entry, as indicated by a
				 * comma (',') */
				for (entry_end = entry_start; entry_end != data_end; entry_end++) {
					if (*entry_end == ',') {
						break;
					}
				}
				/*
				 * If we located separator at the end of the
				 * entry, parse it. Otherwise, there may be more
				 * data at the end; read the file once again.
				 */
				if (entry_end != data_end) {
					const bool entry_status =
						parse_entry(entry_start, entry_end, callback, context);
					status &= entry_status;
					entry_start = entry_end + 1;
				}
			} while (entry_end != data_end);
			/* Move remaining partial entry data at the end to the
			 * beginning of the buffer */
			const size_t entry_length = (size_t)(entry_end - entry_start);
			memmove(buffer, entry_start, entry_length);
			data_start = &buffer[entry_length];
		}
	} while (bytes_read != 0);
cleanup:
	if (file != -1) {
#if CPUINFO_MOCK
		cpuinfo_mock_close(file);
#else
		close(file);
#endif
		file = -1;
	}
	return status;
}

103
3rdparty/cpuinfo/src/linux/mockfile.c vendored Normal file
View File

@@ -0,0 +1,103 @@
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sched.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#if !CPUINFO_MOCK
#error This file should be built only in mock mode
#endif
#include <arm/linux/api.h>
#include <arm/midr.h>
#include <cpuinfo-mock.h>
#include <cpuinfo/log.h>
static struct cpuinfo_mock_file* cpuinfo_mock_files = NULL;
static uint32_t cpuinfo_mock_file_count = 0;
/*
 * Installs a NULL-terminated array of mock files as the simulated
 * filesystem: marks every file as closed (offset == SIZE_MAX) and records
 * the file count for later descriptor bounds checks.
 */
void CPUINFO_ABI cpuinfo_mock_filesystem(struct cpuinfo_mock_file* files) {
	cpuinfo_log_info("filesystem mocking enabled");
	uint32_t count = 0;
	for (; files[count].path != NULL; count++) {
		/* offset == SIZE_MAX marks the file as not currently open */
		files[count].offset = SIZE_MAX;
	}
	cpuinfo_mock_files = files;
	cpuinfo_mock_file_count = count;
}
/*
 * Mock replacement for open(2). Only O_RDONLY access is supported; the
 * returned descriptor is the file's index in the mock filesystem array.
 * Falls back to the real open() when no mock filesystem is installed.
 * Errors: EACCES (not read-only), ENFILE (already open), ENOENT (no such
 * mock file).
 */
int CPUINFO_ABI cpuinfo_mock_open(const char* path, int oflag) {
	if (cpuinfo_mock_files == NULL) {
		/* Fixed typo in log message: "redictering" -> "redirecting" */
		cpuinfo_log_warning("cpuinfo_mock_open called without mock filesystem; redirecting to open");
		return open(path, oflag);
	}
	for (uint32_t i = 0; i < cpuinfo_mock_file_count; i++) {
		if (strcmp(cpuinfo_mock_files[i].path, path) != 0) {
			continue;
		}
		if (oflag != O_RDONLY) {
			/* Mock files are read-only */
			errno = EACCES;
			return -1;
		}
		if (cpuinfo_mock_files[i].offset != SIZE_MAX) {
			/* File is already open */
			errno = ENFILE;
			return -1;
		}
		cpuinfo_mock_files[i].offset = 0;
		return (int)i;
	}
	errno = ENOENT;
	return -1;
}
/*
 * Mock replacement for close(2): marks the mock file as closed.
 * Falls back to the real close() when no mock filesystem is installed.
 * Returns 0 on success, or -1 with errno == EBADF for an out-of-range or
 * not-open descriptor.
 */
int CPUINFO_ABI cpuinfo_mock_close(int fd) {
	if (cpuinfo_mock_files == NULL) {
		/* Fixed typo in log message: "redictering" -> "redirecting" */
		cpuinfo_log_warning("cpuinfo_mock_close called without mock filesystem; redirecting to close");
		return close(fd);
	}
	/* Descriptor out of range, or file never opened / already closed */
	if ((unsigned int)fd >= cpuinfo_mock_file_count || cpuinfo_mock_files[fd].offset == SIZE_MAX) {
		errno = EBADF;
		return -1;
	}
	cpuinfo_mock_files[fd].offset = SIZE_MAX;
	return 0;
}
/*
 * Mock replacement for read(2): copies up to capacity bytes from the mock
 * file's content at its current offset and advances the offset. Returns
 * the number of bytes copied (0 at end-of-file), or -1 with errno == EBADF
 * for a bad descriptor. Falls back to the real read() when no mock
 * filesystem is installed.
 */
ssize_t CPUINFO_ABI cpuinfo_mock_read(int fd, void* buffer, size_t capacity) {
	if (cpuinfo_mock_files == NULL) {
		/* Fixed typo in log message: "redictering" -> "redirecting" */
		cpuinfo_log_warning("cpuinfo_mock_read called without mock filesystem; redirecting to read");
		return read(fd, buffer, capacity);
	}
	/* Descriptor out of range, or file not open */
	if ((unsigned int)fd >= cpuinfo_mock_file_count || cpuinfo_mock_files[fd].offset == SIZE_MAX) {
		errno = EBADF;
		return -1;
	}
	const size_t offset = cpuinfo_mock_files[fd].offset;
	size_t count = cpuinfo_mock_files[fd].size - offset;
	if (count > capacity) {
		count = capacity;
	}
	/* Cast to const char* before arithmetic: pointer arithmetic on void*
	 * is a GNU extension, not standard C. */
	memcpy(buffer, (const char*)cpuinfo_mock_files[fd].content + offset, count);
	cpuinfo_mock_files[fd].offset += count;
	return (ssize_t)count;
}

113
3rdparty/cpuinfo/src/linux/multiline.c vendored Normal file
View File

@@ -0,0 +1,113 @@
#include <alloca.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#if CPUINFO_MOCK
#include <cpuinfo-mock.h>
#endif
#include <cpuinfo/log.h>
#include <linux/api.h>
/*
 * Reads a text file in a caller-sized stack buffer and invokes callback
 * once per newline-terminated line; the final unterminated remainder is
 * delivered as the last line. Returns false if the file cannot be
 * opened/read or any callback invocation returns false.
 *
 * NOTE(review): a single line longer than buffer_size leaves no free space
 * for the next read (zero-length read looks like EOF) — presumably callers
 * size the buffer generously; confirm at call sites.
 */
bool cpuinfo_linux_parse_multiline_file(
	const char* filename,
	size_t buffer_size,
	cpuinfo_line_callback callback,
	void* context) {
	int file = -1;
	bool status = false;
	/* Stack allocation sized by the caller */
	char* buffer = (char*)alloca(buffer_size);
#if CPUINFO_MOCK
	file = cpuinfo_mock_open(filename, O_RDONLY);
#else
	file = open(filename, O_RDONLY);
#endif
	if (file == -1) {
		cpuinfo_log_info("failed to open %s: %s", filename, strerror(errno));
		goto cleanup;
	}
	/* Only used for error reporting */
	size_t position = 0;
	uint64_t line_number = 1;
	const char* buffer_end = &buffer[buffer_size];
	/* Start of free space; bytes before it hold a carried-over partial line */
	char* data_start = buffer;
	ssize_t bytes_read;
	do {
#if CPUINFO_MOCK
		bytes_read = cpuinfo_mock_read(file, data_start, (size_t)(buffer_end - data_start));
#else
		bytes_read = read(file, data_start, (size_t)(buffer_end - data_start));
#endif
		if (bytes_read < 0) {
			cpuinfo_log_info(
				"failed to read file %s at position %zu: %s", filename, position, strerror(errno));
			goto cleanup;
		}
		position += (size_t)bytes_read;
		const char* data_end = data_start + (size_t)bytes_read;
		const char* line_start = buffer;
		if (bytes_read == 0) {
			/* No more data in the file: process the remaining text
			 * in the buffer as a single entry */
			const char* line_end = data_end;
			if (!callback(line_start, line_end, context, line_number)) {
				goto cleanup;
			}
		} else {
			const char* line_end;
			do {
				/* Find the end of the entry, as indicated by
				 * newline character ('\n')
				 */
				for (line_end = line_start; line_end != data_end; line_end++) {
					if (*line_end == '\n') {
						break;
					}
				}
				/*
				 * If we located separator at the end of the
				 * entry, parse it. Otherwise, there may be more
				 * data at the end; read the file once again.
				 */
				if (line_end != data_end) {
					if (!callback(line_start, line_end, context, line_number++)) {
						goto cleanup;
					}
					line_start = line_end + 1;
				}
			} while (line_end != data_end);
			/* Move remaining partial line data at the end to the
			 * beginning of the buffer */
			const size_t line_length = (size_t)(line_end - line_start);
			memmove(buffer, line_start, line_length);
			data_start = &buffer[line_length];
		}
	} while (bytes_read != 0);
	/* Commit */
	status = true;
cleanup:
	if (file != -1) {
#if CPUINFO_MOCK
		cpuinfo_mock_close(file);
#else
		close(file);
#endif
		file = -1;
	}
	return status;
}

580
3rdparty/cpuinfo/src/linux/processors.c vendored Normal file
View File

@@ -0,0 +1,580 @@
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if !defined(__ANDROID__)
/*
* sched.h is only used for CPU_SETSIZE constant.
* Android NDK headers before platform 21 do have this constant in sched.h
*/
#include <sched.h>
#endif
#include <cpuinfo/log.h>
#include <linux/api.h>
#define STRINGIFY(token) #token
#define KERNEL_MAX_FILENAME "/sys/devices/system/cpu/kernel_max"
#define KERNEL_MAX_FILESIZE 32
#define FREQUENCY_FILENAME_SIZE \
(sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/cpufreq/cpuinfo_max_freq"))
#define CUR_FREQUENCY_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/cpufreq/cpuinfo_cur_freq"
#define MAX_FREQUENCY_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/cpufreq/cpuinfo_max_freq"
#define MIN_FREQUENCY_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/cpufreq/cpuinfo_min_freq"
#define FREQUENCY_FILESIZE 32
#define PACKAGE_ID_FILENAME_SIZE \
(sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/physical_package_id"))
#define PACKAGE_ID_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/physical_package_id"
#define PACKAGE_ID_FILESIZE 32
#define CORE_ID_FILENAME_SIZE (sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/core_id"))
#define CORE_ID_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_id"
#define CORE_ID_FILESIZE 32
#define CORE_CPUS_FILENAME_SIZE (sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/core_cpus_list"))
#define CORE_CPUS_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_cpus_list"
#define CORE_SIBLINGS_FILENAME_SIZE \
(sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/core_siblings_list"))
#define CORE_SIBLINGS_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_siblings_list"
#define CLUSTER_CPUS_FILENAME_SIZE \
(sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/cluster_cpus_list"))
#define CLUSTER_CPUS_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/cluster_cpus_list"
#define PACKAGE_CPUS_FILENAME_SIZE \
(sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/package_cpus_list"))
#define PACKAGE_CPUS_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/package_cpus_list"
#define THREAD_SIBLINGS_FILENAME_SIZE \
(sizeof("/sys/devices/system/cpu/cpu" STRINGIFY(UINT32_MAX) "/topology/thread_siblings_list"))
#define THREAD_SIBLINGS_FILENAME_FORMAT "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/thread_siblings_list"
#define POSSIBLE_CPULIST_FILENAME "/sys/devices/system/cpu/possible"
#define PRESENT_CPULIST_FILENAME "/sys/devices/system/cpu/present"
/*
 * Parses a decimal uint32 from [start, end), stopping at the first
 * non-digit character. Writes the parsed value (0 when no digits are
 * present) to number_ptr and returns a pointer to the first unparsed
 * character (== start when nothing was parsed). No overflow checking.
 */
inline static const char* parse_number(const char* start, const char* end, uint32_t number_ptr[restrict static 1]) {
	uint32_t value = 0;
	const char* cursor = start;
	while (cursor != end) {
		const uint32_t digit = (uint32_t)(uint8_t)(*cursor) - (uint32_t)'0';
		if (digit >= 10) {
			break;
		}
		value = value * UINT32_C(10) + digit;
		cursor++;
	}
	*number_ptr = value;
	return cursor;
}
/* Locale-independent whitespace check: space, tab, LF, or CR only. */
inline static bool is_whitespace(char c) {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r';
}
#if defined(__ANDROID__) && !defined(CPU_SETSIZE)
/*
 * Android NDK headers before platform 21 do not define CPU_SETSIZE,
 * so we hard-code its value, as defined in platform 21 headers
 */
#if defined(__LP64__)
/* 64-bit Android: value of CPU_SETSIZE in platform 21 <sched.h> */
static const uint32_t default_max_processors_count = 1024;
#else
/* 32-bit Android: value of CPU_SETSIZE in platform 21 <sched.h> */
static const uint32_t default_max_processors_count = 32;
#endif
#else
/* Fallback processor-count limit used when kernel_max cannot be parsed */
static const uint32_t default_max_processors_count = CPU_SETSIZE;
#endif
/*
 * Generic cpuinfo_smallfile_callback: parses one unsigned decimal number
 * from [text_start, text_end) and stores it into *(uint32_t*)context.
 * Trailing whitespace after the number is accepted silently; any other
 * trailing characters produce a warning but are ignored.
 * Returns false when the file is empty or does not start with a digit.
 */
static bool uint32_parser(const char* filename, const char* text_start, const char* text_end, void* context) {
	if (text_start == text_end) {
		/*
		 * Fix: report the file actually being parsed. The original
		 * message hard-coded KERNEL_MAX_FILENAME, but this callback is
		 * shared by the frequency/core-id/package-id parsers too.
		 */
		cpuinfo_log_error("failed to parse file %s: file is empty", filename);
		return false;
	}
	uint32_t kernel_max = 0;
	const char* parsed_end = parse_number(text_start, text_end, &kernel_max);
	if (parsed_end == text_start) {
		cpuinfo_log_error(
			"failed to parse file %s: \"%.*s\" is not an unsigned number",
			filename,
			(int)(text_end - text_start),
			text_start);
		return false;
	} else {
		/* Warn (once) about any non-whitespace trailing characters */
		for (const char* char_ptr = parsed_end; char_ptr != text_end; char_ptr++) {
			if (!is_whitespace(*char_ptr)) {
				cpuinfo_log_warning(
					"non-whitespace characters \"%.*s\" following number in file %s are ignored",
					(int)(text_end - char_ptr),
					char_ptr,
					filename);
				break;
			}
		}
	}
	uint32_t* kernel_max_ptr = (uint32_t*)context;
	*kernel_max_ptr = kernel_max;
	return true;
}
/*
 * Returns an upper bound on the number of processors: kernel_max + 1 as
 * read from /sys/devices/system/cpu/kernel_max, or the platform-default
 * limit (CPU_SETSIZE-derived) when the file cannot be parsed.
 */
uint32_t cpuinfo_linux_get_max_processors_count(void) {
	uint32_t kernel_max;
	if (!cpuinfo_linux_parse_small_file(KERNEL_MAX_FILENAME, KERNEL_MAX_FILESIZE, uint32_parser, &kernel_max)) {
		cpuinfo_log_warning(
			"using platform-default max processors count = %" PRIu32, default_max_processors_count);
		return default_max_processors_count;
	}
	cpuinfo_log_debug("parsed kernel_max value of %" PRIu32 " from %s", kernel_max, KERNEL_MAX_FILENAME);
	if (kernel_max >= default_max_processors_count) {
		/* Suspiciously large value; warn but still honor it */
		cpuinfo_log_warning(
			"kernel_max value of %" PRIu32 " parsed from %s exceeds platform-default limit %" PRIu32,
			kernel_max,
			KERNEL_MAX_FILENAME,
			default_max_processors_count - 1);
	}
	/* kernel_max is the highest valid processor index, hence +1 */
	return kernel_max + 1;
}
/*
 * Reads /sys/devices/system/cpu/cpu<N>/cpufreq/cpuinfo_cur_freq.
 * Returns the current frequency in kHz, or 0 when the file cannot be
 * read or parsed.
 */
uint32_t cpuinfo_linux_get_processor_cur_frequency(uint32_t processor) {
	char cur_frequency_filename[FREQUENCY_FILENAME_SIZE];
	const int chars_formatted =
		snprintf(cur_frequency_filename, FREQUENCY_FILENAME_SIZE, CUR_FREQUENCY_FILENAME_FORMAT, processor);
	if ((unsigned int)chars_formatted >= FREQUENCY_FILENAME_SIZE) {
		cpuinfo_log_warning("failed to format filename for current frequency of processor %" PRIu32, processor);
		return 0;
	}
	uint32_t cur_frequency;
	if (cpuinfo_linux_parse_small_file(cur_frequency_filename, FREQUENCY_FILESIZE, uint32_parser, &cur_frequency)) {
		/* Fixed typo in debug message: "currrent" -> "current" */
		cpuinfo_log_debug(
			"parsed current frequency value of %" PRIu32 " KHz for logical processor %" PRIu32 " from %s",
			cur_frequency,
			processor,
			cur_frequency_filename);
		return cur_frequency;
	} else {
		cpuinfo_log_warning(
			"failed to parse current frequency for processor %" PRIu32 " from %s",
			processor,
			cur_frequency_filename);
		return 0;
	}
}
/*
 * Reads /sys/devices/system/cpu/cpu<N>/cpufreq/cpuinfo_max_freq.
 * Returns the maximum frequency in kHz, or 0 when the file cannot be
 * read or parsed.
 */
uint32_t cpuinfo_linux_get_processor_max_frequency(uint32_t processor) {
	char max_frequency_filename[FREQUENCY_FILENAME_SIZE];
	const int chars_formatted =
		snprintf(max_frequency_filename, FREQUENCY_FILENAME_SIZE, MAX_FREQUENCY_FILENAME_FORMAT, processor);
	if ((unsigned int)chars_formatted >= FREQUENCY_FILENAME_SIZE) {
		cpuinfo_log_warning("failed to format filename for max frequency of processor %" PRIu32, processor);
		return 0;
	}
	uint32_t max_frequency;
	if (!cpuinfo_linux_parse_small_file(max_frequency_filename, FREQUENCY_FILESIZE, uint32_parser, &max_frequency)) {
		cpuinfo_log_warning(
			"failed to parse max frequency for processor %" PRIu32 " from %s",
			processor,
			max_frequency_filename);
		return 0;
	}
	cpuinfo_log_debug(
		"parsed max frequency value of %" PRIu32 " KHz for logical processor %" PRIu32 " from %s",
		max_frequency,
		processor,
		max_frequency_filename);
	return max_frequency;
}
/*
 * Reads /sys/devices/system/cpu/cpu<N>/cpufreq/cpuinfo_min_freq.
 * Returns the minimum frequency in kHz, or 0 when the file cannot be
 * read or parsed.
 */
uint32_t cpuinfo_linux_get_processor_min_frequency(uint32_t processor) {
	char min_frequency_filename[FREQUENCY_FILENAME_SIZE];
	const int chars_formatted =
		snprintf(min_frequency_filename, FREQUENCY_FILENAME_SIZE, MIN_FREQUENCY_FILENAME_FORMAT, processor);
	if ((unsigned int)chars_formatted >= FREQUENCY_FILENAME_SIZE) {
		cpuinfo_log_warning("failed to format filename for min frequency of processor %" PRIu32, processor);
		return 0;
	}
	uint32_t min_frequency;
	if (!cpuinfo_linux_parse_small_file(min_frequency_filename, FREQUENCY_FILESIZE, uint32_parser, &min_frequency)) {
		/*
		 * Info (not warning) severity: min frequency is only useful
		 * for clustering, while max frequency also feeds peak FLOPS
		 * calculation.
		 */
		cpuinfo_log_info(
			"failed to parse min frequency for processor %" PRIu32 " from %s",
			processor,
			min_frequency_filename);
		return 0;
	}
	cpuinfo_log_debug(
		"parsed min frequency value of %" PRIu32 " KHz for logical processor %" PRIu32 " from %s",
		min_frequency,
		processor,
		min_frequency_filename);
	return min_frequency;
}
/*
 * Reads /sys/devices/system/cpu/cpu<N>/topology/core_id into *core_id_ptr.
 * Returns true on success, false when the filename cannot be formatted or
 * the file cannot be parsed.
 */
bool cpuinfo_linux_get_processor_core_id(uint32_t processor, uint32_t core_id_ptr[restrict static 1]) {
	/*
	 * Fix: the buffer was declared with PACKAGE_ID_FILENAME_SIZE while
	 * snprintf was bounded by CORE_ID_FILENAME_SIZE; use the matching
	 * macro consistently.
	 */
	char core_id_filename[CORE_ID_FILENAME_SIZE];
	const int chars_formatted =
		snprintf(core_id_filename, CORE_ID_FILENAME_SIZE, CORE_ID_FILENAME_FORMAT, processor);
	if ((unsigned int)chars_formatted >= CORE_ID_FILENAME_SIZE) {
		cpuinfo_log_warning("failed to format filename for core id of processor %" PRIu32, processor);
		/* Fix: was `return 0` in a bool-returning function */
		return false;
	}
	uint32_t core_id;
	if (cpuinfo_linux_parse_small_file(core_id_filename, CORE_ID_FILESIZE, uint32_parser, &core_id)) {
		cpuinfo_log_debug(
			"parsed core id value of %" PRIu32 " for logical processor %" PRIu32 " from %s",
			core_id,
			processor,
			core_id_filename);
		*core_id_ptr = core_id;
		return true;
	} else {
		cpuinfo_log_info(
			"failed to parse core id for processor %" PRIu32 " from %s", processor, core_id_filename);
		return false;
	}
}
/*
 * Reads /sys/devices/system/cpu/cpu<N>/topology/physical_package_id into
 * *package_id_ptr. Returns true on success, false when the filename cannot
 * be formatted or the file cannot be parsed.
 */
bool cpuinfo_linux_get_processor_package_id(uint32_t processor, uint32_t package_id_ptr[restrict static 1]) {
	char package_id_filename[PACKAGE_ID_FILENAME_SIZE];
	const int chars_formatted =
		snprintf(package_id_filename, PACKAGE_ID_FILENAME_SIZE, PACKAGE_ID_FILENAME_FORMAT, processor);
	if ((unsigned int)chars_formatted >= PACKAGE_ID_FILENAME_SIZE) {
		cpuinfo_log_warning("failed to format filename for package id of processor %" PRIu32, processor);
		/* Fix: was `return 0` in a bool-returning function */
		return false;
	}
	uint32_t package_id;
	if (cpuinfo_linux_parse_small_file(package_id_filename, PACKAGE_ID_FILESIZE, uint32_parser, &package_id)) {
		cpuinfo_log_debug(
			"parsed package id value of %" PRIu32 " for logical processor %" PRIu32 " from %s",
			package_id,
			processor,
			package_id_filename);
		*package_id_ptr = package_id;
		return true;
	} else {
		cpuinfo_log_info(
			"failed to parse package id for processor %" PRIu32 " from %s", processor, package_id_filename);
		return false;
	}
}
/*
 * cpuinfo_cpulist_callback that tracks the highest processor number seen.
 * context points to a uint32_t holding the running maximum; it is raised
 * when the last CPU of the half-open [start, end) range exceeds it.
 * Always returns true (never aborts parsing).
 */
static bool max_processor_number_parser(uint32_t processor_list_start, uint32_t processor_list_end, void* context) {
	uint32_t* max_ptr = (uint32_t*)context;
	const uint32_t last_in_range = processor_list_end - 1;
	if (last_in_range > *max_ptr) {
		*max_ptr = last_in_range;
	}
	return true;
}
/*
 * Returns the highest possible processor number parsed from
 * /sys/devices/system/cpu/possible, clamped to max_processors_count - 1.
 * Returns UINT32_MAX when the file cannot be parsed.
 */
uint32_t cpuinfo_linux_get_max_possible_processor(uint32_t max_processors_count) {
	uint32_t max_possible_processor = 0;
	if (!cpuinfo_linux_parse_cpulist(
		    POSSIBLE_CPULIST_FILENAME, max_processor_number_parser, &max_possible_processor)) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
		cpuinfo_log_error("failed to parse the list of possible processors in %s", POSSIBLE_CPULIST_FILENAME);
#else
		cpuinfo_log_warning("failed to parse the list of possible processors in %s", POSSIBLE_CPULIST_FILENAME);
#endif
		return UINT32_MAX;
	}
	if (max_possible_processor < max_processors_count) {
		return max_possible_processor;
	}
	cpuinfo_log_warning(
		"maximum possible processor number %" PRIu32 " exceeds system limit %" PRIu32
		": truncating to the latter",
		max_possible_processor,
		max_processors_count - 1);
	return max_processors_count - 1;
}
/*
 * Returns the highest present processor number parsed from
 * /sys/devices/system/cpu/present, clamped to max_processors_count - 1.
 * Returns UINT32_MAX when the file cannot be parsed.
 */
uint32_t cpuinfo_linux_get_max_present_processor(uint32_t max_processors_count) {
	uint32_t max_present_processor = 0;
	if (!cpuinfo_linux_parse_cpulist(
		    PRESENT_CPULIST_FILENAME, max_processor_number_parser, &max_present_processor)) {
#if CPUINFO_ARCH_ARM || CPUINFO_ARCH_ARM64
		cpuinfo_log_error("failed to parse the list of present processors in %s", PRESENT_CPULIST_FILENAME);
#else
		cpuinfo_log_warning("failed to parse the list of present processors in %s", PRESENT_CPULIST_FILENAME);
#endif
		return UINT32_MAX;
	}
	if (max_present_processor < max_processors_count) {
		return max_present_processor;
	}
	cpuinfo_log_warning(
		"maximum present processor number %" PRIu32 " exceeds system limit %" PRIu32
		": truncating to the latter",
		max_present_processor,
		max_processors_count - 1);
	return max_processors_count - 1;
}
/*
 * Context for detect_processor_parser: describes an array of per-processor
 * flag words laid out with a fixed byte stride (processor_struct_size),
 * starting at processor0_flags.
 */
struct detect_processors_context {
	uint32_t max_processors_count;
	uint32_t* processor0_flags;
	uint32_t processor_struct_size;
	uint32_t detected_flag;
};
/*
 * cpuinfo_cpulist_callback that ORs detected_flag into the flags word of
 * every processor in [processor_list_start, processor_list_end), clipped
 * to max_processors_count. Always returns true.
 */
static bool detect_processor_parser(uint32_t processor_list_start, uint32_t processor_list_end, void* context) {
	const struct detect_processors_context* ctx = (const struct detect_processors_context*)context;
	uint32_t list_limit = processor_list_end;
	if (list_limit > ctx->max_processors_count) {
		list_limit = ctx->max_processors_count;
	}
	for (uint32_t cpu = processor_list_start; cpu < list_limit; cpu++) {
		/* Flags word of processor `cpu`, located via byte stride */
		uint32_t* flags_ptr =
			(uint32_t*)((uintptr_t)ctx->processor0_flags + (uintptr_t)ctx->processor_struct_size * cpu);
		*flags_ptr |= ctx->detected_flag;
	}
	return true;
}
/*
 * Sets possible_flag in the per-processor flags array (stride
 * processor_struct_size bytes) for every CPU listed in
 * /sys/devices/system/cpu/possible. Returns false when the file cannot
 * be parsed.
 */
bool cpuinfo_linux_detect_possible_processors(
	uint32_t max_processors_count,
	uint32_t* processor0_flags,
	uint32_t processor_struct_size,
	uint32_t possible_flag) {
	struct detect_processors_context context = {
		.max_processors_count = max_processors_count,
		.processor0_flags = processor0_flags,
		.processor_struct_size = processor_struct_size,
		.detected_flag = possible_flag,
	};
	if (!cpuinfo_linux_parse_cpulist(POSSIBLE_CPULIST_FILENAME, detect_processor_parser, &context)) {
		cpuinfo_log_warning("failed to parse the list of possible processors in %s", POSSIBLE_CPULIST_FILENAME);
		return false;
	}
	return true;
}
/*
 * Sets present_flag in the per-processor flags array (stride
 * processor_struct_size bytes) for every CPU listed in
 * /sys/devices/system/cpu/present. Returns false when the file cannot
 * be parsed.
 */
bool cpuinfo_linux_detect_present_processors(
	uint32_t max_processors_count,
	uint32_t* processor0_flags,
	uint32_t processor_struct_size,
	uint32_t present_flag) {
	struct detect_processors_context context = {
		.max_processors_count = max_processors_count,
		.processor0_flags = processor0_flags,
		.processor_struct_size = processor_struct_size,
		.detected_flag = present_flag,
	};
	if (!cpuinfo_linux_parse_cpulist(PRESENT_CPULIST_FILENAME, detect_processor_parser, &context)) {
		cpuinfo_log_warning("failed to parse the list of present processors in %s", PRESENT_CPULIST_FILENAME);
		return false;
	}
	return true;
}
/*
 * Context for siblings_parser: identifies the processor whose sibling
 * list is being parsed, the sibling group name (for log messages), the
 * processor-count limit, and the user callback to forward ranges to.
 */
struct siblings_context {
	const char* group_name;
	uint32_t max_processors_count;
	uint32_t processor;
	cpuinfo_siblings_callback callback;
	void* callback_context;
};
/*
 * cpuinfo_cpulist_callback adapter: clips the sibling range to
 * max_processors_count (warning about any ignored tail) and forwards it,
 * together with the owning processor, to the user callback.
 */
static bool siblings_parser(uint32_t sibling_list_start, uint32_t sibling_list_end, struct siblings_context* context) {
	if (sibling_list_end > context->max_processors_count) {
		cpuinfo_log_warning(
			"ignore %s siblings %" PRIu32 "-%" PRIu32 " of processor %" PRIu32,
			context->group_name,
			context->max_processors_count,
			sibling_list_end - 1,
			context->processor);
		sibling_list_end = context->max_processors_count;
	}
	return context->callback(context->processor, sibling_list_start, sibling_list_end, context->callback_context);
}
/*
 * Parses /sys/devices/system/cpu/cpu<N>/topology/core_cpus_list and
 * reports each sibling range (clipped to max_processors_count) through
 * callback. Returns false when the filename cannot be formatted or the
 * file cannot be parsed.
 */
bool cpuinfo_linux_detect_core_cpus(
	uint32_t max_processors_count,
	uint32_t processor,
	cpuinfo_siblings_callback callback,
	void* context) {
	char core_cpus_filename[CORE_CPUS_FILENAME_SIZE];
	const int chars_formatted =
		snprintf(core_cpus_filename, CORE_CPUS_FILENAME_SIZE, CORE_CPUS_FILENAME_FORMAT, processor);
	if ((unsigned int)chars_formatted >= CORE_CPUS_FILENAME_SIZE) {
		cpuinfo_log_warning("failed to format filename for core cpus of processor %" PRIu32, processor);
		return false;
	}
	struct siblings_context siblings_context = {
		.group_name = "cpus",
		.max_processors_count = max_processors_count,
		.processor = processor,
		.callback = callback,
		.callback_context = context,
	};
	if (!cpuinfo_linux_parse_cpulist(
		    core_cpus_filename, (cpuinfo_cpulist_callback)siblings_parser, &siblings_context)) {
		cpuinfo_log_info(
			"failed to parse the list of core cpus for processor %" PRIu32 " from %s",
			processor,
			core_cpus_filename);
		return false;
	}
	return true;
}
/*
 * Parses /sys/devices/system/cpu/cpu<N>/topology/core_siblings_list
 * (package-level siblings) and reports each range (clipped to
 * max_processors_count) through callback. Returns false when the filename
 * cannot be formatted or the file cannot be parsed.
 */
bool cpuinfo_linux_detect_core_siblings(
	uint32_t max_processors_count,
	uint32_t processor,
	cpuinfo_siblings_callback callback,
	void* context) {
	char core_siblings_filename[CORE_SIBLINGS_FILENAME_SIZE];
	const int chars_formatted =
		snprintf(core_siblings_filename, CORE_SIBLINGS_FILENAME_SIZE, CORE_SIBLINGS_FILENAME_FORMAT, processor);
	if ((unsigned int)chars_formatted >= CORE_SIBLINGS_FILENAME_SIZE) {
		cpuinfo_log_warning("failed to format filename for core siblings of processor %" PRIu32, processor);
		return false;
	}
	struct siblings_context siblings_context = {
		.group_name = "package",
		.max_processors_count = max_processors_count,
		.processor = processor,
		.callback = callback,
		.callback_context = context,
	};
	if (!cpuinfo_linux_parse_cpulist(
		    core_siblings_filename, (cpuinfo_cpulist_callback)siblings_parser, &siblings_context)) {
		cpuinfo_log_info(
			"failed to parse the list of core siblings for processor %" PRIu32 " from %s",
			processor,
			core_siblings_filename);
		return false;
	}
	return true;
}
/*
 * Parses /sys/devices/system/cpu/cpu<N>/topology/thread_siblings_list
 * (core-level siblings) and reports each range (clipped to
 * max_processors_count) through callback. Returns false when the filename
 * cannot be formatted or the file cannot be parsed.
 */
bool cpuinfo_linux_detect_thread_siblings(
	uint32_t max_processors_count,
	uint32_t processor,
	cpuinfo_siblings_callback callback,
	void* context) {
	char thread_siblings_filename[THREAD_SIBLINGS_FILENAME_SIZE];
	const int chars_formatted = snprintf(
		thread_siblings_filename, THREAD_SIBLINGS_FILENAME_SIZE, THREAD_SIBLINGS_FILENAME_FORMAT, processor);
	if ((unsigned int)chars_formatted >= THREAD_SIBLINGS_FILENAME_SIZE) {
		cpuinfo_log_warning("failed to format filename for thread siblings of processor %" PRIu32, processor);
		return false;
	}
	struct siblings_context siblings_context = {
		.group_name = "core",
		.max_processors_count = max_processors_count,
		.processor = processor,
		.callback = callback,
		.callback_context = context,
	};
	if (!cpuinfo_linux_parse_cpulist(
		    thread_siblings_filename, (cpuinfo_cpulist_callback)siblings_parser, &siblings_context)) {
		cpuinfo_log_info(
			"failed to parse the list of thread siblings for processor %" PRIu32 " from %s",
			processor,
			thread_siblings_filename);
		return false;
	}
	return true;
}
/*
 * Parses /sys/devices/system/cpu/cpu<N>/topology/cluster_cpus_list and
 * reports each sibling range (clipped to max_processors_count) through
 * callback. Returns false when the filename cannot be formatted or the
 * file cannot be parsed.
 */
bool cpuinfo_linux_detect_cluster_cpus(
	uint32_t max_processors_count,
	uint32_t processor,
	cpuinfo_siblings_callback callback,
	void* context) {
	char cluster_cpus_filename[CLUSTER_CPUS_FILENAME_SIZE];
	const int chars_formatted =
		snprintf(cluster_cpus_filename, CLUSTER_CPUS_FILENAME_SIZE, CLUSTER_CPUS_FILENAME_FORMAT, processor);
	if ((unsigned int)chars_formatted >= CLUSTER_CPUS_FILENAME_SIZE) {
		cpuinfo_log_warning("failed to format filename for cluster cpus of processor %" PRIu32, processor);
		return false;
	}
	struct siblings_context siblings_context = {
		.group_name = "cluster",
		.max_processors_count = max_processors_count,
		.processor = processor,
		.callback = callback,
		.callback_context = context,
	};
	if (!cpuinfo_linux_parse_cpulist(
		    cluster_cpus_filename, (cpuinfo_cpulist_callback)siblings_parser, &siblings_context)) {
		cpuinfo_log_info(
			"failed to parse the list of cluster cpus for processor %" PRIu32 " from %s",
			processor,
			cluster_cpus_filename);
		return false;
	}
	return true;
}
/*
 * Parses /sys/devices/system/cpu/cpu<N>/topology/package_cpus_list and
 * reports each sibling range (clipped to max_processors_count) through
 * callback. Returns false when the filename cannot be formatted or the
 * file cannot be parsed.
 */
bool cpuinfo_linux_detect_package_cpus(
	uint32_t max_processors_count,
	uint32_t processor,
	cpuinfo_siblings_callback callback,
	void* context) {
	char package_cpus_filename[PACKAGE_CPUS_FILENAME_SIZE];
	const int chars_formatted =
		snprintf(package_cpus_filename, PACKAGE_CPUS_FILENAME_SIZE, PACKAGE_CPUS_FILENAME_FORMAT, processor);
	if ((unsigned int)chars_formatted >= PACKAGE_CPUS_FILENAME_SIZE) {
		cpuinfo_log_warning("failed to format filename for package cpus of processor %" PRIu32, processor);
		return false;
	}
	struct siblings_context siblings_context = {
		.group_name = "package",
		.max_processors_count = max_processors_count,
		.processor = processor,
		.callback = callback,
		.callback_context = context,
	};
	if (!cpuinfo_linux_parse_cpulist(
		    package_cpus_filename, (cpuinfo_cpulist_callback)siblings_parser, &siblings_context)) {
		cpuinfo_log_info(
			"failed to parse the list of package cpus for processor %" PRIu32 " from %s",
			processor,
			package_cpus_filename);
		return false;
	}
	return true;
}

78
3rdparty/cpuinfo/src/linux/smallfile.c vendored Normal file
View File

@@ -0,0 +1,78 @@
#include <alloca.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#if CPUINFO_MOCK
#include <cpuinfo-mock.h>
#endif
#include <cpuinfo/log.h>
#include <linux/api.h>
/*
 * Reads a small file fully into a stack buffer of buffer_size bytes and
 * passes the [start, end) text range to callback. Returns false when the
 * file cannot be opened/read, or when its content does not fit in the
 * buffer with at least one byte to spare; otherwise returns the
 * callback's result.
 */
bool cpuinfo_linux_parse_small_file(
	const char* filename,
	size_t buffer_size,
	cpuinfo_smallfile_callback callback,
	void* context) {
	int file = -1;
	bool status = false;
	/* Stack allocation: callers pass small sizes (tens of bytes) */
	char* buffer = (char*)alloca(buffer_size);
#if CPUINFO_LOG_DEBUG_PARSERS
	cpuinfo_log_debug("parsing small file %s", filename);
#endif
#if CPUINFO_MOCK
	file = cpuinfo_mock_open(filename, O_RDONLY);
#else
	file = open(filename, O_RDONLY);
#endif
	if (file == -1) {
		cpuinfo_log_info("failed to open %s: %s", filename, strerror(errno));
		goto cleanup;
	}
	size_t buffer_position = 0;
	ssize_t bytes_read;
	do {
#if CPUINFO_MOCK
		bytes_read = cpuinfo_mock_read(file, &buffer[buffer_position], buffer_size - buffer_position);
#else
		bytes_read = read(file, &buffer[buffer_position], buffer_size - buffer_position);
#endif
		if (bytes_read < 0) {
			cpuinfo_log_info(
				"failed to read file %s at position %zu: %s",
				filename,
				buffer_position,
				strerror(errno));
			goto cleanup;
		}
		buffer_position += (size_t)bytes_read;
		/* A completely full buffer is treated as truncation: >= (not >)
		 * so the next read always has capacity left */
		if (buffer_position >= buffer_size) {
			cpuinfo_log_error(
				"failed to read file %s: insufficient buffer of size %zu", filename, buffer_size);
			goto cleanup;
		}
	} while (bytes_read != 0);
	status = callback(filename, buffer, &buffer[buffer_position], context);
cleanup:
	if (file != -1) {
#if CPUINFO_MOCK
		cpuinfo_mock_close(file);
#else
		close(file);
#endif
		file = -1;
	}
	return status;
}

203
3rdparty/cpuinfo/src/log.c vendored Normal file
View File

@@ -0,0 +1,203 @@
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif
#if defined(__ANDROID__)
#include <android/log.h>
#endif
#if defined(__hexagon__)
#include <qurt_printf.h>
#endif
#ifndef CPUINFO_LOG_TO_STDIO
#if defined(__ANDROID__)
#define CPUINFO_LOG_TO_STDIO 0
#else
#define CPUINFO_LOG_TO_STDIO 1
#endif
#endif
#include <cpuinfo/log.h>
/* Messages up to this size are formatted entirely on-stack, and don't allocate
* heap memory */
#define CPUINFO_LOG_STACK_BUFFER_SIZE 1024
#ifdef _WIN32
#define CPUINFO_LOG_NEWLINE_LENGTH 2
#define CPUINFO_LOG_STDERR STD_ERROR_HANDLE
#define CPUINFO_LOG_STDOUT STD_OUTPUT_HANDLE
#elif defined(__hexagon__)
#define CPUINFO_LOG_NEWLINE_LENGTH 1
#define CPUINFO_LOG_STDERR 0
#define CPUINFO_LOG_STDOUT 0
#else
#define CPUINFO_LOG_NEWLINE_LENGTH 1
#define CPUINFO_LOG_STDERR STDERR_FILENO
#define CPUINFO_LOG_STDOUT STDOUT_FILENO
#endif
#if CPUINFO_LOG_TO_STDIO
/*
 * Formats "<prefix><message><newline>" and emits it via a single write to
 * the given stdio handle (Win32 handle constant, POSIX fd, or ignored on
 * hexagon). Messages that fit are formatted entirely in a stack buffer;
 * longer messages fall back to one heap allocation. Formatting errors and
 * allocation failures drop the message silently.
 */
static void cpuinfo_vlog(
	int output_handle,
	const char* prefix,
	size_t prefix_length,
	const char* format,
	va_list args) {
	char stack_buffer[CPUINFO_LOG_STACK_BUFFER_SIZE];
	char* heap_buffer = NULL;
	char* out_buffer = &stack_buffer[0];
	/* The first call to vsnprintf will clobber args, thus need a copy in
	 * case a second vsnprintf call is needed */
	va_list args_copy;
	va_copy(args_copy, args);
	/* prefix is pre-formatted (not NUL-terminated); copy it verbatim */
	memcpy(stack_buffer, prefix, prefix_length * sizeof(char));
	assert((prefix_length + CPUINFO_LOG_NEWLINE_LENGTH) * sizeof(char) <= CPUINFO_LOG_STACK_BUFFER_SIZE);
	/* First attempt: format into the remainder of the stack buffer */
	const int format_chars = vsnprintf(
		&stack_buffer[prefix_length],
		CPUINFO_LOG_STACK_BUFFER_SIZE - (prefix_length + CPUINFO_LOG_NEWLINE_LENGTH) * sizeof(char),
		format,
		args);
	if (format_chars < 0) {
		/* Format error in the message: silently ignore this particular
		 * message. */
		goto cleanup;
	}
	const size_t format_length = (size_t)format_chars;
	if ((prefix_length + format_length + CPUINFO_LOG_NEWLINE_LENGTH) * sizeof(char) >
	    CPUINFO_LOG_STACK_BUFFER_SIZE) {
		/* Allocate a buffer on heap, and vsnprintf to this buffer */
		const size_t heap_buffer_size =
			(prefix_length + format_length + CPUINFO_LOG_NEWLINE_LENGTH) * sizeof(char);
#if _WIN32
		heap_buffer = HeapAlloc(GetProcessHeap(), 0, heap_buffer_size);
#else
		heap_buffer = malloc(heap_buffer_size);
#endif
		if (heap_buffer == NULL) {
			goto cleanup;
		}
		/* Copy pre-formatted prefix into the on-heap buffer */
		memcpy(heap_buffer, prefix, prefix_length * sizeof(char));
		/* Second formatting pass, with the clobber-safe copy of args */
		vsnprintf(
			&heap_buffer[prefix_length],
			(format_length + CPUINFO_LOG_NEWLINE_LENGTH) * sizeof(char),
			format,
			args_copy);
		out_buffer = heap_buffer;
	}
#ifdef _WIN32
	/* Windows: CRLF line ending, written via the Win32 standard handle */
	out_buffer[prefix_length + format_length] = '\r';
	out_buffer[prefix_length + format_length + 1] = '\n';
	DWORD bytes_written;
	WriteFile(
		GetStdHandle((DWORD)output_handle),
		out_buffer,
		(prefix_length + format_length + CPUINFO_LOG_NEWLINE_LENGTH) * sizeof(char),
		&bytes_written,
		NULL);
#elif defined(__hexagon__)
	/* NOTE(review): no newline is appended on this path before printing —
	 * confirm that is intended. */
	qurt_printf("%s", out_buffer);
#else
	out_buffer[prefix_length + format_length] = '\n';
	ssize_t bytes_written = write(
		output_handle, out_buffer, (prefix_length + format_length + CPUINFO_LOG_NEWLINE_LENGTH) * sizeof(char));
	(void)bytes_written;
#endif
cleanup:
#ifdef _WIN32
	/* NOTE(review): reached with heap_buffer possibly NULL; HeapFree on a
	 * NULL block is not documented as safe — confirm this is benign. */
	HeapFree(GetProcessHeap(), 0, heap_buffer);
#else
	free(heap_buffer);
#endif
	va_end(args_copy);
}
#elif defined(__ANDROID__) && CPUINFO_LOG_LEVEL > CPUINFO_LOG_NONE
static const char cpuinfo_module[] = "XNNPACK";
#endif
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_DEBUG
/* Emits a debug-severity log message to stdout (or the Android log). */
void cpuinfo_vlog_debug(const char* format, va_list args) {
#if CPUINFO_LOG_TO_STDIO
	/* 17-character prefix; cpuinfo_vlog reads exactly the given length, so
	 * the literal's implicit NUL terminator is never used. */
	static const char debug_prefix[] = "Debug (cpuinfo): ";
	cpuinfo_vlog(CPUINFO_LOG_STDOUT, debug_prefix, 17, format, args);
#elif defined(__ANDROID__)
	__android_log_vprint(ANDROID_LOG_DEBUG, cpuinfo_module, format, args);
#else
#error "Platform-specific implementation required"
#endif
}
#endif
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_INFO
/* Emits an info-severity log message to stdout (or the Android log). */
void cpuinfo_vlog_info(const char* format, va_list args) {
#if CPUINFO_LOG_TO_STDIO
	/* 16-character prefix; cpuinfo_vlog reads exactly the given length, so
	 * the literal's implicit NUL terminator is never used. */
	static const char info_prefix[] = "Note (cpuinfo): ";
	cpuinfo_vlog(CPUINFO_LOG_STDOUT, info_prefix, 16, format, args);
#elif defined(__ANDROID__)
	__android_log_vprint(ANDROID_LOG_INFO, cpuinfo_module, format, args);
#else
#error "Platform-specific implementation required"
#endif
}
#endif
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_WARNING
/* Emits a warning-severity log message to stderr (or the Android log). */
void cpuinfo_vlog_warning(const char* format, va_list args) {
#if CPUINFO_LOG_TO_STDIO
	/* 20-character prefix; cpuinfo_vlog reads exactly the given length, so
	 * the literal's implicit NUL terminator is never used. */
	static const char warning_prefix[] = "Warning in cpuinfo: ";
	cpuinfo_vlog(CPUINFO_LOG_STDERR, warning_prefix, 20, format, args);
#elif defined(__ANDROID__)
	__android_log_vprint(ANDROID_LOG_WARN, cpuinfo_module, format, args);
#else
#error "Platform-specific implementation required"
#endif
}
#endif
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_ERROR
void cpuinfo_vlog_error(const char* format, va_list args) {
#if CPUINFO_LOG_TO_STDIO
static const char error_prefix[18] = {
'E', 'r', 'r', 'o', 'r', ' ', 'i', 'n', ' ', 'c', 'p', 'u', 'i', 'n', 'f', 'o', ':', ' '};
cpuinfo_vlog(CPUINFO_LOG_STDERR, error_prefix, 18, format, args);
#elif defined(__ANDROID__)
__android_log_vprint(ANDROID_LOG_ERROR, cpuinfo_module, format, args);
#else
#error "Platform-specific implementation required"
#endif
}
#endif
#if CPUINFO_LOG_LEVEL >= CPUINFO_LOG_FATAL
/* Emits a fatal-severity log message to stderr (or the Android log). */
void cpuinfo_vlog_fatal(const char* format, va_list args) {
#if CPUINFO_LOG_TO_STDIO
	/* 24-character prefix; cpuinfo_vlog reads exactly the given length, so
	 * the literal's implicit NUL terminator is never used. */
	static const char fatal_prefix[] = "Fatal error in cpuinfo: ";
	cpuinfo_vlog(CPUINFO_LOG_STDERR, fatal_prefix, 24, format, args);
#elif defined(__ANDROID__)
	__android_log_vprint(ANDROID_LOG_FATAL, cpuinfo_module, format, args);
#else
#error "Platform-specific implementation required"
#endif
}
#endif

14
3rdparty/cpuinfo/src/mach/api.h vendored Normal file
View File

@@ -0,0 +1,14 @@
#pragma once
#include <stdint.h>
/* Upper bound on cache levels recorded from the hw.cacheconfig sysctl. */
#define CPUINFO_MACH_MAX_CACHE_LEVELS 8
/* Processor topology detected through Mach/sysctl interfaces. */
struct cpuinfo_mach_topology {
	uint32_t packages; /* physical packages (hw.packages; 1 on iPhone-family targets) */
	uint32_t cores; /* physical cores (hw.physicalcpu_max) */
	uint32_t threads; /* logical processors (hw.logicalcpu_max) */
	/* threads_per_cache[i]: value of hw.cacheconfig[i]; unreported levels
	 * remain 0. NOTE(review): presumably the count of logical processors
	 * sharing the i-th cache level — confirm against consumers. */
	uint32_t threads_per_cache[CPUINFO_MACH_MAX_CACHE_LEVELS];
};
/* Queries the topology via sysctl. Does not fail: falls back to conservative
 * defaults (1 package / 1 core / threads = cores) when a sysctl errors. */
struct cpuinfo_mach_topology cpuinfo_mach_detect_topology(void);

69
3rdparty/cpuinfo/src/mach/topology.c vendored Normal file
View File

@@ -0,0 +1,69 @@
#include <alloca.h>
#include <errno.h>
#include <string.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <cpuinfo/log.h>
#include <mach/api.h>
#include <TargetConditionals.h>
/*
 * Detects package/core/thread counts and per-level cache sharing via sysctl.
 * Every failure path is non-fatal: the value falls back to a conservative
 * default (1 package, 1 core, threads = cores) so callers always get a
 * usable topology.
 */
struct cpuinfo_mach_topology cpuinfo_mach_detect_topology(void) {
	int cores = 1;
	size_t sizeof_cores = sizeof(cores);
	if (sysctlbyname("hw.physicalcpu_max", &cores, &sizeof_cores, NULL, 0) != 0) {
		cpuinfo_log_error("sysctlbyname(\"hw.physicalcpu_max\") failed: %s", strerror(errno));
	} else if (cores <= 0) {
		cpuinfo_log_error("sysctlbyname(\"hw.physicalcpu_max\") returned invalid value %d", cores);
		cores = 1;
	}
	int threads = 1;
	size_t sizeof_threads = sizeof(threads);
	if (sysctlbyname("hw.logicalcpu_max", &threads, &sizeof_threads, NULL, 0) != 0) {
		cpuinfo_log_error("sysctlbyname(\"hw.logicalcpu_max\") failed: %s", strerror(errno));
	} else if (threads <= 0) {
		cpuinfo_log_error("sysctlbyname(\"hw.logicalcpu_max\") returned invalid value %d", threads);
		/* Without SMT information, assume one hardware thread per core. */
		threads = cores;
	}
	int packages = 1;
#if !TARGET_OS_IPHONE
	/* hw.packages is only queried on non-iPhone targets; iOS-family devices
	 * are assumed single-package. */
	size_t sizeof_packages = sizeof(packages);
	if (sysctlbyname("hw.packages", &packages, &sizeof_packages, NULL, 0) != 0) {
		cpuinfo_log_error("sysctlbyname(\"hw.packages\") failed: %s", strerror(errno));
	} else if (packages <= 0) {
		cpuinfo_log_error("sysctlbyname(\"hw.packages\") returned invalid value %d", packages);
		packages = 1;
	}
#endif
	cpuinfo_log_debug("mach topology: packages = %d, cores = %d, threads = %d", packages, (int)cores, (int)threads);
	/* Designated initializer zero-fills threads_per_cache[]. */
	struct cpuinfo_mach_topology topology = {
		.packages = (uint32_t)packages, .cores = (uint32_t)cores, .threads = (uint32_t)threads};
#if !TARGET_OS_IPHONE
	/* First call with a NULL buffer queries the required size; the second
	 * call fills an alloca'd buffer of that size. */
	size_t cacheconfig_size = 0;
	if (sysctlbyname("hw.cacheconfig", NULL, &cacheconfig_size, NULL, 0) != 0) {
		cpuinfo_log_error("sysctlbyname(\"hw.cacheconfig\") failed: %s", strerror(errno));
	} else {
		uint64_t* cacheconfig = alloca(cacheconfig_size);
		if (sysctlbyname("hw.cacheconfig", cacheconfig, &cacheconfig_size, NULL, 0) != 0) {
			cpuinfo_log_error("sysctlbyname(\"hw.cacheconfig\") failed: %s", strerror(errno));
		} else {
			size_t cache_configs = cacheconfig_size / sizeof(uint64_t);
			cpuinfo_log_debug("mach hw.cacheconfig count: %zu", cache_configs);
			/* Clamp to the fixed-size array in the topology struct. */
			if (cache_configs > CPUINFO_MACH_MAX_CACHE_LEVELS) {
				cache_configs = CPUINFO_MACH_MAX_CACHE_LEVELS;
			}
			for (size_t i = 0; i < cache_configs; i++) {
				/* PRIu64 requires <inttypes.h> — presumably pulled
				 * in transitively via cpuinfo/log.h; TODO confirm. */
				cpuinfo_log_debug("mach hw.cacheconfig[%zu]: %" PRIu64, i, cacheconfig[i]);
				/* Narrowing uint64_t -> uint32_t: sharing counts are
				 * far below 2^32 in practice. */
				topology.threads_per_cache[i] = cacheconfig[i];
			}
		}
	}
#endif
	return topology;
}

42
3rdparty/cpuinfo/src/riscv/api.h vendored Normal file
View File

@@ -0,0 +1,42 @@
#pragma once
#include <stdint.h>
#include <cpuinfo.h>
#include <cpuinfo/common.h>
/* RISC-V Vendor IDs, as reported in the 'mvendorid' CSR. */
enum cpuinfo_riscv_chipset_vendor {
	cpuinfo_riscv_chipset_vendor_unknown = 0,
	/* SiFive's vendor ID as reported in 'mvendorid'. */
	cpuinfo_riscv_chipset_vendor_sifive = 0x489,
	cpuinfo_riscv_chipset_vendor_max,
};
/* RISC-V Architecture IDs ('marchid'). No known values are modeled yet. */
enum cpuinfo_riscv_chipset_arch {
	cpuinfo_riscv_chipset_arch_unknown = 0,
	cpuinfo_riscv_chipset_arch_max,
};
/* RISC-V Implementation IDs ('mimpid'). No known values are modeled yet. */
enum cpuinfo_riscv_chipset_impl {
	cpuinfo_riscv_chipset_impl_unknown = 0,
	cpuinfo_riscv_chipset_impl_max,
};
/**
 * Decodes the vendor and micro-architecture based on the provided input
 * parameters, regardless of underlying operating system.
 *
 * @param[vendor_id]: The 'mvendorid' as described by the RISC-V Manual.
 * @param[arch_id]: The 'marchid' as described by the RISC-V Manual.
 * @param[imp_id]: The 'mimpid' as described by the RISC-V Manual.
 * @param[vendor] - Reference to the cpuinfo_vendor to populate.
 * @param[uarch] - Reference to the cpuinfo_uarch to populate.
 */
CPUINFO_INTERNAL void cpuinfo_riscv_decode_vendor_uarch(
	uint32_t vendor_id,
	uint32_t arch_id,
	uint32_t imp_id,
	enum cpuinfo_vendor vendor[restrict static 1],
	enum cpuinfo_uarch uarch[restrict static 1]);

71
3rdparty/cpuinfo/src/riscv/linux/api.h vendored Normal file
View File

@@ -0,0 +1,71 @@
#pragma once
#include <cpuinfo.h>
#include <cpuinfo/common.h>
/**
* Definition of a RISC-V Linux processor. It is composed of the base processor
* definition in "include/cpuinfo.h" and flags specific to RISC-V Linux
* implementations.
*/
struct cpuinfo_riscv_linux_processor {
	/* Public ABI cpuinfo structures. */
	struct cpuinfo_processor processor;
	struct cpuinfo_core core;
	struct cpuinfo_cluster cluster;
	struct cpuinfo_package package;
	/**
	 * Linux-specific flags for the logical processor:
	 * - Bit field that can be masked with CPUINFO_LINUX_FLAG_*.
	 */
	uint32_t flags;
	/**
	 * Minimum processor ID on the cluster which includes this logical
	 * processor. This value can serve as an ID for the cluster of logical
	 * processors: it is the same for all logical processors on the same
	 * cluster.
	 */
	uint32_t cluster_leader_id;
	/**
	 * Minimum processor ID on the core which includes this logical
	 * processor. This value can serve as an ID for the core of logical
	 * processors: it is the same for all logical processors on the same
	 * core.
	 */
	uint32_t core_leader_id;
	/**
	 * Minimum processor ID on the package which includes this logical
	 * processor. This value can serve as an ID for the package of logical
	 * processors: it is the same for all logical processors on the same
	 * package.
	 */
	uint32_t package_leader_id;
};
/**
* Reads AT_HWCAP from `getauxval` and populates the cpuinfo_riscv_isa
* structure.
*
* @param[isa] - Reference to cpuinfo_riscv_isa structure to populate.
*/
CPUINFO_INTERNAL void cpuinfo_riscv_linux_decode_isa_from_hwcap(struct cpuinfo_riscv_isa isa[restrict static 1]);
/**
* Reads `sys_riscv_hwprobe` and determines the processor vendor and
* micro-architecture.
*
* @param[processor] - The Linux ID of the target processor.
* @param[vendor] - Reference to the cpuinfo_vendor to populate.
* @param[uarch] - Reference to the cpuinfo_uarch to populate.
*/
CPUINFO_INTERNAL void cpuinfo_riscv_linux_decode_vendor_uarch_from_hwprobe(
uint32_t processor,
enum cpuinfo_vendor vendor[restrict static 1],
enum cpuinfo_uarch uarch[restrict static 1]);
/* Used to determine which uarch is associated with the current thread. */
extern CPUINFO_INTERNAL const uint32_t* cpuinfo_linux_cpu_to_uarch_index_map;

619
3rdparty/cpuinfo/src/riscv/linux/init.c vendored Normal file
View File

@@ -0,0 +1,619 @@
#include <string.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
#include <linux/api.h>
#include <riscv/linux/api.h>
/* ISA structure to hold supported extensions. */
struct cpuinfo_riscv_isa cpuinfo_isa;
/* Helper function to bitmask flags and ensure operator precedence. */
/* Returns true iff every bit set in 'mask' is also set in 'flags'. */
static inline bool bitmask_all(uint32_t flags, uint32_t mask) {
	return (mask & ~flags) == 0;
}
static int compare_riscv_linux_processors(const void* a, const void* b) {
/**
* For our purposes, it is only relevant that the list is sorted by
* micro-architecture, so the nature of ordering is irrelevant.
*/
return ((const struct cpuinfo_riscv_linux_processor*)a)->core.uarch -
((const struct cpuinfo_riscv_linux_processor*)b)->core.uarch;
}
/**
 * Parses the core cpus list for each processor. This function is called once
 * per-processor, with the IDs of all other processors in the core list.
 *
 * The 'processor_[start|count]' are populated in the processor's 'core'
 * attribute, with 'start' being the smallest ID in the core list.
 *
 * The 'core_leader_id' of each processor is set to the smallest ID in its
 * core CPU list.
 *
 * Precondition: The element in the 'processors' list must be initialized with
 * their 'core_leader_id' to their index in the list.
 * E.g. processors[0].core_leader_id = 0.
 */
static bool core_cpus_parser(
	uint32_t processor,
	uint32_t core_cpus_start,
	uint32_t core_cpus_end,
	struct cpuinfo_riscv_linux_processor* processors) {
	uint32_t processor_start = UINT32_MAX;
	uint32_t processor_count = 0;
	/* If the processor already has a leader, use it. */
	if (bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CORE_CLUSTER)) {
		processor_start = processors[processor].core_leader_id;
	}
	for (size_t core_cpu = core_cpus_start; core_cpu < core_cpus_end; core_cpu++) {
		/* Skip processors that were not detected as present. */
		if (!bitmask_all(processors[core_cpu].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}
		/**
		 * The first valid processor observed is the smallest ID in the
		 * list that attaches to this core.
		 */
		if (processor_start == UINT32_MAX) {
			processor_start = core_cpu;
		}
		processors[core_cpu].core_leader_id = processor_start;
		processor_count++;
	}
	/**
	 * If the cluster flag has not been set, assign the processor start. If
	 * it has been set, only apply the processor start if it's less than the
	 * held value. This can happen if the callback is invoked twice:
	 *
	 * e.g. core_cpu_list=1,10-12
	 */
	if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CORE_CLUSTER) ||
	    processors[processor].core.processor_start > processor_start) {
		processors[processor].core.processor_start = processor_start;
		processors[processor].core_leader_id = processor_start;
	}
	/* Counts accumulate across invocations for split CPU ranges. */
	processors[processor].core.processor_count += processor_count;
	processors[processor].flags |= CPUINFO_LINUX_FLAG_CORE_CLUSTER;
	/* The parser has failed only if no processors were found. */
	return processor_count != 0;
}
/**
 * Parses the cluster cpu list for each processor. This function is called once
 * per-processor, with the IDs of all other processors in the cluster.
 *
 * The 'cluster_leader_id' of each processor is set to the smallest ID in its
 * cluster CPU list.
 *
 * Precondition: The element in the 'processors' list must be initialized with
 * their 'cluster_leader_id' to their index in the list.
 * E.g. processors[0].cluster_leader_id = 0.
 */
static bool cluster_cpus_parser(
	uint32_t processor,
	uint32_t cluster_cpus_start,
	uint32_t cluster_cpus_end,
	struct cpuinfo_riscv_linux_processor* processors) {
	uint32_t processor_start = UINT32_MAX;
	uint32_t processor_count = 0;
	uint32_t core_count = 0;
	/* If the processor already has a leader, use it. */
	if (bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CLUSTER_CLUSTER)) {
		processor_start = processors[processor].cluster_leader_id;
	}
	for (size_t cluster_cpu = cluster_cpus_start; cluster_cpu < cluster_cpus_end; cluster_cpu++) {
		/* Skip processors that were not detected as present. */
		if (!bitmask_all(processors[cluster_cpu].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}
		/**
		 * The first valid processor observed is the smallest ID in the
		 * list that attaches to this core.
		 */
		if (processor_start == UINT32_MAX) {
			processor_start = cluster_cpu;
		}
		processors[cluster_cpu].cluster_leader_id = processor_start;
		processor_count++;
		/**
		 * A processor should only represent it's core if it is the
		 * assigned leader of that core.
		 */
		if (processors[cluster_cpu].core_leader_id == cluster_cpu) {
			core_count++;
		}
	}
	/**
	 * If the cluster flag has not been set, assign the processor start. If
	 * it has been set, only apply the processor start if it's less than the
	 * held value. This can happen if the callback is invoked twice:
	 *
	 * e.g. cluster_cpus_list=1,10-12
	 */
	if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CLUSTER_CLUSTER) ||
	    processors[processor].cluster.processor_start > processor_start) {
		processors[processor].cluster.processor_start = processor_start;
		processors[processor].cluster.core_start = processor_start;
		processors[processor].cluster.cluster_id = processor_start;
		processors[processor].cluster_leader_id = processor_start;
	}
	/* Counts accumulate across invocations for split CPU ranges. */
	processors[processor].cluster.processor_count += processor_count;
	processors[processor].cluster.core_count += core_count;
	processors[processor].flags |= CPUINFO_LINUX_FLAG_CLUSTER_CLUSTER;
	return true;
}
/**
 * Parses the package cpus list for each processor. This function is called once
 * per-processor, with the IDs of all other processors in the package list.
 *
 * The 'processor_[start|count]' are populated in the processor's 'package'
 * attribute, with 'start' being the smallest ID in the package list.
 *
 * The 'package_leader_id' of each processor is set to the smallest ID in its
 * package CPU list.
 *
 * Precondition: The element in the 'processors' list must be initialized with
 * their 'package_leader_id' to their index in the list.
 * E.g. processors[0].package_leader_id = 0.
 */
static bool package_cpus_parser(
	uint32_t processor,
	uint32_t package_cpus_start,
	uint32_t package_cpus_end,
	struct cpuinfo_riscv_linux_processor* processors) {
	uint32_t processor_start = UINT32_MAX;
	uint32_t processor_count = 0;
	uint32_t cluster_count = 0;
	uint32_t core_count = 0;
	/* If the processor already has a leader, use it. */
	if (bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) {
		processor_start = processors[processor].package_leader_id;
	}
	for (size_t package_cpu = package_cpus_start; package_cpu < package_cpus_end; package_cpu++) {
		/* Skip processors that were not detected as present. */
		if (!bitmask_all(processors[package_cpu].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}
		/**
		 * The first valid processor observed is the smallest ID in the
		 * list that attaches to this package.
		 */
		if (processor_start == UINT32_MAX) {
			processor_start = package_cpu;
		}
		processors[package_cpu].package_leader_id = processor_start;
		processor_count++;
		/**
		 * A processor should only represent it's core if it is the
		 * assigned leader of that core, and similarly for it's cluster.
		 */
		if (processors[package_cpu].cluster_leader_id == package_cpu) {
			cluster_count++;
		}
		if (processors[package_cpu].core_leader_id == package_cpu) {
			core_count++;
		}
	}
	/**
	 * If the cluster flag has not been set, assign the processor start. If
	 * it has been set, only apply the processor start if it's less than the
	 * held value. This can happen if the callback is invoked twice:
	 *
	 * e.g. package_cpus_list=1,10-12
	 */
	if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER) ||
	    processors[processor].package.processor_start > processor_start) {
		processors[processor].package.processor_start = processor_start;
		processors[processor].package.cluster_start = processor_start;
		processors[processor].package.core_start = processor_start;
		processors[processor].package_leader_id = processor_start;
	}
	/* Counts accumulate across invocations for split CPU ranges. */
	processors[processor].package.processor_count += processor_count;
	processors[processor].package.cluster_count += cluster_count;
	processors[processor].package.core_count += core_count;
	processors[processor].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
	return true;
}
/* Initialization for the RISC-V Linux system. */
void cpuinfo_riscv_linux_init(void) {
struct cpuinfo_riscv_linux_processor* riscv_linux_processors = NULL;
struct cpuinfo_processor* processors = NULL;
struct cpuinfo_package* packages = NULL;
struct cpuinfo_cluster* clusters = NULL;
struct cpuinfo_core* cores = NULL;
struct cpuinfo_uarch_info* uarchs = NULL;
const struct cpuinfo_processor** linux_cpu_to_processor_map = NULL;
const struct cpuinfo_core** linux_cpu_to_core_map = NULL;
uint32_t* linux_cpu_to_uarch_index_map = NULL;
/**
* The interesting set of processors are the number of 'present'
* processors on the system. There may be more 'possible' processors,
* but processor information cannot be gathered on non-present
* processors.
*
* Note: For SoCs, it is largely the case that all processors are known
* at boot and no processors are hotplugged at runtime, so the
* 'present' and 'possible' list is often the same.
*
* Note: This computes the maximum processor ID of the 'present'
* processors. It is not a count of the number of processors on the
* system.
*/
const uint32_t max_processor_id =
1 + cpuinfo_linux_get_max_present_processor(cpuinfo_linux_get_max_processors_count());
if (max_processor_id == 0) {
cpuinfo_log_error("failed to discover any processors");
return;
}
/**
* Allocate space to store all processor information. This array is
* sized to the max processor ID as opposed to the number of 'present'
* processors, to leverage pointer math in the common utility functions.
*/
riscv_linux_processors = calloc(max_processor_id, sizeof(struct cpuinfo_riscv_linux_processor));
if (riscv_linux_processors == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %" PRIu32 " processors.",
max_processor_id * sizeof(struct cpuinfo_riscv_linux_processor),
max_processor_id);
goto cleanup;
}
/**
* Attempt to detect all processors and apply the corresponding flag to
* each processor struct that we find.
*/
if (!cpuinfo_linux_detect_present_processors(
max_processor_id,
&riscv_linux_processors->flags,
sizeof(struct cpuinfo_riscv_linux_processor),
CPUINFO_LINUX_FLAG_PRESENT | CPUINFO_LINUX_FLAG_VALID)) {
cpuinfo_log_error("failed to detect present processors");
goto cleanup;
}
/* Populate processor information. */
for (size_t processor = 0; processor < max_processor_id; processor++) {
if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
continue;
}
/* TODO: Determine if an 'smt_id' is available. */
riscv_linux_processors[processor].processor.linux_id = processor;
}
/* Populate core information. */
for (size_t processor = 0; processor < max_processor_id; processor++) {
if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
continue;
}
/* Populate processor start and count information. */
if (!cpuinfo_linux_detect_core_cpus(
max_processor_id,
processor,
(cpuinfo_siblings_callback)core_cpus_parser,
riscv_linux_processors)) {
cpuinfo_log_error("failed to detect core cpus for processor %zu.", processor);
goto cleanup;
}
/* Populate core ID information. */
if (cpuinfo_linux_get_processor_core_id(processor, &riscv_linux_processors[processor].core.core_id)) {
riscv_linux_processors[processor].flags |= CPUINFO_LINUX_FLAG_CORE_ID;
}
/**
* Populate the vendor and uarch of this core from this
* processor. When the final 'cores' list is constructed, only
* the values from the core leader will be honored.
*/
cpuinfo_riscv_linux_decode_vendor_uarch_from_hwprobe(
processor,
&riscv_linux_processors[processor].core.vendor,
&riscv_linux_processors[processor].core.uarch);
/* Populate frequency information of this core. */
uint32_t frequency = cpuinfo_linux_get_processor_cur_frequency(processor);
if (frequency != 0) {
riscv_linux_processors[processor].core.frequency = frequency;
riscv_linux_processors[processor].flags |= CPUINFO_LINUX_FLAG_CUR_FREQUENCY;
}
}
/* Populate cluster information. */
for (size_t processor = 0; processor < max_processor_id; processor++) {
if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
continue;
}
if (!cpuinfo_linux_detect_cluster_cpus(
max_processor_id,
processor,
(cpuinfo_siblings_callback)cluster_cpus_parser,
riscv_linux_processors)) {
cpuinfo_log_warning("failed to detect cluster cpus for processor %zu.", processor);
goto cleanup;
}
/**
* Populate the vendor, uarch and frequency of this cluster from
* this logical processor. When the 'clusters' list is
* constructed, only the values from the cluster leader will be
* honored.
*/
riscv_linux_processors[processor].cluster.vendor = riscv_linux_processors[processor].core.vendor;
riscv_linux_processors[processor].cluster.uarch = riscv_linux_processors[processor].core.uarch;
riscv_linux_processors[processor].cluster.frequency = riscv_linux_processors[processor].core.frequency;
}
/* Populate package information. */
for (size_t processor = 0; processor < max_processor_id; processor++) {
if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
continue;
}
if (!cpuinfo_linux_detect_package_cpus(
max_processor_id,
processor,
(cpuinfo_siblings_callback)package_cpus_parser,
riscv_linux_processors)) {
cpuinfo_log_warning("failed to detect package cpus for processor %zu.", processor);
goto cleanup;
}
}
/* Populate ISA structure with hwcap information. */
cpuinfo_riscv_linux_decode_isa_from_hwcap(&cpuinfo_isa);
/**
* To efficiently compute the number of unique micro-architectures
* present on the system, sort the processor list by micro-architecture
* and then scan through the list to count the differences.
*
* Ensure this is done at the end of composing the processor list - the
* parsing functions assume that the position of the processor in the
* list matches it's Linux ID, which this sorting operation breaks.
*/
qsort(riscv_linux_processors,
max_processor_id,
sizeof(struct cpuinfo_riscv_linux_processor),
compare_riscv_linux_processors);
/**
* Determine the number of *valid* detected processors, cores,
* clusters, packages and uarchs in the list.
*/
size_t valid_processors_count = 0;
size_t valid_cores_count = 0;
size_t valid_clusters_count = 0;
size_t valid_packages_count = 0;
size_t valid_uarchs_count = 0;
enum cpuinfo_uarch last_uarch = cpuinfo_uarch_unknown;
for (size_t processor = 0; processor < max_processor_id; processor++) {
if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
continue;
}
/**
* All comparisons to the leader id values MUST be done against
* the 'linux_id' as opposed to 'processor'. The sort function
* above no longer allows us to make the assumption that these
* two values are the same.
*/
uint32_t linux_id = riscv_linux_processors[processor].processor.linux_id;
valid_processors_count++;
if (riscv_linux_processors[processor].core_leader_id == linux_id) {
valid_cores_count++;
}
if (riscv_linux_processors[processor].cluster_leader_id == linux_id) {
valid_clusters_count++;
}
if (riscv_linux_processors[processor].package_leader_id == linux_id) {
valid_packages_count++;
}
/**
* As we've sorted by micro-architecture, when the uarch differs
* between two entries, a unique uarch has been observed.
*/
if (last_uarch != riscv_linux_processors[processor].core.uarch || valid_uarchs_count == 0) {
valid_uarchs_count++;
last_uarch = riscv_linux_processors[processor].core.uarch;
}
}
/* Allocate and populate final public ABI structures. */
processors = calloc(valid_processors_count, sizeof(struct cpuinfo_processor));
if (processors == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %zu processors.",
valid_processors_count * sizeof(struct cpuinfo_processor),
valid_processors_count);
goto cleanup;
}
cores = calloc(valid_cores_count, sizeof(struct cpuinfo_core));
if (cores == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %zu cores.",
valid_cores_count * sizeof(struct cpuinfo_core),
valid_cores_count);
goto cleanup;
}
clusters = calloc(valid_clusters_count, sizeof(struct cpuinfo_cluster));
if (clusters == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %zu clusters.",
valid_clusters_count * sizeof(struct cpuinfo_cluster),
valid_clusters_count);
goto cleanup;
}
packages = calloc(valid_packages_count, sizeof(struct cpuinfo_package));
if (packages == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %zu packages.",
valid_packages_count * sizeof(struct cpuinfo_package),
valid_packages_count);
goto cleanup;
}
uarchs = calloc(valid_uarchs_count, sizeof(struct cpuinfo_uarch_info));
if (uarchs == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %zu packages.",
valid_uarchs_count * sizeof(struct cpuinfo_uarch_info),
valid_uarchs_count);
goto cleanup;
}
linux_cpu_to_processor_map = calloc(max_processor_id, sizeof(struct cpuinfo_processor*));
if (linux_cpu_to_processor_map == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %" PRIu32 " processor map.",
max_processor_id * sizeof(struct cpuinfo_processor*),
max_processor_id);
goto cleanup;
}
linux_cpu_to_core_map = calloc(max_processor_id, sizeof(struct cpuinfo_core*));
if (linux_cpu_to_core_map == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %" PRIu32 " core map.",
max_processor_id * sizeof(struct cpuinfo_core*),
max_processor_id);
goto cleanup;
}
linux_cpu_to_uarch_index_map = calloc(max_processor_id, sizeof(struct cpuinfo_uarch_info*));
if (linux_cpu_to_uarch_index_map == NULL) {
cpuinfo_log_error(
"failed to allocate %zu bytes for %" PRIu32 " uarch map.",
max_processor_id * sizeof(struct cpuinfo_uarch_info*),
max_processor_id);
goto cleanup;
}
/* Transfer contents of processor list to ABI structures. */
size_t valid_processors_index = 0;
size_t valid_cores_index = 0;
size_t valid_clusters_index = 0;
size_t valid_packages_index = 0;
size_t valid_uarchs_index = 0;
last_uarch = cpuinfo_uarch_unknown;
for (size_t processor = 0; processor < max_processor_id; processor++) {
if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
continue;
}
/**
* All comparisons to the leader id values MUST be done against
* the 'linux_id' as opposed to 'processor'. The sort function
* above no longer allows us to make the assumption that these
* two values are the same.
*/
uint32_t linux_id = riscv_linux_processors[processor].processor.linux_id;
/* Create uarch entry if this uarch has not been seen before. */
if (last_uarch != riscv_linux_processors[processor].core.uarch || valid_uarchs_index == 0) {
uarchs[valid_uarchs_index++].uarch = riscv_linux_processors[processor].core.uarch;
last_uarch = riscv_linux_processors[processor].core.uarch;
}
/* Copy cpuinfo_processor information. */
memcpy(&processors[valid_processors_index++],
&riscv_linux_processors[processor].processor,
sizeof(struct cpuinfo_processor));
/* Update uarch processor count. */
uarchs[valid_uarchs_index - 1].processor_count++;
/* Copy cpuinfo_core information, if this is the leader. */
if (riscv_linux_processors[processor].core_leader_id == linux_id) {
memcpy(&cores[valid_cores_index++],
&riscv_linux_processors[processor].core,
sizeof(struct cpuinfo_core));
/* Update uarch core count. */
uarchs[valid_uarchs_index - 1].core_count++;
}
/* Copy cpuinfo_cluster information, if this is the leader. */
if (riscv_linux_processors[processor].cluster_leader_id == linux_id) {
memcpy(&clusters[valid_clusters_index++],
&riscv_linux_processors[processor].cluster,
sizeof(struct cpuinfo_cluster));
}
/* Copy cpuinfo_package information, if this is the leader. */
if (riscv_linux_processors[processor].package_leader_id == linux_id) {
memcpy(&packages[valid_packages_index++],
&riscv_linux_processors[processor].package,
sizeof(struct cpuinfo_package));
}
/* Commit pointers on the final structures. */
processors[valid_processors_index - 1].core = &cores[valid_cores_index - 1];
processors[valid_processors_index - 1].cluster = &clusters[valid_clusters_index - 1];
processors[valid_processors_index - 1].package = &packages[valid_packages_index - 1];
cores[valid_cores_index - 1].cluster = &clusters[valid_clusters_index - 1];
cores[valid_cores_index - 1].package = &packages[valid_packages_index - 1];
clusters[valid_clusters_index - 1].package = &packages[valid_packages_index - 1];
linux_cpu_to_processor_map[linux_id] = &processors[valid_processors_index - 1];
linux_cpu_to_core_map[linux_id] = &cores[valid_cores_index - 1];
linux_cpu_to_uarch_index_map[linux_id] = valid_uarchs_index - 1;
}
/* Commit */
cpuinfo_processors = processors;
cpuinfo_processors_count = valid_processors_count;
cpuinfo_cores = cores;
cpuinfo_cores_count = valid_cores_count;
cpuinfo_clusters = clusters;
cpuinfo_clusters_count = valid_clusters_count;
cpuinfo_packages = packages;
cpuinfo_packages_count = valid_packages_count;
cpuinfo_uarchs = uarchs;
cpuinfo_uarchs_count = valid_uarchs_count;
cpuinfo_linux_cpu_max = max_processor_id;
cpuinfo_linux_cpu_to_processor_map = linux_cpu_to_processor_map;
cpuinfo_linux_cpu_to_core_map = linux_cpu_to_core_map;
cpuinfo_linux_cpu_to_uarch_index_map = linux_cpu_to_uarch_index_map;
__sync_synchronize();
cpuinfo_is_initialized = true;
/* Mark all public structures NULL to prevent cleanup from erasing them.
*/
processors = NULL;
cores = NULL;
clusters = NULL;
packages = NULL;
uarchs = NULL;
linux_cpu_to_processor_map = NULL;
linux_cpu_to_core_map = NULL;
linux_cpu_to_uarch_index_map = NULL;
cleanup:
free(riscv_linux_processors);
free(processors);
free(cores);
free(clusters);
free(packages);
free(uarchs);
free(linux_cpu_to_processor_map);
free(linux_cpu_to_core_map);
free(linux_cpu_to_uarch_index_map);
}

View File

@@ -0,0 +1,151 @@
/*
* Only enable the C standard library hwprobe interface on Android for now.
* Patches to add a compatible hwprobe API to glibc are available but not
* merged at the time of writing and so cannot easily be tested. The
* #ifdef __ANDROID__ check will be removed in the future.
*/
#ifdef __ANDROID__
#ifdef __has_include
#if __has_include(<sys/hwprobe.h>)
#define CPUINFO_RISCV_LINUX_HAVE_C_HWPROBE
#include <sys/hwprobe.h>
#endif
#endif
#endif
#include <sched.h>
#include <cpuinfo/log.h>
#include <riscv/api.h>
#include <riscv/linux/api.h>
#ifndef CPUINFO_RISCV_LINUX_HAVE_C_HWPROBE
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
struct riscv_hwprobe {
int64_t key;
uint64_t value;
};
/*
* The standard C library our binary was compiled with does not support
* hwprobe but the kernel on which we are running might do. The
* constants below are copied from
* /usr/include/riscv64-linux-gnu/asm/hwprobe.h. They allow us to
* invoke the hwprobe syscall directly. We duplicate the constants
* rather than including the kernel hwprobe.h header, as this header
* will only be present if we're building Linux 6.4 or greater.
*/
#define RISCV_HWPROBE_KEY_MVENDORID 0
#define RISCV_HWPROBE_KEY_MARCHID 1
#define RISCV_HWPROBE_KEY_MIMPID 2
#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
#define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
#define RISCV_HWPROBE_IMA_FD (1 << 0)
#define RISCV_HWPROBE_IMA_C (1 << 1)
#define RISCV_HWPROBE_IMA_V (1 << 2)
#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
#define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
#define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
#define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
#ifndef NR_riscv_hwprobe
#ifndef NR_arch_specific_syscall
#define NR_arch_specific_syscall 244
#endif
#define NR_riscv_hwprobe (NR_arch_specific_syscall + 14)
#endif
#endif
/*
 * Query the Linux riscv_hwprobe interface for the machine vendor,
 * architecture, and implementation IDs of one logical processor, and
 * translate them to a cpuinfo vendor/uarch pair.
 *
 * @param processor - Linux logical CPU index to probe.
 * @param[out] vendor - receives the decoded vendor; set to
 *                      cpuinfo_vendor_unknown on any failure.
 * @param[out] uarch - receives the decoded microarchitecture; set to
 *                     cpuinfo_uarch_unknown on any failure.
 */
void cpuinfo_riscv_linux_decode_vendor_uarch_from_hwprobe(
	uint32_t processor,
	enum cpuinfo_vendor vendor[restrict static 1],
	enum cpuinfo_uarch uarch[restrict static 1]) {
	/* Keys to request from hwprobe; values are filled in by the kernel. */
	struct riscv_hwprobe pairs[] = {
		{
			.key = RISCV_HWPROBE_KEY_MVENDORID,
		},
		{
			.key = RISCV_HWPROBE_KEY_MARCHID,
		},
		{
			.key = RISCV_HWPROBE_KEY_MIMPID,
		},
	};
	const size_t pairs_count = sizeof(pairs) / sizeof(struct riscv_hwprobe);
	/* In case of failure, report unknown. */
	*vendor = cpuinfo_vendor_unknown;
	*uarch = cpuinfo_uarch_unknown;
	/* Create a CPU set with this processor flagged. */
	const size_t cpu_count = processor + 1;
	cpu_set_t* cpu_set = CPU_ALLOC(cpu_count);
	if (cpu_set == NULL) {
		cpuinfo_log_warning("failed to allocate space for cpu_set");
		return;
	}
	const size_t cpu_set_size = CPU_ALLOC_SIZE(cpu_count);
	CPU_ZERO_S(cpu_set_size, cpu_set);
	CPU_SET_S(processor, cpu_set_size, cpu_set);
	/* Request all available information from hwprobe. */
#ifndef CPUINFO_RISCV_LINUX_HAVE_C_HWPROBE
	/*
	 * No standard library support for hwprobe. We'll need to invoke the
	 * syscall directly. See
	 *
	 * https://docs.kernel.org/arch/riscv/hwprobe.html
	 *
	 * for more details.
	 */
	int ret = syscall(NR_riscv_hwprobe, pairs, pairs_count, cpu_set_size, (unsigned long*)cpu_set, 0 /* flags */);
#else
	int ret = __riscv_hwprobe(pairs, pairs_count, cpu_set_size, (unsigned long*)cpu_set, 0 /* flags */);
#endif
	if (ret < 0) {
		cpuinfo_log_warning("failed to get hwprobe information, err: %d", ret);
		goto cleanup;
	}
	/**
	 * The syscall may not have populated all requested keys, loop through
	 * the list and store the values that were discovered.
	 */
	/* NOTE(review): hwprobe values are 64-bit; storing them in uint32_t
	 * truncates the upper half — presumably acceptable for the IDs of
	 * interest, but worth confirming against the decoder's expectations. */
	uint32_t vendor_id = 0;
	uint32_t arch_id = 0;
	uint32_t imp_id = 0;
	for (size_t pair = 0; pair < pairs_count; pair++) {
		switch (pairs[pair].key) {
			case RISCV_HWPROBE_KEY_MVENDORID:
				vendor_id = pairs[pair].value;
				break;
			case RISCV_HWPROBE_KEY_MARCHID:
				arch_id = pairs[pair].value;
				break;
			case RISCV_HWPROBE_KEY_MIMPID:
				imp_id = pairs[pair].value;
				break;
			default:
				/* The key value may be -1 if unsupported. */
				break;
		}
	}
	cpuinfo_riscv_decode_vendor_uarch(vendor_id, arch_id, imp_id, vendor, uarch);
cleanup:
	CPU_FREE(cpu_set);
}

View File

@@ -0,0 +1,43 @@
#include <string.h>
#include <sys/auxv.h>
#include <riscv/linux/api.h>
/**
* arch/riscv/include/uapi/asm/hwcap.h
*
* This must be kept in sync with the upstream kernel header.
*/
#define COMPAT_HWCAP_ISA_I (1 << ('I' - 'A'))
#define COMPAT_HWCAP_ISA_M (1 << ('M' - 'A'))
#define COMPAT_HWCAP_ISA_A (1 << ('A' - 'A'))
#define COMPAT_HWCAP_ISA_F (1 << ('F' - 'A'))
#define COMPAT_HWCAP_ISA_D (1 << ('D' - 'A'))
#define COMPAT_HWCAP_ISA_C (1 << ('C' - 'A'))
#define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A'))
/*
 * Read the kernel-reported ISA hwcap bits (AT_HWCAP auxiliary vector) and
 * enable the corresponding single-letter extension flags in `isa`.
 * Flags whose bit is absent are left untouched, never cleared.
 */
void cpuinfo_riscv_linux_decode_isa_from_hwcap(struct cpuinfo_riscv_isa isa[restrict static 1]) {
	const unsigned long hwcap = getauxval(AT_HWCAP);
	/* Map each COMPAT_HWCAP_ISA_* bit onto its destination flag. */
	const struct {
		unsigned long mask;
		bool* flag;
	} entries[] = {
		{COMPAT_HWCAP_ISA_I, &isa->i},
		{COMPAT_HWCAP_ISA_M, &isa->m},
		{COMPAT_HWCAP_ISA_A, &isa->a},
		{COMPAT_HWCAP_ISA_F, &isa->f},
		{COMPAT_HWCAP_ISA_D, &isa->d},
		{COMPAT_HWCAP_ISA_C, &isa->c},
		{COMPAT_HWCAP_ISA_V, &isa->v},
	};
	for (size_t k = 0; k < sizeof(entries) / sizeof(entries[0]); k++) {
		if (hwcap & entries[k].mask) {
			*entries[k].flag = true;
		}
	}
}

27
3rdparty/cpuinfo/src/riscv/uarch.c vendored Normal file
View File

@@ -0,0 +1,27 @@
#include <stdint.h>
#include <cpuinfo/log.h>
#include <riscv/api.h>
/*
 * Map the RISC-V machine vendor/architecture/implementation IDs to a
 * cpuinfo vendor and microarchitecture.
 *
 * @param vendor_id - machine vendor ID (mvendorid CSR value).
 * @param arch_id - machine architecture ID; currently unused.
 * @param imp_id - machine implementation ID; currently unused.
 * @param[out] vendor - decoded vendor, or cpuinfo_vendor_unknown.
 * @param[out] uarch - always cpuinfo_uarch_unknown for now.
 */
void cpuinfo_riscv_decode_vendor_uarch(
	uint32_t vendor_id,
	uint32_t arch_id,
	uint32_t imp_id,
	enum cpuinfo_vendor vendor[restrict static 1],
	enum cpuinfo_uarch uarch[restrict static 1]) {
	/* The vendor ID alone is sufficient to determine the cpuinfo_vendor. */
	if (vendor_id == cpuinfo_riscv_chipset_vendor_sifive) {
		*vendor = cpuinfo_vendor_sifive;
	} else {
		*vendor = cpuinfo_vendor_unknown;
		cpuinfo_log_warning("unknown vendor ID: %" PRIu32, vendor_id);
	}
	/**
	 * TODO: Add support for parsing chipset architecture and implementation
	 * IDs here, when a chipset of interest comes along.
	 */
	*uarch = cpuinfo_uarch_unknown;
}

159
3rdparty/cpuinfo/src/x86/api.h vendored Normal file
View File

@@ -0,0 +1,159 @@
#pragma once
#include <stdbool.h>
#include <stdint.h>
#include <cpuinfo.h>
#include <cpuinfo/common.h>
/* Raw register values returned by one CPUID invocation. */
struct cpuid_regs {
	uint32_t eax;
	uint32_t ebx;
	uint32_t ecx;
	uint32_t edx;
};
/* Parameters of a single x86 cache level, as decoded from CPUID. */
struct cpuinfo_x86_cache {
	uint32_t size; /* total size in bytes */
	uint32_t associativity;
	uint32_t sets;
	uint32_t partitions;
	uint32_t line_size; /* in bytes */
	uint32_t flags; /* CPUINFO_CACHE_* bit flags (unified, inclusive, ...) */
	uint32_t apic_bits; /* bits of APIC ID shared by processors on this cache */
};
/* All cache levels of one x86 processor. */
struct cpuinfo_x86_caches {
	struct cpuinfo_trace_cache trace;
	struct cpuinfo_x86_cache l1i;
	struct cpuinfo_x86_cache l1d;
	struct cpuinfo_x86_cache l2;
	struct cpuinfo_x86_cache l3;
	struct cpuinfo_x86_cache l4;
	uint32_t prefetch_size;
};
/* Family/model/stepping information decoded from CPUID leaf 0x1 EAX. */
struct cpuinfo_x86_model_info {
	uint32_t model; /* base_model combined with extended_model */
	uint32_t family; /* base_family combined with extended_family */
	uint32_t base_model;
	uint32_t base_family;
	uint32_t stepping;
	uint32_t extended_model;
	uint32_t extended_family;
	uint32_t processor_type;
};
/* Decomposition of the APIC ID into SMT/core/package bit fields. */
struct cpuinfo_x86_topology {
	uint32_t apic_id;
	uint32_t thread_bits_offset;
	uint32_t thread_bits_length;
	uint32_t core_bits_offset;
	uint32_t core_bits_length;
};
/* Aggregate description of one x86 processor gathered via CPUID. */
struct cpuinfo_x86_processor {
	uint32_t cpuid; /* raw CPUID leaf 0x1 EAX value */
	enum cpuinfo_vendor vendor;
	enum cpuinfo_uarch uarch;
#ifdef __linux__
	int linux_id; /* Linux logical CPU index */
#endif
	struct cpuinfo_x86_caches cache;
	/* TLB entries per page size and type (instruction/data/shared). */
	struct {
		struct cpuinfo_tlb itlb_4KB;
		struct cpuinfo_tlb itlb_2MB;
		struct cpuinfo_tlb itlb_4MB;
		struct cpuinfo_tlb dtlb0_4KB;
		struct cpuinfo_tlb dtlb0_2MB;
		struct cpuinfo_tlb dtlb0_4MB;
		struct cpuinfo_tlb dtlb_4KB;
		struct cpuinfo_tlb dtlb_2MB;
		struct cpuinfo_tlb dtlb_4MB;
		struct cpuinfo_tlb dtlb_1GB;
		struct cpuinfo_tlb stlb2_4KB;
		struct cpuinfo_tlb stlb2_2MB;
		struct cpuinfo_tlb stlb2_1GB;
	} tlb;
	struct cpuinfo_x86_topology topology;
	char brand_string[CPUINFO_PACKAGE_NAME_MAX];
};
/* Populate every field of *processor by probing CPUID. */
CPUINFO_INTERNAL void cpuinfo_x86_init_processor(struct cpuinfo_x86_processor* processor);
/* Decode the vendor from the EBX/ECX/EDX vendor-string registers of CPUID leaf 0. */
CPUINFO_INTERNAL enum cpuinfo_vendor cpuinfo_x86_decode_vendor(uint32_t ebx, uint32_t ecx, uint32_t edx);
/* Decode family/model/stepping from CPUID leaf 0x1 EAX. */
CPUINFO_INTERNAL struct cpuinfo_x86_model_info cpuinfo_x86_decode_model_info(uint32_t eax);
/* Map vendor + model information to a microarchitecture. */
CPUINFO_INTERNAL enum cpuinfo_uarch cpuinfo_x86_decode_uarch(
	enum cpuinfo_vendor vendor,
	const struct cpuinfo_x86_model_info* model_info);
/* Detect the supported instruction-set extensions. */
CPUINFO_INTERNAL struct cpuinfo_x86_isa cpuinfo_x86_detect_isa(
	const struct cpuid_regs basic_info,
	const struct cpuid_regs extended_info,
	uint32_t max_base_index,
	uint32_t max_extended_index,
	enum cpuinfo_vendor vendor,
	enum cpuinfo_uarch uarch);
/* Determine the APIC ID bit-field layout for SMT/core/package. */
CPUINFO_INTERNAL void cpuinfo_x86_detect_topology(
	uint32_t max_base_index,
	uint32_t max_extended_index,
	struct cpuid_regs leaf1,
	struct cpuinfo_x86_topology* topology);
/* Enumerate all caches and TLBs via CPUID leaves 2, 4, and 0x8000001D. */
CPUINFO_INTERNAL void cpuinfo_x86_detect_cache(
	uint32_t max_base_index,
	uint32_t max_extended_index,
	bool amd_topology_extensions,
	enum cpuinfo_vendor vendor,
	const struct cpuinfo_x86_model_info* model_info,
	struct cpuinfo_x86_caches* cache,
	struct cpuinfo_tlb* itlb_4KB,
	struct cpuinfo_tlb* itlb_2MB,
	struct cpuinfo_tlb* itlb_4MB,
	struct cpuinfo_tlb* dtlb0_4KB,
	struct cpuinfo_tlb* dtlb0_2MB,
	struct cpuinfo_tlb* dtlb0_4MB,
	struct cpuinfo_tlb* dtlb_4KB,
	struct cpuinfo_tlb* dtlb_2MB,
	struct cpuinfo_tlb* dtlb_4MB,
	struct cpuinfo_tlb* dtlb_1GB,
	struct cpuinfo_tlb* stlb2_4KB,
	struct cpuinfo_tlb* stlb2_2MB,
	struct cpuinfo_tlb* stlb2_1GB,
	uint32_t* log2_package_cores_max);
/* Decode one byte descriptor from CPUID leaf 0x2 into cache/TLB entries. */
CPUINFO_INTERNAL void cpuinfo_x86_decode_cache_descriptor(
	uint8_t descriptor,
	enum cpuinfo_vendor vendor,
	const struct cpuinfo_x86_model_info* model_info,
	struct cpuinfo_x86_caches* cache,
	struct cpuinfo_tlb* itlb_4KB,
	struct cpuinfo_tlb* itlb_2MB,
	struct cpuinfo_tlb* itlb_4MB,
	struct cpuinfo_tlb* dtlb0_4KB,
	struct cpuinfo_tlb* dtlb0_2MB,
	struct cpuinfo_tlb* dtlb0_4MB,
	struct cpuinfo_tlb* dtlb_4KB,
	struct cpuinfo_tlb* dtlb_2MB,
	struct cpuinfo_tlb* dtlb_4MB,
	struct cpuinfo_tlb* dtlb_1GB,
	struct cpuinfo_tlb* stlb2_4KB,
	struct cpuinfo_tlb* stlb2_2MB,
	struct cpuinfo_tlb* stlb2_1GB,
	uint32_t* prefetch_size);
/* Decode one CPUID leaf 0x4 sub-leaf; returns false on the terminating entry. */
CPUINFO_INTERNAL bool cpuinfo_x86_decode_deterministic_cache_parameters(
	struct cpuid_regs regs,
	struct cpuinfo_x86_caches* cache,
	uint32_t* package_cores_max);
/* Decode one CPUID leaf 0x8000001D sub-leaf; returns false on the terminating entry. */
CPUINFO_INTERNAL bool cpuinfo_x86_decode_cache_properties(struct cpuid_regs regs, struct cpuinfo_x86_caches* cache);
/* Normalize the raw 48-byte CPUID brand string; returns its length. */
CPUINFO_INTERNAL uint32_t cpuinfo_x86_normalize_brand_string(const char raw_name[48], char normalized_name[48]);
/* Format the user-visible package name from the normalized brand string. */
CPUINFO_INTERNAL uint32_t cpuinfo_x86_format_package_name(
	enum cpuinfo_vendor vendor,
	const char normalized_brand_string[48],
	char package_name[CPUINFO_PACKAGE_NAME_MAX]);

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,247 @@
#include <stdint.h>
#include <cpuinfo.h>
#include <cpuinfo/log.h>
#include <cpuinfo/utils.h>
#include <x86/cpuid.h>
/* Cache type encoded in bits 4:0 of EAX in CPUID cache-enumeration leaves. */
enum cache_type {
	cache_type_none = 0, /* terminating entry: no more caches */
	cache_type_data = 1,
	cache_type_instruction = 2,
	cache_type_unified = 3,
};
/*
 * Decode one sub-leaf of CPUID leaf 0x00000004 (Deterministic Cache
 * Parameters) into the matching level of *cache.
 *
 * @param regs - register values from one cpuidex(4, n) invocation.
 * @param[out] cache - structure whose l1i/l1d/l2/l3/l4 entry is filled in.
 * @param[out] package_cores_max - receives the maximum number of cores per
 *                                 package reported by this sub-leaf.
 * @return false when the sub-leaf reports no cache (enumeration is done),
 *         true otherwise.
 */
bool cpuinfo_x86_decode_deterministic_cache_parameters(
	struct cpuid_regs regs,
	struct cpuinfo_x86_caches* cache,
	uint32_t* package_cores_max) {
	const uint32_t type = regs.eax & UINT32_C(0x1F);
	if (type == cache_type_none) {
		return false;
	}
	/* Level starts at 1 */
	const uint32_t level = (regs.eax >> 5) & UINT32_C(0x7);
	/* All of these fields are encoded minus one. */
	const uint32_t sets = 1 + regs.ecx;
	const uint32_t line_size = 1 + (regs.ebx & UINT32_C(0x00000FFF));
	const uint32_t partitions = 1 + ((regs.ebx >> 12) & UINT32_C(0x000003FF));
	const uint32_t associativity = 1 + (regs.ebx >> 22);
	*package_cores_max = 1 + (regs.eax >> 26);
	/* Logical processors sharing this cache, rounded up to a power of 2. */
	const uint32_t processors = 1 + ((regs.eax >> 14) & UINT32_C(0x00000FFF));
	const uint32_t apic_bits = bit_length(processors);
	uint32_t flags = 0;
	if (regs.edx & UINT32_C(0x00000002)) {
		flags |= CPUINFO_CACHE_INCLUSIVE;
	}
	if (regs.edx & UINT32_C(0x00000004)) {
		flags |= CPUINFO_CACHE_COMPLEX_INDEXING;
	}
	switch (level) {
		case 1:
			switch (type) {
				case cache_type_unified:
					/* A unified L1 populates both the instruction and data slots. */
					cache->l1d = cache->l1i = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags | CPUINFO_CACHE_UNIFIED,
						.apic_bits = apic_bits};
					break;
				case cache_type_data:
					cache->l1d = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags,
						.apic_bits = apic_bits};
					break;
				case cache_type_instruction:
					cache->l1i = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags,
						.apic_bits = apic_bits};
					break;
			}
			break;
		case 2:
			switch (type) {
				case cache_type_instruction:
					cpuinfo_log_warning(
						"unexpected L2 instruction cache reported in leaf 0x00000004 is ignored");
					break;
				case cache_type_unified:
					flags |= CPUINFO_CACHE_UNIFIED;
					/* intentional fall-through: unified is recorded as data + flag */
				case cache_type_data:
					cache->l2 = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags,
						.apic_bits = apic_bits};
					break;
			}
			break;
		case 3:
			switch (type) {
				case cache_type_instruction:
					cpuinfo_log_warning(
						"unexpected L3 instruction cache reported in leaf 0x00000004 is ignored");
					break;
				case cache_type_unified:
					flags |= CPUINFO_CACHE_UNIFIED;
					/* intentional fall-through */
				case cache_type_data:
					cache->l3 = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags,
						.apic_bits = apic_bits};
					break;
			}
			break;
		case 4:
			switch (type) {
				case cache_type_instruction:
					cpuinfo_log_warning(
						"unexpected L4 instruction cache reported in leaf 0x00000004 is ignored");
					break;
				case cache_type_unified:
					flags |= CPUINFO_CACHE_UNIFIED;
					/* intentional fall-through */
				case cache_type_data:
					cache->l4 = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags,
						.apic_bits = apic_bits};
					break;
			}
			break;
		default:
			cpuinfo_log_warning(
				"unexpected L%" PRIu32 " cache reported in leaf 0x00000004 is ignored", level);
			break;
	}
	return true;
}
/*
 * Decode one sub-leaf of CPUID leaf 0x8000001D (AMD Cache Properties)
 * into the matching level of *cache.
 *
 * @param regs - register values from one cpuidex(0x8000001D, n) invocation.
 * @param[out] cache - structure whose l1i/l1d/l2/l3 entry is filled in.
 * @return false when the sub-leaf reports no cache (enumeration is done),
 *         true otherwise.
 */
bool cpuinfo_x86_decode_cache_properties(struct cpuid_regs regs, struct cpuinfo_x86_caches* cache) {
	const uint32_t type = regs.eax & UINT32_C(0x1F);
	if (type == cache_type_none) {
		return false;
	}
	const uint32_t level = (regs.eax >> 5) & UINT32_C(0x7);
	/* Logical processors sharing this cache; all fields are encoded minus one. */
	const uint32_t cores = 1 + ((regs.eax >> 14) & UINT32_C(0x00000FFF));
	const uint32_t apic_bits = bit_length(cores);
	const uint32_t sets = 1 + regs.ecx;
	const uint32_t line_size = 1 + (regs.ebx & UINT32_C(0x00000FFF));
	const uint32_t partitions = 1 + ((regs.ebx >> 12) & UINT32_C(0x000003FF));
	const uint32_t associativity = 1 + (regs.ebx >> 22);
	uint32_t flags = 0;
	if (regs.edx & UINT32_C(0x00000002)) {
		flags |= CPUINFO_CACHE_INCLUSIVE;
	}
	switch (level) {
		case 1:
			switch (type) {
				case cache_type_unified:
					/* A unified L1 populates both the instruction and data slots. */
					cache->l1d = cache->l1i = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags | CPUINFO_CACHE_UNIFIED,
						.apic_bits = apic_bits};
					break;
				case cache_type_data:
					cache->l1d = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags,
						.apic_bits = apic_bits};
					break;
				case cache_type_instruction:
					cache->l1i = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags,
						.apic_bits = apic_bits};
					break;
			}
			break;
		case 2:
			switch (type) {
				case cache_type_instruction:
					cpuinfo_log_warning(
						"unexpected L2 instruction cache reported in leaf 0x8000001D is ignored");
					break;
				case cache_type_unified:
					flags |= CPUINFO_CACHE_UNIFIED;
					/* intentional fall-through: unified is recorded as data + flag */
				case cache_type_data:
					cache->l2 = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags,
						.apic_bits = apic_bits};
					break;
			}
			break;
		case 3:
			switch (type) {
				case cache_type_instruction:
					cpuinfo_log_warning(
						"unexpected L3 instruction cache reported in leaf 0x8000001D is ignored");
					break;
				case cache_type_unified:
					flags |= CPUINFO_CACHE_UNIFIED;
					/* intentional fall-through */
				case cache_type_data:
					cache->l3 = (struct cpuinfo_x86_cache){
						.size = associativity * partitions * line_size * sets,
						.associativity = associativity,
						.sets = sets,
						.partitions = partitions,
						.line_size = line_size,
						.flags = flags,
						.apic_bits = apic_bits};
					break;
			}
			break;
		default:
			cpuinfo_log_warning(
				"unexpected L%" PRIu32 " cache reported in leaf 0x8000001D is ignored", level);
			break;
	}
	return true;
}

97
3rdparty/cpuinfo/src/x86/cache/init.c vendored Normal file
View File

@@ -0,0 +1,97 @@
#include <stdint.h>
#include <cpuinfo.h>
#include <cpuinfo/log.h>
#include <cpuinfo/utils.h>
#include <x86/api.h>
#include <x86/cpuid.h>
/* CPUID leaf 0x2 output viewed either as four registers or as 16 one-byte
 * cache/TLB descriptors. */
union cpuinfo_x86_cache_descriptors {
	struct cpuid_regs regs;
	uint8_t as_bytes[16];
};
/* Cache type encoded in bits 4:0 of EAX in CPUID cache-enumeration leaves. */
enum cache_type {
	cache_type_none = 0, /* terminating entry: no more caches */
	cache_type_data = 1,
	cache_type_instruction = 2,
	cache_type_unified = 3,
};
/*
 * Enumerate all caches and TLBs of the processor.
 *
 * Sources, in order:
 *  - CPUID leaf 0x2 byte descriptors (legacy encoding);
 *  - CPUID leaf 0x4 deterministic cache parameters (non-AMD/Hygon);
 *  - CPUID leaf 0x8000001D cache properties (AMD topology extensions).
 */
void cpuinfo_x86_detect_cache(
	uint32_t max_base_index,
	uint32_t max_extended_index,
	bool amd_topology_extensions,
	enum cpuinfo_vendor vendor,
	const struct cpuinfo_x86_model_info* model_info,
	struct cpuinfo_x86_caches* cache,
	struct cpuinfo_tlb* itlb_4KB,
	struct cpuinfo_tlb* itlb_2MB,
	struct cpuinfo_tlb* itlb_4MB,
	struct cpuinfo_tlb* dtlb0_4KB,
	struct cpuinfo_tlb* dtlb0_2MB,
	struct cpuinfo_tlb* dtlb0_4MB,
	struct cpuinfo_tlb* dtlb_4KB,
	struct cpuinfo_tlb* dtlb_2MB,
	struct cpuinfo_tlb* dtlb_4MB,
	struct cpuinfo_tlb* dtlb_1GB,
	struct cpuinfo_tlb* stlb2_4KB,
	struct cpuinfo_tlb* stlb2_2MB,
	struct cpuinfo_tlb* stlb2_1GB,
	uint32_t* log2_package_cores_max) {
	if (max_base_index >= 2) {
		union cpuinfo_x86_cache_descriptors descriptors;
		descriptors.regs = cpuid(2);
		/* The low byte of EAX holds how many times leaf 2 must be queried. */
		uint32_t iterations = (uint8_t)descriptors.as_bytes[0];
		if (iterations != 0) {
		iterate_descriptors:
			/* Byte 0 is the iteration count, not a descriptor. */
			for (uint32_t i = 1 /* note: not 0 */; i < 16; i++) {
				const uint8_t descriptor = descriptors.as_bytes[i];
				if (descriptor != 0) {
					cpuinfo_x86_decode_cache_descriptor(
						descriptor,
						vendor,
						model_info,
						cache,
						itlb_4KB,
						itlb_2MB,
						itlb_4MB,
						dtlb0_4KB,
						dtlb0_2MB,
						dtlb0_4MB,
						dtlb_4KB,
						dtlb_2MB,
						dtlb_4MB,
						dtlb_1GB,
						stlb2_4KB,
						stlb2_2MB,
						stlb2_1GB,
						&cache->prefetch_size);
				}
			}
			/* Re-query leaf 2 until the advertised iteration count is exhausted. */
			if (--iterations != 0) {
				descriptors.regs = cpuid(2);
				goto iterate_descriptors;
			}
		}
		/* AMD and Hygon report caches via leaf 0x8000001D instead of leaf 4. */
		if (vendor != cpuinfo_vendor_amd && vendor != cpuinfo_vendor_hygon && max_base_index >= 4) {
			struct cpuid_regs leaf4;
			uint32_t input_ecx = 0;
			uint32_t package_cores_max = 0;
			/* Walk sub-leaves until the terminating (type == none) entry. */
			do {
				leaf4 = cpuidex(4, input_ecx++);
			} while (cpuinfo_x86_decode_deterministic_cache_parameters(leaf4, cache, &package_cores_max));
			if (package_cores_max != 0) {
				*log2_package_cores_max = bit_length(package_cores_max);
			}
		}
	}
	if (amd_topology_extensions && max_extended_index >= UINT32_C(0x8000001D)) {
		struct cpuid_regs leaf0x8000001D;
		uint32_t input_ecx = 0;
		/* Walk sub-leaves until the terminating (type == none) entry. */
		do {
			leaf0x8000001D = cpuidex(UINT32_C(0x8000001D), input_ecx++);
		} while (cpuinfo_x86_decode_cache_properties(leaf0x8000001D, cache));
	}
}

77
3rdparty/cpuinfo/src/x86/cpuid.h vendored Normal file
View File

@@ -0,0 +1,77 @@
#pragma once
#include <stdint.h>
#if defined(__GNUC__)
#include <cpuid.h>
#elif defined(_MSC_VER)
#include <intrin.h>
#endif
#if CPUINFO_MOCK
#include <cpuinfo-mock.h>
#endif
#include <x86/api.h>
#if defined(__GNUC__) || defined(_MSC_VER)
/*
 * Execute CPUID with the given EAX input (ECX implicitly 0 on the GCC path)
 * and return all four output registers. Under CPUINFO_MOCK the values come
 * from the mock tables instead of the hardware.
 */
static inline struct cpuid_regs cpuid(uint32_t eax) {
#if CPUINFO_MOCK
	uint32_t regs_array[4];
	cpuinfo_mock_get_cpuid(eax, regs_array);
	return (struct cpuid_regs){
		.eax = regs_array[0],
		.ebx = regs_array[1],
		.ecx = regs_array[2],
		.edx = regs_array[3],
	};
#else
	struct cpuid_regs regs;
#if defined(__GNUC__)
	__cpuid(eax, regs.eax, regs.ebx, regs.ecx, regs.edx);
#else
	/* MSVC intrinsic uses a signed int array in EAX/EBX/ECX/EDX order. */
	int regs_array[4];
	__cpuid(regs_array, (int)eax);
	regs.eax = regs_array[0];
	regs.ebx = regs_array[1];
	regs.ecx = regs_array[2];
	regs.edx = regs_array[3];
#endif
	return regs;
#endif
}
/*
 * Execute CPUID with explicit EAX (leaf) and ECX (sub-leaf) inputs and
 * return all four output registers. Under CPUINFO_MOCK the values come
 * from the mock tables instead of the hardware.
 */
static inline struct cpuid_regs cpuidex(uint32_t eax, uint32_t ecx) {
#if CPUINFO_MOCK
	uint32_t regs_array[4];
	cpuinfo_mock_get_cpuidex(eax, ecx, regs_array);
	return (struct cpuid_regs){
		.eax = regs_array[0],
		.ebx = regs_array[1],
		.ecx = regs_array[2],
		.edx = regs_array[3],
	};
#else
	struct cpuid_regs regs;
#if defined(__GNUC__)
	__cpuid_count(eax, ecx, regs.eax, regs.ebx, regs.ecx, regs.edx);
#else
	/* MSVC intrinsic uses a signed int array in EAX/EBX/ECX/EDX order. */
	int regs_array[4];
	__cpuidex(regs_array, (int)eax, (int)ecx);
	regs.eax = regs_array[0];
	regs.ebx = regs_array[1];
	regs.ecx = regs_array[2];
	regs.edx = regs_array[3];
#endif
	return regs;
#endif
}
#endif
/*
 * Read the 64-bit extended control register selected by ext_ctrl_reg
 * (e.g. XCR0) via the XGETBV instruction.
 */
static inline uint64_t xgetbv(uint32_t ext_ctrl_reg) {
#ifdef _MSC_VER
	return (uint64_t)_xgetbv((unsigned int)ext_ctrl_reg);
#else
	uint32_t lo, hi;
	/* 0F 01 D0 are the raw opcode bytes of XGETBV (EDX:EAX <- XCR[ECX]);
	 * presumably encoded as bytes to support assemblers lacking the mnemonic. */
	__asm__(".byte 0x0F, 0x01, 0xD0" : "=a"(lo), "=d"(hi) : "c"(ext_ctrl_reg));
	return ((uint64_t)hi << 32) | (uint64_t)lo;
#endif
}

398
3rdparty/cpuinfo/src/x86/freebsd/init.c vendored Normal file
View File

@@ -0,0 +1,398 @@
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <cpuinfo.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>
#include <freebsd/api.h>
#include <x86/api.h>
/* Return the larger of two unsigned 32-bit values. */
static inline uint32_t max(uint32_t a, uint32_t b) {
	if (a < b) {
		return b;
	}
	return a;
}
/* Return a mask with the low `bits` bits set (callers pass bits < 32). */
static inline uint32_t bit_mask(uint32_t bits) {
	const uint32_t one = UINT32_C(1);
	return (one << bits) - one;
}
/*
 * Initialize cpuinfo's global tables on x86 FreeBSD.
 *
 * Detects the thread/core/package topology via sysctl (through
 * cpuinfo_freebsd_detect_topology), probes the processor once with CPUID,
 * replicates the result across all packages/cores/threads, derives the
 * cache sharing layout, and commits everything to the cpuinfo_* globals.
 * On any failure all partially built tables are freed and the globals
 * remain unset.
 *
 * Fix vs. previous revision: the error message in the L2 branch wrongly
 * said "threads_per_l1" (copy-paste from the L1 branch).
 */
void cpuinfo_x86_freebsd_init(void) {
	/* All allocations start NULL so the cleanup path can free unconditionally. */
	struct cpuinfo_processor* processors = NULL;
	struct cpuinfo_core* cores = NULL;
	struct cpuinfo_cluster* clusters = NULL;
	struct cpuinfo_package* packages = NULL;
	struct cpuinfo_cache* l1i = NULL;
	struct cpuinfo_cache* l1d = NULL;
	struct cpuinfo_cache* l2 = NULL;
	struct cpuinfo_cache* l3 = NULL;
	struct cpuinfo_cache* l4 = NULL;

	struct cpuinfo_freebsd_topology freebsd_topology = cpuinfo_freebsd_detect_topology();
	if (freebsd_topology.packages == 0) {
		cpuinfo_log_error("failed to detect topology");
		goto cleanup;
	}
	processors = calloc(freebsd_topology.threads, sizeof(struct cpuinfo_processor));
	if (processors == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %" PRIu32 " logical processors",
			freebsd_topology.threads * sizeof(struct cpuinfo_processor),
			freebsd_topology.threads);
		goto cleanup;
	}
	cores = calloc(freebsd_topology.cores, sizeof(struct cpuinfo_core));
	if (cores == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %" PRIu32 " cores",
			freebsd_topology.cores * sizeof(struct cpuinfo_core),
			freebsd_topology.cores);
		goto cleanup;
	}
	/* On x86 a cluster of cores is the biggest group of cores that shares a
	 * cache. */
	clusters = calloc(freebsd_topology.packages, sizeof(struct cpuinfo_cluster));
	if (clusters == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %" PRIu32 " core clusters",
			freebsd_topology.packages * sizeof(struct cpuinfo_cluster),
			freebsd_topology.packages);
		goto cleanup;
	}
	packages = calloc(freebsd_topology.packages, sizeof(struct cpuinfo_package));
	if (packages == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %" PRIu32 " physical packages",
			freebsd_topology.packages * sizeof(struct cpuinfo_package),
			freebsd_topology.packages);
		goto cleanup;
	}
	/* Probe the current processor once; assume all processors are identical. */
	struct cpuinfo_x86_processor x86_processor;
	memset(&x86_processor, 0, sizeof(x86_processor));
	cpuinfo_x86_init_processor(&x86_processor);
	char brand_string[48];
	cpuinfo_x86_normalize_brand_string(x86_processor.brand_string, brand_string);
	const uint32_t threads_per_core = freebsd_topology.threads_per_core;
	const uint32_t threads_per_package = freebsd_topology.threads / freebsd_topology.packages;
	const uint32_t cores_per_package = freebsd_topology.cores / freebsd_topology.packages;
	/* One cluster per package; all entries share the probed vendor/uarch. */
	for (uint32_t i = 0; i < freebsd_topology.packages; i++) {
		clusters[i] = (struct cpuinfo_cluster){
			.processor_start = i * threads_per_package,
			.processor_count = threads_per_package,
			.core_start = i * cores_per_package,
			.core_count = cores_per_package,
			.cluster_id = 0,
			.package = packages + i,
			.vendor = x86_processor.vendor,
			.uarch = x86_processor.uarch,
			.cpuid = x86_processor.cpuid,
		};
		packages[i].processor_start = i * threads_per_package;
		packages[i].processor_count = threads_per_package;
		packages[i].core_start = i * cores_per_package;
		packages[i].core_count = cores_per_package;
		packages[i].cluster_start = i;
		packages[i].cluster_count = 1;
		cpuinfo_x86_format_package_name(x86_processor.vendor, brand_string, packages[i].name);
	}
	for (uint32_t i = 0; i < freebsd_topology.cores; i++) {
		cores[i] = (struct cpuinfo_core){
			.processor_start = i * threads_per_core,
			.processor_count = threads_per_core,
			.core_id = i % cores_per_package,
			.cluster = clusters + i / cores_per_package,
			.package = packages + i / cores_per_package,
			.vendor = x86_processor.vendor,
			.uarch = x86_processor.uarch,
			.cpuid = x86_processor.cpuid,
		};
	}
	for (uint32_t i = 0; i < freebsd_topology.threads; i++) {
		const uint32_t smt_id = i % threads_per_core;
		const uint32_t core_id = i / threads_per_core;
		const uint32_t package_id = i / threads_per_package;
		/* Reconstruct APIC IDs from topology components */
		const uint32_t thread_bits_mask = bit_mask(x86_processor.topology.thread_bits_length);
		const uint32_t core_bits_mask = bit_mask(x86_processor.topology.core_bits_length);
		const uint32_t package_bits_offset =
			max(x86_processor.topology.thread_bits_offset + x86_processor.topology.thread_bits_length,
			    x86_processor.topology.core_bits_offset + x86_processor.topology.core_bits_length);
		const uint32_t apic_id = ((smt_id & thread_bits_mask) << x86_processor.topology.thread_bits_offset) |
			((core_id & core_bits_mask) << x86_processor.topology.core_bits_offset) |
			(package_id << package_bits_offset);
		cpuinfo_log_debug("reconstructed APIC ID 0x%08" PRIx32 " for thread %" PRIu32, apic_id, i);
		processors[i].smt_id = smt_id;
		processors[i].core = cores + i / threads_per_core;
		processors[i].cluster = clusters + i / threads_per_package;
		processors[i].package = packages + i / threads_per_package;
		processors[i].apic_id = apic_id;
	}
	/* Derive how many threads share each cache level; the FreeBSD kernel
	 * does not report this, so heuristics based on the level are used. */
	uint32_t threads_per_l1 = 0, l1_count = 0;
	if (x86_processor.cache.l1i.size != 0 || x86_processor.cache.l1d.size != 0) {
		/* Assume that threads on the same core share L1 */
		threads_per_l1 = freebsd_topology.threads / freebsd_topology.cores;
		if (threads_per_l1 == 0) {
			cpuinfo_log_error("failed to detect threads_per_l1");
			goto cleanup;
		}
		cpuinfo_log_warning(
			"freebsd kernel did not report number of "
			"threads sharing L1 cache; assume %" PRIu32,
			threads_per_l1);
		l1_count = freebsd_topology.threads / threads_per_l1;
		cpuinfo_log_debug("detected %" PRIu32 " L1 caches", l1_count);
	}
	uint32_t threads_per_l2 = 0, l2_count = 0;
	if (x86_processor.cache.l2.size != 0) {
		if (x86_processor.cache.l3.size != 0) {
			/* This is not a last-level cache; assume that threads
			 * on the same core share L2 */
			threads_per_l2 = freebsd_topology.threads / freebsd_topology.cores;
		} else {
			/* This is a last-level cache; assume that threads on
			 * the same package share L2 */
			threads_per_l2 = freebsd_topology.threads / freebsd_topology.packages;
		}
		if (threads_per_l2 == 0) {
			/* Fixed: message previously said threads_per_l1. */
			cpuinfo_log_error("failed to detect threads_per_l2");
			goto cleanup;
		}
		cpuinfo_log_warning(
			"freebsd kernel did not report number of "
			"threads sharing L2 cache; assume %" PRIu32,
			threads_per_l2);
		l2_count = freebsd_topology.threads / threads_per_l2;
		cpuinfo_log_debug("detected %" PRIu32 " L2 caches", l2_count);
	}
	uint32_t threads_per_l3 = 0, l3_count = 0;
	if (x86_processor.cache.l3.size != 0) {
		/*
		 * Assume that threads on the same package share L3.
		 * However, is it not necessarily the last-level cache (there
		 * may be L4 cache as well)
		 */
		threads_per_l3 = freebsd_topology.threads / freebsd_topology.packages;
		if (threads_per_l3 == 0) {
			cpuinfo_log_error("failed to detect threads_per_l3");
			goto cleanup;
		}
		cpuinfo_log_warning(
			"freebsd kernel did not report number of "
			"threads sharing L3 cache; assume %" PRIu32,
			threads_per_l3);
		l3_count = freebsd_topology.threads / threads_per_l3;
		cpuinfo_log_debug("detected %" PRIu32 " L3 caches", l3_count);
	}
	uint32_t threads_per_l4 = 0, l4_count = 0;
	if (x86_processor.cache.l4.size != 0) {
		/*
		 * Assume that all threads share this L4.
		 * As of now, L4 cache exists only on notebook x86 CPUs, which
		 * are single-package, but multi-socket systems could have
		 * shared L4 (like on IBM POWER8).
		 */
		threads_per_l4 = freebsd_topology.threads;
		if (threads_per_l4 == 0) {
			cpuinfo_log_error("failed to detect threads_per_l4");
			goto cleanup;
		}
		cpuinfo_log_warning(
			"freebsd kernel did not report number of "
			"threads sharing L4 cache; assume %" PRIu32,
			threads_per_l4);
		l4_count = freebsd_topology.threads / threads_per_l4;
		cpuinfo_log_debug("detected %" PRIu32 " L4 caches", l4_count);
	}
	if (x86_processor.cache.l1i.size != 0) {
		l1i = calloc(l1_count, sizeof(struct cpuinfo_cache));
		if (l1i == NULL) {
			cpuinfo_log_error(
				"failed to allocate %zu bytes for descriptions of "
				"%" PRIu32 " L1I caches",
				l1_count * sizeof(struct cpuinfo_cache),
				l1_count);
			goto cleanup;
		}
		for (uint32_t c = 0; c < l1_count; c++) {
			l1i[c] = (struct cpuinfo_cache){
				.size = x86_processor.cache.l1i.size,
				.associativity = x86_processor.cache.l1i.associativity,
				.sets = x86_processor.cache.l1i.sets,
				.partitions = x86_processor.cache.l1i.partitions,
				.line_size = x86_processor.cache.l1i.line_size,
				.flags = x86_processor.cache.l1i.flags,
				.processor_start = c * threads_per_l1,
				.processor_count = threads_per_l1,
			};
		}
		for (uint32_t t = 0; t < freebsd_topology.threads; t++) {
			processors[t].cache.l1i = &l1i[t / threads_per_l1];
		}
	}
	if (x86_processor.cache.l1d.size != 0) {
		l1d = calloc(l1_count, sizeof(struct cpuinfo_cache));
		if (l1d == NULL) {
			cpuinfo_log_error(
				"failed to allocate %zu bytes for descriptions of "
				"%" PRIu32 " L1D caches",
				l1_count * sizeof(struct cpuinfo_cache),
				l1_count);
			goto cleanup;
		}
		for (uint32_t c = 0; c < l1_count; c++) {
			l1d[c] = (struct cpuinfo_cache){
				.size = x86_processor.cache.l1d.size,
				.associativity = x86_processor.cache.l1d.associativity,
				.sets = x86_processor.cache.l1d.sets,
				.partitions = x86_processor.cache.l1d.partitions,
				.line_size = x86_processor.cache.l1d.line_size,
				.flags = x86_processor.cache.l1d.flags,
				.processor_start = c * threads_per_l1,
				.processor_count = threads_per_l1,
			};
		}
		for (uint32_t t = 0; t < freebsd_topology.threads; t++) {
			processors[t].cache.l1d = &l1d[t / threads_per_l1];
		}
	}
	if (l2_count != 0) {
		l2 = calloc(l2_count, sizeof(struct cpuinfo_cache));
		if (l2 == NULL) {
			cpuinfo_log_error(
				"failed to allocate %zu bytes for descriptions of "
				"%" PRIu32 " L2 caches",
				l2_count * sizeof(struct cpuinfo_cache),
				l2_count);
			goto cleanup;
		}
		for (uint32_t c = 0; c < l2_count; c++) {
			l2[c] = (struct cpuinfo_cache){
				.size = x86_processor.cache.l2.size,
				.associativity = x86_processor.cache.l2.associativity,
				.sets = x86_processor.cache.l2.sets,
				.partitions = x86_processor.cache.l2.partitions,
				.line_size = x86_processor.cache.l2.line_size,
				.flags = x86_processor.cache.l2.flags,
				.processor_start = c * threads_per_l2,
				.processor_count = threads_per_l2,
			};
		}
		for (uint32_t t = 0; t < freebsd_topology.threads; t++) {
			processors[t].cache.l2 = &l2[t / threads_per_l2];
		}
	}
	if (l3_count != 0) {
		l3 = calloc(l3_count, sizeof(struct cpuinfo_cache));
		if (l3 == NULL) {
			cpuinfo_log_error(
				"failed to allocate %zu bytes for descriptions of "
				"%" PRIu32 " L3 caches",
				l3_count * sizeof(struct cpuinfo_cache),
				l3_count);
			goto cleanup;
		}
		for (uint32_t c = 0; c < l3_count; c++) {
			l3[c] = (struct cpuinfo_cache){
				.size = x86_processor.cache.l3.size,
				.associativity = x86_processor.cache.l3.associativity,
				.sets = x86_processor.cache.l3.sets,
				.partitions = x86_processor.cache.l3.partitions,
				.line_size = x86_processor.cache.l3.line_size,
				.flags = x86_processor.cache.l3.flags,
				.processor_start = c * threads_per_l3,
				.processor_count = threads_per_l3,
			};
		}
		for (uint32_t t = 0; t < freebsd_topology.threads; t++) {
			processors[t].cache.l3 = &l3[t / threads_per_l3];
		}
	}
	if (l4_count != 0) {
		l4 = calloc(l4_count, sizeof(struct cpuinfo_cache));
		if (l4 == NULL) {
			cpuinfo_log_error(
				"failed to allocate %zu bytes for descriptions of "
				"%" PRIu32 " L4 caches",
				l4_count * sizeof(struct cpuinfo_cache),
				l4_count);
			goto cleanup;
		}
		for (uint32_t c = 0; c < l4_count; c++) {
			l4[c] = (struct cpuinfo_cache){
				.size = x86_processor.cache.l4.size,
				.associativity = x86_processor.cache.l4.associativity,
				.sets = x86_processor.cache.l4.sets,
				.partitions = x86_processor.cache.l4.partitions,
				.line_size = x86_processor.cache.l4.line_size,
				.flags = x86_processor.cache.l4.flags,
				.processor_start = c * threads_per_l4,
				.processor_count = threads_per_l4,
			};
		}
		for (uint32_t t = 0; t < freebsd_topology.threads; t++) {
			processors[t].cache.l4 = &l4[t / threads_per_l4];
		}
	}
	/* Commit changes */
	cpuinfo_processors = processors;
	cpuinfo_cores = cores;
	cpuinfo_clusters = clusters;
	cpuinfo_packages = packages;
	cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
	cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
	cpuinfo_cache[cpuinfo_cache_level_2] = l2;
	cpuinfo_cache[cpuinfo_cache_level_3] = l3;
	cpuinfo_cache[cpuinfo_cache_level_4] = l4;
	cpuinfo_processors_count = freebsd_topology.threads;
	cpuinfo_cores_count = freebsd_topology.cores;
	cpuinfo_clusters_count = freebsd_topology.packages;
	cpuinfo_packages_count = freebsd_topology.packages;
	cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1_count;
	cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1_count;
	cpuinfo_cache_count[cpuinfo_cache_level_2] = l2_count;
	cpuinfo_cache_count[cpuinfo_cache_level_3] = l3_count;
	cpuinfo_cache_count[cpuinfo_cache_level_4] = l4_count;
	cpuinfo_max_cache_size = cpuinfo_compute_max_cache_size(&processors[0]);
	cpuinfo_global_uarch = (struct cpuinfo_uarch_info){
		.uarch = x86_processor.uarch,
		.cpuid = x86_processor.cpuid,
		.processor_count = freebsd_topology.threads,
		.core_count = freebsd_topology.cores,
	};
	/* Ensure all writes above are visible before the initialized flag. */
	__sync_synchronize();
	cpuinfo_is_initialized = true;
	/* Null out the locals so the fallthrough cleanup below does not free
	 * the tables that were just committed. */
	processors = NULL;
	cores = NULL;
	clusters = NULL;
	packages = NULL;
	l1i = l1d = l2 = l3 = l4 = NULL;
cleanup:
	free(processors);
	free(cores);
	free(clusters);
	free(packages);
	free(l1i);
	free(l1d);
	free(l2);
	free(l3);
	free(l4);
}

18
3rdparty/cpuinfo/src/x86/info.c vendored Normal file
View File

@@ -0,0 +1,18 @@
#include <stdint.h>
#include <cpuinfo.h>
#include <x86/api.h>
struct cpuinfo_x86_model_info cpuinfo_x86_decode_model_info(uint32_t eax) {
struct cpuinfo_x86_model_info model_info;
model_info.stepping = eax & 0xF;
model_info.base_model = (eax >> 4) & 0xF;
model_info.base_family = (eax >> 8) & 0xF;
model_info.processor_type = (eax >> 12) & 0x3;
model_info.extended_model = (eax >> 16) & 0xF;
model_info.extended_family = (eax >> 20) & 0xFF;
model_info.family = model_info.base_family + model_info.extended_family;
model_info.model = model_info.base_model + (model_info.extended_model << 4);
return model_info;
}

Some files were not shown because too many files have changed in this diff Show More