First Commit

This commit is contained in:
2025-11-18 14:18:26 -07:00
parent 33eb6e3707
commit 27277ec342
6106 changed files with 3571167 additions and 0 deletions

View File

@@ -0,0 +1,617 @@
// SPDX-FileCopyrightText: 2002-2025 PCSX2 Dev Team
// SPDX-License-Identifier: GPL-3.0+
#include "common/Assertions.h"
#include "common/BitUtils.h"
#include "common/Console.h"
#include "common/CrashHandler.h"
#include "common/Darwin/DarwinMisc.h"
#include "common/Error.h"
#include "common/HostSys.h"
#include "common/Pcsx2Types.h"
#include "common/Threading.h"
#include "common/WindowInfo.h"

#include <csignal>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <mutex>
#include <optional>
#include <string>
#include <utility>

#include <sys/mman.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <time.h>

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_time.h>
#include <mach/mach_vm.h>
#include <mach/task.h>
#include <mach/vm_map.h>

#include <ApplicationServices/ApplicationServices.h>
#include <IOKit/pwr_mgt/IOPMLib.h>
// Darwin (OSX) is a bit different from Linux when requesting properties of
// the OS because of its BSD/Mach heritage. Helpfully, most of this code
// should translate pretty well to other *BSD systems. (e.g.: the sysctl(3)
// interface).
//
// For an overview of all of Darwin's sysctls, check:
// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man3/sysctl.3.html
// Return the total physical memory on the machine, in bytes. Returns 0 on
// failure (not supported by the operating system).
u64 GetPhysicalMemory()
{
	int query[] = {CTL_HW, HW_MEMSIZE};
	u64 total_bytes = 0;
	size_t out_len = sizeof(total_bytes);
	if (sysctl(query, std::size(query), &total_bytes, &out_len, nullptr, 0) < 0)
		perror("sysctl:");
	return total_bytes;
}
// Return the available physical memory (free + inactive pages) in bytes,
// or 0 on failure.
u64 GetAvailablePhysicalMemory()
{
	const mach_port_t host_port = mach_host_self();
	vm_size_t page_size;
	// Get the system's page size.
	if (host_page_size(host_port, &page_size) != KERN_SUCCESS)
		return 0;
	vm_statistics64_data_t vm_stat;
	mach_msg_type_number_t host_size = HOST_VM_INFO64_COUNT;
	// host_statistics64() requires the 64-bit statistics flavor; the 32-bit
	// HOST_VM_INFO flavor previously passed here is only valid with
	// host_statistics() and can fail or return mismatched data.
	if (host_statistics64(host_port, HOST_VM_INFO64, reinterpret_cast<host_info64_t>(&vm_stat), &host_size) != KERN_SUCCESS)
		return 0;
	// Count free plus inactive (reclaimable cache) pages as "available".
	const u64 free_pages = static_cast<u64>(vm_stat.free_count);
	const u64 inactive_pages = static_cast<u64>(vm_stat.inactive_count);
	return (free_pages + inactive_pages) * page_size;
}
// Cached mach timebase ratio (ticks <-> nanoseconds), filled once by the
// initializer below and reused by SleepUntil().
static mach_timebase_info_data_t s_timebase_info;
// Performance-counter frequency in Hz, computed once at startup. A tick of
// mach_absolute_time() corresponds to numer/denom nanoseconds, so the tick
// rate is 1e9 * denom / numer ticks per second. abort() if the timebase
// can't be queried, since all timing would be broken anyway.
static const u64 tickfreq = []() {
	if (mach_timebase_info(&s_timebase_info) != KERN_SUCCESS)
		abort();
	return (u64)1e9 * (u64)s_timebase_info.denom / (u64)s_timebase_info.numer;
}();
// returns the performance-counter frequency: ticks per second (Hz)
//
// usage:
//   u64 seconds_passed = GetCPUTicks() / GetTickFrequency();
//   u64 millis_passed = (GetCPUTicks() * 1000) / GetTickFrequency();
//
// NOTE: multiply, subtract, ... your ticks before dividing by
// GetTickFrequency() to maintain good precision.
u64 GetTickFrequency()
{
	return tickfreq; // computed once by the lambda initializer above
}
// return the number of "ticks" since some arbitrary, fixed time in the
// past. On OSX x86(-64), this is actually the number of nanoseconds passed,
// because mach_timebase_info.numer == denom == 1. So "ticks" ==
// nanoseconds. Use GetTickFrequency() to convert tick deltas to wall time
// portably.
u64 GetCPUTicks()
{
	return mach_absolute_time();
}
// Reads a string-valued sysctl. Returns an empty string on failure.
static std::string sysctl_str(int category, int name)
{
	int mib[] = {category, name};
	// Query the required size first so long values aren't truncated (the
	// previous fixed 32-byte buffer failed for longer strings, and the
	// result of a failed sysctl() was returned as if it were valid).
	size_t len = 0;
	if (sysctl(mib, std::size(mib), nullptr, &len, nullptr, 0) != 0 || len == 0)
		return {};
	std::string buf(len, '\0');
	if (sysctl(mib, std::size(mib), buf.data(), &len, nullptr, 0) != 0)
		return {};
	// len includes the terminating NUL; drop it. (If the value grew between
	// the two calls, the kernel reports the shorter copied length in len.)
	buf.resize(len > 0 ? len - 1 : 0);
	return buf;
}
// Reads a fixed-size scalar sysctl value via sysctlbyname(). Returns
// nullopt if the call fails or the kernel returns a value whose size
// doesn't match T (i.e. T is the wrong type for this sysctl).
template <typename T>
static std::optional<T> sysctlbyname_T(const char* name)
{
	T output = 0;
	size_t output_size = sizeof(output);
	if (sysctlbyname(name, &output, &output_size, nullptr, 0) != 0)
		return std::nullopt;
	// A size mismatch means T doesn't match the sysctl's actual type.
	if (output_size != sizeof(output))
	{
		ERROR_LOG("(DarwinMisc) sysctl {} gave unexpected size {}", name, output_size);
		return std::nullopt;
	}
	return output;
}
// Builds a human-readable "<ostype> <osrelease> <machine>" string,
// e.g. "Darwin 23.0.0 arm64".
std::string GetOSVersionString()
{
	const std::string os_type = sysctl_str(CTL_KERN, KERN_OSTYPE);
	const std::string os_release = sysctl_str(CTL_KERN, KERN_OSRELEASE);
	const std::string machine = sysctl_str(CTL_HW, HW_MACHINE);
	std::string version;
	version.reserve(os_type.size() + os_release.size() + machine.size() + 2);
	version.append(os_type).append(1, ' ').append(os_release).append(1, ' ').append(machine);
	return version;
}
// Power-management assertion currently held against display sleep; 0 when
// none is active.
static IOPMAssertionID s_pm_assertion;
// Prevents (inhibit == true) or re-allows display sleep. Any previously
// held assertion is always released first, so repeated calls don't leak
// assertions. Always reports success.
bool Common::InhibitScreensaver(bool inhibit)
{
	if (s_pm_assertion)
	{
		IOPMAssertionRelease(s_pm_assertion);
		s_pm_assertion = 0;
	}
	if (inhibit)
		IOPMAssertionCreateWithName(kIOPMAssertionTypePreventUserIdleDisplaySleep, kIOPMAssertionLevelOn, CFSTR("Playing a game"), &s_pm_assertion);
	return true;
}
// Warps the cursor to the given global display coordinates.
void Common::SetMousePosition(int x, int y)
{
	// Posting synthetic mouse-move events proved unreliable, and calling
	// CGWarpMouseCursorPosition while the cursor is still associated with
	// the mouse makes it feel "sticky" — so detach, warp, then restore the
	// default association.
	CGAssociateMouseAndMouseCursorPosition(false);
	CGWarpMouseCursorPosition(CGPointMake(x, y));
	CGAssociateMouseAndMouseCursorPosition(true);
}
// Event tap and run-loop source created by AttachMousePositionCb(), torn
// down by DetachMousePositionCb().
CFMachPortRef mouseEventTap = nullptr;
CFRunLoopSourceRef mouseRunLoopSource = nullptr;
// User callback receiving global mouse coordinates.
static std::function<void(int, int)> fnMouseMoveCb;
// Event-tap trampoline: forwards mouse-move locations to fnMouseMoveCb and
// passes the event through unmodified so the system still delivers it.
CGEventRef mouseMoveCallback(CGEventTapProxy, CGEventType type, CGEventRef event, void* arg)
{
	if (type == kCGEventMouseMoved)
	{
		const CGPoint location = CGEventGetLocation(event);
		fnMouseMoveCb(location.x, location.y);
	}
	return event;
}
// Installs a session-wide event tap that reports global mouse positions to
// `cb`. Returns false if the tap could not be created (e.g. missing
// accessibility permission).
bool Common::AttachMousePositionCb(std::function<void(int, int)> cb)
{
	// Event taps need the accessibility permission; warn (but still try)
	// when we don't have it.
	if (!AXIsProcessTrusted())
	{
		Console.Warning("Process isn't trusted with accessibility permissions. Mouse tracking will not work!");
	}
	// Move rather than copy the std::function (the old code copied it,
	// duplicating any captured state).
	fnMouseMoveCb = std::move(cb);
	mouseEventTap = CGEventTapCreate(kCGSessionEventTap, kCGHeadInsertEventTap, kCGEventTapOptionDefault,
		CGEventMaskBit(kCGEventMouseMoved), mouseMoveCallback, nullptr);
	if (!mouseEventTap)
	{
		Console.Warning("Unable to create mouse moved event tap. Mouse tracking will not work!");
		return false;
	}
	mouseRunLoopSource = CFMachPortCreateRunLoopSource(kCFAllocatorDefault, mouseEventTap, 0);
	CFRunLoopAddSource(CFRunLoopGetCurrent(), mouseRunLoopSource, kCFRunLoopCommonModes);
	return true;
}
void Common::DetachMousePositionCb()
{
if (mouseRunLoopSource)
{
CFRunLoopRemoveSource(CFRunLoopGetCurrent(), mouseRunLoopSource, kCFRunLoopCommonModes);
CFRelease(mouseRunLoopSource);
}
if (mouseEventTap)
{
CFRelease(mouseEventTap);
}
mouseRunLoopSource = nullptr;
mouseEventTap = nullptr;
}
// Suspends the calling thread for (at least) the given milliseconds.
void Threading::Sleep(int ms)
{
	// usleep() takes microseconds.
	usleep(static_cast<useconds_t>(ms) * 1000);
}
// Sleeps until GetCPUTicks() reaches `ticks`; returns immediately if that
// time is already past.
void Threading::SleepUntil(u64 ticks)
{
	// This is definitely sub-optimal, but apparently clock_nanosleep() doesn't exist.
	const s64 diff = static_cast<s64>(ticks - GetCPUTicks());
	if (diff <= 0)
		return;
	// mach_absolute_time() ticks convert to nanoseconds by multiplying by
	// numer/denom (see mach_timebase_info). The previous code had the ratio
	// inverted (denom/numer), which slept far too short whenever the
	// timebase is not 1:1.
	const u64 nanos = (static_cast<u64>(diff) * static_cast<u64>(s_timebase_info.numer)) / static_cast<u64>(s_timebase_info.denom);
	if (nanos == 0)
		return;
	struct timespec ts;
	ts.tv_sec = nanos / 1000000000ULL;
	ts.tv_nsec = nanos % 1000000000ULL;
	nanosleep(&ts, nullptr);
}
// Enumerates CPU core classes. On systems exposing hw.nperflevels (multiple
// performance levels, e.g. performance/efficiency cores), one entry per
// level is returned; otherwise a single "Default" entry is built from
// hw.physicalcpu / hw.logicalcpu. Returns an empty vector if nothing could
// be queried.
std::vector<DarwinMisc::CPUClass> DarwinMisc::GetCPUClasses()
{
	std::vector<CPUClass> out;
	if (std::optional<u32> nperflevels = sysctlbyname_T<u32>("hw.nperflevels"))
	{
		char name[64];
		for (u32 i = 0; i < *nperflevels; i++)
		{
			// Per-level core counts, e.g. hw.perflevel0.physicalcpu.
			snprintf(name, sizeof(name), "hw.perflevel%u.physicalcpu", i);
			std::optional<u32> physicalcpu = sysctlbyname_T<u32>(name);
			snprintf(name, sizeof(name), "hw.perflevel%u.logicalcpu", i);
			std::optional<u32> logicalcpu = sysctlbyname_T<u32>(name);
			// Human-readable level name; fall back to "???" on failure.
			char levelname[64];
			size_t levelname_size = sizeof(levelname);
			snprintf(name, sizeof(name), "hw.perflevel%u.name", i);
			if (0 != sysctlbyname(name, levelname, &levelname_size, nullptr, 0))
				strcpy(levelname, "???");
			// Skip levels with incomplete core-count data.
			if (!physicalcpu.has_value() || !logicalcpu.has_value())
			{
				Console.Warning("(DarwinMisc) Perf level %u is missing data on %s cpus!",
					i, !physicalcpu.has_value() ? "physical" : "logical");
				continue;
			}
			out.push_back({levelname, *physicalcpu, *logicalcpu});
		}
	}
	else if (std::optional<u32> physcpu = sysctlbyname_T<u32>("hw.physicalcpu"))
	{
		out.push_back({"Default", *physcpu, sysctlbyname_T<u32>("hw.logicalcpu").value_or(0)});
	}
	else
	{
		Console.Warning("(DarwinMisc) Couldn't get cpu core count!");
	}
	return out;
}
size_t HostSys::GetRuntimePageSize()
{
return sysctlbyname_T<u32>("hw.pagesize").value_or(0);
}
size_t HostSys::GetRuntimeCacheLineSize()
{
return static_cast<size_t>(std::max<s64>(sysctlbyname_T<s64>("hw.cachelinesize").value_or(0), 0));
}
// Translates a PageProtectionMode into the equivalent Mach protection
// bits. Executable pages are always made readable as well.
static __ri vm_prot_t MachProt(const PageProtectionMode& mode)
{
	vm_prot_t prot = VM_PROT_NONE;
	if (mode.CanRead())
		prot |= VM_PROT_READ;
	if (mode.CanWrite())
		prot |= VM_PROT_WRITE;
	if (mode.CanExecute())
		prot |= VM_PROT_EXECUTE | VM_PROT_READ;
	return prot;
}
// Allocates `size` bytes of virtual memory with the given protection,
// optionally at a fixed address `base`. Returns the mapped address, or
// nullptr on failure / when mode is no-access.
void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
{
	pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
	// A no-access allocation is useless; don't bother asking the kernel.
	if (mode.IsNone())
		return nullptr;
#ifdef __aarch64__
	// We can't allocate executable memory with mach_vm_allocate() on Apple Silicon.
	// Instead, we need to use MAP_JIT with mmap(), which does not support fixed mappings.
	if (mode.CanExecute())
	{
		if (base)
			return nullptr;
		const u32 mmap_prot = mode.CanWrite() ? (PROT_READ | PROT_WRITE | PROT_EXEC) : (PROT_READ | PROT_EXEC);
		const u32 flags = MAP_PRIVATE | MAP_ANON | MAP_JIT;
		void* const res = mmap(nullptr, size, mmap_prot, flags, -1, 0);
		return (res == MAP_FAILED) ? nullptr : res;
	}
#endif
	// Allocate first (fixed if a base was requested), then apply the
	// requested protection in a second step.
	kern_return_t ret = mach_vm_allocate(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&base), size,
		base ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE);
	if (ret != KERN_SUCCESS)
	{
		DEV_LOG("mach_vm_allocate() returned {}", ret);
		return nullptr;
	}
	ret = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size, false, MachProt(mode));
	if (ret != KERN_SUCCESS)
	{
		// Don't leak the allocation if protecting it failed.
		DEV_LOG("mach_vm_protect() returned {}", ret);
		mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size);
		return nullptr;
	}
	return base;
}
void HostSys::Munmap(void* base, size_t size)
{
if (!base)
return;
mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(base), size);
}
// Changes the protection on an existing page-aligned region. Hard-fails
// (pxFailRel) if the kernel refuses, since callers have no recovery path.
void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode)
{
	pxAssertMsg((size & (__pagesize - 1)) == 0, "Size is page aligned");
	kern_return_t res = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size, false,
		MachProt(mode));
	if (res != KERN_SUCCESS) [[unlikely]]
	{
		ERROR_LOG("mach_vm_protect() failed: {}", res);
		pxFailRel("mach_vm_protect() failed");
	}
}
// Shared memory on this platform is identified by a Mach port rather than
// a name (see CreateSharedMemory()), so no mapping name is needed.
std::string HostSys::GetFileMappingName(const char* prefix)
{
	return std::string();
}
// Creates an anonymous shared-memory object of `size` bytes and returns its
// Mach memory-entry port packed into a void* handle, or nullptr on failure.
// The `name` parameter is unused on this platform (see GetFileMappingName).
void* HostSys::CreateSharedMemory(const char* name, size_t size)
{
	mach_vm_size_t vm_size = size;
	mach_port_t port;
	const kern_return_t res = mach_make_memory_entry_64(
		mach_task_self(), &vm_size, 0, MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port, MACH_PORT_NULL);
	if (res != KERN_SUCCESS)
	{
		ERROR_LOG("mach_make_memory_entry_64() failed: {}", res);
		return nullptr;
	}
	// Smuggle the port through the generic void* handle type.
	return reinterpret_cast<void*>(static_cast<uintptr_t>(port));
}
void HostSys::DestroySharedMemory(void* ptr)
{
mach_port_deallocate(mach_task_self(), static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(ptr)));
}
// Maps `size` bytes of the shared-memory object `handle` (a Mach memory
// entry port from CreateSharedMemory()) starting at `offset`, at `baseaddr`
// if given or anywhere otherwise. Returns the mapped address, or nullptr
// on failure.
void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
{
	mach_vm_address_t ptr = reinterpret_cast<mach_vm_address_t>(baseaddr);
	// Current protection comes from `mode`; maximum stays read/write so the
	// mapping can be reprotected later via MemProtect().
	const kern_return_t res = mach_vm_map(mach_task_self(), &ptr, size, 0, baseaddr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
		static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(handle)), offset, FALSE,
		MachProt(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
	if (res != KERN_SUCCESS)
	{
		ERROR_LOG("mach_vm_map() failed: {}", res);
		return nullptr;
	}
	return reinterpret_cast<void*>(ptr);
}
void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
{
const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size);
if (res != KERN_SUCCESS)
pxFailRel("Failed to unmap shared memory");
}
#ifdef _M_ARM64
// Flushes/invalidates the CPU caches for a range of newly-written code so
// it can be safely executed (ARM64 does not keep I- and D-caches coherent
// automatically).
void HostSys::FlushInstructionCache(void* address, u32 size)
{
	__builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
}
#endif
// Takes ownership of an address-space reservation made by Create().
SharedMemoryMappingArea::SharedMemoryMappingArea(u8* base_ptr, size_t size, size_t num_pages)
	: m_base_ptr(base_ptr)
	, m_size(size)
	, m_num_pages(num_pages)
{
}
// All Map()ed sub-ranges must have been Unmap()ed before destruction; the
// whole reservation is then released in one deallocate call.
SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
	pxAssertRel(m_num_mappings == 0, "No mappings left");
	if (mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(m_base_ptr), m_size) != KERN_SUCCESS)
		pxFailRel("Failed to release shared memory area");
}
// Reserves a `size`-byte contiguous region of address space with no access
// permissions; Map()/Unmap() later carve real mappings out of it. Returns
// null on failure.
std::unique_ptr<SharedMemoryMappingArea> SharedMemoryMappingArea::Create(size_t size)
{
	pxAssertRel(Common::IsAlignedPow2(size, __pagesize), "Size is page aligned");
	mach_vm_address_t alloc = 0;
	const kern_return_t res =
		mach_vm_map(mach_task_self(), &alloc, size, 0, VM_FLAGS_ANYWHERE,
			MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
	if (res != KERN_SUCCESS)
	{
		ERROR_LOG("mach_vm_map() failed: {}", res);
		return {};
	}
	return std::unique_ptr<SharedMemoryMappingArea>(new SharedMemoryMappingArea(reinterpret_cast<u8*>(alloc), size, size / __pagesize));
}
// Maps part of a shared-memory object over a sub-range of the reserved
// area; VM_FLAGS_OVERWRITE replaces the placeholder reservation in place.
// `map_base` must lie within the area. Returns the mapped address or
// nullptr on failure.
u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size, const PageProtectionMode& mode)
{
	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
	const kern_return_t res =
		mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
			static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(file_handle)), file_offset, false,
			MachProt(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
	if (res != KERN_SUCCESS) [[unlikely]]
	{
		ERROR_LOG("mach_vm_map() failed: {}", res);
		return nullptr;
	}
	m_num_mappings++;
	return static_cast<u8*>(map_base);
}
// Replaces a previously Map()ed sub-range with an inaccessible placeholder
// mapping (again via VM_FLAGS_OVERWRITE), keeping the overall reservation
// intact for future Map() calls.
bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
	const kern_return_t res =
		mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
			MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
	if (res != KERN_SUCCESS) [[unlikely]]
	{
		ERROR_LOG("mach_vm_map() failed: {}", res);
		return false;
	}
	m_num_mappings--;
	return true;
}
#ifdef _M_ARM64
// Per-thread nesting depth of Begin/EndCodeWrite; JIT write protection is
// only toggled at the outermost level.
static thread_local int s_code_write_depth = 0;
// Makes MAP_JIT pages writable for the calling thread.
void HostSys::BeginCodeWrite()
{
	if ((s_code_write_depth++) == 0)
		pthread_jit_write_protect_np(0);
}
// Restores execute protection once the outermost writer finishes.
void HostSys::EndCodeWrite()
{
	pxAssert(s_code_write_depth > 0);
	if ((--s_code_write_depth) == 0)
		pthread_jit_write_protect_np(1);
}
// Decodes the AArch64 instruction at `ptr` and returns true if it is a
// store. Used by the fault handler to classify read vs. write faults,
// since the ARM64 mcontext carries no fault-direction flag.
// Based on vixl's disassembler Instruction::IsStore().
[[maybe_unused]] static bool IsStoreInstruction(const void* ptr)
{
	u32 bits;
	std::memcpy(&bits, ptr, sizeof(bits));
	// if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed)
	if ((bits & 0x0a000000) != 0x08000000)
		return false;
	// if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed)
	if ((bits & 0x3a000000) == 0x28000000)
	{
		// return Mask(LoadStorePairLBit) == 0
		return (bits & (1 << 22)) == 0;
	}
	// Single-register stores, integer and SIMD/FP variants.
	switch (bits & 0xC4C00000)
	{
		case 0x00000000: // STRB_w
		case 0x40000000: // STRH_w
		case 0x80000000: // STR_w
		case 0xC0000000: // STR_x
		case 0x04000000: // STR_b
		case 0x44000000: // STR_h
		case 0x84000000: // STR_s
		case 0xC4000000: // STR_d
		case 0x04800000: // STR_q
			return true;
		default:
			return false;
	}
}
#endif // _M_ARM64
namespace PageFaultHandler
{
	// Raw signal handler; dispatches faults to HandlePageFault().
	static void SignalHandler(int sig, siginfo_t* info, void* ctx);
	// Serializes fault handling across threads.
	static std::recursive_mutex s_exception_handler_mutex;
	// True while a fault is being handled; blocks recursive filtering.
	static bool s_in_exception_handler = false;
	// Set once Install() has registered the handlers.
	static bool s_installed = false;
} // namespace PageFaultHandler
// Signal handler for SIGBUS (and SIGSEGV on ARM64): extracts the fault
// address, faulting PC and access direction from the machine context, then
// gives HandlePageFault() a chance to resolve the fault before deferring to
// the crash handler.
void PageFaultHandler::SignalHandler(int sig, siginfo_t* info, void* ctx)
{
#if defined(_M_X86)
	void* const exception_address =
		reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__faultvaddr);
	void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__rip);
	// Bit 1 of the x86 page-fault error code indicates a write access.
	const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__err & 2) != 0;
#elif defined(_M_ARM64)
	void* const exception_address = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__far);
	void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__pc);
	// No fault-direction flag on ARM64; decode the faulting instruction.
	const bool is_write = IsStoreInstruction(exception_pc);
#endif
	// Executing the handler concurrently from multiple threads wouldn't go down well.
	s_exception_handler_mutex.lock();
	// Prevent recursive exception filtering.
	HandlerResult result = HandlerResult::ExecuteNextHandler;
	if (!s_in_exception_handler)
	{
		s_in_exception_handler = true;
		result = HandlePageFault(exception_pc, exception_address, is_write);
		s_in_exception_handler = false;
	}
	s_exception_handler_mutex.unlock();
	// Resumes execution right where we left off (re-executes instruction that caused the SIGSEGV).
	if (result == HandlerResult::ContinueExecution)
		return;
	// We couldn't handle it. Pass it off to the crash dumper.
	CrashHandler::CrashSignalHandler(sig, info, ctx);
}
// Installs the page-fault signal handler. Must be called at most once;
// returns false and fills `error` if registration fails.
bool PageFaultHandler::Install(Error* error)
{
	std::unique_lock lock(s_exception_handler_mutex);
	pxAssertRel(!s_installed, "Page fault handler has already been installed.");
	struct sigaction sa;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_SIGINFO; // handler needs siginfo/ucontext access
	sa.sa_sigaction = SignalHandler;
	// MacOS uses SIGBUS for memory permission violations, as well as SIGSEGV on ARM64.
	if (sigaction(SIGBUS, &sa, nullptr) != 0)
	{
		Error::SetErrno(error, "sigaction() for SIGBUS failed: ", errno);
		return false;
	}
#ifdef _M_ARM64
	if (sigaction(SIGSEGV, &sa, nullptr) != 0)
	{
		Error::SetErrno(error, "sigaction() for SIGSEGV failed: ", errno);
		return false;
	}
#endif
	// Allow us to ignore faults when running under lldb.
	task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS, MACH_PORT_NULL, EXCEPTION_DEFAULT, 0);
	s_installed = true;
	return true;
}

View File

@@ -0,0 +1,24 @@
// SPDX-FileCopyrightText: 2002-2025 PCSX2 Dev Team
// SPDX-License-Identifier: GPL-3.0+
#pragma once
#ifdef __APPLE__
#include <string>
#include <vector>
#include "common/Pcsx2Types.h"
namespace DarwinMisc {
// Description of one CPU performance level (e.g. separate performance and
// efficiency core classes; a single "Default" class when the system does
// not report levels).
struct CPUClass {
	std::string name; // human-readable level name, "???" if unavailable
	u32 num_physical; // physical cores in this class
	u32 num_logical; // logical cpus in this class
};
// Queries the hw.perflevel*/hw.*cpu sysctls; implemented in DarwinMisc.cpp.
std::vector<CPUClass> GetCPUClasses();
}
#endif

View File

@@ -0,0 +1,279 @@
// SPDX-FileCopyrightText: 2002-2025 PCSX2 Dev Team
// SPDX-License-Identifier: GPL-3.0+
#include "common/Threading.h"
#include "common/Assertions.h"
#include <cstdio>
#include <cassert> // assert
#include <sched.h>
#include <sys/time.h> // gettimeofday()
#include <pthread.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/mach_error.h> // mach_error_string()
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_time.h> // mach_absolute_time()
#include <mach/semaphore.h> // semaphore_*()
#include <mach/task.h> // semaphore_create() and semaphore_destroy()
#include <mach/thread_act.h>
// Note: assuming multicore is safer because it forces the interlocked routines to use
// the LOCK prefix. The prefix works on single core CPUs fine (but is slow), but not
// having the LOCK prefix is very bad indeed.

// Relinquishes the rest of this thread's timeslice to the scheduler.
__forceinline void Threading::Timeslice()
{
	sched_yield();
}
// For use in spin/wait loops, acts as a hint to Intel CPUs and should, in theory
// improve performance and reduce cpu power consumption.
__forceinline void Threading::SpinWait()
{
	// If this doesn't compile you can just comment it out (it only serves as a
	// performance hint and isn't required).
#if defined(_M_X86)
	__asm__("pause");
#elif defined(_M_ARM64)
	// "isb" serves as the spin-loop hint here; ARM64 has no direct "pause"
	// equivalent (whether "yield" would behave better is unverified).
	__asm__ __volatile__("isb");
#endif
}
// Intentionally a no-op on this platform.
__forceinline void Threading::EnableHiresScheduler()
{
	// Darwin has customizable schedulers, see xnu/osfmk/man. Not
	// implemented yet though (and not sure if useful for pcsx2).
}
// Intentionally a no-op; see EnableHiresScheduler().
__forceinline void Threading::DisableHiresScheduler()
{
	// see EnableHiresScheduler()
}
// Just like on Windows, this is not really the number of ticks per second,
// but just a factor by which one has to divide GetThreadCpuTime() or
// pxThread::GetCpuTime() if one wants to receive a value in seconds. NOTE:
// doing this will of course yield precision loss.
u64 Threading::GetThreadTicksPerSecond()
{
	return 1000000; // the *CpuTime() functions return values in microseconds
}
// gets the CPU time used by the given thread (both system and user), in
// microseconds, returns 0 on failure
static u64 getthreadtime(thread_port_t thread)
{
	thread_basic_info_data_t info;
	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
	const kern_return_t kr = thread_info(thread, THREAD_BASIC_INFO,
		reinterpret_cast<thread_info_t>(&info), &count);
	if (kr != KERN_SUCCESS)
		return 0;
	// Sum user and system time, converting seconds to microseconds.
	constexpr u64 usec_per_sec = 1000000;
	const u64 user_us = (u64)info.user_time.seconds * usec_per_sec + (u64)info.user_time.microseconds;
	const u64 sys_us = (u64)info.system_time.seconds * usec_per_sec + (u64)info.system_time.microseconds;
	return user_us + sys_us;
}
// Returns the CPU time consumed by the calling thread (not relative to a
// real-world clock), in microseconds.
u64 Threading::GetThreadCpuTime()
{
	// pthread_mach_thread_np(pthread_self()) stays entirely in user-space,
	// whereas mach_thread_self() performs a Mach trap (kernel round-trip)
	// and would also require a matching mach_port_deallocate().
	return getthreadtime(pthread_mach_thread_np(pthread_self()));
}
// --------------------------------------------------------------------------------------
//  Semaphore Implementation for Darwin/OSX
//
//  Sadly, Darwin/OSX needs its own implementation of Semaphores instead of
//  relying on pthreads, because OSX unnamed semaphore (the best kind)
//  support is very poor.
//
//  This implementation makes use of Mach primitives instead. These are also
//  what Grand Central Dispatch (GCD) is based on, as far as I understand:
//  http://newosxbook.com/articles/GCD.html.
//
// --------------------------------------------------------------------------------------

// Logs and asserts on a failed Mach call. Note that in release builds
// (NDEBUG) the assert compiles out and failures are only logged.
static void MACH_CHECK(kern_return_t mach_retval)
{
	if (mach_retval != KERN_SUCCESS)
	{
		// Terminate the message with a newline so it doesn't run into
		// subsequent stderr output (the original format string lacked one).
		fprintf(stderr, "mach error: %s\n", mach_error_string(mach_retval));
		assert(mach_retval == KERN_SUCCESS);
	}
}
// Creates an unnamed Mach semaphore with an initial count of zero.
Threading::KernelSemaphore::KernelSemaphore()
{
	MACH_CHECK(semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, 0));
}
Threading::KernelSemaphore::~KernelSemaphore()
{
	MACH_CHECK(semaphore_destroy(mach_task_self(), m_sema));
}
// Increments the semaphore, waking a blocked waiter if present.
void Threading::KernelSemaphore::Post()
{
	MACH_CHECK(semaphore_signal(m_sema));
}
// Blocks until the semaphore can be decremented.
void Threading::KernelSemaphore::Wait()
{
	MACH_CHECK(semaphore_wait(m_sema));
}
bool Threading::KernelSemaphore::TryWait()
{
mach_timespec_t time = {};
kern_return_t res = semaphore_timedwait(m_sema, time);
if (res == KERN_OPERATION_TIMED_OUT)
return false;
MACH_CHECK(res);
return true;
}
Threading::ThreadHandle::ThreadHandle() = default;
// Copying aliases the same underlying native handle; handles are
// non-owning here, so both copies remain usable.
Threading::ThreadHandle::ThreadHandle(const ThreadHandle& handle)
	: m_native_handle(handle.m_native_handle)
{
}
// Moving transfers the handle and clears the source.
Threading::ThreadHandle::ThreadHandle(ThreadHandle&& handle)
	: m_native_handle(handle.m_native_handle)
{
	handle.m_native_handle = nullptr;
}
Threading::ThreadHandle::~ThreadHandle() = default;
// Returns a handle referring to the calling thread.
Threading::ThreadHandle Threading::ThreadHandle::GetForCallingThread()
{
	ThreadHandle handle;
	handle.m_native_handle = pthread_self();
	return handle;
}
// Move-assignment: takes over the source's handle and clears it.
Threading::ThreadHandle& Threading::ThreadHandle::operator=(ThreadHandle&& handle)
{
	// Guard against self-move: without this, `h = std::move(h)` would null
	// out the handle right after copying it.
	if (this != &handle)
	{
		m_native_handle = handle.m_native_handle;
		handle.m_native_handle = nullptr;
	}
	return *this;
}
// Copy-assignment: aliases the same thread; the source keeps its handle.
Threading::ThreadHandle& Threading::ThreadHandle::operator=(const ThreadHandle& handle)
{
	m_native_handle = handle.m_native_handle;
	return *this;
}
// Returns the combined user+system CPU time consumed by the referenced
// thread, in microseconds (0 on failure).
u64 Threading::ThreadHandle::GetCPUTime() const
{
	const auto mach_thread = pthread_mach_thread_np((pthread_t)m_native_handle);
	return getthreadtime(mach_thread);
}
// Thread affinity is not supported on this platform; always returns false.
bool Threading::ThreadHandle::SetAffinity(u64 processor_mask) const
{
	// Doesn't appear to be possible to set affinity.
	return false;
}
Threading::Thread::Thread() = default;
// Move constructor: transfers the native handle and stack-size setting.
// The base must be MOVE-constructed here — the previous code copy-
// constructed ThreadHandle (an rvalue reference is an lvalue when named),
// leaving the moved-from Thread still holding the handle and tripping the
// "detached or joined" assertion in ~Thread().
Threading::Thread::Thread(Thread&& thread)
	: ThreadHandle(std::move(thread))
	, m_stack_size(thread.m_stack_size)
{
	thread.m_stack_size = 0;
}
// Convenience constructor: starts the thread immediately, hard-failing if
// creation doesn't succeed.
Threading::Thread::Thread(EntryPoint func)
	: ThreadHandle()
{
	if (!Start(std::move(func)))
		pxFailRel("Failed to start implicitly started thread.");
}
// Threads must be Join()ed or Detach()ed before destruction.
Threading::Thread::~Thread()
{
	pxAssertRel(!m_native_handle, "Thread should be detached or joined at destruction");
}
// Sets the stack size used by the next Start(); 0 selects the platform
// default (see Start()). Only valid before the thread has been started.
void Threading::Thread::SetStackSize(u32 size)
{
	pxAssertRel(!m_native_handle, "Can't change the stack size on a started thread");
	m_stack_size = size;
}
void* Threading::Thread::ThreadProc(void* param)
{
std::unique_ptr<EntryPoint> entry(static_cast<EntryPoint*>(param));
(*entry.get())();
return nullptr;
}
// Starts the thread with the given entry point. Returns false if thread
// creation fails (in which case the entry point is freed here).
bool Threading::Thread::Start(EntryPoint func)
{
	pxAssertRel(!m_native_handle, "Can't start an already-started thread");
	// Move the entry point to the heap so it outlives this call; ThreadProc
	// takes ownership on a successful start.
	std::unique_ptr<EntryPoint> func_clone(std::make_unique<EntryPoint>(std::move(func)));
	// Attributes are only needed when a custom stack size was requested
	// (the old code tested m_stack_size twice).
	pthread_attr_t attrs;
	const bool has_attributes = (m_stack_size != 0);
	if (has_attributes)
	{
		pthread_attr_init(&attrs);
		pthread_attr_setstacksize(&attrs, m_stack_size);
	}
	pthread_t handle;
	const int res = pthread_create(&handle, has_attributes ? &attrs : nullptr, ThreadProc, func_clone.get());
	// The attribute object must be destroyed whether or not the thread
	// started (the original code leaked it).
	if (has_attributes)
		pthread_attr_destroy(&attrs);
	if (res != 0)
		return false;
	// thread started, it'll release the memory
	m_native_handle = (void*)handle;
	func_clone.release();
	return true;
}
// Detaches the running thread; it cleans itself up on exit and this object
// no longer refers to it.
void Threading::Thread::Detach()
{
	pxAssertRel(m_native_handle, "Can't detach without a thread");
	pthread_detach((pthread_t)m_native_handle);
	m_native_handle = nullptr;
}
// Blocks until the thread exits, then clears the handle. Hard-fails if the
// join itself errors (e.g. joining a detached thread).
void Threading::Thread::Join()
{
	pxAssertRel(m_native_handle, "Can't join without a thread");
	void* retval; // thread return value, discarded
	const int res = pthread_join((pthread_t)m_native_handle, &retval);
	if (res != 0)
		pxFailRel("pthread_join() for thread join failed");
	m_native_handle = nullptr;
}
// name can be up to 16 bytes
void Threading::SetNameOfCurrentThread(const char* name)
{
	// Darwin's pthread_setname_np() can only name the calling thread
	// (single-argument form, unlike Linux's two-argument variant).
	pthread_setname_np(name);
}