First Commit

This commit is contained in:
2025-11-18 14:18:26 -07:00
parent 33eb6e3707
commit 27277ec342
6106 changed files with 3571167 additions and 0 deletions

View File

@@ -0,0 +1,375 @@
// SPDX-FileCopyrightText: 2002-2025 PCSX2 Dev Team
// SPDX-License-Identifier: GPL-3.0+
#include "common/HostSys.h"
#include "common/AlignedMalloc.h"
#include "common/Assertions.h"
#include "common/BitUtils.h"
#include "common/Console.h"
#include "common/Error.h"
#include "common/RedtapeWindows.h"
#include "common/StringUtil.h"
#include "fmt/format.h"
#include <mutex>
// Translates our platform-independent PageProtectionMode into the matching
// Win32 page-protection constant. Note that Windows' protection values look
// like bit flags but are actually a plain enumeration - each read/write/exec
// combination has its own distinct constant, so they can't be OR'd together.
static DWORD ConvertToWinApi(const PageProtectionMode& mode)
{
	if (mode.CanExecute())
		return mode.CanWrite() ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;

	if (mode.CanRead())
		return mode.CanWrite() ? PAGE_READWRITE : PAGE_READONLY;

	return PAGE_NOACCESS;
}
void* HostSys::Mmap(void* base, size_t size, const PageProtectionMode& mode)
{
if (mode.IsNone())
return nullptr;
return VirtualAlloc(base, size, MEM_RESERVE | MEM_COMMIT, ConvertToWinApi(mode));
}
// Releases an allocation previously created by Mmap(). The size parameter is
// unused: VirtualFree() with MEM_RELEASE requires a zero size and releases
// the entire region that 'base' was returned from.
void HostSys::Munmap(void* base, size_t size)
{
	if (!base)
		return;

	// Removed a redundant C-style (void*) cast of an already-void* pointer.
	VirtualFree(base, 0, MEM_RELEASE);
}
void HostSys::MemProtect(void* baseaddr, size_t size, const PageProtectionMode& mode)
{
pxAssert((size & (__pagesize - 1)) == 0);
DWORD OldProtect; // enjoy my uselessness, yo!
if (!VirtualProtect(baseaddr, size, ConvertToWinApi(mode), &OldProtect))
pxFail("VirtualProtect() failed");
}
// Builds a process-unique name for a named file mapping, so multiple
// instances of the application don't collide on the same object.
std::string HostSys::GetFileMappingName(const char* prefix)
{
	return fmt::format("{}_{}", prefix, GetCurrentProcessId());
}
// Creates a named, pagefile-backed shared memory object of the given size.
// Returns the file-mapping HANDLE as an opaque void*, or null on failure.
void* HostSys::CreateSharedMemory(const char* name, size_t size)
{
	// INVALID_HANDLE_VALUE = back the mapping with the system paging file
	// rather than a real file. The 64-bit size is split into the high/low
	// DWORD pair the API expects.
	return static_cast<void*>(CreateFileMappingW(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE,
		static_cast<DWORD>(size >> 32), static_cast<DWORD>(size), StringUtil::UTF8StringToWideString(name).c_str()));
}
void HostSys::DestroySharedMemory(void* ptr)
{
CloseHandle(static_cast<HANDLE>(ptr));
}
// Maps a view of a shared memory object at (optionally) a fixed address.
// Returns the mapped base pointer, or nullptr if the view couldn't be mapped.
void* HostSys::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, const PageProtectionMode& mode)
{
	// Always map read/write first; the desired protection is applied after,
	// since the FILE_MAP_* access flags can't express every mode combination.
	void* ret = MapViewOfFileEx(static_cast<HANDLE>(handle), FILE_MAP_READ | FILE_MAP_WRITE,
		static_cast<DWORD>(offset >> 32), static_cast<DWORD>(offset), size, baseaddr);
	if (!ret)
		return nullptr;

	// Tighten protection if the caller asked for anything other than RW.
	const DWORD prot = ConvertToWinApi(mode);
	if (prot != PAGE_READWRITE)
	{
		DWORD old_prot;
		if (!VirtualProtect(ret, size, prot, &old_prot))
			pxFail("Failed to protect memory mapping");
	}
	return ret;
}
void HostSys::UnmapSharedMemory(void* baseaddr, size_t size)
{
if (!UnmapViewOfFile(baseaddr))
pxFail("Failed to unmap shared memory");
}
// Queries the host's VM page size (typically 4KB on Windows).
size_t HostSys::GetRuntimePageSize()
{
	SYSTEM_INFO sysinfo = {};
	GetSystemInfo(&sysinfo);
	return static_cast<size_t>(sysinfo.dwPageSize);
}
// Returns the largest data-cache line size reported by any processor/cache
// level, or 0 if the information could not be queried.
size_t HostSys::GetRuntimeCacheLineSize()
{
	// First call with a null buffer just retrieves the required size;
	// ERROR_INSUFFICIENT_BUFFER is the expected "success" result here.
	DWORD size = 0;
	if (!GetLogicalProcessorInformation(nullptr, &size) && GetLastError() != ERROR_INSUFFICIENT_BUFFER)
		return 0;

	// Allocate a whole number of entries, rounding the byte count up.
	std::unique_ptr<SYSTEM_LOGICAL_PROCESSOR_INFORMATION[]> lpi =
		std::make_unique<SYSTEM_LOGICAL_PROCESSOR_INFORMATION[]>(
			(size + (sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) - 1)) / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION));
	if (!GetLogicalProcessorInformation(lpi.get(), &size))
		return 0;

	// Scan all returned entries, keeping the maximum line size across the
	// cache-relationship records.
	u32 max_line_size = 0;
	for (u32 i = 0; i < size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); i++)
	{
		if (lpi[i].Relationship == RelationCache)
			max_line_size = std::max<u32>(max_line_size, lpi[i].Cache.LineSize);
	}
	return max_line_size;
}
#ifdef _M_ARM64
// Flushes the instruction cache over a range of freshly-written code.
// Only needed on ARM64 builds (this overload isn't compiled elsewhere).
void HostSys::FlushInstructionCache(void* address, u32 size)
{
	// '::' selects the Win32 API of the same name rather than this wrapper.
	::FlushInstructionCache(GetCurrentProcess(), address, size);
}
#endif
// Takes ownership of an already-reserved placeholder region (see Create()).
SharedMemoryMappingArea::SharedMemoryMappingArea(u8* base_ptr, size_t size, size_t num_pages)
	: m_base_ptr(base_ptr)
	, m_size(size)
	, m_num_pages(num_pages)
{
	// Initially the whole area is one big unmapped placeholder: [0, size).
	m_placeholder_ranges.emplace(0, size);
}
// All views must have been unmapped (via Unmap()) before destruction.
SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
	pxAssertRel(m_num_mappings == 0, "No mappings left");

	// hopefully this will be okay, and we don't need to coalesce all the placeholders...
	if (!VirtualFreeEx(GetCurrentProcess(), m_base_ptr, 0, MEM_RELEASE))
		pxFailRel("Failed to release shared memory area");
}
// Finds the placeholder range [first, second) containing the given offset,
// or end() if the offset falls inside an active mapping instead.
SharedMemoryMappingArea::PlaceholderMap::iterator SharedMemoryMappingArea::FindPlaceholder(size_t offset)
{
	if (m_placeholder_ranges.empty())
		return m_placeholder_ranges.end();

	// lower_bound() lands on the first range starting at or after offset.
	auto candidate = m_placeholder_ranges.lower_bound(offset);
	if (candidate == m_placeholder_ranges.end())
	{
		// Nothing starts at/after offset; the last range is the only hope.
		--candidate;
	}

	// Does this range contain the offset?
	if (offset >= candidate->first && offset < candidate->second)
		return candidate;

	// No - the only other possibility is the range immediately before it.
	if (candidate == m_placeholder_ranges.begin())
		return m_placeholder_ranges.end();

	--candidate;
	if (offset >= candidate->first && offset < candidate->second)
		return candidate;

	return m_placeholder_ranges.end();
}
// Reserves a page-aligned region of address space as a single placeholder,
// ready to have shared-memory views mapped into it. Returns null on failure.
std::unique_ptr<SharedMemoryMappingArea> SharedMemoryMappingArea::Create(size_t size)
{
	pxAssertRel(Common::IsAlignedPow2(size, __pagesize), "Size is page aligned");

	// MEM_RESERVE_PLACEHOLDER lets Map() later carve the region up and
	// replace pieces of it with file views.
	void* base = VirtualAlloc2(GetCurrentProcess(), nullptr, size,
		MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, nullptr, 0);
	if (!base)
		return nullptr;

	return std::unique_ptr<SharedMemoryMappingArea>(
		new SharedMemoryMappingArea(static_cast<u8*>(base), size, size / __pagesize));
}
// Maps a view of file_handle at map_base (which must lie inside this area,
// within a placeholder range). Splits the placeholder as needed, replaces the
// middle with the file view, and applies the requested protection.
// Returns map_base on success, nullptr if the view couldn't be mapped.
u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size, const PageProtectionMode& mode)
{
	pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

	const size_t map_offset = static_cast<u8*>(map_base) - m_base_ptr;
	pxAssert(Common::IsAlignedPow2(map_offset, __pagesize));
	pxAssert(Common::IsAlignedPow2(map_size, __pagesize));

	// should be a placeholder. unless there's some other mapping we didn't free.
	PlaceholderMap::iterator phit = FindPlaceholder(map_offset);
	pxAssertMsg(phit != m_placeholder_ranges.end(), "Page we're mapping is a placeholder");
	pxAssertMsg(map_offset >= phit->first && map_offset < phit->second, "Page is in returned placeholder range");
	pxAssertMsg((map_offset + map_size) <= phit->second, "Page range is in returned placeholder range");

	// do we need to split to the left? (i.e. is there a placeholder before this range)
	const size_t old_ph_end = phit->second;
	if (map_offset != phit->first)
	{
		// Shrink the tracked range to the leftover left-hand piece.
		phit->second = map_offset;

		// split it (i.e. left..start and start..end are now separated)
		if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(phit->first),
			(map_offset - phit->first), MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
		{
			pxFailRel("Failed to left split placeholder for map");
		}
	}
	else
	{
		// start of the placeholder is getting used, we'll split it right below if there's anything left over
		m_placeholder_ranges.erase(phit);
	}

	// do we need to split to the right? (i.e. is there a placeholder after this range)
	if ((map_offset + map_size) != old_ph_end)
	{
		// split out end..ph_end
		m_placeholder_ranges.emplace(map_offset + map_size, old_ph_end);

		if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(map_offset), map_size,
			MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
		{
			pxFailRel("Failed to right split placeholder for map");
		}
	}

	// actually do the mapping, replacing the placeholder on the range.
	// Mapped RW first; the requested protection is applied below.
	if (!MapViewOfFile3(static_cast<HANDLE>(file_handle), GetCurrentProcess(),
		map_base, file_offset, map_size, MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0))
	{
		Console.Error("(SharedMemoryMappingArea) MapViewOfFile3() failed: %u", GetLastError());
		return nullptr;
	}

	const DWORD prot = ConvertToWinApi(mode);
	if (prot != PAGE_READWRITE)
	{
		DWORD old_prot;
		if (!VirtualProtect(map_base, map_size, prot, &old_prot))
			pxFail("Failed to protect memory mapping");
	}

	m_num_mappings++;
	return static_cast<u8*>(map_base);
}
bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
pxAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));
const size_t map_offset = static_cast<u8*>(map_base) - m_base_ptr;
pxAssert(Common::IsAlignedPow2(map_offset, __pagesize));
pxAssert(Common::IsAlignedPow2(map_size, __pagesize));
// unmap the specified range
if (!UnmapViewOfFile2(GetCurrentProcess(), map_base, MEM_PRESERVE_PLACEHOLDER))
{
Console.Error("(SharedMemoryMappingArea) UnmapViewOfFile2() failed: %u", GetLastError());
return false;
}
// can we coalesce to the left?
PlaceholderMap::iterator left_it = (map_offset > 0) ? FindPlaceholder(map_offset - 1) : m_placeholder_ranges.end();
if (left_it != m_placeholder_ranges.end())
{
// the left placeholder should end at our start
pxAssert(map_offset == left_it->second);
left_it->second = map_offset + map_size;
// combine placeholders before and the range we're unmapping, i.e. to the left
if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(left_it->first),
left_it->second - left_it->first, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
{
pxFail("Failed to coalesce placeholders left for unmap");
}
}
else
{
// this is a new placeholder
left_it = m_placeholder_ranges.emplace(map_offset, map_offset + map_size).first;
}
// can we coalesce to the right?
PlaceholderMap::iterator right_it = ((map_offset + map_size) < m_size) ? FindPlaceholder(map_offset + map_size) : m_placeholder_ranges.end();
if (right_it != m_placeholder_ranges.end())
{
// should start at our end
pxAssert(right_it->first == (map_offset + map_size));
left_it->second = right_it->second;
m_placeholder_ranges.erase(right_it);
// combine our placeholder and the next, i.e. to the right
if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(left_it->first),
left_it->second - left_it->first, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
{
pxFail("Failed to coalescae placeholders right for unmap");
}
}
m_num_mappings--;
return true;
}
namespace PageFaultHandler
{
	// Vectored exception handler callback (defined below).
	static LONG ExceptionHandler(PEXCEPTION_POINTERS exi);

	// Serializes handler execution. Recursive so that a fault raised while a
	// thread already holds the lock re-enters cleanly and hits the
	// s_in_exception_handler bail-out instead of deadlocking.
	static std::recursive_mutex s_exception_handler_mutex;
	// True while HandlePageFault() is running; guards against recursion.
	static bool s_in_exception_handler = false;
	// True once Install() has registered the handler.
	static bool s_installed = false;
} // namespace PageFaultHandler
// Vectored exception filter: forwards access violations to HandlePageFault()
// and resumes execution if it handled the fault; everything else is passed on
// to the next handler in the chain.
LONG PageFaultHandler::ExceptionHandler(PEXCEPTION_POINTERS exi)
{
	// Executing the handler concurrently from multiple threads wouldn't go down well.
	std::unique_lock lock(s_exception_handler_mutex);

	// Prevent recursive exception filtering.
	if (s_in_exception_handler)
		return EXCEPTION_CONTINUE_SEARCH;

	// Only interested in page faults.
	if (exi->ExceptionRecord->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
		return EXCEPTION_CONTINUE_SEARCH;

	// Faulting instruction pointer, from the architecture-specific context.
#if defined(_M_X86)
	void* const exception_pc = reinterpret_cast<void*>(exi->ContextRecord->Rip);
#elif defined(_M_ARM64)
	void* const exception_pc = reinterpret_cast<void*>(exi->ContextRecord->Pc);
#else
	void* const exception_pc = nullptr;
#endif

	// For access violations, ExceptionInformation[0] is 0 for reads and
	// 1 for writes, and [1] is the address that was accessed.
	void* const exception_address = reinterpret_cast<void*>(exi->ExceptionRecord->ExceptionInformation[1]);
	const bool is_write = exi->ExceptionRecord->ExceptionInformation[0] == 1;

	s_in_exception_handler = true;
	const HandlerResult handled = HandlePageFault(exception_pc, exception_address, is_write);
	s_in_exception_handler = false;

	return (handled == HandlerResult::ContinueExecution) ? EXCEPTION_CONTINUE_EXECUTION : EXCEPTION_CONTINUE_SEARCH;
}
// Registers the vectored exception handler. May only be called once; returns
// false (with 'error' populated) if registration fails.
bool PageFaultHandler::Install(Error* error)
{
	std::unique_lock lock(s_exception_handler_mutex);
	pxAssertRel(!s_installed, "Page fault handler has already been installed.");

	// First-position (1) so we see access violations before other handlers.
	const PVOID handler_cookie = AddVectoredExceptionHandler(1, ExceptionHandler);
	if (!handler_cookie)
	{
		Error::SetWin32(error, "AddVectoredExceptionHandler() failed: ", GetLastError());
		return false;
	}

	s_installed = true;
	return true;
}

188
common/Windows/WinMisc.cpp Normal file
View File

@@ -0,0 +1,188 @@
// SPDX-FileCopyrightText: 2002-2025 PCSX2 Dev Team
// SPDX-License-Identifier: GPL-3.0+
#include "common/Console.h"
#include "common/FileSystem.h"
#include "common/HostSys.h"
#include "common/RedtapeWindows.h"
#include "common/StringUtil.h"
#include "common/Threading.h"
#include "common/WindowInfo.h"
#include <mmsystem.h>
#include <timeapi.h>
#include <VersionHelpers.h>
// If anything tries to read this as an initializer, we're in trouble.
// QPC frequency, captured once at static-init time via an immediately
// invoked lambda (the value is fixed for the lifetime of the process).
static const LARGE_INTEGER lfreq = []() {
	LARGE_INTEGER ret = {};
	QueryPerformanceFrequency(&ret);
	return ret;
}();
// This gets leaked... oh well.
// Per-thread waitable timer used by Threading::SleepUntil(). Created lazily
// on first use and never destroyed (see "this gets leaked" note above).
static thread_local HANDLE s_sleep_timer;
static thread_local bool s_sleep_timer_created = false;

// Returns this thread's sleep timer, creating it on first call.
// May return null if both creation attempts fail.
static HANDLE GetSleepTimer()
{
	if (s_sleep_timer_created)
		return s_sleep_timer;

	// Prefer a high-resolution timer where the OS supports it; fall back to
	// a plain manual-reset waitable timer otherwise.
	s_sleep_timer_created = true;
	s_sleep_timer = CreateWaitableTimerEx(nullptr, nullptr, CREATE_WAITABLE_TIMER_HIGH_RESOLUTION, TIMER_ALL_ACCESS);
	if (!s_sleep_timer)
		s_sleep_timer = CreateWaitableTimer(nullptr, TRUE, nullptr);

	return s_sleep_timer;
}
// Returns the frequency (ticks per second) of the counter used by
// GetCPUTicks(). Constant for the life of the process (cached in lfreq).
u64 GetTickFrequency()
{
	return static_cast<u64>(lfreq.QuadPart);
}
// Returns the current value of the high-resolution performance counter.
u64 GetCPUTicks()
{
	LARGE_INTEGER now;
	QueryPerformanceCounter(&now);
	return static_cast<u64>(now.QuadPart);
}
// Returns the total amount of physical RAM in bytes, or 0 on failure.
u64 GetPhysicalMemory()
{
	// Fix: the struct was previously left uninitialized and the API's return
	// value unchecked, so a failed call would return stack garbage.
	MEMORYSTATUSEX status = {};
	status.dwLength = sizeof(status);
	if (!GlobalMemoryStatusEx(&status))
		return 0;
	return status.ullTotalPhys;
}
// Returns the amount of currently-available physical RAM in bytes, or 0 on failure.
u64 GetAvailablePhysicalMemory()
{
	// Fix: zero-initialize and check the API result, matching GetPhysicalMemory();
	// previously a failed call would return uninitialized stack data.
	MEMORYSTATUSEX status = {};
	status.dwLength = sizeof(status);
	if (!GlobalMemoryStatusEx(&status))
		return 0;
	return status.ullAvailPhys;
}
// Returns a human-readable description of the host Windows version.
// (Dead code removed: a SYSTEM_INFO filled via GetNativeSystemInfo() was
// never read, and the old comment's claim of reporting the processor
// architecture was never true.)
std::string GetOSVersionString()
{
	std::string retval;
	if (IsWindows10OrGreater())
	{
		retval = "Microsoft ";
		retval += IsWindowsServer() ? "Windows Server 2016+" : "Windows 10+";
	}
	else
	{
		retval = "Unsupported Operating System!";
	}
	return retval;
}
bool Common::InhibitScreensaver(bool inhibit)
{
EXECUTION_STATE flags = ES_CONTINUOUS;
if (inhibit)
flags |= ES_DISPLAY_REQUIRED;
SetThreadExecutionState(flags);
return true;
}
// Warps the system cursor to the given virtual-screen coordinates.
void Common::SetMousePosition(int x, int y)
{
	SetCursorPos(x, y);
}
/*
static HHOOK mouseHook = nullptr;
static std::function<void(int, int)> fnMouseMoveCb;
LRESULT CALLBACK Mousecb(int nCode, WPARAM wParam, LPARAM lParam)
{
if (nCode >= 0 && wParam == WM_MOUSEMOVE)
{
MSLLHOOKSTRUCT* mouse = (MSLLHOOKSTRUCT*)lParam;
fnMouseMoveCb(mouse->pt.x, mouse->pt.y);
}
return CallNextHookEx(mouseHook, nCode, wParam, lParam);
}
*/
// This (and the above) works, but is not recommended on Windows and is only here for consistency.
// Defer to using raw input instead.
// Intentionally a no-op on Windows: the low-level mouse-hook implementation
// is kept below (commented out) for reference only — raw input should be
// used instead (see note above). Always reports success.
bool Common::AttachMousePositionCb(std::function<void(int, int)> cb)
{
	/*
	if (mouseHook)
		Common::DetachMousePositionCb();
	fnMouseMoveCb = cb;
	mouseHook = SetWindowsHookEx(WH_MOUSE_LL, Mousecb, GetModuleHandle(NULL), 0);
	if (!mouseHook)
	{
		Console.Warning("Failed to set mouse hook: %d", GetLastError());
		return false;
	}
#if defined(PCSX2_DEBUG) || defined(PCSX2_DEVBUILD)
	static bool warned = false;
	if (!warned)
	{
		Console.Warning("Mouse hooks are enabled, and this isn't a release build! Using a debugger, or loading symbols, _will_ stall the hook and cause global mouse lag.");
		warned = true;
	}
#endif
	*/
	return true;
}
// No-op counterpart to AttachMousePositionCb(); the hook teardown is kept
// commented out alongside the disabled hook implementation above.
void Common::DetachMousePositionCb()
{
	/*
	UnhookWindowsHookEx(mouseHook);
	mouseHook = nullptr;
	*/
}
// Plays a sound file asynchronously via winmm. SND_NODEFAULT suppresses the
// system default beep if the file can't be played. Returns the API's result.
bool Common::PlaySoundAsync(const char* path)
{
	// Convert the UTF-8 path to a native wide path first.
	const std::wstring wpath = FileSystem::GetWin32Path(path);
	return PlaySoundW(wpath.c_str(), NULL, SND_ASYNC | SND_NODEFAULT);
}
// Suspends the calling thread for (at least) the given number of milliseconds.
void Threading::Sleep(int ms)
{
	::Sleep(ms);
}
// Sleeps until the QPC timestamp 'ticks' is reached (best effort).
void Threading::SleepUntil(u64 ticks)
{
	// This is definitely sub-optimal, but there's no way to sleep until a QPC timestamp on Win32.
	const s64 diff = static_cast<s64>(ticks - GetCPUTicks());
	if (diff <= 0)
		return; // deadline already passed

	const HANDLE hTimer = GetSleepTimer();
	if (!hTimer)
		return;

	// Convert QPC ticks to the timer's 100ns units; nothing to wait for if
	// the remainder rounds down to zero.
	const u64 one_hundred_nanos_diff = (static_cast<u64>(diff) * 10000000ULL) / GetTickFrequency();
	if (one_hundred_nanos_diff == 0)
		return;

	// A negative due time means "relative interval" to SetWaitableTimer().
	LARGE_INTEGER fti;
	fti.QuadPart = -static_cast<s64>(one_hundred_nanos_diff);
	if (SetWaitableTimer(hTimer, &fti, 0, nullptr, nullptr, FALSE))
	{
		WaitForSingleObject(hTimer, INFINITE);
		return;
	}
}

View File

@@ -0,0 +1,289 @@
// SPDX-FileCopyrightText: 2002-2025 PCSX2 Dev Team
// SPDX-License-Identifier: GPL-3.0+
#include "common/Threading.h"
#include "common/Assertions.h"
#include "common/RedtapeWindows.h"
#include <memory>
#include <mmsystem.h>
#include <process.h>
#include <timeapi.h>
#include <utility>
// Yields the remainder of this thread's timeslice to any other ready thread.
__fi void Threading::Timeslice()
{
	::Sleep(0);
}
// For use in spin/wait loops: emits a pause-style hint so the CPU can relax
// the pipeline, which should improve performance and reduce power use while
// spinning.
__fi void Threading::SpinWait()
{
#ifdef _M_X86
	// x86: PAUSE instruction.
	_mm_pause();
#else
	// Other architectures: Windows' generic yield-processor macro.
	YieldProcessor();
#endif
}
// Requests a 1ms scheduler tick for the process.
__fi void Threading::EnableHiresScheduler()
{
	// This improves accuracy of Sleep() by some amount, and only adds a negligible amount of
	// overhead on modern CPUs. Typically desktops are already set pretty low, but laptops in
	// particular may have a scheduler period of 15 or 20ms to extend battery life.
	// (note: this same trick is used by most multimedia software and games)
	timeBeginPeriod(1);
}
// Undoes EnableHiresScheduler(); calls must be paired per the timeapi contract.
__fi void Threading::DisableHiresScheduler()
{
	timeEndPeriod(1);
}
// A default-constructed handle owns no thread.
Threading::ThreadHandle::ThreadHandle() = default;
// Copying duplicates the underlying Win32 handle, requesting only the access
// rights this class actually uses. If duplication fails (or the source is
// empty), the new object is left as a null handle.
Threading::ThreadHandle::ThreadHandle(const ThreadHandle& handle)
{
	if (handle.m_native_handle)
	{
		HANDLE new_handle;
		if (DuplicateHandle(GetCurrentProcess(), (HANDLE)handle.m_native_handle,
			GetCurrentProcess(), &new_handle, THREAD_QUERY_INFORMATION | THREAD_SET_LIMITED_INFORMATION, FALSE, 0))
		{
			m_native_handle = (void*)new_handle;
		}
	}
}
// Moving steals the handle; the source no longer owns (or will close) it.
Threading::ThreadHandle::ThreadHandle(ThreadHandle&& handle)
	: m_native_handle(handle.m_native_handle)
{
	handle.m_native_handle = nullptr;
}
// Closes the duplicated/opened handle, if this object owns one.
Threading::ThreadHandle::~ThreadHandle()
{
	if (m_native_handle)
		CloseHandle(m_native_handle);
}
// Returns a real (non-pseudo) handle for the calling thread, so it stays
// valid when used from other threads. Requests only the rights this class needs.
Threading::ThreadHandle Threading::ThreadHandle::GetForCallingThread()
{
	ThreadHandle handle;
	handle.m_native_handle = static_cast<void*>(
		OpenThread(THREAD_QUERY_INFORMATION | THREAD_SET_LIMITED_INFORMATION, FALSE, GetCurrentThreadId()));
	return handle;
}
// Move assignment: releases any currently-owned handle, then takes the
// source's handle, leaving the source empty.
Threading::ThreadHandle& Threading::ThreadHandle::operator=(ThreadHandle&& handle)
{
	// Fix: guard against self-move. The original closed its own handle and
	// then nulled itself out, silently discarding a valid thread handle.
	if (this == &handle)
		return *this;

	if (m_native_handle)
		CloseHandle((HANDLE)m_native_handle);
	m_native_handle = handle.m_native_handle;
	handle.m_native_handle = nullptr;
	return *this;
}
// Copy assignment: releases any currently-owned handle, then duplicates the
// source's handle (with only the rights this class needs). On duplication
// failure we are left as a null handle, matching the copy constructor.
Threading::ThreadHandle& Threading::ThreadHandle::operator=(const ThreadHandle& handle)
{
	// Fix: guard against self-assignment. The original closed its own handle
	// first and then tried to duplicate from the now-dead handle, losing it.
	if (this == &handle)
		return *this;

	if (m_native_handle)
	{
		CloseHandle((HANDLE)m_native_handle);
		m_native_handle = nullptr;
	}

	// Fix: skip the doomed DuplicateHandle() call when the source is empty,
	// consistent with the copy constructor's null check.
	HANDLE new_handle;
	if (handle.m_native_handle &&
		DuplicateHandle(GetCurrentProcess(), (HANDLE)handle.m_native_handle,
			GetCurrentProcess(), &new_handle, THREAD_QUERY_INFORMATION | THREAD_SET_LIMITED_INFORMATION, FALSE, 0))
	{
		m_native_handle = (void*)new_handle;
	}

	return *this;
}
// Returns a monotonically-increasing measure of CPU time consumed by this
// thread. Units differ per architecture: cycle counts on x86 (via
// QueryThreadCycleTime), 100ns units on ARM64 (via GetThreadTimes) — pair
// with GetThreadTicksPerSecond() to convert. Returns 0 on failure.
u64 Threading::ThreadHandle::GetCPUTime() const
{
#ifndef _M_ARM64
	u64 ret = 0;
	if (m_native_handle)
		QueryThreadCycleTime((HANDLE)m_native_handle, &ret);
	return ret;
#else
	// Sum user + kernel time from the two FILETIMEs (100ns resolution).
	FILETIME user, kernel, unused;
	if (!GetThreadTimes((HANDLE)m_native_handle, &unused, &unused, &kernel, &user))
		return 0;
	const u64 user_time = (static_cast<u64>(user.dwHighDateTime) << 32) | static_cast<u64>(user.dwLowDateTime);
	const u64 kernel_time = (static_cast<u64>(kernel.dwHighDateTime) << 32) | static_cast<u64>(kernel.dwLowDateTime);
	return user_time + kernel_time;
#endif
}
// Sets the processor affinity mask; a zero mask means "all processors".
// NOTE(review): this targets GetCurrentThread() (the *calling* thread), not
// m_native_handle — confirm whether that is intentional; the handles this
// class opens only carry THREAD_SET_LIMITED_INFORMATION rights, which may be
// why the pseudo-handle is used instead.
// NOTE(review): the return expression is effectively always true — when
// SetThreadAffinityMask() fails, GetLastError() != ERROR_SUCCESS satisfies
// the '||' anyway. Verify what callers expect before changing it.
bool Threading::ThreadHandle::SetAffinity(u64 processor_mask) const
{
	if (processor_mask == 0)
		processor_mask = ~processor_mask;
	return (SetThreadAffinityMask(GetCurrentThread(), (DWORD_PTR)processor_mask) != 0 || GetLastError() != ERROR_SUCCESS);
}
// A default-constructed Thread owns no OS thread.
Threading::Thread::Thread() = default;
// Move constructor: transfers ownership of the OS thread handle and stack size.
Threading::Thread::Thread(Thread&& thread)
	: ThreadHandle(std::move(thread))
	, m_stack_size(thread.m_stack_size)
{
	// Fix: 'thread' is a named rvalue reference and therefore an lvalue, so
	// the original 'ThreadHandle(thread)' invoked the COPY constructor. That
	// duplicated the handle and left the moved-from Thread still owning one,
	// which trips the "detached or joined" assertion in ~Thread().
	// (Reading m_stack_size after the base move is fine — the base move only
	// touches m_native_handle.)
	thread.m_stack_size = 0;
}
// Convenience constructor: starts the thread immediately.
// Failure to start is treated as fatal.
Threading::Thread::Thread(EntryPoint func)
	: ThreadHandle()
{
	if (!Start(std::move(func)))
		pxFailRel("Failed to start implicitly started thread.");
}
// Destroying a still-running Thread is a bug: callers must Join() or
// Detach() first (enforced in release builds too).
Threading::Thread::~Thread()
{
	pxAssertRel(!m_native_handle, "Thread should be detached or joined at destruction");
}
// Sets the stack size used by the next Start() call.
// Must be called before the thread is started.
void Threading::Thread::SetStackSize(u32 size)
{
	pxAssertRel(!m_native_handle, "Can't change the stack size on a started thread");
	m_stack_size = size;
}
unsigned Threading::Thread::ThreadProc(void* param)
{
std::unique_ptr<EntryPoint> entry(static_cast<EntryPoint*>(param));
(*entry.get())();
return 0;
}
// Starts the thread, invoking func on it. Returns false if the OS thread
// could not be created (in which case no resources are leaked).
bool Threading::Thread::Start(EntryPoint func)
{
	pxAssertRel(!m_native_handle, "Can't start an already-started thread");

	// The entry point is heap-allocated and handed to ThreadProc(), which
	// takes ownership once the thread is actually running.
	auto entry = std::make_unique<EntryPoint>(std::move(func));

	unsigned thread_id;
	m_native_handle = reinterpret_cast<void*>(
		_beginthreadex(nullptr, m_stack_size, ThreadProc, entry.get(), 0, &thread_id));
	if (!m_native_handle)
		return false;

	// Creation succeeded - ThreadProc now owns the entry point.
	entry.release();
	return true;
}
void Threading::Thread::Detach()
{
pxAssertRel(m_native_handle, "Can't detach without a thread");
CloseHandle((HANDLE)m_native_handle);
m_native_handle = nullptr;
}
void Threading::Thread::Join()
{
pxAssertRel(m_native_handle, "Can't join without a thread");
const DWORD res = WaitForSingleObject((HANDLE)m_native_handle, INFINITE);
if (res != WAIT_OBJECT_0)
pxFailRel("WaitForSingleObject() for thread join failed");
CloseHandle((HANDLE)m_native_handle);
m_native_handle = nullptr;
}
// Move assignment: transfers ownership of the OS thread handle and stack size.
Threading::ThreadHandle& Threading::Thread::operator=(Thread&& thread)
{
	// Fix: 'thread' is a named rvalue reference (an lvalue), so the original
	// 'ThreadHandle::operator=(thread)' invoked the COPY assignment — it
	// duplicated the handle and left the source Thread still owning one,
	// which trips the "detached or joined" assertion in ~Thread().
	ThreadHandle::operator=(std::move(thread));
	m_stack_size = thread.m_stack_size;
	thread.m_stack_size = 0;
	return *this;
}
// Returns CPU time consumed by the *calling* thread, in the same units as
// ThreadHandle::GetCPUTime() (cycles on x86, 100ns units on ARM64).
// Convert with GetThreadTicksPerSecond(). Returns 0 on failure (ARM64 path).
u64 Threading::GetThreadCpuTime()
{
#ifndef _M_ARM64
	u64 ret = 0;
	QueryThreadCycleTime(GetCurrentThread(), &ret);
	return ret;
#else
	// Sum user + kernel time from the two FILETIMEs (100ns resolution).
	FILETIME user, kernel, unused;
	if (!GetThreadTimes(GetCurrentThread(), &unused, &unused, &kernel, &user))
		return 0;
	const u64 user_time = (static_cast<u64>(user.dwHighDateTime) << 32) | static_cast<u64>(user.dwLowDateTime);
	const u64 kernel_time = (static_cast<u64>(kernel.dwHighDateTime) << 32) | static_cast<u64>(kernel.dwLowDateTime);
	return user_time + kernel_time;
#endif
}
// Returns the frequency (ticks per second) of the values reported by
// GetThreadCpuTime()/ThreadHandle::GetCPUTime(). May return 0 on x86 if the
// registry query fails.
u64 Threading::GetThreadTicksPerSecond()
{
#ifndef _M_ARM64
	// On x86, despite what the MS documentation says, this basically appears to be rdtsc.
	// So, the frequency is our base clock speed (and stable regardless of power management).
	// NOTE(review): this lazy init isn't synchronized; worst case, racing
	// threads redundantly compute the same value.
	static u64 frequency = 0;
	if (frequency == 0) [[unlikely]]
	{
		// Read the base clock (in MHz) the OS recorded for CPU 0 at boot.
		HKEY key;
		LSTATUS res =
			RegOpenKeyExW(HKEY_LOCAL_MACHINE, L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0, KEY_READ, &key);
		if (res == ERROR_SUCCESS)
		{
			DWORD mhz;
			DWORD size = sizeof(mhz);
			res = RegQueryValueExW(key, L"~MHz", nullptr, nullptr, reinterpret_cast<LPBYTE>(&mhz), &size);
			if (res == ERROR_SUCCESS)
				frequency = static_cast<u64>(mhz) * static_cast<u64>(1000000);
			RegCloseKey(key);
		}
	}
	return frequency;
#else
	// ARM64 uses GetThreadTimes(), which reports in 100ns units.
	return 10000000;
#endif
}
// Sets the debugger-visible name of the calling thread by raising the magic
// MSVC exception (0x406D1388) that debuggers recognize. No effect when no
// debugger is attached; a no-op on non-MSVC builds.
void Threading::SetNameOfCurrentThread(const char* name)
{
	// This feature needs Windows headers and MSVC's SEH support:
#if defined(_WIN32) && defined(_MSC_VER)
	// This code sample was borrowed form some obscure MSDN article.
	// In a rare bout of sanity, it's an actual Microsoft-published hack
	// that actually works!
	static const int MS_VC_EXCEPTION = 0x406D1388;
	// Layout is dictated by the debugger protocol; must be 8-byte packed.
#pragma pack(push, 8)
	struct THREADNAME_INFO
	{
		DWORD dwType; // Must be 0x1000.
		LPCSTR szName; // Pointer to name (in user addr space).
		DWORD dwThreadID; // Thread ID (-1=caller thread).
		DWORD dwFlags; // Reserved for future use, must be zero.
	};
#pragma pack(pop)
	THREADNAME_INFO info;
	info.dwType = 0x1000;
	info.szName = name;
	info.dwThreadID = GetCurrentThreadId();
	info.dwFlags = 0;
	// The debugger consumes the exception; the empty __except swallows it
	// when no debugger is attached so the process isn't killed.
	__try
	{
		RaiseException(MS_VC_EXCEPTION, 0, sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
	}
	__except (EXCEPTION_EXECUTE_HANDLER)
	{
	}
#endif
}