From 8e56a84566036cfff0aa5c3d80ae1b051d2bd0bf Mon Sep 17 00:00:00 2001
From: Morph <39850852+Morph1984@users.noreply.github.com>
Date: Sun, 23 Apr 2023 00:01:08 -0400
Subject: core_timing: Use CNTPCT as the guest CPU tick

Previously, we were mixing the raw CPU frequency and CNTFRQ. The raw CPU
frequency (1020 MHz) should never have been used, as CNTPCT (whose
frequency is CNTFRQ) is the only counter available.
---
 src/video_core/gpu.cpp | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'src/video_core/gpu.cpp')

diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 456f733cf..70762c51a 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -194,17 +194,17 @@ struct GPU::Impl {
 
     [[nodiscard]] u64 GetTicks() const {
         // This values were reversed engineered by fincs from NVN
-        // The gpu clock is reported in units of 385/625 nanoseconds
-        constexpr u64 gpu_ticks_num = 384;
-        constexpr u64 gpu_ticks_den = 625;
+        // The GPU clock is 614.4 MHz
+        using NsToGPUTickRatio = std::ratio<614'400'000, std::nano::den>;
+        static_assert(NsToGPUTickRatio::num == 384 && NsToGPUTickRatio::den == 625);
+
+        u64 nanoseconds = system.CoreTiming().GetGlobalTimeNs().count();
 
-        u64 nanoseconds = system.CoreTiming().GetCPUTimeNs().count();
         if (Settings::values.use_fast_gpu_time.GetValue()) {
             nanoseconds /= 256;
         }
-        const u64 nanoseconds_num = nanoseconds / gpu_ticks_den;
-        const u64 nanoseconds_rem = nanoseconds % gpu_ticks_den;
-        return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den;
+
+        return nanoseconds * NsToGPUTickRatio::num / NsToGPUTickRatio::den;
     }
 
     [[nodiscard]] bool IsAsync() const {
-- 
cgit v1.2.3
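
For readers following the arithmetic: std::ratio reduces its fraction at
compile time, so std::ratio<614'400'000, std::nano::den> collapses
614,400,000 GPU ticks per 1,000,000,000 nanoseconds down to 384/625, which
is exactly what the static_assert in the patch verifies. The standalone
sketch below exercises the same conversion outside yuzu; NsToGPUTicks and
the surrounding scaffolding are illustrative names, not part of the
codebase.

    #include <cstdint>
    #include <cstdio>
    #include <ratio>

    using u64 = std::uint64_t;

    // 614'400'000 GPU ticks per std::nano::den (1'000'000'000) nanoseconds;
    // std::ratio reduces this to 384/625 at compile time.
    using NsToGPUTickRatio = std::ratio<614'400'000, std::nano::den>;
    static_assert(NsToGPUTickRatio::num == 384 && NsToGPUTickRatio::den == 625);

    // Illustrative helper, not yuzu code: converts guest nanoseconds to GPU
    // ticks the same way the patched GetTicks() does.
    constexpr u64 NsToGPUTicks(u64 nanoseconds) {
        // Multiply before dividing so truncation happens once, at the end.
        return nanoseconds * NsToGPUTickRatio::num / NsToGPUTickRatio::den;
    }

    int main() {
        // One emulated second should come out to 614'400'000 GPU ticks.
        std::printf("%llu\n", static_cast<unsigned long long>(
                                  NsToGPUTicks(1'000'000'000ULL)));
    }

One trade-off worth noting: the removed div/mod formulation could not
overflow for any u64 input, whereas nanoseconds * 384 wraps a u64 once
roughly a year and a half of guest time has accumulated; in exchange, the
single multiply-then-divide is simpler and exact over that whole range.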