nvflinger: Use jthread and stop_token for VSync thread
Avoids a destruction data race that may occur on the vsync thread
parent 6c0d902373
commit 71e19153ef
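The race being fixed: previously the vsync thread was a plain std::thread paired with an atomic is_running flag and a Common::Event that ~NVFlinger() had to set, join, and reset by hand, so the thread could still be running against partially destroyed state during teardown. std::jthread folds that shutdown protocol into the thread object itself: its destructor calls request_stop() and then join(), so the loop observes the stop request and returns before anything else is destroyed. A minimal sketch of the idiom, illustrative only and not yuzu code (the VSyncWorker and Loop names are invented):

    // Minimal sketch of the jthread idiom (illustrative only, not yuzu code).
    #include <chrono>
    #include <stop_token>
    #include <thread>

    class VSyncWorker {
    public:
        VSyncWorker() {
            // std::jthread passes its own stop token as the callable's first argument.
            thread = std::jthread([this](std::stop_token token) { Loop(token); });
        }
        // No hand-written destructor: ~jthread() calls request_stop() and then join(),
        // so Loop() has returned before the rest of *this is torn down.

    private:
        void Loop(std::stop_token token) {
            while (!token.stop_requested()) {
                // Compose a frame, then sleep for one 60 Hz frame interval.
                std::this_thread::sleep_for(std::chrono::nanoseconds{1000000000 / 60});
            }
        }

        std::jthread thread;
    };

    int main() {
        VSyncWorker worker;
        std::this_thread::sleep_for(std::chrono::milliseconds{50});
    }   // worker is destroyed here: its thread is stopped and joined automatically

The hunks below cover the NVFlinger implementation first; the block of hunks beginning at @@ -4,13 +4,10 @@ applies to the matching header.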
@@ -13,28 +13,20 @@
 #include "common/thread.h"
 #include "core/core.h"
 #include "core/core_timing.h"
-#include "core/core_timing_util.h"
-#include "core/hardware_properties.h"
 #include "core/hle/kernel/k_readable_event.h"
-#include "core/hle/kernel/kernel.h"
 #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/hle/service/nvflinger/buffer_queue.h"
 #include "core/hle/service/nvflinger/nvflinger.h"
 #include "core/hle/service/vi/display/vi_display.h"
 #include "core/hle/service/vi/layer/vi_layer.h"
-#include "core/perf_stats.h"
-#include "video_core/renderer_base.h"
+#include "video_core/gpu.h"

 namespace Service::NVFlinger {

 constexpr auto frame_ns = std::chrono::nanoseconds{1000000000 / 60};

-void NVFlinger::VSyncThread(NVFlinger& nv_flinger) {
-    nv_flinger.SplitVSync();
-}
-
-void NVFlinger::SplitVSync() {
+void NVFlinger::SplitVSync(std::stop_token stop_token) {
     system.RegisterHostThread();
     std::string name = "yuzu:VSyncThread";
     MicroProfileOnThreadCreate(name.c_str());
@@ -45,7 +37,7 @@ void NVFlinger::SplitVSync() {
     Common::SetCurrentThreadName(name.c_str());
     Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
     s64 delay = 0;
-    while (is_running) {
+    while (!stop_token.stop_requested()) {
         guard->lock();
         const s64 time_start = system.CoreTiming().GetGlobalTimeNs().count();
         Compose();
@@ -55,7 +47,7 @@ void NVFlinger::SplitVSync() {
         const s64 next_time = std::max<s64>(0, ticks - time_passed - delay);
         guard->unlock();
         if (next_time > 0) {
-            wait_event->WaitFor(std::chrono::nanoseconds{next_time});
+            std::this_thread::sleep_for(std::chrono::nanoseconds{next_time});
         }
         delay = (system.CoreTiming().GetGlobalTimeNs().count() - time_end) - next_time;
     }
@@ -84,9 +76,7 @@ NVFlinger::NVFlinger(Core::System& system_)
     });

     if (system.IsMulticore()) {
-        is_running = true;
-        wait_event = std::make_unique<Common::Event>();
-        vsync_thread = std::make_unique<std::thread>(VSyncThread, std::ref(*this));
+        vsync_thread = std::jthread([this](std::stop_token token) { SplitVSync(token); });
     } else {
         system.CoreTiming().ScheduleEvent(frame_ns, composition_event);
     }
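A side note on the constructor change above: std::jthread checks whether its callable is invocable with a std::stop_token as the first argument and, if so, passes in the token owned by the thread object, the same token its destructor later triggers. That is why the lambda takes a std::stop_token and forwards it to SplitVSync(). A standalone sketch (assumed example, not project code):

    #include <stop_token>
    #include <thread>

    int main() {
        // The lambda's first parameter is a std::stop_token, so jthread injects its own
        // token; ~jthread() will request stop on that token and then join.
        std::jthread t([](std::stop_token token) {
            while (!token.stop_requested()) {
                std::this_thread::yield();
            }
        });
    }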
@@ -96,14 +86,7 @@ NVFlinger::~NVFlinger() {
     for (auto& buffer_queue : buffer_queues) {
         buffer_queue->Disconnect();
     }
-
-    if (system.IsMulticore()) {
-        is_running = false;
-        wait_event->Set();
-        vsync_thread->join();
-        vsync_thread.reset();
-        wait_event.reset();
-    } else {
+    if (!system.IsMulticore()) {
         system.CoreTiming().UnscheduleEvent(composition_event, 0);
     }
 }
@@ -4,13 +4,10 @@

 #pragma once

-#include <atomic>
 #include <list>
 #include <memory>
 #include <mutex>
 #include <optional>
-#include <string>
-#include <string_view>
 #include <thread>
 #include <vector>

@@ -109,9 +106,7 @@ private:
     /// Creates a layer with the specified layer ID in the desired display.
     void CreateLayerAtId(VI::Display& display, u64 layer_id);

-    static void VSyncThread(NVFlinger& nv_flinger);
-
-    void SplitVSync();
+    void SplitVSync(std::stop_token stop_token);

     std::shared_ptr<Nvidia::Module> nvdrv;

@@ -133,9 +128,7 @@ private:

     Core::System& system;

-    std::unique_ptr<std::thread> vsync_thread;
-    std::unique_ptr<Common::Event> wait_event;
-    std::atomic<bool> is_running{};
+    std::jthread vsync_thread;

     KernelHelpers::ServiceContext service_context;
 };
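One more note on the member swap above: class members are destroyed in reverse declaration order, so a std::jthread declared below the state its loop reads is stopped and joined before that state is torn down, closing the window that the old std::unique_ptr<std::thread> / Common::Event / std::atomic<bool> combination left open to manual sequencing. A small sketch of that ordering guarantee (hypothetical names, not project code):

    #include <stop_token>
    #include <thread>
    #include <vector>

    struct Sketch {
        std::vector<int> frame_data;                 // destroyed after the thread joins
        std::jthread worker{[this](std::stop_token token) {
            while (!token.stop_requested()) {
                // Safe to touch frame_data here: ~Sketch() destroys 'worker' first,
                // which stops and joins this loop before ~vector runs.
                std::this_thread::yield();
            }
        }};
    };

    int main() {
        Sketch s;
    }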