commit 848795f383
@@ -21,7 +21,7 @@ namespace Kernel {
 */
 static void ResumeWaitingThread(Mutex* mutex) {
     // Reset mutex lock thread handle, nothing is waiting
-    mutex->locked = false;
+    mutex->lock_count = 0;
     mutex->holding_thread = nullptr;
 
     // Find the next waiting thread for the mutex...
@@ -44,8 +44,7 @@ Mutex::~Mutex() {}
 SharedPtr<Mutex> Mutex::Create(bool initial_locked, std::string name) {
     SharedPtr<Mutex> mutex(new Mutex);
 
-    mutex->initial_locked = initial_locked;
-    mutex->locked = false;
+    mutex->lock_count = 0;
     mutex->name = std::move(name);
     mutex->holding_thread = nullptr;
 
@@ -57,7 +56,7 @@ SharedPtr<Mutex> Mutex::Create(bool initial_locked, std::string name) {
 }
 
 bool Mutex::ShouldWait() {
-    return locked && holding_thread != GetCurrentThread();
+    return lock_count > 0 && holding_thread != GetCurrentThread();
 }
 
 void Mutex::Acquire() {
@@ -66,21 +65,27 @@ void Mutex::Acquire() {
 
 void Mutex::Acquire(SharedPtr<Thread> thread) {
     _assert_msg_(Kernel, !ShouldWait(), "object unavailable!");
-    if (locked)
-        return;
 
-    locked = true;
+    // Actually "acquire" the mutex only if we don't already have it...
+    if (lock_count == 0) {
+        thread->held_mutexes.insert(this);
+        holding_thread = std::move(thread);
+    }
 
-    thread->held_mutexes.insert(this);
-    holding_thread = std::move(thread);
+    lock_count++;
 }
 
 void Mutex::Release() {
-    if (!locked)
-        return;
+    // Only release if the mutex is held...
+    if (lock_count > 0) {
+        lock_count--;
 
-    holding_thread->held_mutexes.erase(this);
-    ResumeWaitingThread(this);
+        // Yield to the next thread only if we've fully released the mutex...
+        if (lock_count == 0) {
+            holding_thread->held_mutexes.erase(this);
+            ResumeWaitingThread(this);
+        }
+    }
 }
 
 } // namespace
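
Taken together, the mutex implementation hunks turn the boolean lock flag into a recursive lock count: Acquire() records the holding thread only on the first acquisition and then increments lock_count, ShouldWait() treats any non-zero count held by another thread as contended, and Release() decrements the count and resumes waiters only once it returns to zero. A minimal standalone sketch of that counting scheme, purely illustrative (CountedMutex, owner and the console output are placeholders, not the emulator's kernel API):

#include <cassert>
#include <cstdio>

// Illustrative stand-in for the kernel mutex bookkeeping; a "thread" is
// just an opaque pointer here, and waiter wake-up is reduced to a message.
struct CountedMutex {
    int lock_count = 0;          // how many times the owner has acquired it
    const void* owner = nullptr; // thread currently holding the mutex

    bool ShouldWait(const void* thread) const {
        // Wait only if somebody else holds it; the owner may re-enter freely.
        return lock_count > 0 && owner != thread;
    }

    void Acquire(const void* thread) {
        assert(!ShouldWait(thread) && "object unavailable!");
        if (lock_count == 0)
            owner = thread;      // record the holder on the first acquisition only
        lock_count++;
    }

    void Release() {
        if (lock_count == 0)
            return;              // releasing an unheld mutex does nothing
        if (--lock_count == 0) {
            owner = nullptr;     // fully released: the next waiter could run now
            std::puts("mutex fully released, waiters may be resumed");
        }
    }
};

int main() {
    int thread_a = 0;
    int thread_b = 0;
    CountedMutex m;

    m.Acquire(&thread_a);
    m.Acquire(&thread_a);                                         // re-entrant, count == 2
    std::printf("B should wait: %d\n", m.ShouldWait(&thread_b));  // prints 1
    m.Release();                                                  // count == 1, still held
    m.Release();                                                  // count == 0, released
    std::printf("B should wait: %d\n", m.ShouldWait(&thread_b));  // prints 0
    return 0;
}

Counting acquisitions instead of flipping a flag is what lets the holding thread re-enter the mutex without blocking on itself, while waiters are only woken after every acquisition has been matched by a release.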
@@ -30,8 +30,7 @@ public:
     static const HandleType HANDLE_TYPE = HandleType::Mutex;
     HandleType GetHandleType() const override { return HANDLE_TYPE; }
 
-    bool initial_locked;                        ///< Initial lock state when mutex was created
-    bool locked;                                ///< Current locked state
+    int lock_count;                             ///< Number of times the mutex has been acquired
     std::string name;                           ///< Name of mutex (optional)
     SharedPtr<Thread> holding_thread;           ///< Thread that has acquired the mutex
 
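
In the header, the two booleans collapse into a single counter: lock_count > 0 now plays the role of the old locked flag, and the initial lock state apparently no longer needs to be stored on the object since it only matters while Create() runs. A tiny sketch of that equivalence, illustrative only and not the project's class:

#include <cassert>

// Illustrative only: how one counter subsumes the two removed booleans.
struct MutexState {
    int lock_count = 0;                               // replaces bool locked / bool initial_locked

    bool IsLocked() const { return lock_count > 0; }  // the old "locked" flag, now derived
};

int main() {
    MutexState m;
    assert(!m.IsLocked());
    m.lock_count++;            // first acquisition
    assert(m.IsLocked());
    m.lock_count++;            // recursive acquisition by the same holder
    m.lock_count -= 2;         // every acquisition released again
    assert(!m.IsLocked());
    return 0;
}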
@@ -144,6 +144,8 @@ static ResultCode WaitSynchronization1(Handle handle, s64 nano_seconds) {
     LOG_TRACE(Kernel_SVC, "called handle=0x%08X(%s:%s), nanoseconds=%lld", handle,
               object->GetTypeName().c_str(), object->GetName().c_str(), nano_seconds);
 
+    HLE::Reschedule(__func__);
+
     // Check for next thread to schedule
     if (object->ShouldWait()) {
 
@@ -153,8 +155,6 @@ static ResultCode WaitSynchronization1(Handle handle, s64 nano_seconds) {
         // Create an event to wake the thread up after the specified nanosecond delay has passed
         Kernel::GetCurrentThread()->WakeAfterDelay(nano_seconds);
 
-        HLE::Reschedule(__func__);
-
         // NOTE: output of this SVC will be set later depending on how the thread resumes
         return RESULT_INVALID;
     }
@@ -216,6 +216,8 @@ static ResultCode WaitSynchronizationN(s32* out, Handle* handles, s32 handle_cou
         }
     }
 
+    HLE::Reschedule(__func__);
+
     // If thread should wait, then set its state to waiting and then reschedule...
     if (wait_thread) {
 
@@ -229,8 +231,6 @@ static ResultCode WaitSynchronizationN(s32* out, Handle* handles, s32 handle_cou
         // Create an event to wake the thread up after the specified nanosecond delay has passed
         Kernel::GetCurrentThread()->WakeAfterDelay(nano_seconds);
 
-        HLE::Reschedule(__func__);
-
         // NOTE: output of this SVC will be set later depending on how the thread resumes
        return RESULT_INVALID;
     }
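
In both wait SVCs, HLE::Reschedule(__func__) moves out of the wait branch: it now runs on every invocation, before ShouldWait()/wait_thread is consulted, rather than only after the calling thread has been set up to block and wake after the delay. A simplified, self-contained sketch of that control-flow change (FakeObject, reschedule() and block_and_wake_after() are illustrative stubs, not the emulator's HLE API):

#include <cstdio>

// Illustrative stubs only; these are not the emulator's HLE/kernel API.
struct FakeObject {
    bool busy = false;
    bool ShouldWait() const { return busy; }
    void Acquire() { busy = true; }
};

static void reschedule(const char* where) {
    std::printf("reschedule requested from %s\n", where);
}

static void block_and_wake_after(long long ns) {
    std::printf("blocking caller, wake after %lld ns\n", ns);
}

// After this commit: the reschedule request happens on every call,
// before deciding whether the caller has to wait on the object.
static int WaitSynchronization1(FakeObject& object, long long nano_seconds) {
    reschedule(__func__);              // moved up: runs whether or not we wait

    if (object.ShouldWait()) {
        block_and_wake_after(nano_seconds);
        // Previously the reschedule request lived here, so it only ran when
        // the calling thread actually had to block. The real SVC returns
        // RESULT_INVALID here and fills in its output when the thread resumes.
        return -1;
    }

    object.Acquire();
    return 0;
}

int main() {
    FakeObject obj;
    WaitSynchronization1(obj, 1000);   // object free: acquires, still reschedules
    WaitSynchronization1(obj, 1000);   // object busy: would block and wake later
    return 0;
}

The visible difference is that a caller that does not need to wait now also gives the scheduler a chance to pick the next thread.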