mirror of https://git.zaroz.cloud/nintendo-back-up/yuzu/yuzu-mainline.git
synced 2025-03-21 01:53:15 +00:00
	hle: kernel: Migrate to KScopedSchedulerLock.
This commit is contained in:
parent 4756cb203e
commit ccce6cb3be
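Note: this commit retires the hand-written SchedulerLock type, whose constructor and destructor called GlobalSchedulerContext().Lock() and Unlock() directly, in favour of KScopedSchedulerLock, built on the new generic KScopedLock added below in k_scoped_lock.h. Call sites are mechanically renamed; in both versions the pattern is scope-bound (RAII) locking. A minimal sketch of the call-site pattern, not code from this diff:

    {
        KScopedSchedulerLock lock(kernel); // acquires the global scheduler lock
        // ... thread/scheduler state updates ...
    } // destructor releases the lock, including on early return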
					
@@ -159,6 +159,7 @@ add_library(core STATIC
     hle/kernel/k_scheduler.cpp
     hle/kernel/k_scheduler.h
     hle/kernel/k_scheduler_lock.h
+    hle/kernel/k_scoped_lock.h
     hle/kernel/k_scoped_scheduler_lock_and_sleep.h
     hle/kernel/kernel.cpp
     hle/kernel/kernel.h
@@ -59,7 +59,7 @@ ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 v
 }

 ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     const std::vector<std::shared_ptr<Thread>> waiting_threads =
         GetThreadsWaitingOnAddress(address);
     WakeThreads(waiting_threads, num_to_wake);
@@ -68,7 +68,7 @@ ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {

 ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                               s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     auto& memory = system.Memory();

     // Ensure that we can write to the address.
@@ -93,7 +93,7 @@ ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32

 ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
                                                                          s32 num_to_wake) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     auto& memory = system.Memory();

     // Ensure that we can write to the address.
@@ -211,7 +211,7 @@ ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s6
     }

     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (current_thread->IsWaitingForArbitration()) {
             RemoveThread(SharedFrom(current_thread));
             current_thread->WaitForArbitration(false);
@@ -266,7 +266,7 @@ ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 t
     }

     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (current_thread->IsWaitingForArbitration()) {
             RemoveThread(SharedFrom(current_thread));
             current_thread->WaitForArbitration(false);
@@ -27,6 +27,8 @@ class GlobalSchedulerContext final {
     friend class KScheduler;

 public:
+    using LockType = KAbstractSchedulerLock<KScheduler>;
+
     explicit GlobalSchedulerContext(KernelCore& kernel);
     ~GlobalSchedulerContext();

@@ -53,8 +55,16 @@ public:
     /// Returns true if the global scheduler lock is acquired
     bool IsLocked() const;

+    LockType& SchedulerLock() {
+        return scheduler_lock;
+    }
+
+    const LockType& SchedulerLock() const {
+        return scheduler_lock;
+    }
+
 private:
-    friend class SchedulerLock;
+    friend class KScopedSchedulerLock;
     friend class KScopedSchedulerLockAndSleep;

     KernelCore& kernel;
@@ -410,7 +410,7 @@ void KScheduler::YieldWithoutCoreMigration() {

     /* Perform the yield. */
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);

         const auto cur_state = cur_thread.scheduling_state;
         if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
@@ -451,7 +451,7 @@ void KScheduler::YieldWithCoreMigration() {

     /* Perform the yield. */
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);

         const auto cur_state = cur_thread.scheduling_state;
         if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
@@ -541,7 +541,7 @@ void KScheduler::YieldToAnyThread() {

     /* Perform the yield. */
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);

         const auto cur_state = cur_thread.scheduling_state;
         if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) {
@@ -793,12 +793,9 @@ void KScheduler::Initialize() {
     }
 }

-SchedulerLock::SchedulerLock(KernelCore& kernel) : kernel{kernel} {
-    kernel.GlobalSchedulerContext().Lock();
-}
+KScopedSchedulerLock::KScopedSchedulerLock(KernelCore& kernel)
+    : KScopedLock(kernel.GlobalSchedulerContext().SchedulerLock()) {}

-SchedulerLock::~SchedulerLock() {
-    kernel.GlobalSchedulerContext().Unlock();
-}
+KScopedSchedulerLock::~KScopedSchedulerLock() = default;

 } // namespace Kernel
		||||
@@ -14,6 +14,7 @@
 #include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_priority_queue.h"
 #include "core/hle/kernel/k_scheduler_lock.h"
+#include "core/hle/kernel/k_scoped_lock.h"

 namespace Common {
 class Fiber;
@@ -198,13 +199,10 @@ private:
     Common::SpinLock guard{};
 };

-class SchedulerLock {
+class KScopedSchedulerLock : KScopedLock<GlobalSchedulerContext::LockType> {
 public:
-    [[nodiscard]] explicit SchedulerLock(KernelCore& kernel);
-    ~SchedulerLock();
-
-protected:
-    KernelCore& kernel;
+    explicit KScopedSchedulerLock(KernelCore& kernel);
+    ~KScopedSchedulerLock();
 };

 } // namespace Kernel
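Design note: KScopedSchedulerLock inherits privately from KScopedLock<GlobalSchedulerContext::LockType>, so the Lock()/Unlock() sequencing lives entirely in the base class and callers can only construct and destroy the guard. Its constructor (defined in k_scheduler.cpp above) simply hands the base class the lock object obtained from the new GlobalSchedulerContext::SchedulerLock() accessor, which is why the old hand-written constructor and destructor bodies disappear.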
src/core/hle/kernel/k_scoped_lock.h (new file, 39 lines)
@@ -0,0 +1,39 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+template <typename T>
+concept KLockable = !std::is_reference<T>::value && requires(T & t) {
+    { t.Lock() }
+    ->std::same_as<void>;
+    { t.Unlock() }
+    ->std::same_as<void>;
+};
+
+template <typename T>
+requires KLockable<T> class KScopedLock : NonCopyable {
+
+private:
+    T* lock_ptr;
+
+public:
+    explicit KScopedLock(T* l) : lock_ptr(l) {
+        this->lock_ptr->Lock();
+    }
+    explicit KScopedLock(T& l) : KScopedLock(std::addressof(l)) { /* ... */
+    }
+    ~KScopedLock() {
+        this->lock_ptr->Unlock();
+    }
+};
+
+} // namespace Kernel
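For illustration (not part of the commit): KLockable accepts any non-reference type exposing void Lock() and void Unlock(), and KScopedLock deduces the lock type at the call site via class template argument deduction. ManualEvent below is a made-up placeholder type:

    #include <concepts>
    #include <type_traits>

    struct ManualEvent {
        void Lock() {}   // acquire (placeholder body)
        void Unlock() {} // release (placeholder body)
    };

    static_assert(Kernel::KLockable<ManualEvent>);
    static_assert(!Kernel::KLockable<ManualEvent&>); // references are rejected

    void WithEvent(ManualEvent& e) {
        Kernel::KScopedLock guard(e); // Lock() here, Unlock() at scope exit
    }

Note that std::mutex would not satisfy the concept, since it spells its members lock()/unlock() in lowercase.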
@@ -146,7 +146,7 @@ struct KernelCore::Impl {
         preemption_event = Core::Timing::CreateEvent(
             "PreemptionCallback", [this, &kernel](std::uintptr_t, std::chrono::nanoseconds) {
                 {
-                    SchedulerLock lock(kernel);
+                    KScopedSchedulerLock lock(kernel);
                     global_scheduler_context->PreemptThreads();
                 }
                 const auto time_interval = std::chrono::nanoseconds{
@@ -612,7 +612,7 @@ const Kernel::SharedMemory& KernelCore::GetTimeSharedMem() const {
 void KernelCore::Suspend(bool in_suspention) {
     const bool should_suspend = exception_exited || in_suspention;
     {
-        SchedulerLock lock(*this);
+        KScopedSchedulerLock lock(*this);
         ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep;
         for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
             impl->suspend_threads[i]->SetStatus(status);
@@ -75,7 +75,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
     std::shared_ptr<Thread> current_thread =
         SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         // The mutex address must be 4-byte aligned
         if ((address % sizeof(u32)) != 0) {
             return ERR_INVALID_ADDRESS;
@@ -114,7 +114,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
     }

     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         auto* owner = current_thread->GetLockOwner();
         if (owner != nullptr) {
             owner->RemoveMutexWaiter(current_thread);
@@ -153,7 +153,7 @@ std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thr

 ResultCode Mutex::Release(VAddr address) {
     auto& kernel = system.Kernel();
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);

     std::shared_ptr<Thread> current_thread =
         SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
@@ -54,7 +54,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
     {
-        SchedulerLock lock{kernel};
+        KScopedSchedulerLock lock{kernel};
         thread->SetStatus(ThreadStatus::Ready);
     }
 }
@@ -213,7 +213,7 @@ void Process::UnregisterThread(const Thread* thread) {
 }

 ResultCode Process::ClearSignalState() {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     if (status == ProcessStatus::Exited) {
         LOG_ERROR(Kernel, "called on a terminated process instance.");
         return ERR_INVALID_STATE;
@@ -347,7 +347,7 @@ static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
 }

 VAddr Process::CreateTLSRegion() {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     if (auto tls_page_iter{FindTLSPageWithAvailableSlots(tls_pages)};
         tls_page_iter != tls_pages.cend()) {
         return *tls_page_iter->ReserveSlot();
@@ -378,7 +378,7 @@ VAddr Process::CreateTLSRegion() {
 }

 void Process::FreeTLSRegion(VAddr tls_address) {
-    SchedulerLock lock(system.Kernel());
+    KScopedSchedulerLock lock(system.Kernel());
     const VAddr aligned_address = Common::AlignDown(tls_address, Core::Memory::PAGE_SIZE);
     auto iter =
         std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
@@ -39,7 +39,7 @@ void ReadableEvent::Clear() {
 }

 ResultCode ReadableEvent::Reset() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     if (!is_signaled) {
         LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
                   GetObjectId(), GetTypeName(), GetName());
@@ -171,7 +171,7 @@ ResultCode ServerSession::CompleteSyncRequest() {

     // Some service requests require the thread to block
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         if (!context.IsThreadWaiting()) {
             context.GetThread().ResumeFromWait();
             context.GetThread().SetSynchronizationResults(nullptr, result);
@@ -345,7 +345,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {

     auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         thread->InvalidateHLECallback();
         thread->SetStatus(ThreadStatus::WaitIPC);
         session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
@@ -359,7 +359,7 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
         }

         {
-            SchedulerLock lock(kernel);
+            KScopedSchedulerLock lock(kernel);
             auto* sync_object = thread->GetHLESyncObject();
             sync_object->RemoveWaitingThread(SharedFrom(thread));
         }
@@ -1691,7 +1691,7 @@ static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_add
     }

     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);

         auto* owner = current_thread->GetLockOwner();
         if (owner != nullptr) {
@@ -1724,7 +1724,7 @@ static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_

     // Retrieve a list of all threads that are waiting for this condition variable.
     auto& kernel = system.Kernel();
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     auto* const current_process = kernel.CurrentProcess();
     std::vector<std::shared_ptr<Thread>> waiting_threads =
         current_process->GetConditionVariableThreads(condition_variable_addr);
@@ -19,7 +19,7 @@ Synchronization::Synchronization(Core::System& system) : system{system} {}

 void Synchronization::SignalObject(SynchronizationObject& obj) const {
     auto& kernel = system.Kernel();
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     if (obj.IsSignaled()) {
         for (auto thread : obj.GetWaitingThreads()) {
             if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
@@ -90,7 +90,7 @@ std::pair<ResultCode, Handle> Synchronization::WaitFor(
     }

     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         ResultCode signaling_result = thread->GetSignalingResult();
         SynchronizationObject* signaling_object = thread->GetSignalingObject();
         thread->SetSynchronizationObjects(nullptr);
@@ -51,7 +51,7 @@ Thread::~Thread() = default;

 void Thread::Stop() {
     {
-        SchedulerLock lock(kernel);
+        KScopedSchedulerLock lock(kernel);
         SetStatus(ThreadStatus::Dead);
         Signal();
         kernel.GlobalHandleTable().Close(global_handle);
@@ -68,7 +68,7 @@ void Thread::Stop() {
 }

 void Thread::ResumeFromWait() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     switch (status) {
     case ThreadStatus::Paused:
     case ThreadStatus::WaitSynch:
@@ -100,19 +100,18 @@ void Thread::ResumeFromWait() {
 }

 void Thread::OnWakeUp() {
-    SchedulerLock lock(kernel);
-
+    KScopedSchedulerLock lock(kernel);
     SetStatus(ThreadStatus::Ready);
 }

 ResultCode Thread::Start() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     SetStatus(ThreadStatus::Ready);
     return RESULT_SUCCESS;
 }

 void Thread::CancelWait() {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) {
         is_sync_cancelled = true;
         return;
@@ -228,7 +227,7 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
 }

 void Thread::SetPriority(u32 priority) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
                "Invalid priority value.");
     nominal_priority = priority;
@@ -365,7 +364,7 @@ bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
 }

 ResultCode Thread::SetActivity(ThreadActivity value) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);

     auto sched_status = GetSchedulingStatus();

@@ -435,7 +434,7 @@ void Thread::SetCurrentPriority(u32 new_priority) {
 }

 ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
-    SchedulerLock lock(kernel);
+    KScopedSchedulerLock lock(kernel);
     const auto HighestSetCore = [](u64 mask, u32 max_cores) {
         for (s32 core = static_cast<s32>(max_cores - 1); core >= 0; core--) {
             if (((mask >> core) & 1) != 0) {
@@ -18,7 +18,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
     time_manager_event_type = Core::Timing::CreateEvent(
         "Kernel::TimeManagerCallback",
         [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
-            const SchedulerLock lock(system.Kernel());
+            const KScopedSchedulerLock lock(system.Kernel());
             const auto proper_handle = static_cast<Handle>(thread_handle);
             if (cancelled_events[proper_handle]) {
                 return;