early-access version 2259

parent 3c5094eec0, commit fdaddafa9e
32 changed files with 665 additions and 763 deletions
README.md
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============

-This is the source code for early-access 2258.
+This is the source code for early-access 2259.

 ## Legal Notice

src/core/CMakeLists.txt
@@ -185,7 +185,6 @@ add_library(core STATIC
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
     hle/kernel/k_handle_table.h
-    hle/kernel/k_light_condition_variable.cpp
     hle/kernel/k_light_condition_variable.h
     hle/kernel/k_light_lock.cpp
     hle/kernel/k_light_lock.h
@@ -238,7 +237,6 @@ add_library(core STATIC
     hle/kernel/k_system_control.h
     hle/kernel/k_thread.cpp
     hle/kernel/k_thread.h
-    hle/kernel/k_thread_queue.cpp
     hle/kernel/k_thread_queue.h
     hle/kernel/k_trace.h
     hle/kernel/k_transfer_memory.cpp
src/core/core.cpp
@@ -521,6 +521,12 @@ const ARM_Interface& System::CurrentArmInterface() const {
     return impl->kernel.CurrentPhysicalCore().ArmInterface();
 }

+std::size_t System::CurrentCoreIndex() const {
+    std::size_t core = impl->kernel.GetCurrentHostThreadID();
+    ASSERT(core < Core::Hardware::NUM_CPU_CORES);
+    return core;
+}
+
 Kernel::PhysicalCore& System::CurrentPhysicalCore() {
     return impl->kernel.CurrentPhysicalCore();
 }
src/core/core.h
@@ -208,6 +208,9 @@ public:
     /// Gets an ARM interface to the CPU core that is currently running
     [[nodiscard]] const ARM_Interface& CurrentArmInterface() const;

+    /// Gets the index of the currently running CPU core
+    [[nodiscard]] std::size_t CurrentCoreIndex() const;
+
     /// Gets the physical core for the CPU core that is currently running
     [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();

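
Note: the reintroduced System::CurrentCoreIndex() works because every host thread that emulates a core has registered an id for itself, and that id doubles as the core index. A minimal standalone sketch of that mapping, where NUM_CPU_CORES and the thread-local registry are illustrative stand-ins rather than yuzu's actual implementation:

    #include <cassert>
    #include <cstddef>

    constexpr std::size_t NUM_CPU_CORES = 4;

    // Each host thread that emulates a core records its index here.
    // Initialized to an invalid value until RegisterCoreThread runs.
    thread_local std::size_t g_host_thread_core_id = NUM_CPU_CORES;

    void RegisterCoreThread(std::size_t core) {
        g_host_thread_core_id = core;
    }

    std::size_t CurrentCoreIndex() {
        // Mirrors the diff: the host-thread id is the core index, and it
        // must stay below the emulated core count.
        const std::size_t core = g_host_thread_core_id;
        assert(core < NUM_CPU_CORES);
        return core;
    }
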
src/core/cpu_manager.cpp
@@ -117,18 +117,17 @@ void CpuManager::MultiCoreRunGuestLoop() {
                 physical_core = &kernel.CurrentPhysicalCore();
             }
             system.ExitDynarmicProfile();
-            {
-                Kernel::KScopedDisableDispatch dd(kernel);
-                physical_core->ArmInterface().ClearExclusiveState();
-            }
+            physical_core->ArmInterface().ClearExclusiveState();
+            kernel.CurrentScheduler()->RescheduleCurrentCore();
         }
     }
 }

 void CpuManager::MultiCoreRunIdleThread() {
     auto& kernel = system.Kernel();
     while (true) {
-        Kernel::KScopedDisableDispatch dd(kernel);
-        kernel.CurrentPhysicalCore().Idle();
+        auto& physical_core = kernel.CurrentPhysicalCore();
+        physical_core.Idle();
+        kernel.CurrentScheduler()->RescheduleCurrentCore();
     }
 }

@@ -136,12 +135,12 @@ void CpuManager::MultiCoreRunSuspendThread() {
     auto& kernel = system.Kernel();
     kernel.CurrentScheduler()->OnThreadStart();
     while (true) {
-        auto core = kernel.CurrentPhysicalCoreIndex();
+        auto core = kernel.GetCurrentHostThreadID();
         auto& scheduler = *kernel.CurrentScheduler();
         Kernel::KThread* current_thread = scheduler.GetCurrentThread();
         Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
         ASSERT(scheduler.ContextSwitchPending());
-        ASSERT(core == kernel.CurrentPhysicalCoreIndex());
+        ASSERT(core == kernel.GetCurrentHostThreadID());
         scheduler.RescheduleCurrentCore();
     }
 }
@@ -347,11 +346,15 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
         sc_sync_first_use = false;
     }

-    // Emulation was stopped
-    if (stop_token.stop_requested()) {
+    // Abort if emulation was killed before the session really starts
+    if (!system.IsPoweredOn()) {
         return;
     }

+    if (stop_token.stop_requested()) {
+        break;
+    }
+
     auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
     data.is_running = true;
     Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
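
Note: the reworked RunThread keeps two distinct exits, a return when emulation never powered on and a break out of the worker loop on a cooperative stop request. A minimal standalone sketch of the std::stop_token pattern (the loop body is illustrative, not yuzu's):

    #include <chrono>
    #include <stop_token>
    #include <thread>

    void RunThread(std::stop_token stop_token) {
        while (true) {
            // Leave the loop when the owning jthread requests a stop.
            if (stop_token.stop_requested()) {
                break;
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }

    int main() {
        std::jthread worker{RunThread};
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        // jthread's destructor requests stop and joins automatically.
    }
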
src/core/hle/kernel/k_address_arbiter.cpp
@@ -8,7 +8,6 @@
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/time_manager.h"
@@ -29,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {

 bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    const auto current_core = system.CurrentCoreIndex();

     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -59,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu

 bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    const auto current_core = system.CurrentCoreIndex();

     // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
     // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -86,27 +85,6 @@ bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32
     return true;
 }

-class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
-public:
-    explicit ThreadQueueImplForKAddressArbiter(KernelCore& kernel_, KAddressArbiter::ThreadTree* t)
-        : KThreadQueue(kernel_), m_tree(t) {}
-
-    virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
-                            bool cancel_timer_task) override {
-        // If the thread is waiting on an address arbiter, remove it from the tree.
-        if (waiting_thread->IsWaitingForAddressArbiter()) {
-            m_tree->erase(m_tree->iterator_to(*waiting_thread));
-            waiting_thread->ClearAddressArbiter();
-        }
-
-        // Invoke the base cancel wait handler.
-        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
-    }
-
-private:
-    KAddressArbiter::ThreadTree* m_tree;
-};
-
 } // namespace

 ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
@@ -118,14 +96,14 @@ ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
         auto it = thread_tree.nfind_light({addr, -1});
         while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
-            // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
-            target_thread->EndWait(ResultSuccess);
+            target_thread->SetSyncedObject(nullptr, ResultSuccess);

             ASSERT(target_thread->IsWaitingForAddressArbiter());
-            target_thread->ClearAddressArbiter();
+            target_thread->Wakeup();

             it = thread_tree.erase(it);
+            target_thread->ClearAddressArbiter();
             ++num_waiters;
         }
     }
@@ -151,14 +129,14 @@ ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32
         auto it = thread_tree.nfind_light({addr, -1});
         while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
-            // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
-            target_thread->EndWait(ResultSuccess);
+            target_thread->SetSyncedObject(nullptr, ResultSuccess);

             ASSERT(target_thread->IsWaitingForAddressArbiter());
-            target_thread->ClearAddressArbiter();
+            target_thread->Wakeup();

             it = thread_tree.erase(it);
+            target_thread->ClearAddressArbiter();
             ++num_waiters;
         }
     }
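
Note: all three Signal* hunks in this file revert to the same wake-up sequence: instead of the single EndWait() call, the result is stored on the thread, the thread is woken, it is erased from the wait tree, and only then is its arbiter key cleared. A standalone sketch of that ordering over an ordered wait tree, where Waiter and the multimap are stand-ins for KThread and the kernel's intrusive ThreadTree:

    #include <cstdint>
    #include <map>

    struct Waiter {
        int result = -1;
        bool awake = false;
    };

    using WaitTree = std::multimap<std::uint64_t, Waiter*>; // keyed by address

    int SignalAddress(WaitTree& tree, std::uint64_t addr, int count) {
        int num_waiters = 0;
        // First waiter registered for this address, mirroring nfind_light({addr, -1}).
        auto it = tree.lower_bound(addr);
        while (it != tree.end() && it->first == addr &&
               (count <= 0 || num_waiters < count)) {
            Waiter* target = it->second;
            target->result = 0;   // SetSyncedObject(nullptr, ResultSuccess)
            target->awake = true; // Wakeup()
            it = tree.erase(it);  // drop the waiter from the tree after waking it
            ++num_waiters;
        }
        return num_waiters;
    }
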
@@ -219,14 +197,14 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32

         while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
                (it->GetAddressArbiterKey() == addr)) {
-            // End the thread's wait.
             KThread* target_thread = std::addressof(*it);
-            target_thread->EndWait(ResultSuccess);
+            target_thread->SetSyncedObject(nullptr, ResultSuccess);

             ASSERT(target_thread->IsWaitingForAddressArbiter());
-            target_thread->ClearAddressArbiter();
+            target_thread->Wakeup();

             it = thread_tree.erase(it);
+            target_thread->ClearAddressArbiter();
             ++num_waiters;
         }
     }
@@ -236,7 +214,6 @@ ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32
 ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
     // Prepare to wait.
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
-    ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));

     {
         KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
@@ -247,6 +224,9 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
             return ResultTerminationRequested;
         }

+        // Set the synced object.
+        cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
+
         // Read the value from userspace.
         s32 user_value{};
         bool succeeded{};
|
@ -276,20 +256,31 @@ ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement
|
||||||
// Set the arbiter.
|
// Set the arbiter.
|
||||||
cur_thread->SetAddressArbiter(&thread_tree, addr);
|
cur_thread->SetAddressArbiter(&thread_tree, addr);
|
||||||
thread_tree.insert(*cur_thread);
|
thread_tree.insert(*cur_thread);
|
||||||
|
cur_thread->SetState(ThreadState::Waiting);
|
||||||
// Wait for the thread to finish.
|
|
||||||
cur_thread->BeginWait(std::addressof(wait_queue));
|
|
||||||
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
|
cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Cancel the timer wait.
|
||||||
|
kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
|
||||||
|
|
||||||
|
// Remove from the address arbiter.
|
||||||
|
{
|
||||||
|
KScopedSchedulerLock sl(kernel);
|
||||||
|
|
||||||
|
if (cur_thread->IsWaitingForAddressArbiter()) {
|
||||||
|
thread_tree.erase(thread_tree.iterator_to(*cur_thread));
|
||||||
|
cur_thread->ClearAddressArbiter();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Get the result.
|
// Get the result.
|
||||||
return cur_thread->GetWaitResult();
|
KSynchronizationObject* dummy{};
|
||||||
|
return cur_thread->GetWaitResult(&dummy);
|
||||||
}
|
}
|
||||||
|
|
||||||
ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
|
ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
|
||||||
// Prepare to wait.
|
// Prepare to wait.
|
||||||
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
|
||||||
ThreadQueueImplForKAddressArbiter wait_queue(kernel, std::addressof(thread_tree));
|
|
||||||
|
|
||||||
{
|
{
|
||||||
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
|
KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
|
||||||
|
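
Note: both Wait* paths restore the classic timed-wait shape: default the result to "timed out" before sleeping, and after waking always cancel the time event and unlink from the wait tree before reading the final result. A rough analogy using standard C++ primitives (not yuzu's API; the scheduler lock and intrusive tree are elided):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    enum class Result { Success, TimedOut };

    struct TimedWaiter {
        std::mutex m;
        std::condition_variable cv;
        bool signaled = false;
        Result result = Result::TimedOut; // default set before sleeping

        Result Wait(std::chrono::nanoseconds timeout) {
            std::unique_lock lk{m};
            // Sleep until signaled or the timer fires; on timeout the
            // pre-set "timed out" result is what the caller observes.
            cv.wait_for(lk, timeout, [&] { return signaled; });
            // Cleanup always runs: a real kernel would unschedule the time
            // event and unlink the thread from the wait tree here.
            signaled = false;
            return result;
        }

        void Signal() {
            std::scoped_lock lk{m};
            result = Result::Success;
            signaled = true;
            cv.notify_one();
        }
    };
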
@@ -300,6 +291,9 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
             return ResultTerminationRequested;
         }

+        // Set the synced object.
+        cur_thread->SetSyncedObject(nullptr, ResultTimedOut);
+
         // Read the value from userspace.
         s32 user_value{};
         if (!ReadFromUser(system, &user_value, addr)) {
@@ -322,14 +316,26 @@ ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
         // Set the arbiter.
         cur_thread->SetAddressArbiter(&thread_tree, addr);
         thread_tree.insert(*cur_thread);
+        cur_thread->SetState(ThreadState::Waiting);

-        // Wait for the thread to finish.
-        cur_thread->BeginWait(std::addressof(wait_queue));
         cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
     }

+    // Cancel the timer wait.
+    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
+
+    // Remove from the address arbiter.
+    {
+        KScopedSchedulerLock sl(kernel);
+
+        if (cur_thread->IsWaitingForAddressArbiter()) {
+            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
+            cur_thread->ClearAddressArbiter();
+        }
+    }
+
     // Get the result.
-    return cur_thread->GetWaitResult();
+    KSynchronizationObject* dummy{};
+    return cur_thread->GetWaitResult(std::addressof(dummy));
 }

 } // namespace Kernel
src/core/hle/kernel/k_auto_object.h
@@ -170,10 +170,6 @@ public:
         }
     }

-    const std::string& GetName() const {
-        return name;
-    }
-
 private:
     void RegisterWithKernel();
     void UnregisterWithKernel();
src/core/hle/kernel/k_condition_variable.cpp
@@ -11,7 +11,6 @@
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_common.h"
 #include "core/hle/kernel/svc_results.h"
@@ -34,7 +33,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
 bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
                       u32 new_orr_mask) {
     auto& monitor = system.Monitor();
-    const auto current_core = system.Kernel().CurrentPhysicalCoreIndex();
+    const auto current_core = system.CurrentCoreIndex();

     // Load the value from the address.
     const auto expected = monitor.ExclusiveRead32(current_core, address);
@@ -58,48 +57,6 @@ bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero
     return true;
 }

-class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
-public:
-    explicit ThreadQueueImplForKConditionVariableWaitForAddress(KernelCore& kernel_)
-        : KThreadQueue(kernel_) {}
-
-    virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
-                            bool cancel_timer_task) override {
-        // Remove the thread as a waiter from its owner.
-        waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
-
-        // Invoke the base cancel wait handler.
-        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
-    }
-};
-
-class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue {
-private:
-    KConditionVariable::ThreadTree* m_tree;
-
-public:
-    explicit ThreadQueueImplForKConditionVariableWaitConditionVariable(
-        KernelCore& kernel_, KConditionVariable::ThreadTree* t)
-        : KThreadQueue(kernel_), m_tree(t) {}
-
-    virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
-                            bool cancel_timer_task) override {
-        // Remove the thread as a waiter from its owner.
-        if (KThread* owner = waiting_thread->GetLockOwner(); owner != nullptr) {
-            owner->RemoveWaiter(waiting_thread);
-        }
-
-        // If the thread is waiting on a condvar, remove it from the tree.
-        if (waiting_thread->IsWaitingForConditionVariable()) {
-            m_tree->erase(m_tree->iterator_to(*waiting_thread));
-            waiting_thread->ClearConditionVariable();
-        }
-
-        // Invoke the base cancel wait handler.
-        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
-    }
-};
-
 } // namespace

 KConditionVariable::KConditionVariable(Core::System& system_)
@@ -121,77 +78,84 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {

     // Determine the next tag.
     u32 next_value{};
-    if (next_owner_thread != nullptr) {
+    if (next_owner_thread) {
         next_value = next_owner_thread->GetAddressKeyValue();
         if (num_waiters > 1) {
             next_value |= Svc::HandleWaitMask;
         }

-        // Write the value to userspace.
-        ResultCode result{ResultSuccess};
-        if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
-            result = ResultSuccess;
-        } else {
-            result = ResultInvalidCurrentMemory;
-        }
-
-        // Signal the next owner thread.
-        next_owner_thread->EndWait(result);
-        return result;
-    } else {
-        // Just write the value to userspace.
-        R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
-                 ResultInvalidCurrentMemory);
-
-        return ResultSuccess;
+        next_owner_thread->SetSyncedObject(nullptr, ResultSuccess);
+        next_owner_thread->Wakeup();
     }
+
+    // Write the value to userspace.
+    if (!WriteToUser(system, addr, std::addressof(next_value))) {
+        if (next_owner_thread) {
+            next_owner_thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
+        }
+
+        return ResultInvalidCurrentMemory;
+    }
+
+    return ResultSuccess;
 }

 ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
     KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
-    ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);

     // Wait for the address.
-    KThread* owner_thread{};
     {
-        KScopedSchedulerLock sl(kernel);
-
-        // Check if the thread should terminate.
-        R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
-
-        // Read the tag from userspace.
-        u32 test_tag{};
-        R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
-
-        // If the tag isn't the handle (with wait mask), we're done.
-        R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
-
-        // Get the lock owner thread.
-        owner_thread = kernel.CurrentProcess()
-                           ->GetHandleTable()
-                           .GetObjectWithoutPseudoHandle<KThread>(handle)
-                           .ReleasePointerUnsafe();
-        R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
-
-        // Update the lock.
-        cur_thread->SetAddressKey(addr, value);
-        owner_thread->AddWaiter(cur_thread);
-
-        // Begin waiting.
-        cur_thread->BeginWait(std::addressof(wait_queue));
-        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
-        cur_thread->SetMutexWaitAddressForDebugging(addr);
+        KScopedAutoObject<KThread> owner_thread;
+        ASSERT(owner_thread.IsNull());
+        {
+            KScopedSchedulerLock sl(kernel);
+            cur_thread->SetSyncedObject(nullptr, ResultSuccess);
+
+            // Check if the thread should terminate.
+            R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
+
+            {
+                // Read the tag from userspace.
+                R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
+                         ResultInvalidCurrentMemory);
+
+                // If the tag isn't the handle (with wait mask), we're done.
+                R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), ResultSuccess);
+
+                // Get the lock owner thread.
+                owner_thread =
+                    kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
+                        handle);
+                R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
+
+                // Update the lock.
+                cur_thread->SetAddressKey(addr, value);
+                owner_thread->AddWaiter(cur_thread);
+                cur_thread->SetState(ThreadState::Waiting);
+
+                cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
+                cur_thread->SetMutexWaitAddressForDebugging(addr);
+            }
+        }
+        ASSERT(owner_thread.IsNotNull());
     }

-    // Close our reference to the owner thread, now that the wait is over.
-    owner_thread->Close();
+    // Remove the thread as a waiter from the lock owner.
+    {
+        KScopedSchedulerLock sl(kernel);
+        KThread* owner_thread = cur_thread->GetLockOwner();
+        if (owner_thread != nullptr) {
+            owner_thread->RemoveWaiter(cur_thread);
+        }
+    }

     // Get the wait result.
-    return cur_thread->GetWaitResult();
+    KSynchronizationObject* dummy{};
+    return cur_thread->GetWaitResult(std::addressof(dummy));
 }

-void KConditionVariable::SignalImpl(KThread* thread) {
+KThread* KConditionVariable::SignalImpl(KThread* thread) {
     // Check pre-conditions.
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());

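
Note: the WaitForAddress hunk above swaps a raw pointer plus an explicit Close() for a scope-bound reference holder. A minimal standalone sketch of that idiom, with RefCounted and Scoped as illustrative stand-ins for KAutoObject and KScopedAutoObject:

    #include <utility>

    // Intrusive ref-count plus a holder that drops its reference on scope exit.
    struct RefCounted {
        int refs = 1;
        void Open() { ++refs; }
        void Close() { if (--refs == 0) delete this; }
        virtual ~RefCounted() = default;
    };

    template <typename T>
    class Scoped {
    public:
        Scoped() = default;
        explicit Scoped(T* o) : obj{o} {} // takes ownership of one reference
        Scoped(const Scoped&) = delete;
        Scoped& operator=(Scoped&& rhs) noexcept {
            if (obj) obj->Close();
            obj = std::exchange(rhs.obj, nullptr);
            return *this;
        }
        ~Scoped() { if (obj) obj->Close(); }
        bool IsNull() const { return obj == nullptr; }
        bool IsNotNull() const { return obj != nullptr; }
        T* operator->() const { return obj; }
    private:
        T* obj = nullptr;
    };
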
@@ -205,16 +169,18 @@ void KConditionVariable::SignalImpl(KThread* thread) {
         // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
         // TODO(bunnei): We should call CanAccessAtomic(..) here.
         can_access = true;
-        if (can_access) [[likely]] {
+        if (can_access) {
             UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
                              Svc::HandleWaitMask);
         }
     }

-    if (can_access) [[likely]] {
+    KThread* thread_to_close = nullptr;
+    if (can_access) {
         if (prev_tag == Svc::InvalidHandle) {
             // If nobody held the lock previously, we're all good.
-            thread->EndWait(ResultSuccess);
+            thread->SetSyncedObject(nullptr, ResultSuccess);
+            thread->Wakeup();
         } else {
             // Get the previous owner.
             KThread* owner_thread = kernel.CurrentProcess()
@@ -223,24 +189,35 @@ void KConditionVariable::SignalImpl(KThread* thread) {
                                             static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
                                             .ReleasePointerUnsafe();

-            if (owner_thread) [[likely]] {
+            if (owner_thread) {
                 // Add the thread as a waiter on the owner.
                 owner_thread->AddWaiter(thread);
-                owner_thread->Close();
+                thread_to_close = owner_thread;
             } else {
                 // The lock was tagged with a thread that doesn't exist.
-                thread->EndWait(ResultInvalidState);
+                thread->SetSyncedObject(nullptr, ResultInvalidState);
+                thread->Wakeup();
             }
         }
     } else {
         // If the address wasn't accessible, note so.
-        thread->EndWait(ResultInvalidCurrentMemory);
+        thread->SetSyncedObject(nullptr, ResultInvalidCurrentMemory);
+        thread->Wakeup();
     }
+
+    return thread_to_close;
 }

 void KConditionVariable::Signal(u64 cv_key, s32 count) {
+    // Prepare for signaling.
+    constexpr int MaxThreads = 16;
+
+    KLinkedList<KThread> thread_list{kernel};
+    std::array<KThread*, MaxThreads> thread_array;
+    s32 num_to_close{};
+
     // Perform signaling.
-    int num_waiters = 0;
+    s32 num_waiters{};
     {
         KScopedSchedulerLock sl(kernel);

@@ -249,7 +226,14 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
                (it->GetConditionVariableKey() == cv_key)) {
             KThread* target_thread = std::addressof(*it);

-            this->SignalImpl(target_thread);
+            if (KThread* thread = SignalImpl(target_thread); thread != nullptr) {
+                if (num_to_close < MaxThreads) {
+                    thread_array[num_to_close++] = thread;
+                } else {
+                    thread_list.push_back(*thread);
+                }
+            }
+
             it = thread_tree.erase(it);
             target_thread->ClearConditionVariable();
             ++num_waiters;
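
Note: SignalImpl now returns a thread whose reference must be dropped after the scheduler lock is released; up to MaxThreads of them go into a fixed array and the rest overflow into a list, all closed once the lock is gone. A standalone sketch of that collect-then-close pattern (Thread, the mutex, and the callback are illustrative; std::vector stands in for the kernel's slab-backed overflow list):

    #include <array>
    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct Thread { void Close() {} };

    void SignalAll(std::mutex& sched_lock, Thread* (*signal_one)()) {
        constexpr std::size_t MaxThreads = 16;
        std::array<Thread*, MaxThreads> to_close{};
        std::size_t num_to_close = 0;
        std::vector<Thread*> overflow;

        {
            std::scoped_lock lk{sched_lock};
            // Stash pointers under the lock; the fixed array avoids heap
            // allocation in the common case.
            while (Thread* t = signal_one()) {
                if (num_to_close < MaxThreads) {
                    to_close[num_to_close++] = t;
                } else {
                    overflow.push_back(t);
                }
            }
        }

        // Drop references only after the lock is released.
        for (std::size_t i = 0; i < num_to_close; ++i) {
            to_close[i]->Close();
        }
        for (Thread* t : overflow) {
            t->Close();
        }
    }
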
@@ -257,20 +241,31 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {

         // If we have no waiters, clear the has waiter flag.
         if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
-            const u32 has_waiter_flag = 0;
+            const u32 has_waiter_flag{};
             WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
         }
     }
+
+    // Close threads in the array.
+    for (auto i = 0; i < num_to_close; ++i) {
+        thread_array[i]->Close();
+    }
+
+    // Close threads in the list.
+    for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
+        (*it).Close();
+    }
 }

 ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
     // Prepare to wait.
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
-    ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
-        kernel, std::addressof(thread_tree));
+    KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();

     {
-        KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout};
+
+        // Set the synced object.
+        cur_thread->SetSyncedObject(nullptr, ResultTimedOut);

         // Check that the thread isn't terminating.
         if (cur_thread->IsTerminationRequested()) {
@@ -295,7 +290,8 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
             }

             // Wake up the next owner.
-            next_owner_thread->EndWait(ResultSuccess);
+            next_owner_thread->SetSyncedObject(nullptr, ResultSuccess);
+            next_owner_thread->Wakeup();
         }

         // Write to the cv key.
@@ -312,21 +308,40 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
             }
         }

-        // If timeout is zero, time out.
-        R_UNLESS(timeout != 0, ResultTimedOut);
-
         // Update condition variable tracking.
-        cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
-        thread_tree.insert(*cur_thread);
+        {
+            cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
+            thread_tree.insert(*cur_thread);
+        }

-        // Begin waiting.
-        cur_thread->BeginWait(std::addressof(wait_queue));
-        cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
-        cur_thread->SetMutexWaitAddressForDebugging(addr);
+        // If the timeout is non-zero, set the thread as waiting.
+        if (timeout != 0) {
+            cur_thread->SetState(ThreadState::Waiting);
+            cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
+            cur_thread->SetMutexWaitAddressForDebugging(addr);
+        }
     }

-    // Get the wait result.
-    return cur_thread->GetWaitResult();
+    // Cancel the timer wait.
+    kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
+
+    // Remove from the condition variable.
+    {
+        KScopedSchedulerLock sl(kernel);
+
+        if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
+            owner->RemoveWaiter(cur_thread);
+        }
+
+        if (cur_thread->IsWaitingForConditionVariable()) {
+            thread_tree.erase(thread_tree.iterator_to(*cur_thread));
+            cur_thread->ClearConditionVariable();
+        }
+    }
+
+    // Get the result.
+    KSynchronizationObject* dummy{};
+    return cur_thread->GetWaitResult(std::addressof(dummy));
 }

 } // namespace Kernel
src/core/hle/kernel/k_condition_variable.h
@@ -34,7 +34,7 @@ public:
     [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);

 private:
-    void SignalImpl(KThread* thread);
+    [[nodiscard]] KThread* SignalImpl(KThread* thread);

     ThreadTree thread_tree;

src/core/hle/kernel/k_handle_table.cpp
@@ -13,7 +13,6 @@ ResultCode KHandleTable::Finalize() {
     // Get the table and clear our record of it.
     u16 saved_table_size = 0;
     {
-        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);

         std::swap(m_table_size, saved_table_size);
@@ -44,7 +43,6 @@ bool KHandleTable::Remove(Handle handle) {
     // Find the object and free the entry.
     KAutoObject* obj = nullptr;
     {
-        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);

         if (this->IsValidHandle(handle)) {
@@ -64,7 +62,6 @@ bool KHandleTable::Remove(Handle handle) {
 }

 ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);

     // Never exceed our capacity.
@@ -87,7 +84,6 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
 }

 ResultCode KHandleTable::Reserve(Handle* out_handle) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);

     // Never exceed our capacity.
@@ -98,7 +94,6 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
 }

 void KHandleTable::Unreserve(Handle handle) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);

     // Unpack the handle.
@@ -117,7 +112,6 @@ void KHandleTable::Unreserve(Handle handle) {
 }

 void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
-    KScopedDisableDispatch dd(kernel);
     KScopedSpinLock lk(m_lock);

     // Unpack the handle.
src/core/hle/kernel/k_handle_table.h
@@ -68,7 +68,6 @@ public:
     template <typename T = KAutoObject>
     KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
         // Lock and look up in table.
-        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);

         if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -123,7 +122,6 @@ public:
     size_t num_opened;
     {
         // Lock the table.
-        KScopedDisableDispatch dd(kernel);
         KScopedSpinLock lk(m_lock);
         for (num_opened = 0; num_opened < num_handles; num_opened++) {
             // Get the current handle.
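
Note: every handle-table hunk removes a KScopedDisableDispatch that used to sit in front of the spin lock, leaving the table protected by the spin lock alone. The removed pattern composes two RAII guards in declaration order, so dispatch is re-enabled only after the lock is dropped. A minimal standalone sketch with illustrative stand-ins (not yuzu's types):

    #include <atomic>
    #include <mutex>

    struct ScopedDisableDispatch {
        std::atomic<int>& count;
        explicit ScopedDisableDispatch(std::atomic<int>& c) : count{c} { ++count; }
        ~ScopedDisableDispatch() { --count; }
    };

    std::atomic<int> g_dispatch_disable_count{0};
    std::mutex g_table_lock;

    void WithTable() {
        // Before this commit: dispatch was disabled first, then the lock
        // taken; destructors run in reverse order, re-enabling dispatch last.
        ScopedDisableDispatch dd{g_dispatch_disable_count};
        std::scoped_lock lk{g_table_lock};
        // ... operate on the table ...
    }
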
src/core/hle/kernel/k_light_condition_variable.h
@@ -1,25 +1,73 @@
-// Copyright 2021 yuzu Emulator Project
+// Copyright 2020 yuzu Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.

+// This file references various implementation details from Atmosphere, an open-source firmware for
+// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
+
 #pragma once

 #include "common/common_types.h"
-#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
+#include "core/hle/kernel/time_manager.h"

 namespace Kernel {

 class KernelCore;
-class KLightLock;

 class KLightConditionVariable {
 public:
     explicit KLightConditionVariable(KernelCore& kernel_) : kernel{kernel_} {}

-    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true);
-    void Broadcast();
+    void Wait(KLightLock* lock, s64 timeout = -1, bool allow_terminating_thread = true) {
+        WaitImpl(lock, timeout, allow_terminating_thread);
+    }
+
+    void Broadcast() {
+        KScopedSchedulerLock lk{kernel};
+
+        // Signal all threads.
+        for (auto& thread : wait_list) {
+            thread.SetState(ThreadState::Runnable);
+        }
+    }

 private:
+    void WaitImpl(KLightLock* lock, s64 timeout, bool allow_terminating_thread) {
+        KThread* owner = GetCurrentThreadPointer(kernel);
+
+        // Sleep the thread.
+        {
+            KScopedSchedulerLockAndSleep lk{kernel, owner, timeout};
+
+            if (!allow_terminating_thread && owner->IsTerminationRequested()) {
+                lk.CancelSleep();
+                return;
+            }
+
+            lock->Unlock();
+
+            // Set the thread as waiting.
+            GetCurrentThread(kernel).SetState(ThreadState::Waiting);
+
+            // Add the thread to the queue.
+            wait_list.push_back(GetCurrentThread(kernel));
+        }
+
+        // Remove the thread from the wait list.
+        {
+            KScopedSchedulerLock sl{kernel};
+
+            wait_list.erase(wait_list.iterator_to(GetCurrentThread(kernel)));
+        }
+
+        // Cancel the task that the sleep setup.
+        kernel.TimeManager().UnscheduleTimeEvent(owner);
+
+        // Re-acquire the lock.
+        lock->Lock();
+    }
+
     KernelCore& kernel;
     KThread::WaiterList wait_list{};
 };
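
Note: with this revert the whole light condition variable lives in the header: Wait releases the lock while sleeping and re-acquires it before returning, and Broadcast makes every waiter runnable. For orientation, the same usage shape expressed with standard C++ primitives (a rough analogy, not yuzu's API, and with the usual predicate to absorb spurious wakeups):

    #include <condition_variable>
    #include <mutex>

    std::mutex lock;              // plays the role of KLightLock
    std::condition_variable cond; // plays the role of KLightConditionVariable
    bool ready = false;

    void Waiter() {
        std::unique_lock lk{lock};
        // Like KLightConditionVariable::Wait: the lock is released while
        // sleeping and re-acquired before this returns.
        cond.wait(lk, [] { return ready; });
    }

    void Broadcaster() {
        {
            std::scoped_lock lk{lock};
            ready = true;
        }
        cond.notify_all(); // Broadcast(): wake every waiter
    }
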
src/core/hle/kernel/k_light_lock.cpp
@@ -5,54 +5,44 @@
 #include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"

 namespace Kernel {

-namespace {
-
-class ThreadQueueImplForKLightLock final : public KThreadQueue {
-public:
-    explicit ThreadQueueImplForKLightLock(KernelCore& kernel_) : KThreadQueue(kernel_) {}
-
-    virtual void CancelWait([[maybe_unused]] KThread* waiting_thread,
-                            [[maybe_unused]] ResultCode wait_result,
-                            [[maybe_unused]] bool cancel_timer_task) override {
-        // Do nothing, waiting to acquire a light lock cannot be canceled.
-    }
-};
-
-} // namespace
-
 void KLightLock::Lock() {
     const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));
+    const uintptr_t cur_thread_tag = (cur_thread | 1);

     while (true) {
         uintptr_t old_tag = tag.load(std::memory_order_relaxed);

-        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1),
+        while (!tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1,
                                           std::memory_order_acquire)) {
+            if ((old_tag | 1) == cur_thread_tag) {
+                return;
+            }
         }

-        if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
+        if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) {
             break;
         }
+
+        LockSlowPath(old_tag | 1, cur_thread);
     }
 }

 void KLightLock::Unlock() {
     const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer(kernel));

     uintptr_t expected = cur_thread;
-    if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
-        this->UnlockSlowPath(cur_thread);
-    }
+    do {
+        if (expected != cur_thread) {
+            return UnlockSlowPath(cur_thread);
+        }
+    } while (!tag.compare_exchange_weak(expected, 0, std::memory_order_release));
 }

-bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
+void KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
     KThread* cur_thread = reinterpret_cast<KThread*>(_cur_thread);
-    ThreadQueueImplForKLightLock wait_queue(kernel);

     // Pend the current thread waiting on the owner thread.
     {
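
Note: KLightLock packs its entire state into one atomic word: zero means free, an owner KThread pointer means held uncontended, and owner|1 means held with waiters. A standalone sketch of that tag encoding (Thread is a stand-in, and the slow paths are simplified to spinning and an unconditional release, where the real kernel sleeps and hands off ownership):

    #include <atomic>
    #include <cstdint>

    struct Thread {};

    class TagLock {
    public:
        void Lock(Thread* me) {
            const std::uintptr_t self = reinterpret_cast<std::uintptr_t>(me);
            while (true) {
                std::uintptr_t old = tag.load(std::memory_order_relaxed);
                // Free -> take it; held -> mark contended (set the low bit).
                while (!tag.compare_exchange_weak(
                    old, old == 0 ? self : (old | 1), std::memory_order_acquire)) {
                }
                if (old == 0) {
                    return; // acquired uncontended
                }
                // Contended: the kernel queues on the owner (LockSlowPath)
                // and sleeps; this sketch simply retries.
            }
        }

        void Unlock(Thread* me) {
            std::uintptr_t expected = reinterpret_cast<std::uintptr_t>(me);
            if (!tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
                // Low bit was set: waiters exist. The kernel picks the next
                // owner in UnlockSlowPath; this sketch just releases.
                tag.store(0, std::memory_order_release);
            }
        }

    private:
        std::atomic<std::uintptr_t> tag{0};
    };
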
@@ -60,23 +50,30 @@ bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {

         // Ensure we actually have locking to do.
         if (tag.load(std::memory_order_relaxed) != _owner) {
-            return false;
+            return;
         }

         // Add the current thread as a waiter on the owner.
-        KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ul);
+        KThread* owner_thread = reinterpret_cast<KThread*>(_owner & ~1ULL);
         cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(tag)));
         owner_thread->AddWaiter(cur_thread);

-        // Begin waiting to hold the lock.
-        cur_thread->BeginWait(std::addressof(wait_queue));
+        // Set thread states.
+        cur_thread->SetState(ThreadState::Waiting);

         if (owner_thread->IsSuspended()) {
             owner_thread->ContinueIfHasKernelWaiters();
         }
     }

-    return true;
+    // We're no longer waiting on the lock owner.
+    {
+        KScopedSchedulerLock sl{kernel};
+
+        if (KThread* owner_thread = cur_thread->GetLockOwner(); owner_thread != nullptr) {
+            owner_thread->RemoveWaiter(cur_thread);
+        }
+    }
 }

 void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
@@ -84,20 +81,22 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {

     // Unlock.
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl{kernel};

         // Get the next owner.
-        s32 num_waiters;
+        s32 num_waiters = 0;
         KThread* next_owner = owner_thread->RemoveWaiterByKey(
             std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(tag)));

         // Pass the lock to the next owner.
         uintptr_t next_tag = 0;
         if (next_owner != nullptr) {
-            next_tag =
-                reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(num_waiters > 1);
+            next_tag = reinterpret_cast<uintptr_t>(next_owner);
+            if (num_waiters > 1) {
+                next_tag |= 0x1;
+            }

-            next_owner->EndWait(ResultSuccess);
+            next_owner->SetState(ThreadState::Runnable);

             if (next_owner->IsSuspended()) {
                 next_owner->ContinueIfHasKernelWaiters();
@@ -111,7 +110,7 @@ void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
         }

         // Write the new tag value.
-        tag.store(next_tag, std::memory_order_release);
+        tag.store(next_tag);
     }
 }
src/core/hle/kernel/k_light_lock.h
@@ -20,7 +20,7 @@ public:

     void Unlock();

-    bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
+    void LockSlowPath(uintptr_t owner, uintptr_t cur_thread);

     void UnlockSlowPath(uintptr_t cur_thread);

src/core/hle/kernel/k_process.cpp
@@ -60,7 +60,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
     thread->GetContext64().cpu_registers[0] = 0;
     thread->GetContext32().cpu_registers[1] = thread_handle;
     thread->GetContext64().cpu_registers[1] = thread_handle;
-    thread->DisableDispatch();

     auto& kernel = system.Kernel();
     // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
@@ -228,15 +227,12 @@ void KProcess::PinCurrentThread() {
     const s32 core_id = GetCurrentCoreId(kernel);
     KThread* cur_thread = GetCurrentThreadPointer(kernel);

-    // If the thread isn't terminated, pin it.
-    if (!cur_thread->IsTerminationRequested()) {
     // Pin it.
     PinThread(core_id, cur_thread);
     cur_thread->Pin();

     // An update is needed.
     KScheduler::SetSchedulerUpdateNeeded(kernel);
-    }
 }

 void KProcess::UnpinCurrentThread() {
@@ -254,20 +250,6 @@ void KProcess::UnpinCurrentThread() {
     KScheduler::SetSchedulerUpdateNeeded(kernel);
 }

-void KProcess::UnpinThread(KThread* thread) {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
-
-    // Get the thread's core id.
-    const auto core_id = thread->GetActiveCore();
-
-    // Unpin it.
-    UnpinThread(core_id, thread);
-    thread->Unpin();
-
-    // An update is needed.
-    KScheduler::SetSchedulerUpdateNeeded(kernel);
-}
-
 ResultCode KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
                                      [[maybe_unused]] size_t size) {
     // Lock ourselves, to prevent concurrent access.
src/core/hle/kernel/k_process.h
@@ -259,7 +259,7 @@ public:

     [[nodiscard]] KThread* GetPinnedThread(s32 core_id) const {
         ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
-        return pinned_threads.at(core_id);
+        return pinned_threads[core_id];
     }

     /// Gets 8 bytes of random data for svcGetInfo RandomEntropy
@@ -347,7 +347,6 @@ public:

     void PinCurrentThread();
     void UnpinCurrentThread();
-    void UnpinThread(KThread* thread);

     KLightLock& GetStateLock() {
         return state_lock;
@@ -369,14 +368,14 @@ private:
     void PinThread(s32 core_id, KThread* thread) {
         ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
         ASSERT(thread != nullptr);
-        ASSERT(pinned_threads.at(core_id) == nullptr);
+        ASSERT(pinned_threads[core_id] == nullptr);
         pinned_threads[core_id] = thread;
     }

     void UnpinThread(s32 core_id, KThread* thread) {
         ASSERT(0 <= core_id && core_id < static_cast<s32>(Core::Hardware::NUM_CPU_CORES));
         ASSERT(thread != nullptr);
-        ASSERT(pinned_threads.at(core_id) == thread);
+        ASSERT(pinned_threads[core_id] == thread);
         pinned_threads[core_id] = nullptr;
     }

src/core/hle/kernel/k_scheduler.cpp
@@ -240,8 +240,8 @@ void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, KThread* thread, s3

     // If the thread is runnable, we want to change its priority in the queue.
     if (thread->GetRawState() == ThreadState::Runnable) {
-        GetPriorityQueue(kernel).ChangePriority(old_priority,
-                                                thread == kernel.GetCurrentEmuThread(), thread);
+        GetPriorityQueue(kernel).ChangePriority(
+            old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
         IncrementScheduledCount(thread);
         SetSchedulerUpdateNeeded(kernel);
     }
@@ -360,7 +360,7 @@ void KScheduler::RotateScheduledQueue(s32 cpu_core_id, s32 priority) {
 }

 bool KScheduler::CanSchedule(KernelCore& kernel) {
-    return kernel.GetCurrentEmuThread()->GetDisableDispatchCount() <= 1;
+    return kernel.CurrentScheduler()->GetCurrentThread()->GetDisableDispatchCount() <= 1;
 }

 bool KScheduler::IsSchedulerUpdateNeeded(const KernelCore& kernel) {
@ -376,28 +376,20 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void KScheduler::DisableScheduling(KernelCore& kernel) {
|
void KScheduler::DisableScheduling(KernelCore& kernel) {
|
||||||
// If we are shutting down the kernel, none of this is relevant anymore.
|
if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
|
||||||
if (kernel.IsShuttingDown()) {
|
ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
|
||||||
return;
|
scheduler->GetCurrentThread()->DisableDispatch();
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0);
|
|
||||||
GetCurrentThreadPointer(kernel)->DisableDispatch();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
|
void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
|
||||||
// If we are shutting down the kernel, none of this is relevant anymore.
|
if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
|
||||||
if (kernel.IsShuttingDown()) {
|
ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
|
||||||
return;
|
if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
|
||||||
|
scheduler->GetCurrentThread()->EnableDispatch();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1);
|
|
||||||
|
|
||||||
if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) {
|
|
||||||
GetCurrentThreadPointer(kernel)->EnableDispatch();
|
|
||||||
} else {
|
|
||||||
RescheduleCores(kernel, cores_needing_scheduling);
|
RescheduleCores(kernel, cores_needing_scheduling);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
|
u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
|
||||||
|
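Note: DisableScheduling/EnableScheduling gate context switches behind a per-thread disable count rather than the kernel-wide shutdown flag the removed lines consulted. A minimal stand-alone model of that counting discipline (illustrative names, not the emulator's API):

    #include <cassert>

    // Each thread carries a disable count; rescheduling is only legal while
    // the count is zero, and Disable/Enable calls must pair up exactly.
    struct DispatchGate {
        int disable_count = 0;

        void Disable() {
            assert(disable_count >= 0);
            ++disable_count;
        }

        // Returns true when the caller should trigger a reschedule now.
        bool Enable() {
            assert(disable_count >= 1);
            --disable_count;
            return disable_count == 0;
        }
    };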
@@ -625,17 +617,13 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
         state.highest_priority_thread = nullptr;
     }

-void KScheduler::Finalize() {
+KScheduler::~KScheduler() {
     if (idle_thread) {
         idle_thread->Close();
         idle_thread = nullptr;
     }
 }

-KScheduler::~KScheduler() {
-    ASSERT(!idle_thread);
-}
-
 KThread* KScheduler::GetCurrentThread() const {
     if (auto result = current_thread.load(); result) {
         return result;
@@ -654,12 +642,10 @@ void KScheduler::RescheduleCurrentCore() {
     if (phys_core.IsInterrupted()) {
         phys_core.ClearInterrupt();
     }

     guard.Lock();
     if (state.needs_scheduling.load()) {
         Schedule();
     } else {
-        GetCurrentThread()->EnableDispatch();
         guard.Unlock();
     }
 }
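Note: with the dispatch bookkeeping dropped, RescheduleCurrentCore reduces to "acknowledge any pending interrupt, then either schedule or release the guard". A toy model of that control flow with standard types (stand-ins only; as in the real scheduler, Schedule() is responsible for releasing the guard itself):

    #include <atomic>
    #include <mutex>

    struct CoreScheduler {
        std::atomic<bool> interrupted{false};
        std::atomic<bool> needs_scheduling{false};
        std::mutex guard;

        void Schedule() {
            // Pick the next thread, then release the guard before switching.
            guard.unlock();
        }

        void RescheduleCurrentCore() {
            if (interrupted.exchange(false)) {
                // Pending interrupt acknowledged before taking the guard.
            }
            guard.lock();
            if (needs_scheduling.load()) {
                Schedule(); // unlocks internally
            } else {
                guard.unlock();
            }
        }
    };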
@@ -669,33 +655,26 @@ void KScheduler::OnThreadStart() {
 }

 void KScheduler::Unload(KThread* thread) {
-    ASSERT(thread);
-
     LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");

+    if (thread) {
         if (thread->IsCallingSvc()) {
             thread->ClearIsCallingSvc();
         }
+        if (!thread->IsTerminationRequested()) {
+            prev_thread = thread;
+
-        auto& physical_core = system.Kernel().PhysicalCore(core_id);
-        if (!physical_core.IsInitialized()) {
-            return;
-        }
-
-        Core::ARM_Interface& cpu_core = physical_core.ArmInterface();
+            Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
             cpu_core.SaveContext(thread->GetContext32());
             cpu_core.SaveContext(thread->GetContext64());
             // Save the TPIDR_EL0 system register in case it was modified.
             thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
             cpu_core.ClearExclusiveState();

-        if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) {
-            prev_thread = thread;
         } else {
             prev_thread = nullptr;
         }

         thread->context_guard.Unlock();
+    }
 }
@@ -704,6 +683,11 @@ void KScheduler::Reload(KThread* thread) {
     if (thread) {
         ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");

+        auto* const thread_owner_process = thread->GetOwnerProcess();
+        if (thread_owner_process != nullptr) {
+            system.Kernel().MakeCurrentProcess(thread_owner_process);
+        }
+
         Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
         cpu_core.LoadContext(thread->GetContext32());
         cpu_core.LoadContext(thread->GetContext64());
@@ -721,7 +705,7 @@ void KScheduler::SwitchContextStep2() {
 }

 void KScheduler::ScheduleImpl() {
-    KThread* previous_thread = GetCurrentThread();
+    KThread* previous_thread = current_thread.load();
     KThread* next_thread = state.highest_priority_thread;

     state.needs_scheduling = false;
@@ -733,15 +717,10 @@ void KScheduler::ScheduleImpl() {

     // If we're not actually switching thread, there's nothing to do.
     if (next_thread == current_thread.load()) {
-        previous_thread->EnableDispatch();
         guard.Unlock();
         return;
     }

-    if (next_thread->GetCurrentCore() != core_id) {
-        next_thread->SetCurrentCore(core_id);
-    }
-
     current_thread.store(next_thread);

     KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -752,7 +731,11 @@ void KScheduler::ScheduleImpl() {
     Unload(previous_thread);

     std::shared_ptr<Common::Fiber>* old_context;
+    if (previous_thread != nullptr) {
         old_context = &previous_thread->GetHostContext();
+    } else {
+        old_context = &idle_thread->GetHostContext();
+    }
     guard.Unlock();

     Common::Fiber::YieldTo(*old_context, *switch_fiber);
@@ -33,8 +33,6 @@ public:
     explicit KScheduler(Core::System& system_, s32 core_id_);
     ~KScheduler();

-    void Finalize();
-
     /// Reschedules to the next available thread (call after current thread is suspended)
     void RescheduleCurrentCore();
@@ -23,11 +23,6 @@ public:
     }

     void Lock() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (kernel.IsShuttingDown()) {
-            return;
-        }
-
         if (IsLockedByCurrentThread()) {
             // If we already own the lock, we can just increment the count.
             ASSERT(lock_count > 0);
@@ -48,11 +43,6 @@ public:
     }

     void Unlock() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (kernel.IsShuttingDown()) {
-            return;
-        }
-
         ASSERT(IsLockedByCurrentThread());
         ASSERT(lock_count > 0);
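Note: Lock()/Unlock() here implement a re-entrant lock keyed on the owning thread; the hunks above only drop the shutdown early-outs around that logic. A compilable stand-alone sketch of the same ownership-count pattern (standard-library stand-ins, single-process only, not the kernel's types):

    #include <cassert>
    #include <mutex>
    #include <thread>

    class RecursiveOwnerLock {
    public:
        void Lock() {
            if (owner == std::this_thread::get_id()) {
                // Already owned by us: just bump the count.
                assert(lock_count > 0);
                ++lock_count;
                return;
            }
            mutex.lock();
            owner = std::this_thread::get_id();
            lock_count = 1;
        }

        void Unlock() {
            assert(owner == std::this_thread::get_id());
            assert(lock_count > 0);
            if (--lock_count == 0) {
                owner = {};
                mutex.unlock();
            }
        }

    private:
        std::mutex mutex;
        std::thread::id owner{}; // sketch: unsynchronized read is fine only
        int lock_count = 0;      // for the self-ownership fast path
    };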
@@ -8,7 +8,6 @@
 #pragma once

 #include "common/common_types.h"
-#include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/time_manager.h"
@@ -175,7 +175,8 @@ ResultCode KServerSession::CompleteSyncRequest(HLERequestContext& context) {
     {
         KScopedSchedulerLock lock(kernel);
         if (!context.IsThreadWaiting()) {
-            context.GetThread().EndWait(result);
+            context.GetThread().Wakeup();
+            context.GetThread().SetSyncedObject(nullptr, result);
         }
     }
@@ -8,66 +8,11 @@
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"

 namespace Kernel {

-namespace {
-
-class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
-public:
-    ThreadQueueImplForKSynchronizationObjectWait(KernelCore& kernel_, KSynchronizationObject** o,
-                                                 KSynchronizationObject::ThreadListNode* n, s32 c)
-        : KThreadQueueWithoutEndWait(kernel_), m_objects(o), m_nodes(n), m_count(c) {}
-
-    virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
-                                 ResultCode wait_result) override {
-        // Determine the sync index, and unlink all nodes.
-        s32 sync_index = -1;
-        for (auto i = 0; i < m_count; ++i) {
-            // Check if this is the signaled object.
-            if (m_objects[i] == signaled_object && sync_index == -1) {
-                sync_index = i;
-            }
-
-            // Unlink the current node from the current object.
-            m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
-        }
-
-        // Set the waiting thread's sync index.
-        waiting_thread->SetSyncedIndex(sync_index);
-
-        // Set the waiting thread as not cancellable.
-        waiting_thread->ClearCancellable();
-
-        // Invoke the base end wait handler.
-        KThreadQueue::EndWait(waiting_thread, wait_result);
-    }
-
-    virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
-                            bool cancel_timer_task) override {
-        // Remove all nodes from our list.
-        for (auto i = 0; i < m_count; ++i) {
-            m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
-        }
-
-        // Set the waiting thread as not cancellable.
-        waiting_thread->ClearCancellable();
-
-        // Invoke the base cancel wait handler.
-        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
-    }
-
-private:
-    KSynchronizationObject** m_objects;
-    KSynchronizationObject::ThreadListNode* m_nodes;
-    s32 m_count;
-};
-
-} // namespace
-
 void KSynchronizationObject::Finalize() {
     this->OnFinalizeSynchronizationObject();
     KAutoObject::Finalize();
@@ -80,19 +25,11 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
     std::vector<ThreadListNode> thread_nodes(num_objects);

     // Prepare for wait.
-    KThread* thread = GetCurrentThreadPointer(kernel_ctx);
-    ThreadQueueImplForKSynchronizationObjectWait wait_queue(kernel_ctx, objects,
-                                                            thread_nodes.data(), num_objects);
+    KThread* thread = kernel_ctx.CurrentScheduler()->GetCurrentThread();

     {
         // Setup the scheduling lock and sleep.
-        KScopedSchedulerLockAndSleep slp(kernel_ctx, thread, timeout);
-
-        // Check if the thread should terminate.
-        if (thread->IsTerminationRequested()) {
-            slp.CancelSleep();
-            return ResultTerminationRequested;
-        }
+        KScopedSchedulerLockAndSleep slp{kernel_ctx, thread, timeout};

         // Check if any of the objects are already signaled.
         for (auto i = 0; i < num_objects; ++i) {
@@ -111,6 +48,12 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
             return ResultTimedOut;
         }

+        // Check if the thread should terminate.
+        if (thread->IsTerminationRequested()) {
+            slp.CancelSleep();
+            return ResultTerminationRequested;
+        }
+
         // Check if waiting was canceled.
         if (thread->IsWaitCancelled()) {
             slp.CancelSleep();
@@ -123,25 +66,73 @@ ResultCode KSynchronizationObject::Wait(KernelCore& kernel_ctx, s32* out_index,
             thread_nodes[i].thread = thread;
             thread_nodes[i].next = nullptr;

-            objects[i]->LinkNode(std::addressof(thread_nodes[i]));
-        }
+            if (objects[i]->thread_list_tail == nullptr) {
+                objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
+            } else {
+                objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
+            }

-        // Mark the thread as cancellable.
+            objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
+        }
+
+        // For debugging only
+        thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
+
+        // Mark the thread as waiting.
         thread->SetCancellable();
-
-        // Clear the thread's synced index.
-        thread->SetSyncedIndex(-1);
-
-        // Wait for an object to be signaled.
-        thread->BeginWait(std::addressof(wait_queue));
+        thread->SetSyncedObject(nullptr, ResultTimedOut);
+        thread->SetState(ThreadState::Waiting);
         thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
     }

-    // Set the output index.
-    *out_index = thread->GetSyncedIndex();
+    // The lock/sleep is done, so we should be able to get our result.
+
+    // Thread is no longer cancellable.
+    thread->ClearCancellable();
+
+    // For debugging only
+    thread->SetWaitObjectsForDebugging({});
+
+    // Cancel the timer as needed.
+    kernel_ctx.TimeManager().UnscheduleTimeEvent(thread);

     // Get the wait result.
-    return thread->GetWaitResult();
+    ResultCode wait_result{ResultSuccess};
+    s32 sync_index = -1;
+    {
+        KScopedSchedulerLock lock(kernel_ctx);
+        KSynchronizationObject* synced_obj;
+        wait_result = thread->GetWaitResult(std::addressof(synced_obj));
+
+        for (auto i = 0; i < num_objects; ++i) {
+            // Unlink the object from the list.
+            ThreadListNode* prev_ptr =
+                reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
+            ThreadListNode* prev_val = nullptr;
+            ThreadListNode *prev, *tail_prev;
+
+            do {
+                prev = prev_ptr;
+                prev_ptr = prev_ptr->next;
+                tail_prev = prev_val;
+                prev_val = prev_ptr;
+            } while (prev_ptr != std::addressof(thread_nodes[i]));
+
+            if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
+                objects[i]->thread_list_tail = tail_prev;
+            }
+
+            prev->next = thread_nodes[i].next;
+
+            if (objects[i] == synced_obj) {
+                sync_index = i;
+            }
+        }
+    }
+
+    // Set output.
+    *out_index = sync_index;
+    return wait_result;
 }

 KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
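Note: the restored Wait() keeps each waiter node on an intrusive singly linked list per object and unlinks it by walking from a pseudo-head. A compilable stand-alone model of that unlink loop (plain types; it mirrors the diff's pointer handling, including the pseudo-node cast, which relies on next being the node's first member):

    struct Node {
        Node* next = nullptr; // must stay the first member for the cast below
    };

    struct List {
        Node* head = nullptr;
        Node* tail = nullptr;
    };

    void Unlink(List& list, Node* node) {
        // Treat &head as a pseudo-node so the head case needs no special
        // path, exactly as the reinterpret_cast in the diff does.
        Node* prev_ptr = reinterpret_cast<Node*>(&list.head);
        Node* prev_val = nullptr;
        Node *prev, *tail_prev;
        do {
            prev = prev_ptr;
            prev_ptr = prev_ptr->next;
            tail_prev = prev_val;
            prev_val = prev_ptr;
        } while (prev_ptr != node);

        if (list.tail == node) {
            list.tail = tail_prev; // nullptr when the list becomes empty
        }
        prev->next = node->next;
    }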
@@ -150,7 +141,7 @@ KSynchronizationObject::KSynchronizationObject(KernelCore& kernel_)
 KSynchronizationObject::~KSynchronizationObject() = default;

 void KSynchronizationObject::NotifyAvailable(ResultCode result) {
-    KScopedSchedulerLock sl(kernel);
+    KScopedSchedulerLock lock(kernel);

     // If we're not signaled, we've nothing to notify.
     if (!this->IsSignaled()) {
@@ -159,7 +150,11 @@ void KSynchronizationObject::NotifyAvailable(ResultCode result) {

     // Iterate over each thread.
     for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
-        cur_node->thread->NotifyAvailable(this, result);
+        KThread* thread = cur_node->thread;
+        if (thread->GetState() == ThreadState::Waiting) {
+            thread->SetSyncedObject(this, result);
+            thread->SetState(ThreadState::Runnable);
+        }
     }
 }
@@ -35,38 +35,6 @@ public:

     [[nodiscard]] std::vector<KThread*> GetWaitingThreadsForDebugging() const;

-    void LinkNode(ThreadListNode* node_) {
-        // Link the node to the list.
-        if (thread_list_tail == nullptr) {
-            thread_list_head = node_;
-        } else {
-            thread_list_tail->next = node_;
-        }
-
-        thread_list_tail = node_;
-    }
-
-    void UnlinkNode(ThreadListNode* node_) {
-        // Unlink the node from the list.
-        ThreadListNode* prev_ptr =
-            reinterpret_cast<ThreadListNode*>(std::addressof(thread_list_head));
-        ThreadListNode* prev_val = nullptr;
-        ThreadListNode *prev, *tail_prev;
-
-        do {
-            prev = prev_ptr;
-            prev_ptr = prev_ptr->next;
-            tail_prev = prev_val;
-            prev_val = prev_ptr;
-        } while (prev_ptr != node_);
-
-        if (thread_list_tail == node_) {
-            thread_list_tail = tail_prev;
-        }
-
-        prev->next = node_->next;
-    }
-
 protected:
     explicit KSynchronizationObject(KernelCore& kernel);
     ~KSynchronizationObject() override;
@@ -13,9 +13,6 @@
 #include "common/common_types.h"
 #include "common/fiber.h"
 #include "common/logging/log.h"
-#include "common/scope_exit.h"
-#include "common/settings.h"
-#include "common/thread_queue_list.h"
 #include "core/core.h"
 #include "core/cpu_manager.h"
 #include "core/hardware_properties.h"
@@ -59,34 +56,6 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,

 namespace Kernel {

-namespace {
-
-class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
-public:
-    explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
-        : KThreadQueueWithoutEndWait(kernel_) {}
-};
-
-class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
-public:
-    explicit ThreadQueueImplForKThreadSetProperty(KernelCore& kernel_, KThread::WaiterList* wl)
-        : KThreadQueue(kernel_), m_wait_list(wl) {}
-
-    virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
-                            bool cancel_timer_task) override {
-        // Remove the thread from the wait list.
-        m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
-
-        // Invoke the base cancel wait handler.
-        KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
-    }
-
-private:
-    KThread::WaiterList* m_wait_list;
-};
-
-} // namespace
-
 KThread::KThread(KernelCore& kernel_)
     : KAutoObjectWithSlabHeapAndContainer{kernel_}, activity_pause_lock{kernel_} {}
 KThread::~KThread() = default;
@@ -113,8 +82,6 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
         [[fallthrough]];
     case ThreadType::HighPriority:
         [[fallthrough]];
-    case ThreadType::Dummy:
-        [[fallthrough]];
     case ThreadType::User:
         ASSERT(((owner == nullptr) ||
                 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
@@ -160,8 +127,11 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     priority = prio;
     base_priority = prio;

+    // Set sync object and waiting lock to null.
+    synced_object = nullptr;
+
     // Initialize sleeping queue.
-    wait_queue = nullptr;
+    sleeping_queue = nullptr;

     // Set suspend flags.
     suspend_request_flags = 0;
@@ -214,7 +184,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
     // Setup the stack parameters.
     StackParameters& sp = GetStackParameters();
     sp.cur_thread = this;
-    sp.disable_count = 0;
+    sp.disable_count = 1;
     SetInExceptionHandler();

     // Set thread ID.
@@ -241,16 +211,15 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
     // Initialize the thread.
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));

-    // Initialize emulation parameters.
+    // Initialize host context.
     thread->host_context =
         std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
-    thread->is_single_core = !Settings::values.use_multi_core.GetValue();

     return ResultSuccess;
 }

 ResultCode KThread::InitializeDummyThread(KThread* thread) {
-    return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Dummy);
+    return thread->Initialize({}, {}, {}, DefaultThreadPriority, 3, {}, ThreadType::Main);
 }

 ResultCode KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
@@ -304,14 +273,11 @@ void KThread::Finalize() {

         auto it = waiter_list.begin();
         while (it != waiter_list.end()) {
-            // Clear the lock owner
+            // The thread shouldn't be a kernel waiter.
             it->SetLockOwner(nullptr);
-            // Erase the waiter from our list.
+            it->SetSyncedObject(nullptr, ResultInvalidState);
+            it->Wakeup();
             it = waiter_list.erase(it);
-
-            // Cancel the thread's wait.
-            it->CancelWait(ResultInvalidState, true);
         }
     }
@@ -328,12 +294,15 @@ bool KThread::IsSignaled() const {
     return signaled;
 }

-void KThread::OnTimer() {
-    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+void KThread::Wakeup() {
+    KScopedSchedulerLock sl{kernel};

-    // If we're waiting, cancel the wait.
     if (GetState() == ThreadState::Waiting) {
-        wait_queue->CancelWait(this, ResultTimedOut, false);
+        if (sleeping_queue != nullptr) {
+            sleeping_queue->WakeupThread(this);
+        } else {
+            SetState(ThreadState::Runnable);
+        }
     }
 }
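Note: the restored Wakeup() routes a queued waiter through its sleeping queue so the queue's bookkeeping stays consistent, and only flips a bare waiter straight to Runnable. A toy model of that rule (stand-in types, no locking):

    enum class State { Runnable, Waiting };

    struct Queue;

    struct Thread {
        State state = State::Runnable;
        Queue* sleeping_queue = nullptr;
        void Wakeup();
    };

    struct Queue {
        // The real queue also erases the thread from its wait list here.
        void WakeupThread(Thread* t) {
            t->state = State::Runnable;
            t->sleeping_queue = nullptr;
        }
    };

    void Thread::Wakeup() {
        if (state != State::Waiting) {
            return; // nothing to do for a running/runnable thread
        }
        if (sleeping_queue != nullptr) {
            sleeping_queue->WakeupThread(this); // let the queue unbook us
        } else {
            state = State::Runnable; // bare waiter: just make it runnable
        }
    }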
@@ -358,7 +327,7 @@ void KThread::StartTermination() {

     // Signal.
     signaled = true;
-    KSynchronizationObject::NotifyAvailable();
+    NotifyAvailable();

     // Clear previous thread in KScheduler.
     KScheduler::ClearPreviousThread(kernel, this);
@@ -506,32 +475,30 @@ ResultCode KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_m
     return ResultSuccess;
 }

-ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
+ResultCode KThread::SetCoreMask(s32 cpu_core_id, u64 v_affinity_mask) {
     ASSERT(parent != nullptr);
     ASSERT(v_affinity_mask != 0);
-    KScopedLightLock lk(activity_pause_lock);
+    KScopedLightLock lk{activity_pause_lock};

     // Set the core mask.
     u64 p_affinity_mask = 0;
     {
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl{kernel};
         ASSERT(num_core_migration_disables >= 0);

-        // If we're updating, set our ideal virtual core.
-        if (core_id_ != Svc::IdealCoreNoUpdate) {
-            virtual_ideal_core_id = core_id_;
-        } else {
-            // Preserve our ideal core id.
-            core_id_ = virtual_ideal_core_id;
-            R_UNLESS(((1ULL << core_id_) & v_affinity_mask) != 0, ResultInvalidCombination);
+        // If the core id is no-update magic, preserve the ideal core id.
+        if (cpu_core_id == Svc::IdealCoreNoUpdate) {
+            cpu_core_id = virtual_ideal_core_id;
+            R_UNLESS(((1ULL << cpu_core_id) & v_affinity_mask) != 0, ResultInvalidCombination);
         }

-        // Set our affinity mask.
+        // Set the virtual core/affinity mask.
+        virtual_ideal_core_id = cpu_core_id;
         virtual_affinity_mask = v_affinity_mask;

         // Translate the virtual core to a physical core.
-        if (core_id_ >= 0) {
-            core_id_ = Core::Hardware::VirtualToPhysicalCoreMap[core_id_];
+        if (cpu_core_id >= 0) {
+            cpu_core_id = Core::Hardware::VirtualToPhysicalCoreMap[cpu_core_id];
         }

         // Translate the virtual affinity mask to a physical one.
@@ -546,7 +513,7 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
         const KAffinityMask old_mask = physical_affinity_mask;

         // Set our new ideals.
-        physical_ideal_core_id = core_id_;
+        physical_ideal_core_id = cpu_core_id;
         physical_affinity_mask.SetAffinityMask(p_affinity_mask);

         if (physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
@@ -564,18 +531,18 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
             }
         } else {
             // Otherwise, we edit the original affinity for restoration later.
-            original_physical_ideal_core_id = core_id_;
+            original_physical_ideal_core_id = cpu_core_id;
             original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
         }
     }

     // Update the pinned waiter list.
-    ThreadQueueImplForKThreadSetProperty wait_queue_(kernel, std::addressof(pinned_waiter_list));
     {
         bool retry_update{};
+        bool thread_is_pinned{};
         do {
             // Lock the scheduler.
-            KScopedSchedulerLock sl(kernel);
+            KScopedSchedulerLock sl{kernel};

             // Don't do any further management if our termination has been requested.
             R_SUCCEED_IF(IsTerminationRequested());
@@ -603,9 +570,12 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
                 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
                          ResultTerminationRequested);

+                // Note that the thread was pinned.
+                thread_is_pinned = true;
+
                 // Wait until the thread isn't pinned any more.
                 pinned_waiter_list.push_back(GetCurrentThread(kernel));
-                GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
+                GetCurrentThread(kernel).SetState(ThreadState::Waiting);
             } else {
                 // If the thread isn't pinned, release the scheduler lock and retry until it's
                 // not current.
@@ -613,6 +583,16 @@ ResultCode KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
                 }
             }
         } while (retry_update);
+
+        // If the thread was pinned, it no longer is, and we should remove the current thread from
+        // our waiter list.
+        if (thread_is_pinned) {
+            // Lock the scheduler.
+            KScopedSchedulerLock sl{kernel};
+
+            // Remove from the list.
+            pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
+        }
     }

     return ResultSuccess;
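Note: the added block completes the hand-rolled wait: a waiter that queued itself on pinned_waiter_list before sleeping must dequeue itself under the scheduler lock once the retry loop exits. A reduced stand-alone model of that enqueue/sleep/dequeue shape (std::list and plain structs standing in for the intrusive list and thread states):

    #include <list>

    struct Thread {
        bool pinned = false;
    };

    struct SchedulerLock {
        void lock() {}
        void unlock() {}
    };

    // Shape of the restored protocol: under the scheduler lock, decide
    // whether to park; after the loop, erase ourselves exactly once if we
    // ever parked.
    inline void WaitWhilePinned(SchedulerLock& sl, std::list<Thread*>& pinned_waiters,
                                Thread& target, Thread& self) {
        bool thread_is_pinned = false;
        bool retry = true;
        while (retry) {
            sl.lock();
            if (target.pinned) {
                thread_is_pinned = true;
                pinned_waiters.push_back(&self);
                // ... mark self Waiting; the unpin path wakes us ...
                target.pinned = false; // model: assume we are unpinned on wake
            } else {
                retry = false;
            }
            sl.unlock();
        }
        if (thread_is_pinned) {
            sl.lock();
            pinned_waiters.remove(&self); // iterator_to + erase in the real list
            sl.unlock();
        }
    }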
@@ -661,9 +641,15 @@ void KThread::WaitCancel() {
     KScopedSchedulerLock sl{kernel};

     // Check if we're waiting and cancellable.
-    if (this->GetState() == ThreadState::Waiting && cancellable) {
-        wait_cancelled = false;
-        wait_queue->CancelWait(this, ResultCancelled, true);
+    if (GetState() == ThreadState::Waiting && cancellable) {
+        if (sleeping_queue != nullptr) {
+            sleeping_queue->WakeupThread(this);
+            wait_cancelled = true;
+        } else {
+            SetSyncedObject(nullptr, ResultCancelled);
+            SetState(ThreadState::Runnable);
+            wait_cancelled = false;
+        }
     } else {
         // Otherwise, note that we cancelled a wait.
         wait_cancelled = true;
@@ -714,59 +700,60 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
     // Set the activity.
     {
         // Lock the scheduler.
-        KScopedSchedulerLock sl(kernel);
+        KScopedSchedulerLock sl{kernel};

         // Verify our state.
-        const auto cur_state = this->GetState();
+        const auto cur_state = GetState();
         R_UNLESS((cur_state == ThreadState::Waiting || cur_state == ThreadState::Runnable),
                  ResultInvalidState);

         // Either pause or resume.
         if (activity == Svc::ThreadActivity::Paused) {
             // Verify that we're not suspended.
-            R_UNLESS(!this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+            R_UNLESS(!IsSuspendRequested(SuspendType::Thread), ResultInvalidState);

             // Suspend.
-            this->RequestSuspend(SuspendType::Thread);
+            RequestSuspend(SuspendType::Thread);
         } else {
             ASSERT(activity == Svc::ThreadActivity::Runnable);

             // Verify that we're suspended.
-            R_UNLESS(this->IsSuspendRequested(SuspendType::Thread), ResultInvalidState);
+            R_UNLESS(IsSuspendRequested(SuspendType::Thread), ResultInvalidState);

             // Resume.
-            this->Resume(SuspendType::Thread);
+            Resume(SuspendType::Thread);
         }
     }

     // If the thread is now paused, update the pinned waiter list.
     if (activity == Svc::ThreadActivity::Paused) {
-        ThreadQueueImplForKThreadSetProperty wait_queue_(kernel,
-                                                         std::addressof(pinned_waiter_list));
-
-        bool thread_is_current;
+        bool thread_is_pinned{};
+        bool thread_is_current{};
         do {
             // Lock the scheduler.
-            KScopedSchedulerLock sl(kernel);
+            KScopedSchedulerLock sl{kernel};

             // Don't do any further management if our termination has been requested.
-            R_SUCCEED_IF(this->IsTerminationRequested());
+            R_SUCCEED_IF(IsTerminationRequested());

-            // By default, treat the thread as not current.
-            thread_is_current = false;
-
             // Check whether the thread is pinned.
-            if (this->GetStackParameters().is_pinned) {
+            if (GetStackParameters().is_pinned) {
                 // Verify that the current thread isn't terminating.
                 R_UNLESS(!GetCurrentThread(kernel).IsTerminationRequested(),
                          ResultTerminationRequested);

+                // Note that the thread was pinned and not current.
+                thread_is_pinned = true;
+                thread_is_current = false;
+
                 // Wait until the thread isn't pinned any more.
                 pinned_waiter_list.push_back(GetCurrentThread(kernel));
-                GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue_));
+                GetCurrentThread(kernel).SetState(ThreadState::Waiting);
             } else {
                 // Check if the thread is currently running.
                 // If it is, we'll need to retry.
+                thread_is_current = false;
+
                 for (auto i = 0; i < static_cast<s32>(Core::Hardware::NUM_CPU_CORES); ++i) {
                     if (kernel.Scheduler(i).GetCurrentThread() == this) {
                         thread_is_current = true;
@@ -775,6 +762,16 @@ ResultCode KThread::SetActivity(Svc::ThreadActivity activity) {
                     }
                 }
             }
         } while (thread_is_current);
+
+        // If the thread was pinned, it no longer is, and we should remove the current thread from
+        // our waiter list.
+        if (thread_is_pinned) {
+            // Lock the scheduler.
+            KScopedSchedulerLock sl{kernel};
+
+            // Remove from the list.
+            pinned_waiter_list.erase(pinned_waiter_list.iterator_to(GetCurrentThread(kernel)));
+        }
     }

     return ResultSuccess;
@@ -969,9 +966,6 @@ ResultCode KThread::Run() {

         // Set our state and finish.
         SetState(ThreadState::Runnable);
-
-        DisableDispatch();
-
         return ResultSuccess;
     }
 }
@@ -1002,63 +996,29 @@ ResultCode KThread::Sleep(s64 timeout) {
     ASSERT(this == GetCurrentThreadPointer(kernel));
     ASSERT(timeout > 0);

-    ThreadQueueImplForKThreadSleep wait_queue_(kernel);
     {
         // Setup the scheduling lock and sleep.
-        KScopedSchedulerLockAndSleep slp(kernel, this, timeout);
+        KScopedSchedulerLockAndSleep slp{kernel, this, timeout};

         // Check if the thread should terminate.
-        if (this->IsTerminationRequested()) {
+        if (IsTerminationRequested()) {
             slp.CancelSleep();
             return ResultTerminationRequested;
         }

-        // Wait for the sleep to end.
-        this->BeginWait(std::addressof(wait_queue_));
+        // Mark the thread as waiting.
+        SetState(ThreadState::Waiting);
         SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }

+    // The lock/sleep is done.
+
+    // Cancel the timer.
+    kernel.TimeManager().UnscheduleTimeEvent(this);
+
     return ResultSuccess;
 }

-void KThread::BeginWait(KThreadQueue* queue) {
-    // Set our state as waiting.
-    SetState(ThreadState::Waiting);
-
-    // Set our wait queue.
-    wait_queue = queue;
-}
-
-void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
-    // Lock the scheduler.
-    KScopedSchedulerLock sl(kernel);
-
-    // If we're waiting, notify our queue that we're available.
-    if (GetState() == ThreadState::Waiting) {
-        wait_queue->NotifyAvailable(this, signaled_object, wait_result_);
-    }
-}
-
-void KThread::EndWait(ResultCode wait_result_) {
-    // Lock the scheduler.
-    KScopedSchedulerLock sl(kernel);
-
-    // If we're waiting, notify our queue that we're available.
-    if (GetState() == ThreadState::Waiting) {
-        wait_queue->EndWait(this, wait_result_);
-    }
-}
-
-void KThread::CancelWait(ResultCode wait_result_, bool cancel_timer_task) {
-    // Lock the scheduler.
-    KScopedSchedulerLock sl(kernel);
-
-    // If we're waiting, notify our queue that we're available.
-    if (GetState() == ThreadState::Waiting) {
-        wait_queue->CancelWait(this, wait_result_, cancel_timer_task);
-    }
-}
-
 void KThread::SetState(ThreadState state) {
     KScopedSchedulerLock sl{kernel};
@@ -1090,26 +1050,4 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
     return GetCurrentThread(kernel).GetCurrentCore();
 }

-KScopedDisableDispatch::~KScopedDisableDispatch() {
-    // If we are shutting down the kernel, none of this is relevant anymore.
-    if (kernel.IsShuttingDown()) {
-        return;
-    }
-
-    // Skip the reschedule if single-core, as dispatch tracking is disabled here.
-    if (!Settings::values.use_multi_core.GetValue()) {
-        return;
-    }
-
-    if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
-        auto scheduler = kernel.CurrentScheduler();
-
-        if (scheduler) {
-            scheduler->RescheduleCurrentCore();
-        }
-    } else {
-        GetCurrentThread(kernel).EnableDispatch();
-    }
-}
-
 } // namespace Kernel
@@ -48,7 +48,6 @@ enum class ThreadType : u32 {
     Kernel = 1,
     HighPriority = 2,
     User = 3,
-    Dummy = 100, // Special thread type for emulation purposes only
 };
 DECLARE_ENUM_FLAG_OPERATORS(ThreadType);
@@ -162,6 +161,8 @@ public:
         }
     }

+    void Wakeup();
+
     void SetBasePriority(s32 value);

     [[nodiscard]] ResultCode Run();
|
||||||
|
|
||||||
void Suspend();
|
void Suspend();
|
||||||
|
|
||||||
constexpr void SetSyncedIndex(s32 index) {
|
void SetSyncedObject(KSynchronizationObject* obj, ResultCode wait_res) {
|
||||||
synced_index = index;
|
synced_object = obj;
|
||||||
}
|
|
||||||
|
|
||||||
[[nodiscard]] constexpr s32 GetSyncedIndex() const {
|
|
||||||
return synced_index;
|
|
||||||
}
|
|
||||||
|
|
||||||
constexpr void SetWaitResult(ResultCode wait_res) {
|
|
||||||
wait_result = wait_res;
|
wait_result = wait_res;
|
||||||
}
|
}
|
||||||
|
|
||||||
[[nodiscard]] constexpr ResultCode GetWaitResult() const {
|
[[nodiscard]] ResultCode GetWaitResult(KSynchronizationObject** out) const {
|
||||||
|
*out = synced_object;
|
||||||
return wait_result;
|
return wait_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
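Note: these accessors restore the synced-object handshake: the waiter seeds a default (timed-out) result before sleeping, and a signaller later overwrites both fields under the scheduler lock. A minimal stand-alone model of the protocol (plain types, with int standing in for ResultCode):

    struct SyncObject;

    struct Thread {
        SyncObject* synced_object = nullptr;
        int wait_result = 0; // stand-in for ResultCode

        // Signaller (or the waiter itself, to seed the default) writes both.
        void SetSyncedObject(SyncObject* obj, int res) {
            synced_object = obj;
            wait_result = res;
        }

        // Waker-up reads the result together with which object signaled it.
        int GetWaitResult(SyncObject** out) const {
            *out = synced_object;
            return wait_result;
        }
    };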
@@ -379,8 +374,6 @@ public:

     [[nodiscard]] bool IsSignaled() const override;

-    void OnTimer();
-
     static void PostDestroy(uintptr_t arg);

     [[nodiscard]] static ResultCode InitializeDummyThread(KThread* thread);
@@ -453,39 +446,20 @@ public:
         return per_core_priority_queue_entry[core];
     }

-    [[nodiscard]] bool IsKernelThread() const {
-        return GetActiveCore() == 3;
-    }
-
-    [[nodiscard]] bool IsDispatchTrackingDisabled() const {
-        return is_single_core || IsKernelThread();
+    void SetSleepingQueue(KThreadQueue* q) {
+        sleeping_queue = q;
     }

     [[nodiscard]] s32 GetDisableDispatchCount() const {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return 1;
-        }
-
         return this->GetStackParameters().disable_count;
     }

     void DisableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
         this->GetStackParameters().disable_count++;
     }

     void EnableDispatch() {
-        if (IsDispatchTrackingDisabled()) {
-            // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
-            return;
-        }
-
         ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
         this->GetStackParameters().disable_count--;
     }
@@ -599,15 +573,6 @@ public:
         address_key_value = val;
     }

-    void ClearWaitQueue() {
-        wait_queue = nullptr;
-    }
-
-    void BeginWait(KThreadQueue* queue);
-    void NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_);
-    void EndWait(ResultCode wait_result_);
-    void CancelWait(ResultCode wait_result_, bool cancel_timer_task);
-
     [[nodiscard]] bool HasWaiters() const {
         return !waiter_list.empty();
     }
@@ -702,6 +667,7 @@ private:
     KAffinityMask physical_affinity_mask{};
     u64 thread_id{};
     std::atomic<s64> cpu_time{};
+    KSynchronizationObject* synced_object{};
     VAddr address_key{};
     KProcess* parent{};
     VAddr kernel_stack_top{};
@@ -711,14 +677,13 @@ private:
     s64 schedule_count{};
     s64 last_scheduled_tick{};
     std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
-    KThreadQueue* wait_queue{};
+    KThreadQueue* sleeping_queue{};
     WaiterList waiter_list{};
     WaiterList pinned_waiter_list{};
     KThread* lock_owner{};
     u32 address_key_value{};
     u32 suspend_request_flags{};
     u32 suspend_allowed_flags{};
-    s32 synced_index{};
     ResultCode wait_result{ResultSuccess};
     s32 base_priority{};
     s32 physical_ideal_core_id{};
@@ -743,7 +708,6 @@ private:

     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
-    bool is_single_core{};

     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -788,20 +752,4 @@ public:
     }
 };

-class KScopedDisableDispatch {
-public:
-    [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (kernel.IsShuttingDown()) {
-            return;
-        }
-        GetCurrentThread(kernel).DisableDispatch();
-    }
-
-    ~KScopedDisableDispatch();
-
-private:
-    KernelCore& kernel;
-};
-
 } // namespace Kernel
@@ -4,7 +4,6 @@

 #pragma once

-#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"

 namespace Kernel {
@@ -12,24 +11,71 @@ namespace Kernel {
 class KThreadQueue {
 public:
     explicit KThreadQueue(KernelCore& kernel_) : kernel{kernel_} {}
-    virtual ~KThreadQueue() = default;

-    virtual void NotifyAvailable(KThread* waiting_thread, KSynchronizationObject* signaled_object,
-                                 ResultCode wait_result);
-    virtual void EndWait(KThread* waiting_thread, ResultCode wait_result);
-    virtual void CancelWait(KThread* waiting_thread, ResultCode wait_result,
-                            bool cancel_timer_task);
+    bool IsEmpty() const {
+        return wait_list.empty();
+    }
+
+    KThread::WaiterList::iterator begin() {
+        return wait_list.begin();
+    }
+    KThread::WaiterList::iterator end() {
+        return wait_list.end();
+    }
+
+    bool SleepThread(KThread* t) {
+        KScopedSchedulerLock sl{kernel};
+
+        // If the thread needs terminating, don't enqueue it.
+        if (t->IsTerminationRequested()) {
+            return false;
+        }
+
+        // Set the thread's queue and mark it as waiting.
+        t->SetSleepingQueue(this);
+        t->SetState(ThreadState::Waiting);
+
+        // Add the thread to the queue.
+        wait_list.push_back(*t);
+
+        return true;
+    }
+
+    void WakeupThread(KThread* t) {
+        KScopedSchedulerLock sl{kernel};
+
+        // Remove the thread from the queue.
+        wait_list.erase(wait_list.iterator_to(*t));
+
+        // Mark the thread as no longer sleeping.
+        t->SetState(ThreadState::Runnable);
+        t->SetSleepingQueue(nullptr);
+    }
+
+    KThread* WakeupFrontThread() {
+        KScopedSchedulerLock sl{kernel};
+
+        if (wait_list.empty()) {
+            return nullptr;
+        } else {
+            // Remove the thread from the queue.
+            auto it = wait_list.begin();
+            KThread* thread = std::addressof(*it);
+            wait_list.erase(it);
+
+            ASSERT(thread->GetState() == ThreadState::Waiting);
+
+            // Mark the thread as no longer sleeping.
+            thread->SetState(ThreadState::Runnable);
+            thread->SetSleepingQueue(nullptr);
+
+            return thread;
+        }
+    }

 private:
     KernelCore& kernel;
     KThread::WaiterList wait_list{};
 };

-class KThreadQueueWithoutEndWait : public KThreadQueue {
-public:
-    explicit KThreadQueueWithoutEndWait(KernelCore& kernel_) : KThreadQueue(kernel_) {}
-
-    virtual void EndWait(KThread* waiting_thread, ResultCode wait_result) override final;
-};
-
 } // namespace Kernel
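Note: a usage sketch of the restored KThreadQueue, using only API visible in this diff (GetCurrentThreadPointer, SleepThread, WakeupFrontThread, the Result* codes); it is illustrative, not code from the commit, and assumes the surrounding kernel headers:

    // Waiter side: enqueue the current thread; SleepThread takes the
    // scheduler lock itself and refuses threads that are terminating.
    ResultCode ParkOnQueue(KernelCore& kernel, KThreadQueue& queue) {
        if (!queue.SleepThread(GetCurrentThreadPointer(kernel))) {
            return ResultTerminationRequested;
        }
        return ResultSuccess;
    }

    // Signaller side: pop the oldest waiter; it comes back Runnable and
    // already detached from the queue.
    void SignalQueue(KThreadQueue& queue) {
        if (KThread* woken = queue.WakeupFrontThread()) {
            (void)woken; // e.g. hand it the result via SetSyncedObject
        }
    }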
@@ -14,7 +14,6 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
-#include "common/scope_exit.h"
 #include "common/thread.h"
 #include "common/thread_worker.h"
 #include "core/arm/arm_interface.h"
@@ -84,16 +83,12 @@ struct KernelCore::Impl {
     }

     void InitializeCores() {
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            cores[core_id].Initialize(current_process->Is64BitProcess());
-            system.Memory().SetCurrentPageTable(*current_process, core_id);
+        for (auto& core : cores) {
+            core.Initialize(current_process->Is64BitProcess());
         }
     }

     void Shutdown() {
-        is_shutting_down.store(true, std::memory_order_relaxed);
-        SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
-
         process_list.clear();

         // Close all open server ports.
@ -128,6 +123,15 @@ struct KernelCore::Impl {
|
||||||
next_user_process_id = KProcess::ProcessIDMin;
|
next_user_process_id = KProcess::ProcessIDMin;
|
||||||
next_thread_id = 1;
|
next_thread_id = 1;
|
||||||
|
|
||||||
|
for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
|
||||||
|
if (suspend_threads[core_id]) {
|
||||||
|
suspend_threads[core_id]->Close();
|
||||||
|
suspend_threads[core_id] = nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
schedulers[core_id].reset();
|
||||||
|
}
|
||||||
|
|
||||||
cores.clear();
|
cores.clear();
|
||||||
|
|
||||||
global_handle_table->Finalize();
|
global_handle_table->Finalize();
|
||||||
|
@@ -155,16 +159,6 @@ struct KernelCore::Impl {
         CleanupObject(time_shared_mem);
         CleanupObject(system_resource_limit);
 
-        for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
-            if (suspend_threads[core_id]) {
-                suspend_threads[core_id]->Close();
-                suspend_threads[core_id] = nullptr;
-            }
-
-            schedulers[core_id]->Finalize();
-            schedulers[core_id].reset();
-        }
-
         // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
         next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
 
@@ -251,11 +245,13 @@ struct KernelCore::Impl {
             KScopedSchedulerLock lock(kernel);
             global_scheduler_context->PreemptThreads();
         }
-        const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
+        const auto time_interval = std::chrono::nanoseconds{
+            Core::Timing::msToCycles(std::chrono::milliseconds(10))};
         system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
     });
 
-    const auto time_interval = std::chrono::nanoseconds{std::chrono::milliseconds(10)};
+    const auto time_interval =
+        std::chrono::nanoseconds{Core::Timing::msToCycles(std::chrono::milliseconds(10))};
     system.CoreTiming().ScheduleEvent(time_interval, preemption_event);
 }
 
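Both preemption timers above now pass the 10 ms interval through Core::Timing::msToCycles before scheduling the event, i.e. they convert wall-clock time into emulated CPU cycles. The conversion is essentially interval * clock_rate / 1000; a minimal sketch, where the clock-rate constant is an assumption for illustration rather than a value taken from this commit:

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>

// Assumed clock rate for illustration; yuzu derives this from the
// emulated CPU's base clock, not from a literal like this one.
constexpr std::int64_t kClockRateHz = 1'019'215'872;

constexpr std::int64_t MsToCycles(std::chrono::milliseconds ms) {
    return ms.count() * kClockRateHz / 1000;
}

int main() {
    // A 10 ms preemption tick, expressed in emulated CPU cycles.
    std::cout << MsToCycles(std::chrono::milliseconds(10)) << " cycles\n";
}
```
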
@@ -271,6 +267,14 @@ struct KernelCore::Impl {
 
     void MakeCurrentProcess(KProcess* process) {
         current_process = process;
+        if (process == nullptr) {
+            return;
+        }
+
+        const u32 core_id = GetCurrentHostThreadID();
+        if (core_id < Core::Hardware::NUM_CPU_CORES) {
+            system.Memory().SetCurrentPageTable(*process, core_id);
+        }
     }
 
     static inline thread_local u32 host_thread_id = UINT32_MAX;

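MakeCurrentProcess now refreshes the current page table only when the calling host thread is one of the emulated CPU cores; auxiliary threads, whose host-thread IDs fall outside the core range, skip the switch. A compact sketch of that guard, with stub Process/Memory types and a hypothetical per-thread core registration in place of GetCurrentHostThreadID:

```cpp
#include <cstdint>
#include <optional>

constexpr std::uint32_t kNumCpuCores = 4;

struct Process {}; // stands in for Kernel::KProcess
struct Memory {
    // Stands in for the emulator's per-core page table switch.
    void SetCurrentPageTable(Process&, std::uint32_t /*core_id*/) {}
};

// Hypothetical per-thread core registration, mirroring the kernel's
// thread_local host_thread_id.
thread_local std::optional<std::uint32_t> this_core_id;

void MakeCurrentProcess(Memory& memory, Process* process) {
    if (process == nullptr) {
        return; // no page table to switch to
    }
    // Only emulated CPU core threads refresh the page table; service
    // and loader threads fall outside the core-id range and skip it.
    if (this_core_id && *this_core_id < kNumCpuCores) {
        memory.SetCurrentPageTable(*process, *this_core_id);
    }
}

int main() {
    Memory memory;
    Process process;
    this_core_id = 0; // pretend we are core 0's host thread
    MakeCurrentProcess(memory, &process);
}
```
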
@@ -340,16 +344,7 @@ struct KernelCore::Impl {
         is_phantom_mode_for_singlecore = value;
     }
 
-    bool IsShuttingDown() const {
-        return is_shutting_down.load(std::memory_order_relaxed);
-    }
-
     KThread* GetCurrentEmuThread() {
-        // If we are shutting down the kernel, none of this is relevant anymore.
-        if (IsShuttingDown()) {
-            return {};
-        }
-
         const auto thread_id = GetCurrentHostThreadID();
         if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
             return GetHostDummyThread();

@@ -765,7 +760,6 @@ struct KernelCore::Impl {
     std::vector<std::unique_ptr<KThread>> dummy_threads;
 
     bool is_multicore{};
-    std::atomic_bool is_shutting_down{};
     bool is_phantom_mode_for_singlecore{};
     u32 single_core_thread_id{};
 
@@ -851,20 +845,16 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
     return impl->cores[id];
 }
 
-size_t KernelCore::CurrentPhysicalCoreIndex() const {
-    const u32 core_id = impl->GetCurrentHostThreadID();
-    if (core_id >= Core::Hardware::NUM_CPU_CORES) {
-        return Core::Hardware::NUM_CPU_CORES - 1;
-    }
-    return core_id;
-}
-
 Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
-    return impl->cores[CurrentPhysicalCoreIndex()];
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
 }
 
 const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
-    return impl->cores[CurrentPhysicalCoreIndex()];
+    u32 core_id = impl->GetCurrentHostThreadID();
+    ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
+    return impl->cores[core_id];
 }
 
 Kernel::KScheduler* KernelCore::CurrentScheduler() {

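Where the removed CurrentPhysicalCoreIndex clamped any non-core host thread to the last core, the restored accessors assert instead, so calling CurrentPhysicalCore from a non-core thread is treated as a bug rather than silently redirected. The two policies side by side, in miniature:

```cpp
#include <cassert>
#include <cstdint>

constexpr std::uint32_t kNumCpuCores = 4;

thread_local std::uint32_t host_thread_id = UINT32_MAX;

// Removed behaviour: out-of-range callers were clamped to the last core.
std::uint32_t CoreIndexClamped() {
    return host_thread_id < kNumCpuCores ? host_thread_id : kNumCpuCores - 1;
}

// Restored behaviour: out-of-range callers trip an assert in debug builds.
std::uint32_t CoreIndexChecked() {
    assert(host_thread_id < kNumCpuCores);
    return host_thread_id;
}

int main() {
    host_thread_id = 2; // a registered core thread: both policies agree
    return CoreIndexChecked() == CoreIndexClamped() ? 0 : 1;
}
```
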
@@ -1067,9 +1057,6 @@ void KernelCore::Suspend(bool in_suspention) {
             impl->suspend_threads[core_id]->SetState(state);
             impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
                 ThreadWaitReasonForDebugging::Suspended);
-            if (!should_suspend) {
-                impl->suspend_threads[core_id]->DisableDispatch();
-            }
         }
     }
 }

@@ -1078,21 +1065,19 @@ bool KernelCore::IsMulticore() const {
     return impl->is_multicore;
 }
 
-bool KernelCore::IsShuttingDown() const {
-    return impl->IsShuttingDown();
-}
-
 void KernelCore::ExceptionalExit() {
     exception_exited = true;
     Suspend(true);
 }
 
 void KernelCore::EnterSVCProfile() {
-    impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
+    std::size_t core = impl->GetCurrentHostThreadID();
+    impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
 }
 
 void KernelCore::ExitSVCProfile() {
-    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]);
+    std::size_t core = impl->GetCurrentHostThreadID();
+    MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
 }
 
 std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {

@@ -148,9 +148,6 @@ public:
     /// Gets the an instance of the respective physical CPU core.
     const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
 
-    /// Gets the current physical core index for the running host thread.
-    std::size_t CurrentPhysicalCoreIndex() const;
-
     /// Gets the sole instance of the Scheduler at the current running core.
     Kernel::KScheduler* CurrentScheduler();
 

@@ -274,8 +271,6 @@ public:
 
     bool IsMulticore() const;
 
-    bool IsShuttingDown() const;
-
     void EnterSVCProfile();
 
     void ExitSVCProfile();

@@ -25,27 +25,24 @@ public:
     void QueueSyncRequest(KSession& session, std::shared_ptr<HLERequestContext>&& context);
 
 private:
-    std::vector<std::jthread> threads;
+    std::vector<std::thread> threads;
     std::queue<std::function<void()>> requests;
     std::mutex queue_mutex;
-    std::condition_variable_any condition;
+    std::condition_variable condition;
     const std::string service_name;
+    bool stop{};
 };
 
 ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std::string& name)
     : service_name{name} {
-    for (std::size_t i = 0; i < num_threads; ++i) {
-        threads.emplace_back([this, &kernel](std::stop_token stop_token) {
+    for (std::size_t i = 0; i < num_threads; ++i)
+        threads.emplace_back([this, &kernel] {
             Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str());
 
             // Wait for first request before trying to acquire a render context
             {
                 std::unique_lock lock{queue_mutex};
-                condition.wait(lock, stop_token, [this] { return !requests.empty(); });
-            }
-
-            if (stop_token.stop_requested()) {
-                return;
+                condition.wait(lock, [this] { return stop || !requests.empty(); });
             }
 
             kernel.RegisterHostThread();

@@ -55,16 +52,10 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
 
                 {
                     std::unique_lock lock{queue_mutex};
-                    condition.wait(lock, stop_token, [this] { return !requests.empty(); });
-
-                    if (stop_token.stop_requested()) {
+                    condition.wait(lock, [this] { return stop || !requests.empty(); });
+                    if (stop || requests.empty()) {
                         return;
                     }
-
-                    if (requests.empty()) {
-                        continue;
-                    }
-
                     task = std::move(requests.front());
                     requests.pop();
                 }

@@ -72,7 +63,6 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
                 task();
             }
         });
-    }
 }
 
 void ServiceThread::Impl::QueueSyncRequest(KSession& session,

@@ -97,7 +87,16 @@ void ServiceThread::Impl::QueueSyncRequest(KSession& session,
     condition.notify_one();
 }
 
-ServiceThread::Impl::~Impl() = default;
+ServiceThread::Impl::~Impl() {
+    {
+        std::unique_lock lock{queue_mutex};
+        stop = true;
+    }
+    condition.notify_all();
+    for (std::thread& thread : threads) {
+        thread.join();
+    }
+}
 
 ServiceThread::ServiceThread(KernelCore& kernel, std::size_t num_threads, const std::string& name)
     : impl{std::make_unique<Impl>(kernel, num_threads, name)} {}

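The service thread reverts from std::jthread with a stop_token to std::thread plus an explicit stop flag: the destructor raises the flag under the queue mutex, wakes every worker, and joins them. A self-contained sketch of that lifecycle, with generic tasks in place of HLE requests (names here are illustrative, not yuzu's):

```cpp
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

class WorkerPool {
public:
    explicit WorkerPool(std::size_t num_threads) {
        for (std::size_t i = 0; i < num_threads; ++i)
            threads.emplace_back([this] {
                for (;;) {
                    std::function<void()> task;
                    {
                        std::unique_lock lock{queue_mutex};
                        condition.wait(lock, [this] { return stop || !requests.empty(); });
                        if (stop || requests.empty()) {
                            return; // shutting down, as in the reverted code
                        }
                        task = std::move(requests.front());
                        requests.pop();
                    }
                    task();
                }
            });
    }

    ~WorkerPool() {
        {
            std::unique_lock lock{queue_mutex};
            stop = true; // must be written under the mutex the waiters use
        }
        condition.notify_all(); // wake every sleeping worker
        for (std::thread& thread : threads) {
            thread.join();
        }
    }

    void Queue(std::function<void()> task) {
        {
            std::unique_lock lock{queue_mutex};
            requests.push(std::move(task));
        }
        condition.notify_one();
    }

private:
    std::vector<std::thread> threads;
    std::queue<std::function<void()>> requests;
    std::mutex queue_mutex;
    std::condition_variable condition;
    bool stop{};
};

int main() {
    WorkerPool pool(2);
    pool.Queue([] { /* handle one request; may not run before shutdown */ });
} // ~WorkerPool raises stop and joins the workers
```

Note that, exactly as in the diff, a worker wakes and returns as soon as stop is set, even if requests are still queued.
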
@@ -31,7 +31,6 @@
 #include "core/hle/kernel/k_shared_memory.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread.h"
-#include "core/hle/kernel/k_thread_queue.h"
 #include "core/hle/kernel/k_transfer_memory.h"
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"

@@ -308,29 +307,26 @@ static ResultCode ConnectToNamedPort32(Core::System& system, Handle* out_handle,
 
 /// Makes a blocking IPC call to an OS service.
 static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
+
     auto& kernel = system.Kernel();
 
-    // Create the wait queue.
-    KThreadQueue wait_queue(kernel);
-
-    // Get the client session from its handle.
-    KScopedAutoObject session =
-        kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
-    R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
-
-    LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
-
     auto thread = kernel.CurrentScheduler()->GetCurrentThread();
     {
         KScopedSchedulerLock lock(kernel);
+        thread->SetState(ThreadState::Waiting);
+        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
 
-        // This is a synchronous request, so we should wait for our request to complete.
-        GetCurrentThread(kernel).BeginWait(std::addressof(wait_queue));
-        GetCurrentThread(kernel).SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
-        session->SendSyncRequest(&GetCurrentThread(kernel), system.Memory(), system.CoreTiming());
+        {
+            KScopedAutoObject session =
+                kernel.CurrentProcess()->GetHandleTable().GetObject<KClientSession>(handle);
+            R_UNLESS(session.IsNotNull(), ResultInvalidHandle);
+            LOG_TRACE(Kernel_SVC, "called handle=0x{:08X}({})", handle, session->GetName());
+            session->SendSyncRequest(thread, system.Memory(), system.CoreTiming());
+        }
     }
 
-    return thread->GetWaitResult();
+    KSynchronizationObject* dummy{};
+    return thread->GetWaitResult(std::addressof(dummy));
 }
 
 static ResultCode SendSyncRequest32(Core::System& system, Handle handle) {

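The reverted SendSyncRequest parks the calling thread (ThreadState::Waiting) under the scheduler lock, forwards the message to the session, and blocks until the service side publishes a result that GetWaitResult then returns. Outside the kernel, that rendezvous can be loosely modelled with a promise/future pair; this is an analogy, not the kernel mechanism itself:

```cpp
#include <cstdint>
#include <future>
#include <iostream>
#include <thread>

using ResultCode = std::uint32_t;
constexpr ResultCode ResultSuccess = 0;

// Stand-in for the service side of the session: completes the request
// and wakes the waiting "guest" thread by fulfilling the promise.
void ServiceSide(std::promise<ResultCode> reply) {
    // ... handle the IPC message ...
    reply.set_value(ResultSuccess);
}

ResultCode SendSyncRequest() {
    std::promise<ResultCode> reply;
    std::future<ResultCode> wait_result = reply.get_future();
    std::thread server(ServiceSide, std::move(reply));
    // The caller blocks here, like the guest thread parked in
    // ThreadState::Waiting until the session signals completion.
    const ResultCode rc = wait_result.get();
    server.join();
    return rc;
}

int main() {
    std::cout << "result = " << SendSyncRequest() << '\n';
}
```
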
@@ -877,7 +873,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
                 const u64 thread_ticks = current_thread->GetCpuTime();
 
                 out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
-            } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) {
+            } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
                 out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
             }
 

@@ -891,8 +887,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
             return ResultInvalidHandle;
         }
 
-        if (info_sub_id != 0xFFFFFFFFFFFFFFFF &&
-            info_sub_id != system.Kernel().CurrentPhysicalCoreIndex()) {
+        if (info_sub_id != 0xFFFFFFFFFFFFFFFF && info_sub_id != system.CurrentCoreIndex()) {
             LOG_ERROR(Kernel_SVC, "Core is not the current core, got {}", info_sub_id);
             return ResultInvalidCombination;
         }

@@ -5,7 +5,6 @@
 #include "common/assert.h"
 #include "core/core.h"
 #include "core/core_timing.h"
-#include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_thread.h"
 #include "core/hle/kernel/time_manager.h"
 

@@ -16,10 +15,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
         Core::Timing::CreateEvent("Kernel::TimeManagerCallback",
                                   [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
                                       KThread* thread = reinterpret_cast<KThread*>(thread_handle);
-                                      {
-                                          KScopedSchedulerLock sl(system.Kernel());
-                                          thread->OnTimer();
-                                      }
+                                      thread->Wakeup();
                                   });
 }
 
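After this change, the time manager's timeout callback simply calls thread->Wakeup(), which takes the scheduler lock internally, instead of locking and driving an OnTimer() hook itself. Reduced to a host-level analogue, the callback is just "notify the sleeper":

```cpp
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex m;
std::condition_variable cv;
bool woken = false;

// Analogue of the time manager's event callback: wake the parked thread.
void TimerCallback() {
    {
        std::lock_guard lk{m}; // Wakeup() takes the scheduler lock internally
        woken = true;
    }
    cv.notify_one();
}

int main() {
    std::thread sleeper([] {
        std::unique_lock lk{m};
        cv.wait(lk, [] { return woken; }); // parked like a Waiting guest thread
        std::cout << "woken by timer\n";
    });
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    TimerCallback();
    sleeper.join();
}
```
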
@@ -17,10 +17,11 @@ namespace Service::Friend {
 
 class IFriendService final : public ServiceFramework<IFriendService> {
 public:
-    explicit IFriendService(Core::System& system_) : ServiceFramework{system_, "IFriendService"} {
+    explicit IFriendService(Core::System& system_)
+        : ServiceFramework{system_, "IFriendService"}, service_context{system, "IFriendService"} {
         // clang-format off
         static const FunctionInfo functions[] = {
-            {0, nullptr, "GetCompletionEvent"},
+            {0, &IFriendService::GetCompletionEvent, "GetCompletionEvent"},
             {1, nullptr, "Cancel"},
             {10100, nullptr, "GetFriendListIds"},
             {10101, &IFriendService::GetFriendList, "GetFriendList"},

@@ -109,6 +110,12 @@ public:
         // clang-format on
 
         RegisterHandlers(functions);
+
+        completion_event = service_context.CreateEvent("IFriendService:CompletionEvent");
+    }
+
+    ~IFriendService() override {
+        service_context.CloseEvent(completion_event);
     }
 
 private:

@@ -129,6 +136,14 @@ private:
     };
     static_assert(sizeof(SizedFriendFilter) == 0x10, "SizedFriendFilter is an invalid size");
 
+    void GetCompletionEvent(Kernel::HLERequestContext& ctx) {
+        LOG_DEBUG(Service_Friend, "called");
+
+        IPC::ResponseBuilder rb{ctx, 2, 1};
+        rb.Push(ResultSuccess);
+        rb.PushCopyObjects(completion_event->GetReadableEvent());
+    }
+
     void GetBlockedUserListIds(Kernel::HLERequestContext& ctx) {
         // This is safe to stub, as there should be no adverse consequences from reporting no
         // blocked users.

@@ -179,6 +194,10 @@ private:
         rb.Push<u32>(0); // Friend count
         // TODO(ogniK): Return a buffer of u64s which are the "NetworkServiceAccountId"
     }
+
+    KernelHelpers::ServiceContext service_context;
+
+    Kernel::KEvent* completion_event;
 };
 
 class INotificationService final : public ServiceFramework<INotificationService> {

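IFriendService now owns a real completion KEvent: created through the service context in the constructor, closed in the destructor, and handed back to guests via GetCompletionEvent as a copied handle. A minimal stand-in for that shape, with a condition-variable-backed Event instead of a kernel object (all names here are illustrative):

```cpp
#include <condition_variable>
#include <memory>
#include <mutex>

// Minimal stand-in for the readable end of a Kernel::KEvent.
class Event {
public:
    void Signal() {
        std::lock_guard lk{m};
        signalled = true;
        cv.notify_all();
    }
    void Wait() {
        std::unique_lock lk{m};
        cv.wait(lk, [this] { return signalled; });
    }

private:
    std::mutex m;
    std::condition_variable cv;
    bool signalled = false;
};

class FriendService {
public:
    FriendService() : completion_event{std::make_shared<Event>()} {}

    // Analogue of GetCompletionEvent: hand the caller a reference to the
    // event; the service signals it when an async operation finishes.
    std::shared_ptr<Event> GetCompletionEvent() const {
        return completion_event;
    }

    void FinishPendingWork() {
        completion_event->Signal();
    }

private:
    std::shared_ptr<Event> completion_event;
};

int main() {
    FriendService service;
    auto event = service.GetCompletionEvent();
    service.FinishPendingWork();
    event->Wait(); // returns immediately; the event is already signalled
}
```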