early-access version 3038

commit a9b25068d9
parent 9f50a9bdc6

41 changed files with 1259 additions and 2149 deletions
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 3035.
+This is the source code for early-access 3038.
 
 ## Legal Notice
 
@@ -190,9 +190,6 @@ add_library(core STATIC
     hle/kernel/k_code_memory.h
     hle/kernel/k_condition_variable.cpp
     hle/kernel/k_condition_variable.h
-    hle/kernel/k_dynamic_page_manager.h
-    hle/kernel/k_dynamic_resource_manager.h
-    hle/kernel/k_dynamic_slab_heap.h
    hle/kernel/k_event.cpp
    hle/kernel/k_event.h
    hle/kernel/k_handle_table.cpp
@@ -134,14 +134,6 @@ void ARM_Interface::Run() {
         }
         system.ExitDynarmicProfile();
 
-        // If the thread is scheduled for termination, exit the thread.
-        if (current_thread->HasDpc()) {
-            if (current_thread->IsTerminationRequested()) {
-                current_thread->Exit();
-                UNREACHABLE();
-            }
-        }
-
         // Notify the debugger and go to sleep if a breakpoint was hit,
         // or if the thread is unable to continue for any reason.
         if (Has(hr, breakpoint) || Has(hr, no_execute)) {
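Note: the block removed above was the dispatcher's cooperative-exit check; after each run-slice it polled the current thread for a pending DPC and a termination request before re-entering guest code. Below is a minimal, self-contained sketch of that pattern, assuming simplified types; `GuestThread`, `dpc_pending`, and `termination_requested` are illustrative stand-ins, not yuzu's actual API.

```cpp
#include <atomic>

// Hypothetical sketch of the cooperative-exit pattern the removed block used.
struct GuestThread {
    std::atomic_bool dpc_pending{false};
    std::atomic_bool termination_requested{false};
};

inline bool ShouldExit(const GuestThread& t) {
    // Only inspect the termination state when a deferred procedure call is
    // pending, mirroring the removed HasDpc() fast path.
    return t.dpc_pending.load(std::memory_order_acquire) &&
           t.termination_requested.load(std::memory_order_acquire);
}
```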
@@ -133,48 +133,6 @@ struct System::Impl {
         : kernel{system}, fs_controller{system}, memory{system}, hid_core{}, room_network{},
           cpu_manager{system}, reporter{system}, applet_manager{system}, time_manager{system} {}
 
-    void Initialize(System& system) {
-        device_memory = std::make_unique<Core::DeviceMemory>();
-
-        is_multicore = Settings::values.use_multi_core.GetValue();
-
-        core_timing.SetMulticore(is_multicore);
-        core_timing.Initialize([&system]() { system.RegisterHostThread(); });
-
-        const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
-        const auto current_time =
-            std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
-        Settings::values.custom_rtc_differential =
-            Settings::values.custom_rtc.value_or(current_time) - current_time;
-
-        // Create a default fs if one doesn't already exist.
-        if (virtual_filesystem == nullptr)
-            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
-        if (content_provider == nullptr)
-            content_provider = std::make_unique<FileSys::ContentProviderUnion>();
-
-        // Create default implementations of applets if one is not provided.
-        applet_manager.SetDefaultAppletsIfMissing();
-
-        is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
-
-        kernel.SetMulticore(is_multicore);
-        cpu_manager.SetMulticore(is_multicore);
-        cpu_manager.SetAsyncGpu(is_async_gpu);
-    }
-
-    void ReinitializeIfNecessary(System& system) {
-        if (is_multicore == Settings::values.use_multi_core.GetValue()) {
-            return;
-        }
-
-        LOG_DEBUG(Kernel, "Re-initializing");
-
-        is_multicore = Settings::values.use_multi_core.GetValue();
-
-        Initialize(system);
-    }
-
     SystemResultStatus Run() {
         std::unique_lock<std::mutex> lk(suspend_guard);
         status = SystemResultStatus::Success;
@@ -220,14 +178,37 @@ struct System::Impl {
             debugger = std::make_unique<Debugger>(system, port);
         }
 
-    SystemResultStatus SetupForMainProcess(System& system, Frontend::EmuWindow& emu_window) {
+    SystemResultStatus Init(System& system, Frontend::EmuWindow& emu_window) {
         LOG_DEBUG(Core, "initialized OK");
 
-        // Setting changes may require a full system reinitialization (e.g., disabling multicore).
-        ReinitializeIfNecessary(system);
+        device_memory = std::make_unique<Core::DeviceMemory>();
+
+        is_multicore = Settings::values.use_multi_core.GetValue();
+        is_async_gpu = Settings::values.use_asynchronous_gpu_emulation.GetValue();
+
+        kernel.SetMulticore(is_multicore);
+        cpu_manager.SetMulticore(is_multicore);
+        cpu_manager.SetAsyncGpu(is_async_gpu);
+        core_timing.SetMulticore(is_multicore);
 
         kernel.Initialize();
         cpu_manager.Initialize();
+        core_timing.Initialize([&system]() { system.RegisterHostThread(); });
+
+        const auto posix_time = std::chrono::system_clock::now().time_since_epoch();
+        const auto current_time =
+            std::chrono::duration_cast<std::chrono::seconds>(posix_time).count();
+        Settings::values.custom_rtc_differential =
+            Settings::values.custom_rtc.value_or(current_time) - current_time;
+
+        // Create a default fs if one doesn't already exist.
+        if (virtual_filesystem == nullptr)
+            virtual_filesystem = std::make_shared<FileSys::RealVfsFilesystem>();
+        if (content_provider == nullptr)
+            content_provider = std::make_unique<FileSys::ContentProviderUnion>();
+
+        /// Create default implementations of applets if one is not provided.
+        applet_manager.SetDefaultAppletsIfMissing();
 
         /// Reset all glue registrations
         arp_manager.ResetAll();
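Note: the custom-RTC block moved back into Init() stores the difference between the user-requested clock and the live host clock, so the emulated RTC can later be reconstructed from the real time. A minimal sketch of that arithmetic follows; `RtcDifferential` is a hypothetical helper (yuzu stores the result in `Settings::values.custom_rtc_differential`).

```cpp
#include <chrono>
#include <optional>

// Returns (requested RTC - current wall clock), in seconds. Adding this
// differential to the live clock later yields the requested RTC again.
inline long long RtcDifferential(std::optional<long long> custom_rtc) {
    const auto now = std::chrono::system_clock::now().time_since_epoch();
    const long long current_time =
        std::chrono::duration_cast<std::chrono::seconds>(now).count();
    return custom_rtc.value_or(current_time) - current_time;
}
```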
@@ -272,11 +253,11 @@ struct System::Impl {
             return SystemResultStatus::ErrorGetLoader;
         }
 
-        SystemResultStatus init_result{SetupForMainProcess(system, emu_window)};
+        SystemResultStatus init_result{Init(system, emu_window)};
         if (init_result != SystemResultStatus::Success) {
             LOG_CRITICAL(Core, "Failed to initialize system (Error {})!",
                          static_cast<int>(init_result));
-            ShutdownMainProcess();
+            Shutdown();
             return init_result;
         }
 
@@ -295,7 +276,7 @@ struct System::Impl {
         const auto [load_result, load_parameters] = app_loader->Load(*main_process, system);
         if (load_result != Loader::ResultStatus::Success) {
             LOG_CRITICAL(Core, "Failed to load ROM (Error {})!", load_result);
-            ShutdownMainProcess();
+            Shutdown();
 
             return static_cast<SystemResultStatus>(
                 static_cast<u32>(SystemResultStatus::ErrorLoader) + static_cast<u32>(load_result));
@@ -354,7 +335,7 @@ struct System::Impl {
         return status;
     }
 
-    void ShutdownMainProcess() {
+    void Shutdown() {
         SetShuttingDown(true);
 
         // Log last frame performance stats if game was loaded
@@ -388,7 +369,7 @@ struct System::Impl {
         cheat_engine.reset();
         telemetry_session.reset();
         time_manager.Shutdown();
-        core_timing.ClearPendingEvents();
+        core_timing.Shutdown();
         app_loader.reset();
         audio_core.reset();
         gpu_core.reset();
@@ -396,6 +377,7 @@ struct System::Impl {
         perf_stats.reset();
         kernel.Shutdown();
         memory.Reset();
+        applet_manager.ClearAll();
 
         if (auto room_member = room_network.GetRoomMember().lock()) {
             Network::GameInfo game_info{};
@@ -538,10 +520,6 @@ const CpuManager& System::GetCpuManager() const {
     return impl->cpu_manager;
 }
 
-void System::Initialize() {
-    impl->Initialize(*this);
-}
-
 SystemResultStatus System::Run() {
     return impl->Run();
 }
@@ -562,8 +540,8 @@ void System::InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size) {
     impl->kernel.InvalidateCpuInstructionCacheRange(addr, size);
 }
 
-void System::ShutdownMainProcess() {
-    impl->ShutdownMainProcess();
+void System::Shutdown() {
+    impl->Shutdown();
 }
 
 bool System::IsShuttingDown() const {
@@ -142,12 +142,6 @@ public:
     System(System&&) = delete;
     System& operator=(System&&) = delete;
 
-    /**
-     * Initializes the system
-     * This function will initialize core functionality used for system emulation
-     */
-    void Initialize();
-
     /**
      * Run the OS and Application
     * This function will start emulation and run the relevant devices
@@ -172,8 +166,8 @@ public:
 
     void InvalidateCpuInstructionCacheRange(VAddr addr, std::size_t size);
 
-    /// Shutdown the main emulated process.
-    void ShutdownMainProcess();
+    /// Shutdown the emulated system.
+    void Shutdown();
 
     /// Check if the core is shutting down.
     [[nodiscard]] bool IsShuttingDown() const;
@@ -40,9 +40,7 @@ struct CoreTiming::Event {
 CoreTiming::CoreTiming()
     : clock{Common::CreateBestMatchingClock(Hardware::BASE_CLOCK_RATE, Hardware::CNTFREQ)} {}
 
-CoreTiming::~CoreTiming() {
-    Reset();
-}
+CoreTiming::~CoreTiming() = default;
 
 void CoreTiming::ThreadEntry(CoreTiming& instance) {
     constexpr char name[] = "HostTiming";
@@ -55,7 +53,6 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
 }
 
 void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
-    Reset();
     on_thread_init = std::move(on_thread_init_);
     event_fifo_id = 0;
     shutting_down = false;
@@ -68,8 +65,17 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
     }
 }
 
-void CoreTiming::ClearPendingEvents() {
-    event_queue.clear();
+void CoreTiming::Shutdown() {
+    paused = true;
+    shutting_down = true;
+    pause_event.Set();
+    event.Set();
+    if (timer_thread) {
+        timer_thread->join();
+    }
+    ClearPendingEvents();
+    timer_thread.reset();
+    has_started = false;
 }
 
 void CoreTiming::Pause(bool is_paused) {
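Note: the restored CoreTiming::Shutdown() above folds the old Reset() teardown into one call: flip the stop flags, wake the timer thread so it can observe them, join, and only then clear remaining state. A self-contained sketch of that ordering, assuming a simplified worker (std::condition_variable stands in for yuzu's Common::Event; the class and names are illustrative):

```cpp
#include <condition_variable>
#include <mutex>
#include <optional>
#include <thread>

// Minimal sketch (not yuzu's CoreTiming) of signal-then-join shutdown: the
// worker must be woken and joined before any state it touches is freed.
class TimerWorker {
public:
    void Start() {
        thread.emplace([this] { Loop(); });
    }

    void Shutdown() {
        {
            std::scoped_lock lk{mutex};
            paused = true;
            shutting_down = true;
        }
        wakeup.notify_all();    // equivalent of pause_event.Set() / event.Set()
        if (thread) {
            thread->join();     // worker has provably exited past this point
            thread.reset();
        }
    }

private:
    void Loop() {
        std::unique_lock lk{mutex};
        wakeup.wait(lk, [this] { return shutting_down; });
    }

    std::optional<std::thread> thread;
    std::mutex mutex;
    std::condition_variable wakeup;
    bool paused{false};
    bool shutting_down{false};
};
```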
@@ -190,6 +196,10 @@ u64 CoreTiming::GetClockTicks() const {
     return CpuCyclesToClockCycles(ticks);
 }
 
+void CoreTiming::ClearPendingEvents() {
+    event_queue.clear();
+}
+
 void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
     std::scoped_lock lock{basic_lock};
 
@@ -297,18 +307,6 @@ void CoreTiming::ThreadLoop() {
     }
 }
 
-void CoreTiming::Reset() {
-    paused = true;
-    shutting_down = true;
-    pause_event.Set();
-    event.Set();
-    if (timer_thread) {
-        timer_thread->join();
-    }
-    timer_thread.reset();
-    has_started = false;
-}
-
 std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
     if (is_multicore) {
         return clock->GetTimeNS();
@@ -61,14 +61,19 @@ public:
     /// required to end slice - 1 and start slice 0 before the first cycle of code is executed.
     void Initialize(std::function<void()>&& on_thread_init_);
 
-    /// Clear all pending events. This should ONLY be done on exit.
-    void ClearPendingEvents();
+    /// Tears down all timing related functionality.
+    void Shutdown();
 
     /// Sets if emulation is multicore or single core, must be set before Initialize
     void SetMulticore(bool is_multicore_) {
         is_multicore = is_multicore_;
     }
 
+    /// Check if it's using host timing.
+    bool IsHostTiming() const {
+        return is_multicore;
+    }
+
     /// Pauses/Unpauses the execution of the timer thread.
     void Pause(bool is_paused);
 
@@ -131,11 +136,12 @@ public:
 private:
     struct Event;
 
+    /// Clear all pending events. This should ONLY be done on exit.
+    void ClearPendingEvents();
+
     static void ThreadEntry(CoreTiming& instance);
     void ThreadLoop();
 
-    void Reset();
-
     std::unique_ptr<Common::WallClock> clock;
 
     s64 global_timer = 0;
@@ -31,14 +31,12 @@ public:
                DramMemoryMap::Base;
     }
 
-    template <typename T>
-    T* GetPointer(PAddr addr) {
-        return reinterpret_cast<T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
+    u8* GetPointer(PAddr addr) {
+        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
     }
 
-    template <typename T>
-    const T* GetPointer(PAddr addr) const {
-        return reinterpret_cast<const T*>(buffer.BackingBasePointer() + (addr - DramMemoryMap::Base));
+    const u8* GetPointer(PAddr addr) const {
+        return buffer.BackingBasePointer() + (addr - DramMemoryMap::Base);
     }
 
     Common::HostMemory buffer;
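Note: this hunk swaps the templated accessor (the 3035 side) for the raw u8* form the revert restores; the callers below drop their `<void>` template arguments to match. A standalone sketch of both styles, where `DeviceMemorySketch`, `backing`, and `DramBase` are hypothetical stand-ins for yuzu's DeviceMemory, the Common::HostMemory backing pointer, and DramMemoryMap::Base:

```cpp
#include <cstdint>

constexpr std::uint64_t DramBase = 0x8000'0000ULL; // illustrative base address

struct DeviceMemorySketch {
    std::uint8_t* backing; // host allocation mirroring emulated DRAM

    // Templated form: caller picks the pointee type, accessor does the cast.
    template <typename T>
    T* GetPointer(std::uint64_t paddr) {
        return reinterpret_cast<T*>(backing + (paddr - DramBase));
    }

    // Reverted form: always return raw bytes; callers cast if they need to.
    std::uint8_t* GetPointerRaw(std::uint64_t paddr) {
        return backing + (paddr - DramBase);
    }
};
```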
@@ -94,8 +94,8 @@ VAddr InitializeSlabHeap(Core::System& system, KMemoryLayout& memory_layout, VAd
     // TODO(bunnei): Fix this once we support the kernel virtual memory layout.
 
     if (size > 0) {
-        void* backing_kernel_memory{system.DeviceMemory().GetPointer<void>(
-            TranslateSlabAddrToPhysical(memory_layout, start))};
+        void* backing_kernel_memory{
+            system.DeviceMemory().GetPointer(TranslateSlabAddrToPhysical(memory_layout, start))};
 
         const KMemoryRegion* region = memory_layout.FindVirtual(start + size - 1);
         ASSERT(region != nullptr);
@@ -181,7 +181,7 @@ void InitializeKPageBufferSlabHeap(Core::System& system) {
     ASSERT(slab_address != 0);
 
     // Initialize the slabheap.
-    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer<void>(slab_address),
+    KPageBuffer::InitializeSlabHeap(kernel, system.DeviceMemory().GetPointer(slab_address),
                                     slab_size);
 }
 
@@ -34,7 +34,7 @@ Result KCodeMemory::Initialize(Core::DeviceMemory& device_memory, VAddr addr, si
 
     // Clear the memory.
     for (const auto& block : m_page_group.Nodes()) {
-        std::memset(device_memory.GetPointer<void>(block.GetAddress()), 0xFF, block.GetSize());
+        std::memset(device_memory.GetPointer(block.GetAddress()), 0xFF, block.GetSize());
     }
 
     // Set remaining tracking members.
@@ -11,34 +11,29 @@
 namespace Kernel::KInterruptManager {
 
 void HandleInterrupt(KernelCore& kernel, s32 core_id) {
+    auto* process = kernel.CurrentProcess();
+    if (!process) {
+        return;
+    }
+
     // Acknowledge the interrupt.
     kernel.PhysicalCore(core_id).ClearInterrupt();
 
     auto& current_thread = GetCurrentThread(kernel);
 
-    if (auto* process = kernel.CurrentProcess(); process) {
-        // If the user disable count is set, we may need to pin the current thread.
-        if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
-            KScopedSchedulerLock sl{kernel};
+    // If the user disable count is set, we may need to pin the current thread.
+    if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
+        KScopedSchedulerLock sl{kernel};
 
-            // Pin the current thread.
-            process->PinCurrentThread(core_id);
+        // Pin the current thread.
+        process->PinCurrentThread(core_id);
 
-            // Set the interrupt flag for the thread.
-            GetCurrentThread(kernel).SetInterruptFlag();
-        }
+        // Set the interrupt flag for the thread.
+        GetCurrentThread(kernel).SetInterruptFlag();
     }
 
     // Request interrupt scheduling.
     kernel.CurrentScheduler()->RequestScheduleOnInterrupt();
 }
 
-void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask) {
-    for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
-        if (core_mask & (1ULL << core_id)) {
-            kernel.PhysicalCore(core_id).Interrupt();
-        }
-    }
-}
-
 } // namespace Kernel::KInterruptManager
@@ -11,8 +11,6 @@ class KernelCore;
 
 namespace KInterruptManager {
 void HandleInterrupt(KernelCore& kernel, s32 core_id);
-void SendInterProcessorInterrupt(KernelCore& kernel, u64 core_mask);
-
-} // namespace KInterruptManager
+}
 
 } // namespace Kernel
@@ -6,7 +6,6 @@
 #include "common/alignment.h"
 #include "common/assert.h"
 #include "common/common_types.h"
-#include "common/intrusive_red_black_tree.h"
 #include "core/hle/kernel/memory_types.h"
 #include "core/hle/kernel/svc_types.h"
 
@@ -169,8 +168,9 @@ constexpr KMemoryPermission ConvertToKMemoryPermission(Svc::MemoryPermission per
 
 enum class KMemoryAttribute : u8 {
     None = 0x00,
-    All = 0xFF,
-    UserMask = All,
+    Mask = 0x7F,
+    All = Mask,
+    DontCareMask = 0x80,
 
     Locked = static_cast<u8>(Svc::MemoryAttribute::Locked),
     IpcLocked = static_cast<u8>(Svc::MemoryAttribute::IpcLocked),
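Note: the restored layout splits the attribute byte into real attribute bits (Mask = 0x7F) and a top "don't care" bit (DontCareMask = 0x80), and the static_assert in the next hunk checks that the two ranges never overlap. The same invariant in a standalone, compilable form; `AttrSketch` is an illustrative enum, not the kernel's:

```cpp
#include <cstdint>

enum class AttrSketch : std::uint8_t {
    None = 0x00,
    Mask = 0x7F,         // bits that carry real attributes
    All = Mask,
    DontCareMask = 0x80, // marker bit ignored by property comparisons
};

static_assert((static_cast<std::uint8_t>(AttrSketch::Mask) &
               static_cast<std::uint8_t>(AttrSketch::DontCareMask)) == 0,
              "attribute bits and don't-care bit must be disjoint");
```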
@@ -178,112 +178,76 @@ enum class KMemoryAttribute : u8 {
     Uncached = static_cast<u8>(Svc::MemoryAttribute::Uncached),
 
     SetMask = Uncached,
+
+    IpcAndDeviceMapped = IpcLocked | DeviceShared,
+    LockedAndIpcLocked = Locked | IpcLocked,
+    DeviceSharedAndUncached = DeviceShared | Uncached
 };
 DECLARE_ENUM_FLAG_OPERATORS(KMemoryAttribute);
 
-enum class KMemoryBlockDisableMergeAttribute : u8 {
-    None = 0,
-    Normal = (1u << 0),
-    DeviceLeft = (1u << 1),
-    IpcLeft = (1u << 2),
-    Locked = (1u << 3),
-    DeviceRight = (1u << 4),
-
-    AllLeft = Normal | DeviceLeft | IpcLeft | Locked,
-    AllRight = DeviceRight,
-};
-DECLARE_ENUM_FLAG_OPERATORS(KMemoryBlockDisableMergeAttribute);
+static_assert((static_cast<u8>(KMemoryAttribute::Mask) &
+               static_cast<u8>(KMemoryAttribute::DontCareMask)) == 0);
 
 struct KMemoryInfo {
-    uintptr_t m_address;
-    size_t m_size;
-    KMemoryState m_state;
-    u16 m_device_disable_merge_left_count;
-    u16 m_device_disable_merge_right_count;
-    u16 m_ipc_lock_count;
-    u16 m_device_use_count;
-    u16 m_ipc_disable_merge_count;
-    KMemoryPermission m_permission;
-    KMemoryAttribute m_attribute;
-    KMemoryPermission m_original_permission;
-    KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
+    VAddr addr{};
+    std::size_t size{};
+    KMemoryState state{};
+    KMemoryPermission perm{};
+    KMemoryAttribute attribute{};
+    KMemoryPermission original_perm{};
+    u16 ipc_lock_count{};
+    u16 device_use_count{};
 
     constexpr Svc::MemoryInfo GetSvcMemoryInfo() const {
         return {
-            .addr = m_address,
-            .size = m_size,
-            .state = static_cast<Svc::MemoryState>(m_state & KMemoryState::Mask),
-            .attr = static_cast<Svc::MemoryAttribute>(m_attribute & KMemoryAttribute::UserMask),
-            .perm = static_cast<Svc::MemoryPermission>(m_permission & KMemoryPermission::UserMask),
-            .ipc_refcount = m_ipc_lock_count,
-            .device_refcount = m_device_use_count,
-            .padding = {},
+            addr,
+            size,
+            static_cast<Svc::MemoryState>(state & KMemoryState::Mask),
+            static_cast<Svc::MemoryAttribute>(attribute & KMemoryAttribute::Mask),
+            static_cast<Svc::MemoryPermission>(perm & KMemoryPermission::UserMask),
+            ipc_lock_count,
+            device_use_count,
         };
     }
 
-    constexpr uintptr_t GetAddress() const {
-        return m_address;
+    constexpr VAddr GetAddress() const {
+        return addr;
     }
-    constexpr size_t GetSize() const {
-        return m_size;
+    constexpr std::size_t GetSize() const {
+        return size;
     }
-    constexpr size_t GetNumPages() const {
-        return this->GetSize() / PageSize;
+    constexpr std::size_t GetNumPages() const {
+        return GetSize() / PageSize;
     }
-    constexpr uintptr_t GetEndAddress() const {
-        return this->GetAddress() + this->GetSize();
+    constexpr VAddr GetEndAddress() const {
+        return GetAddress() + GetSize();
     }
-    constexpr uintptr_t GetLastAddress() const {
-        return this->GetEndAddress() - 1;
+    constexpr VAddr GetLastAddress() const {
+        return GetEndAddress() - 1;
     }
-    constexpr u16 GetIpcLockCount() const {
-        return m_ipc_lock_count;
-    }
-    constexpr u16 GetIpcDisableMergeCount() const {
-        return m_ipc_disable_merge_count;
-    }
     constexpr KMemoryState GetState() const {
-        return m_state;
+        return state;
     }
-    constexpr KMemoryPermission GetPermission() const {
-        return m_permission;
-    }
-    constexpr KMemoryPermission GetOriginalPermission() const {
-        return m_original_permission;
-    }
     constexpr KMemoryAttribute GetAttribute() const {
-        return m_attribute;
+        return attribute;
     }
-    constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
-        return m_disable_merge_attribute;
+    constexpr KMemoryPermission GetPermission() const {
+        return perm;
     }
 };
 
-class KMemoryBlock : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
+class KMemoryBlock final {
+    friend class KMemoryBlockManager;
+
 private:
-    u16 m_device_disable_merge_left_count;
-    u16 m_device_disable_merge_right_count;
-    VAddr m_address;
-    size_t m_num_pages;
-    KMemoryState m_memory_state;
-    u16 m_ipc_lock_count;
-    u16 m_device_use_count;
-    u16 m_ipc_disable_merge_count;
-    KMemoryPermission m_permission;
-    KMemoryPermission m_original_permission;
-    KMemoryAttribute m_attribute;
-    KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
+    VAddr addr{};
+    std::size_t num_pages{};
+    KMemoryState state{KMemoryState::None};
+    u16 ipc_lock_count{};
+    u16 device_use_count{};
+    KMemoryPermission perm{KMemoryPermission::None};
+    KMemoryPermission original_perm{KMemoryPermission::None};
+    KMemoryAttribute attribute{KMemoryAttribute::None};
 
 public:
     static constexpr int Compare(const KMemoryBlock& lhs, const KMemoryBlock& rhs) {
@@ -297,349 +261,113 @@ public:
     }
 
 public:
+    constexpr KMemoryBlock() = default;
+    constexpr KMemoryBlock(VAddr addr_, std::size_t num_pages_, KMemoryState state_,
+                           KMemoryPermission perm_, KMemoryAttribute attribute_)
+        : addr{addr_}, num_pages(num_pages_), state{state_}, perm{perm_}, attribute{attribute_} {}
+
     constexpr VAddr GetAddress() const {
-        return m_address;
+        return addr;
     }
 
-    constexpr size_t GetNumPages() const {
-        return m_num_pages;
+    constexpr std::size_t GetNumPages() const {
+        return num_pages;
     }
 
-    constexpr size_t GetSize() const {
-        return this->GetNumPages() * PageSize;
+    constexpr std::size_t GetSize() const {
+        return GetNumPages() * PageSize;
     }
 
     constexpr VAddr GetEndAddress() const {
-        return this->GetAddress() + this->GetSize();
+        return GetAddress() + GetSize();
     }
 
     constexpr VAddr GetLastAddress() const {
-        return this->GetEndAddress() - 1;
-    }
-
-    constexpr u16 GetIpcLockCount() const {
-        return m_ipc_lock_count;
-    }
-
-    constexpr u16 GetIpcDisableMergeCount() const {
-        return m_ipc_disable_merge_count;
-    }
-
-    constexpr KMemoryPermission GetPermission() const {
-        return m_permission;
-    }
-
-    constexpr KMemoryPermission GetOriginalPermission() const {
-        return m_original_permission;
-    }
-
-    constexpr KMemoryAttribute GetAttribute() const {
-        return m_attribute;
+        return GetEndAddress() - 1;
     }
 
     constexpr KMemoryInfo GetMemoryInfo() const {
         return {
-            .m_address = this->GetAddress(),
-            .m_size = this->GetSize(),
-            .m_state = m_memory_state,
-            .m_device_disable_merge_left_count = m_device_disable_merge_left_count,
-            .m_device_disable_merge_right_count = m_device_disable_merge_right_count,
-            .m_ipc_lock_count = m_ipc_lock_count,
-            .m_device_use_count = m_device_use_count,
-            .m_ipc_disable_merge_count = m_ipc_disable_merge_count,
-            .m_permission = m_permission,
-            .m_attribute = m_attribute,
-            .m_original_permission = m_original_permission,
-            .m_disable_merge_attribute = m_disable_merge_attribute,
+            GetAddress(), GetSize(), state, perm,
+            attribute, original_perm, ipc_lock_count, device_use_count,
         };
     }
 
-public:
-    explicit KMemoryBlock() = default;
-    constexpr KMemoryBlock(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
-                           KMemoryAttribute attr)
-        : Common::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(),
-          m_device_disable_merge_left_count(), m_device_disable_merge_right_count(),
-          m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
-          m_device_use_count(0), m_ipc_disable_merge_count(), m_permission(p),
-          m_original_permission(KMemoryPermission::None), m_attribute(attr),
-          m_disable_merge_attribute() {}
-
-    constexpr void Initialize(VAddr addr, size_t np, KMemoryState ms, KMemoryPermission p,
-                              KMemoryAttribute attr) {
-        m_device_disable_merge_left_count = 0;
-        m_device_disable_merge_right_count = 0;
-        m_address = addr;
-        m_num_pages = np;
-        m_memory_state = ms;
-        m_ipc_lock_count = 0;
-        m_device_use_count = 0;
-        m_permission = p;
-        m_original_permission = KMemoryPermission::None;
-        m_attribute = attr;
-        m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute::None;
-    }
-
+    void ShareToDevice(KMemoryPermission /*new_perm*/) {
+        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
+               device_use_count == 0);
+        attribute |= KMemoryAttribute::DeviceShared;
+        const u16 new_use_count{++device_use_count};
+        ASSERT(new_use_count > 0);
+    }
+
+    void UnshareToDevice(KMemoryPermission /*new_perm*/) {
+        ASSERT((attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
+        const u16 prev_use_count{device_use_count--};
+        ASSERT(prev_use_count > 0);
+        if (prev_use_count == 1) {
+            attribute &= ~KMemoryAttribute::DeviceShared;
+        }
+    }
+
+private:
     constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
-        constexpr auto AttributeIgnoreMask =
-            KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
-        return m_memory_state == s && m_permission == p &&
-               (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
+        constexpr KMemoryAttribute AttributeIgnoreMask{KMemoryAttribute::DontCareMask |
+                                                       KMemoryAttribute::IpcLocked |
+                                                       KMemoryAttribute::DeviceShared};
+        return state == s && perm == p &&
+               (attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
     }
 
     constexpr bool HasSameProperties(const KMemoryBlock& rhs) const {
-        return m_memory_state == rhs.m_memory_state && m_permission == rhs.m_permission &&
-               m_original_permission == rhs.m_original_permission &&
-               m_attribute == rhs.m_attribute && m_ipc_lock_count == rhs.m_ipc_lock_count &&
-               m_device_use_count == rhs.m_device_use_count;
-    }
-
-    constexpr bool CanMergeWith(const KMemoryBlock& rhs) const {
-        return this->HasSameProperties(rhs) &&
-               (m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight) ==
-                   KMemoryBlockDisableMergeAttribute::None &&
-               (rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft) ==
-                   KMemoryBlockDisableMergeAttribute::None;
-    }
-
-    constexpr bool Contains(VAddr addr) const {
-        return this->GetAddress() <= addr && addr <= this->GetEndAddress();
-    }
-
-    constexpr void Add(const KMemoryBlock& added_block) {
-        ASSERT(added_block.GetNumPages() > 0);
-        ASSERT(this->GetAddress() + added_block.GetSize() - 1 <
-               this->GetEndAddress() + added_block.GetSize() - 1);
-
-        m_num_pages += added_block.GetNumPages();
-        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
-            m_disable_merge_attribute | added_block.m_disable_merge_attribute);
-        m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
-    }
-
-    constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a,
-                          bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
-        ASSERT(m_original_permission == KMemoryPermission::None);
-        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
-
-        m_memory_state = s;
-        m_permission = p;
-        m_attribute = static_cast<KMemoryAttribute>(
-            a | (m_attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
-        if (set_disable_merge_attr && set_mask != 0) {
-            m_disable_merge_attribute = m_disable_merge_attribute |
-                                        static_cast<KMemoryBlockDisableMergeAttribute>(set_mask);
-        }
-        if (clear_mask != 0) {
-            m_disable_merge_attribute = m_disable_merge_attribute &
-                                        static_cast<KMemoryBlockDisableMergeAttribute>(~clear_mask);
-        }
-    }
-
-    constexpr void Split(KMemoryBlock* block, VAddr addr) {
-        ASSERT(this->GetAddress() < addr);
-        ASSERT(this->Contains(addr));
-        ASSERT(Common::IsAligned(addr, PageSize));
-
-        block->m_address = m_address;
-        block->m_num_pages = (addr - this->GetAddress()) / PageSize;
-        block->m_memory_state = m_memory_state;
-        block->m_ipc_lock_count = m_ipc_lock_count;
-        block->m_device_use_count = m_device_use_count;
-        block->m_permission = m_permission;
-        block->m_original_permission = m_original_permission;
-        block->m_attribute = m_attribute;
-        block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
-            m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllLeft);
-        block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
-        block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
-        block->m_device_disable_merge_right_count = 0;
-
-        m_address = addr;
-        m_num_pages -= block->m_num_pages;
-
-        m_ipc_disable_merge_count = 0;
-        m_device_disable_merge_left_count = 0;
-        m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
-            m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute::AllRight);
-    }
-
-    constexpr void UpdateDeviceDisableMergeStateForShareLeft(
-        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
-        if (left) {
-            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
-                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceLeft);
-            const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
-            ASSERT(new_device_disable_merge_left_count > 0);
-        }
-    }
-
-    constexpr void UpdateDeviceDisableMergeStateForShareRight(
-        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
-        if (right) {
-            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
-                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::DeviceRight);
-            const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
-            ASSERT(new_device_disable_merge_right_count > 0);
-        }
-    }
-
-    constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left,
-                                                         bool right) {
-        this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
-        this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
-    }
-
-    constexpr void ShareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
-                                 bool right) {
-        // We must either be shared or have a zero lock count.
-        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared ||
-               m_device_use_count == 0);
-
-        // Share.
-        const u16 new_count = ++m_device_use_count;
-        ASSERT(new_count > 0);
-
-        m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::DeviceShared);
-
-        this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
-    }
-
-    constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(
-        [[maybe_unused]] KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
-
-        if (left) {
-            if (!m_device_disable_merge_left_count) {
-                return;
-            }
-            --m_device_disable_merge_left_count;
-        }
-
-        m_device_disable_merge_left_count =
-            std::min(m_device_disable_merge_left_count, m_device_use_count);
-
-        if (m_device_disable_merge_left_count == 0) {
-            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
-                m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceLeft);
-        }
-    }
-
-    constexpr void UpdateDeviceDisableMergeStateForUnshareRight(
-        [[maybe_unused]] KMemoryPermission new_perm, [[maybe_unused]] bool left, bool right) {
-        if (right) {
-            const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
-            ASSERT(old_device_disable_merge_right_count > 0);
-            if (old_device_disable_merge_right_count == 1) {
-                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
-                    m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::DeviceRight);
-            }
-        }
-    }
-
-    constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left,
-                                                           bool right) {
-        this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
-        this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
-    }
-
-    constexpr void UnshareToDevice([[maybe_unused]] KMemoryPermission new_perm, bool left,
-                                   bool right) {
-        // We must be shared.
-        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
-
-        // Unshare.
-        const u16 old_count = m_device_use_count--;
-        ASSERT(old_count > 0);
-
-        if (old_count == 1) {
-            m_attribute =
-                static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
-        }
-
-        this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
-    }
-
-    constexpr void UnshareToDeviceRight([[maybe_unused]] KMemoryPermission new_perm, bool left,
-                                        bool right) {
-
-        // We must be shared.
-        ASSERT((m_attribute & KMemoryAttribute::DeviceShared) == KMemoryAttribute::DeviceShared);
-
-        // Unshare.
-        const u16 old_count = m_device_use_count--;
-        ASSERT(old_count > 0);
-
-        if (old_count == 1) {
-            m_attribute =
-                static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::DeviceShared);
-        }
-
-        this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
-    }
-
-    constexpr void LockForIpc(KMemoryPermission new_perm, bool left, [[maybe_unused]] bool right) {
-        // We must either be locked or have a zero lock count.
-        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked ||
-               m_ipc_lock_count == 0);
-
-        // Lock.
-        const u16 new_lock_count = ++m_ipc_lock_count;
-        ASSERT(new_lock_count > 0);
-
-        // If this is our first lock, update our permissions.
-        if (new_lock_count == 1) {
-            ASSERT(m_original_permission == KMemoryPermission::None);
-            ASSERT((m_permission | new_perm | KMemoryPermission::NotMapped) ==
-                   (m_permission | KMemoryPermission::NotMapped));
-            ASSERT((m_permission & KMemoryPermission::UserExecute) !=
-                       KMemoryPermission::UserExecute ||
-                   (new_perm == KMemoryPermission::UserRead));
-            m_original_permission = m_permission;
-            m_permission = static_cast<KMemoryPermission>(
-                (new_perm & KMemoryPermission::IpcLockChangeMask) |
-                (m_original_permission & ~KMemoryPermission::IpcLockChangeMask));
-        }
-        m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute::IpcLocked);
-
-        if (left) {
-            m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
-                m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute::IpcLeft);
-            const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
-            ASSERT(new_ipc_disable_merge_count > 0);
-        }
-    }
-
-    constexpr void UnlockForIpc([[maybe_unused]] KMemoryPermission new_perm, bool left,
-                                [[maybe_unused]] bool right) {
-        // We must be locked.
-        ASSERT((m_attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::IpcLocked);
-
-        // Unlock.
-        const u16 old_lock_count = m_ipc_lock_count--;
-        ASSERT(old_lock_count > 0);
-
-        // If this is our last unlock, update our permissions.
-        if (old_lock_count == 1) {
-            ASSERT(m_original_permission != KMemoryPermission::None);
-            m_permission = m_original_permission;
-            m_original_permission = KMemoryPermission::None;
-            m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute::IpcLocked);
-        }
-
-        if (left) {
-            const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
-            ASSERT(old_ipc_disable_merge_count > 0);
-            if (old_ipc_disable_merge_count == 1) {
-                m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(
-                    m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute::IpcLeft);
-            }
-        }
-    }
-
-    constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
-        return m_disable_merge_attribute;
+        return state == rhs.state && perm == rhs.perm && original_perm == rhs.original_perm &&
+               attribute == rhs.attribute && ipc_lock_count == rhs.ipc_lock_count &&
+               device_use_count == rhs.device_use_count;
+    }
+
+    constexpr bool Contains(VAddr start) const {
+        return GetAddress() <= start && start <= GetEndAddress();
+    }
+
+    constexpr void Add(std::size_t count) {
+        ASSERT(count > 0);
+        ASSERT(GetAddress() + count * PageSize - 1 < GetEndAddress() + count * PageSize - 1);
+
+        num_pages += count;
+    }
+
+    constexpr void Update(KMemoryState new_state, KMemoryPermission new_perm,
+                          KMemoryAttribute new_attribute) {
+        ASSERT(original_perm == KMemoryPermission::None);
+        ASSERT((attribute & KMemoryAttribute::IpcLocked) == KMemoryAttribute::None);
+
+        state = new_state;
+        perm = new_perm;
+
+        attribute = static_cast<KMemoryAttribute>(
+            new_attribute |
+            (attribute & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)));
+    }
+
+    constexpr KMemoryBlock Split(VAddr split_addr) {
+        ASSERT(GetAddress() < split_addr);
+        ASSERT(Contains(split_addr));
+        ASSERT(Common::IsAligned(split_addr, PageSize));
+
+        KMemoryBlock block;
+        block.addr = addr;
+        block.num_pages = (split_addr - GetAddress()) / PageSize;
+        block.state = state;
+        block.ipc_lock_count = ipc_lock_count;
+        block.device_use_count = device_use_count;
+        block.perm = perm;
+        block.original_perm = original_perm;
+        block.attribute = attribute;
+
+        addr = split_addr;
+        num_pages -= block.num_pages;
+
+        return block;
     }
 };
 static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
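Note: the value-returning Split() restored above carves the front of a block off into a new KMemoryBlock and keeps the tail in place. A standalone sketch of the arithmetic and its invariants; `BlockSketch` and the local `PageSize` are stand-ins, not the kernel's types:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;

struct BlockSketch {
    std::uint64_t addr{};
    std::size_t num_pages{};

    // Splits [addr, addr + num_pages * PageSize) at a page-aligned interior
    // address; returns the front part, this block keeps the tail.
    BlockSketch Split(std::uint64_t split_addr) {
        assert(addr < split_addr);
        assert(split_addr % PageSize == 0);
        assert(split_addr < addr + num_pages * PageSize);

        BlockSketch front{addr, (split_addr - addr) / PageSize};
        num_pages -= front.num_pages;
        addr = split_addr;
        return front;
    }
};
```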
@@ -2,336 +2,221 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include "core/hle/kernel/k_memory_block_manager.h"
+#include "core/hle/kernel/memory_types.h"
 
 namespace Kernel {
 
-KMemoryBlockManager::KMemoryBlockManager() = default;
-
-Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager) {
-    // Allocate a block to encapsulate the address space, insert it into the tree.
-    KMemoryBlock* start_block = slab_manager->Allocate();
-    R_UNLESS(start_block != nullptr, ResultOutOfResource);
-
-    // Set our start and end.
-    m_start_address = st;
-    m_end_address = nd;
-    ASSERT(Common::IsAligned(m_start_address, PageSize));
-    ASSERT(Common::IsAligned(m_end_address, PageSize));
-
-    // Initialize and insert the block.
-    start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize,
-                            KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
-    m_memory_block_tree.insert(*start_block);
-
-    R_SUCCEED();
+KMemoryBlockManager::KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_)
+    : start_addr{start_addr_}, end_addr{end_addr_} {
+    const u64 num_pages{(end_addr - start_addr) / PageSize};
+    memory_block_tree.emplace_back(start_addr, num_pages, KMemoryState::Free,
+                                   KMemoryPermission::None, KMemoryAttribute::None);
 }
 
-void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
-                                   HostUnmapCallback&& host_unmap_callback) {
-    // Erase every block until we have none left.
-    auto it = m_memory_block_tree.begin();
-    while (it != m_memory_block_tree.end()) {
-        KMemoryBlock* block = std::addressof(*it);
-        it = m_memory_block_tree.erase(it);
-        slab_manager->Free(block);
-        host_unmap_callback(block->GetAddress(), block->GetSize());
+KMemoryBlockManager::iterator KMemoryBlockManager::FindIterator(VAddr addr) {
+    auto node{memory_block_tree.begin()};
+    while (node != end()) {
+        const VAddr node_end_addr{node->GetNumPages() * PageSize + node->GetAddress()};
+        if (node->GetAddress() <= addr && node_end_addr - 1 >= addr) {
+            return node;
+        }
+        node = std::next(node);
     }
-
-    ASSERT(m_memory_block_tree.empty());
+    return end();
 }
 
-VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, size_t region_num_pages,
-                                        size_t num_pages, size_t alignment, size_t offset,
-                                        size_t guard_pages) const {
-    if (num_pages > 0) {
-        const VAddr region_end = region_start + region_num_pages * PageSize;
-        const VAddr region_last = region_end - 1;
-        for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend();
-             it++) {
-            const KMemoryInfo info = it->GetMemoryInfo();
-            if (region_last < info.GetAddress()) {
-                break;
-            }
-            if (info.m_state != KMemoryState::Free) {
-                continue;
-            }
-
-            VAddr area = (info.GetAddress() <= region_start) ? region_start : info.GetAddress();
-            area += guard_pages * PageSize;
-
-            const VAddr offset_area = Common::AlignDown(area, alignment) + offset;
-            area = (area <= offset_area) ? offset_area : offset_area + alignment;
-
-            const VAddr area_end = area + num_pages * PageSize + guard_pages * PageSize;
-            const VAddr area_last = area_end - 1;
-
-            if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
-                area_last <= info.GetLastAddress()) {
-                return area;
-            }
+VAddr KMemoryBlockManager::FindFreeArea(VAddr region_start, std::size_t region_num_pages,
+                                        std::size_t num_pages, std::size_t align,
+                                        std::size_t offset, std::size_t guard_pages) {
+    if (num_pages == 0) {
+        return {};
+    }
+
+    const VAddr region_end{region_start + region_num_pages * PageSize};
+    const VAddr region_last{region_end - 1};
+    for (auto it{FindIterator(region_start)}; it != memory_block_tree.cend(); it++) {
+        const auto info{it->GetMemoryInfo()};
+        if (region_last < info.GetAddress()) {
+            break;
+        }
+
+        if (info.state != KMemoryState::Free) {
+            continue;
+        }
+
+        VAddr area{(info.GetAddress() <= region_start) ? region_start : info.GetAddress()};
+        area += guard_pages * PageSize;
+
+        const VAddr offset_area{Common::AlignDown(area, align) + offset};
+        area = (area <= offset_area) ? offset_area : offset_area + align;
+
+        const VAddr area_end{area + num_pages * PageSize + guard_pages * PageSize};
+        const VAddr area_last{area_end - 1};
+
+        if (info.GetAddress() <= area && area < area_last && area_last <= region_last &&
+            area_last <= info.GetLastAddress()) {
+            return area;
         }
     }
 
     return {};
 }
 
-void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator,
-                                            VAddr address, size_t num_pages) {
-    // Find the iterator now that we've updated.
-    iterator it = this->FindIterator(address);
-    if (address != m_start_address) {
-        it--;
-    }
-
-    // Coalesce blocks that we can.
-    while (true) {
-        iterator prev = it++;
-        if (it == m_memory_block_tree.end()) {
-            break;
-        }
-
-        if (prev->CanMergeWith(*it)) {
-            KMemoryBlock* block = std::addressof(*it);
-            m_memory_block_tree.erase(it);
-            prev->Add(*block);
-            allocator->Free(block);
-            it = prev;
-        }
-
-        if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) {
-            break;
-        }
-    }
-}
-
-void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
-                                 size_t num_pages, KMemoryState state, KMemoryPermission perm,
-                                 KMemoryAttribute attr,
-                                 KMemoryBlockDisableMergeAttribute set_disable_attr,
-                                 KMemoryBlockDisableMergeAttribute clear_disable_attr) {
-    // Ensure for auditing that we never end up with an invalid tree.
-    KScopedMemoryBlockManagerAuditor auditor(this);
-    ASSERT(Common::IsAligned(address, PageSize));
-    ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
-           KMemoryAttribute::None);
-
-    VAddr cur_address = address;
-    size_t remaining_pages = num_pages;
-    iterator it = this->FindIterator(address);
-
-    while (remaining_pages > 0) {
-        const size_t remaining_size = remaining_pages * PageSize;
-        KMemoryInfo cur_info = it->GetMemoryInfo();
-        if (it->HasProperties(state, perm, attr)) {
-            // If we already have the right properties, just advance.
-            if (cur_address + remaining_size < cur_info.GetEndAddress()) {
-                remaining_pages = 0;
-                cur_address += remaining_size;
-            } else {
-                remaining_pages =
-                    (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
-                cur_address = cur_info.GetEndAddress();
-            }
-        } else {
-            // If we need to, create a new block before and insert it.
-            if (cur_info.GetAddress() != cur_address) {
-                KMemoryBlock* new_block = allocator->Allocate();
-
-                it->Split(new_block, cur_address);
-                it = m_memory_block_tree.insert(*new_block);
-                it++;
-
-                cur_info = it->GetMemoryInfo();
-                cur_address = cur_info.GetAddress();
-            }
-
-            // If we need to, create a new block after and insert it.
-            if (cur_info.GetSize() > remaining_size) {
-                KMemoryBlock* new_block = allocator->Allocate();
-
-                it->Split(new_block, cur_address + remaining_size);
-                it = m_memory_block_tree.insert(*new_block);
-
-                cur_info = it->GetMemoryInfo();
-            }
-
-            // Update block state.
-            it->Update(state, perm, attr, cur_address == address, static_cast<u8>(set_disable_attr),
-                       static_cast<u8>(clear_disable_attr));
-            cur_address += cur_info.GetSize();
-            remaining_pages -= cur_info.GetNumPages();
-        }
-        it++;
-    }
-
-    this->CoalesceForUpdate(allocator, address, num_pages);
-}
-
-void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator,
-                                        VAddr address, size_t num_pages, KMemoryState test_state,
-                                        KMemoryPermission test_perm, KMemoryAttribute test_attr,
-                                        KMemoryState state, KMemoryPermission perm,
-                                        KMemoryAttribute attr) {
-    // Ensure for auditing that we never end up with an invalid tree.
+void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
+                                 KMemoryPermission prev_perm, KMemoryAttribute prev_attribute,
+                                 KMemoryState state, KMemoryPermission perm,
+                                 KMemoryAttribute attribute) {
+    const VAddr update_end_addr{addr + num_pages * PageSize};
+    iterator node{memory_block_tree.begin()};
+
+    prev_attribute |= KMemoryAttribute::IpcAndDeviceMapped;
+
+    while (node != memory_block_tree.end()) {
+        KMemoryBlock* block{&(*node)};
+        iterator next_node{std::next(node)};
+        const VAddr cur_addr{block->GetAddress()};
+        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+
+        if (addr < cur_end_addr && cur_addr < update_end_addr) {
+            if (!block->HasProperties(prev_state, prev_perm, prev_attribute)) {
+                node = next_node;
+                continue;
+            }
+
+            iterator new_node{node};
+            if (addr > cur_addr) {
+                memory_block_tree.insert(node, block->Split(addr));
+            }
+
+            if (update_end_addr < cur_end_addr) {
+                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
+            }
+
+            new_node->Update(state, perm, attribute);
+
+            MergeAdjacent(new_node, next_node);
+        }
+
+        if (cur_end_addr - 1 >= update_end_addr - 1) {
+            break;
+        }
+
+        node = next_node;
+    }
+}
+
+void KMemoryBlockManager::Update(VAddr addr, std::size_t num_pages, KMemoryState state,
+                                 KMemoryPermission perm, KMemoryAttribute attribute) {
+    const VAddr update_end_addr{addr + num_pages * PageSize};
+    iterator node{memory_block_tree.begin()};
+
+    while (node != memory_block_tree.end()) {
+        KMemoryBlock* block{&(*node)};
+        iterator next_node{std::next(node)};
+        const VAddr cur_addr{block->GetAddress()};
+        const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
+
+        if (addr < cur_end_addr && cur_addr < update_end_addr) {
+            iterator new_node{node};
+
+            if (addr > cur_addr) {
+                memory_block_tree.insert(node, block->Split(addr));
+            }
+
+            if (update_end_addr < cur_end_addr) {
+                new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
+            }
+
+            new_node->Update(state, perm, attribute);
+
+            MergeAdjacent(new_node, next_node);
+        }
+
+        if (cur_end_addr - 1 >= update_end_addr - 1) {
+            break;
+        }
+
+        node = next_node;
+    }
+}
+
+void KMemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
|
|
||||||
KScopedMemoryBlockManagerAuditor auditor(this);
|
|
||||||
ASSERT(Common::IsAligned(address, PageSize));
|
|
||||||
ASSERT((attr & (KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared)) ==
|
|
||||||
KMemoryAttribute::None);
|
|
||||||
|
|
||||||
VAddr cur_address = address;
|
|
||||||
size_t remaining_pages = num_pages;
|
|
||||||
iterator it = this->FindIterator(address);
|
|
||||||
|
|
||||||
while (remaining_pages > 0) {
|
|
||||||
const size_t remaining_size = remaining_pages * PageSize;
|
|
||||||
KMemoryInfo cur_info = it->GetMemoryInfo();
|
|
||||||
if (it->HasProperties(test_state, test_perm, test_attr) &&
|
|
||||||
!it->HasProperties(state, perm, attr)) {
|
|
||||||
// If we need to, create a new block before and insert it.
|
|
||||||
if (cur_info.GetAddress() != cur_address) {
|
|
||||||
KMemoryBlock* new_block = allocator->Allocate();
|
|
||||||
|
|
||||||
it->Split(new_block, cur_address);
|
|
||||||
it = m_memory_block_tree.insert(*new_block);
|
|
||||||
it++;
|
|
||||||
|
|
||||||
cur_info = it->GetMemoryInfo();
|
|
||||||
cur_address = cur_info.GetAddress();
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we need to, create a new block after and insert it.
|
|
||||||
if (cur_info.GetSize() > remaining_size) {
|
|
||||||
KMemoryBlock* new_block = allocator->Allocate();
|
|
||||||
|
|
||||||
it->Split(new_block, cur_address + remaining_size);
|
|
||||||
it = m_memory_block_tree.insert(*new_block);
|
|
||||||
|
|
||||||
cur_info = it->GetMemoryInfo();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update block state.
|
|
||||||
it->Update(state, perm, attr, false, 0, 0);
|
|
||||||
cur_address += cur_info.GetSize();
|
|
||||||
remaining_pages -= cur_info.GetNumPages();
|
|
||||||
} else {
|
|
||||||
// If we already have the right properties, just advance.
|
|
||||||
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
|
|
||||||
remaining_pages = 0;
|
|
||||||
cur_address += remaining_size;
|
|
||||||
} else {
|
|
||||||
remaining_pages =
|
|
||||||
(cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
|
|
||||||
cur_address = cur_info.GetEndAddress();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
it++;
|
|
||||||
}
|
|
||||||
|
|
||||||
this->CoalesceForUpdate(allocator, address, num_pages);
|
|
||||||
}
|
|
||||||
|
|
||||||
void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
|
|
||||||
size_t num_pages, MemoryBlockLockFunction lock_func,
|
|
||||||
KMemoryPermission perm) {
|
KMemoryPermission perm) {
|
||||||
// Ensure for auditing that we never end up with an invalid tree.
|
const VAddr update_end_addr{addr + num_pages * PageSize};
|
||||||
KScopedMemoryBlockManagerAuditor auditor(this);
|
iterator node{memory_block_tree.begin()};
|
||||||
ASSERT(Common::IsAligned(address, PageSize));
|
|
||||||
|
|
||||||
VAddr cur_address = address;
|
while (node != memory_block_tree.end()) {
|
||||||
size_t remaining_pages = num_pages;
|
KMemoryBlock* block{&(*node)};
|
||||||
iterator it = this->FindIterator(address);
|
iterator next_node{std::next(node)};
|
||||||
|
const VAddr cur_addr{block->GetAddress()};
|
||||||
|
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
|
||||||
|
|
||||||
const VAddr end_address = address + (num_pages * PageSize);
|
if (addr < cur_end_addr && cur_addr < update_end_addr) {
|
||||||
|
iterator new_node{node};
|
||||||
|
|
||||||
while (remaining_pages > 0) {
|
if (addr > cur_addr) {
|
||||||
const size_t remaining_size = remaining_pages * PageSize;
|
memory_block_tree.insert(node, block->Split(addr));
|
||||||
KMemoryInfo cur_info = it->GetMemoryInfo();
|
}
|
||||||
|
|
||||||
// If we need to, create a new block before and insert it.
|
if (update_end_addr < cur_end_addr) {
|
||||||
if (cur_info.m_address != cur_address) {
|
new_node = memory_block_tree.insert(node, block->Split(update_end_addr));
|
||||||
KMemoryBlock* new_block = allocator->Allocate();
|
}
|
||||||
|
|
||||||
it->Split(new_block, cur_address);
|
lock_func(new_node, perm);
|
||||||
it = m_memory_block_tree.insert(*new_block);
|
|
||||||
it++;
|
|
||||||
|
|
||||||
cur_info = it->GetMemoryInfo();
|
MergeAdjacent(new_node, next_node);
|
||||||
cur_address = cur_info.GetAddress();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (cur_info.GetSize() > remaining_size) {
|
if (cur_end_addr - 1 >= update_end_addr - 1) {
|
||||||
// If we need to, create a new block after and insert it.
|
break;
|
||||||
KMemoryBlock* new_block = allocator->Allocate();
|
|
||||||
|
|
||||||
it->Split(new_block, cur_address + remaining_size);
|
|
||||||
it = m_memory_block_tree.insert(*new_block);
|
|
||||||
|
|
||||||
cur_info = it->GetMemoryInfo();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Call the locked update function.
|
node = next_node;
|
||||||
(std::addressof(*it)->*lock_func)(perm, cur_info.GetAddress() == address,
|
|
||||||
cur_info.GetEndAddress() == end_address);
|
|
||||||
cur_address += cur_info.GetSize();
|
|
||||||
remaining_pages -= cur_info.GetNumPages();
|
|
||||||
it++;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
this->CoalesceForUpdate(allocator, address, num_pages);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Debug.
|
void KMemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
|
||||||
bool KMemoryBlockManager::CheckState() const {
|
const_iterator it{FindIterator(start)};
|
||||||
// Loop over every block, ensuring that we are sorted and coalesced.
|
KMemoryInfo info{};
|
||||||
auto it = m_memory_block_tree.cbegin();
|
do {
|
||||||
auto prev = it++;
|
info = it->GetMemoryInfo();
|
||||||
while (it != m_memory_block_tree.cend()) {
|
func(info);
|
||||||
const KMemoryInfo prev_info = prev->GetMemoryInfo();
|
it = std::next(it);
|
||||||
const KMemoryInfo cur_info = it->GetMemoryInfo();
|
} while (info.addr + info.size - 1 < end - 1 && it != cend());
|
||||||
|
}
|
||||||
|
|
||||||
// Sequential blocks which can be merged should be merged.
|
void KMemoryBlockManager::MergeAdjacent(iterator it, iterator& next_it) {
|
||||||
if (prev->CanMergeWith(*it)) {
|
KMemoryBlock* block{&(*it)};
|
||||||
return false;
|
|
||||||
|
auto EraseIt = [&](const iterator it_to_erase) {
|
||||||
|
if (next_it == it_to_erase) {
|
||||||
|
next_it = std::next(next_it);
|
||||||
}
|
}
|
||||||
|
memory_block_tree.erase(it_to_erase);
|
||||||
|
};
|
||||||
|
|
||||||
// Sequential blocks should be sequential.
|
if (it != memory_block_tree.begin()) {
|
||||||
if (prev_info.GetEndAddress() != cur_info.GetAddress()) {
|
KMemoryBlock* prev{&(*std::prev(it))};
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the block is ipc locked, it must have a count.
|
if (block->HasSameProperties(*prev)) {
|
||||||
if ((cur_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
|
const iterator prev_it{std::prev(it)};
|
||||||
cur_info.m_ipc_lock_count == 0) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the block is device shared, it must have a count.
|
prev->Add(block->GetNumPages());
|
||||||
if ((cur_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
|
EraseIt(it);
|
||||||
cur_info.m_device_use_count == 0) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Advance the iterator.
|
it = prev_it;
|
||||||
prev = it++;
|
block = prev;
|
||||||
}
|
|
||||||
|
|
||||||
// Our loop will miss checking the last block, potentially, so check it.
|
|
||||||
if (prev != m_memory_block_tree.cend()) {
|
|
||||||
const KMemoryInfo prev_info = prev->GetMemoryInfo();
|
|
||||||
// If the block is ipc locked, it must have a count.
|
|
||||||
if ((prev_info.m_attribute & KMemoryAttribute::IpcLocked) != KMemoryAttribute::None &&
|
|
||||||
prev_info.m_ipc_lock_count == 0) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the block is device shared, it must have a count.
|
|
||||||
if ((prev_info.m_attribute & KMemoryAttribute::DeviceShared) != KMemoryAttribute::None &&
|
|
||||||
prev_info.m_device_use_count == 0) {
|
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
if (it != cend()) {
|
||||||
|
const KMemoryBlock* const next{&(*std::next(it))};
|
||||||
|
|
||||||
|
if (block->HasSameProperties(*next)) {
|
||||||
|
block->Add(next->GetNumPages());
|
||||||
|
EraseIt(std::next(it));
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace Kernel
|
} // namespace Kernel
|
||||||
|
|
|
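Both sides of the diff above implement the same update discipline over a sorted run of memory blocks: clamp the request to each overlapping block, split off any head or tail that falls outside it, retag the overlapping middle, then merge neighbours whose properties now match. The following standalone sketch is illustrative only, not yuzu code; Block, Tree, and UpdateRange are hypothetical names, and sizes are assumed non-zero.

// Minimal sketch of the split/retag/merge pattern used by Update()/MergeAdjacent().
#include <cstdint>
#include <iterator>
#include <list>

struct Block {
    std::uint64_t addr;
    std::uint64_t size;
    int state; // stands in for KMemoryState/permission/attribute

    bool SameProps(const Block& o) const { return state == o.state; }
};

using Tree = std::list<Block>;

void UpdateRange(Tree& tree, std::uint64_t addr, std::uint64_t size, int state) {
    const std::uint64_t end = addr + size;
    for (auto it = tree.begin(); it != tree.end(); ++it) {
        const std::uint64_t cur_end = it->addr + it->size;
        if (addr >= cur_end || it->addr >= end) {
            continue; // no overlap with this block
        }
        // Split off the head that lies before the updated range.
        if (addr > it->addr) {
            tree.insert(it, Block{it->addr, addr - it->addr, it->state});
            it->size -= addr - it->addr;
            it->addr = addr;
        }
        // Split off the tail that lies after the updated range.
        if (end < cur_end) {
            const Block tail{end, cur_end - end, it->state};
            it->size -= tail.size;
            tree.insert(std::next(it), tail);
        }
        it->state = state; // retag the overlapping middle
    }
    // Coalesce adjacent blocks whose properties now match.
    for (auto it = tree.begin(); it != tree.end();) {
        auto next = std::next(it);
        if (next != tree.end() && it->addr + it->size == next->addr && it->SameProps(*next)) {
            it->size += next->size;
            tree.erase(next);
        } else {
            it = next;
        }
    }
}

Splitting before retagging preserves the invariant that a block never straddles a state boundary; the final pass restores the second invariant, that no two adjacent blocks are mergeable.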
@@ -4,154 +4,63 @@
 #pragma once

 #include <functional>
+#include <list>

-#include "common/common_funcs.h"
 #include "common/common_types.h"
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_memory_block.h"

 namespace Kernel {

-class KMemoryBlockManagerUpdateAllocator {
-public:
-    static constexpr size_t MaxBlocks = 2;
-
-private:
-    KMemoryBlock* m_blocks[MaxBlocks];
-    size_t m_index;
-    KMemoryBlockSlabManager* m_slab_manager;
-
-private:
-    Result Initialize(size_t num_blocks) {
-        // Check num blocks.
-        ASSERT(num_blocks <= MaxBlocks);
-
-        // Set index.
-        m_index = MaxBlocks - num_blocks;
-
-        // Allocate the blocks.
-        for (size_t i = 0; i < num_blocks && i < MaxBlocks; ++i) {
-            m_blocks[m_index + i] = m_slab_manager->Allocate();
-            R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
-        }
-
-        R_SUCCEED();
-    }
-
-public:
-    KMemoryBlockManagerUpdateAllocator(Result* out_result, KMemoryBlockSlabManager* sm,
-                                       size_t num_blocks = MaxBlocks)
-        : m_blocks(), m_index(MaxBlocks), m_slab_manager(sm) {
-        *out_result = this->Initialize(num_blocks);
-    }
-
-    ~KMemoryBlockManagerUpdateAllocator() {
-        for (const auto& block : m_blocks) {
-            if (block != nullptr) {
-                m_slab_manager->Free(block);
-            }
-        }
-    }
-
-    KMemoryBlock* Allocate() {
-        ASSERT(m_index < MaxBlocks);
-        ASSERT(m_blocks[m_index] != nullptr);
-        KMemoryBlock* block = nullptr;
-        std::swap(block, m_blocks[m_index++]);
-        return block;
-    }
-
-    void Free(KMemoryBlock* block) {
-        ASSERT(m_index <= MaxBlocks);
-        ASSERT(block != nullptr);
-        if (m_index == 0) {
-            m_slab_manager->Free(block);
-        } else {
-            m_blocks[--m_index] = block;
-        }
-    }
-};
-
 class KMemoryBlockManager final {
 public:
-    using MemoryBlockTree =
-        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryBlock>::TreeType<KMemoryBlock>;
-    using MemoryBlockLockFunction = void (KMemoryBlock::*)(KMemoryPermission new_perm, bool left,
-                                                           bool right);
+    using MemoryBlockTree = std::list<KMemoryBlock>;
     using iterator = MemoryBlockTree::iterator;
     using const_iterator = MemoryBlockTree::const_iterator;

 public:
-    KMemoryBlockManager();
-
-    using HostUnmapCallback = std::function<void(VAddr, u64)>;
-
-    Result Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManager* slab_manager);
-    void Finalize(KMemoryBlockSlabManager* slab_manager, HostUnmapCallback&& host_unmap_callback);
+    KMemoryBlockManager(VAddr start_addr_, VAddr end_addr_);

     iterator end() {
-        return m_memory_block_tree.end();
+        return memory_block_tree.end();
     }
     const_iterator end() const {
-        return m_memory_block_tree.end();
+        return memory_block_tree.end();
     }
     const_iterator cend() const {
-        return m_memory_block_tree.cend();
+        return memory_block_tree.cend();
     }

-    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
-                       size_t alignment, size_t offset, size_t guard_pages) const;
+    iterator FindIterator(VAddr addr);

-    void Update(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
-                KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr,
-                KMemoryBlockDisableMergeAttribute set_disable_attr,
-                KMemoryBlockDisableMergeAttribute clear_disable_attr);
-    void UpdateLock(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address, size_t num_pages,
-                    MemoryBlockLockFunction lock_func, KMemoryPermission perm);
-
-    void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
-                       size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm,
-                       KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm,
-                       KMemoryAttribute attr);
-
-    iterator FindIterator(VAddr address) const {
-        return m_memory_block_tree.find(KMemoryBlock(
-            address, 1, KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None));
-    }
-
-    const KMemoryBlock* FindBlock(VAddr address) const {
-        if (const_iterator it = this->FindIterator(address); it != m_memory_block_tree.end()) {
-            return std::addressof(*it);
-        }
-
-        return nullptr;
-    }
-
-    // Debug.
-    bool CheckState() const;
+    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
+                       std::size_t align, std::size_t offset, std::size_t guard_pages);
+
+    void Update(VAddr addr, std::size_t num_pages, KMemoryState prev_state,
+                KMemoryPermission prev_perm, KMemoryAttribute prev_attribute, KMemoryState state,
+                KMemoryPermission perm, KMemoryAttribute attribute);
+
+    void Update(VAddr addr, std::size_t num_pages, KMemoryState state,
+                KMemoryPermission perm = KMemoryPermission::None,
+                KMemoryAttribute attribute = KMemoryAttribute::None);
+
+    using LockFunc = std::function<void(iterator, KMemoryPermission)>;
+    void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
+                    KMemoryPermission perm);
+
+    using IterateFunc = std::function<void(const KMemoryInfo&)>;
+    void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
+
+    KMemoryBlock& FindBlock(VAddr addr) {
+        return *FindIterator(addr);
+    }

 private:
-    void CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator* allocator, VAddr address,
-                           size_t num_pages);
+    void MergeAdjacent(iterator it, iterator& next_it);

-    MemoryBlockTree m_memory_block_tree;
-    VAddr m_start_address{};
-    VAddr m_end_address{};
-};
+    [[maybe_unused]] const VAddr start_addr;
+    [[maybe_unused]] const VAddr end_addr;

-class KScopedMemoryBlockManagerAuditor {
-private:
-    KMemoryBlockManager* m_manager;
-
-public:
-    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager* m) : m_manager(m) {
-        ASSERT(m_manager->CheckState());
-    }
-    explicit KScopedMemoryBlockManagerAuditor(KMemoryBlockManager& m)
-        : KScopedMemoryBlockManagerAuditor(std::addressof(m)) {}
-    ~KScopedMemoryBlockManagerAuditor() {
-        ASSERT(m_manager->CheckState());
-    }
-};
+    MemoryBlockTree memory_block_tree;
+};

 } // namespace Kernel
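The removed KMemoryBlockManagerUpdateAllocator above exists because an update can split at most one block at each end of the range, so at most two fresh KMemoryBlocks are ever needed; reserving them from the slab up front moves any out-of-resource failure to before the tree is modified. Below is a simplified standalone sketch of that reserve-then-consume pattern. The names are hypothetical, plain new/delete stands in for the slab heap, and the reservation is all-or-nothing rather than the original's counted variant.

// Sketch only: reserve a fixed number of nodes before mutating a structure,
// so the mutation itself can never fail on allocation.
#include <array>
#include <cassert>
#include <cstddef>
#include <new>

template <typename T, std::size_t MaxBlocks = 2>
class ScopedUpdateAllocator {
public:
    // Returns false if the reservation failed; the caller aborts the update
    // before touching the tree.
    bool Reserve() {
        for (auto& slot : slots_) {
            slot = new (std::nothrow) T{};
            if (slot == nullptr) {
                return false;
            }
        }
        return true;
    }

    // Hand out one of the reserved nodes; never allocates.
    T* Allocate() {
        assert(index_ < MaxBlocks && slots_[index_] != nullptr);
        T* block = slots_[index_];
        slots_[index_++] = nullptr;
        return block;
    }

    ~ScopedUpdateAllocator() {
        for (auto* slot : slots_) {
            delete slot; // return any unused reservations
        }
    }

private:
    std::array<T*, MaxBlocks> slots_{};
    std::size_t index_{0};
};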
@@ -331,7 +331,7 @@ Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pag

     // Set all the allocated memory.
     for (const auto& block : out->Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
+        std::memset(system.DeviceMemory().GetPointer(block.GetAddress()), fill_pattern,
                     block.GetSize());
     }

@@ -12,7 +12,7 @@ namespace Kernel {

 KPageBuffer* KPageBuffer::FromPhysicalAddress(Core::System& system, PAddr phys_addr) {
     ASSERT(Common::IsAligned(phys_addr, PageSize));
-    return system.DeviceMemory().GetPointer<KPageBuffer>(phys_addr);
+    return reinterpret_cast<KPageBuffer*>(system.DeviceMemory().GetPointer(phys_addr));
 }

 } // namespace Kernel
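The two hunks above trade a templated DeviceMemory::GetPointer<T>() for the older untyped accessor plus a reinterpret_cast at each call site. A minimal sketch of the templated form, with DeviceMemory reduced to a hypothetical stand-in, shows why the cast can be centralized:

// Sketch only: a typed accessor keeps the single reinterpret_cast inside the
// device-memory class, so callers state the type they expect instead of
// casting at every call site.
#include <cstdint>

class DeviceMemory {
public:
    template <typename T>
    T* GetPointer(std::uintptr_t addr) {
        return reinterpret_cast<T*>(base_ + addr);
    }

private:
    char* base_{nullptr}; // would point at the host mapping of device memory
};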
File diff suppressed because it is too large
@@ -9,10 +9,8 @@
 #include "common/common_types.h"
 #include "common/page_table.h"
 #include "core/file_sys/program_metadata.h"
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_memory_block.h"
-#include "core/hle/kernel/k_memory_block_manager.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/result.h"
@@ -36,66 +34,58 @@ public:
     ~KPageTable();

     Result InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
-                                VAddr code_addr, size_t code_size,
-                                KMemoryBlockSlabManager* mem_block_slab_manager,
-                                KMemoryManager::Pool pool);
-
-    void Finalize();
-
-    Result MapProcessCode(VAddr addr, size_t pages_count, KMemoryState state,
+                                VAddr code_addr, std::size_t code_size, KMemoryManager::Pool pool);
+    Result MapProcessCode(VAddr addr, std::size_t pages_count, KMemoryState state,
                           KMemoryPermission perm);
-    Result MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size);
-    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
+    Result MapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size);
+    Result UnmapCodeMemory(VAddr dst_address, VAddr src_address, std::size_t size,
                            ICacheInvalidationStrategy icache_invalidation_strategy);
-    Result UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
+    Result UnmapProcessMemory(VAddr dst_addr, std::size_t size, KPageTable& src_page_table,
                               VAddr src_addr);
-    Result MapPhysicalMemory(VAddr addr, size_t size);
-    Result UnmapPhysicalMemory(VAddr addr, size_t size);
-    Result MapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
-    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, size_t size);
+    Result MapPhysicalMemory(VAddr addr, std::size_t size);
+    Result UnmapPhysicalMemory(VAddr addr, std::size_t size);
+    Result MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
+    Result UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size);
     Result MapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state,
                     KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
+    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
                     KMemoryState state, KMemoryPermission perm) {
-        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
-                                this->GetRegionAddress(state),
-                                this->GetRegionSize(state) / PageSize, state, perm));
+        return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+                              this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
+                              state, perm);
     }
     Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
-    Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
-    Result SetProcessMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission svc_perm);
+    Result UnmapPages(VAddr address, std::size_t num_pages, KMemoryState state);
+    Result SetProcessMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission svc_perm);
     KMemoryInfo QueryInfo(VAddr addr);
-    Result SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermission perm);
-    Result SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr);
-    Result SetMaxHeapSize(size_t size);
-    Result SetHeapSize(VAddr* out, size_t size);
-    ResultVal<VAddr> AllocateAndMapMemory(size_t needed_num_pages, size_t align, bool is_map_only,
-                                          VAddr region_start, size_t region_num_pages,
-                                          KMemoryState state, KMemoryPermission perm,
-                                          PAddr map_addr = 0);
-
-    Result LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
-                                        bool is_aligned);
-    Result LockForUnmapDeviceAddressSpace(VAddr address, size_t size);
-
-    Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
-
-    Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
-    Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
+    Result ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm);
+    Result ResetTransferMemory(VAddr addr, std::size_t size);
+    Result SetMemoryPermission(VAddr addr, std::size_t size, Svc::MemoryPermission perm);
+    Result SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask, u32 attr);
+    Result SetMaxHeapSize(std::size_t size);
+    Result SetHeapSize(VAddr* out, std::size_t size);
+    ResultVal<VAddr> AllocateAndMapMemory(std::size_t needed_num_pages, std::size_t align,
+                                          bool is_map_only, VAddr region_start,
+                                          std::size_t region_num_pages, KMemoryState state,
+                                          KMemoryPermission perm, PAddr map_addr = 0);
+    Result LockForDeviceAddressSpace(VAddr addr, std::size_t size);
+    Result UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
+    Result LockForCodeMemory(KPageGroup* out, VAddr addr, std::size_t size);
+    Result UnlockForCodeMemory(VAddr addr, std::size_t size, const KPageGroup& pg);
     Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
                                 KMemoryState state_mask, KMemoryState state,
                                 KMemoryPermission perm_mask, KMemoryPermission perm,
                                 KMemoryAttribute attr_mask, KMemoryAttribute attr);

     Common::PageTable& PageTableImpl() {
-        return *m_page_table_impl;
+        return page_table_impl;
     }

     const Common::PageTable& PageTableImpl() const {
-        return *m_page_table_impl;
+        return page_table_impl;
     }

-    bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
+    bool CanContain(VAddr addr, std::size_t size, KMemoryState state) const;

 private:
     enum class OperationType : u32 {
@@ -106,65 +96,67 @@ private:
         ChangePermissionsAndRefresh,
     };

-    static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr =
-        KMemoryAttribute::IpcLocked | KMemoryAttribute::DeviceShared;
+    static constexpr KMemoryAttribute DefaultMemoryIgnoreAttr = KMemoryAttribute::DontCareMask |
+                                                                KMemoryAttribute::IpcLocked |
+                                                                KMemoryAttribute::DeviceShared;

+    Result InitializeMemoryLayout(VAddr start, VAddr end);
     Result MapPages(VAddr addr, const KPageGroup& page_linked_list, KMemoryPermission perm);
-    Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
-                    bool is_pa_valid, VAddr region_start, size_t region_num_pages,
+    Result MapPages(VAddr* out_addr, std::size_t num_pages, std::size_t alignment, PAddr phys_addr,
+                    bool is_pa_valid, VAddr region_start, std::size_t region_num_pages,
                     KMemoryState state, KMemoryPermission perm);
     Result UnmapPages(VAddr addr, const KPageGroup& page_linked_list);
+    bool IsRegionMapped(VAddr address, u64 size);
     bool IsRegionContiguous(VAddr addr, u64 size) const;
-    void AddRegionToPages(VAddr start, size_t num_pages, KPageGroup& page_linked_list);
+    void AddRegionToPages(VAddr start, std::size_t num_pages, KPageGroup& page_linked_list);
     KMemoryInfo QueryInfoImpl(VAddr addr);
-    VAddr AllocateVirtualMemory(VAddr start, size_t region_num_pages, u64 needed_num_pages,
-                                size_t align);
-    Result Operate(VAddr addr, size_t num_pages, const KPageGroup& page_group,
+    VAddr AllocateVirtualMemory(VAddr start, std::size_t region_num_pages, u64 needed_num_pages,
+                                std::size_t align);
+    Result Operate(VAddr addr, std::size_t num_pages, const KPageGroup& page_group,
                    OperationType operation);
-    Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
-                   PAddr map_addr = 0);
+    Result Operate(VAddr addr, std::size_t num_pages, KMemoryPermission perm,
+                   OperationType operation, PAddr map_addr = 0);
     VAddr GetRegionAddress(KMemoryState state) const;
-    size_t GetRegionSize(KMemoryState state) const;
+    std::size_t GetRegionSize(KMemoryState state) const;

-    VAddr FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
-                       size_t alignment, size_t offset, size_t guard_pages);
+    VAddr FindFreeArea(VAddr region_start, std::size_t region_num_pages, std::size_t num_pages,
+                       std::size_t alignment, std::size_t offset, std::size_t guard_pages);

-    Result CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
+    Result CheckMemoryStateContiguous(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
                                       KMemoryState state_mask, KMemoryState state,
                                       KMemoryPermission perm_mask, KMemoryPermission perm,
                                       KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
-    Result CheckMemoryStateContiguous(VAddr addr, size_t size, KMemoryState state_mask,
+    Result CheckMemoryStateContiguous(VAddr addr, std::size_t size, KMemoryState state_mask,
                                       KMemoryState state, KMemoryPermission perm_mask,
                                       KMemoryPermission perm, KMemoryAttribute attr_mask,
                                       KMemoryAttribute attr) const {
-        R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
-                                                  perm, attr_mask, attr));
+        return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
                                                perm, attr_mask, attr);
     }

     Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr) const;
     Result CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
-                            KMemoryAttribute* out_attr, size_t* out_blocks_needed, VAddr addr,
-                            size_t size, KMemoryState state_mask, KMemoryState state,
+                            KMemoryAttribute* out_attr, std::size_t* out_blocks_needed, VAddr addr,
+                            std::size_t size, KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const;
-    Result CheckMemoryState(size_t* out_blocks_needed, VAddr addr, size_t size,
+    Result CheckMemoryState(std::size_t* out_blocks_needed, VAddr addr, std::size_t size,
                             KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
-        R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
-                                  state_mask, state, perm_mask, perm, attr_mask, attr,
-                                  ignore_attr));
+        return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
+                                state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
     }
-    Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
-                            KMemoryPermission perm_mask, KMemoryPermission perm,
+    Result CheckMemoryState(VAddr addr, std::size_t size, KMemoryState state_mask,
+                            KMemoryState state, KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
-        R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
-                                        attr_mask, attr, ignore_attr));
+        return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
+                                      attr_mask, attr, ignore_attr);
     }

     Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -182,13 +174,13 @@ private:
     bool IsValidPageGroup(const KPageGroup& pg, VAddr addr, size_t num_pages);

     bool IsLockedByCurrentThread() const {
-        return m_general_lock.IsLockedByCurrentThread();
+        return general_lock.IsLockedByCurrentThread();
     }

     bool IsHeapPhysicalAddress(const KMemoryLayout& layout, PAddr phys_addr) {
         ASSERT(this->IsLockedByCurrentThread());

-        return layout.IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
+        return layout.IsHeapPhysicalAddress(cached_physical_heap_region, phys_addr);
     }

     bool GetPhysicalAddressLocked(PAddr* out, VAddr virt_addr) const {
@@ -199,93 +191,95 @@ private:
         return *out != 0;
     }

-    mutable KLightLock m_general_lock;
-    mutable KLightLock m_map_physical_memory_lock;
+    mutable KLightLock general_lock;
+    mutable KLightLock map_physical_memory_lock;
+
+    std::unique_ptr<KMemoryBlockManager> block_manager;

 public:
     constexpr VAddr GetAddressSpaceStart() const {
-        return m_address_space_start;
+        return address_space_start;
     }
     constexpr VAddr GetAddressSpaceEnd() const {
-        return m_address_space_end;
+        return address_space_end;
     }
-    constexpr size_t GetAddressSpaceSize() const {
-        return m_address_space_end - m_address_space_start;
+    constexpr std::size_t GetAddressSpaceSize() const {
+        return address_space_end - address_space_start;
     }
     constexpr VAddr GetHeapRegionStart() const {
-        return m_heap_region_start;
+        return heap_region_start;
     }
     constexpr VAddr GetHeapRegionEnd() const {
-        return m_heap_region_end;
+        return heap_region_end;
     }
-    constexpr size_t GetHeapRegionSize() const {
-        return m_heap_region_end - m_heap_region_start;
+    constexpr std::size_t GetHeapRegionSize() const {
+        return heap_region_end - heap_region_start;
     }
     constexpr VAddr GetAliasRegionStart() const {
-        return m_alias_region_start;
+        return alias_region_start;
     }
     constexpr VAddr GetAliasRegionEnd() const {
-        return m_alias_region_end;
+        return alias_region_end;
     }
-    constexpr size_t GetAliasRegionSize() const {
-        return m_alias_region_end - m_alias_region_start;
+    constexpr std::size_t GetAliasRegionSize() const {
+        return alias_region_end - alias_region_start;
     }
     constexpr VAddr GetStackRegionStart() const {
-        return m_stack_region_start;
+        return stack_region_start;
     }
     constexpr VAddr GetStackRegionEnd() const {
-        return m_stack_region_end;
+        return stack_region_end;
     }
-    constexpr size_t GetStackRegionSize() const {
-        return m_stack_region_end - m_stack_region_start;
+    constexpr std::size_t GetStackRegionSize() const {
+        return stack_region_end - stack_region_start;
     }
     constexpr VAddr GetKernelMapRegionStart() const {
-        return m_kernel_map_region_start;
+        return kernel_map_region_start;
     }
     constexpr VAddr GetKernelMapRegionEnd() const {
-        return m_kernel_map_region_end;
+        return kernel_map_region_end;
     }
     constexpr VAddr GetCodeRegionStart() const {
-        return m_code_region_start;
+        return code_region_start;
     }
     constexpr VAddr GetCodeRegionEnd() const {
-        return m_code_region_end;
+        return code_region_end;
     }
     constexpr VAddr GetAliasCodeRegionStart() const {
-        return m_alias_code_region_start;
+        return alias_code_region_start;
     }
     constexpr VAddr GetAliasCodeRegionSize() const {
-        return m_alias_code_region_end - m_alias_code_region_start;
+        return alias_code_region_end - alias_code_region_start;
     }
-    size_t GetNormalMemorySize() {
-        KScopedLightLock lk(m_general_lock);
-        return GetHeapSize() + m_mapped_physical_memory_size;
+    std::size_t GetNormalMemorySize() {
+        KScopedLightLock lk(general_lock);
+        return GetHeapSize() + mapped_physical_memory_size;
     }
-    constexpr size_t GetAddressSpaceWidth() const {
-        return m_address_space_width;
+    constexpr std::size_t GetAddressSpaceWidth() const {
+        return address_space_width;
     }
-    constexpr size_t GetHeapSize() const {
-        return m_current_heap_end - m_heap_region_start;
+    constexpr std::size_t GetHeapSize() const {
+        return current_heap_end - heap_region_start;
     }
-    constexpr bool IsInsideAddressSpace(VAddr address, size_t size) const {
-        return m_address_space_start <= address && address + size - 1 <= m_address_space_end - 1;
+    constexpr bool IsInsideAddressSpace(VAddr address, std::size_t size) const {
+        return address_space_start <= address && address + size - 1 <= address_space_end - 1;
     }
-    constexpr bool IsOutsideAliasRegion(VAddr address, size_t size) const {
-        return m_alias_region_start > address || address + size - 1 > m_alias_region_end - 1;
+    constexpr bool IsOutsideAliasRegion(VAddr address, std::size_t size) const {
+        return alias_region_start > address || address + size - 1 > alias_region_end - 1;
     }
-    constexpr bool IsOutsideStackRegion(VAddr address, size_t size) const {
-        return m_stack_region_start > address || address + size - 1 > m_stack_region_end - 1;
+    constexpr bool IsOutsideStackRegion(VAddr address, std::size_t size) const {
+        return stack_region_start > address || address + size - 1 > stack_region_end - 1;
     }
-    constexpr bool IsInvalidRegion(VAddr address, size_t size) const {
+    constexpr bool IsInvalidRegion(VAddr address, std::size_t size) const {
         return address + size - 1 > GetAliasCodeRegionStart() + GetAliasCodeRegionSize() - 1;
     }
-    constexpr bool IsInsideHeapRegion(VAddr address, size_t size) const {
-        return address + size > m_heap_region_start && m_heap_region_end > address;
+    constexpr bool IsInsideHeapRegion(VAddr address, std::size_t size) const {
+        return address + size > heap_region_start && heap_region_end > address;
     }
-    constexpr bool IsInsideAliasRegion(VAddr address, size_t size) const {
-        return address + size > m_alias_region_start && m_alias_region_end > address;
+    constexpr bool IsInsideAliasRegion(VAddr address, std::size_t size) const {
+        return address + size > alias_region_start && alias_region_end > address;
     }
-    constexpr bool IsOutsideASLRRegion(VAddr address, size_t size) const {
+    constexpr bool IsOutsideASLRRegion(VAddr address, std::size_t size) const {
         if (IsInvalidRegion(address, size)) {
             return true;
         }
@@ -297,78 +291,73 @@ public:
         }
         return {};
     }
-    constexpr bool IsInsideASLRRegion(VAddr address, size_t size) const {
+    constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
         return !IsOutsideASLRRegion(address, size);
     }
-    constexpr size_t GetNumGuardPages() const {
+    constexpr std::size_t GetNumGuardPages() const {
         return IsKernel() ? 1 : 4;
     }
     PAddr GetPhysicalAddr(VAddr addr) const {
-        const auto backing_addr = m_page_table_impl->backing_addr[addr >> PageBits];
+        const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
         ASSERT(backing_addr);
         return backing_addr + addr;
     }
     constexpr bool Contains(VAddr addr) const {
-        return m_address_space_start <= addr && addr <= m_address_space_end - 1;
+        return address_space_start <= addr && addr <= address_space_end - 1;
    }
-    constexpr bool Contains(VAddr addr, size_t size) const {
-        return m_address_space_start <= addr && addr < addr + size &&
-               addr + size - 1 <= m_address_space_end - 1;
+    constexpr bool Contains(VAddr addr, std::size_t size) const {
+        return address_space_start <= addr && addr < addr + size &&
+               addr + size - 1 <= address_space_end - 1;
     }

 private:
     constexpr bool IsKernel() const {
-        return m_is_kernel;
+        return is_kernel;
     }
     constexpr bool IsAslrEnabled() const {
-        return m_enable_aslr;
+        return is_aslr_enabled;
     }

-    constexpr bool ContainsPages(VAddr addr, size_t num_pages) const {
-        return (m_address_space_start <= addr) &&
-               (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) &&
-               (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
+    constexpr bool ContainsPages(VAddr addr, std::size_t num_pages) const {
+        return (address_space_start <= addr) &&
+               (num_pages <= (address_space_end - address_space_start) / PageSize) &&
+               (addr + num_pages * PageSize - 1 <= address_space_end - 1);
     }

 private:
-    VAddr m_address_space_start{};
-    VAddr m_address_space_end{};
-    VAddr m_heap_region_start{};
-    VAddr m_heap_region_end{};
-    VAddr m_current_heap_end{};
-    VAddr m_alias_region_start{};
-    VAddr m_alias_region_end{};
-    VAddr m_stack_region_start{};
-    VAddr m_stack_region_end{};
-    VAddr m_kernel_map_region_start{};
-    VAddr m_kernel_map_region_end{};
-    VAddr m_code_region_start{};
-    VAddr m_code_region_end{};
-    VAddr m_alias_code_region_start{};
-    VAddr m_alias_code_region_end{};
-
-    size_t m_mapped_physical_memory_size{};
-    size_t m_max_heap_size{};
-    size_t m_max_physical_memory_size{};
-    size_t m_address_space_width{};
-
-    KMemoryBlockManager m_memory_block_manager;
-
-    bool m_is_kernel{};
-    bool m_enable_aslr{};
-    bool m_enable_device_address_space_merge{};
-
-    KMemoryBlockSlabManager* m_memory_block_slab_manager{};
-
-    u32 m_heap_fill_value{};
-    const KMemoryRegion* m_cached_physical_heap_region{};
-
-    KMemoryManager::Pool m_memory_pool{KMemoryManager::Pool::Application};
-    KMemoryManager::Direction m_allocation_option{KMemoryManager::Direction::FromFront};
-
-    std::unique_ptr<Common::PageTable> m_page_table_impl;
-
-    Core::System& m_system;
+    VAddr address_space_start{};
+    VAddr address_space_end{};
+    VAddr heap_region_start{};
+    VAddr heap_region_end{};
+    VAddr current_heap_end{};
+    VAddr alias_region_start{};
+    VAddr alias_region_end{};
+    VAddr stack_region_start{};
+    VAddr stack_region_end{};
+    VAddr kernel_map_region_start{};
+    VAddr kernel_map_region_end{};
+    VAddr code_region_start{};
+    VAddr code_region_end{};
+    VAddr alias_code_region_start{};
+    VAddr alias_code_region_end{};
+
+    std::size_t mapped_physical_memory_size{};
+    std::size_t max_heap_size{};
+    std::size_t max_physical_memory_size{};
+    std::size_t address_space_width{};
+
+    bool is_kernel{};
+    bool is_aslr_enabled{};
+
+    u32 heap_fill_value{};
+    const KMemoryRegion* cached_physical_heap_region{};
+
+    KMemoryManager::Pool memory_pool{KMemoryManager::Pool::Application};
+    KMemoryManager::Direction allocation_option{KMemoryManager::Direction::FromFront};
+
+    Common::PageTable page_table_impl;
+
+    Core::System& system;
 };

 } // namespace Kernel
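The region predicates in this header consistently compare inclusive last addresses (addr + size - 1) instead of one-past-the-end sums, so a range ending exactly at the top of the address space cannot wrap to zero and incorrectly pass the check. A standalone illustration of that idiom (not yuzu code; assumes size is non-zero):

// Compare last bytes, not end pointers, to stay overflow-safe at the top of
// the address space.
#include <cstdint>

constexpr bool IsInsideRegion(std::uint64_t start, std::uint64_t end,
                              std::uint64_t addr, std::uint64_t size) {
    // `end` is one past the region; compare inclusive last addresses.
    return start <= addr && addr + size - 1 <= end - 1;
}

static_assert(IsInsideRegion(0x1000, 0x2000, 0x1800, 0x800));  // ends exactly at the top
static_assert(!IsInsideRegion(0x1000, 0x2000, 0x1800, 0x900)); // runs one page over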
@@ -72,8 +72,7 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string

     process->name = std::move(process_name);
     process->resource_limit = res_limit;
-    process->system_resource_address = 0;
-    process->state = State::Created;
+    process->status = ProcessStatus::Created;
     process->program_id = 0;
     process->process_id = type == ProcessType::KernelInternal ? kernel.CreateNewKernelProcessID()
                                                               : kernel.CreateNewUserProcessID();
@@ -93,12 +92,11 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
     process->exception_thread = nullptr;
     process->is_suspended = false;
     process->schedule_count = 0;
-    process->is_handle_table_initialized = false;

     // Open a reference to the resource limit.
     process->resource_limit->Open();

-    R_SUCCEED();
+    return ResultSuccess;
 }

 void KProcess::DoWorkerTaskImpl() {
@@ -123,9 +121,9 @@ void KProcess::DecrementRunningThreadCount() {
     }
 }

-u64 KProcess::GetTotalPhysicalMemoryAvailable() {
+u64 KProcess::GetTotalPhysicalMemoryAvailable() const {
     const u64 capacity{resource_limit->GetFreeValue(LimitableResource::PhysicalMemory) +
-                       page_table.GetNormalMemorySize() + GetSystemResourceSize() + image_size +
+                       page_table->GetNormalMemorySize() + GetSystemResourceSize() + image_size +
                        main_thread_stack_size};
     if (const auto pool_size = kernel.MemoryManager().GetSize(KMemoryManager::Pool::Application);
         capacity != pool_size) {
@@ -137,16 +135,16 @@ u64 KProcess::GetTotalPhysicalMemoryAvailable() {
     return memory_usage_capacity;
 }

-u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() {
+u64 KProcess::GetTotalPhysicalMemoryAvailableWithoutSystemResource() const {
     return GetTotalPhysicalMemoryAvailable() - GetSystemResourceSize();
 }

-u64 KProcess::GetTotalPhysicalMemoryUsed() {
-    return image_size + main_thread_stack_size + page_table.GetNormalMemorySize() +
+u64 KProcess::GetTotalPhysicalMemoryUsed() const {
+    return image_size + main_thread_stack_size + page_table->GetNormalMemorySize() +
            GetSystemResourceSize();
 }

-u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() {
+u64 KProcess::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
     return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
 }

@@ -246,7 +244,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
     shmem->Open();
     shemen_info->Open();

-    R_SUCCEED();
+    return ResultSuccess;
 }

 void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@@ -291,12 +289,12 @@ Result KProcess::Reset() {
     KScopedSchedulerLock sl{kernel};

     // Validate that we're in a state that we can reset.
-    R_UNLESS(state != State::Terminated, ResultInvalidState);
+    R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);
     R_UNLESS(is_signaled, ResultInvalidState);

     // Clear signaled.
     is_signaled = false;
-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KProcess::SetActivity(ProcessActivity activity) {
@@ -306,13 +304,15 @@ Result KProcess::SetActivity(ProcessActivity activity) {
     KScopedSchedulerLock sl{kernel};

     // Validate our state.
-    R_UNLESS(state != State::Terminating, ResultInvalidState);
-    R_UNLESS(state != State::Terminated, ResultInvalidState);
+    R_UNLESS(status != ProcessStatus::Exiting, ResultInvalidState);
+    R_UNLESS(status != ProcessStatus::Exited, ResultInvalidState);

     // Either pause or resume.
     if (activity == ProcessActivity::Paused) {
         // Verify that we're not suspended.
-        R_UNLESS(!is_suspended, ResultInvalidState);
+        if (is_suspended) {
+            return ResultInvalidState;
+        }

         // Suspend all threads.
         for (auto* thread : GetThreadList()) {
@@ -325,7 +325,9 @@ Result KProcess::SetActivity(ProcessActivity activity) {
         ASSERT(activity == ProcessActivity::Runnable);

         // Verify that we're suspended.
-        R_UNLESS(is_suspended, ResultInvalidState);
+        if (!is_suspended) {
+            return ResultInvalidState;
+        }

         // Resume all threads.
         for (auto* thread : GetThreadList()) {
@@ -336,7 +338,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
         SetSuspended(false);
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
@@ -346,38 +348,35 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     system_resource_size = metadata.GetSystemResourceSize();
     image_size = code_size;

-    // We currently do not support process-specific system resource
-    UNIMPLEMENTED_IF(system_resource_size != 0);
-
     KScopedResourceReservation memory_reservation(resource_limit, LimitableResource::PhysicalMemory,
                                                   code_size + system_resource_size);
     if (!memory_reservation.Succeeded()) {
         LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
                   code_size + system_resource_size);
-        R_RETURN(ResultLimitReached);
+        return ResultLimitReached;
     }
     // Initialize proces address space
-    if (const Result result{page_table.InitializeForProcess(
-            metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
-            &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
+    if (const Result result{page_table->InitializeForProcess(metadata.GetAddressSpaceType(), false,
+                                                             0x8000000, code_size,
+                                                             KMemoryManager::Pool::Application)};
         result.IsError()) {
-        R_RETURN(result);
+        return result;
     }

     // Map process code region
-    if (const Result result{page_table.MapProcessCode(page_table.GetCodeRegionStart(),
-                                                      code_size / PageSize, KMemoryState::Code,
-                                                      KMemoryPermission::None)};
+    if (const Result result{page_table->MapProcessCode(page_table->GetCodeRegionStart(),
+                                                       code_size / PageSize, KMemoryState::Code,
+                                                       KMemoryPermission::None)};
         result.IsError()) {
-        R_RETURN(result);
+        return result;
     }

     // Initialize process capabilities
     const auto& caps{metadata.GetKernelCapabilities()};
     if (const Result result{
-            capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
+            capabilities.InitializeForUserProcess(caps.data(), caps.size(), *page_table)};
         result.IsError()) {
-        R_RETURN(result);
+        return result;
     }

     // Set memory usage capacity
@@ -385,12 +384,12 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     case FileSys::ProgramAddressSpaceType::Is32Bit:
     case FileSys::ProgramAddressSpaceType::Is36Bit:
     case FileSys::ProgramAddressSpaceType::Is39Bit:
-        memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart();
+        memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart();
         break;

     case FileSys::ProgramAddressSpaceType::Is32BitNoMap:
-        memory_usage_capacity = page_table.GetHeapRegionEnd() - page_table.GetHeapRegionStart() +
-                                page_table.GetAliasRegionEnd() - page_table.GetAliasRegionStart();
+        memory_usage_capacity = page_table->GetHeapRegionEnd() - page_table->GetHeapRegionStart() +
+                                page_table->GetAliasRegionEnd() - page_table->GetAliasRegionStart();
         break;

     default:
@@ -398,10 +397,10 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     }

     // Create TLS region
-    R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
+    R_TRY(this->CreateThreadLocalRegion(std::addressof(tls_region_address)));
|
||||||
memory_reservation.Commit();
|
memory_reservation.Commit();
|
||||||
|
|
||||||
R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
|
return handle_table.Initialize(capabilities.GetHandleTableSize());
|
||||||
}
|
}
|
||||||
|
|
||||||
void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
|
void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
|
||||||
|
@ -410,15 +409,15 @@ void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
|
||||||
resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
|
resource_limit->Reserve(LimitableResource::PhysicalMemory, main_thread_stack_size);
|
||||||
|
|
||||||
const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
|
const std::size_t heap_capacity{memory_usage_capacity - (main_thread_stack_size + image_size)};
|
||||||
ASSERT(!page_table.SetMaxHeapSize(heap_capacity).IsError());
|
ASSERT(!page_table->SetMaxHeapSize(heap_capacity).IsError());
|
||||||
|
|
||||||
ChangeState(State::Running);
|
ChangeStatus(ProcessStatus::Running);
|
||||||
|
|
||||||
SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
|
SetupMainThread(kernel.System(), *this, main_thread_priority, main_thread_stack_top);
|
||||||
}
|
}
|
||||||
|
|
||||||
void KProcess::PrepareForTermination() {
|
void KProcess::PrepareForTermination() {
|
||||||
ChangeState(State::Terminating);
|
ChangeStatus(ProcessStatus::Exiting);
|
||||||
|
|
||||||
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
|
const auto stop_threads = [this](const std::vector<KThread*>& in_thread_list) {
|
||||||
for (auto* thread : in_thread_list) {
|
for (auto* thread : in_thread_list) {
|
||||||
|
@ -438,15 +437,15 @@ void KProcess::PrepareForTermination() {
|
||||||
|
|
||||||
stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
|
stop_threads(kernel.System().GlobalSchedulerContext().GetThreadList());
|
||||||
|
|
||||||
this->DeleteThreadLocalRegion(plr_address);
|
this->DeleteThreadLocalRegion(tls_region_address);
|
||||||
plr_address = 0;
|
tls_region_address = 0;
|
||||||
|
|
||||||
if (resource_limit) {
|
if (resource_limit) {
|
||||||
resource_limit->Release(LimitableResource::PhysicalMemory,
|
resource_limit->Release(LimitableResource::PhysicalMemory,
|
||||||
main_thread_stack_size + image_size);
|
main_thread_stack_size + image_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
ChangeState(State::Terminated);
|
ChangeStatus(ProcessStatus::Exited);
|
||||||
}
|
}
|
||||||
|
|
||||||
void KProcess::Finalize() {
|
void KProcess::Finalize() {
|
||||||
|
@ -475,7 +474,7 @@ void KProcess::Finalize() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finalize the page table.
|
// Finalize the page table.
|
||||||
page_table.Finalize();
|
page_table.reset();
|
||||||
|
|
||||||
// Perform inherited finalization.
|
// Perform inherited finalization.
|
||||||
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
|
KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask>::Finalize();
|
||||||
|
@ -500,7 +499,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
|
||||||
}
|
}
|
||||||
|
|
||||||
*out = tlr;
|
*out = tlr;
|
||||||
R_SUCCEED();
|
return ResultSuccess;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -529,7 +528,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
|
||||||
// We succeeded!
|
// We succeeded!
|
||||||
tlp_guard.Cancel();
|
tlp_guard.Cancel();
|
||||||
*out = tlr;
|
*out = tlr;
|
||||||
R_SUCCEED();
|
return ResultSuccess;
|
||||||
}
|
}
|
||||||
|
|
||||||
Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
|
Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
|
||||||
|
@ -577,7 +576,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
|
||||||
KThreadLocalPage::Free(kernel, page_to_free);
|
KThreadLocalPage::Free(kernel, page_to_free);
|
||||||
}
|
}
|
||||||
|
|
||||||
R_SUCCEED();
|
return ResultSuccess;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
|
bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
|
||||||
|
@ -629,7 +628,7 @@ bool KProcess::RemoveWatchpoint(Core::System& system, VAddr addr, u64 size,
|
||||||
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
|
void KProcess::LoadModule(CodeSet code_set, VAddr base_addr) {
|
||||||
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
|
const auto ReprotectSegment = [&](const CodeSet::Segment& segment,
|
||||||
Svc::MemoryPermission permission) {
|
Svc::MemoryPermission permission) {
|
||||||
page_table.SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
|
page_table->SetProcessMemoryPermission(segment.addr + base_addr, segment.size, permission);
|
||||||
};
|
};
|
||||||
|
|
||||||
kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
|
kernel.System().Memory().WriteBlock(*this, base_addr, code_set.memory.data(),
|
||||||
|
@ -646,18 +645,19 @@ bool KProcess::IsSignaled() const {
|
||||||
}
|
}
|
||||||
|
|
||||||
KProcess::KProcess(KernelCore& kernel_)
|
KProcess::KProcess(KernelCore& kernel_)
|
||||||
: KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{kernel_.System()},
|
: KAutoObjectWithSlabHeapAndContainer{kernel_}, page_table{std::make_unique<KPageTable>(
|
||||||
|
kernel_.System())},
|
||||||
handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
|
handle_table{kernel_}, address_arbiter{kernel_.System()}, condition_var{kernel_.System()},
|
||||||
state_lock{kernel_}, list_lock{kernel_} {}
|
state_lock{kernel_}, list_lock{kernel_} {}
|
||||||
|
|
||||||
KProcess::~KProcess() = default;
|
KProcess::~KProcess() = default;
|
||||||
|
|
||||||
void KProcess::ChangeState(State new_state) {
|
void KProcess::ChangeStatus(ProcessStatus new_status) {
|
||||||
if (state == new_state) {
|
if (status == new_status) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
state = new_state;
|
status = new_status;
|
||||||
is_signaled = true;
|
is_signaled = true;
|
||||||
NotifyAvailable();
|
NotifyAvailable();
|
||||||
}
|
}
|
||||||
|
@ -668,17 +668,17 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
|
||||||
// The kernel always ensures that the given stack size is page aligned.
|
// The kernel always ensures that the given stack size is page aligned.
|
||||||
main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
|
main_thread_stack_size = Common::AlignUp(stack_size, PageSize);
|
||||||
|
|
||||||
const VAddr start{page_table.GetStackRegionStart()};
|
const VAddr start{page_table->GetStackRegionStart()};
|
||||||
const std::size_t size{page_table.GetStackRegionEnd() - start};
|
const std::size_t size{page_table->GetStackRegionEnd() - start};
|
||||||
|
|
||||||
CASCADE_RESULT(main_thread_stack_top,
|
CASCADE_RESULT(main_thread_stack_top,
|
||||||
page_table.AllocateAndMapMemory(
|
page_table->AllocateAndMapMemory(
|
||||||
main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
|
main_thread_stack_size / PageSize, PageSize, false, start, size / PageSize,
|
||||||
KMemoryState::Stack, KMemoryPermission::UserReadWrite));
|
KMemoryState::Stack, KMemoryPermission::UserReadWrite));
|
||||||
|
|
||||||
main_thread_stack_top += main_thread_stack_size;
|
main_thread_stack_top += main_thread_stack_size;
|
||||||
|
|
||||||
R_SUCCEED();
|
return ResultSuccess;
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace Kernel
|
} // namespace Kernel
|
||||||
|
|
|
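For readers skimming the k_process.cpp hunks: ChangeStatus() is the one place a process transitions status, and it doubles as the signal source for anything waiting on the process object. A minimal, self-contained sketch of that signal-on-transition shape (all names here are illustrative, not yuzu's actual API):

#include <functional>
#include <vector>

enum class Status { Created, Running, Exiting, Exited };

class SignalableProcess {
public:
    void OnSignaled(std::function<void()> waiter) {
        waiters.push_back(std::move(waiter));
    }

    // Only a real transition signals; redundant writes are ignored,
    // mirroring the early-return in KProcess::ChangeStatus().
    void ChangeStatus(Status new_status) {
        if (status == new_status) {
            return;
        }
        status = new_status;
        is_signaled = true;
        NotifyAvailable();
    }

private:
    void NotifyAvailable() {
        for (auto& waiter : waiters) {
            waiter(); // wake anyone waiting on this object
        }
    }

    Status status{Status::Created};
    bool is_signaled{false};
    std::vector<std::function<void()>> waiters;
};
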
@@ -13,7 +13,6 @@
 #include "core/hle/kernel/k_auto_object.h"
 #include "core/hle/kernel/k_condition_variable.h"
 #include "core/hle/kernel/k_handle_table.h"
-#include "core/hle/kernel/k_page_table.h"
 #include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/k_thread_local_page.h"
 #include "core/hle/kernel/k_worker_task.h"
@@ -32,6 +31,7 @@ class ProgramMetadata;
 namespace Kernel {

 class KernelCore;
+class KPageTable;
 class KResourceLimit;
 class KThread;
 class KSharedMemoryInfo;
@@ -45,6 +45,24 @@ enum class MemoryRegion : u16 {
     BASE = 3,
 };

+/**
+ * Indicates the status of a Process instance.
+ *
+ * @note These match the values as used by kernel,
+ *       so new entries should only be added if RE
+ *       shows that a new value has been introduced.
+ */
+enum class ProcessStatus {
+    Created,
+    CreatedWithDebuggerAttached,
+    Running,
+    WaitingForDebuggerToAttach,
+    DebuggerAttached,
+    Exiting,
+    Exited,
+    DebugBreak,
+};
+
 enum class ProcessActivity : u32 {
     Runnable,
     Paused,
@@ -71,17 +89,6 @@ public:
     explicit KProcess(KernelCore& kernel_);
     ~KProcess() override;

-    enum class State {
-        Created = Svc::ProcessState_Created,
-        CreatedAttached = Svc::ProcessState_CreatedAttached,
-        Running = Svc::ProcessState_Running,
-        Crashed = Svc::ProcessState_Crashed,
-        RunningAttached = Svc::ProcessState_RunningAttached,
-        Terminating = Svc::ProcessState_Terminating,
-        Terminated = Svc::ProcessState_Terminated,
-        DebugBreak = Svc::ProcessState_DebugBreak,
-    };
-
     enum : u64 {
         /// Lowest allowed process ID for a kernel initial process.
         InitialKIPIDMin = 1,
@@ -107,12 +114,12 @@ public:

     /// Gets a reference to the process' page table.
     KPageTable& PageTable() {
-        return page_table;
+        return *page_table;
     }

     /// Gets const a reference to the process' page table.
     const KPageTable& PageTable() const {
-        return page_table;
+        return *page_table;
     }

     /// Gets a reference to the process' handle table.
@@ -138,25 +145,26 @@ public:
     }

     Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
-        R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
+        return condition_var.Wait(address, cv_key, tag, ns);
     }

     Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
-        R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
+        return address_arbiter.SignalToAddress(address, signal_type, value, count);
     }

     Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
                               s64 timeout) {
-        R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
+        return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
     }

-    VAddr GetProcessLocalRegionAddress() const {
-        return plr_address;
+    /// Gets the address to the process' dedicated TLS region.
+    VAddr GetTLSRegionAddress() const {
+        return tls_region_address;
     }

     /// Gets the current status of the process
-    State GetState() const {
-        return state;
+    ProcessStatus GetStatus() const {
+        return status;
     }

     /// Gets the unique ID that identifies this particular process.
@@ -278,18 +286,18 @@ public:
     }

     /// Retrieves the total physical memory available to this process in bytes.
-    u64 GetTotalPhysicalMemoryAvailable();
+    u64 GetTotalPhysicalMemoryAvailable() const;

     /// Retrieves the total physical memory available to this process in bytes,
     /// without the size of the personal system resource heap added to it.
-    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource();
+    u64 GetTotalPhysicalMemoryAvailableWithoutSystemResource() const;

     /// Retrieves the total physical memory used by this process in bytes.
-    u64 GetTotalPhysicalMemoryUsed();
+    u64 GetTotalPhysicalMemoryUsed() const;

     /// Retrieves the total physical memory used by this process in bytes,
     /// without the size of the personal system resource heap added to it.
-    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource();
+    u64 GetTotalPhysicalMemoryUsedWithoutSystemResource() const;

     /// Gets the list of all threads created with this process as their owner.
     std::list<KThread*>& GetThreadList() {
@@ -407,24 +415,19 @@ private:
         pinned_threads[core_id] = nullptr;
     }

-    void FinalizeHandleTable() {
-        // Finalize the table.
-        handle_table.Finalize();
-
-        // Note that the table is finalized.
-        is_handle_table_initialized = false;
-    }
-
-    void ChangeState(State new_state);
+    /// Changes the process status. If the status is different
+    /// from the current process status, then this will trigger
+    /// a process signal.
+    void ChangeStatus(ProcessStatus new_status);

     /// Allocates the main thread stack for the process, given the stack size in bytes.
     Result AllocateMainThreadStack(std::size_t stack_size);

     /// Memory manager for this process
-    KPageTable page_table;
+    std::unique_ptr<KPageTable> page_table;

     /// Current status of the process
-    State state{};
+    ProcessStatus status{};

     /// The ID of this process
     u64 process_id = 0;
@@ -440,8 +443,6 @@ private:
     /// Resource limit descriptor for this process
     KResourceLimit* resource_limit{};

-    VAddr system_resource_address{};
-
     /// The ideal CPU core for this process, threads are scheduled on this core by default.
     u8 ideal_core = 0;

@@ -468,7 +469,7 @@ private:
     KConditionVariable condition_var;

     /// Address indicating the location of the process' dedicated TLS region.
-    VAddr plr_address = 0;
+    VAddr tls_region_address = 0;

     /// Random values for svcGetInfo RandomEntropy
     std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy{};
@@ -494,12 +495,8 @@ private:
     /// Schedule count of this process
     s64 schedule_count{};

-    size_t memory_release_hint{};
-
     bool is_signaled{};
     bool is_suspended{};
-    bool is_immortal{};
-    bool is_handle_table_initialized{};
     bool is_initialized{};

     std::atomic<u16> num_running_threads{};

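The header-side counterpart of the page_table change is worth spelling out: holding the member as std::unique_ptr lets k_process.h drop the k_page_table.h include in favor of a forward declaration, at the cost of requiring the constructor and destructor to live where the type is complete (hence the out-of-line KProcess::~KProcess() = default above). A hedged sketch of the same shape, with illustrative names:

#include <memory>

class PageTable; // forward declaration: enough for declaring the member

class Process {
public:
    Process();
    ~Process(); // must be defined where PageTable is complete
    PageTable& Table() {
        return *page_table; // operator* is fine on an incomplete type
    }

private:
    std::unique_ptr<PageTable> page_table;
};

// --- normally in the .cpp file ---
class PageTable {}; // full definition now visible

Process::Process() : page_table{std::make_unique<PageTable>()} {}
Process::~Process() = default; // unique_ptr's deleter instantiated here

int main() {
    Process p;
    (void)p.Table();
}
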
@@ -50,7 +50,7 @@ Result KSharedMemory::Initialize(Core::DeviceMemory& device_memory_, KProcess* o
     is_initialized = true;

     // Clear all pages in the memory.
-    std::memset(device_memory_.GetPointer<void>(physical_address_), 0, size_);
+    std::memset(device_memory_.GetPointer(physical_address_), 0, size_);

     return ResultSuccess;
 }

@@ -54,7 +54,7 @@ public:
      * @return A pointer to the shared memory block from the specified offset
      */
     u8* GetPointer(std::size_t offset = 0) {
-        return device_memory->GetPointer<u8>(physical_address + offset);
+        return device_memory->GetPointer(physical_address + offset);
     }

     /**
@@ -63,7 +63,7 @@ public:
      * @return A pointer to the shared memory block from the specified offset
      */
     const u8* GetPointer(std::size_t offset = 0) const {
-        return device_memory->GetPointer<u8>(physical_address + offset);
+        return device_memory->GetPointer(physical_address + offset);
     }

     void Finalize() override;

@@ -30,7 +30,6 @@
 #include "core/hle/kernel/k_worker_task_manager.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/kernel/svc_results.h"
-#include "core/hle/kernel/svc_types.h"
 #include "core/hle/result.h"
 #include "core/memory.h"

@@ -39,9 +38,6 @@
 #endif

 namespace {
-
-constexpr inline s32 TerminatingThreadPriority = Kernel::Svc::SystemThreadPriorityHighest - 1;
-
 static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
                                  u32 entry_point, u32 arg) {
     context = {};
@@ -245,7 +241,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
         }
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@@ -258,7 +254,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
     thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
     thread->is_single_core = !Settings::values.use_multi_core.GetValue();

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::InitializeDummyThread(KThread* thread) {
@@ -268,32 +264,31 @@ Result KThread::InitializeDummyThread(KThread* thread) {
     // Initialize emulation parameters.
     thread->stack_parameters.disable_count = 0;

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
-    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
-                              ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
+    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
+                            system.GetCpuManager().GetGuestActivateFunc());
 }

 Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
-    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
-                              ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
+    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
+                            system.GetCpuManager().GetIdleThreadStartFunc());
 }

 Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
                                              KThreadFunction func, uintptr_t arg, s32 virt_core) {
-    R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
-                              ThreadType::HighPriority,
-                              system.GetCpuManager().GetShutdownThreadStartFunc()));
+    return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
+                            system.GetCpuManager().GetShutdownThreadStartFunc());
 }

 Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
                                      uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
                                      KProcess* owner) {
     system.Kernel().GlobalSchedulerContext().AddThread(thread);
-    R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
-                              ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
+    return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
+                            ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
 }

 void KThread::PostDestroy(uintptr_t arg) {
@@ -543,7 +538,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     *out_ideal_core = virtual_ideal_core_id;
     *out_affinity_mask = virtual_affinity_mask;

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@@ -559,7 +554,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
         *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@@ -671,7 +666,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
         } while (retry_update);
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 void KThread::SetBasePriority(s32 value) {
@@ -844,7 +839,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
         } while (thread_is_current);
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 Result KThread::GetThreadContext3(std::vector<u8>& out) {
@@ -879,7 +874,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
         }
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 void KThread::AddWaiterImpl(KThread* thread) {
@@ -1043,7 +1038,7 @@ Result KThread::Run() {
         // Set our state and finish.
         SetState(ThreadState::Runnable);

-        R_SUCCEED();
+        return ResultSuccess;
     }
 }

@@ -1078,78 +1073,6 @@ void KThread::Exit() {
     UNREACHABLE_MSG("KThread::Exit() would return");
 }

-Result KThread::Terminate() {
-    ASSERT(this != GetCurrentThreadPointer(kernel));
-
-    // Request the thread terminate if it hasn't already.
-    if (const auto new_state = this->RequestTerminate(); new_state != ThreadState::Terminated) {
-        // If the thread isn't terminated, wait for it to terminate.
-        s32 index;
-        KSynchronizationObject* objects[] = {this};
-        R_TRY(KSynchronizationObject::Wait(kernel, std::addressof(index), objects, 1,
-                                           Svc::WaitInfinite));
-    }
-
-    R_SUCCEED();
-}
-
-ThreadState KThread::RequestTerminate() {
-    ASSERT(this != GetCurrentThreadPointer(kernel));
-
-    KScopedSchedulerLock sl{kernel};
-
-    // Determine if this is the first termination request.
-    const bool first_request = [&]() -> bool {
-        // Perform an atomic compare-and-swap from false to true.
-        bool expected = false;
-        return termination_requested.compare_exchange_strong(expected, true);
-    }();
-
-    // If this is the first request, start termination procedure.
-    if (first_request) {
-        // If the thread is in initialized state, just change state to terminated.
-        if (this->GetState() == ThreadState::Initialized) {
-            thread_state = ThreadState::Terminated;
-            return ThreadState::Terminated;
-        }
-
-        // Register the terminating dpc.
-        this->RegisterDpc(DpcFlag::Terminating);
-
-        // If the thread is pinned, unpin it.
-        if (this->GetStackParameters().is_pinned) {
-            this->GetOwnerProcess()->UnpinThread(this);
-        }
-
-        // If the thread is suspended, continue it.
-        if (this->IsSuspended()) {
-            suspend_allowed_flags = 0;
-            this->UpdateState();
-        }
-
-        // Change the thread's priority to be higher than any system thread's.
-        if (this->GetBasePriority() >= Svc::SystemThreadPriorityHighest) {
-            this->SetBasePriority(TerminatingThreadPriority);
-        }
-
-        // If the thread is runnable, send a termination interrupt to other cores.
-        if (this->GetState() == ThreadState::Runnable) {
-            if (const u64 core_mask =
-                    physical_affinity_mask.GetAffinityMask() & ~(1ULL << GetCurrentCoreId(kernel));
-                core_mask != 0) {
-                Kernel::KInterruptManager::SendInterProcessorInterrupt(kernel, core_mask);
-            }
-        }
-
-        // Wake up the thread.
-        if (this->GetState() == ThreadState::Waiting) {
-            wait_queue->CancelWait(this, ResultTerminationRequested, true);
-        }
-    }
-
-    return this->GetState();
-}
-
 Result KThread::Sleep(s64 timeout) {
     ASSERT(!kernel.GlobalSchedulerContext().IsLocked());
     ASSERT(this == GetCurrentThreadPointer(kernel));
@@ -1163,7 +1086,7 @@ Result KThread::Sleep(s64 timeout) {
     // Check if the thread should terminate.
     if (this->IsTerminationRequested()) {
         slp.CancelSleep();
-        R_THROW(ResultTerminationRequested);
+        return ResultTerminationRequested;
     }

     // Wait for the sleep to end.
@@ -1171,7 +1094,7 @@ Result KThread::Sleep(s64 timeout) {
         SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }

-    R_SUCCEED();
+    return ResultSuccess;
 }

 void KThread::IfDummyThreadTryWait() {

@@ -180,10 +180,6 @@ public:

     void Exit();

-    Result Terminate();
-
-    ThreadState RequestTerminate();
-
     [[nodiscard]] u32 GetSuspendFlags() const {
         return suspend_allowed_flags & suspend_request_flags;
     }

@@ -24,7 +24,6 @@
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/init/init_slab_setup.h"
 #include "core/hle/kernel/k_client_port.h"
-#include "core/hle/kernel/k_dynamic_resource_manager.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/k_memory_layout.h"
 #include "core/hle/kernel/k_memory_manager.h"
@@ -74,16 +73,8 @@ struct KernelCore::Impl {
         InitializeMemoryLayout();
         Init::InitializeKPageBufferSlabHeap(system);
         InitializeShutdownThreads();
-        InitializePhysicalCores();
         InitializePreemption(kernel);
-
-        // Initialize the Dynamic Slab Heaps.
-        {
-            const auto& pt_heap_region = memory_layout->GetPageTableHeapRegion();
-            ASSERT(pt_heap_region.GetEndAddress() != 0);
-
-            InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
-        }
+        InitializePhysicalCores();

         RegisterHostThread();
     }
@@ -95,15 +86,6 @@ struct KernelCore::Impl {
         }
     }

-    void CloseCurrentProcess() {
-        (*current_process).Finalize();
-        // current_process->Close();
-        // TODO: The current process should be destroyed based on accurate ref counting after
-        // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
-        (*current_process).Destroy();
-        current_process = nullptr;
-    }
-
     void Shutdown() {
         is_shutting_down.store(true, std::memory_order_relaxed);
         SCOPE_EXIT({ is_shutting_down.store(false, std::memory_order_relaxed); });
@@ -117,6 +99,10 @@ struct KernelCore::Impl {
         next_user_process_id = KProcess::ProcessIDMin;
         next_thread_id = 1;

+        for (auto& core : cores) {
+            core = nullptr;
+        }
+
         global_handle_table->Finalize();
         global_handle_table.reset();

@@ -166,7 +152,15 @@ struct KernelCore::Impl {
         }
     }

-        CloseCurrentProcess();
+        // Shutdown all processes.
+        if (current_process) {
+            (*current_process).Finalize();
+            // current_process->Close();
+            // TODO: The current process should be destroyed based on accurate ref counting after
+            // calling Close(). Adding a manual Destroy() call instead to avoid a memory leak.
+            (*current_process).Destroy();
+            current_process = nullptr;
+        }

         // Track kernel objects that were not freed on shutdown
         {
@@ -263,18 +257,6 @@ struct KernelCore::Impl {
         system.CoreTiming().ScheduleLoopingEvent(time_interval, time_interval, preemption_event);
     }

-    void InitializeResourceManagers(VAddr address, size_t size) {
-        dynamic_page_manager = std::make_unique<KDynamicPageManager>();
-        memory_block_heap = std::make_unique<KMemoryBlockSlabHeap>();
-        app_memory_block_manager = std::make_unique<KMemoryBlockSlabManager>();
-
-        dynamic_page_manager->Initialize(address, size);
-        static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
-        memory_block_heap->Initialize(dynamic_page_manager.get(),
-                                      ApplicationMemoryBlockSlabHeapSize);
-        app_memory_block_manager->Initialize(nullptr, memory_block_heap.get());
-    }
-
     void InitializeShutdownThreads() {
         for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
             shutdown_threads[core_id] = KThread::Create(system.Kernel());
@@ -362,6 +344,11 @@ struct KernelCore::Impl {
     static inline thread_local KThread* current_thread{nullptr};

     KThread* GetCurrentEmuThread() {
+        // If we are shutting down the kernel, none of this is relevant anymore.
+        if (IsShuttingDown()) {
+            return {};
+        }
+
         const auto thread_id = GetCurrentHostThreadID();
         if (thread_id >= Core::Hardware::NUM_CPU_CORES) {
             return GetHostDummyThread();
@@ -783,11 +770,6 @@ struct KernelCore::Impl {
     // Kernel memory management
     std::unique_ptr<KMemoryManager> memory_manager;

-    // Dynamic slab managers
-    std::unique_ptr<KDynamicPageManager> dynamic_page_manager;
-    std::unique_ptr<KMemoryBlockSlabHeap> memory_block_heap;
-    std::unique_ptr<KMemoryBlockSlabManager> app_memory_block_manager;
-
     // Shared memory for services
     Kernel::KSharedMemory* hid_shared_mem{};
     Kernel::KSharedMemory* font_shared_mem{};
@@ -871,10 +853,6 @@ const KProcess* KernelCore::CurrentProcess() const {
     return impl->current_process;
 }

-void KernelCore::CloseCurrentProcess() {
-    impl->CloseCurrentProcess();
-}
-
 const std::vector<KProcess*>& KernelCore::GetProcessList() const {
     return impl->process_list;
 }
@@ -1063,14 +1041,6 @@ const KMemoryManager& KernelCore::MemoryManager() const {
     return *impl->memory_manager;
 }

-KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() {
-    return *impl->app_memory_block_manager;
-}
-
-const KMemoryBlockSlabManager& KernelCore::GetApplicationMemoryBlockManager() const {
-    return *impl->app_memory_block_manager;
-}
-
 Kernel::KSharedMemory& KernelCore::GetHidSharedMem() {
     return *impl->hid_shared_mem;
 }

@@ -37,7 +37,6 @@ class KClientSession;
 class KEvent;
 class KHandleTable;
 class KLinkedListNode;
-class KMemoryBlockSlabManager;
 class KMemoryLayout;
 class KMemoryManager;
 class KPageBuffer;
@@ -131,9 +130,6 @@ public:
     /// Retrieves a const pointer to the current process.
     const KProcess* CurrentProcess() const;

-    /// Closes the current process.
-    void CloseCurrentProcess();
-
     /// Retrieves the list of processes.
     const std::vector<KProcess*>& GetProcessList() const;

@@ -242,12 +238,6 @@ public:
     /// Gets the virtual memory manager for the kernel.
     const KMemoryManager& MemoryManager() const;

-    /// Gets the application memory block manager for the kernel.
-    KMemoryBlockSlabManager& GetApplicationMemoryBlockManager();
-
-    /// Gets the application memory block manager for the kernel.
-    const KMemoryBlockSlabManager& GetApplicationMemoryBlockManager() const;
-
     /// Gets the shared memory object for HID services.
     Kernel::KSharedMemory& GetHidSharedMem();

@@ -933,7 +933,7 @@ static Result GetInfo(Core::System& system, u64* result, u64 info_id, Handle han
         return ResultSuccess;

     case GetInfoType::UserExceptionContextAddr:
-        *result = process->GetProcessLocalRegionAddress();
+        *result = process->GetTLSRegionAddress();
         return ResultSuccess;

     case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
@@ -1888,7 +1888,7 @@ static void ExitProcess(Core::System& system) {
     auto* current_process = system.Kernel().CurrentProcess();

     LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
-    ASSERT_MSG(current_process->GetState() == KProcess::State::Running,
+    ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
                "Process has already exited");

     system.Exit();
@@ -2557,7 +2557,7 @@ static Result GetProcessInfo(Core::System& system, u64* out, Handle process_hand
         return ResultInvalidEnumValue;
     }

-    *out = static_cast<u64>(process->GetState());
+    *out = static_cast<u64>(process->GetStatus());
     return ResultSuccess;
 }

@@ -14,13 +14,8 @@ namespace Kernel::Svc {

 using namespace Common::Literals;

-enum {
-    HandleWaitMask = (1u << 30),
-};
-
-constexpr inline s32 ArgumentHandleCountMax = 0x40;
-
-constexpr inline s64 WaitInfinite = -1;
+constexpr s32 ArgumentHandleCountMax = 0x40;
+constexpr u32 HandleWaitMask{1u << 30};

 constexpr inline std::size_t HeapSizeAlignment = 2_MiB;

@@ -95,19 +95,6 @@ constexpr inline s32 IdealCoreNoUpdate = -3;
 constexpr inline s32 LowestThreadPriority = 63;
 constexpr inline s32 HighestThreadPriority = 0;

-constexpr inline s32 SystemThreadPriorityHighest = 16;
-
-enum ProcessState : u32 {
-    ProcessState_Created = 0,
-    ProcessState_CreatedAttached = 1,
-    ProcessState_Running = 2,
-    ProcessState_Crashed = 3,
-    ProcessState_RunningAttached = 4,
-    ProcessState_Terminating = 5,
-    ProcessState_Terminated = 6,
-    ProcessState_DebugBreak = 7,
-};
-
 constexpr inline size_t ThreadLocalRegionSize = 0x200;

 } // namespace Kernel::Svc

@@ -135,14 +135,6 @@ union Result {
     [[nodiscard]] constexpr bool IsFailure() const {
         return !IsSuccess();
     }

-    [[nodiscard]] constexpr u32 GetInnerValue() const {
-        return static_cast<u32>(module.Value()) | (description << module.bits);
-    }
-
-    [[nodiscard]] constexpr bool Includes(Result result) const {
-        return GetInnerValue() == result.GetInnerValue();
-    }
 };
 static_assert(std::is_trivial_v<Result>);

@@ -470,6 +462,9 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
 #define R_UNLESS(expr, res)                                                \
     {                                                                      \
         if (!(expr)) {                                                     \
+            if (res.IsError()) {                                           \
+                LOG_ERROR(Kernel, "Failed with result: {}", res.raw);      \
+            }                                                              \
             R_THROW(res);                                                  \
         }                                                                  \
     }

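The R_UNLESS hunk above adds error logging to the guard path. As a rough model of how such a guard macro behaves (simplified here to a plain return rather than yuzu's R_THROW plumbing; the error value below is made up for illustration):

#include <cstdio>

struct Result {
    unsigned raw;
    constexpr bool IsError() const { return raw != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultInvalidState{0xA201}; // illustrative value only

// Bail out of the current function with `res` when `expr` does not hold.
#define R_UNLESS(expr, res)                                         \
    {                                                               \
        if (!(expr)) {                                              \
            if ((res).IsError()) {                                  \
                std::printf("Failed with result: %u\n", (res).raw); \
            }                                                       \
            return (res);                                           \
        }                                                           \
    }

Result Reset(bool is_signaled) {
    R_UNLESS(is_signaled, ResultInvalidState); // early-out on bad state
    return ResultSuccess;
}

int main() {
    (void)Reset(false); // exercises the logging error path
}
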
@@ -290,7 +290,7 @@ public:
         const std::size_t padding_size{page_table.GetNumGuardPages() * Kernel::PageSize};
         const auto start_info{page_table.QueryInfo(start - 1)};

-        if (start_info.GetState() != Kernel::KMemoryState::Free) {
+        if (start_info.state != Kernel::KMemoryState::Free) {
             return {};
         }

@@ -300,7 +300,7 @@ public:

         const auto end_info{page_table.QueryInfo(start + size)};

-        if (end_info.GetState() != Kernel::KMemoryState::Free) {
+        if (end_info.state != Kernel::KMemoryState::Free) {
             return {};
         }

@@ -128,8 +128,7 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
     }
     ASSERT(system.CurrentProcess()
                ->PageTable()
-               .LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
-                                             Kernel::KMemoryPermission::None, true)
+               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
                .IsSuccess());
     std::memcpy(output.data(), &params, sizeof(params));
     return result;

@@ -65,7 +65,7 @@ struct Memory::Impl {
             return {};
         }

-        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer(paddr) + vaddr;
     }

     [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
@@ -75,7 +75,7 @@ struct Memory::Impl {
             return {};
         }

-        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer(paddr) + vaddr;
     }

     u8 Read8(const VAddr addr) {
@@ -499,7 +499,7 @@ struct Memory::Impl {
         } else {
             while (base != end) {
                 page_table.pointers[base].Store(
-                    system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS), type);
+                    system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
                 page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);

                 ASSERT_MSG(page_table.pointers[base].Pointer(),

@@ -40,6 +40,9 @@ struct ScopeInit final {
         core_timing.SetMulticore(true);
         core_timing.Initialize([]() {});
     }
+    ~ScopeInit() {
+        core_timing.Shutdown();
+    }

     Core::Timing::CoreTiming core_timing;
 };

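The added ~ScopeInit() is ordinary RAII: CoreTiming::Shutdown() now runs on every exit path of the test fixture, including early returns, rather than only on the happy path. A generic sketch of the same pattern, with illustrative names:

#include <cstdio>

struct Subsystem {
    void Initialize() { std::puts("init"); }
    void Shutdown() { std::puts("shutdown"); }
};

struct ScopeInit {
    ScopeInit() { subsystem.Initialize(); }
    ~ScopeInit() { subsystem.Shutdown(); } // guaranteed on scope exit
    Subsystem subsystem;
};

int main() {
    ScopeInit guard;
    // ... test body; Shutdown() still runs if we return early ...
}
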
@@ -483,9 +483,7 @@ void Maxwell3D::ProcessQueryGet() {

     switch (regs.report_semaphore.query.operation) {
     case Regs::ReportSemaphore::Operation::Release:
-        if (regs.report_semaphore.query.release ==
-                Regs::ReportSemaphore::Release::AfterAllPreceedingWrites ||
-            regs.report_semaphore.query.short_query != 0) {
+        if (regs.report_semaphore.query.short_query != 0) {
             const GPUVAddr sequence_address{regs.report_semaphore.Address()};
             const u32 payload = regs.report_semaphore.payload;
             std::function<void()> operation([this, sequence_address, payload] {
@@ -499,11 +497,10 @@ void Maxwell3D::ProcessQueryGet() {
             };
             const GPUVAddr sequence_address{regs.report_semaphore.Address()};
             const u32 payload = regs.report_semaphore.payload;
-            std::function<void()> operation([this, sequence_address, payload] {
+            [this, sequence_address, payload] {
                 memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
                 memory_manager.Write<u64>(sequence_address, payload);
-            });
-            rasterizer->SyncOperation(std::move(operation));
+            }();
         }
         break;
     case Regs::ReportSemaphore::Operation::Acquire:
@@ -579,11 +576,11 @@ void Maxwell3D::ProcessCounterReset() {

 void Maxwell3D::ProcessSyncPoint() {
     const u32 sync_point = regs.sync_info.sync_point.Value();
-    const auto condition = regs.sync_info.condition.Value();
-    [[maybe_unused]] const u32 cache_flush = regs.sync_info.clean_l2.Value();
-    if (condition == Regs::SyncInfo::Condition::RopWritesDone) {
-        rasterizer->SignalSyncPoint(sync_point);
+    const u32 cache_flush = regs.sync_info.clean_l2.Value();
+    if (cache_flush != 0) {
+        rasterizer->InvalidateGPUCache();
     }
+    rasterizer->SignalSyncPoint(sync_point);
 }

 std::optional<u64> Maxwell3D::GetQueryResult() {

@@ -75,11 +75,10 @@ void Puller::ProcessSemaphoreTriggerMethod() {
     if (op == GpuSemaphoreOperation::WriteLong) {
         const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
         const u32 payload = regs.semaphore_sequence;
-        std::function<void()> operation([this, sequence_address, payload] {
+        [this, sequence_address, payload] {
             memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks());
             memory_manager.Write<u64>(sequence_address, payload);
-        });
-        rasterizer->SignalFence(std::move(operation));
+        }();
     } else {
         do {
             const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())};

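Both GPU hunks above (Maxwell3D and the puller) swap a std::function that used to be queued on the rasterizer for an immediately-invoked lambda, so the semaphore write now happens synchronously instead of being deferred. A small illustration of the difference, with made-up names:

#include <cstdint>
#include <functional>

static uint64_t storage;

void Deferred(std::function<void()>& queue_slot) {
    // old shape: capture now, run later when the consumer drains the queue
    queue_slot = [] { storage = 42; };
}

void Immediate() {
    // new shape: the same lambda, but invoked on the spot
    [] { storage = 42; }();
}

int main() {
    std::function<void()> slot;
    Deferred(slot);
    slot();      // effect happens only when explicitly run
    Immediate(); // effect happens immediately
}
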
@@ -59,11 +59,10 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
         std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
             return query_pool == *pool;
         });
+    ASSERT(it != std::end(pools));

-    if (it != std::end(pools)) {
-        const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
-        usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
-    }
+    const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
+    usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
 }

 QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,

@@ -120,8 +120,8 @@ void EmuThread::run() {
         }
     }

-    // Shutdown the main emulated process
-    system.ShutdownMainProcess();
+    // Shutdown the core emulation
+    system.Shutdown();

 #if MICROPROFILE_ENABLED
     MicroProfileOnThreadExit();

@@ -294,7 +294,6 @@ GMainWindow::GMainWindow(std::unique_ptr<Config> config_, bool has_broken_vulkan
 #ifdef __linux__
     SetupSigInterrupts();
 #endif
-    system->Initialize();

     Common::Log::Initialize();
     LoadTranslation();

@@ -302,8 +302,6 @@ int main(int argc, char** argv) {
     }

     Core::System system{};
-    system.Initialize();
-
     InputCommon::InputSubsystem input_subsystem{};

     // Apply the command line arguments
@@ -394,7 +392,7 @@ int main(int argc, char** argv) {
     }
     system.DetachDebugger();
     void(system.Pause());
-    system.ShutdownMainProcess();
+    system.Shutdown();

     detached_tasks.WaitForAllTasks();
     return 0;