early-access version 2929

pineappleEA 2022-09-01 09:37:35 +02:00
parent d6cf83dcef
commit 4521504d2e
54 changed files with 8468 additions and 8686 deletions

View file

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
-This is the source code for early-access 2927.
+This is the source code for early-access 2929.

 ## Legal Notice

Diffs for the vendored translation files under dist/languages/ are suppressed because they are too large. Changed files, with changed-line counts:

  628   dist/languages/ca.ts
  628   dist/languages/cs.ts
  628   dist/languages/da.ts
  630   dist/languages/de.ts
  628   dist/languages/el.ts
  922   dist/languages/es.ts
  730   dist/languages/fr.ts
  628   dist/languages/id.ts
  1612  dist/languages/it.ts
  1025  dist/languages/ja_JP.ts
        (one further suppressed file, name not captured)
  628   dist/languages/nb.ts
  628   dist/languages/nl.ts
  882   dist/languages/pl.ts
        (three further suppressed files, names not captured)
  628   dist/languages/sv.ts
        (one further suppressed file, name not captured)
  628   dist/languages/vi.ts
        (three further suppressed files, names not captured)

View file

@@ -46,19 +46,19 @@ public:
     void ReserveRange(u64 start, std::size_t size);

-    [[nodiscard]] constexpr const BaseAddr& operator[](std::size_t index) const {
+    [[nodiscard]] const BaseAddr& operator[](std::size_t index) const {
         return base_ptr[index];
     }

-    [[nodiscard]] constexpr BaseAddr& operator[](std::size_t index) {
+    [[nodiscard]] BaseAddr& operator[](std::size_t index) {
         return base_ptr[index];
     }

-    [[nodiscard]] constexpr BaseAddr* data() {
+    [[nodiscard]] BaseAddr* data() {
         return base_ptr;
     }

-    [[nodiscard]] constexpr const BaseAddr* data() const {
+    [[nodiscard]] const BaseAddr* data() const {
         return base_ptr;
     }

View file

@@ -10,9 +10,11 @@
 namespace Service::Nvidia::NvCore {

 struct ContainerImpl {
-    ContainerImpl(Tegra::Host1x::Host1x& host1x_) : file{host1x_}, manager{host1x_} {}
+    explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_)
+        : file{host1x_}, manager{host1x_}, device_file_data{} {}
     NvMap file;
     SyncpointManager manager;
+    Container::Host1xDeviceFileData device_file_data;
 };

 Container::Container(Tegra::Host1x::Host1x& host1x_) {
@@ -29,6 +31,14 @@ const NvMap& Container::GetNvMapFile() const {
     return impl->file;
 }

+Container::Host1xDeviceFileData& Container::Host1xDeviceFile() {
+    return impl->device_file_data;
+}
+
+const Container::Host1xDeviceFileData& Container::Host1xDeviceFile() const {
+    return impl->device_file_data;
+}
+
 SyncpointManager& Container::GetSyncpointManager() {
     return impl->manager;
 }

View file

@@ -4,15 +4,15 @@
 #pragma once

+#include <deque>
 #include <memory>
+#include <unordered_map>

-namespace Tegra {
+#include "core/hle/service/nvdrv/nvdata.h"

-namespace Host1x {
+namespace Tegra::Host1x {
 class Host1x;
-} // namespace Host1x
-
-} // namespace Tegra
+} // namespace Tegra::Host1x

 namespace Service::Nvidia::NvCore {
@@ -23,7 +23,7 @@ struct ContainerImpl;

 class Container {
 public:
-    Container(Tegra::Host1x::Host1x& host1x);
+    explicit Container(Tegra::Host1x::Host1x& host1x);
     ~Container();

     NvMap& GetNvMapFile();
@@ -34,6 +34,17 @@ public:
     const SyncpointManager& GetSyncpointManager() const;

+    struct Host1xDeviceFileData {
+        std::unordered_map<DeviceFD, u32> fd_to_id{};
+        std::deque<u32> syncpts_accumulated{};
+        u32 nvdec_next_id{};
+        u32 vic_next_id{};
+    };
+
+    Host1xDeviceFileData& Host1xDeviceFile();
+    const Host1xDeviceFileData& Host1xDeviceFile() const;
+
 private:
     std::unique_ptr<ContainerImpl> impl;
 };
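
What the new Host1xDeviceFileData block buys is easiest to see in a sketch (illustration only; the two Core::System instances and surrounding setup are assumptions, not code from this commit): per-fd ids and recycled syncpoints now live in each Container instance rather than in process-wide statics.

    Tegra::Host1x::Host1x host1x_a{system_a}; // assumed Core::System instances
    Tegra::Host1x::Host1x host1x_b{system_b};
    Service::Nvidia::NvCore::Container container_a{host1x_a};
    Service::Nvidia::NvCore::Container container_b{host1x_b};

    container_a.Host1xDeviceFile().fd_to_id[3] = 0;
    // With the old static members in nvhost_nvdec_common this write was
    // process-wide; now container_b's map is unaffected.
    ASSERT(container_b.Host1xDeviceFile().fd_to_id.empty());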

View file

@@ -119,7 +119,7 @@ std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
     std::scoped_lock lock(handles_lock);
     try {
         return handles.at(handle);
-    } catch ([[maybe_unused]] std::out_of_range& e) {
+    } catch (std::out_of_range&) {
         return nullptr;
     }
 }
@@ -128,7 +128,7 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) {
     std::scoped_lock lock(handles_lock);
     try {
         return handles.at(handle)->address;
-    } catch ([[maybe_unused]] std::out_of_range& e) {
+    } catch (std::out_of_range&) {
         return 0;
     }
 }
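
The catch-clause cleanups in this commit all follow the same idiom; a generic sketch (assumed names, not commit code): leaving the exception parameter unnamed means nothing is unused, so the [[maybe_unused]] attribute can be dropped.

    try {
        value = table.at(key);           // assumed std::map-style lookup
    } catch (const std::out_of_range&) { // unnamed parameter: no unused warning
        value = fallback;
    }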

View file

@@ -98,35 +98,6 @@ public:
         }
     };

-private:
-    std::list<std::shared_ptr<Handle>> unmap_queue{};
-    std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`
-
-    std::unordered_map<Handle::Id, std::shared_ptr<Handle>>
-        handles{}; //!< Main owning map of handles
-    std::mutex handles_lock; //!< Protects access to `handles`
-
-    static constexpr u32 HandleIdIncrement{
-        4}; //!< Each new handle ID is an increment of 4 from the previous
-    std::atomic<u32> next_handle_id{HandleIdIncrement};
-    Tegra::Host1x::Host1x& host1x;
-
-    void AddHandle(std::shared_ptr<Handle> handle);
-
-    /**
-     * @brief Unmaps and frees the SMMU memory region a handle is mapped to
-     * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this
-     */
-    void UnmapHandle(Handle& handle_description);
-
-    /**
-     * @brief Removes a handle from the map taking its dupes into account
-     * @note handle_description.mutex MUST be locked when calling this
-     * @return If the handle was removed from the map
-     */
-    bool TryRemoveHandle(const Handle& handle_description);
-
-public:
     /**
      * @brief Encapsulates the result of a FreeHandle operation
      */
@@ -136,7 +107,7 @@ public:
         bool was_uncached; //!< If the handle was allocated as uncached
     };

-    NvMap(Tegra::Host1x::Host1x& host1x);
+    explicit NvMap(Tegra::Host1x::Host1x& host1x);

     /**
      * @brief Creates an unallocated handle of the given size
@@ -172,5 +143,33 @@ public:
      * describing the prior state of the handle
      */
     std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internal_session);
+
+private:
+    std::list<std::shared_ptr<Handle>> unmap_queue{};
+    std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`
+
+    std::unordered_map<Handle::Id, std::shared_ptr<Handle>>
+        handles{}; //!< Main owning map of handles
+    std::mutex handles_lock; //!< Protects access to `handles`
+
+    static constexpr u32 HandleIdIncrement{
+        4}; //!< Each new handle ID is an increment of 4 from the previous
+    std::atomic<u32> next_handle_id{HandleIdIncrement};
+    Tegra::Host1x::Host1x& host1x;
+
+    void AddHandle(std::shared_ptr<Handle> handle);
+
+    /**
+     * @brief Unmaps and frees the SMMU memory region a handle is mapped to
+     * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this
+     */
+    void UnmapHandle(Handle& handle_description);
+
+    /**
+     * @brief Removes a handle from the map taking its dupes into account
+     * @note handle_description.mutex MUST be locked when calling this
+     * @return If the handle was removed from the map
+     */
+    bool TryRemoveHandle(const Handle& handle_description);
 };

 } // namespace Service::Nvidia::NvCore

View file

@@ -18,23 +18,23 @@ SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host
     ReserveSyncpoint(VBlank0SyncpointId, true);
     ReserveSyncpoint(VBlank1SyncpointId, true);

-    for (u32 syncpointId : channel_syncpoints) {
-        if (syncpointId) {
-            ReserveSyncpoint(syncpointId, false);
+    for (u32 syncpoint_id : channel_syncpoints) {
+        if (syncpoint_id) {
+            ReserveSyncpoint(syncpoint_id, false);
         }
     }
 }

 SyncpointManager::~SyncpointManager() = default;

-u32 SyncpointManager::ReserveSyncpoint(u32 id, bool clientManaged) {
+u32 SyncpointManager::ReserveSyncpoint(u32 id, bool client_managed) {
     if (syncpoints.at(id).reserved) {
         ASSERT_MSG(false, "Requested syncpoint is in use");
         return 0;
     }
     syncpoints.at(id).reserved = true;
-    syncpoints.at(id).interfaceManaged = clientManaged;
+    syncpoints.at(id).interface_managed = client_managed;

     return id;
 }
@@ -49,9 +49,9 @@ u32 SyncpointManager::FindFreeSyncpoint() {
     return 0;
 }

-u32 SyncpointManager::AllocateSyncpoint(bool clientManaged) {
+u32 SyncpointManager::AllocateSyncpoint(bool client_managed) {
     std::lock_guard lock(reservation_lock);
-    return ReserveSyncpoint(FindFreeSyncpoint(), clientManaged);
+    return ReserveSyncpoint(FindFreeSyncpoint(), client_managed);
 }

 void SyncpointManager::FreeSyncpoint(u32 id) {
@@ -64,7 +64,7 @@ bool SyncpointManager::IsSyncpointAllocated(u32 id) {
     return (id <= SyncpointCount) && syncpoints[id].reserved;
 }

-bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {
+bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) const {
     const SyncpointInfo& syncpoint{syncpoints.at(id)};

     if (!syncpoint.reserved) {
@@ -74,10 +74,10 @@ bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {
     // If the interface manages counters then we don't keep track of the maximum value as it handles
     // sanity checking the values then
-    if (syncpoint.interfaceManaged) {
-        return static_cast<s32>(syncpoint.counterMin - threshold) >= 0;
+    if (syncpoint.interface_managed) {
+        return static_cast<s32>(syncpoint.counter_min - threshold) >= 0;
     } else {
-        return (syncpoint.counterMax - threshold) >= (syncpoint.counterMin - threshold);
+        return (syncpoint.counter_max - threshold) >= (syncpoint.counter_min - threshold);
     }
 }
@@ -87,7 +87,7 @@ u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {
         return 0;
     }

-    return syncpoints.at(id).counterMax += amount;
+    return syncpoints.at(id).counter_max += amount;
 }

 u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
@@ -96,7 +96,7 @@ u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
         return 0;
     }

-    return syncpoints.at(id).counterMin;
+    return syncpoints.at(id).counter_min;
 }

 u32 SyncpointManager::UpdateMin(u32 id) {
@@ -105,8 +105,8 @@ u32 SyncpointManager::UpdateMin(u32 id) {
         return 0;
     }

-    syncpoints.at(id).counterMin = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
-    return syncpoints.at(id).counterMin;
+    syncpoints.at(id).counter_min = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
+    return syncpoints.at(id).counter_min;
 }

 NvFence SyncpointManager::GetSyncpointFence(u32 id) {
@@ -115,7 +115,7 @@ NvFence SyncpointManager::GetSyncpointFence(u32 id) {
         return NvFence{};
     }

-    return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counterMax};
+    return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counter_max};
 }

 } // namespace Service::Nvidia::NvCore
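
The signed-cast comparison in HasSyncpointExpired above is worth a standalone illustration. The following is a minimal sketch (a self-contained reproduction, not code from this commit): syncpoint counters are free-running u32 values, so the threshold comparison must be evaluated modulo 2^32, which the unsigned subtraction plus signed cast achieves for distances within plus or minus 2^31.

    #include <cassert>
    #include <cstdint>

    // Wraparound-safe "counter has reached threshold" test, mirroring
    // static_cast<s32>(syncpoint.counter_min - threshold) >= 0 above.
    bool HasExpired(uint32_t counter_min, uint32_t threshold) {
        return static_cast<int32_t>(counter_min - threshold) >= 0;
    }

    int main() {
        assert(HasExpired(10, 5));          // plainly past the threshold
        assert(!HasExpired(5, 10));         // not yet reached
        assert(HasExpired(2, 0xFFFFFFF0u)); // still correct across u32 wraparound
        return 0;
    }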

View file

@@ -11,13 +11,9 @@
 #include "common/common_types.h"
 #include "core/hle/service/nvdrv/nvdata.h"

-namespace Tegra {
-
-namespace Host1x {
+namespace Tegra::Host1x {
 class Host1x;
-} // namespace Host1x
-
-} // namespace Tegra
+} // namespace Tegra::Host1x

 namespace Service::Nvidia::NvCore {
@@ -54,15 +50,15 @@ public:
      * @brief Finds a free syncpoint and reserves it
      * @return The ID of the reserved syncpoint
      */
-    u32 AllocateSyncpoint(bool clientManaged);
+    u32 AllocateSyncpoint(bool client_managed);

     /**
      * @url
      * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259
      */
-    bool HasSyncpointExpired(u32 id, u32 threshold);
+    bool HasSyncpointExpired(u32 id, u32 threshold) const;

-    bool IsFenceSignalled(NvFence fence) {
+    bool IsFenceSignalled(NvFence fence) const {
         return HasSyncpointExpired(fence.id, fence.value);
     }
@@ -107,7 +103,7 @@ private:
     /**
      * @note reservation_lock should be locked when calling this
      */
-    u32 ReserveSyncpoint(u32 id, bool clientManaged);
+    u32 ReserveSyncpoint(u32 id, bool client_managed);

     /**
      * @return The ID of the first free syncpoint
@@ -115,15 +111,15 @@ private:
     u32 FindFreeSyncpoint();

     struct SyncpointInfo {
-        std::atomic<u32> counterMin; //!< The least value the syncpoint can be (The value it was
-                                     //!< when it was last synchronized with host1x)
-        std::atomic<u32> counterMax; //!< The maximum value the syncpoint can reach according to the
-                                     //!< current usage
-        bool interfaceManaged; //!< If the syncpoint is managed by a host1x client interface, a
-                               //!< client interface is a HW block that can handle host1x
-                               //!< transactions on behalf of a host1x client (Which would otherwise
-                               //!< need to be manually synced using PIO which is synchronous and
-                               //!< requires direct cooperation of the CPU)
+        std::atomic<u32> counter_min; //!< The least value the syncpoint can be (The value it was
+                                      //!< when it was last synchronized with host1x)
+        std::atomic<u32> counter_max; //!< The maximum value the syncpoint can reach according to
+                                      //!< the current usage
+        bool interface_managed; //!< If the syncpoint is managed by a host1x client interface, a
+                                //!< client interface is a HW block that can handle host1x
+                                //!< transactions on behalf of a host1x client (Which would
+                                //!< otherwise need to be manually synced using PIO which is
+                                //!< synchronous and requires direct cooperation of the CPU)
         bool reserved; //!< If the syncpoint is reserved or not, not to be confused with a reserved
                        //!< value
     };

View file

@@ -106,7 +106,7 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
         return NvResult::BadValue;
     }

-    if (!(params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES)) {
+    if ((params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES) == 0) {
         LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size);
         return NvResult::BadValue;
     }
@@ -124,12 +124,13 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
         vm.va_range_end = params.va_range_end;
     }

-    const u64 start_pages{vm.va_range_start >> VM::PAGE_SIZE_BITS};
-    const u64 end_pages{vm.va_range_split >> VM::PAGE_SIZE_BITS};
+    const auto start_pages{static_cast<u32>(vm.va_range_start >> VM::PAGE_SIZE_BITS)};
+    const auto end_pages{static_cast<u32>(vm.va_range_split >> VM::PAGE_SIZE_BITS)};
     vm.small_page_allocator = std::make_shared<VM::Allocator>(start_pages, end_pages);

-    const u64 start_big_pages{vm.va_range_split >> vm.big_page_size_bits};
-    const u64 end_big_pages{(vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits};
+    const auto start_big_pages{static_cast<u32>(vm.va_range_split >> vm.big_page_size_bits)};
+    const auto end_big_pages{
+        static_cast<u32>((vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits)};
     vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages);

     gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits,
@@ -210,10 +211,11 @@ void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
     // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
     // Only FreeSpace can unmap them fully
-    if (mapping->sparse_alloc)
+    if (mapping->sparse_alloc) {
         gmmu->MapSparse(offset, mapping->size, mapping->big_page);
-    else
+    } else {
         gmmu->Unmap(offset, mapping->size);
+    }

     mapping_map.erase(offset);
 }
@@ -256,7 +258,7 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>&
             allocator.Free(static_cast<u32>(params.offset >> page_size_bits),
                            static_cast<u32>(allocation.size >> page_size_bits));
             allocation_map.erase(params.offset);
-        } catch ([[maybe_unused]] const std::out_of_range& e) {
+        } catch (const std::out_of_range&) {
             return NvResult::BadValue;
         }
@@ -351,7 +353,7 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
             gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);

             return NvResult::Success;
-        } catch ([[maybe_unused]] const std::out_of_range& e) {
+        } catch (const std::out_of_range&) {
             LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}",
                         params.offset);
             return NvResult::BadValue;
@@ -367,11 +369,11 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
     u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};

     bool big_page{[&]() {
-        if (Common::IsAligned(handle->align, vm.big_page_size))
+        if (Common::IsAligned(handle->align, vm.big_page_size)) {
             return true;
-        else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE))
+        } else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) {
             return false;
-        else {
+        } else {
             ASSERT(false);
             return false;
         }
@@ -450,7 +452,7 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
         }

         mapping_map.erase(params.offset);
-    } catch ([[maybe_unused]] const std::out_of_range& e) {
+    } catch (const std::out_of_range&) {
         LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
     }
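
The rewritten big-page check is a plain bitmask membership test; a hedged sketch (the mask value here is an assumption for illustration, not necessarily the one yuzu defines):

    #include <cassert>
    #include <cstdint>

    // Assumed mask: each supported size contributes one bit (64 KiB | 128 KiB).
    constexpr uint32_t kSupportedBigPageSizes = 0x10000 | 0x20000;

    bool IsSupportedBigPageSize(uint32_t big_page_size) {
        // Valid only if the requested size's bit is present in the mask.
        return (big_page_size & kSupportedBigPageSizes) != 0;
    }

    int main() {
        assert(IsSupportedBigPageSize(0x10000)); // 64 KiB
        assert(!IsSupportedBigPageSize(0x1000)); // 4 KiB small page: rejected
        return 0;
    }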

View file

@@ -79,7 +79,7 @@ private:
         // Used for waiting on a syncpoint & canceling it.
         Tegra::Host1x::SyncpointManager::ActionHandle wait_handle{};

-        bool IsBeingUsed() {
+        bool IsBeingUsed() const {
             const auto current_status = status.load(std::memory_order_acquire);
             return current_status == EventState::Waiting ||
                    current_status == EventState::Cancelling ||

View file

@@ -184,7 +184,7 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
               params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
               params.unk3);

-    if (channel_state->initiated) {
+    if (channel_state->initialized) {
         LOG_CRITICAL(Service_NVDRV, "Already allocated!");
         return NvResult::AlreadyAllocated;
     }

View file

@@ -4,13 +4,12 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
 #include "video_core/renderer_base.h"

 namespace Service::Nvidia::Devices {
-u32 nvhost_nvdec::next_id{};

 nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
     : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
 nvhost_nvdec::~nvhost_nvdec() = default;
@@ -21,8 +20,9 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
     case 0x0:
         switch (command.cmd) {
         case 0x1: {
-            if (!fd_to_id.contains(fd)) {
-                fd_to_id[fd] = next_id++;
+            auto& host1x_file = core.Host1xDeviceFile();
+            if (!host1x_file.fd_to_id.contains(fd)) {
+                host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
             }
             return Submit(fd, input, output);
         }
@@ -70,8 +70,9 @@ void nvhost_nvdec::OnOpen(DeviceFD fd) {}

 void nvhost_nvdec::OnClose(DeviceFD fd) {
     LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
-    const auto iter = fd_to_id.find(fd);
-    if (iter != fd_to_id.end()) {
+    auto& host1x_file = core.Host1xDeviceFile();
+    const auto iter = host1x_file.fd_to_id.find(fd);
+    if (iter != host1x_file.fd_to_id.end()) {
         system.GPU().ClearCdmaInstance(iter->second);
     }
 }

View file

@@ -22,9 +22,6 @@ public:

     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
-
-private:
-    static u32 next_id;
 };

 } // namespace Service::Nvidia::Devices

View file

@@ -46,13 +46,11 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
 }
 } // Anonymous namespace

-std::unordered_map<DeviceFD, u32> nvhost_nvdec_common::fd_to_id{};
-std::deque<u32> nvhost_nvdec_common::syncpts_accumulated{};
-
 nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_,
                                          NvCore::ChannelType channel_type_)
     : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()},
       nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {
+    auto& syncpts_accumulated = core.Host1xDeviceFile().syncpts_accumulated;
     if (syncpts_accumulated.empty()) {
         channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
     } else {
@@ -60,8 +58,9 @@ nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Containe
         syncpts_accumulated.pop_front();
     }
 }
+
 nvhost_nvdec_common::~nvhost_nvdec_common() {
-    syncpts_accumulated.push_back(channel_syncpoint);
+    core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint);
 }

 NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
@@ -108,7 +107,7 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
             Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
             system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
                                       cmdlist.size() * sizeof(u32));
-            gpu.PushCommandBuffer(fd_to_id[fd], cmdlist);
+            gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
         }
     std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
     // Some games expect command_buffers to be written back
@@ -186,8 +185,4 @@ Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) {
     return nullptr;
 }

-void nvhost_nvdec_common::Reset() {
-    fd_to_id.clear();
-}
-
 } // namespace Service::Nvidia::Devices
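
The constructor/destructor pair above implements syncpoint recycling through the container-owned deque; a simplified sketch of the pattern (fake types, illustration only):

    #include <cstdint>
    #include <deque>

    struct FakeSyncpointManager {
        uint32_t next = 1;
        uint32_t AllocateSyncpoint(bool /*client_managed*/) { return next++; }
    };

    // Channel construction: reuse a parked syncpoint if one is available.
    uint32_t AcquireChannelSyncpoint(FakeSyncpointManager& manager,
                                     std::deque<uint32_t>& syncpts_accumulated) {
        if (syncpts_accumulated.empty()) {
            return manager.AllocateSyncpoint(false);
        }
        const uint32_t id = syncpts_accumulated.front();
        syncpts_accumulated.pop_front();
        return id;
    }

    // Channel destruction: park the syncpoint for the next channel.
    void ReleaseChannelSyncpoint(std::deque<uint32_t>& syncpts_accumulated, uint32_t id) {
        syncpts_accumulated.push_back(id);
    }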

View file

@@ -25,8 +25,6 @@ public:
                         NvCore::ChannelType channel_type);
     ~nvhost_nvdec_common() override;

-    static void Reset();
-
 protected:
     struct IoctlSetNvmapFD {
         s32_le nvmap_fd{};
@@ -119,7 +117,6 @@ protected:

     Kernel::KEvent* QueryEvent(u32 event_id) override;

-    static std::unordered_map<DeviceFD, u32> fd_to_id;
     u32 channel_syncpoint;
     s32_le nvmap_fd{};
     u32_le submit_timeout{};
@@ -128,8 +125,6 @@ protected:
     NvCore::NvMap& nvmap;
     NvCore::ChannelType channel_type;
     std::array<u32, MaxSyncPoints> device_syncpoints{};
-
-    static std::deque<u32> syncpts_accumulated;
 };
 }; // namespace Devices
 } // namespace Service::Nvidia

View file

@@ -4,13 +4,12 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/devices/nvhost_vic.h"
 #include "video_core/renderer_base.h"

 namespace Service::Nvidia::Devices {
-u32 nvhost_vic::next_id{};

 nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
     : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {}
@@ -21,11 +20,13 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i
     switch (command.group) {
     case 0x0:
         switch (command.cmd) {
-        case 0x1:
-            if (!fd_to_id.contains(fd)) {
-                fd_to_id[fd] = next_id++;
+        case 0x1: {
+            auto& host1x_file = core.Host1xDeviceFile();
+            if (!host1x_file.fd_to_id.contains(fd)) {
+                host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++;
             }
             return Submit(fd, input, output);
+        }
         case 0x2:
             return GetSyncpoint(input, output);
         case 0x3:
@@ -69,8 +70,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& i
 void nvhost_vic::OnOpen(DeviceFD fd) {}

 void nvhost_vic::OnClose(DeviceFD fd) {
-    const auto iter = fd_to_id.find(fd);
-    if (iter != fd_to_id.end()) {
+    auto& host1x_file = core.Host1xDeviceFile();
+    const auto iter = host1x_file.fd_to_id.find(fd);
+    if (iter != host1x_file.fd_to_id.end()) {
         system.GPU().ClearCdmaInstance(iter->second);
     }
 }

View file

@@ -21,8 +21,5 @@ public:

     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
-
-private:
-    static u32 next_id;
 };

 } // namespace Service::Nvidia::Devices

View file

@@ -23,8 +23,8 @@ public:
     explicit nvmap(Core::System& system_, NvCore::Container& container);
     ~nvmap() override;

-    nvmap(nvmap const&) = delete;
-    nvmap& operator=(nvmap const&) = delete;
+    nvmap(const nvmap&) = delete;
+    nvmap& operator=(const nvmap&) = delete;

     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                     std::vector<u8>& output) override;

View file

@@ -101,9 +101,7 @@ Module::Module(Core::System& system)
     };
 }

-Module::~Module() {
-    Devices::nvhost_nvdec_common::Reset();
-}
+Module::~Module() {}

 NvResult Module::VerifyFD(DeviceFD fd) const {
     if (fd < 0) {

View file

@@ -46,7 +46,7 @@ class Module;

 class EventInterface {
 public:
-    EventInterface(Module& module_);
+    explicit EventInterface(Module& module_);
     ~EventInterface();

     Kernel::KEvent* CreateEvent(std::string name);

View file

@@ -14,10 +14,7 @@

 namespace Tegra::Control {

-ChannelState::ChannelState(s32 bind_id_) {
-    bind_id = bind_id_;
-    initiated = false;
-}
+ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {}

 void ChannelState::Init(Core::System& system, GPU& gpu) {
     ASSERT(memory_manager);
@@ -27,7 +24,7 @@ void ChannelState::Init(Core::System& system, GPU& gpu) {
     kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager);
     maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
     kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
-    initiated = true;
+    initialized = true;
 }

 void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
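
The constructor rewrite follows standard C++ practice (generic illustration with assumed types): initialize members in the member-initializer list instead of assigning in the body; initialized{} value-initializes the flag to false.

    struct Widget {
        explicit Widget(int id_) : id{id_}, ready{} {} // ready starts as false
        int id;
        bool ready;
    };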

View file

@@ -34,7 +34,7 @@ class DmaPusher;
 namespace Control {

 struct ChannelState {
-    ChannelState(s32 bind_id);
+    explicit ChannelState(s32 bind_id);
     ChannelState(const ChannelState& state) = delete;
     ChannelState& operator=(const ChannelState&) = delete;
     ChannelState(ChannelState&& other) noexcept = default;
@@ -60,7 +60,7 @@ struct ChannelState {

     std::unique_ptr<DmaPusher> dma_pusher;

-    bool initiated{};
+    bool initialized{};
 };

 } // namespace Control

View file

@@ -32,7 +32,7 @@ namespace VideoCommon {
 class ChannelInfo {
 public:
     ChannelInfo() = delete;
-    ChannelInfo(Tegra::Control::ChannelState& state);
+    explicit ChannelInfo(Tegra::Control::ChannelState& state);
     ChannelInfo(const ChannelInfo& state) = delete;
     ChannelInfo& operator=(const ChannelInfo&) = delete;
     ChannelInfo(ChannelInfo&& other) = default;

View file

@@ -3,6 +3,7 @@

 #include <memory>

+#include "common/assert.h"
 #include "video_core/control/channel_state.h"
 #include "video_core/control/scheduler.h"
 #include "video_core/gpu.h"
@@ -13,8 +14,9 @@ Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
 Scheduler::~Scheduler() = default;

 void Scheduler::Push(s32 channel, CommandList&& entries) {
-    std::unique_lock<std::mutex> lk(scheduling_guard);
+    std::unique_lock lk(scheduling_guard);
     auto it = channels.find(channel);
+    ASSERT(it != channels.end());
     auto channel_state = it->second;
     gpu.BindChannel(channel_state->bind_id);
     channel_state->dma_pusher->Push(std::move(entries));
@@ -23,7 +25,7 @@ void Scheduler::Push(s32 channel, CommandList&& entries) {

 void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
     s32 channel = new_channel->bind_id;
-    std::unique_lock<std::mutex> lk(scheduling_guard);
+    std::unique_lock lk(scheduling_guard);
     channels.emplace(channel, new_channel);
 }
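
A hedged usage sketch of the ordering the new ASSERT enforces (the surrounding setup is assumed, not from this commit): a channel must be declared to the scheduler before command lists are pushed to its id.

    auto channel = std::make_shared<Tegra::Control::ChannelState>(bind_id);
    scheduler.DeclareChannel(channel);                    // registers bind_id
    scheduler.Push(channel->bind_id, std::move(entries)); // now safe
    // Pushing an undeclared id now fails loudly at the new
    // ASSERT(it != channels.end()) instead of dereferencing channels.end().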

View file

@@ -19,7 +19,7 @@ struct ChannelState;

 class Scheduler {
 public:
-    Scheduler(GPU& gpu_);
+    explicit Scheduler(GPU& gpu_);
     ~Scheduler();

     void Push(s32 channel, CommandList&& entries);

View file

@@ -195,7 +195,7 @@ public:
         BitField<24, 2, u32> num_dst_components_minus_one;
     };

-    Swizzle GetComponent(size_t i) {
+    Swizzle GetComponent(size_t i) const {
         const u32 raw = dst_components_raw;
         return static_cast<Swizzle>((raw >> (i * 3)) & 0x7);
     }

View file

@@ -355,7 +355,7 @@ struct GPU::Impl {
     std::condition_variable sync_cv;

-    std::list<std::function<void(void)>> sync_requests;
+    std::list<std::function<void()>> sync_requests;
     std::atomic<u64> current_sync_fence{};
     u64 last_sync_fence{};
     std::mutex sync_request_mutex;

View file

@@ -19,7 +19,7 @@ namespace Host1x {

 class Host1x {
 public:
-    Host1x(Core::System& system);
+    explicit Host1x(Core::System& system);

     SyncpointManager& GetSyncpointManager() {
         return syncpoint_manager;

View file

@@ -12,13 +12,13 @@ MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));

 SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
     std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value,
-    std::function<void(void)>& action) {
+    std::function<void()>&& action) {
     if (syncpoint.load(std::memory_order_acquire) >= expected_value) {
         action();
         return {};
     }

-    std::unique_lock<std::mutex> lk(guard);
+    std::unique_lock lk(guard);
     if (syncpoint.load(std::memory_order_relaxed) >= expected_value) {
         action();
         return {};
@@ -30,12 +30,12 @@ SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
         }
         ++it;
     }
-    return action_storage.emplace(it, expected_value, action);
+    return action_storage.emplace(it, expected_value, std::move(action));
 }

 void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
                                         ActionHandle& handle) {
-    std::unique_lock<std::mutex> lk(guard);
+    std::unique_lock lk(guard);
     action_storage.erase(handle);
 }
@@ -68,7 +68,7 @@ void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_var
                                  std::list<RegisteredAction>& action_storage) {
     auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1};

-    std::unique_lock<std::mutex> lk(guard);
+    std::unique_lock lk(guard);
     auto it = action_storage.begin();
     while (it != action_storage.end()) {
         if (it->expected_value > new_value) {
@@ -87,7 +87,7 @@ void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable
         return;
     }

-    std::unique_lock<std::mutex> lk(guard);
+    std::unique_lock lk(guard);
     wait_cv.wait(lk, pred);
 }

View file

@@ -18,34 +18,34 @@ namespace Host1x {

 class SyncpointManager {
 public:
-    u32 GetGuestSyncpointValue(u32 id) {
+    u32 GetGuestSyncpointValue(u32 id) const {
         return syncpoints_guest[id].load(std::memory_order_acquire);
     }

-    u32 GetHostSyncpointValue(u32 id) {
+    u32 GetHostSyncpointValue(u32 id) const {
         return syncpoints_host[id].load(std::memory_order_acquire);
     }

     struct RegisteredAction {
-        RegisteredAction(u32 expected_value_, std::function<void(void)>& action_)
-            : expected_value{expected_value_}, action{action_} {}
+        explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_)
+            : expected_value{expected_value_}, action{std::move(action_)} {}
         u32 expected_value;
-        std::function<void(void)> action;
+        std::function<void()> action;
     };
     using ActionHandle = std::list<RegisteredAction>::iterator;

     template <typename Func>
     ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
-        std::function<void(void)> func(action);
+        std::function<void()> func(action);
         return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id],
-                              expected_value, func);
+                              expected_value, std::move(func));
     }

     template <typename Func>
     ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
-        std::function<void(void)> func(action);
+        std::function<void()> func(action);
         return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id],
-                              expected_value, func);
+                              expected_value, std::move(func));
     }

     void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle);
@@ -60,11 +60,11 @@ public:

     void WaitHost(u32 syncpoint_id, u32 expected_value);

-    bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) {
+    bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const {
         return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
     }

-    bool IsReadyHost(u32 syncpoint_id, u32 expected_value) {
+    bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const {
         return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
     }
@@ -74,7 +74,7 @@ private:
     ActionHandle RegisterAction(std::atomic<u32>& syncpoint,
                                 std::list<RegisteredAction>& action_storage, u32 expected_value,
-                                std::function<void(void)>& action);
+                                std::function<void()>&& action);

     void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle);
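
A minimal usage sketch (assumed calling code, not from this commit) of what the rvalue std::function plumbing buys: the callback is moved end-to-end into RegisteredAction::action rather than copied at each hop.

    std::atomic<bool> fence_signalled{false};
    auto handle = syncpoint_manager.RegisterHostAction(syncpoint_id, expected_value, [&] {
        // Runs immediately if the syncpoint already reached expected_value,
        // otherwise when Increment() advances past it.
        fence_signalled.store(true, std::memory_order_release);
    });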

View file

@@ -126,7 +126,7 @@ private:
     void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size);

     template <bool is_big_page>
-    [[nodiscard]] inline std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
+    [[nodiscard]] std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
         if constexpr (is_big_page) {
             return (gpu_addr >> big_page_bits) & big_page_table_mask;
         } else {