early-access version 2790

This commit is contained in:
parent a67a0e1eb5
commit 518fdfccad

155 changed files with 9311 additions and 2805 deletions
README.md
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============

-This is the source code for early-access 2788.
+This is the source code for early-access 2790.

 ## Legal Notice

CMakeLists.txt
@@ -112,6 +112,7 @@ else()

 if (ARCHITECTURE_x86_64)
     add_compile_options("-mcx16")
+    add_compile_options("-fwrapv")
 endif()

 if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang)
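Context for the new flag: -fwrapv makes GCC and Clang treat signed integer overflow as defined two's-complement wraparound instead of undefined behavior, which suits emulator arithmetic that wraps on purpose. A minimal illustration of the guarantee (not part of the commit):

    #include <climits>
    #include <cstdio>

    int main() {
        int x = INT_MAX;
        x += 1; // undefined behavior by default; wraps to INT_MIN under -fwrapv
        std::printf("%d\n", x);
        return 0;
    }
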
src/common/CMakeLists.txt
@@ -39,6 +39,8 @@ add_custom_command(OUTPUT scm_rev.cpp
 )

 add_library(common STATIC
+    address_space.h
+    address_space.cpp
     algorithm.h
     alignment.h
     assert.cpp
@@ -100,6 +102,8 @@ add_library(common STATIC
     microprofile.cpp
     microprofile.h
     microprofileui.h
+    multi_level_page_table.cpp
+    multi_level_page_table.h
     nvidia_flags.cpp
     nvidia_flags.h
     page_table.cpp
src/common/address_space.cpp (new executable file, 11 lines)
@@ -0,0 +1,11 @@
// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
// Licensed under GPLv3 or any later version
// Refer to the license.txt file included.

#include "common/address_space.inc"

namespace Common {

template class Common::FlatAllocator<u32, 0, 32>;

}
src/common/address_space.h (new executable file, 136 lines)
@@ -0,0 +1,136 @@
// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)
// Licensed under GPLv3 or any later version
// Refer to the license.txt file included.

#pragma once

#include <concepts>
#include <functional>
#include <mutex>
#include <vector>

#include "common/common_types.h"

namespace Common {
template <typename VaType, size_t AddressSpaceBits>
concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >= AddressSpaceBits;

struct EmptyStruct {};

/**
 * @brief FlatAddressSpaceMap provides a generic VA->PA mapping implementation using a sorted vector
 */
template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,
          bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct>
requires AddressSpaceValid<VaType, AddressSpaceBits>
class FlatAddressSpaceMap {
private:
    std::function<void(VaType, VaType)>
        unmapCallback{}; //!< Callback called when the mappings in a region have changed

protected:
    /**
     * @brief Represents a block of memory in the AS, the physical mapping is contiguous until
     * another block with a different phys address is hit
     */
    struct Block {
        VaType virt{UnmappedVa}; //!< VA of the block
        PaType phys{UnmappedPa}; //!< PA of the block, will increase 1-1 with VA until a new block
                                 //!< is encountered
        [[no_unique_address]] ExtraBlockInfo extraInfo;

        Block() = default;

        Block(VaType virt_, PaType phys_, ExtraBlockInfo extraInfo_)
            : virt(virt_), phys(phys_), extraInfo(extraInfo_) {}

        constexpr bool Valid() {
            return virt != UnmappedVa;
        }

        constexpr bool Mapped() {
            return phys != UnmappedPa;
        }

        constexpr bool Unmapped() {
            return phys == UnmappedPa;
        }

        bool operator<(const VaType& pVirt) const {
            return virt < pVirt;
        }
    };

    std::mutex blockMutex;
    std::vector<Block> blocks{Block{}};

    /**
     * @brief Maps a PA range into the given AS region
     * @note blockMutex MUST be locked when calling this
     */
    void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo);

    /**
     * @brief Unmaps the given range and merges it with other unmapped regions
     * @note blockMutex MUST be locked when calling this
     */
    void UnmapLocked(VaType virt, VaType size);

public:
    static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) +
                                      ((1ULL << (AddressSpaceBits - 1)) -
                                       1)}; //!< The maximum VA that this AS can technically reach

    VaType vaLimit{VaMaximum}; //!< A soft limit on the maximum VA of the AS

    FlatAddressSpaceMap(VaType vaLimit, std::function<void(VaType, VaType)> unmapCallback = {});

    FlatAddressSpaceMap() = default;

    void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo = {}) {
        std::scoped_lock lock(blockMutex);
        MapLocked(virt, phys, size, extraInfo);
    }

    void Unmap(VaType virt, VaType size) {
        std::scoped_lock lock(blockMutex);
        UnmapLocked(virt, size);
    }
};

/**
 * @brief FlatAllocator specialises FlatAddressSpaceMap to work as an allocator, with an
 * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block
 */
template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>
requires AddressSpaceValid<VaType, AddressSpaceBits>
class FlatAllocator
    : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> {
private:
    using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>;

    VaType currentLinearAllocEnd; //!< The end address for the initial linear allocation pass, once
                                  //!< this reaches the AS limit the slower allocation path will be
                                  //!< used

public:
    VaType vaStart; //!< The base VA of the allocator, no allocations will be below this

    FlatAllocator(VaType vaStart, VaType vaLimit = Base::VaMaximum);

    /**
     * @brief Allocates a region in the AS of the given size and returns its address
     */
    VaType Allocate(VaType size);

    /**
     * @brief Marks the given region in the AS as allocated
     */
    void AllocateFixed(VaType virt, VaType size);

    /**
     * @brief Frees an AS region so it can be used again
     */
    void Free(VaType virt, VaType size);
};
} // namespace Common
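Illustrative usage sketch for the classes above (not from the commit; the base address and sizes are assumptions). FlatAllocator<u32, 0, 32> is the one specialization that address_space.cpp explicitly instantiates:

    #include "common/address_space.h"
    #include "common/common_types.h"

    void AllocatorSketch() {
        // 32-bit address space where 0 marks an unmapped VA; base chosen arbitrarily
        Common::FlatAllocator<u32, 0, 32> allocator{0x1000};

        const u32 va = allocator.Allocate(0x2000); // fast linear pass, then gap search
        allocator.AllocateFixed(0x8000, 0x1000);   // mark a fixed region as in use
        allocator.Free(va, 0x2000);                // unmap and merge with free neighbours
    }

Note that Allocate returns a default-constructed value (0 here) once the address space is exhausted, so callers are expected to treat that as failure.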
src/common/address_space.inc (new executable file, 338 lines)
@@ -0,0 +1,338 @@
// SPDX-License-Identifier: GPLv3 or later
// Copyright © 2021 Skyline Team and Contributors (https://github.com/skyline-emu/)

#include "common/address_space.h"
#include "common/assert.h"

#define MAP_MEMBER(returnType)                                                                     \
    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,              \
              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo>                \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap<           \
        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
#define MAP_MEMBER_CONST()                                                                         \
    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,              \
              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo>                \
    requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap<                      \
        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>

#define MM_MEMBER(returnType)                                                                      \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>                         \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType                                \
    FlatMemoryManager<VaType, UnmappedVa, AddressSpaceBits>

#define ALLOC_MEMBER(returnType)                                                                   \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>                         \
    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType                                \
    FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
#define ALLOC_MEMBER_CONST()                                                                       \
    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>                         \
    requires AddressSpaceValid<VaType, AddressSpaceBits>                                           \
    FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>

namespace Common {
MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType vaLimit_,
                                        std::function<void(VaType, VaType)> unmapCallback_)
    : unmapCallback(std::move(unmapCallback_)), vaLimit(vaLimit_) {
    if (vaLimit > VaMaximum)
        UNREACHABLE_MSG("Invalid VA limit!");
}

MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extraInfo) {
    VaType virtEnd{virt + size};

    if (virtEnd > vaLimit)
        UNREACHABLE_MSG("Trying to map a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}",
                        virtEnd, vaLimit);

    auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)};
    if (blockEndSuccessor == blocks.begin())
        UNREACHABLE_MSG("Trying to map a block before the VA start: virtEnd: 0x{:X}", virtEnd);

    auto blockEndPredecessor{std::prev(blockEndSuccessor)};

    if (blockEndSuccessor != blocks.end()) {
        // We have blocks in front of us, if one is directly in front then we don't have to add a
        // tail
        if (blockEndSuccessor->virt != virtEnd) {
            PaType tailPhys{[&]() -> PaType {
                if constexpr (!PaContigSplit) {
                    return blockEndPredecessor
                        ->phys; // Always propagate unmapped regions rather than calculating offset
                } else {
                    if (blockEndPredecessor->Unmapped())
                        return blockEndPredecessor->phys; // Always propagate unmapped regions
                                                          // rather than calculating offset
                    else
                        return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt;
                }
            }()};

            if (blockEndPredecessor->virt >= virt) {
                // If this block's start would be overlapped by the map then reuse it as a tail
                // block
                blockEndPredecessor->virt = virtEnd;
                blockEndPredecessor->phys = tailPhys;
                blockEndPredecessor->extraInfo = blockEndPredecessor->extraInfo;

                // No longer predecessor anymore
                blockEndSuccessor = blockEndPredecessor--;
            } else {
                // Else insert a new one and we're done
                blocks.insert(blockEndSuccessor,
                              {Block(virt, phys, extraInfo),
                               Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)});
                if (unmapCallback)
                    unmapCallback(virt, size);

                return;
            }
        }
    } else {
        // blockEndPredecessor will always be unmapped as blocks has to be terminated by an unmapped
        // chunk
        if (blockEndPredecessor != blocks.begin() && blockEndPredecessor->virt >= virt) {
            // Move the unmapped block start backwards
            blockEndPredecessor->virt = virtEnd;

            // No longer predecessor anymore
            blockEndSuccessor = blockEndPredecessor--;
        } else {
            // Else insert a new one and we're done
            blocks.insert(blockEndSuccessor,
                          {Block(virt, phys, extraInfo), Block(virtEnd, UnmappedPa, {})});
            if (unmapCallback)
                unmapCallback(virt, size);

            return;
        }
    }

    auto blockStartSuccessor{blockEndSuccessor};

    // Walk the block vector to find the start successor as this is more efficient than another
    // binary search in most scenarios
    while (std::prev(blockStartSuccessor)->virt >= virt)
        blockStartSuccessor--;

    // Check that the start successor is either the end block or something in between
    if (blockStartSuccessor->virt > virtEnd) {
        UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt);
    } else if (blockStartSuccessor->virt == virtEnd) {
        // We need to create a new block as there are none spare that we would overwrite
        blocks.insert(blockStartSuccessor, Block(virt, phys, extraInfo));
    } else {
        // Erase overwritten blocks
        if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor)
            blocks.erase(eraseStart, blockEndSuccessor);

        // Reuse a block that would otherwise be overwritten as a start block
        blockStartSuccessor->virt = virt;
        blockStartSuccessor->phys = phys;
        blockStartSuccessor->extraInfo = extraInfo;
    }

    if (unmapCallback)
        unmapCallback(virt, size);
}

MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
    VaType virtEnd{virt + size};

    if (virtEnd > vaLimit)
        UNREACHABLE_MSG(
            "Trying to unmap a block past the VA limit: virtEnd: 0x{:X}, vaLimit: 0x{:X}", virtEnd,
            vaLimit);

    auto blockEndSuccessor{std::lower_bound(blocks.begin(), blocks.end(), virtEnd)};
    if (blockEndSuccessor == blocks.begin())
        UNREACHABLE_MSG("Trying to unmap a block before the VA start: virtEnd: 0x{:X}", virtEnd);

    auto blockEndPredecessor{std::prev(blockEndSuccessor)};

    auto walkBackToPredecessor{[&](auto iter) {
        while (iter->virt >= virt)
            iter--;

        return iter;
    }};

    auto eraseBlocksWithEndUnmapped{[&](auto unmappedEnd) {
        auto blockStartPredecessor{walkBackToPredecessor(unmappedEnd)};
        auto blockStartSuccessor{std::next(blockStartPredecessor)};

        auto eraseEnd{[&]() {
            if (blockStartPredecessor->Unmapped()) {
                // If the start predecessor is unmapped then we can erase everything in our region
                // and be done
                return std::next(unmappedEnd);
            } else {
                // Else reuse the end predecessor as the start of our unmapped region then erase all
                // up to it
                unmappedEnd->virt = virt;
                return unmappedEnd;
            }
        }()};

        // We can't have two unmapped regions after each other
        if (eraseEnd != blocks.end() &&
            (eraseEnd == blockStartSuccessor ||
             (blockStartPredecessor->Unmapped() && eraseEnd->Unmapped())))
            UNREACHABLE_MSG("Multiple contiguous unmapped regions are unsupported!");

        blocks.erase(blockStartSuccessor, eraseEnd);
    }};

    // We can avoid any splitting logic if these are the case
    if (blockEndPredecessor->Unmapped()) {
        if (blockEndPredecessor->virt > virt)
            eraseBlocksWithEndUnmapped(blockEndPredecessor);

        if (unmapCallback)
            unmapCallback(virt, size);

        return; // The region is unmapped, bail out early
    } else if (blockEndSuccessor->virt == virtEnd && blockEndSuccessor->Unmapped()) {
        eraseBlocksWithEndUnmapped(blockEndSuccessor);

        if (unmapCallback)
            unmapCallback(virt, size);

        return; // The region is unmapped here and doesn't need splitting, bail out early
    } else if (blockEndSuccessor == blocks.end()) {
        // This should never happen as the end should always follow an unmapped block
        UNREACHABLE_MSG("Unexpected Memory Manager state!");
    } else if (blockEndSuccessor->virt != virtEnd) {
        // If one block is directly in front then we don't have to add a tail

        // The previous block is mapped so we will need to add a tail with an offset
        PaType tailPhys{[&]() {
            if constexpr (PaContigSplit)
                return blockEndPredecessor->phys + virtEnd - blockEndPredecessor->virt;
            else
                return blockEndPredecessor->phys;
        }()};

        if (blockEndPredecessor->virt >= virt) {
            // If this block's start would be overlapped by the unmap then reuse it as a tail block
            blockEndPredecessor->virt = virtEnd;
            blockEndPredecessor->phys = tailPhys;

            // No longer predecessor anymore
            blockEndSuccessor = blockEndPredecessor--;
        } else {
            blocks.insert(blockEndSuccessor,
                          {Block(virt, UnmappedPa, {}),
                           Block(virtEnd, tailPhys, blockEndPredecessor->extraInfo)});
            if (unmapCallback)
                unmapCallback(virt, size);

            return; // The previous block is mapped and ends before
        }
    }

    // Walk the block vector to find the start predecessor as this is more efficient than another
    // binary search in most scenarios
    auto blockStartPredecessor{walkBackToPredecessor(blockEndSuccessor)};
    auto blockStartSuccessor{std::next(blockStartPredecessor)};

    if (blockStartSuccessor->virt > virtEnd) {
        UNREACHABLE_MSG("Unsorted block in AS map: virt: 0x{:X}", blockStartSuccessor->virt);
    } else if (blockStartSuccessor->virt == virtEnd) {
        // There are no blocks between the start and the end that would let us skip inserting a new
        // one for head

        // The previous block may be unmapped, if so we don't need to insert any unmaps after it
        if (blockStartPredecessor->Mapped())
            blocks.insert(blockStartSuccessor, Block(virt, UnmappedPa, {}));
    } else if (blockStartPredecessor->Unmapped()) {
        // If the previous block is unmapped
        blocks.erase(blockStartSuccessor, blockEndPredecessor);
    } else {
        // Erase overwritten blocks, skipping the first one as we have written the unmapped start
        // block there
        if (auto eraseStart{std::next(blockStartSuccessor)}; eraseStart != blockEndSuccessor)
            blocks.erase(eraseStart, blockEndSuccessor);

        // Add in the unmapped block header
        blockStartSuccessor->virt = virt;
        blockStartSuccessor->phys = UnmappedPa;
    }

    if (unmapCallback)
        unmapCallback(virt, size);
}

ALLOC_MEMBER_CONST()::FlatAllocator(VaType vaStart_, VaType vaLimit_)
    : Base(vaLimit_), currentLinearAllocEnd(vaStart_), vaStart(vaStart_) {}

ALLOC_MEMBER(VaType)::Allocate(VaType size) {
    std::scoped_lock lock(this->blockMutex);

    VaType allocStart{UnmappedVa};
    VaType allocEnd{currentLinearAllocEnd + size};

    // Avoid searching backwards in the address space if possible
    if (allocEnd >= currentLinearAllocEnd && allocEnd <= this->vaLimit) {
        auto allocEndSuccessor{
            std::lower_bound(this->blocks.begin(), this->blocks.end(), allocEnd)};
        if (allocEndSuccessor == this->blocks.begin())
            UNREACHABLE_MSG("First block in AS map is invalid!");

        auto allocEndPredecessor{std::prev(allocEndSuccessor)};
        if (allocEndPredecessor->virt <= currentLinearAllocEnd) {
            allocStart = currentLinearAllocEnd;
        } else {
            // Skip over any fixed mappings in front of us
            while (allocEndSuccessor != this->blocks.end()) {
                if (allocEndSuccessor->virt - allocEndPredecessor->virt < size ||
                    allocEndPredecessor->Mapped()) {
                    allocStart = allocEndPredecessor->virt;
                    break;
                }

                allocEndPredecessor = allocEndSuccessor++;

                // Use the VA limit to calculate if we can fit in the final block since it has no
                // successor
                if (allocEndSuccessor == this->blocks.end()) {
                    allocEnd = allocEndPredecessor->virt + size;

                    if (allocEnd >= allocEndPredecessor->virt && allocEnd <= this->vaLimit)
                        allocStart = allocEndPredecessor->virt;
                }
            }
        }
    }

    if (allocStart != UnmappedVa) {
        currentLinearAllocEnd = allocStart + size;
    } else { // If linear allocation overflows the AS then find a gap
        if (this->blocks.size() <= 2)
            UNREACHABLE_MSG("Unexpected allocator state!");

        auto searchPredecessor{this->blocks.begin()};
        auto searchSuccessor{std::next(searchPredecessor)};

        while (searchSuccessor != this->blocks.end() &&
               (searchSuccessor->virt - searchPredecessor->virt < size ||
                searchPredecessor->Mapped())) {
            searchPredecessor = searchSuccessor++;
        }

        if (searchSuccessor != this->blocks.end())
            allocStart = searchPredecessor->virt;
        else
            return {}; // AS is full
    }

    this->MapLocked(allocStart, true, size, {});
    return allocStart;
}

ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) {
    this->Map(virt, true, size);
}

ALLOC_MEMBER(void)::Free(VaType virt, VaType size) {
    this->Unmap(virt, size);
}
} // namespace Common
src/common/algorithm.h
@@ -24,4 +24,12 @@ template <class ForwardIt, class T, class Compare = std::less<>>
     return first != last && !comp(value, *first) ? first : last;
 }

+template <typename T, typename Func, typename... Args>
+T FoldRight(T initial_value, Func&& func, Args&&... args) {
+    T value{initial_value};
+    const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); };
+    (std::invoke(high_func, std::forward<Args>(args)), ...);
+    return value;
+}
+
 } // namespace Common
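Illustrative use of the new FoldRight helper (not part of the commit). Despite the name, the arguments are visited left to right, since the comma-operator fold expression expands that way:

    #include <algorithm>

    #include "common/algorithm.h"
    #include "common/common_types.h"

    u64 MaxOfThree(u64 a, u64 b, u64 c) {
        // Folds std::max over the pack, seeded with 0
        return Common::FoldRight(
            u64{0}, [](u64 acc, u64 x) { return std::max(acc, x); }, a, b, c);
    }
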
src/common/hash.h
@@ -19,4 +19,11 @@ struct PairHash {
     }
 };

+template <typename T>
+struct IdentityHash {
+    [[nodiscard]] size_t operator()(T value) const noexcept {
+        return static_cast<size_t>(value);
+    }
+};
+
 } // namespace Common
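Illustrative use of IdentityHash (not part of the commit): it suits keys that are already well-distributed unique integers, such as handle IDs, where rehashing would be wasted work:

    #include <unordered_map>

    #include "common/common_types.h"
    #include "common/hash.h"

    // Map from a handle ID to a reference count, keyed without an extra hash pass.
    using HandleRefs = std::unordered_map<u32, int, Common::IdentityHash<u32>>;
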
src/common/multi_level_page_table.cpp (new executable file, 6 lines)
@@ -0,0 +1,6 @@
#include "common/multi_level_page_table.inc"

namespace Common {
template class Common::MultiLevelPageTable<u64>;
template class Common::MultiLevelPageTable<u32>;
} // namespace Common
src/common/multi_level_page_table.h (new executable file, 79 lines)
@@ -0,0 +1,79 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <type_traits>
#include <utility>
#include <vector>

#include "common/common_types.h"

namespace Common {

template <typename BaseAddr>
class MultiLevelPageTable final {
public:
    constexpr MultiLevelPageTable() = default;
    explicit MultiLevelPageTable(std::size_t address_space_bits, std::size_t first_level_bits,
                                 std::size_t page_bits);

    ~MultiLevelPageTable() noexcept;

    MultiLevelPageTable(const MultiLevelPageTable&) = delete;
    MultiLevelPageTable& operator=(const MultiLevelPageTable&) = delete;

    MultiLevelPageTable(MultiLevelPageTable&& other) noexcept
        : address_space_bits{std::exchange(other.address_space_bits, 0)},
          first_level_bits{std::exchange(other.first_level_bits, 0)},
          page_bits{std::exchange(other.page_bits, 0)},
          first_level_shift{std::exchange(other.first_level_shift, 0)},
          first_level_chunk_size{std::exchange(other.first_level_chunk_size, 0)},
          first_level_map{std::move(other.first_level_map)},
          base_ptr{std::exchange(other.base_ptr, nullptr)} {}

    MultiLevelPageTable& operator=(MultiLevelPageTable&& other) noexcept {
        address_space_bits = std::exchange(other.address_space_bits, 0);
        first_level_bits = std::exchange(other.first_level_bits, 0);
        page_bits = std::exchange(other.page_bits, 0);
        first_level_shift = std::exchange(other.first_level_shift, 0);
        first_level_chunk_size = std::exchange(other.first_level_chunk_size, 0);
        alloc_size = std::exchange(other.alloc_size, 0);
        first_level_map = std::move(other.first_level_map);
        base_ptr = std::exchange(other.base_ptr, nullptr);
        return *this;
    }

    void ReserveRange(u64 start, std::size_t size);

    [[nodiscard]] constexpr const BaseAddr& operator[](std::size_t index) const {
        return base_ptr[index];
    }

    [[nodiscard]] constexpr BaseAddr& operator[](std::size_t index) {
        return base_ptr[index];
    }

    [[nodiscard]] constexpr BaseAddr* data() {
        return base_ptr;
    }

    [[nodiscard]] constexpr const BaseAddr* data() const {
        return base_ptr;
    }

private:
    void AllocateLevel(u64 level);

    std::size_t address_space_bits{};
    std::size_t first_level_bits{};
    std::size_t page_bits{};
    std::size_t first_level_shift{};
    std::size_t first_level_chunk_size{};
    std::size_t alloc_size{};
    std::vector<void*> first_level_map{};
    BaseAddr* base_ptr{};
};

} // namespace Common
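Illustrative usage sketch (not from the commit; all parameter values are assumptions). ReserveRange commits backing for a range before its entries are touched; judging by the implementation below, operator[] indexes by page number:

    #include "common/common_types.h"
    #include "common/multi_level_page_table.h"

    void PageTableSketch() {
        // Assumed geometry: 34-bit address space, 10 first-level bits, 4 KiB pages
        Common::MultiLevelPageTable<u64> table{34, 10, 12};

        table.ReserveRange(0x100000, 0x10000); // commit first-level chunks for the range
        table[0x100000 >> 12] = 0xCAFE;        // write the entry for that page
    }
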
src/common/multi_level_page_table.inc (new executable file, 85 lines)
@@ -0,0 +1,85 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif

#include "common/assert.h"
#include "common/multi_level_page_table.h"

namespace Common {

template <typename BaseAddr>
MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_,
                                                   std::size_t first_level_bits_,
                                                   std::size_t page_bits_)
    : address_space_bits{address_space_bits_},
      first_level_bits{first_level_bits_}, page_bits{page_bits_} {
    if (page_bits == 0) {
        return;
    }
    first_level_shift = address_space_bits - first_level_bits;
    first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr);
    alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr);
    std::size_t first_level_size = 1ULL << first_level_bits;
    first_level_map.resize(first_level_size, nullptr);
#ifdef _WIN32
    void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
#else
    void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1,
                    0)};

    if (base == MAP_FAILED) {
        base = nullptr;
    }
#endif

    ASSERT(base);
    base_ptr = reinterpret_cast<BaseAddr*>(base);
}

template <typename BaseAddr>
MultiLevelPageTable<BaseAddr>::~MultiLevelPageTable() noexcept {
    if (!base_ptr) {
        return;
    }
#ifdef _WIN32
    ASSERT(VirtualFree(base_ptr, 0, MEM_RELEASE));
#else
    ASSERT(munmap(base_ptr, alloc_size) == 0);
#endif
}

template <typename BaseAddr>
void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
    const u64 new_start = start >> first_level_shift;
    const u64 new_end = (start + size) >> first_level_shift;
    for (u64 i = new_start; i <= new_end; i++) {
        if (!first_level_map[i]) {
            AllocateLevel(i);
        }
    }
}

template <typename BaseAddr>
void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) {
    void* ptr = reinterpret_cast<char*>(base_ptr) + level * first_level_chunk_size;
#ifdef _WIN32
    void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)};
#else
    void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE,
                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)};

    if (base == MAP_FAILED) {
        base = nullptr;
    }
#endif
    ASSERT(base);

    first_level_map[level] = base;
}

} // namespace Common
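To make the size computations concrete, here is the arithmetic for one assumed configuration (the values are illustrative, not from the commit):

    #include <cstddef>
    #include <cstdint>

    // Assumed: address_space_bits = 34, first_level_bits = 10, page_bits = 12
    constexpr std::size_t first_level_shift = 34 - 10;                              // 24
    constexpr std::size_t chunk_size = (1ULL << (24 - 12)) * sizeof(std::uint64_t); // 32 KiB
    constexpr std::size_t alloc_size = (1ULL << (34 - 12)) * sizeof(std::uint64_t); // 32 MiB

The full 32 MiB is only reserved up front (MEM_RESERVE on Windows, a plain anonymous mmap elsewhere); AllocateLevel then commits one 32 KiB first-level chunk at a time as ReserveRange touches it.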
src/core/CMakeLists.txt
@@ -143,8 +143,6 @@ add_library(core STATIC
     frontend/emu_window.h
     frontend/framebuffer_layout.cpp
     frontend/framebuffer_layout.h
-    hardware_interrupt_manager.cpp
-    hardware_interrupt_manager.h
     hid/emulated_console.cpp
     hid/emulated_console.h
     hid/emulated_controller.cpp
@@ -528,6 +526,12 @@ add_library(core STATIC
     hle/service/ns/pdm_qry.h
     hle/service/ns/pl_u.cpp
     hle/service/ns/pl_u.h
+    hle/service/nvdrv/core/container.cpp
+    hle/service/nvdrv/core/container.h
+    hle/service/nvdrv/core/nvmap.cpp
+    hle/service/nvdrv/core/nvmap.h
+    hle/service/nvdrv/core/syncpoint_manager.cpp
+    hle/service/nvdrv/core/syncpoint_manager.h
     hle/service/nvdrv/devices/nvdevice.h
     hle/service/nvdrv/devices/nvdisp_disp0.cpp
     hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -556,8 +560,6 @@ add_library(core STATIC
     hle/service/nvdrv/nvdrv_interface.h
     hle/service/nvdrv/nvmemp.cpp
     hle/service/nvdrv/nvmemp.h
-    hle/service/nvdrv/syncpoint_manager.cpp
-    hle/service/nvdrv/syncpoint_manager.h
     hle/service/nvflinger/binder.h
     hle/service/nvflinger/buffer_item.h
     hle/service/nvflinger/buffer_item_consumer.cpp
src/core/core.cpp
@@ -27,7 +27,6 @@
 #include "core/file_sys/savedata_factory.h"
 #include "core/file_sys/vfs_concat.h"
 #include "core/file_sys/vfs_real.h"
-#include "core/hardware_interrupt_manager.h"
 #include "core/hid/hid_core.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/kernel/k_process.h"
@@ -50,6 +49,7 @@
 #include "core/reporter.h"
 #include "core/telemetry_session.h"
 #include "core/tools/freezer.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"

@@ -209,6 +209,7 @@ struct System::Impl {

         telemetry_session = std::make_unique<Core::TelemetrySession>();

+        host1x_core = std::make_unique<Tegra::Host1x::Host1x>(system);
         gpu_core = VideoCore::CreateGPU(emu_window, system);
         if (!gpu_core) {
             return SystemResultStatus::ErrorVideoCore;
@@ -216,7 +217,6 @@ struct System::Impl {

         service_manager = std::make_shared<Service::SM::ServiceManager>(kernel);
         services = std::make_unique<Service::Services>(service_manager, system);
-        interrupt_manager = std::make_unique<Hardware::InterruptManager>(system);

         // Initialize time manager, which must happen after kernel is created
         time_manager.Initialize();
@@ -342,6 +342,7 @@ struct System::Impl {
         core_timing.Shutdown();
         app_loader.reset();
         gpu_core.reset();
+        host1x_core.reset();
         perf_stats.reset();
         kernel.Shutdown();
         memory.Reset();
@@ -405,7 +406,7 @@ struct System::Impl {
     /// AppLoader used to load the current executing application
     std::unique_ptr<Loader::AppLoader> app_loader;
     std::unique_ptr<Tegra::GPU> gpu_core;
-    std::unique_ptr<Hardware::InterruptManager> interrupt_manager;
+    std::unique_ptr<Tegra::Host1x::Host1x> host1x_core;
     std::unique_ptr<Core::DeviceMemory> device_memory;
     Core::Memory::Memory memory;
     Core::HID::HIDCore hid_core;
@@ -608,12 +609,12 @@ const Tegra::GPU& System::GPU() const {
     return *impl->gpu_core;
 }

-Core::Hardware::InterruptManager& System::InterruptManager() {
-    return *impl->interrupt_manager;
+Tegra::Host1x::Host1x& System::Host1x() {
+    return *impl->host1x_core;
 }

-const Core::Hardware::InterruptManager& System::InterruptManager() const {
-    return *impl->interrupt_manager;
+const Tegra::Host1x::Host1x& System::Host1x() const {
+    return *impl->host1x_core;
 }

 VideoCore::RendererBase& System::Renderer() {
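A brief sketch of the resulting access pattern (illustrative, not from the commit): with the wiring above, subsystems obtain Host1x through System where they previously asked for the interrupt manager:

    #include "core/core.h"
    #include "video_core/host1x/host1x.h"

    void Host1xSketch(Core::System& system) {
        // Owned by System::Impl; created just before the GPU and reset right after it
        Tegra::Host1x::Host1x& host1x = system.Host1x();
        (void)host1x; // e.g. nvdrv's NvCore passes this reference on to NvMap
    }
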
src/core/core.h
@@ -75,6 +75,9 @@ class TimeManager;
 namespace Tegra {
 class DebugContext;
 class GPU;
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
 } // namespace Tegra

 namespace VideoCore {
@@ -85,10 +88,6 @@ namespace Core::Timing {
 class CoreTiming;
 }

-namespace Core::Hardware {
-class InterruptManager;
-}
-
 namespace Core::HID {
 class HIDCore;
 }
@@ -244,6 +243,12 @@ public:
     /// Gets an immutable reference to the GPU interface.
     [[nodiscard]] const Tegra::GPU& GPU() const;

+    /// Gets a mutable reference to the Host1x interface
+    [[nodiscard]] Tegra::Host1x::Host1x& Host1x();
+
+    /// Gets an immutable reference to the Host1x interface.
+    [[nodiscard]] const Tegra::Host1x::Host1x& Host1x() const;
+
     /// Gets a mutable reference to the renderer.
     [[nodiscard]] VideoCore::RendererBase& Renderer();

@@ -274,12 +279,6 @@ public:
     /// Provides a constant reference to the core timing instance.
     [[nodiscard]] const Timing::CoreTiming& CoreTiming() const;

-    /// Provides a reference to the interrupt manager instance.
-    [[nodiscard]] Core::Hardware::InterruptManager& InterruptManager();
-
-    /// Provides a constant reference to the interrupt manager instance.
-    [[nodiscard]] const Core::Hardware::InterruptManager& InterruptManager() const;
-
     /// Provides a reference to the kernel instance.
     [[nodiscard]] Kernel::KernelCore& Kernel();

src/core/hle/service/nvdrv/core/container.cpp (new executable file, 41 lines)
@@ -0,0 +1,41 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#include "core/hle/service/nvdrv/core/container.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "video_core/host1x/host1x.h"

namespace Service::Nvidia::NvCore {

struct ContainerImpl {
    ContainerImpl(Tegra::Host1x::Host1x& host1x_) : file{host1x_}, manager{host1x_} {}
    NvMap file;
    SyncpointManager manager;
};

Container::Container(Tegra::Host1x::Host1x& host1x_) {
    impl = std::make_unique<ContainerImpl>(host1x_);
}

Container::~Container() = default;

NvMap& Container::GetNvMapFile() {
    return impl->file;
}

const NvMap& Container::GetNvMapFile() const {
    return impl->file;
}

SyncpointManager& Container::GetSyncpointManager() {
    return impl->manager;
}

const SyncpointManager& Container::GetSyncpointManager() const {
    return impl->manager;
}

} // namespace Service::Nvidia::NvCore
src/core/hle/service/nvdrv/core/container.h (new executable file, 42 lines)
@@ -0,0 +1,42 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#pragma once

#include <memory>

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

} // namespace Tegra

namespace Service::Nvidia::NvCore {

class NvMap;
class SyncpointManager;

struct ContainerImpl;

class Container {
public:
    Container(Tegra::Host1x::Host1x& host1x);
    ~Container();

    NvMap& GetNvMapFile();

    const NvMap& GetNvMapFile() const;

    SyncpointManager& GetSyncpointManager();

    const SyncpointManager& GetSyncpointManager() const;

private:
    std::unique_ptr<ContainerImpl> impl;
};

} // namespace Service::Nvidia::NvCore
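Illustrative usage sketch (not from the commit): a Container bundles the per-driver nvmap and syncpoint state around one Host1x instance:

    #include "core/hle/service/nvdrv/core/container.h"
    #include "core/hle/service/nvdrv/core/nvmap.h"
    #include "core/hle/service/nvdrv/core/syncpoint_manager.h"

    void ContainerSketch(Tegra::Host1x::Host1x& host1x) {
        Service::Nvidia::NvCore::Container container{host1x};
        auto& nvmap = container.GetNvMapFile();             // shared nvmap state
        auto& syncpoints = container.GetSyncpointManager(); // shared syncpoint state
        (void)nvmap;
        (void)syncpoints;
    }
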
src/core/hle/service/nvdrv/core/nvmap.cpp (new executable file, 264 lines)
@@ -0,0 +1,264 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/memory.h"
#include "video_core/host1x/host1x.h"

using Core::Memory::PAGE_SIZE;

namespace Service::Nvidia::NvCore {
NvMap::Handle::Handle(u64 size_, Id id_)
    : size(size_), aligned_size(size), orig_size(size), id(id_) {
    flags.raw = 0;
}

NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
    std::scoped_lock lock(mutex);

    // Handles cannot be allocated twice
    if (allocated) {
        return NvResult::AccessDenied;
    }

    flags = pFlags;
    kind = pKind;
    align = pAlign < PAGE_SIZE ? PAGE_SIZE : pAlign;

    // This flag is only applicable for handles with an address passed
    if (pAddress) {
        flags.keep_uncached_after_free.Assign(0);
    } else {
        LOG_CRITICAL(Service_NVDRV,
                     "Mapping nvmap handles without a CPU side address is unimplemented!");
    }

    size = Common::AlignUp(size, PAGE_SIZE);
    aligned_size = Common::AlignUp(size, align);
    address = pAddress;

    // TODO: pin init

    allocated = true;

    return NvResult::Success;
}

NvResult NvMap::Handle::Duplicate(bool internal_session) {
    // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS)
    if (!allocated) [[unlikely]] {
        return NvResult::BadValue;
    }

    std::scoped_lock lock(mutex);

    // If we internally use FromId the duplication tracking of handles won't work accurately due to
    // us not implementing per-process handle refs.
    if (internal_session) {
        internal_dupes++;
    } else {
        dupes++;
    }

    return NvResult::Success;
}

NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}

void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
    std::scoped_lock lock(handles_lock);

    handles.emplace(handle_description->id, std::move(handle_description));
}

void NvMap::UnmapHandle(Handle& handle_description) {
    // Remove pending unmap queue entry if needed
    if (handle_description.unmap_queue_entry) {
        unmap_queue.erase(*handle_description.unmap_queue_entry);
        handle_description.unmap_queue_entry.reset();
    }

    // Free and unmap the handle from the SMMU
    host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
                                 handle_description.aligned_size);
    host1x.Allocator().Free(handle_description.pin_virt_address,
                            static_cast<u32>(handle_description.aligned_size));
    handle_description.pin_virt_address = 0;
}

bool NvMap::TryRemoveHandle(const Handle& handle_description) {
    // No dupes left, we can remove from handle map
    if (handle_description.dupes == 0 && handle_description.internal_dupes == 0) {
        std::scoped_lock lock(handles_lock);

        auto it{handles.find(handle_description.id)};
        if (it != handles.end()) {
            handles.erase(it);
        }

        return true;
    } else {
        return false;
    }
}

NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) {
    if (!size) [[unlikely]] {
        return NvResult::BadValue;
    }

    u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
    auto handle_description{std::make_shared<Handle>(size, id)};
    AddHandle(handle_description);

    result_out = handle_description;
    return NvResult::Success;
}

std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
    std::scoped_lock lock(handles_lock);
    try {
        return handles.at(handle);
    } catch ([[maybe_unused]] std::out_of_range& e) {
        return nullptr;
    }
}

VAddr NvMap::GetHandleAddress(Handle::Id handle) {
    std::scoped_lock lock(handles_lock);
    try {
        return handles.at(handle)->address;
    } catch ([[maybe_unused]] std::out_of_range& e) {
        return 0;
    }
}

u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
    auto handle_description{GetHandle(handle)};
    if (!handle_description) [[unlikely]] {
        return 0;
    }

    std::scoped_lock lock(handle_description->mutex);
    if (!handle_description->pins) {
        // If we're in the unmap queue we can just remove ourselves and return since we're already
        // mapped
        {
            // Lock now to prevent our queue entry from being removed for allocation in-between the
            // following check and erase
            std::scoped_lock queueLock(unmap_queue_lock);
            if (handle_description->unmap_queue_entry) {
                unmap_queue.erase(*handle_description->unmap_queue_entry);
                handle_description->unmap_queue_entry.reset();

                handle_description->pins++;
                return handle_description->pin_virt_address;
            }
        }

        // If not then allocate some space and map it
        u32 address{};
        auto& smmu_allocator = host1x.Allocator();
        auto& smmu_memory_manager = host1x.MemoryManager();
        while (!(address =
                     smmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) {
            // Free handles until the allocation succeeds
            std::scoped_lock queueLock(unmap_queue_lock);
            if (auto freeHandleDesc{unmap_queue.front()}) {
                // Handles in the unmap queue are guaranteed not to be pinned so don't bother
                // checking if they are before unmapping
                std::scoped_lock freeLock(freeHandleDesc->mutex);
                if (handle_description->pin_virt_address)
                    UnmapHandle(*freeHandleDesc);
            } else {
                LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
            }
        }

        smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
                                handle_description->aligned_size);
        handle_description->pin_virt_address = address;
    }

    handle_description->pins++;
    return handle_description->pin_virt_address;
}

void NvMap::UnpinHandle(Handle::Id handle) {
    auto handle_description{GetHandle(handle)};
    if (!handle_description) {
        return;
    }

    std::scoped_lock lock(handle_description->mutex);
    if (--handle_description->pins < 0) {
        LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!");
    } else if (!handle_description->pins) {
        std::scoped_lock queueLock(unmap_queue_lock);

        // Add to the unmap queue allowing this handle's memory to be freed if needed
        unmap_queue.push_back(handle_description);
        handle_description->unmap_queue_entry = std::prev(unmap_queue.end());
    }
}

std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
    std::weak_ptr<Handle> hWeak{GetHandle(handle)};
    FreeInfo freeInfo;

    // We use a weak ptr here so we can tell when the handle has been freed and report that back to
    // guest
    if (auto handle_description = hWeak.lock()) {
        std::scoped_lock lock(handle_description->mutex);

        if (internal_session) {
            if (--handle_description->internal_dupes < 0)
                LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!");
        } else {
            if (--handle_description->dupes < 0) {
                LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
            } else if (handle_description->dupes == 0) {
                // Force unmap the handle
                if (handle_description->pin_virt_address) {
                    std::scoped_lock queueLock(unmap_queue_lock);
                    UnmapHandle(*handle_description);
                }

                handle_description->pins = 0;
            }
        }

        // Try to remove the shared ptr to the handle from the map, if nothing else is using the
        // handle then it will now be freed when `handle_description` goes out of scope
        if (TryRemoveHandle(*handle_description)) {
            LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle);
        } else {
            LOG_DEBUG(Service_NVDRV,
                      "Tried to free nvmap handle: {} but didn't as it still has duplicates",
                      handle);
        }

        freeInfo = {
            .address = handle_description->address,
            .size = handle_description->size,
            .was_uncached = handle_description->flags.map_uncached.Value() != 0,
        };
    } else {
        return std::nullopt;
    }

    // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed
    if (!hWeak.expired()) {
        LOG_ERROR(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle);
        freeInfo.address = 0;
    }

    return freeInfo;
}

} // namespace Service::Nvidia::NvCore
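Putting the pieces together, a sketch of the intended handle lifecycle as implemented above (illustrative; the sizes, flags, and guest address are sample values, and error handling is abbreviated):

    #include <memory>

    #include "core/hle/service/nvdrv/core/nvmap.h"

    namespace NvCore = Service::Nvidia::NvCore;
    using Service::Nvidia::NvResult;

    void LifecycleSketch(NvCore::NvMap& nvmap) {
        std::shared_ptr<NvCore::NvMap::Handle> handle;
        if (nvmap.CreateHandle(0x10000, handle) != NvResult::Success) {
            return;
        }

        // Back the handle with guest memory (flags/align/kind/address are samples)
        if (handle->Alloc({}, 0x1000, 0, 0x10000000) != NvResult::Success) {
            return;
        }

        const u32 smmu_va = nvmap.PinHandle(handle->id); // map into the SMMU
        (void)smmu_va;
        nvmap.UnpinHandle(handle->id);       // queue for lazy unmapping
        nvmap.FreeHandle(handle->id, false); // drop the guest reference
    }
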
172
src/core/hle/service/nvdrv/core/nvmap.h
Executable file
172
src/core/hle/service/nvdrv/core/nvmap.h
Executable file
|
@ -0,0 +1,172 @@
|
||||||
|
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <cassert>
#include <list>
#include <memory>
#include <mutex>
#include <optional>
#include <unordered_map>

#include "common/bit_field.h"
#include "common/common_types.h"
#include "core/hle/service/nvdrv/nvdata.h"

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

} // namespace Tegra

namespace Service::Nvidia::NvCore {
/**
 * @brief The nvmap core class holds the global state for nvmap and provides methods to manage
 * handles
 */
class NvMap {
public:
    /**
     * @brief A handle to a contiguous block of memory in an application's address space
     */
    struct Handle {
        std::mutex mutex;

        u64 align{};      //!< The alignment to use when pinning the handle onto the SMMU
        u64 size;         //!< Page-aligned size of the memory the handle refers to
        u64 aligned_size; //!< `align`-aligned size of the memory the handle refers to
        u64 orig_size;    //!< Original unaligned size of the memory this handle refers to

        s32 dupes{1};          //!< How many guest references there are to this handle
        s32 internal_dupes{0}; //!< How many emulator-internal references there are to this handle

        using Id = u32;
        Id id; //!< A globally unique identifier for this handle

        s32 pins{};
        u32 pin_virt_address{};
        std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};

        union Flags {
            u32 raw;
            BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached
            BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was
                                                          //!< allocated with a fixed address
            BitField<4, 1, u32> _unk0_;                   //!< Passed to IOVMM for pins
        } flags{};
        static_assert(sizeof(Flags) == sizeof(u32));

        u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to,
                       //!< this can also be in the nvdrv tmem
        bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
                                     //!< call

        u8 kind{};        //!< Used for memory compression
        bool allocated{}; //!< If the handle has been allocated with `Alloc`

        u64 dma_map_addr{}; //! remove me after implementing pinning.

        Handle(u64 size, Id id);

        /**
         * @brief Sets up the handle with the given memory config; it can allocate memory from the
         * tmem if a 0 address is passed
         */
        [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress);

        /**
         * @brief Increases the dupe counter of the handle for the given session
         */
        [[nodiscard]] NvResult Duplicate(bool internal_session);

        /**
         * @brief Obtains a pointer to the handle's memory and marks it as having been mapped
         */
        u8* GetPointer() {
            if (!address) {
                return nullptr;
            }

            is_shared_mem_mapped = true;
            return reinterpret_cast<u8*>(address);
        }
    };

private:
    std::list<std::shared_ptr<Handle>> unmap_queue{};
    std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`

    std::unordered_map<Handle::Id, std::shared_ptr<Handle>>
        handles{};           //!< Main owning map of handles
    std::mutex handles_lock; //!< Protects access to `handles`

    static constexpr u32 HandleIdIncrement{
        4}; //!< Each new handle ID is an increment of 4 from the previous
    std::atomic<u32> next_handle_id{HandleIdIncrement};
    Tegra::Host1x::Host1x& host1x;

    void AddHandle(std::shared_ptr<Handle> handle);

    /**
     * @brief Unmaps and frees the SMMU memory region a handle is mapped to
     * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this
     */
    void UnmapHandle(Handle& handle_description);

    /**
     * @brief Removes a handle from the map taking its dupes into account
     * @note handle_description.mutex MUST be locked when calling this
     * @return If the handle was removed from the map
     */
    bool TryRemoveHandle(const Handle& handle_description);

public:
    /**
     * @brief Encapsulates the result of a FreeHandle operation
     */
    struct FreeInfo {
        u64 address;       //!< Address the handle referred to before deletion
        u64 size;          //!< Page-aligned handle size
        bool was_uncached; //!< If the handle was allocated as uncached
    };

    NvMap(Tegra::Host1x::Host1x& host1x);

    /**
     * @brief Creates an unallocated handle of the given size
     */
    [[nodiscard]] NvResult CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out);

    std::shared_ptr<Handle> GetHandle(Handle::Id handle);

    VAddr GetHandleAddress(Handle::Id handle);

    /**
     * @brief Maps a handle into the SMMU address space
     * @note This operation is refcounted; the number of calls to this must eventually match the
     * number of calls to `UnpinHandle`
     * @return The SMMU virtual address that the handle has been mapped to
     */
    u32 PinHandle(Handle::Id handle);

    /**
     * @brief When this has been called an equal number of times to `PinHandle` for the supplied
     * handle, it will be added to a list of handles to be freed when necessary
     */
    void UnpinHandle(Handle::Id handle);

    /**
     * @brief Tries to free a handle and remove a single dupe
     * @note If a handle has no dupes left and has no other users, a FreeInfo struct will be
     * returned describing the prior state of the handle
     */
    std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internal_session);
};
} // namespace Service::Nvidia::NvCore
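
Taken together, the interface above implies a create, Alloc, pin, unpin, free lifecycle for a handle. The following is a minimal usage sketch, not code from this commit; the host1x reference, guest address, and sizes are assumed placeholders:

// Hypothetical caller that already owns a Host1x reference.
void HandleLifecycleSketch(Tegra::Host1x::Host1x& host1x, u64 guest_address) {
    Service::Nvidia::NvCore::NvMap nvmap{host1x};

    // Create an unallocated handle, then back it with guest memory via Alloc.
    std::shared_ptr<Service::Nvidia::NvCore::NvMap::Handle> handle;
    if (nvmap.CreateHandle(0x10000, handle) != Service::Nvidia::NvResult::Success) {
        return;
    }
    if (handle->Alloc({}, 0x1000, 0, guest_address) != Service::Nvidia::NvResult::Success) {
        return;
    }

    // PinHandle maps the handle onto the SMMU; every pin must later be balanced by an
    // UnpinHandle before the backing memory can be reclaimed.
    [[maybe_unused]] const u32 smmu_va = nvmap.PinHandle(handle->id);
    nvmap.UnpinHandle(handle->id);

    // Drop the single guest dupe; FreeInfo reports the prior state if the handle was freed.
    if (const auto free_info = nvmap.FreeHandle(handle->id, false)) {
        // free_info->address and free_info->size describe the freed handle.
    }
}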
122
src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
Executable file

@ -0,0 +1,122 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#include "common/assert.h"
#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
#include "video_core/host1x/host1x.h"

namespace Service::Nvidia::NvCore {

SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {
    constexpr u32 VBlank0SyncpointId{26};
    constexpr u32 VBlank1SyncpointId{27};

    // Reserve both vblank syncpoints as client managed as they use Continuous Mode
    // Refer to section 14.3.5.3 of the TRM for more information on Continuous Mode
    // https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/drm/dc.c#L660
    ReserveSyncpoint(VBlank0SyncpointId, true);
    ReserveSyncpoint(VBlank1SyncpointId, true);

    for (u32 syncpointId : channel_syncpoints) {
        if (syncpointId) {
            ReserveSyncpoint(syncpointId, false);
        }
    }
}

SyncpointManager::~SyncpointManager() = default;

u32 SyncpointManager::ReserveSyncpoint(u32 id, bool clientManaged) {
    if (syncpoints.at(id).reserved) {
        UNREACHABLE_MSG("Requested syncpoint is in use");
        return 0;
    }

    syncpoints.at(id).reserved = true;
    syncpoints.at(id).interfaceManaged = clientManaged;

    return id;
}

u32 SyncpointManager::FindFreeSyncpoint() {
    for (u32 i{1}; i < syncpoints.size(); i++) {
        if (!syncpoints[i].reserved) {
            return i;
        }
    }
    UNREACHABLE_MSG("Failed to find a free syncpoint!");
    return 0;
}

u32 SyncpointManager::AllocateSyncpoint(bool clientManaged) {
    std::lock_guard lock(reservation_lock);
    return ReserveSyncpoint(FindFreeSyncpoint(), clientManaged);
}

void SyncpointManager::FreeSyncpoint(u32 id) {
    std::lock_guard lock(reservation_lock);
    ASSERT(syncpoints.at(id).reserved);
    syncpoints.at(id).reserved = false;
}

bool SyncpointManager::IsSyncpointAllocated(u32 id) {
    return (id < SyncpointCount) && syncpoints[id].reserved;
}

bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) {
    const SyncpointInfo& syncpoint{syncpoints.at(id)};

    if (!syncpoint.reserved) {
        UNREACHABLE();
        return false;
    }

    // If the interface manages counters then we don't keep track of the maximum value, as it
    // handles sanity checking the values itself
    if (syncpoint.interfaceManaged) {
        return static_cast<s32>(syncpoint.counterMin - threshold) >= 0;
    } else {
        return (syncpoint.counterMax - threshold) >= (syncpoint.counterMin - threshold);
    }
}

u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {
    if (!syncpoints.at(id).reserved) {
        UNREACHABLE();
        return 0;
    }

    return syncpoints.at(id).counterMax += amount;
}

u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
    if (!syncpoints.at(id).reserved) {
        UNREACHABLE();
        return 0;
    }

    return syncpoints.at(id).counterMin;
}

u32 SyncpointManager::UpdateMin(u32 id) {
    if (!syncpoints.at(id).reserved) {
        UNREACHABLE();
        return 0;
    }

    syncpoints.at(id).counterMin = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
    return syncpoints.at(id).counterMin;
}

NvFence SyncpointManager::GetSyncpointFence(u32 id) {
    if (!syncpoints.at(id).reserved) {
        UNREACHABLE();
        return NvFence{};
    }

    return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counterMax};
}

} // namespace Service::Nvidia::NvCore
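
The `interfaceManaged` branch of `HasSyncpointExpired` deliberately compares through a signed cast rather than comparing the raw counters, so a counter that has wrapped around 32 bits is still handled. A worked example with assumed values:

// counterMin has wrapped past 0xFFFFFFFF; the fence threshold was issued just before the wrap.
const u32 counter_min = 0x00000002;
const u32 threshold = 0xFFFFFFF0;
// A naive `counter_min >= threshold` would report the fence as still pending.
// The unsigned subtraction wraps to 0x12, which is positive as an s32, so the
// fence is correctly treated as expired.
const bool expired = static_cast<s32>(counter_min - threshold) >= 0; // true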
139
src/core/hle/service/nvdrv/core/syncpoint_manager.h
Executable file

@ -0,0 +1,139 @@
// SPDX-FileCopyrightText: 2022 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
// or any later version Refer to the license.txt file included.

#pragma once

#include <array>
#include <atomic>
#include <mutex>

#include "common/common_types.h"
#include "core/hle/service/nvdrv/nvdata.h"

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

} // namespace Tegra

namespace Service::Nvidia::NvCore {

enum class ChannelType : u32 {
    MsEnc = 0,
    VIC = 1,
    GPU = 2,
    NvDec = 3,
    Display = 4,
    NvJpg = 5,
    TSec = 6,
    Max = 7
};

/**
 * @brief SyncpointManager handles allocating and accessing host1x syncpoints; these are cached
 * versions of the HW syncpoints, which are intermittently synced
 * @note Refer to Chapter 14 of the Tegra X1 TRM for an exhaustive overview of them
 * @url https://http.download.nvidia.com/tegra-public-appnotes/host1x.html
 * @url
 * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/jetson-tx1/drivers/video/tegra/host/nvhost_syncpt.c
 */
class SyncpointManager final {
public:
    explicit SyncpointManager(Tegra::Host1x::Host1x& host1x);
    ~SyncpointManager();

    /**
     * @brief Checks if the given syncpoint is both allocated and below the number of HW syncpoints
     */
    bool IsSyncpointAllocated(u32 id);

    /**
     * @brief Finds a free syncpoint and reserves it
     * @return The ID of the reserved syncpoint
     */
    u32 AllocateSyncpoint(bool clientManaged);

    /**
     * @url
     * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259
     */
    bool HasSyncpointExpired(u32 id, u32 threshold);

    bool IsFenceSignalled(NvFence fence) {
        return HasSyncpointExpired(fence.id, fence.value);
    }

    /**
     * @brief Atomically increments the maximum value of a syncpoint by the given amount
     * @return The new max value of the syncpoint
     */
    u32 IncrementSyncpointMaxExt(u32 id, u32 amount);

    /**
     * @return The minimum value of the syncpoint
     */
    u32 ReadSyncpointMinValue(u32 id);

    /**
     * @brief Synchronises the minimum value of the syncpoint with the GPU
     * @return The new minimum value of the syncpoint
     */
    u32 UpdateMin(u32 id);

    /**
     * @brief Frees the usage of a syncpoint.
     */
    void FreeSyncpoint(u32 id);

    /**
     * @return A fence that will be signalled once this syncpoint hits its maximum value
     */
    NvFence GetSyncpointFence(u32 id);

    static constexpr std::array<u32, static_cast<u32>(ChannelType::Max)> channel_syncpoints{
        0x0,  // `MsEnc` is unimplemented
        0xC,  // `VIC`
        0x0,  // `GPU` syncpoints are allocated per-channel instead
        0x36, // `NvDec`
        0x0,  // `Display` is unimplemented
        0x37, // `NvJpg`
        0x0,  // `TSec` is unimplemented
    }; //!< Maps each channel ID to a constant syncpoint

private:
    /**
     * @note reservation_lock should be locked when calling this
     */
    u32 ReserveSyncpoint(u32 id, bool clientManaged);

    /**
     * @return The ID of the first free syncpoint
     */
    u32 FindFreeSyncpoint();

    struct SyncpointInfo {
        std::atomic<u32> counterMin; //!< The least value the syncpoint can be (the value it was
                                     //!< when it was last synchronized with host1x)
        std::atomic<u32> counterMax; //!< The maximum value the syncpoint can reach according to
                                     //!< the current usage
        bool interfaceManaged; //!< If the syncpoint is managed by a host1x client interface; a
                               //!< client interface is a HW block that can handle host1x
                               //!< transactions on behalf of a host1x client (which would otherwise
                               //!< need to be manually synced using PIO, which is synchronous and
                               //!< requires direct cooperation of the CPU)
        bool reserved; //!< If the syncpoint is reserved or not, not to be confused with a reserved
                       //!< value
    };

    constexpr static std::size_t SyncpointCount{192};
    std::array<SyncpointInfo, SyncpointCount> syncpoints{};
    std::mutex reservation_lock;

    Tegra::Host1x::Host1x& host1x;
};

} // namespace Service::Nvidia::NvCore
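
As a sketch of how a consumer is expected to drive this class, assuming an already-constructed manager named `syncpoints`; the polling loop is illustrative, not code from this commit:

// Allocate a syncpoint, take a fence at its current maximum, and poll until it signals.
const u32 id = syncpoints.AllocateSyncpoint(false);
const NvFence fence = syncpoints.GetSyncpointFence(id);

while (!syncpoints.IsFenceSignalled(fence)) {
    // Refresh the cached minimum from host1x; counterMin only advances when synced.
    syncpoints.UpdateMin(static_cast<u32>(fence.id));
}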
@ -11,6 +11,10 @@ namespace Core {
class System;
}

+namespace Kernel {
+class KEvent;
+}
+
namespace Service::Nvidia::Devices {

/// Represents an abstract nvidia device node. It is to be subclassed by concrete device nodes to

@ -64,6 +68,10 @@ public:
 */
virtual void OnClose(DeviceFD fd) = 0;

+virtual Kernel::KEvent* QueryEvent(u32 event_id) {
+    return nullptr;
+}
+
protected:
Core::System& system;
};
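
Because the new `QueryEvent` virtual defaults to returning `nullptr`, only devices that actually expose events need to override it. A hedged sketch of what a concrete override could look like; `my_device` and its `events` member are assumed examples, not part of this diff:

// Hypothetical device subclass translating an event_id into a kernel event it owns.
Kernel::KEvent* my_device::QueryEvent(u32 event_id) {
    if (event_id < events.size()) {
        return events[event_id]; // assumed std::array<Kernel::KEvent*, N> member
    }
    LOG_ERROR(Service_NVDRV, "Unknown event {}", event_id);
    return nullptr;
}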
@ -5,15 +5,16 @@
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
-#include "core/hle/service/nvdrv/devices/nvmap.h"
#include "core/perf_stats.h"
#include "video_core/gpu.h"

namespace Service::Nvidia::Devices {

-nvdisp_disp0::nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_)
-    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {}
+nvdisp_disp0::nvdisp_disp0(Core::System& system_, NvCore::Container& core)
+    : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {}
nvdisp_disp0::~nvdisp_disp0() = default;

NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,

@ -39,8 +40,9 @@ void nvdisp_disp0::OnClose(DeviceFD fd) {}

void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width,
                        u32 height, u32 stride, android::BufferTransformFlags transform,
-                       const Common::Rectangle<int>& crop_rect) {
-    const VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle);
+                       const Common::Rectangle<int>& crop_rect,
+                       std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) {
+    const VAddr addr = nvmap.GetHandleAddress(buffer_handle);
    LOG_TRACE(Service,
              "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
              addr, offset, width, height, stride, format);

@ -49,9 +51,14 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form
                                       stride, format, transform, crop_rect};

    system.GetPerfStats().EndSystemFrame();
-    system.GPU().SwapBuffers(&framebuffer);
+    system.GPU().RequestSwapBuffers(&framebuffer, fences, num_fences);
    system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs());
    system.GetPerfStats().BeginSystemFrame();
}

+Kernel::KEvent* nvdisp_disp0::QueryEvent(u32 event_id) {
+    LOG_CRITICAL(Service_NVDRV, "Unknown DISP Event {}", event_id);
+    return nullptr;
+}
+
} // namespace Service::Nvidia::Devices
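
A hedged caller-side sketch of the new `flip` signature; the fence source and parameter values are assumed placeholders rather than code from this commit:

// The presentation side hands the display device the acquire fences that must
// signal before the buffer may be scanned out.
std::array<Service::Nvidia::NvFence, 4> fences{};
fences[0] = syncpoint_manager.GetSyncpointFence(syncpoint_id); // assumed fence source
disp->flip(buffer_handle, 0, format, width, height, stride, transform, crop_rect,
           fences, 1); // num_fences = 1: only the first entry is meaningful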
@ -11,13 +11,18 @@
#include "core/hle/service/nvflinger/buffer_transform_flags.h"
#include "core/hle/service/nvflinger/pixel_format.h"

+namespace Service::Nvidia::NvCore {
+class Container;
+class NvMap;
+} // namespace Service::Nvidia::NvCore
+
namespace Service::Nvidia::Devices {

class nvmap;

class nvdisp_disp0 final : public nvdevice {
public:
-    explicit nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_);
+    explicit nvdisp_disp0(Core::System& system_, NvCore::Container& core);
    ~nvdisp_disp0() override;

    NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,

@ -33,10 +38,14 @@ public:
    /// Performs a screen flip, drawing the buffer pointed to by the handle.
    void flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height,
              u32 stride, android::BufferTransformFlags transform,
-              const Common::Rectangle<int>& crop_rect);
+              const Common::Rectangle<int>& crop_rect,
+              std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences);
+
+    Kernel::KEvent* QueryEvent(u32 event_id) override;

private:
-    std::shared_ptr<nvmap> nvmap_dev;
+    NvCore::Container& container;
+    NvCore::NvMap& nvmap;
};

} // namespace Service::Nvidia::Devices
@ -1,21 +1,31 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors
+// (https://github.com/skyline-emu/)
+// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
+// or any later version Refer to the license.txt file included.

#include <cstring>
#include <utility>

+#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
-#include "core/hle/service/nvdrv/devices/nvmap.h"
+#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
+#include "core/hle/service/nvdrv/nvdrv.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/gpu.h"
#include "video_core/memory_manager.h"
#include "video_core/rasterizer_interface.h"

namespace Service::Nvidia::Devices {

-nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_)
-    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {}
+nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Container& core)
+    : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()}, vm{},
+      gmmu{} {}

nvhost_as_gpu::~nvhost_as_gpu() = default;

NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@ -82,12 +92,51 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
    IoctlAllocAsEx params{};
    std::memcpy(&params, input.data(), input.size());

-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size);
-    if (params.big_page_size == 0) {
-        params.big_page_size = DEFAULT_BIG_PAGE_SIZE;
-    }
-
-    big_page_size = params.big_page_size;
+    LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size);
+
+    std::scoped_lock lock(mutex);
+
+    if (vm.initialised) {
+        UNREACHABLE_MSG("Cannot initialise an address space twice!");
+        return NvResult::InvalidState;
+    }
+
+    if (params.big_page_size) {
+        if (!std::has_single_bit(params.big_page_size)) {
+            LOG_ERROR(Service_NVDRV, "Non power-of-2 big page size: 0x{:X}!", params.big_page_size);
+            return NvResult::BadValue;
+        }
+
+        if (!(params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES)) {
+            LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size);
+            return NvResult::BadValue;
+        }
+
+        vm.big_page_size = params.big_page_size;
+        vm.big_page_size_bits = static_cast<u32>(std::countr_zero(params.big_page_size));
+
+        vm.va_range_start = params.big_page_size << VM::VA_START_SHIFT;
+    }
+
+    // If this is unspecified then default values should be used
+    if (params.va_range_start) {
+        vm.va_range_start = params.va_range_start;
+        vm.va_range_split = params.va_range_split;
+        vm.va_range_end = params.va_range_end;
+    }
+
+    const u64 start_pages{vm.va_range_start >> VM::PAGE_SIZE_BITS};
+    const u64 end_pages{vm.va_range_split >> VM::PAGE_SIZE_BITS};
+    vm.small_page_allocator = std::make_shared<VM::Allocator>(start_pages, end_pages);
+
+    const u64 start_big_pages{vm.va_range_split >> vm.big_page_size_bits};
+    const u64 end_big_pages{(vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits};
+    vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages);
+
+    gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits,
+                                                  VM::PAGE_SIZE_BITS);
+    system.GPU().InitAddressSpace(*gmmu);
+    vm.initialised = true;

    return NvResult::Success;
}
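
The two rejects above split validation into a power-of-two check and a supported-sizes mask check, after which the shift count falls out of `std::countr_zero`. A small self-contained illustration, assuming a 128 KiB big page size that is in the supported mask:

#include <bit>

constexpr unsigned big_page_size = 0x20000; // 128 KiB, assumed supported
static_assert(std::has_single_bit(big_page_size));    // power-of-two check passes
static_assert(std::countr_zero(big_page_size) == 17); // big_page_size_bits == 17
// A value such as 0x18000 has two bits set, fails has_single_bit, and is
// rejected with NvResult::BadValue before the mask is even consulted.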
@ -99,21 +148,75 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
    LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
              params.page_size, params.flags);

-    const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)};
-    if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) {
-        params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size);
-    } else {
-        params.offset = system.GPU().MemoryManager().Allocate(size, params.align);
-    }
-
-    auto result = NvResult::Success;
-    if (!params.offset) {
-        LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size);
-        result = NvResult::InsufficientMemory;
-    }
+    std::scoped_lock lock(mutex);
+
+    if (!vm.initialised) {
+        return NvResult::BadValue;
+    }
+
+    if (params.page_size != VM::PAGE_SIZE && params.page_size != vm.big_page_size) {
+        return NvResult::BadValue;
+    }
+
+    if (params.page_size != vm.big_page_size &&
+        ((params.flags & MappingFlags::Sparse) != MappingFlags::None)) {
+        UNIMPLEMENTED_MSG("Sparse small pages are not implemented!");
+        return NvResult::NotImplemented;
+    }
+
+    const u32 page_size_bits{params.page_size == VM::PAGE_SIZE ? VM::PAGE_SIZE_BITS
+                                                               : vm.big_page_size_bits};
+
+    auto& allocator{params.page_size == VM::PAGE_SIZE ? *vm.small_page_allocator
+                                                      : *vm.big_page_allocator};
+
+    if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
+        allocator.AllocateFixed(static_cast<u32>(params.offset >> page_size_bits), params.pages);
+    } else {
+        params.offset = static_cast<u64>(allocator.Allocate(params.pages)) << page_size_bits;
+        if (!params.offset) {
+            UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!");
+            return NvResult::InsufficientMemory;
+        }
+    }
+
+    u64 size{static_cast<u64>(params.pages) * params.page_size};
+
+    if ((params.flags & MappingFlags::Sparse) != MappingFlags::None) {
+        gmmu->MapSparse(params.offset, size);
+    }
+
+    allocation_map[params.offset] = {
+        .size = size,
+        .mappings{},
+        .page_size = params.page_size,
+        .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
+        .big_pages = params.page_size != VM::PAGE_SIZE,
+    };

    std::memcpy(output.data(), &params, output.size());
-    return result;
+    return NvResult::Success;
+}
+
+void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
+    auto mapping{mapping_map.at(offset)};
+
+    if (!mapping->fixed) {
+        auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
+        u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
+
+        allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),
+                       static_cast<u32>(mapping->size >> page_size_bits));
+    }
+
+    // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
+    // Only FreeSpace can unmap them fully
+    if (mapping->sparse_alloc)
+        gmmu->MapSparse(offset, mapping->size, mapping->big_page);
+    else
+        gmmu->Unmap(offset, mapping->size);
+
+    mapping_map.erase(offset);
}

NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) {
@ -123,8 +226,40 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>&
    LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
              params.pages, params.page_size);

-    system.GPU().MemoryManager().Unmap(params.offset,
-                                       static_cast<std::size_t>(params.pages) * params.page_size);
+    std::scoped_lock lock(mutex);
+
+    if (!vm.initialised) {
+        return NvResult::BadValue;
+    }
+
+    try {
+        auto allocation{allocation_map.at(params.offset)};
+
+        if (allocation.page_size != params.page_size ||
+            allocation.size != (static_cast<u64>(params.pages) * params.page_size)) {
+            return NvResult::BadValue;
+        }
+
+        for (const auto& mapping : allocation.mappings) {
+            FreeMappingLocked(mapping->offset);
+        }
+
+        // Unset sparse flag if required
+        if (allocation.sparse) {
+            gmmu->Unmap(params.offset, allocation.size);
+        }
+
+        auto& allocator{params.page_size == VM::PAGE_SIZE ? *vm.small_page_allocator
+                                                          : *vm.big_page_allocator};
+        u32 page_size_bits{params.page_size == VM::PAGE_SIZE ? VM::PAGE_SIZE_BITS
+                                                             : vm.big_page_size_bits};
+
+        allocator.Free(static_cast<u32>(params.offset >> page_size_bits),
+                       static_cast<u32>(allocation.size >> page_size_bits));
+        allocation_map.erase(params.offset);
+    } catch ([[maybe_unused]] const std::out_of_range& e) {
+        return NvResult::BadValue;
+    }

    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
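
Both `AllocateSpace` and `FreeSpace` move between byte offsets and page indices purely by shifting with `page_size_bits`, so the allocator itself only ever sees page counts. A worked example with assumed values:

#include <cstdint>

constexpr unsigned page_size_bits = 12;      // 4 KiB small pages
constexpr std::uint32_t page_index = 0x4000; // assumed allocator result
constexpr std::uint64_t offset = std::uint64_t{page_index} << page_size_bits;
static_assert(offset == 0x4000000);          // byte offset handed back to the guest
// FreeSpace reverses the mapping: allocator.Free(offset >> 12, size >> 12).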
@ -135,35 +270,52 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out

    LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);

-    auto result = NvResult::Success;
    std::vector<IoctlRemapEntry> entries(num_entries);
    std::memcpy(entries.data(), input.data(), input.size());

-    for (const auto& entry : entries) {
-        LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}",
-                  entry.offset, entry.nvmap_handle, entry.pages);
-
-        const auto object{nvmap_dev->GetObject(entry.nvmap_handle)};
-        if (!object) {
-            LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle);
-            result = NvResult::InvalidState;
-            break;
-        }
-
-        const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10};
-        const auto size{static_cast<u64>(entry.pages) << 0x10};
-        const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10};
-        const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)};
-
-        if (!addr) {
-            LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!");
-            result = NvResult::InvalidState;
-            break;
+    std::scoped_lock lock(mutex);
+
+    if (!vm.initialised) {
+        return NvResult::BadValue;
+    }
+
+    for (const auto& entry : entries) {
+        GPUVAddr virtual_address{static_cast<u64>(entry.as_offset_big_pages)
+                                 << vm.big_page_size_bits};
+        u64 size{static_cast<u64>(entry.big_pages) << vm.big_page_size_bits};
+
+        auto alloc{allocation_map.upper_bound(virtual_address)};
+
+        if (alloc-- == allocation_map.begin() ||
+            (virtual_address - alloc->first) + size > alloc->second.size) {
+            LOG_WARNING(Service_NVDRV, "Cannot remap into an unallocated region!");
+            return NvResult::BadValue;
+        }
+
+        if (!alloc->second.sparse) {
+            LOG_WARNING(Service_NVDRV, "Cannot remap a non-sparse mapping!");
+            return NvResult::BadValue;
+        }
+
+        const bool use_big_pages = alloc->second.big_pages;
+        if (!entry.handle) {
+            gmmu->MapSparse(virtual_address, size, use_big_pages);
+        } else {
+            auto handle{nvmap.GetHandle(entry.handle)};
+            if (!handle) {
+                return NvResult::BadValue;
+            }
+
+            VAddr cpu_address{static_cast<VAddr>(
+                handle->address +
+                (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
+
+            gmmu->Map(virtual_address, cpu_address, size, use_big_pages);
        }
    }

    std::memcpy(output.data(), entries.data(), output.size());
-    return result;
+    return NvResult::Success;
}

NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
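
Every field of `IoctlRemapEntry` is expressed in big-page units, which is why `Remap` shifts each one by `vm.big_page_size_bits`. A worked conversion assuming a 64 KiB big page size:

#include <cstdint>

constexpr unsigned big_page_size_bits = 16; // assumed 64 KiB big pages
constexpr std::uint64_t virtual_address = std::uint64_t{0x200} << big_page_size_bits;
constexpr std::uint64_t size = std::uint64_t{0x10} << big_page_size_bits;
constexpr std::uint64_t handle_offset = std::uint64_t{2} << big_page_size_bits;
static_assert(virtual_address == 0x2000000); // as_offset_big_pages = 0x200
static_assert(size == 0x100000);             // big_pages = 0x10
static_assert(handle_offset == 0x20000);     // handle_offset_big_pages = 2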
@ -173,79 +325,98 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
    LOG_DEBUG(Service_NVDRV,
              "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
              ", offset={}",
-              params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size,
+              params.flags, params.handle, params.buffer_offset, params.mapping_size,
              params.offset);

-    const auto object{nvmap_dev->GetObject(params.nvmap_handle)};
-    if (!object) {
-        LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
-        std::memcpy(output.data(), &params, output.size());
-        return NvResult::InvalidState;
+    std::scoped_lock lock(mutex);
+
+    if (!vm.initialised) {
+        return NvResult::BadValue;
    }

-    // The real nvservices doesn't make a distinction between handles and ids, and
-    // object can only have one handle and it will be the same as its id. Assert that this is the
-    // case to prevent unexpected behavior.
-    ASSERT(object->id == params.nvmap_handle);
-    auto& gpu = system.GPU();
-
-    u64 page_size{params.page_size};
-    if (!page_size) {
-        page_size = object->align;
-    }
-
-    if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) {
-        if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) {
-            const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)};
-            const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)};
-
-            if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) {
-                LOG_CRITICAL(Service_NVDRV,
-                             "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, "
-                             "mapping_size = {}, offset={}",
-                             params.flags, params.nvmap_handle, params.buffer_offset,
-                             params.mapping_size, params.offset);
-
-                std::memcpy(output.data(), &params, output.size());
-                return NvResult::InvalidState;
-            }
-
-            std::memcpy(output.data(), &params, output.size());
-            return NvResult::Success;
-        } else {
-            LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset);
-
-            std::memcpy(output.data(), &params, output.size());
-            return NvResult::InvalidState;
-        }
-    }
-
-    // We can only map objects that have already been assigned a CPU address.
-    ASSERT(object->status == nvmap::Object::Status::Allocated);
-
-    const auto physical_address{object->addr + params.buffer_offset};
-    u64 size{params.mapping_size};
-    if (!size) {
-        size = object->size;
-    }
-
-    const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None};
-    if (is_alloc) {
-        params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size);
-    } else {
-        params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size);
-    }
-
-    auto result = NvResult::Success;
-    if (!params.offset) {
-        LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size);
-        result = NvResult::InvalidState;
-    } else {
-        AddBufferMap(params.offset, size, physical_address, is_alloc);
-    }
+    // Remaps a subregion of an existing mapping to a different PA
+    if ((params.flags & MappingFlags::Remap) != MappingFlags::None) {
+        try {
+            auto mapping{mapping_map.at(params.offset)};
+
+            if (mapping->size < params.mapping_size) {
+                LOG_WARNING(Service_NVDRV,
+                            "Cannot remap a partially mapped GPU address space region: 0x{:X}",
+                            params.offset);
+                return NvResult::BadValue;
+            }
+
+            u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
+            VAddr cpu_address{mapping->ptr + params.buffer_offset};
+
+            gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);
+
+            return NvResult::Success;
+        } catch ([[maybe_unused]] const std::out_of_range& e) {
+            LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}",
+                        params.offset);
+            return NvResult::BadValue;
+        }
+    }
+
+    auto handle{nvmap.GetHandle(params.handle)};
+    if (!handle) {
+        return NvResult::BadValue;
+    }
+
+    VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
+    u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
+
+    bool big_page{[&]() {
+        if (Common::IsAligned(handle->align, vm.big_page_size))
+            return true;
+        else if (Common::IsAligned(handle->align, VM::PAGE_SIZE))
+            return false;
+        else {
+            UNREACHABLE();
+            return false;
+        }
+    }()};
+
+    if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
+        auto alloc{allocation_map.upper_bound(params.offset)};
+
+        if (alloc-- == allocation_map.begin() ||
+            (params.offset - alloc->first) + size > alloc->second.size) {
+            UNREACHABLE_MSG("Cannot perform a fixed mapping into an unallocated region!");
+            return NvResult::BadValue;
+        }
+
+        const bool use_big_pages = alloc->second.big_pages && big_page;
+        gmmu->Map(params.offset, cpu_address, size, use_big_pages);
+
+        auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
+                                               use_big_pages, alloc->second.sparse)};
+        alloc->second.mappings.push_back(mapping);
+        mapping_map[params.offset] = mapping;
+    } else {
+        auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
+        u32 page_size{big_page ? vm.big_page_size : VM::PAGE_SIZE};
+        u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
+
+        params.offset = static_cast<u64>(allocator.Allocate(
+                            static_cast<u32>(Common::AlignUp(size, page_size) >> page_size_bits)))
+                        << page_size_bits;
+        if (!params.offset) {
+            UNREACHABLE_MSG("Failed to allocate free space in the GPU AS!");
+            return NvResult::InsufficientMemory;
+        }
+
+        gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page);
+
+        auto mapping{
+            std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
+        mapping_map[params.offset] = mapping;
+    }

    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
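
The immediately-invoked lambda in `MapBufferEx` derives the page size purely from the handle's alignment. A condensed restatement of that decision with assumed constants (the real code consults `vm` and `Common::IsAligned`):

#include <cstdint>

constexpr std::uint64_t big_page_size = 0x10000;  // assumed 64 KiB
constexpr std::uint64_t small_page_size = 0x1000; // assumed 4 KiB

constexpr bool UsesBigPages(std::uint64_t align) {
    // An alignment that is a multiple of the big page size maps with big pages;
    // otherwise the small-page path (or, for bad alignments, an error) is taken.
    return align % big_page_size == 0;
}
static_assert(UsesBigPages(0x20000)); // 128 KiB alignment -> big pages
static_assert(!UsesBigPages(small_page_size)); // 4 KiB alignment -> small pages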
@ -254,47 +425,82 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8

    LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);

-    if (const auto size{RemoveBufferMap(params.offset)}; size) {
-        system.GPU().MemoryManager().Unmap(params.offset, *size);
-    } else {
-        LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset);
+    std::scoped_lock lock(mutex);
+
+    if (!vm.initialised) {
+        return NvResult::BadValue;
+    }
+
+    try {
+        auto mapping{mapping_map.at(params.offset)};
+
+        if (!mapping->fixed) {
+            auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
+            u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
+
+            allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),
+                           static_cast<u32>(mapping->size >> page_size_bits));
+        }
+
+        // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
+        // Only FreeSpace can unmap them fully
+        if (mapping->sparse_alloc) {
+            gmmu->MapSparse(params.offset, mapping->size, mapping->big_page);
+        } else {
+            gmmu->Unmap(params.offset, mapping->size);
+        }
+
+        mapping_map.erase(params.offset);
+    } catch ([[maybe_unused]] const std::out_of_range& e) {
+        LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
    }

-    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
}

NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlBindChannel params{};
    std::memcpy(&params, input.data(), input.size());
-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}", params.fd);
+    LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);

-    channel = params.fd;
+    auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
+    gpu_channel_device->channel_state->memory_manager = gmmu;
    return NvResult::Success;
}

+void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
+    params.buf_size = 2 * sizeof(VaRegion);
+
+    params.regions = std::array<VaRegion, 2>{
+        VaRegion{
+            .offset = vm.small_page_allocator->vaStart << VM::PAGE_SIZE_BITS,
+            .page_size = VM::PAGE_SIZE,
+            ._pad0_{},
+            .pages = vm.small_page_allocator->vaLimit - vm.small_page_allocator->vaStart,
+        },
+        VaRegion{
+            .offset = vm.big_page_allocator->vaStart << vm.big_page_size_bits,
+            .page_size = vm.big_page_size,
+            ._pad0_{},
+            .pages = vm.big_page_allocator->vaLimit - vm.big_page_allocator->vaStart,
+        },
+    };
+}
+
NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
    IoctlGetVaRegions params{};
    std::memcpy(&params, input.data(), input.size());

-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
-                params.buf_size);
+    LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
+              params.buf_size);

-    params.buf_size = 0x30;
+    std::scoped_lock lock(mutex);

-    params.small = IoctlVaRegion{
-        .offset = 0x04000000,
-        .page_size = DEFAULT_SMALL_PAGE_SIZE,
-        .pages = 0x3fbfff,
-    };
-
-    params.big = IoctlVaRegion{
-        .offset = 0x04000000,
-        .page_size = big_page_size,
-        .pages = 0x1bffff,
-    };
-
-    // TODO(ogniK): This probably can stay stubbed but should add support way way later
+    if (!vm.initialised) {
+        return NvResult::BadValue;
+    }
+
+    GetVARegionsImpl(params);

    std::memcpy(output.data(), &params, output.size());
    return NvResult::Success;
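
`GetVARegionsImpl` always reports exactly two regions, so the `0x30` the old stub hardcoded into `buf_size` is just the struct arithmetic made explicit, given the 0x18-byte `VaRegion` layout asserted in the header:

// sizeof(VaRegion) == 0x18, so two regions occupy 2 * 0x18 == 0x30 bytes,
// matching the fixed buf_size the previous stub wrote.
static_assert(2 * 0x18 == 0x30);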
@ -305,62 +511,27 @@ NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u
    IoctlGetVaRegions params{};
    std::memcpy(&params, input.data(), input.size());

-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
-                params.buf_size);
+    LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
+              params.buf_size);

-    params.buf_size = 0x30;
+    std::scoped_lock lock(mutex);

-    params.small = IoctlVaRegion{
-        .offset = 0x04000000,
-        .page_size = 0x1000,
-        .pages = 0x3fbfff,
-    };
-
-    params.big = IoctlVaRegion{
-        .offset = 0x04000000,
-        .page_size = big_page_size,
-        .pages = 0x1bffff,
-    };
-
-    // TODO(ogniK): This probably can stay stubbed but should add support way way later
+    if (!vm.initialised) {
+        return NvResult::BadValue;
+    }
+
+    GetVARegionsImpl(params);

    std::memcpy(output.data(), &params, output.size());
-    std::memcpy(inline_output.data(), &params.small, sizeof(IoctlVaRegion));
-    std::memcpy(inline_output.data() + sizeof(IoctlVaRegion), &params.big, sizeof(IoctlVaRegion));
+    std::memcpy(inline_output.data(), &params.regions[0], sizeof(VaRegion));
+    std::memcpy(inline_output.data() + sizeof(VaRegion), &params.regions[1], sizeof(VaRegion));

    return NvResult::Success;
}

-std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const {
-    const auto end{buffer_mappings.upper_bound(gpu_addr)};
-    for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) {
-        if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) {
-            return iter->second;
-        }
-    }
-
-    return std::nullopt;
-}
-
-void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
-                                 bool is_allocated) {
-    buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated};
-}
-
-std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) {
-    if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) {
-        std::size_t size{};
-
-        if (iter->second.IsAllocated()) {
-            size = iter->second.Size();
-        }
-
-        buffer_mappings.erase(iter);
-
-        return size;
-    }
-
-    return std::nullopt;
-}
+Kernel::KEvent* nvhost_as_gpu::QueryEvent(u32 event_id) {
+    LOG_CRITICAL(Service_NVDRV, "Unknown AS GPU Event {}", event_id);
+    return nullptr;
+}

} // namespace Service::Nvidia::Devices
@ -1,35 +1,51 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors
+// (https://github.com/skyline-emu/)
+// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
+// or any later version Refer to the license.txt file included.

#pragma once

+#include <bit>
+#include <list>
#include <map>
#include <memory>
+#include <mutex>
#include <optional>
#include <vector>

+#include "common/address_space.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
#include "core/hle/service/nvdrv/devices/nvdevice.h"

+namespace Tegra {
+class MemoryManager;
+} // namespace Tegra
+
+namespace Service::Nvidia {
+class Module;
+}
+
+namespace Service::Nvidia::NvCore {
+class Container;
+class NvMap;
+} // namespace Service::Nvidia::NvCore
+
namespace Service::Nvidia::Devices {

-constexpr u32 DEFAULT_BIG_PAGE_SIZE = 1 << 16;
-constexpr u32 DEFAULT_SMALL_PAGE_SIZE = 1 << 12;
-
-class nvmap;
-
-enum class AddressSpaceFlags : u32 {
-    None = 0x0,
-    FixedOffset = 0x1,
-    Remap = 0x100,
+enum class MappingFlags : u32 {
+    None = 0,
+    Fixed = 1 << 0,
+    Sparse = 1 << 1,
+    Remap = 1 << 8,
};
-DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags);
+DECLARE_ENUM_FLAG_OPERATORS(MappingFlags);

class nvhost_as_gpu final : public nvdevice {
public:
-    explicit nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_);
+    explicit nvhost_as_gpu(Core::System& system_, Module& module, NvCore::Container& core);
    ~nvhost_as_gpu() override;

    NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,

@ -42,46 +58,17 @@ public:
    void OnOpen(DeviceFD fd) override;
    void OnClose(DeviceFD fd) override;

-private:
-    class BufferMap final {
-    public:
-        constexpr BufferMap() = default;
-
-        constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_)
-            : start_addr{start_addr_}, end_addr{start_addr_ + size_} {}
-
-        constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_, VAddr cpu_addr_,
-                            bool is_allocated_)
-            : start_addr{start_addr_}, end_addr{start_addr_ + size_}, cpu_addr{cpu_addr_},
-              is_allocated{is_allocated_} {}
-
-        constexpr VAddr StartAddr() const {
-            return start_addr;
-        }
-
-        constexpr VAddr EndAddr() const {
-            return end_addr;
-        }
-
-        constexpr std::size_t Size() const {
-            return end_addr - start_addr;
-        }
-
-        constexpr VAddr CpuAddr() const {
-            return cpu_addr;
-        }
-
-        constexpr bool IsAllocated() const {
-            return is_allocated;
-        }
-
-    private:
-        GPUVAddr start_addr{};
-        GPUVAddr end_addr{};
-        VAddr cpu_addr{};
-        bool is_allocated{};
+    Kernel::KEvent* QueryEvent(u32 event_id) override;
+
+    struct VaRegion {
+        u64 offset;
+        u32 page_size;
+        u32 _pad0_;
+        u64 pages;
    };
+    static_assert(sizeof(VaRegion) == 0x18);

+private:
    struct IoctlAllocAsEx {
        u32_le flags{}; // usually passes 1
        s32_le as_fd{}; // ignored; passes 0

@ -96,7 +83,7 @@ private:
    struct IoctlAllocSpace {
        u32_le pages{};
        u32_le page_size{};
-        AddressSpaceFlags flags{};
+        MappingFlags flags{};
        INSERT_PADDING_WORDS(1);
        union {
            u64_le offset;

@ -113,19 +100,19 @@ private:
    static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size");

    struct IoctlRemapEntry {
-        u16_le flags{};
-        u16_le kind{};
-        u32_le nvmap_handle{};
-        u32_le map_offset{};
-        u32_le offset{};
-        u32_le pages{};
+        u16 flags;
+        u16 kind;
+        NvCore::NvMap::Handle::Id handle;
+        u32 handle_offset_big_pages;
+        u32 as_offset_big_pages;
+        u32 big_pages;
    };
    static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size");

    struct IoctlMapBufferEx {
-        AddressSpaceFlags flags{}; // bit0: fixed_offset, bit2: cacheable
+        MappingFlags flags{}; // bit0: fixed_offset, bit2: cacheable
        u32_le kind{};        // -1 is default
-        u32_le nvmap_handle{};
+        NvCore::NvMap::Handle::Id handle;
        u32_le page_size{}; // 0 means don't care
        s64_le buffer_offset{};
        u64_le mapping_size{};

@ -143,27 +130,15 @@ private:
    };
    static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size");

-    struct IoctlVaRegion {
-        u64_le offset{};
-        u32_le page_size{};
-        INSERT_PADDING_WORDS(1);
-        u64_le pages{};
-    };
-    static_assert(sizeof(IoctlVaRegion) == 24, "IoctlVaRegion is incorrect size");
-
    struct IoctlGetVaRegions {
        u64_le buf_addr{}; // (contained output user ptr on linux, ignored)
        u32_le buf_size{}; // forced to 2*sizeof(struct va_region)
        u32_le reserved{};
-        IoctlVaRegion small{};
-        IoctlVaRegion big{};
+        std::array<VaRegion, 2> regions{};
    };
-    static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2,
+    static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2,
                  "IoctlGetVaRegions is incorrect size");

-    s32 channel{};
-    u32 big_page_size{DEFAULT_BIG_PAGE_SIZE};
-
    NvResult AllocAsEx(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output);

@ -172,18 +147,75 @@ private:
    NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output);

+    void GetVARegionsImpl(IoctlGetVaRegions& params);
    NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
    NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
                          std::vector<u8>& inline_output);

-    std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const;
-    void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated);
-    std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr);
+    void FreeMappingLocked(u64 offset);

-    std::shared_ptr<nvmap> nvmap_dev;
+    Module& module;

-    // This is expected to be ordered, therefore we must use a map, not unordered_map
-    std::map<GPUVAddr, BufferMap> buffer_mappings;
+    NvCore::Container& container;
+    NvCore::NvMap& nvmap;

+    struct Mapping {
+        VAddr ptr;
+        u64 offset;
+        u64 size;
+        bool fixed;
+        bool big_page; // Only valid if fixed == false
|
||||||
|
bool sparse_alloc;
|
||||||
|
|
||||||
|
Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_)
|
||||||
|
: ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_),
|
||||||
|
sparse_alloc(sparse_alloc_) {}
|
||||||
|
};
|
||||||
|
|
||||||
|
struct Allocation {
|
||||||
|
u64 size;
|
||||||
|
std::list<std::shared_ptr<Mapping>> mappings;
|
||||||
|
u32 page_size;
|
||||||
|
bool sparse;
|
||||||
|
bool big_pages;
|
||||||
|
};
|
||||||
|
|
||||||
|
std::map<u64, std::shared_ptr<Mapping>>
|
||||||
|
mapping_map; //!< This maps the base addresses of mapped buffers to their total sizes and
|
||||||
|
//!< mapping type, this is needed as what was originally a single buffer may
|
||||||
|
//!< have been split into multiple GPU side buffers with the remap flag.
|
||||||
|
std::map<u64, Allocation> allocation_map; //!< Holds allocations created by AllocSpace from
|
||||||
|
//!< which fixed buffers can be mapped into
|
||||||
|
std::mutex mutex; //!< Locks all AS operations
|
||||||
|
|
||||||
|
struct VM {
|
||||||
|
static constexpr u32 PAGE_SIZE{0x1000};
|
||||||
|
static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(PAGE_SIZE)};
|
||||||
|
|
||||||
|
static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000};
|
||||||
|
static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000};
|
||||||
|
u32 big_page_size{DEFAULT_BIG_PAGE_SIZE};
|
||||||
|
u32 big_page_size_bits{std::countr_zero(DEFAULT_BIG_PAGE_SIZE)};
|
||||||
|
|
||||||
|
static constexpr u32 VA_START_SHIFT{10};
|
||||||
|
static constexpr u64 DEFAULT_VA_SPLIT{1ULL << 34};
|
||||||
|
static constexpr u64 DEFAULT_VA_RANGE{1ULL << 37};
|
||||||
|
u64 va_range_start{DEFAULT_BIG_PAGE_SIZE << VA_START_SHIFT};
|
||||||
|
u64 va_range_split{DEFAULT_VA_SPLIT};
|
||||||
|
u64 va_range_end{DEFAULT_VA_RANGE};
|
||||||
|
|
||||||
|
using Allocator = Common::FlatAllocator<u32, 0, 32>;
|
||||||
|
|
||||||
|
std::unique_ptr<Allocator> big_page_allocator;
|
||||||
|
std::shared_ptr<Allocator>
|
||||||
|
small_page_allocator; //! Shared as this is also used by nvhost::GpuChannel
|
||||||
|
|
||||||
|
bool initialised{};
|
||||||
|
} vm;
|
||||||
|
std::shared_ptr<Tegra::MemoryManager> gmmu;
|
||||||
|
|
||||||
|
// s32 channel{};
|
||||||
|
// u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace Service::Nvidia::Devices
|
} // namespace Service::Nvidia::Devices
|
||||||
|
|
|
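Note: a minimal standalone sketch (not part of this commit) of the VA-range split the new VM bookkeeping above implies: addresses below va_range_split are served by the small-page allocator, addresses at or above it by the big-page allocator. The helper names are illustrative only.

    #include <cstdint>

    struct VmSketch {
        static constexpr std::uint32_t small_page_size = 0x1000;  // 4 KiB
        static constexpr std::uint32_t big_page_size = 0x20000;   // 128 KiB default
        std::uint64_t va_range_split = 1ULL << 34;
    };

    // True when a GPU VA falls in the big-page half of the address space.
    bool UsesBigPages(const VmSketch& vm, std::uint64_t va) {
        return va >= vm.va_range_split;
    }

    // A fixed mapping must be aligned to the page size of the half it lands in.
    bool IsAlignedForRange(const VmSketch& vm, std::uint64_t va, std::uint64_t size) {
        const std::uint64_t page =
            UsesBigPages(vm, va) ? VmSketch::big_page_size : VmSketch::small_page_size;
        return (va % page == 0) && (size % page == 0);
    }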
src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp

@@ -1,24 +1,40 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors
+// (https://github.com/skyline-emu/)
+// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
+// or any later version Refer to the license.txt file included.
 
+#include <bit>
 #include <cstdlib>
 #include <cstring>
 
+#include <fmt/format.h>
+
 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "common/scope_exit.h"
 #include "core/core.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::Devices {
 
 nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
-                         SyncpointManager& syncpoint_manager_)
-    : nvdevice{system_}, events_interface{events_interface_}, syncpoint_manager{
-                                                                  syncpoint_manager_} {}
-nvhost_ctrl::~nvhost_ctrl() = default;
+                         NvCore::Container& core_)
+    : nvdevice{system_}, events_interface{events_interface_}, core{core_},
+      syncpoint_manager{core_.GetSyncpointManager()} {}
+
+nvhost_ctrl::~nvhost_ctrl() {
+    for (auto& event : events) {
+        if (!event.registered) {
+            continue;
+        }
+        events_interface.FreeEvent(event.kevent);
+    }
+}
 
 NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                              std::vector<u8>& output) {
@@ -30,13 +46,15 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
         case 0x1c:
             return IocCtrlClearEventWait(input, output);
         case 0x1d:
-            return IocCtrlEventWait(input, output, false);
-        case 0x1e:
             return IocCtrlEventWait(input, output, true);
+        case 0x1e:
+            return IocCtrlEventWait(input, output, false);
         case 0x1f:
             return IocCtrlEventRegister(input, output);
         case 0x20:
             return IocCtrlEventUnregister(input, output);
+        case 0x21:
+            return IocCtrlEventUnregisterBatch(input, output);
         }
         break;
    default:
@@ -60,6 +78,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>&
 }
 
 void nvhost_ctrl::OnOpen(DeviceFD fd) {}
 
 void nvhost_ctrl::OnClose(DeviceFD fd) {}
 
 NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -71,116 +90,167 @@ NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector
 }
 
 NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
-                                       bool is_async) {
+                                       bool is_allocation) {
     IocCtrlEventWaitParams params{};
     std::memcpy(&params, input.data(), sizeof(params));
-    LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}",
-              params.syncpt_id, params.threshold, params.timeout, is_async);
+    LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
+              params.fence.id, params.fence.value, params.timeout, is_allocation);
 
-    if (params.syncpt_id >= MaxSyncPoints) {
+    bool must_unmark_fail = !is_allocation;
+    const u32 event_id = params.value.raw;
+    SCOPE_EXIT({
+        std::memcpy(output.data(), &params, sizeof(params));
+        if (must_unmark_fail) {
+            events[event_id].fails = 0;
+        }
+    });
+
+    const u32 fence_id = static_cast<u32>(params.fence.id);
+
+    if (fence_id >= MaxSyncPoints) {
         return NvResult::BadParameter;
     }
 
-    u32 event_id = params.value & 0x00FF;
+    if (params.fence.value == 0) {
+        if (!syncpoint_manager.IsSyncpointAllocated(params.fence.id)) {
+            LOG_WARNING(Service_NVDRV,
+                        "Unallocated syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
+                        params.fence.id, params.fence.value, params.timeout, is_allocation);
+        } else {
+            params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id);
+        }
+        return NvResult::Success;
+    }
 
-    if (event_id >= MaxNvEvents) {
-        std::memcpy(output.data(), &params, sizeof(params));
+    if (syncpoint_manager.IsFenceSignalled(params.fence)) {
+        params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id);
+        return NvResult::Success;
+    }
+
+    if (const auto new_value = syncpoint_manager.UpdateMin(fence_id);
+        syncpoint_manager.IsFenceSignalled(params.fence)) {
+        params.value.raw = new_value;
+        return NvResult::Success;
+    }
+
+    auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager();
+    const u32 target_value = params.fence.value;
+
+    auto lock = NvEventsLock();
+
+    u32 slot = [&]() {
+        if (is_allocation) {
+            params.value.raw = 0;
+            return FindFreeNvEvent(fence_id);
+        } else {
+            return params.value.raw;
+        }
+    }();
+
+    must_unmark_fail = false;
+
+    const auto check_failing = [&]() {
+        if (events[slot].fails > 2) {
+            {
+                auto lk = system.StallProcesses();
+                host1x_syncpoint_manager.WaitHost(fence_id, target_value);
+                system.UnstallProcesses();
+            }
+            params.value.raw = target_value;
+            return true;
+        }
+        return false;
+    };
+
+    if (slot >= MaxNvEvents) {
         return NvResult::BadParameter;
     }
 
-    if (syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
-        params.value = syncpoint_manager.GetSyncpointMin(params.syncpt_id);
-        std::memcpy(output.data(), &params, sizeof(params));
-        events_interface.failed[event_id] = false;
-        return NvResult::Success;
-    }
-
-    if (const auto new_value = syncpoint_manager.RefreshSyncpoint(params.syncpt_id);
-        syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) {
-        params.value = new_value;
-        std::memcpy(output.data(), &params, sizeof(params));
-        events_interface.failed[event_id] = false;
-        return NvResult::Success;
-    }
-
-    auto& event = events_interface.events[event_id];
-    auto& gpu = system.GPU();
-
-    // This is mostly to take into account unimplemented features. As synced
-    // gpu is always synced.
-    if (!gpu.IsAsync()) {
-        event.event->GetWritableEvent().Signal();
-        return NvResult::Success;
-    }
-    const u32 current_syncpoint_value = event.fence.value;
-    const s32 diff = current_syncpoint_value - params.threshold;
-    if (diff >= 0) {
-        event.event->GetWritableEvent().Signal();
-        params.value = current_syncpoint_value;
-        std::memcpy(output.data(), &params, sizeof(params));
-        events_interface.failed[event_id] = false;
-        return NvResult::Success;
-    }
-    const u32 target_value = current_syncpoint_value - diff;
-
-    if (!is_async) {
-        params.value = 0;
-    }
-
     if (params.timeout == 0) {
-        std::memcpy(output.data(), &params, sizeof(params));
+        if (check_failing()) {
+            events[slot].fails = 0;
+            return NvResult::Success;
+        }
         return NvResult::Timeout;
     }
 
-    EventState status = events_interface.status[event_id];
-    const bool bad_parameter = status == EventState::Busy;
-    if (bad_parameter) {
-        std::memcpy(output.data(), &params, sizeof(params));
+    auto& event = events[slot];
+
+    if (!event.registered) {
         return NvResult::BadParameter;
     }
-    events_interface.SetEventStatus(event_id, EventState::Waiting);
-    events_interface.assigned_syncpt[event_id] = params.syncpt_id;
-    events_interface.assigned_value[event_id] = target_value;
-    if (is_async) {
-        params.value = params.syncpt_id << 4;
-    } else {
-        params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000;
+
+    if (event.IsBeingUsed()) {
+        return NvResult::BadParameter;
     }
-    params.value |= event_id;
-    event.event->GetWritableEvent().Clear();
-    if (events_interface.failed[event_id]) {
-        {
-            auto lk = system.StallProcesses();
-            gpu.WaitFence(params.syncpt_id, target_value);
-            system.UnstallProcesses();
-        }
-        std::memcpy(output.data(), &params, sizeof(params));
-        events_interface.failed[event_id] = false;
+
+    if (check_failing()) {
+        event.fails = 0;
         return NvResult::Success;
     }
-    gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value);
-    std::memcpy(output.data(), &params, sizeof(params));
+
+    params.value.raw = 0;
+
+    event.status.store(EventState::Waiting, std::memory_order_release);
+    event.assigned_syncpt = fence_id;
+    event.assigned_value = target_value;
+    if (is_allocation) {
+        params.value.syncpoint_id_for_allocation.Assign(static_cast<u16>(fence_id));
+        params.value.event_allocated.Assign(1);
+    } else {
+        params.value.syncpoint_id.Assign(fence_id);
+    }
+    params.value.raw |= slot;
+
+    event.wait_handle =
+        host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() {
+            auto& event_ = events[slot];
+            if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
+                EventState::Waiting) {
+                event_.kevent->GetWritableEvent().Signal();
+            }
+            event_.status.store(EventState::Signalled, std::memory_order_release);
        });
     return NvResult::Timeout;
 }
 
+NvResult nvhost_ctrl::FreeEvent(u32 slot) {
+    if (slot >= MaxNvEvents) {
+        return NvResult::BadParameter;
+    }
+
+    auto& event = events[slot];
+
+    if (!event.registered) {
+        return NvResult::Success;
+    }
+
+    if (event.IsBeingUsed()) {
+        return NvResult::Busy;
+    }
+
+    FreeNvEvent(slot);
+    return NvResult::Success;
+}
+
 NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) {
     IocCtrlEventRegisterParams params{};
     std::memcpy(&params, input.data(), sizeof(params));
-    const u32 event_id = params.user_event_id & 0x00FF;
+    const u32 event_id = params.user_event_id;
     LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
     if (event_id >= MaxNvEvents) {
         return NvResult::BadParameter;
     }
-    if (events_interface.registered[event_id]) {
-        const auto event_state = events_interface.status[event_id];
-        if (event_state != EventState::Free) {
-            LOG_WARNING(Service_NVDRV, "Event already registered! Unregistering previous event");
-            events_interface.UnregisterEvent(event_id);
-        } else {
-            return NvResult::BadParameter;
+
+    auto lock = NvEventsLock();
+
+    if (events[event_id].registered) {
+        const auto result = FreeEvent(event_id);
+        if (result != NvResult::Success) {
+            return result;
         }
     }
-    events_interface.RegisterEvent(event_id);
+    CreateNvEvent(event_id);
     return NvResult::Success;
 }
 
@@ -190,34 +260,142 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input,
     std::memcpy(&params, input.data(), sizeof(params));
     const u32 event_id = params.user_event_id & 0x00FF;
     LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
-    if (event_id >= MaxNvEvents) {
-        return NvResult::BadParameter;
+
+    auto lock = NvEventsLock();
+    return FreeEvent(event_id);
+}
+
+NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(const std::vector<u8>& input,
+                                                  std::vector<u8>& output) {
+    IocCtrlEventUnregisterBatchParams params{};
+    std::memcpy(&params, input.data(), sizeof(params));
+    u64 event_mask = params.user_events;
+    LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask);
+
+    auto lock = NvEventsLock();
+    while (event_mask != 0) {
+        const u64 event_id = std::countr_zero(event_mask);
+        event_mask &= ~(1ULL << event_id);
+        const auto result = FreeEvent(static_cast<u32>(event_id));
+        if (result != NvResult::Success) {
+            return result;
+        }
     }
-    if (!events_interface.registered[event_id]) {
-        return NvResult::BadParameter;
-    }
-    events_interface.UnregisterEvent(event_id);
     return NvResult::Success;
 }
 
 NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output) {
-    IocCtrlEventSignalParams params{};
+    IocCtrlEventClearParams params{};
     std::memcpy(&params, input.data(), sizeof(params));
 
-    u32 event_id = params.event_id & 0x00FF;
-    LOG_WARNING(Service_NVDRV, "cleared event wait on, event_id: {:X}", event_id);
+    u32 event_id = params.event_id.slot;
+    LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id);
 
     if (event_id >= MaxNvEvents) {
         return NvResult::BadParameter;
     }
-    if (events_interface.status[event_id] == EventState::Waiting) {
-        events_interface.LiberateEvent(event_id);
-    }
-    events_interface.failed[event_id] = true;
 
-    syncpoint_manager.RefreshSyncpoint(events_interface.events[event_id].fence.id);
+    auto lock = NvEventsLock();
+
+    auto& event = events[event_id];
+    if (event.status.exchange(EventState::Cancelling, std::memory_order_acq_rel) ==
+        EventState::Waiting) {
+        auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager();
+        host1x_syncpoint_manager.DeregisterHostAction(event.assigned_syncpt, event.wait_handle);
+        syncpoint_manager.UpdateMin(event.assigned_syncpt);
+        event.wait_handle = {};
+    }
+    event.fails++;
+    event.status.store(EventState::Cancelled, std::memory_order_release);
+    event.kevent->GetWritableEvent().Clear();
 
     return NvResult::Success;
 }
 
+Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) {
+    const auto desired_event = SyncpointEventValue{.raw = event_id};
+
+    const bool allocated = desired_event.event_allocated.Value() != 0;
+    const u32 slot{allocated ? desired_event.partial_slot.Value()
+                             : static_cast<u32>(desired_event.slot)};
+    if (slot >= MaxNvEvents) {
+        ASSERT(false);
+        return nullptr;
+    }
+
+    const u32 syncpoint_id{allocated ? desired_event.syncpoint_id_for_allocation.Value()
+                                     : desired_event.syncpoint_id.Value()};
+
+    auto lock = NvEventsLock();
+
+    auto& event = events[slot];
+    if (event.registered && event.assigned_syncpt == syncpoint_id) {
+        ASSERT(event.kevent);
+        return event.kevent;
+    }
+    // Is this possible in hardware?
+    ASSERT_MSG(false, "Slot:{}, SyncpointID:{}, requested", slot, syncpoint_id);
+    return nullptr;
+}
+
+std::unique_lock<std::mutex> nvhost_ctrl::NvEventsLock() {
+    return std::unique_lock<std::mutex>(events_mutex);
+}
+
+void nvhost_ctrl::CreateNvEvent(u32 event_id) {
+    auto& event = events[event_id];
+    ASSERT(!event.kevent);
+    ASSERT(!event.registered);
+    ASSERT(!event.IsBeingUsed());
+    event.kevent = events_interface.CreateEvent(fmt::format("NVCTRL::NvEvent_{}", event_id));
+    event.status = EventState::Available;
+    event.registered = true;
+    const u64 mask = 1ULL << event_id;
+    event.fails = 0;
+    events_mask |= mask;
+    event.assigned_syncpt = 0;
+}
+
+void nvhost_ctrl::FreeNvEvent(u32 event_id) {
+    auto& event = events[event_id];
+    ASSERT(event.kevent);
+    ASSERT(event.registered);
+    ASSERT(!event.IsBeingUsed());
+    events_interface.FreeEvent(event.kevent);
+    event.kevent = nullptr;
+    event.status = EventState::Available;
+    event.registered = false;
+    const u64 mask = ~(1ULL << event_id);
+    events_mask &= mask;
+}
+
+u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) {
+    u32 slot{MaxNvEvents};
+    u32 free_slot{MaxNvEvents};
+    for (u32 i = 0; i < MaxNvEvents; i++) {
+        auto& event = events[i];
+        if (event.registered) {
+            if (!event.IsBeingUsed()) {
+                slot = i;
+                if (event.assigned_syncpt == syncpoint_id) {
+                    return slot;
+                }
+            }
+        } else if (free_slot == MaxNvEvents) {
+            free_slot = i;
+        }
+    }
+    if (free_slot < MaxNvEvents) {
+        CreateNvEvent(free_slot);
+        return free_slot;
+    }
+
+    if (slot < MaxNvEvents) {
+        return slot;
+    }
+
+    LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event");
+    return 0;
+}
+
 } // namespace Service::Nvidia::Devices
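Note: a standalone sketch (not part of this commit) of the bitmask walk that IocCtrlEventUnregisterBatch above performs: each set bit in the 64-bit user_events mask names one event slot to free.

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    int main() {
        std::uint64_t event_mask = 0b1010'0110ULL; // slots 1, 2, 5 and 7
        while (event_mask != 0) {
            const int event_id = std::countr_zero(event_mask); // index of lowest set bit
            event_mask &= ~(1ULL << event_id);                 // clear it and continue
            std::printf("free event slot %d\n", event_id);
        }
    }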
src/core/hle/service/nvdrv/devices/nvhost_ctrl.h

@@ -1,20 +1,29 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu emulator team, Skyline Team and Contributors
+// (https://github.com/skyline-emu/)
+// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
+// or any later version Refer to the license.txt file included.
 
 #pragma once
 
 #include <array>
 #include <vector>
+#include "common/bit_field.h"
 #include "common/common_types.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
+#include "video_core/host1x/syncpoint_manager.h"
 
+namespace Service::Nvidia::NvCore {
+class Container;
+class SyncpointManager;
+} // namespace Service::Nvidia::NvCore
+
 namespace Service::Nvidia::Devices {
 
 class nvhost_ctrl final : public nvdevice {
 public:
     explicit nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
-                         SyncpointManager& syncpoint_manager_);
+                         NvCore::Container& core);
     ~nvhost_ctrl() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -27,7 +36,70 @@ public:
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
+    Kernel::KEvent* QueryEvent(u32 event_id) override;
+
+    union SyncpointEventValue {
+        u32 raw;
+
+        union {
+            BitField<0, 4, u32> partial_slot;
+            BitField<4, 28, u32> syncpoint_id;
+        };
+
+        struct {
+            u16 slot;
+            union {
+                BitField<0, 12, u16> syncpoint_id_for_allocation;
+                BitField<12, 1, u16> event_allocated;
+            };
+        };
+    };
+    static_assert(sizeof(SyncpointEventValue) == sizeof(u32));
+
 private:
+    struct InternalEvent {
+        // Mask representing registered events
+
+        // Each kernel event associated to an NV event
+        Kernel::KEvent* kevent{};
+        // The status of the current NVEvent
+        std::atomic<EventState> status{};
+
+        // Tells the NVEvent that it has failed.
+        u32 fails{};
+        // When an NVEvent is waiting on GPU interrupt, this is the sync_point
+        // associated with it.
+        u32 assigned_syncpt{};
+        // This is the value of the GPU interrupt for which the NVEvent is waiting
+        // for.
+        u32 assigned_value{};
+
+        // Tells if an NVEvent is registered or not
+        bool registered{};
+
+        // Used for waiting on a syncpoint & canceling it.
+        Tegra::Host1x::SyncpointManager::ActionHandle wait_handle{};
+
+        bool IsBeingUsed() {
+            const auto current_status = status.load(std::memory_order_acquire);
+            return current_status == EventState::Waiting ||
+                   current_status == EventState::Cancelling ||
+                   current_status == EventState::Signalling;
+        }
+    };
+
+    std::unique_lock<std::mutex> NvEventsLock();
+
+    void CreateNvEvent(u32 event_id);
+
+    void FreeNvEvent(u32 event_id);
+
+    u32 FindFreeNvEvent(u32 syncpoint_id);
+
+    std::array<InternalEvent, MaxNvEvents> events{};
+    std::mutex events_mutex;
+    u64 events_mask{};
+
     struct IocSyncptReadParams {
         u32_le id{};
         u32_le value{};
@@ -83,27 +155,18 @@ private:
     };
     static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size");
 
-    struct IocCtrlEventSignalParams {
-        u32_le event_id{};
+    struct IocCtrlEventClearParams {
+        SyncpointEventValue event_id{};
     };
-    static_assert(sizeof(IocCtrlEventSignalParams) == 4,
-                  "IocCtrlEventSignalParams is incorrect size");
+    static_assert(sizeof(IocCtrlEventClearParams) == 4,
+                  "IocCtrlEventClearParams is incorrect size");
 
     struct IocCtrlEventWaitParams {
-        u32_le syncpt_id{};
-        u32_le threshold{};
-        s32_le timeout{};
-        u32_le value{};
-    };
-    static_assert(sizeof(IocCtrlEventWaitParams) == 16, "IocCtrlEventWaitParams is incorrect size");
-
-    struct IocCtrlEventWaitAsyncParams {
-        u32_le syncpt_id{};
-        u32_le threshold{};
+        NvFence fence{};
         u32_le timeout{};
-        u32_le value{};
+        SyncpointEventValue value{};
     };
-    static_assert(sizeof(IocCtrlEventWaitAsyncParams) == 16,
+    static_assert(sizeof(IocCtrlEventWaitParams) == 16,
                   "IocCtrlEventWaitAsyncParams is incorrect size");
 
     struct IocCtrlEventRegisterParams {
@@ -118,19 +181,25 @@ private:
     static_assert(sizeof(IocCtrlEventUnregisterParams) == 4,
                   "IocCtrlEventUnregisterParams is incorrect size");
 
-    struct IocCtrlEventKill {
+    struct IocCtrlEventUnregisterBatchParams {
         u64_le user_events{};
     };
-    static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size");
+    static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8,
+                  "IocCtrlEventKill is incorrect size");
 
     NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output);
-    NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async);
+    NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
+                              bool is_allocation);
     NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output);
+    NvResult IocCtrlEventUnregisterBatch(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output);
 
+    NvResult FreeEvent(u32 slot);
+
     EventInterface& events_interface;
-    SyncpointManager& syncpoint_manager;
+    NvCore::Container& core;
+    NvCore::SyncpointManager& syncpoint_manager;
 };
 
 } // namespace Service::Nvidia::Devices
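Note: a standalone sketch (not part of this commit) of the two SyncpointEventValue layouts declared above, written with plain shifts instead of BitField. An allocated value keeps the slot in the low 16 bits, the syncpoint id in bits 16..27 and sets bit 28; a non-allocated value packs the slot into the low 4 bits and the syncpoint id into bits 4..31.

    #include <cstdint>

    // Layout used when the driver allocated the event slot itself.
    std::uint32_t PackAllocated(std::uint16_t slot, std::uint32_t syncpoint_id) {
        return slot | ((syncpoint_id & 0xFFF) << 16) | (1u << 28);
    }

    // Layout used when the guest supplied the slot.
    std::uint32_t PackSimple(std::uint32_t slot, std::uint32_t syncpoint_id) {
        return (slot & 0xF) | (syncpoint_id << 4);
    }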
src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp

@@ -7,11 +7,19 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
+#include "core/hle/service/nvdrv/nvdrv.h"
 
 namespace Service::Nvidia::Devices {
 
-nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_) : nvdevice{system_} {}
-nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default;
+nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_)
+    : nvdevice{system_}, events_interface{events_interface_} {
+    error_notifier_event = events_interface.CreateEvent("CtrlGpuErrorNotifier");
+    unknown_event = events_interface.CreateEvent("CtrlGpuUknownEvent");
+}
+nvhost_ctrl_gpu::~nvhost_ctrl_gpu() {
+    events_interface.FreeEvent(error_notifier_event);
+    events_interface.FreeEvent(unknown_event);
+}
 
 NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                                  std::vector<u8>& output) {
@@ -286,4 +294,17 @@ NvResult nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u
     return NvResult::Success;
 }
 
+Kernel::KEvent* nvhost_ctrl_gpu::QueryEvent(u32 event_id) {
+    switch (event_id) {
+    case 1:
+        return error_notifier_event;
+    case 2:
+        return unknown_event;
+    default: {
+        LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id);
+    }
+    }
+    return nullptr;
+}
+
 } // namespace Service::Nvidia::Devices
src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h

@@ -10,11 +10,15 @@
 #include "common/swap.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 
+namespace Service::Nvidia {
+class EventInterface;
+}
+
 namespace Service::Nvidia::Devices {
 
 class nvhost_ctrl_gpu final : public nvdevice {
 public:
-    explicit nvhost_ctrl_gpu(Core::System& system_);
+    explicit nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_);
     ~nvhost_ctrl_gpu() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -27,6 +31,8 @@ public:
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
+    Kernel::KEvent* QueryEvent(u32 event_id) override;
+
 private:
     struct IoctlGpuCharacteristics {
         u32_le arch; // 0x120 (NVGPU_GPU_ARCH_GM200)
@@ -160,6 +166,12 @@ private:
     NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output);
+
+    EventInterface& events_interface;
+
+    // Events
+    Kernel::KEvent* error_notifier_event;
+    Kernel::KEvent* unknown_event;
 };
 
 } // namespace Service::Nvidia::Devices
src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp

@@ -5,29 +5,46 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
+#include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/memory.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/engines/puller.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
 
 namespace Service::Nvidia::Devices {
 namespace {
-Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoint_id) {
-    Tegra::GPU::FenceAction result{};
+Tegra::CommandHeader BuildFenceAction(Tegra::Engines::Puller::FenceOperation op, u32 syncpoint_id) {
+    Tegra::Engines::Puller::FenceAction result{};
     result.op.Assign(op);
     result.syncpoint_id.Assign(syncpoint_id);
     return {result.raw};
 }
 } // namespace
 
-nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                       SyncpointManager& syncpoint_manager_)
-    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {
-    channel_fence.id = syncpoint_manager_.AllocateSyncpoint();
-    channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id);
+nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
+                       NvCore::Container& core_)
+    : nvdevice{system_}, events_interface{events_interface_}, core{core_},
+      syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
+      channel_state{system.GPU().AllocateChannel()} {
+    channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
+    sm_exception_breakpoint_int_report_event =
+        events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt");
+    sm_exception_breakpoint_pause_report_event =
+        events_interface.CreateEvent("GpuChannelSMExceptionBreakpointPause");
+    error_notifier_event = events_interface.CreateEvent("GpuChannelErrorNotifier");
 }
 
-nvhost_gpu::~nvhost_gpu() = default;
+nvhost_gpu::~nvhost_gpu() {
+    events_interface.FreeEvent(sm_exception_breakpoint_int_report_event);
+    events_interface.FreeEvent(sm_exception_breakpoint_pause_report_event);
+    events_interface.FreeEvent(error_notifier_event);
+    syncpoint_manager.FreeSyncpoint(channel_syncpoint);
+}
 
 NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                             std::vector<u8>& output) {
@@ -167,9 +184,14 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
               params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
               params.unk3);
 
-    channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id);
+    if (channel_state->initiated) {
+        LOG_CRITICAL(Service_NVDRV, "Already allocated!");
+        return NvResult::AlreadyAllocated;
+    }
 
-    params.fence_out = channel_fence;
+    system.GPU().InitChannel(*channel_state);
+
+    params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);
 
     std::memcpy(output.data(), &params, output.size());
     return NvResult::Success;
@@ -188,39 +210,37 @@ NvResult nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::ve
 
 static std::vector<Tegra::CommandHeader> BuildWaitCommandList(NvFence fence) {
     return {
-        Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
+        Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1,
                                   Tegra::SubmissionMode::Increasing),
         {fence.value},
-        Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
+        Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1,
                                   Tegra::SubmissionMode::Increasing),
-        BuildFenceAction(Tegra::GPU::FenceOperation::Acquire, fence.id),
+        BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Acquire, fence.id),
     };
 }
 
-static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence,
-                                                                   u32 add_increment) {
+static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence) {
     std::vector<Tegra::CommandHeader> result{
-        Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1,
+        Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1,
                                   Tegra::SubmissionMode::Increasing),
         {}};
 
-    for (u32 count = 0; count < add_increment; ++count) {
-        result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1,
+    for (u32 count = 0; count < 2; ++count) {
+        result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1,
                                                       Tegra::SubmissionMode::Increasing));
-        result.emplace_back(BuildFenceAction(Tegra::GPU::FenceOperation::Increment, fence.id));
+        result.emplace_back(
+            BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Increment, fence.id));
     }
 
     return result;
 }
 
-static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence,
-                                                                          u32 add_increment) {
+static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence) {
     std::vector<Tegra::CommandHeader> result{
-        Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForInterrupt, 1,
+        Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForIdle, 1,
                                   Tegra::SubmissionMode::Increasing),
         {}};
-    const std::vector<Tegra::CommandHeader> increment{
-        BuildIncrementCommandList(fence, add_increment)};
+    const std::vector<Tegra::CommandHeader> increment{BuildIncrementCommandList(fence)};
 
     result.insert(result.end(), increment.begin(), increment.end());
 
@@ -234,32 +254,40 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>
 
     auto& gpu = system.GPU();
 
-    params.fence_out.id = channel_fence.id;
+    std::scoped_lock lock(channel_mutex);
 
-    if (params.flags.add_wait.Value() &&
-        !syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) {
-        gpu.PushGPUEntries(Tegra::CommandList{BuildWaitCommandList(params.fence_out)});
-    }
+    const auto bind_id = channel_state->bind_id;
 
-    if (params.flags.add_increment.Value() || params.flags.increment.Value()) {
-        const u32 increment_value = params.flags.increment.Value() ? params.fence_out.value : 0;
-        params.fence_out.value = syncpoint_manager.IncreaseSyncpoint(
-            params.fence_out.id, params.AddIncrementValue() + increment_value);
-    } else {
-        params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id);
-    }
+    auto& flags = params.flags;
 
-    gpu.PushGPUEntries(std::move(entries));
+    if (flags.fence_wait.Value()) {
+        if (flags.increment_value.Value()) {
+            return NvResult::BadParameter;
+        }
 
-    if (params.flags.add_increment.Value()) {
-        if (params.flags.suppress_wfi) {
-            gpu.PushGPUEntries(Tegra::CommandList{
-                BuildIncrementCommandList(params.fence_out, params.AddIncrementValue())});
-        } else {
-            gpu.PushGPUEntries(Tegra::CommandList{
-                BuildIncrementWithWfiCommandList(params.fence_out, params.AddIncrementValue())});
-        }
-    }
+        if (!syncpoint_manager.IsFenceSignalled(params.fence)) {
+            gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildWaitCommandList(params.fence)});
+        }
+    }
+
+    gpu.PushGPUEntries(bind_id, std::move(entries));
+    params.fence.id = channel_syncpoint;
+
+    u32 increment{(flags.fence_increment.Value() != 0 ? 2 : 0) +
+                  (flags.increment_value.Value() != 0 ? params.fence.value : 0)};
+    params.fence.value = syncpoint_manager.IncrementSyncpointMaxExt(channel_syncpoint, increment);
+
+    if (flags.fence_increment.Value()) {
+        if (flags.suppress_wfi.Value()) {
+            gpu.PushGPUEntries(bind_id,
+                               Tegra::CommandList{BuildIncrementCommandList(params.fence)});
+        } else {
+            gpu.PushGPUEntries(bind_id,
+                               Tegra::CommandList{BuildIncrementWithWfiCommandList(params.fence)});
+        }
+    }
+
+    flags.raw = 0;
 
     std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo));
     return NvResult::Success;
@@ -328,4 +356,19 @@ NvResult nvhost_gpu::ChannelSetTimeslice(const std::vector<u8>& input, std::vect
     return NvResult::Success;
 }
 
+Kernel::KEvent* nvhost_gpu::QueryEvent(u32 event_id) {
+    switch (event_id) {
+    case 1:
+        return sm_exception_breakpoint_int_report_event;
+    case 2:
+        return sm_exception_breakpoint_pause_report_event;
+    case 3:
+        return error_notifier_event;
+    default: {
+        LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id);
+    }
+    }
+    return nullptr;
+}
+
 } // namespace Service::Nvidia::Devices
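Note: a standalone sketch (not part of this commit) of the fence arithmetic in SubmitGPFIFOImpl above: the syncpoint maximum advances by two when a fence increment is requested (one per SyncpointOperation command emitted by the increment command list), plus the caller-supplied fence value when increment_value is set.

    #include <cstdint>

    std::uint32_t ExpectedIncrement(bool fence_increment, bool increment_value,
                                    std::uint32_t fence_value) {
        return (fence_increment ? 2u : 0u) + (increment_value ? fence_value : 0u);
    }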
src/core/hle/service/nvdrv/devices/nvhost_gpu.h

@@ -13,17 +13,31 @@
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "video_core/dma_pusher.h"
 
-namespace Service::Nvidia {
-class SyncpointManager;
+namespace Tegra {
+namespace Control {
+struct ChannelState;
 }
+} // namespace Tegra
+
+namespace Service::Nvidia {
+
+namespace NvCore {
+class Container;
+class NvMap;
+class SyncpointManager;
+} // namespace NvCore
+
+class EventInterface;
+} // namespace Service::Nvidia
 
 namespace Service::Nvidia::Devices {
 
+class nvhost_as_gpu;
 class nvmap;
 class nvhost_gpu final : public nvdevice {
 public:
-    explicit nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                        SyncpointManager& syncpoint_manager_);
+    explicit nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
+                        NvCore::Container& core);
     ~nvhost_gpu() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -36,7 +50,10 @@ public:
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
+    Kernel::KEvent* QueryEvent(u32 event_id) override;
+
 private:
+    friend class nvhost_as_gpu;
     enum class CtxObjects : u32_le {
         Ctx2D = 0x902D,
         Ctx3D = 0xB197,
@@ -146,17 +163,13 @@ private:
         u32_le num_entries{}; // number of fence objects being submitted
         union {
             u32_le raw;
-            BitField<0, 1, u32_le> add_wait;      // append a wait sync_point to the list
-            BitField<1, 1, u32_le> add_increment; // append an increment to the list
+            BitField<0, 1, u32_le> fence_wait;      // append a wait sync_point to the list
+            BitField<1, 1, u32_le> fence_increment; // append an increment to the list
             BitField<2, 1, u32_le> new_hw_format; // mostly ignored
             BitField<4, 1, u32_le> suppress_wfi;  // suppress wait for interrupt
-            BitField<8, 1, u32_le> increment;     // increment the returned fence
+            BitField<8, 1, u32_le> increment_value; // increment the returned fence
         } flags;
-        NvFence fence_out{}; // returned new fence object for others to wait on
+        NvFence fence{}; // returned new fence object for others to wait on
 
-        u32 AddIncrementValue() const {
-            return flags.add_increment.Value() << 1;
-        }
     };
     static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(NvFence),
                   "IoctlSubmitGpfifo is incorrect size");
@@ -191,9 +204,18 @@ private:
     NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output);
 
-    std::shared_ptr<nvmap> nvmap_dev;
-    SyncpointManager& syncpoint_manager;
-    NvFence channel_fence;
+    EventInterface& events_interface;
+    NvCore::Container& core;
+    NvCore::SyncpointManager& syncpoint_manager;
+    NvCore::NvMap& nvmap;
+    std::shared_ptr<Tegra::Control::ChannelState> channel_state;
+    u32 channel_syncpoint;
+    std::mutex channel_mutex;
+
+    // Events
+    Kernel::KEvent* sm_exception_breakpoint_int_report_event;
+    Kernel::KEvent* sm_exception_breakpoint_pause_report_event;
+    Kernel::KEvent* error_notifier_event;
 };
 
 } // namespace Service::Nvidia::Devices
src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp

@@ -9,9 +9,10 @@
 
 namespace Service::Nvidia::Devices {
 
-nvhost_nvdec::nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                           SyncpointManager& syncpoint_manager_)
-    : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}
+u32 nvhost_nvdec::next_id{};
+
+nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
+    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
 nvhost_nvdec::~nvhost_nvdec() = default;
 
 NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
src/core/hle/service/nvdrv/devices/nvhost_nvdec.h

@@ -10,8 +10,7 @@ namespace Service::Nvidia::Devices {
 
 class nvhost_nvdec final : public nvhost_nvdec_common {
 public:
-    explicit nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                          SyncpointManager& syncpoint_manager_);
+    explicit nvhost_nvdec(Core::System& system_, NvCore::Container& core);
     ~nvhost_nvdec() override;
 
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -25,7 +24,7 @@ public:
     void OnClose(DeviceFD fd) override;
 
 private:
-    u32 next_id{};
+    static u32 next_id;
 };
 
 } // namespace Service::Nvidia::Devices
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -8,10 +8,12 @@
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "core/core.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
-#include "core/hle/service/nvdrv/devices/nvmap.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
 #include "core/memory.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
 #include "video_core/renderer_base.h"
@@ -44,10 +46,17 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
 }
 } // Anonymous namespace

-nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                                         SyncpointManager& syncpoint_manager_)
-    : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {}
-nvhost_nvdec_common::~nvhost_nvdec_common() = default;
+std::unordered_map<DeviceFD, u32> nvhost_nvdec_common::fd_to_id{};
+
+nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_,
+                                         NvCore::ChannelType channel_type_)
+    : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()},
+      nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {
+    channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
+}
+
+nvhost_nvdec_common::~nvhost_nvdec_common() {
+    syncpoint_manager.FreeSyncpoint(channel_syncpoint);
+}

 NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
     IoctlSetNvmapFD params{};
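Note how the rewritten constructor acquires a per-channel syncpoint and the destructor releases it, a manual acquire/release pairing. A hedged sketch of the same ownership expressed as RAII (the manager below is a toy stand-in, not yuzu's NvCore::SyncpointManager):

    #include <cstdint>

    // Toy allocator with the same Allocate/Free pairing as the diff.
    class SyncpointManager {
    public:
        std::uint32_t AllocateSyncpoint(bool /*client_managed*/) { return next_++; }
        void FreeSyncpoint(std::uint32_t /*id*/) {}

    private:
        std::uint32_t next_{1};
    };

    // RAII owner: allocate in the constructor, free in the destructor,
    // exactly what nvhost_nvdec_common now does by hand.
    class ScopedSyncpoint {
    public:
        explicit ScopedSyncpoint(SyncpointManager& manager)
            : manager_{manager}, id_{manager.AllocateSyncpoint(false)} {}
        ~ScopedSyncpoint() { manager_.FreeSyncpoint(id_); }

        ScopedSyncpoint(const ScopedSyncpoint&) = delete;
        ScopedSyncpoint& operator=(const ScopedSyncpoint&) = delete;

        std::uint32_t id() const { return id_; }

    private:
        SyncpointManager& manager_;
        std::uint32_t id_;
    };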
@@ -84,14 +93,14 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
         for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
             const SyncptIncr& syncpt_incr = syncpt_increments[i];
             fence_thresholds[i] =
-                syncpoint_manager.IncreaseSyncpoint(syncpt_incr.id, syncpt_incr.increments);
+                syncpoint_manager.IncrementSyncpointMaxExt(syncpt_incr.id, syncpt_incr.increments);
         }
     }
     for (const auto& cmd_buffer : command_buffers) {
-        const auto object = nvmap_dev->GetObject(cmd_buffer.memory_id);
+        const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
         ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
         Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
-        system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(),
+        system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
                                   cmdlist.size() * sizeof(u32));
         gpu.PushCommandBuffer(fd_to_id[fd], cmdlist);
     }
@@ -112,10 +121,8 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve
     std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
     LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);

-    if (device_syncpoints[params.param] == 0 && system.GPU().UseNvdec()) {
-        device_syncpoints[params.param] = syncpoint_manager.AllocateSyncpoint();
-    }
-    params.value = device_syncpoints[params.param];
+    // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast<u32>(channel_type)]};
+    params.value = channel_syncpoint;
     std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint));

     return NvResult::Success;
@@ -123,6 +130,7 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve

 NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
     IoctlGetWaitbase params{};
+    LOG_CRITICAL(Service_NVDRV, "called WAITBASE");
     std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
     params.value = 0; // Seems to be hard coded at 0
     std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
@@ -136,28 +144,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto

     SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));

-    auto& gpu = system.GPU();
-
     for (auto& cmd_buffer : cmd_buffer_handles) {
-        auto object{nvmap_dev->GetObject(cmd_buffer.map_handle)};
-        if (!object) {
-            LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle);
-            std::memcpy(output.data(), &params, output.size());
-            return NvResult::InvalidState;
-        }
-        if (object->dma_map_addr == 0) {
-            // NVDEC and VIC memory is in the 32-bit address space
-            // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space
-            const GPUVAddr low_addr = gpu.MemoryManager().MapAllocate32(object->addr, object->size);
-            object->dma_map_addr = static_cast<u32>(low_addr);
-            // Ensure that the dma_map_addr is indeed in the lower 32-bit address space.
-            ASSERT(object->dma_map_addr == low_addr);
-        }
-        if (!object->dma_map_addr) {
-            LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
-        } else {
-            cmd_buffer.map_address = object->dma_map_addr;
-        }
+        cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle);
     }
     std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
     std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
@@ -167,11 +155,16 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
 }

 NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
-    // This is intntionally stubbed.
-    // Skip unmapping buffers here, as to not break the continuity of the VP9 reference frame
-    // addresses, and risk invalidating data before the async GPU thread is done with it
+    IoctlMapBuffer params{};
+    std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
+    std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
+
+    SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
+    for (auto& cmd_buffer : cmd_buffer_handles) {
+        nvmap.UnpinHandle(cmd_buffer.map_handle);
+    }
+
     std::memset(output.data(), 0, output.size());
-    LOG_DEBUG(Service_NVDRV, "(STUBBED) called");
     return NvResult::Success;
 }
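With this change MapBuffer and UnmapBuffer reduce to pinning and unpinning nvmap handles in the shared core. A minimal sketch of a pin table with the same reference-counted semantics (toy code, not yuzu's NvCore::NvMap, which pins into the SMMU address space):

    #include <cstdint>
    #include <unordered_map>

    class PinTable {
    public:
        // Returns a (fake) DMA address for the handle; repeated pins of the
        // same handle reuse the existing mapping.
        std::uint32_t PinHandle(std::uint32_t handle) {
            auto& entry = entries_[handle];
            if (entry.pins++ == 0) {
                entry.address = next_address_;
                next_address_ += 0x1000; // pretend every handle is one page
            }
            return entry.address;
        }

        // Drops one pin; the mapping survives until the last pin is gone.
        void UnpinHandle(std::uint32_t handle) {
            auto it = entries_.find(handle);
            if (it != entries_.end() && it->second.pins > 0 && --it->second.pins == 0) {
                entries_.erase(it);
            }
        }

    private:
        struct Entry {
            std::uint32_t pins{};
            std::uint32_t address{};
        };
        std::unordered_map<std::uint32_t, Entry> entries_;
        std::uint32_t next_address_{0x10000};
    };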
@@ -182,4 +175,13 @@ NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector<u8>& input,
     return NvResult::Success;
 }

+Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) {
+    LOG_CRITICAL(Service_NVDRV, "Unknown HOSTX1 Event {}", event_id);
+    return nullptr;
+}
+
+void nvhost_nvdec_common::Reset() {
+    fd_to_id.clear();
+}
+
 } // namespace Service::Nvidia::Devices
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -6,20 +6,26 @@
 #include <vector>
 #include "common/common_types.h"
 #include "common/swap.h"
+#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"

 namespace Service::Nvidia {
-class SyncpointManager;
+
+namespace NvCore {
+class Container;
+class NvMap;
+} // namespace NvCore

 namespace Devices {
-class nvmap;

 class nvhost_nvdec_common : public nvdevice {
 public:
-    explicit nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                                 SyncpointManager& syncpoint_manager_);
+    explicit nvhost_nvdec_common(Core::System& system_, NvCore::Container& core,
+                                 NvCore::ChannelType channel_type);
     ~nvhost_nvdec_common() override;

+    static void Reset();
+
 protected:
     struct IoctlSetNvmapFD {
         s32_le nvmap_fd{};
@@ -110,11 +116,16 @@ protected:
     NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);

-    std::unordered_map<DeviceFD, u32> fd_to_id{};
+    Kernel::KEvent* QueryEvent(u32 event_id) override;
+
+    static std::unordered_map<DeviceFD, u32> fd_to_id;
+    u32 channel_syncpoint;
     s32_le nvmap_fd{};
     u32_le submit_timeout{};
-    std::shared_ptr<nvmap> nvmap_dev;
-    SyncpointManager& syncpoint_manager;
+    NvCore::Container& core;
+    NvCore::SyncpointManager& syncpoint_manager;
+    NvCore::NvMap& nvmap;
+    NvCore::ChannelType channel_type;
     std::array<u32, MaxSyncPoints> device_syncpoints{};
 };
 }; // namespace Devices
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -8,9 +8,11 @@
 #include "video_core/renderer_base.h"

 namespace Service::Nvidia::Devices {
-nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                       SyncpointManager& syncpoint_manager_)
-    : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}
+
+u32 nvhost_vic::next_id{};
+
+nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
+    : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {}

 nvhost_vic::~nvhost_vic() = default;
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
@@ -9,8 +9,7 @@ namespace Service::Nvidia::Devices {

 class nvhost_vic final : public nvhost_nvdec_common {
 public:
-    explicit nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_,
-                        SyncpointManager& syncpoint_manager_);
+    explicit nvhost_vic(Core::System& system_, NvCore::Container& core);
     ~nvhost_vic();

     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -24,6 +23,6 @@ public:
     void OnClose(DeviceFD fd) override;

 private:
-    u32 next_id{};
+    static u32 next_id;
 };
 } // namespace Service::Nvidia::Devices
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -2,19 +2,26 @@
 // SPDX-License-Identifier: GPL-2.0-or-later

 #include <algorithm>
+#include <bit>
 #include <cstring>

+#include "common/alignment.h"
 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/devices/nvmap.h"
+#include "core/memory.h"
+
+using Core::Memory::PAGE_SIZE;

 namespace Service::Nvidia::Devices {

-nvmap::nvmap(Core::System& system_) : nvdevice{system_} {
-    // Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to
-    // represent this.
-    CreateObject(0);
-}
+nvmap::nvmap(Core::System& system_, NvCore::Container& container_)
+    : nvdevice{system_}, container{container_}, file{container.GetNvMapFile()} {}

 nvmap::~nvmap() = default;
@@ -63,38 +70,31 @@ void nvmap::OnOpen(DeviceFD fd) {}
 void nvmap::OnClose(DeviceFD fd) {}

 VAddr nvmap::GetObjectAddress(u32 handle) const {
-    auto object = GetObject(handle);
-    ASSERT(object);
-    ASSERT(object->status == Object::Status::Allocated);
-    return object->addr;
+    auto obj = file.GetHandle(handle);
+    if (obj) {
+        return obj->address;
+    }
+    return 0;
 }

-u32 nvmap::CreateObject(u32 size) {
-    // Create a new nvmap object and obtain a handle to it.
-    auto object = std::make_shared<Object>();
-    object->id = next_id++;
-    object->size = size;
-    object->status = Object::Status::Created;
-    object->refcount = 1;
-
-    const u32 handle = next_handle++;
-
-    handles.insert_or_assign(handle, std::move(object));
-
-    return handle;
+std::shared_ptr<NvCore::NvMap::Handle> nvmap::GetObject(u32 handle) const {
+    return file.GetHandle(handle);
 }

 NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
     IocCreateParams params;
     std::memcpy(&params, input.data(), sizeof(params));
-    LOG_DEBUG(Service_NVDRV, "size=0x{:08X}", params.size);
+    LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);

-    if (!params.size) {
-        LOG_ERROR(Service_NVDRV, "Size is 0");
-        return NvResult::BadValue;
+    std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
+    auto result = file.CreateHandle(Common::AlignUp(params.size, PAGE_SIZE), handle_description);
+    if (result != NvResult::Success) {
+        LOG_CRITICAL(Service_NVDRV, "Failed to create Object");
+        return result;
     }
-    params.handle = CreateObject(params.size);
+    handle_description->orig_size = params.size; // Orig size is the unaligned size
+    params.handle = handle_description->id;
+    LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size);

     std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
@@ -103,63 +103,68 @@ NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output)
 NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) {
     IocAllocParams params;
     std::memcpy(&params, input.data(), sizeof(params));
-    LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr);
+    LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);

     if (!params.handle) {
-        LOG_ERROR(Service_NVDRV, "Handle is 0");
+        LOG_CRITICAL(Service_NVDRV, "Handle is 0");
         return NvResult::BadValue;
     }

     if ((params.align - 1) & params.align) {
-        LOG_ERROR(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
+        LOG_CRITICAL(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
         return NvResult::BadValue;
     }

-    const u32 min_alignment = 0x1000;
-    if (params.align < min_alignment) {
-        params.align = min_alignment;
+    // Force page size alignment at a minimum
+    if (params.align < PAGE_SIZE) {
+        params.align = PAGE_SIZE;
     }

-    auto object = GetObject(params.handle);
-    if (!object) {
-        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
+    auto handle_description{file.GetHandle(params.handle)};
+    if (!handle_description) {
+        LOG_CRITICAL(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
         return NvResult::BadValue;
     }

-    if (object->status == Object::Status::Allocated) {
-        LOG_ERROR(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
+    if (handle_description->allocated) {
+        LOG_CRITICAL(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
         return NvResult::InsufficientMemory;
     }

-    object->flags = params.flags;
-    object->align = params.align;
-    object->kind = params.kind;
-    object->addr = params.addr;
-    object->status = Object::Status::Allocated;
+    const auto result =
+        handle_description->Alloc(params.flags, params.align, params.kind, params.address);
+    if (result != NvResult::Success) {
+        LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
+        return result;
+    }
+    ASSERT(system.CurrentProcess()
+               ->PageTable()
+               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
+               .IsSuccess());
     std::memcpy(output.data(), &params, sizeof(params));
-    return NvResult::Success;
+    return result;
 }

 NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) {
     IocGetIdParams params;
     std::memcpy(&params, input.data(), sizeof(params));

-    LOG_WARNING(Service_NVDRV, "called");
+    LOG_DEBUG(Service_NVDRV, "called");

+    // See the comment in FromId for extra info on this function
     if (!params.handle) {
-        LOG_ERROR(Service_NVDRV, "Handle is zero");
+        LOG_CRITICAL(Service_NVDRV, "Error!");
         return NvResult::BadValue;
     }

-    auto object = GetObject(params.handle);
-    if (!object) {
-        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
-        return NvResult::BadValue;
+    auto handle_description{file.GetHandle(params.handle)};
+    if (!handle_description) {
+        LOG_CRITICAL(Service_NVDRV, "Error!");
+        return NvResult::AccessDenied; // This will always return EPERM irrespective of if the
+                                       // handle exists or not
     }

-    params.id = object->id;
+    params.id = handle_description->id;

     std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }
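The alignment guard kept in IocAlloc relies on the usual power-of-two identity: a power of two has a single set bit, `align - 1` has only the bits below it, so their AND is zero; any other nonzero value shares a bit with its predecessor. For example, 0x1000 & 0x0FFF == 0, while 0x1800 & 0x17FF == 0x1000 != 0. The same check as a one-liner:

    #include <cstdint>

    // True for 1, 2, 4, 8, ...; false for 0 and for non-powers of two.
    // Same form as the ioctl's `(params.align - 1) & params.align` test,
    // with an explicit zero check added.
    constexpr bool IsPowerOfTwo(std::uint32_t align) {
        return align != 0 && ((align - 1) & align) == 0;
    }

    static_assert(IsPowerOfTwo(0x1000));
    static_assert(!IsPowerOfTwo(0x1800));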
@@ -168,26 +173,29 @@ NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output)
     IocFromIdParams params;
     std::memcpy(&params, input.data(), sizeof(params));

-    LOG_WARNING(Service_NVDRV, "(STUBBED) called");
+    LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id);

-    auto itr = std::find_if(handles.begin(), handles.end(),
-                            [&](const auto& entry) { return entry.second->id == params.id; });
-    if (itr == handles.end()) {
-        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
+    // Handles and IDs are always the same value in nvmap however IDs can be used globally given the
+    // right permissions.
+    // Since we don't plan on ever supporting multiprocess we can skip implementing handle refs and
+    // so this function just does simple validation and passes through the handle id.
+    if (!params.id) {
+        LOG_CRITICAL(Service_NVDRV, "Zero Id is invalid!");
         return NvResult::BadValue;
     }

-    auto& object = itr->second;
-    if (object->status != Object::Status::Allocated) {
-        LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
+    auto handle_description{file.GetHandle(params.id)};
+    if (!handle_description) {
+        LOG_CRITICAL(Service_NVDRV, "Unregistered handle!");
         return NvResult::BadValue;
     }

-    itr->second->refcount++;
-
-    // Return the existing handle instead of creating a new one.
-    params.handle = itr->first;
+    auto result = handle_description->Duplicate(false);
+    if (result != NvResult::Success) {
+        LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!");
+        return result;
+    }
+    params.handle = handle_description->id;
     std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }
@@ -198,35 +206,43 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
     IocParamParams params;
     std::memcpy(&params, input.data(), sizeof(params));

-    LOG_WARNING(Service_NVDRV, "(STUBBED) called type={}", params.param);
+    LOG_DEBUG(Service_NVDRV, "called type={}", params.param);

-    auto object = GetObject(params.handle);
-    if (!object) {
-        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
+    if (!params.handle) {
+        LOG_CRITICAL(Service_NVDRV, "Invalid handle!");
         return NvResult::BadValue;
     }

-    if (object->status != Object::Status::Allocated) {
-        LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
+    auto handle_description{file.GetHandle(params.handle)};
+    if (!handle_description) {
+        LOG_CRITICAL(Service_NVDRV, "Not registered handle!");
         return NvResult::BadValue;
     }

-    switch (static_cast<ParamTypes>(params.param)) {
-    case ParamTypes::Size:
-        params.result = object->size;
+    switch (params.param) {
+    case HandleParameterType::Size:
+        params.result = static_cast<u32_le>(handle_description->orig_size);
         break;
-    case ParamTypes::Alignment:
-        params.result = object->align;
+    case HandleParameterType::Alignment:
+        params.result = static_cast<u32_le>(handle_description->align);
         break;
-    case ParamTypes::Heap:
-        // TODO(Subv): Seems to be a hardcoded value?
-        params.result = 0x40000000;
+    case HandleParameterType::Base:
+        params.result = static_cast<u32_le>(-22); // posix EINVAL
+        break;
+    case HandleParameterType::Heap:
+        if (handle_description->allocated)
+            params.result = 0x40000000;
+        else
+            params.result = 0;
         break;
-    case ParamTypes::Kind:
-        params.result = object->kind;
+    case HandleParameterType::Kind:
+        params.result = handle_description->kind;
+        break;
+    case HandleParameterType::IsSharedMemMapped:
+        params.result = handle_description->is_shared_mem_mapped;
         break;
     default:
-        UNIMPLEMENTED();
+        return NvResult::BadValue;
     }

     std::memcpy(output.data(), &params, sizeof(params));
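A short arithmetic aside on the new Base case: `static_cast<u32_le>(-22)` encodes negative POSIX EINVAL into the unsigned 32-bit result word via two's complement, so the guest reads back 0xFFFFFFEA. Verified in a standalone line:

    #include <cstdint>

    // -22 (POSIX EINVAL) folded into the 32-bit result word that the
    // Base parameter query writes back.
    constexpr std::uint32_t kEinvalResult = static_cast<std::uint32_t>(-22);
    static_assert(kEinvalResult == 0xFFFFFFEAu);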
@@ -234,46 +250,29 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
 }

 NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
-    // TODO(Subv): These flags are unconfirmed.
-    enum FreeFlags {
-        Freed = 0,
-        NotFreedYet = 1,
-    };
-
     IocFreeParams params;
     std::memcpy(&params, input.data(), sizeof(params));

-    LOG_WARNING(Service_NVDRV, "(STUBBED) called");
+    LOG_DEBUG(Service_NVDRV, "called");

-    auto itr = handles.find(params.handle);
-    if (itr == handles.end()) {
-        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
-        return NvResult::BadValue;
-    }
-    if (!itr->second->refcount) {
-        LOG_ERROR(
-            Service_NVDRV,
-            "There is no references to this object. The object is already freed. handle={:08X}",
-            params.handle);
-        return NvResult::BadValue;
+    if (!params.handle) {
+        LOG_CRITICAL(Service_NVDRV, "Handle null freed?");
+        return NvResult::Success;
     }

-    itr->second->refcount--;
-
-    params.size = itr->second->size;
-
-    if (itr->second->refcount == 0) {
-        params.flags = Freed;
-        // The address of the nvmap is written to the output if we're finally freeing it, otherwise
-        // 0 is written.
-        params.address = itr->second->addr;
+    if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
+        ASSERT(system.CurrentProcess()
+                   ->PageTable()
+                   .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
+                   .IsSuccess());
+        params.address = freeInfo->address;
+        params.size = static_cast<u32>(freeInfo->size);
+        params.flags.raw = 0;
+        params.flags.map_uncached.Assign(freeInfo->was_uncached);
     } else {
-        params.flags = NotFreedYet;
-        params.address = 0;
+        // This is possible when there's internal dups or other duplicates.
     }

-    handles.erase(params.handle);
-
     std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }
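The `if (auto freeInfo{...})` form works because FreeHandle returns an engaged optional only when the last reference actually died; with duplicates outstanding there is nothing to report back to the guest. A toy table with the same contract (illustrative only, not yuzu's NvCore::NvMap):

    #include <cstdint>
    #include <optional>
    #include <unordered_map>

    struct FreeInfo {
        std::uint64_t address;
        std::uint64_t size;
        bool was_uncached;
    };

    class HandleTable {
    public:
        std::uint32_t Create() {
            refs_[next_] = 1;
            return next_++;
        }
        std::uint32_t Duplicate(std::uint32_t handle) {
            ++refs_[handle];
            return handle;
        }

        // Engaged result only when the final reference is dropped; a still
        // duplicated handle yields nullopt, matching the else-branch above.
        std::optional<FreeInfo> FreeHandle(std::uint32_t handle) {
            auto it = refs_.find(handle);
            if (it == refs_.end() || --it->second > 0) {
                return std::nullopt;
            }
            refs_.erase(it);
            return FreeInfo{static_cast<std::uint64_t>(handle) * 0x1000, 0x1000, false};
        }

    private:
        std::unordered_map<std::uint32_t, int> refs_;
        std::uint32_t next_{1};
    };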
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -9,15 +9,23 @@
 #include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/swap.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"

+namespace Service::Nvidia::NvCore {
+class Container;
+} // namespace Service::Nvidia::NvCore
+
 namespace Service::Nvidia::Devices {

 class nvmap final : public nvdevice {
 public:
-    explicit nvmap(Core::System& system_);
+    explicit nvmap(Core::System& system_, NvCore::Container& container);
     ~nvmap() override;

+    nvmap(nvmap const&) = delete;
+    nvmap& operator=(nvmap const&) = delete;
+
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                     std::vector<u8>& output) override;
     NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -31,27 +39,16 @@ public:
     /// Returns the allocated address of an nvmap object given its handle.
     VAddr GetObjectAddress(u32 handle) const;

-    /// Represents an nvmap object.
-    struct Object {
-        enum class Status { Created, Allocated };
-        u32 id;
-        u32 size;
-        u32 flags;
-        u32 align;
-        u8 kind;
-        VAddr addr;
-        Status status;
-        u32 refcount;
-        u32 dma_map_addr;
-    };
+    std::shared_ptr<NvCore::NvMap::Handle> GetObject(u32 handle) const;

-    std::shared_ptr<Object> GetObject(u32 handle) const {
-        auto itr = handles.find(handle);
-        if (itr != handles.end()) {
-            return itr->second;
-        }
-        return {};
-    }
+    enum class HandleParameterType : u32_le {
+        Size = 1,
+        Alignment = 2,
+        Base = 3,
+        Heap = 4,
+        Kind = 5,
+        IsSharedMemMapped = 6
+    };

 private:
     /// Id to use for the next handle that is created.
@@ -60,9 +57,6 @@ private:
     /// Id to use for the next object that is created.
     u32 next_id = 0;

-    /// Mapping of currently allocated handles to the objects they represent.
-    std::unordered_map<u32, std::shared_ptr<Object>> handles;
-
     struct IocCreateParams {
         // Input
         u32_le size{};
@@ -83,11 +77,11 @@ private:
         // Input
         u32_le handle{};
         u32_le heap_mask{};
-        u32_le flags{};
+        NvCore::NvMap::Handle::Flags flags{};
         u32_le align{};
         u8 kind{};
         INSERT_PADDING_BYTES(7);
-        u64_le addr{};
+        u64_le address{};
     };
     static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size");
@@ -96,14 +90,14 @@ private:
         INSERT_PADDING_BYTES(4);
         u64_le address{};
         u32_le size{};
-        u32_le flags{};
+        NvCore::NvMap::Handle::Flags flags{};
     };
     static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size");

     struct IocParamParams {
         // Input
         u32_le handle{};
-        u32_le param{};
+        HandleParameterType param{};
         // Output
         u32_le result{};
     };
@@ -117,14 +111,15 @@ private:
     };
     static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");

-    u32 CreateObject(u32 size);
-
     NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output);
+
+    NvCore::Container& container;
+    NvCore::NvMap& file;
 };

 } // namespace Service::Nvidia::Devices
--- a/src/core/hle/service/nvdrv/nvdata.h
+++ b/src/core/hle/service/nvdrv/nvdata.h
@@ -1,5 +1,7 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors
+// (https://github.com/skyline-emu/)
+// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
+// or any later version Refer to the license.txt file included.

 #pragma once
@@ -78,11 +80,15 @@ enum class NvResult : u32 {
     ModuleNotPresent = 0xA000E,
 };

+// obtained from
+// https://github.com/skyline-emu/skyline/blob/nvdec-dev/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost/ctrl.h#L47
 enum class EventState {
-    Free = 0,
-    Registered = 1,
-    Waiting = 2,
-    Busy = 3,
+    Available = 0,
+    Waiting = 1,
+    Cancelling = 2,
+    Signalling = 3,
+    Signalled = 4,
+    Cancelled = 5,
 };

 union Ioctl {
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -1,5 +1,7 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors
+// (https://github.com/skyline-emu/)
+// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
+// or any later version Refer to the license.txt file included.

 #include <utility>
@@ -8,6 +10,7 @@
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
 #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
@@ -15,17 +18,31 @@
 #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
 #include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
+#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h"
 #include "core/hle/service/nvdrv/devices/nvhost_vic.h"
 #include "core/hle/service/nvdrv/devices/nvmap.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/hle/service/nvdrv/nvdrv_interface.h"
 #include "core/hle/service/nvdrv/nvmemp.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
 #include "core/hle/service/nvflinger/nvflinger.h"
+#include "video_core/gpu.h"

 namespace Service::Nvidia {

+EventInterface::EventInterface(Module& module_) : module{module_}, guard{}, on_signal{} {}
+
+EventInterface::~EventInterface() = default;
+
+Kernel::KEvent* EventInterface::CreateEvent(std::string name) {
+    Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name));
+    return new_event;
+}
+
+void EventInterface::FreeEvent(Kernel::KEvent* event) {
+    module.service_context.CloseEvent(event);
+}
+
 void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger,
                        Core::System& system) {
     auto module_ = std::make_shared<Module>(system);
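EventInterface now owns no events itself; it simply forwards creation and teardown to the module's service context, keeping a mutex and a signal list for the ctrl devices. The delegation shape, sketched with stand-in types (not the yuzu kernel API):

    #include <mutex>
    #include <string>
    #include <utility>

    struct Event {};
    struct ServiceContext {
        Event* CreateEvent(std::string /*name*/) { return new Event{}; }
        void CloseEvent(Event* e) { delete e; }
    };

    // Same pattern as the new EventInterface: pure forwarding, no ownership
    // of the events beyond what the context manages.
    class EventInterface {
    public:
        explicit EventInterface(ServiceContext& ctx) : ctx_{ctx} {}
        Event* CreateEvent(std::string name) { return ctx_.CreateEvent(std::move(name)); }
        void FreeEvent(Event* event) { ctx_.CloseEvent(event); }

    private:
        ServiceContext& ctx_;
        std::mutex guard_; // reserved for the signal list, as in the diff
    };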
@@ -38,33 +55,55 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
 }

 Module::Module(Core::System& system)
-    : syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"} {
-    for (u32 i = 0; i < MaxNvEvents; i++) {
-        events_interface.events[i].event =
-            service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", i));
-        events_interface.status[i] = EventState::Free;
-        events_interface.registered[i] = false;
-    }
-    auto nvmap_dev = std::make_shared<Devices::nvmap>(system);
-    devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, nvmap_dev);
-    devices["/dev/nvhost-gpu"] =
-        std::make_shared<Devices::nvhost_gpu>(system, nvmap_dev, syncpoint_manager);
-    devices["/dev/nvhost-ctrl-gpu"] = std::make_shared<Devices::nvhost_ctrl_gpu>(system);
-    devices["/dev/nvmap"] = nvmap_dev;
-    devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, nvmap_dev);
-    devices["/dev/nvhost-ctrl"] =
-        std::make_shared<Devices::nvhost_ctrl>(system, events_interface, syncpoint_manager);
-    devices["/dev/nvhost-nvdec"] =
-        std::make_shared<Devices::nvhost_nvdec>(system, nvmap_dev, syncpoint_manager);
-    devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system);
-    devices["/dev/nvhost-vic"] =
-        std::make_shared<Devices::nvhost_vic>(system, nvmap_dev, syncpoint_manager);
+    : service_context{system, "nvdrv"}, events_interface{*this}, container{system.Host1x()} {
+    builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_as_gpu>(system, *this, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_gpu>(system, events_interface, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-ctrl-gpu"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvmap"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvmap>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvdisp_disp0"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvdisp_disp0>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-ctrl"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-nvdec"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_nvdec>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-nvjpg"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device = std::make_shared<Devices::nvhost_nvjpg>(system);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-vic"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_vic>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
 }

 Module::~Module() {
-    for (u32 i = 0; i < MaxNvEvents; i++) {
-        service_context.CloseEvent(events_interface.events[i].event);
-    }
+    Devices::nvhost_nvdec_common::Reset();
 }

 NvResult Module::VerifyFD(DeviceFD fd) const {
|
||||||
}
|
}
|
||||||
|
|
||||||
DeviceFD Module::Open(const std::string& device_name) {
|
DeviceFD Module::Open(const std::string& device_name) {
|
||||||
if (devices.find(device_name) == devices.end()) {
|
auto it = builders.find(device_name);
|
||||||
|
if (it == builders.end()) {
|
||||||
LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
|
LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
|
||||||
return INVALID_NVDRV_FD;
|
return INVALID_NVDRV_FD;
|
||||||
}
|
}
|
||||||
|
|
||||||
auto device = devices[device_name];
|
|
||||||
const DeviceFD fd = next_fd++;
|
const DeviceFD fd = next_fd++;
|
||||||
|
auto& builder = it->second;
|
||||||
|
auto device = builder(fd)->second;
|
||||||
|
|
||||||
device->OnOpen(fd);
|
device->OnOpen(fd);
|
||||||
|
|
||||||
open_files[fd] = std::move(device);
|
|
||||||
|
|
||||||
return fd;
|
return fd;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -168,22 +207,24 @@ NvResult Module::Close(DeviceFD fd) {
     return NvResult::Success;
 }

-void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) {
-    for (u32 i = 0; i < MaxNvEvents; i++) {
-        if (events_interface.assigned_syncpt[i] == syncpoint_id &&
-            events_interface.assigned_value[i] == value) {
-            events_interface.LiberateEvent(i);
-            events_interface.events[i].event->GetWritableEvent().Signal();
-        }
+NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) {
+    if (fd < 0) {
+        LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
+        return NvResult::InvalidState;
     }
-}

-Kernel::KReadableEvent& Module::GetEvent(const u32 event_id) {
-    return events_interface.events[event_id].event->GetReadableEvent();
-}
+    const auto itr = open_files.find(fd);
+
+    if (itr == open_files.end()) {
+        LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd);
+        return NvResult::NotImplemented;
+    }

-Kernel::KWritableEvent& Module::GetEventWriteable(const u32 event_id) {
-    return events_interface.events[event_id].event->GetWritableEvent();
+    event = itr->second->QueryEvent(event_id);
+    if (!event) {
+        return NvResult::BadParameter;
+    }
+    return NvResult::Success;
 }

 } // namespace Service::Nvidia
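Event lookup now walks fd to device to device-specific QueryEvent, with a null result translated to BadParameter instead of indexing a fixed global event table. The dispatch chain, sketched with stand-in types (not the yuzu kernel API):

    #include <memory>
    #include <unordered_map>

    struct KEvent {};

    struct Device {
        // Default: the device exposes no events; subclasses that do
        // override this, as nvhost_ctrl and nvhost_gpu do in the diff.
        virtual KEvent* QueryEvent(unsigned /*event_id*/) { return nullptr; }
        virtual ~Device() = default;
    };

    enum class Result { Success, InvalidState, NotImplemented, BadParameter };

    Result QueryEvent(const std::unordered_map<int, std::shared_ptr<Device>>& open_files,
                      int fd, unsigned event_id, KEvent*& out_event) {
        if (fd < 0) {
            return Result::InvalidState; // bad descriptor
        }
        const auto itr = open_files.find(fd);
        if (itr == open_files.end()) {
            return Result::NotImplemented; // unknown fd
        }
        out_event = itr->second->QueryEvent(event_id);
        return out_event ? Result::Success : Result::BadParameter;
    }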
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -1,16 +1,21 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors
+// (https://github.com/skyline-emu/)
+// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
+// or any later version Refer to the license.txt file included.

 #pragma once

+#include <functional>
+#include <list>
 #include <memory>
+#include <string>
 #include <unordered_map>
 #include <vector>

 #include "common/common_types.h"
 #include "core/hle/service/kernel_helpers.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/nvdata.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
 #include "core/hle/service/nvflinger/ui/fence.h"
 #include "core/hle/service/service.h"
@@ -28,81 +33,31 @@ class NVFlinger;

 namespace Service::Nvidia {

+namespace NvCore {
+class Container;
 class SyncpointManager;
+} // namespace NvCore

 namespace Devices {
 class nvdevice;
-}
+class nvhost_ctrl;
+} // namespace Devices

-/// Represents an Nvidia event
-struct NvEvent {
-    Kernel::KEvent* event{};
-    NvFence fence{};
-};
+class Module;

-struct EventInterface {
-    // Mask representing currently busy events
-    u64 events_mask{};
-    // Each kernel event associated to an NV event
-    std::array<NvEvent, MaxNvEvents> events;
-    // The status of the current NVEvent
-    std::array<EventState, MaxNvEvents> status{};
-    // Tells if an NVEvent is registered or not
-    std::array<bool, MaxNvEvents> registered{};
-    // Tells the NVEvent that it has failed.
-    std::array<bool, MaxNvEvents> failed{};
-    // When an NVEvent is waiting on GPU interrupt, this is the sync_point
-    // associated with it.
-    std::array<u32, MaxNvEvents> assigned_syncpt{};
-    // This is the value of the GPU interrupt for which the NVEvent is waiting
-    // for.
-    std::array<u32, MaxNvEvents> assigned_value{};
-    // Constant to denote an unasigned syncpoint.
-    static constexpr u32 unassigned_syncpt = 0xFFFFFFFF;
-    std::optional<u32> GetFreeEvent() const {
-        u64 mask = events_mask;
-        for (u32 i = 0; i < MaxNvEvents; i++) {
-            const bool is_free = (mask & 0x1) == 0;
-            if (is_free) {
-                if (status[i] == EventState::Registered || status[i] == EventState::Free) {
-                    return {i};
-                }
-            }
-            mask = mask >> 1;
-        }
-        return std::nullopt;
-    }
-    void SetEventStatus(const u32 event_id, EventState new_status) {
-        EventState old_status = status[event_id];
-        if (old_status == new_status) {
-            return;
-        }
-        status[event_id] = new_status;
-        if (new_status == EventState::Registered) {
-            registered[event_id] = true;
-        }
-        if (new_status == EventState::Waiting || new_status == EventState::Busy) {
-            events_mask |= (1ULL << event_id);
-        }
-    }
-    void RegisterEvent(const u32 event_id) {
-        registered[event_id] = true;
-        if (status[event_id] == EventState::Free) {
-            status[event_id] = EventState::Registered;
-        }
-    }
-    void UnregisterEvent(const u32 event_id) {
-        registered[event_id] = false;
-        if (status[event_id] == EventState::Registered) {
-            status[event_id] = EventState::Free;
-        }
-    }
-    void LiberateEvent(const u32 event_id) {
-        status[event_id] = registered[event_id] ? EventState::Registered : EventState::Free;
-        events_mask &= ~(1ULL << event_id);
-        assigned_syncpt[event_id] = unassigned_syncpt;
-        assigned_value[event_id] = 0;
-    }
+class EventInterface {
+public:
+    EventInterface(Module& module_);
+    ~EventInterface();
+
+    Kernel::KEvent* CreateEvent(std::string name);
+
+    void FreeEvent(Kernel::KEvent* event);
+
+private:
+    Module& module;
+    std::mutex guard;
+    std::list<Devices::nvhost_ctrl*> on_signal;
 };

 class Module final {
@@ -112,9 +67,9 @@ public:

     /// Returns a pointer to one of the available devices, identified by its name.
     template <typename T>
-    std::shared_ptr<T> GetDevice(const std::string& name) {
-        auto itr = devices.find(name);
-        if (itr == devices.end())
+    std::shared_ptr<T> GetDevice(DeviceFD fd) {
+        auto itr = open_files.find(fd);
+        if (itr == open_files.end())
             return nullptr;
         return std::static_pointer_cast<T>(itr->second);
     }
@@ -137,28 +92,28 @@ public:
     /// Closes a device file descriptor and returns operation success.
     NvResult Close(DeviceFD fd);

-    void SignalSyncpt(const u32 syncpoint_id, const u32 value);
-
-    Kernel::KReadableEvent& GetEvent(u32 event_id);
-
-    Kernel::KWritableEvent& GetEventWriteable(u32 event_id);
+    NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);

 private:
-    /// Manages syncpoints on the host
-    SyncpointManager syncpoint_manager;
+    friend class EventInterface;

     /// Id to use for the next open file descriptor.
     DeviceFD next_fd = 1;

+    using FilesContainerType = std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>>;
     /// Mapping of file descriptors to the devices they reference.
-    std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>> open_files;
+    FilesContainerType open_files;

-    /// Mapping of device node names to their implementation.
-    std::unordered_map<std::string, std::shared_ptr<Devices::nvdevice>> devices;
+    KernelHelpers::ServiceContext service_context;

     EventInterface events_interface;

-    KernelHelpers::ServiceContext service_context;
+    /// Manages syncpoints on the host
+    NvCore::Container container;
+
+    void CreateEvent(u32 event_id);
+    void FreeEvent(u32 event_id);
+    std::unordered_map<std::string, std::function<FilesContainerType::iterator(DeviceFD)>> builders;
 };

 /// Registers all NVDRV services with the specified service manager.
--- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
@@ -1,10 +1,13 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors
+// (https://github.com/skyline-emu/)
+// SPDX-License-Identifier: GPL-3.0-or-later Licensed under GPLv3
+// or any later version Refer to the license.txt file included.

 #include <cinttypes>
 #include "common/logging/log.h"
 #include "core/core.h"
 #include "core/hle/ipc_helpers.h"
+#include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
@@ -12,10 +15,6 @@

 namespace Service::Nvidia {

-void NVDRV::SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
-    nvdrv->SignalSyncpt(syncpoint_id, value);
-}
-
 void NVDRV::Open(Kernel::HLERequestContext& ctx) {
     LOG_DEBUG(Service_NVDRV, "called");
     IPC::ResponseBuilder rb{ctx, 4};
|
||||||
void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
|
void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
|
||||||
IPC::RequestParser rp{ctx};
|
IPC::RequestParser rp{ctx};
|
||||||
const auto fd = rp.Pop<DeviceFD>();
|
const auto fd = rp.Pop<DeviceFD>();
|
||||||
const auto event_id = rp.Pop<u32>() & 0x00FF;
|
const auto event_id = rp.Pop<u32>();
|
||||||
LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}, event_id={:X}", fd, event_id);
|
|
||||||
|
|
||||||
if (!is_initialized) {
|
if (!is_initialized) {
|
||||||
ServiceError(ctx, NvResult::NotInitialized);
|
ServiceError(ctx, NvResult::NotInitialized);
|
||||||
|
@ -173,24 +171,20 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const auto nv_result = nvdrv->VerifyFD(fd);
|
Kernel::KEvent* event = nullptr;
|
||||||
if (nv_result != NvResult::Success) {
|
NvResult result = nvdrv->QueryEvent(fd, event_id, event);
|
||||||
LOG_ERROR(Service_NVDRV, "Invalid FD specified DeviceFD={}!", fd);
|
|
||||||
ServiceError(ctx, nv_result);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (event_id < MaxNvEvents) {
|
if (result == NvResult::Success) {
|
||||||
IPC::ResponseBuilder rb{ctx, 3, 1};
|
IPC::ResponseBuilder rb{ctx, 3, 1};
|
||||||
rb.Push(ResultSuccess);
|
rb.Push(ResultSuccess);
|
||||||
auto& event = nvdrv->GetEvent(event_id);
|
auto& readable_event = event->GetReadableEvent();
|
||||||
event.Clear();
|
rb.PushCopyObjects(readable_event);
|
||||||
rb.PushCopyObjects(event);
|
|
||||||
rb.PushEnum(NvResult::Success);
|
rb.PushEnum(NvResult::Success);
|
||||||
} else {
|
} else {
|
||||||
|
LOG_ERROR(Service_NVDRV, "Invalid event request!");
|
||||||
IPC::ResponseBuilder rb{ctx, 3};
|
IPC::ResponseBuilder rb{ctx, 3};
|
||||||
rb.Push(ResultSuccess);
|
rb.Push(ResultSuccess);
|
||||||
rb.PushEnum(NvResult::BadParameter);
|
rb.PushEnum(result);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -18,8 +18,6 @@ public:
     explicit NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name);
     ~NVDRV() override;

-    void SignalGPUInterruptSyncpt(u32 syncpoint_id, u32 value);
-
 private:
     void Open(Kernel::HLERequestContext& ctx);
     void Ioctl1(Kernel::HLERequestContext& ctx);
@@ -23,6 +23,8 @@
 #include "core/hle/service/vi/display/vi_display.h"
 #include "core/hle/service/vi/layer/vi_layer.h"
 #include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"

 namespace Service::NVFlinger {
@@ -95,10 +97,15 @@ NVFlinger::~NVFlinger() {
             display.GetLayer(layer).Core().NotifyShutdown();
         }
     }

+    if (nvdrv) {
+        nvdrv->Close(disp_fd);
+    }
 }

 void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
     nvdrv = std::move(instance);
+    disp_fd = nvdrv->Open("/dev/nvdisp_disp0");
 }

 std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
@@ -252,30 +259,24 @@ void NVFlinger::Compose() {
             return; // We are likely shutting down
         }

-        auto& gpu = system.GPU();
-        const auto& multi_fence = buffer.fence;
-        guard->unlock();
-        for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
-            const auto& fence = multi_fence.fences[fence_id];
-            gpu.WaitFence(fence.id, fence.value);
-        }
-        guard->lock();
-
-        MicroProfileFlip();
-
         // Now send the buffer to the GPU for drawing.
         // TODO(Subv): Support more than just disp0. The display device selection is probably based
         // on which display we're drawing (Default, Internal, External, etc)
-        auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>("/dev/nvdisp_disp0");
+        auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
         ASSERT(nvdisp);

+        guard->unlock();
         Common::Rectangle<int> crop_rect{
             static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()),
             static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())};

         nvdisp->flip(igbp_buffer.BufferId(), igbp_buffer.Offset(), igbp_buffer.ExternalFormat(),
                      igbp_buffer.Width(), igbp_buffer.Height(), igbp_buffer.Stride(),
-                     static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect);
+                     static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect,
+                     buffer.fence.fences, buffer.fence.num_fences);

+        MicroProfileFlip();
+        guard->lock();

         swap_interval = buffer.swap_interval;
@@ -114,6 +114,7 @@ private:
     void SplitVSync(std::stop_token stop_token);

     std::shared_ptr<Nvidia::Module> nvdrv;
+    s32 disp_fd;

     std::list<VI::Display> displays;
@@ -62,6 +62,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size");
 class NativeWindow final {
 public:
     constexpr explicit NativeWindow(u32 id_) : id{id_} {}
+    constexpr explicit NativeWindow(const NativeWindow& other) = default;

 private:
     const u32 magic = 2;
@@ -477,6 +477,11 @@ struct Memory::Impl {
                        []() {});
     }

+    [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const {
+        return GetPointerImpl(
+            vaddr, []() {}, []() {});
+    }
+
     /**
      * Reads a particular data type out of memory at the given virtual address.
     *
@@ -611,6 +616,10 @@ u8* Memory::GetPointer(VAddr vaddr) {
     return impl->GetPointer(vaddr);
 }

+u8* Memory::GetPointerSilent(VAddr vaddr) {
+    return impl->GetPointerSilent(vaddr);
+}
+
 const u8* Memory::GetPointer(VAddr vaddr) const {
     return impl->GetPointer(vaddr);
 }
@@ -115,6 +115,7 @@ public:
      * If the address is not valid, nullptr will be returned.
      */
     u8* GetPointer(VAddr vaddr);
+    u8* GetPointerSilent(VAddr vaddr);

     template <typename T>
     T* GetPointer(VAddr vaddr) {
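GetPointerSilent reuses the same lookup as GetPointer but passes no-op callbacks, so failed translations do not spam the log. A reduced sketch of that callback-parameterized helper, with hypothetical types in place of Memory::Impl:

    #include <cstdint>
    #include <iostream>

    // The shared helper takes "on unmapped" / "on rasterizer memory" callbacks;
    // the silent variant simply passes empty lambdas instead of logging ones.
    template <typename OnUnmapped, typename OnRasterizer>
    std::uint8_t* GetPointerImpl(std::uintptr_t vaddr, OnUnmapped&& on_unmapped,
                                 OnRasterizer&& on_rasterizer) {
        static std::uint8_t backing[0x1000]{};
        if (vaddr >= sizeof(backing)) {
            on_unmapped();
            return nullptr;
        }
        on_rasterizer();
        return &backing[vaddr];
    }

    std::uint8_t* GetPointer(std::uintptr_t vaddr) {
        return GetPointerImpl(
            vaddr, [vaddr] { std::cerr << "unmapped read at " << vaddr << '\n'; }, [] {});
    }

    std::uint8_t* GetPointerSilent(std::uintptr_t vaddr) {
        return GetPointerImpl(vaddr, [] {}, [] {}); // same lookup, no log output
    }

    int main() {
        std::cout << (GetPointerSilent(0x2000) == nullptr) << '\n'; // 1, silently
    }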
@@ -964,9 +964,9 @@ private:
         demote_endif_node.type = Type::EndIf;
         demote_endif_node.data.end_if.merge = return_block_it->data.block;

-        asl.insert(return_block_it, demote_endif_node);
-        asl.insert(return_block_it, demote_node);
-        asl.insert(return_block_it, demote_if_node);
+        const auto next_it_1 = asl.insert(return_block_it, demote_endif_node);
+        const auto next_it_2 = asl.insert(next_it_1, demote_node);
+        asl.insert(next_it_2, demote_if_node);
     }

     ObjectPool<Statement>& stmt_pool;
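The fix above works because insert() on the statement list returns an iterator to the newly inserted node; anchoring each following insert at that iterator places nodes before the previous one, yielding the if/demote/endif nesting in the right order. A plain std::list sketch of the difference (not the recompiler's own list type):

    #include <iostream>
    #include <list>

    int main() {
        // Inserting three nodes before the same anchor keeps insertion order.
        std::list<int> same_anchor{9};
        auto anchor = same_anchor.begin();
        same_anchor.insert(anchor, 1);
        same_anchor.insert(anchor, 2);
        same_anchor.insert(anchor, 3); // 1 2 3 9

        // Chaining the returned iterator places each node before the last one.
        std::list<int> chained{9};
        auto it = chained.begin();
        it = chained.insert(it, 1); // it -> 1
        it = chained.insert(it, 2); // it -> 2, placed before 1
        chained.insert(it, 3);      // 3 2 1 9

        for (int v : same_anchor) std::cout << v << ' '; // 1 2 3 9
        std::cout << '\n';
        for (int v : chained) std::cout << v << ' ';     // 3 2 1 9
        std::cout << '\n';
    }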
@@ -19,8 +19,10 @@ namespace {
 struct ConstBufferAddr {
     u32 index;
     u32 offset;
+    u32 shift_left;
     u32 secondary_index;
     u32 secondary_offset;
+    u32 secondary_shift_left;
     IR::U32 dynamic_offset;
     u32 count;
     bool has_secondary;
|
@ -172,19 +174,41 @@ bool IsTextureInstruction(const IR::Inst& inst) {
|
||||||
return IndexedInstruction(inst) != IR::Opcode::Void;
|
return IndexedInstruction(inst) != IR::Opcode::Void;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst);
|
std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env);
|
||||||
|
|
||||||
std::optional<ConstBufferAddr> Track(const IR::Value& value) {
|
std::optional<ConstBufferAddr> Track(const IR::Value& value, Environment& env) {
|
||||||
return IR::BreadthFirstSearch(value, TryGetConstBuffer);
|
return IR::BreadthFirstSearch(
|
||||||
|
value, [&env](const IR::Inst* inst) { return TryGetConstBuffer(inst, env); });
|
||||||
}
|
}
|
||||||
|
|
||||||
std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
|
std::optional<u32> TryGetConstant(IR::Value& value, Environment& env) {
|
||||||
|
const IR::Inst* inst = value.InstRecursive();
|
||||||
|
if (inst->GetOpcode() != IR::Opcode::GetCbufU32) {
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
const IR::Value index{inst->Arg(0)};
|
||||||
|
const IR::Value offset{inst->Arg(1)};
|
||||||
|
if (!index.IsImmediate()) {
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
if (!offset.IsImmediate()) {
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
const auto index_number = index.U32();
|
||||||
|
if (index_number != 1) {
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
const auto offset_number = offset.U32();
|
||||||
|
return env.ReadCbufValue(index_number, offset_number);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env) {
|
||||||
switch (inst->GetOpcode()) {
|
switch (inst->GetOpcode()) {
|
||||||
default:
|
default:
|
||||||
return std::nullopt;
|
return std::nullopt;
|
||||||
case IR::Opcode::BitwiseOr32: {
|
case IR::Opcode::BitwiseOr32: {
|
||||||
std::optional lhs{Track(inst->Arg(0))};
|
std::optional lhs{Track(inst->Arg(0), env)};
|
||||||
std::optional rhs{Track(inst->Arg(1))};
|
std::optional rhs{Track(inst->Arg(1), env)};
|
||||||
if (!lhs || !rhs) {
|
if (!lhs || !rhs) {
|
||||||
return std::nullopt;
|
return std::nullopt;
|
||||||
}
|
}
|
||||||
|
@@ -194,19 +218,62 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
         if (lhs->count > 1 || rhs->count > 1) {
             return std::nullopt;
         }
-        if (lhs->index > rhs->index || lhs->offset > rhs->offset) {
+        if (lhs->shift_left > 0 || lhs->index > rhs->index || lhs->offset > rhs->offset) {
             std::swap(lhs, rhs);
         }
         return ConstBufferAddr{
             .index = lhs->index,
             .offset = lhs->offset,
+            .shift_left = lhs->shift_left,
             .secondary_index = rhs->index,
             .secondary_offset = rhs->offset,
+            .secondary_shift_left = rhs->shift_left,
             .dynamic_offset = {},
             .count = 1,
             .has_secondary = true,
         };
     }
+    case IR::Opcode::ShiftLeftLogical32: {
+        const IR::Value shift{inst->Arg(1)};
+        if (!shift.IsImmediate()) {
+            return std::nullopt;
+        }
+        std::optional lhs{Track(inst->Arg(0), env)};
+        if (lhs) {
+            lhs->shift_left = shift.U32();
+        }
+        return lhs;
+        break;
+    }
+    case IR::Opcode::BitwiseAnd32: {
+        IR::Value op1{inst->Arg(0)};
+        IR::Value op2{inst->Arg(1)};
+        if (op1.IsImmediate()) {
+            std::swap(op1, op2);
+        }
+        if (!op2.IsImmediate() && !op1.IsImmediate()) {
+            do {
+                auto try_index = TryGetConstant(op1, env);
+                if (try_index) {
+                    op1 = op2;
+                    op2 = IR::Value{*try_index};
+                    break;
+                }
+                auto try_index_2 = TryGetConstant(op2, env);
+                if (try_index_2) {
+                    op2 = IR::Value{*try_index_2};
+                    break;
+                }
+                return std::nullopt;
+            } while (false);
+        }
+        std::optional lhs{Track(op1, env)};
+        if (lhs) {
+            lhs->shift_left = static_cast<u32>(std::countr_zero(op2.U32()));
+        }
+        return lhs;
+        break;
+    }
     case IR::Opcode::GetCbufU32x2:
     case IR::Opcode::GetCbufU32:
         break;
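The BitwiseAnd32 case turns an immediate mask into a shift via std::countr_zero: a mask whose low bits are clear implies the tracked field sits that many bits up. A minimal illustration (the mask value is made up):

    #include <bit>
    #include <cstdint>
    #include <iostream>

    int main() {
        const std::uint32_t mask = 0xFFF00000;
        // countr_zero counts the trailing clear bits, i.e. the field's shift.
        const auto shift = static_cast<std::uint32_t>(std::countr_zero(mask));
        std::cout << shift << '\n'; // 20
    }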
@@ -222,8 +289,10 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
     return ConstBufferAddr{
         .index = index.U32(),
         .offset = offset.U32(),
+        .shift_left = 0,
         .secondary_index = 0,
         .secondary_offset = 0,
+        .secondary_shift_left = 0,
         .dynamic_offset = {},
         .count = 1,
         .has_secondary = false,
@@ -247,8 +316,10 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
     return ConstBufferAddr{
         .index = index.U32(),
         .offset = base_offset,
+        .shift_left = 0,
         .secondary_index = 0,
         .secondary_offset = 0,
+        .secondary_shift_left = 0,
         .dynamic_offset = dynamic_offset,
         .count = 8,
         .has_secondary = false,
@@ -258,7 +329,7 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
 TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
     ConstBufferAddr addr;
     if (IsBindless(inst)) {
-        const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0))};
+        const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0), env)};
         if (!track_addr) {
             throw NotImplementedException("Failed to track bindless texture constant buffer");
         }
@@ -267,8 +338,10 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
         addr = ConstBufferAddr{
             .index = env.TextureBoundBuffer(),
             .offset = inst.Arg(0).U32(),
+            .shift_left = 0,
             .secondary_index = 0,
             .secondary_offset = 0,
+            .secondary_shift_left = 0,
             .dynamic_offset = {},
             .count = 1,
             .has_secondary = false,
@@ -284,8 +357,9 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
 TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) {
     const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index};
     const u32 secondary_offset{cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset};
-    const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset)};
-    const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)};
+    const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset) << cbuf.shift_left};
+    const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)
+                      << cbuf.secondary_shift_left};
     return env.ReadTextureType(lhs_raw | rhs_raw);
 }
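ReadTextureType now shifts each constant-buffer word by its tracked amount before OR-ing the halves into one handle. A worked example with made-up field values:

    #include <cstdint>
    #include <iostream>

    int main() {
        const std::uint32_t lhs_word = 0x0000001A; // low field, shift_left = 0
        const std::uint32_t rhs_word = 0x00000003; // high field, secondary_shift_left = 20
        const std::uint32_t handle = (lhs_word << 0) | (rhs_word << 20);
        std::cout << std::hex << handle << '\n'; // 30001a
    }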
@@ -464,8 +538,10 @@ void TexturePass(Environment& env, IR::Program& program) {
             .has_secondary = cbuf.has_secondary,
             .cbuf_index = cbuf.index,
             .cbuf_offset = cbuf.offset,
+            .shift_left = cbuf.shift_left,
             .secondary_cbuf_index = cbuf.secondary_index,
             .secondary_cbuf_offset = cbuf.secondary_offset,
+            .secondary_shift_left = cbuf.secondary_shift_left,
             .count = cbuf.count,
             .size_shift = DESCRIPTOR_SIZE_SHIFT,
         });
@@ -476,8 +552,10 @@ void TexturePass(Environment& env, IR::Program& program) {
             .has_secondary = cbuf.has_secondary,
             .cbuf_index = cbuf.index,
             .cbuf_offset = cbuf.offset,
+            .shift_left = cbuf.shift_left,
             .secondary_cbuf_index = cbuf.secondary_index,
             .secondary_cbuf_offset = cbuf.secondary_offset,
+            .secondary_shift_left = cbuf.secondary_shift_left,
             .count = cbuf.count,
             .size_shift = DESCRIPTOR_SIZE_SHIFT,
         });
@@ -60,8 +60,10 @@ struct TextureBufferDescriptor {
     bool has_secondary;
     u32 cbuf_index;
     u32 cbuf_offset;
+    u32 shift_left;
     u32 secondary_cbuf_index;
     u32 secondary_cbuf_offset;
+    u32 secondary_shift_left;
     u32 count;
     u32 size_shift;
 };
@@ -84,8 +86,10 @@ struct TextureDescriptor {
     bool has_secondary;
     u32 cbuf_index;
     u32 cbuf_offset;
+    u32 shift_left;
     u32 secondary_cbuf_index;
     u32 secondary_cbuf_offset;
+    u32 secondary_shift_left;
     u32 count;
     u32 size_shift;
 };
@@ -1,7 +1,7 @@
 add_subdirectory(host_shaders)

 if(LIBVA_FOUND)
-    set_source_files_properties(command_classes/codecs/codec.cpp
+    set_source_files_properties(host1x/codecs/codec.cpp
         PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1)
     list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES})
 endif()
@@ -12,26 +12,14 @@ add_library(video_core STATIC
     buffer_cache/buffer_cache.h
     cdma_pusher.cpp
     cdma_pusher.h
-    command_classes/codecs/codec.cpp
-    command_classes/codecs/codec.h
-    command_classes/codecs/h264.cpp
-    command_classes/codecs/h264.h
-    command_classes/codecs/vp8.cpp
-    command_classes/codecs/vp8.h
-    command_classes/codecs/vp9.cpp
-    command_classes/codecs/vp9.h
-    command_classes/codecs/vp9_types.h
-    command_classes/host1x.cpp
-    command_classes/host1x.h
-    command_classes/nvdec.cpp
-    command_classes/nvdec.h
-    command_classes/nvdec_common.h
-    command_classes/sync_manager.cpp
-    command_classes/sync_manager.h
-    command_classes/vic.cpp
-    command_classes/vic.h
     compatible_formats.cpp
     compatible_formats.h
+    control/channel_state.cpp
+    control/channel_state.h
+    control/channel_state_cache.cpp
+    control/channel_state_cache.h
+    control/scheduler.cpp
+    control/scheduler.h
     delayed_destruction_ring.h
     dirty_flags.cpp
     dirty_flags.h
@@ -51,7 +39,31 @@ add_library(video_core STATIC
     engines/maxwell_3d.h
     engines/maxwell_dma.cpp
     engines/maxwell_dma.h
+    engines/puller.cpp
+    engines/puller.h
     framebuffer_config.h
+    host1x/codecs/codec.cpp
+    host1x/codecs/codec.h
+    host1x/codecs/h264.cpp
+    host1x/codecs/h264.h
+    host1x/codecs/vp8.cpp
+    host1x/codecs/vp8.h
+    host1x/codecs/vp9.cpp
+    host1x/codecs/vp9.h
+    host1x/codecs/vp9_types.h
+    host1x/control.cpp
+    host1x/control.h
+    host1x/host1x.cpp
+    host1x/host1x.h
+    host1x/nvdec.cpp
+    host1x/nvdec.h
+    host1x/nvdec_common.h
+    host1x/sync_manager.cpp
+    host1x/sync_manager.h
+    host1x/syncpoint_manager.cpp
+    host1x/syncpoint_manager.h
+    host1x/vic.cpp
+    host1x/vic.h
     macro/macro.cpp
     macro/macro.h
     macro/macro_hle.cpp
@@ -192,6 +204,7 @@ add_library(video_core STATIC
     texture_cache/render_targets.h
     texture_cache/samples_helper.h
     texture_cache/slot_vector.h
+    texture_cache/texture_cache.cpp
     texture_cache/texture_cache.h
     texture_cache/texture_cache_base.h
     texture_cache/types.h
@@ -5,7 +5,6 @@

 #include <algorithm>
 #include <array>
-#include <deque>
 #include <memory>
 #include <mutex>
 #include <numeric>
@@ -23,6 +22,7 @@
 #include "common/settings.h"
 #include "core/memory.h"
 #include "video_core/buffer_cache/buffer_base.h"
+#include "video_core/control/channel_state_cache.h"
 #include "video_core/delayed_destruction_ring.h"
 #include "video_core/dirty_flags.h"
 #include "video_core/engines/kepler_compute.h"
@@ -56,7 +56,7 @@ using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFE
 using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>;

 template <typename P>
-class BufferCache {
+class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {

     // Page size for caching purposes.
     // This is unrelated to the CPU page size and it can be changed as it seems optimal.
@@ -116,10 +116,7 @@ public:
     static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);

     explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_,
-                         Tegra::Engines::Maxwell3D& maxwell3d_,
-                         Tegra::Engines::KeplerCompute& kepler_compute_,
-                         Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
-                         Runtime& runtime_);
+                         Core::Memory::Memory& cpu_memory_, Runtime& runtime_);

     void TickFrame();
|
||||||
|
|
||||||
void DownloadMemory(VAddr cpu_addr, u64 size);
|
void DownloadMemory(VAddr cpu_addr, u64 size);
|
||||||
|
|
||||||
bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<u8> inlined_buffer);
|
bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
|
||||||
|
|
||||||
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
|
void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
|
||||||
|
|
||||||
|
@ -353,7 +350,7 @@ private:
|
||||||
|
|
||||||
void NotifyBufferDeletion();
|
void NotifyBufferDeletion();
|
||||||
|
|
||||||
[[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr) const;
|
[[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr, bool is_written = false) const;
|
||||||
|
|
||||||
[[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
|
[[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
|
||||||
PixelFormat format);
|
PixelFormat format);
|
||||||
|
@@ -367,9 +364,6 @@ private:
     void ClearDownload(IntervalType subtract_interval);

     VideoCore::RasterizerInterface& rasterizer;
-    Tegra::Engines::Maxwell3D& maxwell3d;
-    Tegra::Engines::KeplerCompute& kepler_compute;
-    Tegra::MemoryManager& gpu_memory;
     Core::Memory::Memory& cpu_memory;

     SlotVector<Buffer> slot_buffers;
|
||||||
|
|
||||||
template <class P>
|
template <class P>
|
||||||
BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
|
BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
|
||||||
Tegra::Engines::Maxwell3D& maxwell3d_,
|
Core::Memory::Memory& cpu_memory_, Runtime& runtime_)
|
||||||
Tegra::Engines::KeplerCompute& kepler_compute_,
|
: runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_} {
|
||||||
Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
|
|
||||||
Runtime& runtime_)
|
|
||||||
: runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
|
|
||||||
kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} {
|
|
||||||
// Ensure the first slot is used for the null buffer
|
// Ensure the first slot is used for the null buffer
|
||||||
void(slot_buffers.insert(runtime, NullBufferParams{}));
|
void(slot_buffers.insert(runtime, NullBufferParams{}));
|
||||||
common_ranges.clear();
|
common_ranges.clear();
|
||||||
|
@@ -552,8 +542,8 @@ void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {

 template <class P>
 bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
-    const std::optional<VAddr> cpu_src_address = gpu_memory.GpuToCpuAddress(src_address);
-    const std::optional<VAddr> cpu_dest_address = gpu_memory.GpuToCpuAddress(dest_address);
+    const std::optional<VAddr> cpu_src_address = gpu_memory->GpuToCpuAddress(src_address);
+    const std::optional<VAddr> cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address);
     if (!cpu_src_address || !cpu_dest_address) {
         return false;
     }
|
||||||
|
|
||||||
template <class P>
|
template <class P>
|
||||||
bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
|
bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
|
||||||
const std::optional<VAddr> cpu_dst_address = gpu_memory.GpuToCpuAddress(dst_address);
|
const std::optional<VAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address);
|
||||||
if (!cpu_dst_address) {
|
if (!cpu_dst_address) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@ -635,7 +625,7 @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
|
||||||
template <class P>
|
template <class P>
|
||||||
void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
|
void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
|
||||||
u32 size) {
|
u32 size) {
|
||||||
const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
|
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
|
||||||
const Binding binding{
|
const Binding binding{
|
||||||
.cpu_addr = *cpu_addr,
|
.cpu_addr = *cpu_addr,
|
||||||
.size = size,
|
.size = size,
|
||||||
|
@ -673,7 +663,7 @@ void BufferCache<P>::BindHostGeometryBuffers(bool is_indexed) {
|
||||||
if (is_indexed) {
|
if (is_indexed) {
|
||||||
BindHostIndexBuffer();
|
BindHostIndexBuffer();
|
||||||
} else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
|
} else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
|
||||||
const auto& regs = maxwell3d.regs;
|
const auto& regs = maxwell3d->regs;
|
||||||
if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) {
|
if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) {
|
||||||
runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count);
|
runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count);
|
||||||
}
|
}
|
||||||
|
@@ -733,9 +723,9 @@ void BufferCache<P>::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index,
     enabled_storage_buffers[stage] |= 1U << ssbo_index;
     written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index;

-    const auto& cbufs = maxwell3d.state.shader_stages[stage];
+    const auto& cbufs = maxwell3d->state.shader_stages[stage];
     const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset;
-    storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr);
+    storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr, is_written);
 }

 template <class P>
|
@ -770,12 +760,12 @@ void BufferCache<P>::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index,
|
||||||
enabled_compute_storage_buffers |= 1U << ssbo_index;
|
enabled_compute_storage_buffers |= 1U << ssbo_index;
|
||||||
written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index;
|
written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index;
|
||||||
|
|
||||||
const auto& launch_desc = kepler_compute.launch_description;
|
const auto& launch_desc = kepler_compute->launch_description;
|
||||||
ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0);
|
ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0);
|
||||||
|
|
||||||
const auto& cbufs = launch_desc.const_buffer_config;
|
const auto& cbufs = launch_desc.const_buffer_config;
|
||||||
const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset;
|
const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset;
|
||||||
compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr);
|
compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr, is_written);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <class P>
|
template <class P>
|
||||||
|
@@ -836,6 +826,19 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
     const bool is_accuracy_normal =
         Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal;

+    auto it = committed_ranges.begin();
+    while (it != committed_ranges.end()) {
+        auto& current_intervals = *it;
+        auto next_it = std::next(it);
+        while (next_it != committed_ranges.end()) {
+            for (auto& interval : *next_it) {
+                current_intervals.subtract(interval);
+            }
+            next_it++;
+        }
+        it++;
+    }
+
     boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads;
     u64 total_size_bytes = 0;
     u64 largest_copy = 0;
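The new loop subtracts every later pending set's intervals from each earlier set, so no address range is downloaded twice. A minimal sketch of that interval subtraction, assuming boost::icl (which the buffer cache's IntervalType/IntervalSet aliases are built on):

    #include <boost/icl/interval_set.hpp>
    #include <iostream>

    int main() {
        using Set = boost::icl::interval_set<unsigned long>;
        using Interval = Set::interval_type;

        Set earlier;
        earlier.add(Interval::right_open(0x1000, 0x3000));

        Set later;
        later.add(Interval::right_open(0x2000, 0x2800));

        // Remove anything the later set already covers from the earlier set.
        for (const auto& interval : later) {
            earlier.subtract(interval);
        }
        // earlier now holds [0x1000,0x2000) and [0x2800,0x3000).
        std::cout << earlier << '\n';
    }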
@@ -991,19 +994,19 @@ void BufferCache<P>::BindHostIndexBuffer() {
     const u32 size = index_buffer.size;
     SynchronizeBuffer(buffer, index_buffer.cpu_addr, size);
     if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
-        const u32 new_offset = offset + maxwell3d.regs.index_array.first *
-                                            maxwell3d.regs.index_array.FormatSizeInBytes();
+        const u32 new_offset = offset + maxwell3d->regs.index_array.first *
+                                            maxwell3d->regs.index_array.FormatSizeInBytes();
         runtime.BindIndexBuffer(buffer, new_offset, size);
     } else {
-        runtime.BindIndexBuffer(maxwell3d.regs.draw.topology, maxwell3d.regs.index_array.format,
-                                maxwell3d.regs.index_array.first, maxwell3d.regs.index_array.count,
-                                buffer, offset, size);
+        runtime.BindIndexBuffer(maxwell3d->regs.draw.topology, maxwell3d->regs.index_array.format,
+                                maxwell3d->regs.index_array.first,
+                                maxwell3d->regs.index_array.count, buffer, offset, size);
     }
 }

 template <class P>
 void BufferCache<P>::BindHostVertexBuffers() {
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
         const Binding& binding = vertex_buffers[index];
         Buffer& buffer = slot_buffers[binding.buffer_id];
@@ -1014,7 +1017,7 @@ void BufferCache<P>::BindHostVertexBuffers() {
         }
         flags[Dirty::VertexBuffer0 + index] = false;

-        const u32 stride = maxwell3d.regs.vertex_array[index].stride;
+        const u32 stride = maxwell3d->regs.vertex_array[index].stride;
         const u32 offset = buffer.Offset(binding.cpu_addr);
         runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride);
     }
@@ -1154,7 +1157,7 @@ void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {

 template <class P>
 void BufferCache<P>::BindHostTransformFeedbackBuffers() {
-    if (maxwell3d.regs.tfb_enabled == 0) {
+    if (maxwell3d->regs.tfb_enabled == 0) {
         return;
     }
     for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
@@ -1239,6 +1242,8 @@ void BufferCache<P>::BindHostComputeTextureBuffers() {

 template <class P>
 void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
+    do {
+        has_deleted_buffers = false;
     if (is_indexed) {
         UpdateIndexBuffer();
     }
@@ -1249,6 +1254,7 @@ void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
         UpdateStorageBuffers(stage);
         UpdateTextureBuffers(stage);
     }
+    } while (has_deleted_buffers);
 }

 template <class P>
|
||||||
void BufferCache<P>::UpdateIndexBuffer() {
|
void BufferCache<P>::UpdateIndexBuffer() {
|
||||||
// We have to check for the dirty flags and index count
|
// We have to check for the dirty flags and index count
|
||||||
// The index count is currently changed without updating the dirty flags
|
// The index count is currently changed without updating the dirty flags
|
||||||
const auto& index_array = maxwell3d.regs.index_array;
|
const auto& index_array = maxwell3d->regs.index_array;
|
||||||
auto& flags = maxwell3d.dirty.flags;
|
auto& flags = maxwell3d->dirty.flags;
|
||||||
if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) {
|
if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -1272,7 +1278,7 @@ void BufferCache<P>::UpdateIndexBuffer() {
|
||||||
|
|
||||||
const GPUVAddr gpu_addr_begin = index_array.StartAddress();
|
const GPUVAddr gpu_addr_begin = index_array.StartAddress();
|
||||||
const GPUVAddr gpu_addr_end = index_array.EndAddress();
|
const GPUVAddr gpu_addr_end = index_array.EndAddress();
|
||||||
const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin);
|
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
|
||||||
const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
|
const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
|
||||||
const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes();
|
const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes();
|
||||||
const u32 size = std::min(address_size, draw_size);
|
const u32 size = std::min(address_size, draw_size);
|
||||||
|
@ -1289,8 +1295,8 @@ void BufferCache<P>::UpdateIndexBuffer() {
|
||||||
|
|
||||||
template <class P>
|
template <class P>
|
||||||
void BufferCache<P>::UpdateVertexBuffers() {
|
void BufferCache<P>::UpdateVertexBuffers() {
|
||||||
auto& flags = maxwell3d.dirty.flags;
|
auto& flags = maxwell3d->dirty.flags;
|
||||||
if (!maxwell3d.dirty.flags[Dirty::VertexBuffers]) {
|
if (!maxwell3d->dirty.flags[Dirty::VertexBuffers]) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
flags[Dirty::VertexBuffers] = false;
|
flags[Dirty::VertexBuffers] = false;
|
||||||
|
@@ -1302,33 +1308,25 @@ void BufferCache<P>::UpdateVertexBuffers() {

 template <class P>
 void BufferCache<P>::UpdateVertexBuffer(u32 index) {
-    if (!maxwell3d.dirty.flags[Dirty::VertexBuffer0 + index]) {
+    if (!maxwell3d->dirty.flags[Dirty::VertexBuffer0 + index]) {
         return;
     }
-    const auto& array = maxwell3d.regs.vertex_array[index];
-    const auto& limit = maxwell3d.regs.vertex_array_limit[index];
+    const auto& array = maxwell3d->regs.vertex_array[index];
+    const auto& limit = maxwell3d->regs.vertex_array_limit[index];
     const GPUVAddr gpu_addr_begin = array.StartAddress();
     const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1;
-    const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin);
-    u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
-    if (address_size >= 64_MiB) {
-        // Reported vertex buffer size is very large, cap to mapped buffer size
-        GPUVAddr submapped_addr_end = gpu_addr_begin;
-
-        const auto ranges{gpu_memory.GetSubmappedRange(gpu_addr_begin, address_size)};
-        if (ranges.size() > 0) {
-            const auto& [addr, size] = *ranges.begin();
-            submapped_addr_end = addr + size;
-        }
-
-        address_size =
-            std::min(address_size, static_cast<u32>(submapped_addr_end - gpu_addr_begin));
-    }
-    const u32 size = address_size; // TODO: Analyze stride and number of vertices
-    if (array.enable == 0 || size == 0 || !cpu_addr) {
+    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
+    u32 address_size = static_cast<u32>(
+        std::min(gpu_addr_end - gpu_addr_begin, static_cast<u64>(std::numeric_limits<u32>::max())));
+    if (array.enable == 0 || address_size == 0 || !cpu_addr) {
         vertex_buffers[index] = NULL_BINDING;
         return;
     }
+    if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
+        address_size =
+            static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size));
+    }
+    const u32 size = address_size; // TODO: Analyze stride and number of vertices
     vertex_buffers[index] = Binding{
         .cpu_addr = *cpu_addr,
         .size = size,
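Clamping against u32 max replaces a raw truncating cast here. A tiny illustration of why (the values are made up):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <limits>

    int main() {
        const std::uint64_t reported = 0x1'0000'2000ULL; // > 4 GiB range
        // A plain cast silently wraps; the clamp saturates instead.
        const auto truncated = static_cast<std::uint32_t>(reported);
        const auto clamped = static_cast<std::uint32_t>(std::min(
            reported, static_cast<std::uint64_t>(std::numeric_limits<std::uint32_t>::max())));
        std::cout << std::hex << truncated << ' ' << clamped << '\n'; // 2000 ffffffff
    }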
@@ -1382,7 +1380,7 @@ void BufferCache<P>::UpdateTextureBuffers(size_t stage) {

 template <class P>
 void BufferCache<P>::UpdateTransformFeedbackBuffers() {
-    if (maxwell3d.regs.tfb_enabled == 0) {
+    if (maxwell3d->regs.tfb_enabled == 0) {
         return;
     }
     for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
@@ -1392,10 +1390,10 @@ void BufferCache<P>::UpdateTransformFeedbackBuffers() {

 template <class P>
 void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
-    const auto& binding = maxwell3d.regs.tfb_bindings[index];
+    const auto& binding = maxwell3d->regs.tfb_bindings[index];
     const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset;
     const u32 size = binding.buffer_size;
-    const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) {
         transform_feedback_buffers[index] = NULL_BINDING;
         return;
@@ -1414,10 +1412,10 @@ void BufferCache<P>::UpdateComputeUniformBuffers() {
     ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
         Binding& binding = compute_uniform_buffers[index];
         binding = NULL_BINDING;
-        const auto& launch_desc = kepler_compute.launch_description;
+        const auto& launch_desc = kepler_compute->launch_description;
         if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) {
             const auto& cbuf = launch_desc.const_buffer_config[index];
-            const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(cbuf.Address());
+            const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address());
             if (cpu_addr) {
                 binding.cpu_addr = *cpu_addr;
                 binding.size = cbuf.size;
@@ -1566,11 +1564,13 @@ BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
     const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size);
     const u32 size = static_cast<u32>(overlap.end - overlap.begin);
     const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size);
+    auto& new_buffer = slot_buffers[new_buffer_id];
+    runtime.ClearBuffer(new_buffer, 0, new_buffer.SizeBytes(), 0);
     for (const BufferId overlap_id : overlap.ids) {
         JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap);
     }
     Register(new_buffer_id);
-    TouchBuffer(slot_buffers[new_buffer_id], new_buffer_id);
+    TouchBuffer(new_buffer, new_buffer_id);
     return new_buffer_id;
 }
@@ -1694,7 +1694,7 @@ void BufferCache<P>::MappedUploadMemory(Buffer& buffer, u64 total_size_bytes,

 template <class P>
 bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size,
-                                  std::span<u8> inlined_buffer) {
+                                  std::span<const u8> inlined_buffer) {
     const bool is_dirty = IsRegionRegistered(dest_address, copy_size);
     if (!is_dirty) {
         return false;
|
||||||
dirty_uniform_buffers.fill(~u32{0});
|
dirty_uniform_buffers.fill(~u32{0});
|
||||||
uniform_buffer_binding_sizes.fill({});
|
uniform_buffer_binding_sizes.fill({});
|
||||||
}
|
}
|
||||||
auto& flags = maxwell3d.dirty.flags;
|
auto& flags = maxwell3d->dirty.flags;
|
||||||
flags[Dirty::IndexBuffer] = true;
|
flags[Dirty::IndexBuffer] = true;
|
||||||
flags[Dirty::VertexBuffers] = true;
|
flags[Dirty::VertexBuffers] = true;
|
||||||
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
|
for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
|
||||||
|
@ -1840,16 +1840,18 @@ void BufferCache<P>::NotifyBufferDeletion() {
|
||||||
}
|
}
|
||||||
|
|
||||||
template <class P>
|
template <class P>
|
||||||
typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr) const {
|
typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr,
|
||||||
const GPUVAddr gpu_addr = gpu_memory.Read<u64>(ssbo_addr);
|
bool is_written) const {
|
||||||
const u32 size = gpu_memory.Read<u32>(ssbo_addr + 8);
|
const GPUVAddr gpu_addr = gpu_memory->Read<u64>(ssbo_addr);
|
||||||
const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
|
const u32 size = gpu_memory->Read<u32>(ssbo_addr + 8);
|
||||||
|
const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
|
||||||
if (!cpu_addr || size == 0) {
|
if (!cpu_addr || size == 0) {
|
||||||
return NULL_BINDING;
|
return NULL_BINDING;
|
||||||
}
|
}
|
||||||
|
const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::PAGE_SIZE);
|
||||||
const Binding binding{
|
const Binding binding{
|
||||||
.cpu_addr = *cpu_addr,
|
.cpu_addr = *cpu_addr,
|
||||||
.size = size,
|
.size = is_written ? size : static_cast<u32>(cpu_end - *cpu_addr),
|
||||||
.buffer_id = BufferId{},
|
.buffer_id = BufferId{},
|
||||||
};
|
};
|
||||||
return binding;
|
return binding;
|
||||||
|
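For read-only bindings the size is now rounded up to the next page boundary. A minimal sketch of that rounding, assuming a 4 KiB page size (Common::AlignUp is the helper yuzu uses; this is a stand-in):

    #include <cstdint>
    #include <iostream>

    constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
        // Round value up to the next multiple of align (align must be non-zero).
        return (value + align - 1) / align * align;
    }

    int main() {
        constexpr std::uint64_t page_size = 0x1000;
        // A binding that ends mid-page is extended to the page boundary.
        std::cout << std::hex << AlignUp(0x10234, page_size) << '\n'; // 11000
    }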
@@ -1858,7 +1860,7 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
 template <class P>
 typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(
     GPUVAddr gpu_addr, u32 size, PixelFormat format) {
-    const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     TextureBufferBinding binding;
     if (!cpu_addr || size == 0) {
         binding.cpu_addr = 0;
@@ -2,20 +2,22 @@
 // SPDX-License-Identifier: MIT

 #include <bit>
-#include "command_classes/host1x.h"
-#include "command_classes/nvdec.h"
-#include "command_classes/vic.h"
 #include "video_core/cdma_pusher.h"
-#include "video_core/command_classes/sync_manager.h"
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/control.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/nvdec.h"
+#include "video_core/host1x/nvdec_common.h"
+#include "video_core/host1x/sync_manager.h"
+#include "video_core/host1x/vic.h"
+#include "video_core/memory_manager.h"

 namespace Tegra {
-CDmaPusher::CDmaPusher(GPU& gpu_)
-    : gpu{gpu_}, nvdec_processor(std::make_shared<Nvdec>(gpu)),
-      vic_processor(std::make_unique<Vic>(gpu, nvdec_processor)),
-      host1x_processor(std::make_unique<Host1x>(gpu)),
-      sync_manager(std::make_unique<SyncptIncrManager>(gpu)) {}
+CDmaPusher::CDmaPusher(Host1x::Host1x& host1x_)
+    : host1x{host1x_}, nvdec_processor(std::make_shared<Host1x::Nvdec>(host1x)),
+      vic_processor(std::make_unique<Host1x::Vic>(host1x, nvdec_processor)),
+      host1x_processor(std::make_unique<Host1x::Control>(host1x)),
+      sync_manager(std::make_unique<Host1x::SyncptIncrManager>(host1x)) {}

 CDmaPusher::~CDmaPusher() = default;
@@ -109,16 +111,17 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
         case ThiMethod::SetMethod1:
             LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})",
                       static_cast<u32>(vic_thi_state.method_0), data);
-            vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), data);
+            vic_processor->ProcessMethod(static_cast<Host1x::Vic::Method>(vic_thi_state.method_0),
+                                         data);
             break;
         default:
             break;
         }
         break;
-    case ChClassId::Host1x:
+    case ChClassId::Control:
         // This device is mainly for syncpoint synchronization
         LOG_DEBUG(Service_NVDRV, "Host1X Class Method");
-        host1x_processor->ProcessMethod(static_cast<Host1x::Method>(offset), data);
+        host1x_processor->ProcessMethod(static_cast<Host1x::Control::Method>(offset), data);
         break;
     default:
         UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class));
@@ -12,11 +12,13 @@

 namespace Tegra {

-class GPU;
+namespace Host1x {
+class Control;
 class Host1x;
 class Nvdec;
 class SyncptIncrManager;
 class Vic;
+} // namespace Host1x

 enum class ChSubmissionMode : u32 {
     SetClass = 0,
@@ -30,7 +32,7 @@ enum class ChSubmissionMode : u32 {

 enum class ChClassId : u32 {
     NoClass = 0x0,
-    Host1x = 0x1,
+    Control = 0x1,
     VideoEncodeMpeg = 0x20,
     VideoEncodeNvEnc = 0x21,
     VideoStreamingVi = 0x30,
@@ -88,7 +90,7 @@ enum class ThiMethod : u32 {

 class CDmaPusher {
 public:
-    explicit CDmaPusher(GPU& gpu_);
+    explicit CDmaPusher(Host1x::Host1x& host1x);
     ~CDmaPusher();

     /// Process the command entry
@@ -101,11 +103,11 @@ private:
     /// Write arguments value to the ThiRegisters member at the specified offset
     void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument);

-    GPU& gpu;
-    std::shared_ptr<Tegra::Nvdec> nvdec_processor;
-    std::unique_ptr<Tegra::Vic> vic_processor;
-    std::unique_ptr<Tegra::Host1x> host1x_processor;
-    std::unique_ptr<SyncptIncrManager> sync_manager;
+    Host1x::Host1x& host1x;
+    std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;
+    std::unique_ptr<Tegra::Host1x::Vic> vic_processor;
+    std::unique_ptr<Tegra::Host1x::Control> host1x_processor;
+    std::unique_ptr<Host1x::SyncptIncrManager> sync_manager;
     ChClassId current_class{};
     ThiRegisters vic_thi_state{};
     ThiRegisters nvdec_thi_state{};
src/video_core/control/channel_state.cpp (new executable file, 44 lines)
@@ -0,0 +1,44 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv3 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/dma_pusher.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/kepler_compute.h"
+#include "video_core/engines/kepler_memory.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/maxwell_dma.h"
+#include "video_core/engines/puller.h"
+#include "video_core/memory_manager.h"
+
+namespace Tegra::Control {
+
+ChannelState::ChannelState(s32 bind_id_) {
+    bind_id = bind_id_;
+    initiated = false;
+}
+
+void ChannelState::Init(Core::System& system, GPU& gpu) {
+    ASSERT(memory_manager);
+    dma_pusher = std::make_unique<Tegra::DmaPusher>(system, gpu, *memory_manager, *this);
+    maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, *memory_manager);
+    fermi_2d = std::make_unique<Engines::Fermi2D>();
+    kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager);
+    maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
+    kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
+    initiated = true;
+}
+
+void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
+    dma_pusher->BindRasterizer(rasterizer);
+    memory_manager->BindRasterizer(rasterizer);
+    maxwell_3d->BindRasterizer(rasterizer);
+    fermi_2d->BindRasterizer(rasterizer);
+    kepler_memory->BindRasterizer(rasterizer);
+    kepler_compute->BindRasterizer(rasterizer);
+    maxwell_dma->BindRasterizer(rasterizer);
+}
+
+} // namespace Tegra::Control

src/video_core/control/channel_state.h (new executable file, 69 lines)
@@ -0,0 +1,69 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv3 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+
+#include "common/common_types.h"
+
+namespace Core {
+class System;
+}
+
+namespace VideoCore {
+class RasterizerInterface;
+}
+
+namespace Tegra {
+
+class GPU;
+
+namespace Engines {
+class Puller;
+class Fermi2D;
+class Maxwell3D;
+class MaxwellDMA;
+class KeplerCompute;
+class KeplerMemory;
+} // namespace Engines
+
+class MemoryManager;
+class DmaPusher;
+
+namespace Control {
+
+struct ChannelState {
+    ChannelState(s32 bind_id);
+    ChannelState(const ChannelState& state) = delete;
+    ChannelState& operator=(const ChannelState&) = delete;
+    ChannelState(ChannelState&& other) noexcept = default;
+    ChannelState& operator=(ChannelState&& other) noexcept = default;
+
+    void Init(Core::System& system, GPU& gpu);
+
+    void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
+
+    s32 bind_id = -1;
+    /// 3D engine
+    std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
+    /// 2D engine
+    std::unique_ptr<Engines::Fermi2D> fermi_2d;
+    /// Compute engine
+    std::unique_ptr<Engines::KeplerCompute> kepler_compute;
+    /// DMA engine
+    std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
+    /// Inline memory engine
+    std::unique_ptr<Engines::KeplerMemory> kepler_memory;
+
+    std::shared_ptr<MemoryManager> memory_manager;
+
+    std::unique_ptr<DmaPusher> dma_pusher;
+
+    bool initiated{};
+};
+
+} // namespace Control
+
+} // namespace Tegra

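For context: ChannelState is deliberately two-phase. The constructor only records the bind id, and the engines are created in Init() once a memory manager has been attached, because every engine needs that memory manager at construction time. A minimal standalone sketch of the same pattern (hypothetical names, not the yuzu API):

    #include <cassert>
    #include <memory>

    struct MemoryModel {};
    struct Engine3D {
        explicit Engine3D(MemoryModel&) {}
    };

    struct Channel {
        explicit Channel(int bind_id_) : bind_id{bind_id_} {}

        // Phase two: engines can only be built once a memory model exists.
        void Init() {
            assert(memory_model && "attach a memory model before Init()");
            engine_3d = std::make_unique<Engine3D>(*memory_model);
            initiated = true;
        }

        int bind_id;
        std::shared_ptr<MemoryModel> memory_model;
        std::unique_ptr<Engine3D> engine_3d;
        bool initiated{};
    };

    int main() {
        Channel channel{7};
        channel.memory_model = std::make_shared<MemoryModel>();
        channel.Init(); // mirrors ChannelState: memory manager first, engines second
    }
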
src/video_core/control/channel_state_cache.cpp (new executable file, 11 lines)
@@ -0,0 +1,11 @@
+#include "video_core/control/channel_state_cache.inc"
+
+namespace VideoCommon {
+
+ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& channel_state)
+    : maxwell3d{*channel_state.maxwell_3d}, kepler_compute{*channel_state.kepler_compute},
+      gpu_memory{*channel_state.memory_manager} {}
+
+template class VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo>;
+
+} // namespace VideoCommon

src/video_core/control/channel_state_cache.h (new executable file, 102 lines)
@@ -0,0 +1,102 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv3 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <deque>
+#include <limits>
+#include <mutex>
+#include <optional>
+#include <unordered_map>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Tegra {
+
+namespace Engines {
+class Maxwell3D;
+class KeplerCompute;
+} // namespace Engines
+
+class MemoryManager;
+
+namespace Control {
+struct ChannelState;
+}
+
+} // namespace Tegra
+
+namespace VideoCommon {
+
+class ChannelInfo {
+public:
+    ChannelInfo() = delete;
+    ChannelInfo(Tegra::Control::ChannelState& state);
+    ChannelInfo(const ChannelInfo& state) = delete;
+    ChannelInfo& operator=(const ChannelInfo&) = delete;
+    ChannelInfo(ChannelInfo&& other) = default;
+    ChannelInfo& operator=(ChannelInfo&& other) = default;
+
+    Tegra::Engines::Maxwell3D& maxwell3d;
+    Tegra::Engines::KeplerCompute& kepler_compute;
+    Tegra::MemoryManager& gpu_memory;
+};
+
+template <class P>
+class ChannelSetupCaches {
+public:
+    /// Operations for setting the channel of execution.
+    virtual ~ChannelSetupCaches();
+
+    /// Create channel state.
+    virtual void CreateChannel(Tegra::Control::ChannelState& channel);
+
+    /// Bind a channel for execution.
+    void BindToChannel(s32 id);
+
+    /// Erase channel's state.
+    void EraseChannel(s32 id);
+
+    Tegra::MemoryManager* GetFromID(size_t id) const {
+        std::unique_lock<std::mutex> lk(config_mutex);
+        const auto ref = address_spaces.find(id);
+        return ref->second.gpu_memory;
+    }
+
+    std::optional<size_t> getStorageID(size_t id) const {
+        std::unique_lock<std::mutex> lk(config_mutex);
+        const auto ref = address_spaces.find(id);
+        if (ref == address_spaces.end()) {
+            return std::nullopt;
+        }
+        return ref->second.storage_id;
+    }
+
+protected:
+    static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
+
+    P* channel_state;
+    size_t current_channel_id{UNSET_CHANNEL};
+    size_t current_address_space{};
+    Tegra::Engines::Maxwell3D* maxwell3d;
+    Tegra::Engines::KeplerCompute* kepler_compute;
+    Tegra::MemoryManager* gpu_memory;
+
+    std::deque<P> channel_storage;
+    std::deque<size_t> free_channel_ids;
+    std::unordered_map<s32, size_t> channel_map;
+    std::vector<size_t> active_channel_ids;
+    struct AddresSpaceRef {
+        size_t ref_count;
+        size_t storage_id;
+        Tegra::MemoryManager* gpu_memory;
+    };
+    std::unordered_map<size_t, AddresSpaceRef> address_spaces;
+    mutable std::mutex config_mutex;
+
+    virtual void OnGPUASRegister([[maybe_unused]] size_t map_id) {}
+};
+
+} // namespace VideoCommon

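ChannelSetupCaches keeps per-channel cache state in a deque and recycles storage slots through a free list, with UNSET_CHANNEL as the sentinel for "no channel bound". A standalone sketch of that bookkeeping, reduced to just the containers involved (hypothetical names, not the yuzu types):

    #include <cassert>
    #include <cstddef>
    #include <deque>
    #include <limits>
    #include <unordered_map>

    class SlotRegistry {
    public:
        static constexpr std::size_t UNSET = std::numeric_limits<std::size_t>::max();

        std::size_t Create(int bind_id) {
            std::size_t slot;
            if (!free_slots.empty()) {
                slot = free_slots.front(); // recycle a previously erased slot
                free_slots.pop_front();
            } else {
                storage.emplace_back();
                slot = storage.size() - 1;
            }
            slot_of.emplace(bind_id, slot);
            return slot;
        }

        void Bind(int bind_id) {
            current = slot_of.at(bind_id);
        }

        void Erase(int bind_id) {
            const auto it = slot_of.find(bind_id);
            assert(it != slot_of.end());
            free_slots.push_back(it->second); // slot is reusable; storage never shrinks
            if (it->second == current) {
                current = UNSET;
            }
            slot_of.erase(it);
        }

        std::size_t current{UNSET};

    private:
        std::deque<int> storage; // stand-in for per-channel cache state
        std::deque<std::size_t> free_slots;
        std::unordered_map<int, std::size_t> slot_of;
    };

    int main() {
        SlotRegistry registry;
        registry.Create(1);
        registry.Create(2);
        registry.Bind(1);
        registry.Erase(1); // current falls back to the UNSET sentinel
        assert(registry.current == SlotRegistry::UNSET);
    }
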
src/video_core/control/channel_state_cache.inc (new executable file, 84 lines)
@@ -0,0 +1,84 @@
+
+#include <algorithm>
+
+#include "video_core/control/channel_state.h"
+#include "video_core/control/channel_state_cache.h"
+#include "video_core/engines/kepler_compute.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/memory_manager.h"
+
+namespace VideoCommon {
+
+template <class P>
+ChannelSetupCaches<P>::~ChannelSetupCaches() = default;
+
+template <class P>
+void ChannelSetupCaches<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) {
+    std::unique_lock<std::mutex> lk(config_mutex);
+    ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0);
+    auto new_id = [this, &channel]() {
+        if (!free_channel_ids.empty()) {
+            auto id = free_channel_ids.front();
+            free_channel_ids.pop_front();
+            new (&channel_storage[id]) P(channel);
+            return id;
+        }
+        channel_storage.emplace_back(channel);
+        return channel_storage.size() - 1;
+    }();
+    channel_map.emplace(channel.bind_id, new_id);
+    if (current_channel_id != UNSET_CHANNEL) {
+        channel_state = &channel_storage[current_channel_id];
+    }
+    active_channel_ids.push_back(new_id);
+    auto as_it = address_spaces.find(channel.memory_manager->GetID());
+    if (as_it != address_spaces.end()) {
+        as_it->second.ref_count++;
+        return;
+    }
+    AddresSpaceRef new_gpu_mem_ref{
+        .ref_count = 1,
+        .storage_id = address_spaces.size(),
+        .gpu_memory = channel.memory_manager.get(),
+    };
+    address_spaces.emplace(channel.memory_manager->GetID(), new_gpu_mem_ref);
+    OnGPUASRegister(channel.memory_manager->GetID());
+}
+
+/// Bind a channel for execution.
+template <class P>
+void ChannelSetupCaches<P>::BindToChannel(s32 id) {
+    std::unique_lock<std::mutex> lk(config_mutex);
+    auto it = channel_map.find(id);
+    ASSERT(it != channel_map.end() && id >= 0);
+    current_channel_id = it->second;
+    channel_state = &channel_storage[current_channel_id];
+    maxwell3d = &channel_state->maxwell3d;
+    kepler_compute = &channel_state->kepler_compute;
+    gpu_memory = &channel_state->gpu_memory;
+    current_address_space = gpu_memory->GetID();
+}
+
+/// Erase channel's channel_state.
+template <class P>
+void ChannelSetupCaches<P>::EraseChannel(s32 id) {
+    std::unique_lock<std::mutex> lk(config_mutex);
+    const auto it = channel_map.find(id);
+    ASSERT(it != channel_map.end() && id >= 0);
+    const auto this_id = it->second;
+    free_channel_ids.push_back(this_id);
+    channel_map.erase(it);
+    if (this_id == current_channel_id) {
+        current_channel_id = UNSET_CHANNEL;
+        channel_state = nullptr;
+        maxwell3d = nullptr;
+        kepler_compute = nullptr;
+        gpu_memory = nullptr;
+    } else if (current_channel_id != UNSET_CHANNEL) {
+        channel_state = &channel_storage[current_channel_id];
+    }
+    active_channel_ids.erase(
+        std::find(active_channel_ids.begin(), active_channel_ids.end(), this_id));
+}
+
+} // namespace VideoCommon

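CreateChannel also ref-counts GPU address spaces: channels that share a memory manager share one AddresSpaceRef entry, and only the first registration triggers OnGPUASRegister. A reduced sketch of that ref-counting (illustrative only, not the yuzu interfaces):

    #include <cstddef>
    #include <cstdio>
    #include <unordered_map>

    struct AddressSpaceRef {
        std::size_t ref_count;
        std::size_t storage_id;
    };

    std::unordered_map<std::size_t, AddressSpaceRef> address_spaces;

    void RegisterChannelAS(std::size_t as_id) {
        if (const auto it = address_spaces.find(as_id); it != address_spaces.end()) {
            ++it->second.ref_count; // another channel on an already-known address space
            return;
        }
        address_spaces.emplace(as_id, AddressSpaceRef{
            .ref_count = 1,
            .storage_id = address_spaces.size(), // first registration gets a fresh storage id
        });
        std::printf("new address space %zu registered\n", as_id);
    }

    int main() {
        RegisterChannelAS(42); // prints: new address space 42 registered
        RegisterChannelAS(42); // only bumps the ref count
    }
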
src/video_core/control/scheduler.cpp (new executable file, 31 lines)
@@ -0,0 +1,31 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv3 or any later version
+// Refer to the license.txt file included.
+
+#include <memory>
+
+#include "video_core/control/channel_state.h"
+#include "video_core/control/scheduler.h"
+#include "video_core/gpu.h"
+
+namespace Tegra::Control {
+Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
+
+Scheduler::~Scheduler() = default;
+
+void Scheduler::Push(s32 channel, CommandList&& entries) {
+    std::unique_lock<std::mutex> lk(scheduling_guard);
+    auto it = channels.find(channel);
+    auto channel_state = it->second;
+    gpu.BindChannel(channel_state->bind_id);
+    channel_state->dma_pusher->Push(std::move(entries));
+    channel_state->dma_pusher->DispatchCalls();
+}
+
+void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
+    s32 channel = new_channel->bind_id;
+    std::unique_lock<std::mutex> lk(scheduling_guard);
+    channels.emplace(channel, new_channel);
+}
+
+} // namespace Tegra::Control

src/video_core/control/scheduler.h (new executable file, 38 lines)
@@ -0,0 +1,38 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv3 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+
+#include "video_core/dma_pusher.h"
+
+namespace Tegra {
+
+class GPU;
+
+namespace Control {
+
+struct ChannelState;
+
+class Scheduler {
+public:
+    Scheduler(GPU& gpu_);
+    ~Scheduler();
+
+    void Push(s32 channel, CommandList&& entries);
+
+    void DeclareChannel(std::shared_ptr<ChannelState> new_channel);
+
+private:
+    std::unordered_map<s32, std::shared_ptr<ChannelState>> channels;
+    std::mutex scheduling_guard;
+    GPU& gpu;
+};
+
+} // namespace Control
+
+} // namespace Tegra

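Taken together, the scheduler serializes submissions: DeclareChannel registers a shared ChannelState, and Push binds the channel on the GPU and hands the command list to that channel's DmaPusher under one lock. A standalone model of the flow (hypothetical types, not the yuzu interfaces; the real Push also rebinds the GPU to the channel first):

    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    using CommandList = std::vector<unsigned>;

    struct Channel {
        int bind_id;
        void Dispatch(CommandList&& entries) {
            std::printf("channel %d ran %zu entries\n", bind_id, entries.size());
        }
    };

    class Scheduler {
    public:
        void DeclareChannel(std::shared_ptr<Channel> channel) {
            std::scoped_lock lk{guard};
            channels.emplace(channel->bind_id, std::move(channel));
        }

        void Push(int channel, CommandList&& entries) {
            // One lock serializes all submissions, so only one channel runs at a time.
            std::scoped_lock lk{guard};
            channels.at(channel)->Dispatch(std::move(entries));
        }

    private:
        std::unordered_map<int, std::shared_ptr<Channel>> channels;
        std::mutex guard;
    };

    int main() {
        Scheduler scheduler;
        scheduler.DeclareChannel(std::make_shared<Channel>(Channel{3}));
        scheduler.Push(3, CommandList{1, 2, 3});
    }
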
@@ -12,7 +12,10 @@

 namespace Tegra {

-DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_) : gpu{gpu_}, system{system_} {}
+DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
+                     Control::ChannelState& channel_state_)
+    : gpu{gpu_}, system{system_}, memory_manager{memory_manager_}, puller{gpu_, memory_manager_,
+                                                                          *this, channel_state_} {}

 DmaPusher::~DmaPusher() = default;

@@ -21,8 +24,6 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128,
 void DmaPusher::DispatchCalls() {
     MICROPROFILE_SCOPE(DispatchCalls);

-    gpu.SyncGuestHost();
-
     dma_pushbuffer_subindex = 0;

     dma_state.is_last_call = true;
@@ -33,7 +34,6 @@ void DmaPusher::DispatchCalls() {
         }
     }
     gpu.FlushCommands();
-    gpu.SyncGuestHost();
     gpu.OnCommandListEnd();
 }

@@ -76,10 +76,10 @@ bool DmaPusher::Step() {
         // Push buffer non-empty, read a word
         command_headers.resize(command_list_header.size);
         if (Settings::IsGPULevelHigh()) {
-            gpu.MemoryManager().ReadBlock(dma_get, command_headers.data(),
-                                          command_list_header.size * sizeof(u32));
+            memory_manager.ReadBlock(dma_get, command_headers.data(),
+                                     command_list_header.size * sizeof(u32));
         } else {
-            gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
-                                                command_list_header.size * sizeof(u32));
+            memory_manager.ReadBlockUnsafe(dma_get, command_headers.data(),
+                                           command_list_header.size * sizeof(u32));
         }
     }
@@ -154,7 +154,7 @@ void DmaPusher::SetState(const CommandHeader& command_header) {

 void DmaPusher::CallMethod(u32 argument) const {
     if (dma_state.method < non_puller_methods) {
-        gpu.CallMethod(GPU::MethodCall{
+        puller.CallPullerMethod(Engines::Puller::MethodCall{
             dma_state.method,
             argument,
             dma_state.subchannel,
@@ -168,7 +168,7 @@ void DmaPusher::CallMethod(u32 argument) const {

 void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
     if (dma_state.method < non_puller_methods) {
-        gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
-                            dma_state.method_count);
+        puller.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
+                               dma_state.method_count);
     } else {
         subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start,
@@ -176,4 +176,8 @@ void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
     }
 }

+void DmaPusher::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
+    puller.BindRasterizer(rasterizer);
+}
+
 } // namespace Tegra

@@ -10,6 +10,7 @@
 #include "common/bit_field.h"
 #include "common/common_types.h"
 #include "video_core/engines/engine_interface.h"
+#include "video_core/engines/puller.h"

 namespace Core {
 class System;
@@ -17,7 +18,12 @@ class System;

 namespace Tegra {

+namespace Control {
+struct ChannelState;
+}
+
 class GPU;
+class MemoryManager;

 enum class SubmissionMode : u32 {
     IncreasingOld = 0,
@@ -31,24 +37,32 @@ enum class SubmissionMode : u32 {
 // Note that, traditionally, methods are treated as 4-byte addressable locations, and hence
 // their numbers are written down multiplied by 4 in Docs. Here we do not multiply by 4,
 // so the values you see in docs might be multiplied by 4.
+// Register documentation:
+// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/classes/host/cla26f.h
+//
+// Register Description (approx):
+// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/manuals/volta/gv100/dev_pbdma.ref.txt
 enum class BufferMethods : u32 {
     BindObject = 0x0,
+    Illegal = 0x1,
     Nop = 0x2,
     SemaphoreAddressHigh = 0x4,
     SemaphoreAddressLow = 0x5,
-    SemaphoreSequence = 0x6,
-    SemaphoreTrigger = 0x7,
-    NotifyIntr = 0x8,
+    SemaphoreSequencePayload = 0x6,
+    SemaphoreOperation = 0x7,
+    NonStallInterrupt = 0x8,
     WrcacheFlush = 0x9,
-    Unk28 = 0xA,
-    UnkCacheFlush = 0xB,
+    MemOpA = 0xA,
+    MemOpB = 0xB,
+    MemOpC = 0xC,
+    MemOpD = 0xD,
     RefCnt = 0x14,
     SemaphoreAcquire = 0x1A,
     SemaphoreRelease = 0x1B,
-    FenceValue = 0x1C,
-    FenceAction = 0x1D,
-    WaitForInterrupt = 0x1E,
-    Unk7c = 0x1F,
+    SyncpointPayload = 0x1C,
+    SyncpointOperation = 0x1D,
+    WaitForIdle = 0x1E,
+    CRCCheck = 0x1F,
     Yield = 0x20,
     NonPullerMethods = 0x40,
 };
@@ -102,7 +116,8 @@ struct CommandList final {
 */
 class DmaPusher final {
 public:
-    explicit DmaPusher(Core::System& system_, GPU& gpu_);
+    explicit DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
+                       Control::ChannelState& channel_state_);
     ~DmaPusher();

     void Push(CommandList&& entries) {
@@ -115,6 +130,8 @@ public:
         subchannels[subchannel_id] = engine;
     }

+    void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
+
 private:
     static constexpr u32 non_puller_methods = 0x40;
     static constexpr u32 max_subchannels = 8;
@@ -148,6 +165,8 @@ private:

     GPU& gpu;
     Core::System& system;
+    MemoryManager& memory_manager;
+    mutable Engines::Puller puller;
 };

 } // namespace Tegra

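With the puller split out, method dispatch follows one rule: method numbers below NonPullerMethods (0x40) are handled by the puller itself, and everything above goes to whichever engine is bound to the subchannel. A minimal model of that routing decision (names are illustrative, not the yuzu API):

    #include <array>
    #include <cstdio>

    constexpr unsigned non_puller_methods = 0x40;

    struct Engine {
        const char* name;
        void CallMethod(unsigned method, unsigned arg) {
            std::printf("%s handles method 0x%X arg %u\n", name, method, arg);
        }
    };

    struct Puller {
        void CallPullerMethod(unsigned method, unsigned arg) {
            std::printf("puller handles method 0x%X arg %u\n", method, arg);
        }
    };

    int main() {
        Puller puller;
        Engine maxwell_3d{"maxwell_3d"};
        std::array<Engine*, 8> subchannels{};
        subchannels[0] = &maxwell_3d;

        const unsigned methods[2]{0x1B, 0x100}; // one puller method, one engine method
        for (const unsigned method : methods) {
            if (method < non_puller_methods) {
                puller.CallPullerMethod(method, 0); // semaphores, syncpoints, references
            } else {
                subchannels[0]->CallMethod(method, 0); // engine register write
            }
        }
    }
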
@@ -3,6 +3,7 @@

 #include <cstring>

+#include "common/algorithm.h"
 #include "common/assert.h"
 #include "video_core/engines/engine_upload.h"
 #include "video_core/memory_manager.h"
@@ -34,21 +35,48 @@ void State::ProcessData(const u32 data, const bool is_last_call) {
     if (!is_last_call) {
         return;
     }
+    ProcessData(inner_buffer);
+}
+
+void State::ProcessData(const u32* data, size_t num_data) {
+    std::span<const u8> read_buffer(reinterpret_cast<const u8*>(data), num_data * sizeof(u32));
+    ProcessData(read_buffer);
+}
+
+void State::ProcessData(std::span<const u8> read_buffer) {
     const GPUVAddr address{regs.dest.Address()};
     if (is_linear) {
-        rasterizer->AccelerateInlineToMemory(address, copy_size, inner_buffer);
+        if (regs.line_count == 1) {
+            rasterizer->AccelerateInlineToMemory(address, copy_size, read_buffer);
+        } else {
+            for (u32 line = 0; line < regs.line_count; ++line) {
+                const GPUVAddr dest_line = address + static_cast<size_t>(line) * regs.dest.pitch;
+                memory_manager.WriteBlockUnsafe(
+                    dest_line, read_buffer.data() + static_cast<size_t>(line) * regs.line_length_in,
+                    regs.line_length_in);
+            }
+            memory_manager.InvalidateRegion(address, regs.dest.pitch * regs.line_count);
+        }
     } else {
-        UNIMPLEMENTED_IF(regs.dest.z != 0);
-        UNIMPLEMENTED_IF(regs.dest.depth != 1);
-        UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 0);
-        UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 0);
+        u32 width = regs.dest.width;
+        u32 x_elements = regs.line_length_in;
+        u32 x_offset = regs.dest.x;
+        const u32 bpp_shift = Common::FoldRight(
+            4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+            width, x_elements, x_offset, static_cast<u32>(address));
+        width >>= bpp_shift;
+        x_elements >>= bpp_shift;
+        x_offset >>= bpp_shift;
+        const u32 bytes_per_pixel = 1U << bpp_shift;
         const std::size_t dst_size = Tegra::Texture::CalculateSize(
-            true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 0);
+            true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
+            regs.dest.BlockHeight(), regs.dest.BlockDepth());
         tmp_buffer.resize(dst_size);
         memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
-        Tegra::Texture::SwizzleKepler(regs.dest.width, regs.dest.height, regs.dest.x, regs.dest.y,
-                                      regs.dest.BlockHeight(), copy_size, inner_buffer.data(),
-                                      tmp_buffer.data());
+        Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width,
+                                       regs.dest.height, regs.dest.depth, x_offset, regs.dest.y,
+                                       x_elements, regs.line_count, regs.dest.BlockHeight(),
+                                       regs.dest.BlockDepth(), regs.line_length_in);
         memory_manager.WriteBlock(address, tmp_buffer.data(), dst_size);
     }
 }

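The swizzle path above derives bytes-per-pixel from alignment instead of assuming one byte: the shift is the largest power of two that divides the width, the line length, the x offset, and the GPU address, capped at 16-byte texels. A runnable re-derivation of that arithmetic, with a plain loop standing in for Common::FoldRight and made-up input values:

    #include <algorithm>
    #include <bit>
    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    // The shared power-of-two alignment of all inputs, as a shift capped at 4
    // (16-byte texels). Illustrative only; not the yuzu helper itself.
    std::uint32_t BppShift(std::initializer_list<std::uint32_t> values) {
        std::uint32_t shift = 4U;
        for (const std::uint32_t v : values) {
            shift = std::min(shift, static_cast<std::uint32_t>(std::countr_zero(v)));
        }
        return shift;
    }

    int main() {
        const std::uint32_t width = 1024, x_elements = 512, x_offset = 64;
        const std::uint32_t address = 0x1000;
        const std::uint32_t shift = BppShift({width, x_elements, x_offset, address});
        // All four values are divisible by 16, so the copy can be treated as
        // 16-byte texels; width, x_elements, and x_offset shrink by the same shift.
        std::printf("bpp_shift=%u, bytes_per_pixel=%u\n", shift, 1u << shift);
    }
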
@@ -3,6 +3,7 @@

 #pragma once

+#include <span>
 #include <vector>
 #include "common/bit_field.h"
 #include "common/common_types.h"
@@ -33,7 +34,7 @@ struct Registers {
         u32 width;
         u32 height;
         u32 depth;
-        u32 z;
+        u32 layer;
         u32 x;
         u32 y;
@@ -62,11 +63,14 @@ public:

     void ProcessExec(bool is_linear_);
     void ProcessData(u32 data, bool is_last_call);
+    void ProcessData(const u32* data, size_t num_data);

     /// Binds a rasterizer to this engine.
     void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);

 private:
+    void ProcessData(std::span<const u8> read_buffer);
+
     u32 write_offset = 0;
     u32 copy_size = 0;
     std::vector<u8> inner_buffer;

@@ -36,8 +36,6 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal
     }
     case KEPLER_COMPUTE_REG_INDEX(data_upload): {
         upload_state.ProcessData(method_argument, is_last_call);
-        if (is_last_call) {
-        }
         break;
     }
     case KEPLER_COMPUTE_REG_INDEX(launch):
@@ -50,9 +48,16 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal

 void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
                                     u32 methods_pending) {
+    switch (method) {
+    case KEPLER_COMPUTE_REG_INDEX(data_upload):
+        upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+        return;
+    default:
         for (std::size_t i = 0; i < amount; i++) {
             CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
         }
+        break;
+    }
 }

 void KeplerCompute::ProcessLaunch() {

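The new switch in CallMultiMethod is a fast path: when a batched write targets the data-upload register, the whole argument array is forwarded in one call instead of N single-word round trips. A simplified sketch of that dispatch shape (register number and helpers are stand-ins):

    #include <cstddef>
    #include <cstdio>

    constexpr unsigned DATA_UPLOAD = 0x1B0; // stand-in for the data_upload register index

    void ProcessWord(unsigned method, unsigned value) {
        std::printf("single write: method 0x%X value %u\n", method, value);
    }

    void ProcessBatch(const unsigned* data, std::size_t count) {
        std::printf("bulk upload of %zu words\n", count);
    }

    void CallMultiMethod(unsigned method, const unsigned* base, std::size_t amount) {
        switch (method) {
        case DATA_UPLOAD:
            ProcessBatch(base, amount); // one bulk copy instead of `amount` calls
            return;
        default:
            for (std::size_t i = 0; i < amount; ++i) {
                ProcessWord(method, base[i]);
            }
            break;
        }
    }

    int main() {
        const unsigned words[4]{1, 2, 3, 4};
        CallMultiMethod(DATA_UPLOAD, words, 4);
        CallMultiMethod(0x10, words, 4);
    }
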
@@ -33,8 +33,6 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call
     }
     case KEPLERMEMORY_REG_INDEX(data): {
         upload_state.ProcessData(method_argument, is_last_call);
-        if (is_last_call) {
-        }
         break;
     }
     }
@@ -42,9 +40,16 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call

 void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
                                    u32 methods_pending) {
+    switch (method) {
+    case KEPLERMEMORY_REG_INDEX(data):
+        upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+        return;
+    default:
         for (std::size_t i = 0; i < amount; i++) {
             CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
         }
+        break;
+    }
 }

 } // namespace Tegra::Engines

@@ -219,6 +219,8 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
         regs.index_array.count = regs.small_index_2.count;
         regs.index_array.first = regs.small_index_2.first;
         dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+        // a macro calls this one over and over, should it increase instancing?
+        // Used by Hades and likely other Vulkan games.
         return DrawArrays();
     case MAXWELL3D_REG_INDEX(topology_override):
         use_topology_override = true;
@@ -237,11 +239,12 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
         return upload_state.ProcessExec(regs.exec_upload.linear != 0);
     case MAXWELL3D_REG_INDEX(data_upload):
         upload_state.ProcessData(argument, is_last_call);
-        if (is_last_call) {
-        }
         return;
     case MAXWELL3D_REG_INDEX(fragment_barrier):
         return rasterizer->FragmentBarrier();
+    case MAXWELL3D_REG_INDEX(invalidate_texture_data_cache):
+        rasterizer->InvalidateGPUCache();
+        return rasterizer->WaitForIdle();
     case MAXWELL3D_REG_INDEX(tiled_cache_barrier):
         return rasterizer->TiledCacheBarrier();
     }
@@ -311,6 +314,9 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
     case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15:
         ProcessCBMultiData(base_start, amount);
         break;
+    case MAXWELL3D_REG_INDEX(data_upload):
+        upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+        return;
     default:
         for (std::size_t i = 0; i < amount; i++) {
             CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
@@ -447,18 +453,10 @@ void Maxwell3D::ProcessFirmwareCall4() {
 }

 void Maxwell3D::StampQueryResult(u64 payload, bool long_query) {
-    struct LongQueryResult {
-        u64_le value;
-        u64_le timestamp;
-    };
-    static_assert(sizeof(LongQueryResult) == 16, "LongQueryResult has wrong size");
     const GPUVAddr sequence_address{regs.query.QueryAddress()};
     if (long_query) {
-        // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast
-        // GPU, this command may actually take a while to complete in real hardware due to GPU
-        // wait queues.
-        LongQueryResult query_result{payload, system.GPU().GetTicks()};
-        memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result));
+        memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
+        memory_manager.Write<u64>(sequence_address, payload);
     } else {
         memory_manager.Write<u32>(sequence_address, static_cast<u32>(payload));
     }
@@ -472,10 +470,25 @@ void Maxwell3D::ProcessQueryGet() {

     switch (regs.query.query_get.operation) {
     case Regs::QueryOperation::Release:
-        if (regs.query.query_get.fence == 1) {
-            rasterizer->SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence);
+        if (regs.query.query_get.fence == 1 || regs.query.query_get.short_query != 0) {
+            const GPUVAddr sequence_address{regs.query.QueryAddress()};
+            const u32 payload = regs.query.query_sequence;
+            std::function<void()> operation([this, sequence_address, payload] {
+                memory_manager.Write<u32>(sequence_address, payload);
+            });
+            rasterizer->SignalFence(std::move(operation));
         } else {
-            StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0);
+            struct LongQueryResult {
+                u64_le value;
+                u64_le timestamp;
+            };
+            const GPUVAddr sequence_address{regs.query.QueryAddress()};
+            const u32 payload = regs.query.query_sequence;
+            std::function<void()> operation([this, sequence_address, payload] {
+                memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
+                memory_manager.Write<u64>(sequence_address, payload);
+            });
+            rasterizer->SyncOperation(std::move(operation));
         }
         break;
     case Regs::QueryOperation::Acquire:

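Query releases are now expressed as deferred writes: the payload and address are captured in a std::function and the rasterizer decides when the write actually lands. A standalone model of deferring guest-memory writes behind a fence queue (hypothetical, not yuzu's fence manager):

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <queue>
    #include <utility>

    std::queue<std::function<void()>> pending_fences;

    void SignalFence(std::function<void()>&& op) {
        pending_fences.push(std::move(op)); // runs later, once the GPU work completes
    }

    void ReleaseFences() {
        while (!pending_fences.empty()) {
            pending_fences.front()();
            pending_fences.pop();
        }
    }

    int main() {
        std::uint32_t guest_memory[1]{};
        const std::uint32_t payload = 7;
        SignalFence([&, payload] { guest_memory[0] = payload; });
        std::printf("before release: %u\n", guest_memory[0]); // still 0
        ReleaseFences();
        std::printf("after release:  %u\n", guest_memory[0]); // 7
    }
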
@@ -1,6 +1,7 @@
 // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later

+#include "common/algorithm.h"
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
@@ -54,8 +55,6 @@ void MaxwellDMA::Launch() {
     const LaunchDMA& launch = regs.launch_dma;
     ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
     ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
-    ASSERT(regs.dst_params.origin.x == 0);
-    ASSERT(regs.dst_params.origin.y == 0);

     const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
     const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
@@ -121,12 +120,13 @@ void MaxwellDMA::CopyPitchToPitch() {

 void MaxwellDMA::CopyBlockLinearToPitch() {
     UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0);
-    UNIMPLEMENTED_IF(regs.src_params.block_size.depth != 0);
     UNIMPLEMENTED_IF(regs.src_params.layer != 0);

+    const bool is_remapping = regs.launch_dma.remap_enable != 0;
+
     // Optimized path for micro copies.
     const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
-    if (dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
+    if (!is_remapping && dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
         regs.src_params.height > GOB_SIZE_Y) {
         FastCopyBlockLinearToPitch();
         return;
@@ -134,10 +134,27 @@ void MaxwellDMA::CopyBlockLinearToPitch() {

     // Deswizzle the input and copy it over.
     UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
-    const u32 bytes_per_pixel =
-        regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1;
     const Parameters& src_params = regs.src_params;
-    const u32 width = src_params.width;
+
+    const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
+    const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
+
+    const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
+
+    u32 width = src_params.width;
+    u32 x_elements = regs.line_length_in;
+    u32 x_offset = src_params.origin.x;
+    u32 bpp_shift = 0U;
+    if (!is_remapping) {
+        bpp_shift = Common::FoldRight(
+            4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+            width, x_elements, x_offset, static_cast<u32>(regs.offset_in));
+        width >>= bpp_shift;
+        x_elements >>= bpp_shift;
+        x_offset >>= bpp_shift;
+    }
+
+    const u32 bytes_per_pixel = base_bpp << bpp_shift;
     const u32 height = src_params.height;
     const u32 depth = src_params.depth;
     const u32 block_height = src_params.block_size.height;
@@ -155,30 +172,46 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);

-    UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, width, bytes_per_pixel,
-                     block_height, src_params.origin.x, src_params.origin.y, write_buffer.data(),
-                     read_buffer.data());
+    UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
+                     src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
+                     regs.pitch_out);

     memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
 }

 void MaxwellDMA::CopyPitchToBlockLinear() {
     UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one");
+    UNIMPLEMENTED_IF(regs.dst_params.layer != 0);
     UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);

+    const bool is_remapping = regs.launch_dma.remap_enable != 0;
+    const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
+    const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
+
     const auto& dst_params = regs.dst_params;
-    const u32 bytes_per_pixel =
-        regs.launch_dma.remap_enable ? regs.pitch_in / regs.line_length_in : 1;
-    const u32 width = dst_params.width;
+
+    const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
+
+    u32 width = dst_params.width;
+    u32 x_elements = regs.line_length_in;
+    u32 x_offset = dst_params.origin.x;
+    u32 bpp_shift = 0U;
+    if (!is_remapping) {
+        bpp_shift = Common::FoldRight(
+            4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+            width, x_elements, x_offset, static_cast<u32>(regs.offset_out));
+        width >>= bpp_shift;
+        x_elements >>= bpp_shift;
+        x_offset >>= bpp_shift;
+    }
+
+    const u32 bytes_per_pixel = base_bpp << bpp_shift;
     const u32 height = dst_params.height;
     const u32 depth = dst_params.depth;
     const u32 block_height = dst_params.block_size.height;
     const u32 block_depth = dst_params.block_size.depth;
     const size_t dst_size =
         CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
-    const size_t dst_layer_size =
-        CalculateSize(true, bytes_per_pixel, width, height, 1, block_height, block_depth);

     const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;

     if (read_buffer.size() < src_size) {
@@ -188,32 +221,19 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
         write_buffer.resize(dst_size);
     }

-    if (Settings::IsGPULevelExtreme()) {
-        memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
-        memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
-    } else {
-        memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size);
-        memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
-    }
+    memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
+    memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);

     // If the input is linear and the output is tiled, swizzle the input and copy it over.
-    if (regs.dst_params.block_size.depth > 0) {
-        ASSERT(dst_params.layer == 0);
-        SwizzleSliceToVoxel(regs.line_length_in, regs.line_count, regs.pitch_in, width, height,
-                            bytes_per_pixel, block_height, block_depth, dst_params.origin.x,
-                            dst_params.origin.y, write_buffer.data(), read_buffer.data());
-    } else {
-        SwizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_in, width, bytes_per_pixel,
-                       write_buffer.data() + dst_layer_size * dst_params.layer, read_buffer.data(),
-                       block_height, dst_params.origin.x, dst_params.origin.y);
-    }
+    SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
+                   dst_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
+                   regs.pitch_in);

     memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
 }

 void MaxwellDMA::FastCopyBlockLinearToPitch() {
-    const u32 bytes_per_pixel =
-        regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1;
+    const u32 bytes_per_pixel = 1U;
     const size_t src_size = GOB_SIZE;
     const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
     u32 pos_x = regs.src_params.origin.x;
@@ -239,9 +259,10 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
         memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
     }

-    UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, regs.src_params.width,
-                     bytes_per_pixel, regs.src_params.block_size.height, pos_x, pos_y,
-                     write_buffer.data(), read_buffer.data());
+    UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, regs.src_params.width,
+                     regs.src_params.height, 1, pos_x, pos_y, regs.line_length_in, regs.line_count,
+                     regs.src_params.block_size.height, regs.src_params.block_size.depth,
+                     regs.pitch_out);

     memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
 }
@@ -249,16 +270,24 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
 void MaxwellDMA::ReleaseSemaphore() {
     const auto type = regs.launch_dma.semaphore_type;
     const GPUVAddr address = regs.semaphore.address;
+    const u32 payload = regs.semaphore.payload;
     switch (type) {
     case LaunchDMA::SemaphoreType::NONE:
         break;
-    case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE:
-        memory_manager.Write<u32>(address, regs.semaphore.payload);
+    case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: {
+        std::function<void()> operation(
+            [this, address, payload] { memory_manager.Write<u32>(address, payload); });
+        rasterizer->SignalFence(std::move(operation));
         break;
-    case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE:
-        memory_manager.Write<u64>(address, static_cast<u64>(regs.semaphore.payload));
-        memory_manager.Write<u64>(address + 8, system.GPU().GetTicks());
+    }
+    case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: {
+        std::function<void()> operation([this, address, payload] {
+            memory_manager.Write<u64>(address + sizeof(u64), system.GPU().GetTicks());
+            memory_manager.Write<u64>(address, payload);
+        });
+        rasterizer->SignalFence(std::move(operation));
         break;
+    }
     default:
         ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value()));
     }

@@ -189,10 +189,16 @@ public:
             BitField<4, 3, Swizzle> dst_y;
             BitField<8, 3, Swizzle> dst_z;
             BitField<12, 3, Swizzle> dst_w;
+            BitField<0, 12, u32> dst_components_raw;
             BitField<16, 2, u32> component_size_minus_one;
             BitField<20, 2, u32> num_src_components_minus_one;
             BitField<24, 2, u32> num_dst_components_minus_one;
         };
+
+        Swizzle GetComponent(size_t i) {
+            const u32 raw = dst_components_raw;
+            return static_cast<Swizzle>((raw >> (i * 3)) & 0x7);
+        }
     };
     static_assert(sizeof(RemapConst) == 12);

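GetComponent overlays the four 3-bit swizzle fields with a single 12-bit view (dst_components_raw) so a component can be picked by index; extraction is just a shift and a mask. A standalone check of that arithmetic (enum values and packing are illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    enum class Swizzle : std::uint32_t { SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3 };

    Swizzle GetComponent(std::uint32_t raw, std::size_t i) {
        return static_cast<Swizzle>((raw >> (i * 3)) & 0x7); // 3 bits per component
    }

    int main() {
        // Pack X, Y, Z, W into bits [0,3), [3,6), [6,9), [9,12).
        const std::uint32_t raw = 0u | (1u << 3) | (2u << 6) | (3u << 9);
        assert(GetComponent(raw, 0) == Swizzle::SrcX);
        assert(GetComponent(raw, 3) == Swizzle::SrcW);
    }
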
src/video_core/engines/puller.cpp (new executable file, 315 lines)
@@ -0,0 +1,315 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "common/settings.h"
+#include "core/core.h"
+#include "video_core/control/channel_state.h"
+#include "video_core/dma_pusher.h"
+#include "video_core/engines/fermi_2d.h"
+#include "video_core/engines/kepler_compute.h"
+#include "video_core/engines/kepler_memory.h"
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/engines/maxwell_dma.h"
+#include "video_core/engines/puller.h"
+#include "video_core/gpu.h"
+#include "video_core/memory_manager.h"
+#include "video_core/rasterizer_interface.h"
+
+namespace Tegra::Engines {
+
+Puller::Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher_,
+               Control::ChannelState& channel_state_)
+    : gpu{gpu_}, memory_manager{memory_manager_}, dma_pusher{dma_pusher_}, channel_state{
+                                                                               channel_state_} {}
+
+Puller::~Puller() = default;
+
+void Puller::ProcessBindMethod(const MethodCall& method_call) {
+    // Bind the current subchannel to the desired engine id.
+    LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
+              method_call.argument);
+    const auto engine_id = static_cast<EngineID>(method_call.argument);
+    bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id);
+    switch (engine_id) {
+    case EngineID::FERMI_TWOD_A:
+        dma_pusher.BindSubchannel(channel_state.fermi_2d.get(), method_call.subchannel);
+        break;
+    case EngineID::MAXWELL_B:
+        dma_pusher.BindSubchannel(channel_state.maxwell_3d.get(), method_call.subchannel);
+        break;
+    case EngineID::KEPLER_COMPUTE_B:
+        dma_pusher.BindSubchannel(channel_state.kepler_compute.get(), method_call.subchannel);
+        break;
+    case EngineID::MAXWELL_DMA_COPY_A:
+        dma_pusher.BindSubchannel(channel_state.maxwell_dma.get(), method_call.subchannel);
+        break;
+    case EngineID::KEPLER_INLINE_TO_MEMORY_B:
+        dma_pusher.BindSubchannel(channel_state.kepler_memory.get(), method_call.subchannel);
+        break;
+    default:
+        UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
+    }
+}
+
+void Puller::ProcessFenceActionMethod() {
+    switch (regs.fence_action.op) {
+    case Puller::FenceOperation::Acquire:
+        // UNIMPLEMENTED_MSG("Channel Scheduling pending.");
+        // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
+        rasterizer->ReleaseFences();
+        break;
+    case Puller::FenceOperation::Increment:
+        rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id);
+        break;
+    default:
+        UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
+    }
+}
+
+void Puller::ProcessSemaphoreTriggerMethod() {
+    const auto semaphoreOperationMask = 0xF;
+    const auto op =
+        static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask);
+    if (op == GpuSemaphoreOperation::WriteLong) {
+        struct Block {
+            u32 sequence;
+            u32 zeros = 0;
+            u64 timestamp;
+        };
+
+        const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
+        const u32 payload = regs.semaphore_sequence;
+        std::function<void()> operation([this, sequence_address, payload] {
+            Block block{};
+            block.sequence = payload;
+            block.timestamp = gpu.GetTicks();
+            memory_manager.WriteBlockUnsafe(sequence_address, &block, sizeof(block));
+        });
+        rasterizer->SyncOperation(std::move(operation));
+    } else {
+        do {
+            const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())};
+            regs.acquire_source = true;
+            regs.acquire_value = regs.semaphore_sequence;
+            if (op == GpuSemaphoreOperation::AcquireEqual) {
+                regs.acquire_active = true;
+                regs.acquire_mode = false;
+                if (word != regs.acquire_value) {
+                    rasterizer->ReleaseFences();
+                    continue;
+                }
+            } else if (op == GpuSemaphoreOperation::AcquireGequal) {
+                regs.acquire_active = true;
+                regs.acquire_mode = true;
+                if (word < regs.acquire_value) {
+                    rasterizer->ReleaseFences();
+                    continue;
+                }
+            } else if (op == GpuSemaphoreOperation::AcquireMask) {
+                if (word && regs.semaphore_sequence == 0) {
+                    rasterizer->ReleaseFences();
+                    continue;
+                }
+            } else {
+                LOG_ERROR(HW_GPU, "Invalid semaphore operation");
+            }
+        } while (false);
+    }
+}
+
+void Puller::ProcessSemaphoreRelease() {
+    const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
+    const u32 payload = regs.semaphore_release;
+    std::function<void()> operation([this, sequence_address, payload] {
+        memory_manager.Write<u32>(sequence_address, payload);
+    });
+    rasterizer->SyncOperation(std::move(operation));
+}
+
+void Puller::ProcessSemaphoreAcquire() {
+    u32 word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
+    const auto value = regs.semaphore_acquire;
+    while (word != value) {
+        regs.acquire_active = true;
+        regs.acquire_value = value;
+        std::this_thread::sleep_for(std::chrono::milliseconds(1));
+        rasterizer->ReleaseFences();
+        word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
+        // TODO(kemathe73) figure out how to do the acquire_timeout
+        regs.acquire_mode = false;
+        regs.acquire_source = false;
+    }
+}
+
+/// Calls a GPU puller method.
+void Puller::CallPullerMethod(const MethodCall& method_call) {
+    regs.reg_array[method_call.method] = method_call.argument;
+    const auto method = static_cast<BufferMethods>(method_call.method);
+
+    switch (method) {
+    case BufferMethods::BindObject: {
+        ProcessBindMethod(method_call);
+        break;
+    }
+    case BufferMethods::Nop:
+    case BufferMethods::SemaphoreAddressHigh:
+    case BufferMethods::SemaphoreAddressLow:
+    case BufferMethods::SemaphoreSequencePayload:
+    case BufferMethods::SyncpointPayload:
+        break;
+    case BufferMethods::WrcacheFlush:
+    case BufferMethods::RefCnt:
+        rasterizer->SignalReference();
+        break;
+    case BufferMethods::SyncpointOperation:
+        ProcessFenceActionMethod();
+        break;
+    case BufferMethods::WaitForIdle:
+        rasterizer->WaitForIdle();
+        break;
+    case BufferMethods::SemaphoreOperation: {
+        ProcessSemaphoreTriggerMethod();
+        break;
+    }
+    case BufferMethods::NonStallInterrupt: {
+        LOG_ERROR(HW_GPU, "Special puller engine method NonStallInterrupt not implemented");
+        break;
+    }
+    case BufferMethods::MemOpA: {
+        LOG_ERROR(HW_GPU, "Memory Operation A");
+        break;
+    }
+    case BufferMethods::MemOpB: {
+        // Implement this better.
+        rasterizer->InvalidateGPUCache();
+        break;
+    }
+    case BufferMethods::MemOpC:
+    case BufferMethods::MemOpD: {
+        LOG_ERROR(HW_GPU, "Memory Operation C,D");
+        break;
+    }
+    case BufferMethods::SemaphoreAcquire: {
+        ProcessSemaphoreAcquire();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case BufferMethods::SemaphoreRelease: {
|
||||||
|
ProcessSemaphoreRelease();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
case BufferMethods::Yield: {
|
||||||
|
// TODO(Kmather73): Research and implement this method.
|
||||||
|
LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Calls a GPU engine method.
|
||||||
|
void Puller::CallEngineMethod(const MethodCall& method_call) {
|
||||||
|
const EngineID engine = bound_engines[method_call.subchannel];
|
||||||
|
|
||||||
|
switch (engine) {
|
||||||
|
case EngineID::FERMI_TWOD_A:
|
||||||
|
channel_state.fermi_2d->CallMethod(method_call.method, method_call.argument,
|
||||||
|
method_call.IsLastCall());
|
||||||
|
break;
|
||||||
|
case EngineID::MAXWELL_B:
|
||||||
|
channel_state.maxwell_3d->CallMethod(method_call.method, method_call.argument,
|
||||||
|
method_call.IsLastCall());
|
||||||
|
break;
|
||||||
|
case EngineID::KEPLER_COMPUTE_B:
|
||||||
|
channel_state.kepler_compute->CallMethod(method_call.method, method_call.argument,
|
||||||
|
method_call.IsLastCall());
|
||||||
|
break;
|
||||||
|
case EngineID::MAXWELL_DMA_COPY_A:
|
||||||
|
channel_state.maxwell_dma->CallMethod(method_call.method, method_call.argument,
|
||||||
|
method_call.IsLastCall());
|
||||||
|
break;
|
||||||
|
case EngineID::KEPLER_INLINE_TO_MEMORY_B:
|
||||||
|
channel_state.kepler_memory->CallMethod(method_call.method, method_call.argument,
|
||||||
|
method_call.IsLastCall());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
UNIMPLEMENTED_MSG("Unimplemented engine");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Calls a GPU engine multivalue method.
|
||||||
|
void Puller::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
|
||||||
|
u32 methods_pending) {
|
||||||
|
const EngineID engine = bound_engines[subchannel];
|
||||||
|
|
||||||
|
switch (engine) {
|
||||||
|
case EngineID::FERMI_TWOD_A:
|
||||||
|
channel_state.fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
|
||||||
|
break;
|
||||||
|
case EngineID::MAXWELL_B:
|
||||||
|
channel_state.maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
|
||||||
|
break;
|
||||||
|
case EngineID::KEPLER_COMPUTE_B:
|
||||||
|
channel_state.kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
|
||||||
|
break;
|
||||||
|
case EngineID::MAXWELL_DMA_COPY_A:
|
||||||
|
channel_state.maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
|
||||||
|
break;
|
||||||
|
case EngineID::KEPLER_INLINE_TO_MEMORY_B:
|
||||||
|
channel_state.kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
UNIMPLEMENTED_MSG("Unimplemented engine");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Calls a GPU method.
|
||||||
|
void Puller::CallMethod(const MethodCall& method_call) {
|
||||||
|
LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method,
|
||||||
|
method_call.subchannel);
|
||||||
|
|
||||||
|
ASSERT(method_call.subchannel < bound_engines.size());
|
||||||
|
|
||||||
|
if (ExecuteMethodOnEngine(method_call.method)) {
|
||||||
|
CallEngineMethod(method_call);
|
||||||
|
} else {
|
||||||
|
CallPullerMethod(method_call);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Calls a GPU multivalue method.
|
||||||
|
void Puller::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
|
||||||
|
u32 methods_pending) {
|
||||||
|
LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
|
||||||
|
|
||||||
|
ASSERT(subchannel < bound_engines.size());
|
||||||
|
|
||||||
|
if (ExecuteMethodOnEngine(method)) {
|
||||||
|
CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
|
||||||
|
} else {
|
||||||
|
for (std::size_t i = 0; i < amount; i++) {
|
||||||
|
CallPullerMethod(MethodCall{
|
||||||
|
method,
|
||||||
|
base_start[i],
|
||||||
|
subchannel,
|
||||||
|
methods_pending - static_cast<u32>(i),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void Puller::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
|
||||||
|
rasterizer = rasterizer_;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Determines where the method should be executed.
|
||||||
|
[[nodiscard]] bool Puller::ExecuteMethodOnEngine(u32 method) {
|
||||||
|
const auto buffer_method = static_cast<BufferMethods>(method);
|
||||||
|
return buffer_method >= BufferMethods::NonPullerMethods;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace Tegra::Engines
|
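For orientation, a minimal usage sketch of the puller above. This is illustrative caller code, not part of the commit: the concrete method indices assume that BindObject sits at index 0 of the BufferMethods table and that 0x400 lies above the BufferMethods::NonPullerMethods threshold used by ExecuteMethodOnEngine.

    using MethodCall = Tegra::Engines::Puller::MethodCall;
    // 0xB197 is EngineID::MAXWELL_B (see puller.h below).
    puller.CallMethod(MethodCall{0x0, 0xB197, /*subchannel=*/3});  // ProcessBindMethod binds Maxwell3D
    puller.CallMethod(MethodCall{0x400, 0, /*subchannel=*/3});     // above the threshold: forwarded to maxwell_3d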
178
src/video_core/engines/puller.h
Executable file

@@ -0,0 +1,178 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <cstddef>
#include <vector>
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/engines/engine_interface.h"

namespace Core {
class System;
}

namespace Tegra {
class MemoryManager;
class DmaPusher;

enum class EngineID {
    FERMI_TWOD_A = 0x902D, // 2D Engine
    MAXWELL_B = 0xB197,    // 3D Engine
    KEPLER_COMPUTE_B = 0xB1C0,
    KEPLER_INLINE_TO_MEMORY_B = 0xA140,
    MAXWELL_DMA_COPY_A = 0xB0B5,
};

namespace Control {
struct ChannelState;
}
} // namespace Tegra

namespace VideoCore {
class RasterizerInterface;
}

namespace Tegra::Engines {

class Puller final {
public:
    struct MethodCall {
        u32 method{};
        u32 argument{};
        u32 subchannel{};
        u32 method_count{};

        explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
            : method(method_), argument(argument_), subchannel(subchannel_),
              method_count(method_count_) {}

        [[nodiscard]] bool IsLastCall() const {
            return method_count <= 1;
        }
    };

    enum class FenceOperation : u32 {
        Acquire = 0,
        Increment = 1,
    };

    union FenceAction {
        u32 raw;
        BitField<0, 1, FenceOperation> op;
        BitField<8, 24, u32> syncpoint_id;
    };

    explicit Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher,
                    Control::ChannelState& channel_state);
    ~Puller();

    void CallMethod(const MethodCall& method_call);

    void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
                         u32 methods_pending);

    void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);

    void CallPullerMethod(const MethodCall& method_call);

    void CallEngineMethod(const MethodCall& method_call);

    void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
                               u32 methods_pending);

private:
    Tegra::GPU& gpu;

    MemoryManager& memory_manager;
    DmaPusher& dma_pusher;
    Control::ChannelState& channel_state;
    VideoCore::RasterizerInterface* rasterizer = nullptr;

    static constexpr std::size_t NUM_REGS = 0x800;
    struct Regs {
        static constexpr size_t NUM_REGS = 0x40;

        union {
            struct {
                INSERT_PADDING_WORDS_NOINIT(0x4);
                struct {
                    u32 address_high;
                    u32 address_low;

                    [[nodiscard]] GPUVAddr SemaphoreAddress() const {
                        return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
                                                     address_low);
                    }
                } semaphore_address;

                u32 semaphore_sequence;
                u32 semaphore_trigger;
                INSERT_PADDING_WORDS_NOINIT(0xC);

                // The pusher and the puller share the reference counter, the pusher only has read
                // access
                u32 reference_count;
                INSERT_PADDING_WORDS_NOINIT(0x5);

                u32 semaphore_acquire;
                u32 semaphore_release;
                u32 fence_value;
                FenceAction fence_action;
                INSERT_PADDING_WORDS_NOINIT(0xE2);

                // Puller state
                u32 acquire_mode;
                u32 acquire_source;
                u32 acquire_active;
                u32 acquire_timeout;
                u32 acquire_value;
            };
            std::array<u32, NUM_REGS> reg_array;
        };
    } regs{};

    void ProcessBindMethod(const MethodCall& method_call);
    void ProcessFenceActionMethod();
    void ProcessSemaphoreAcquire();
    void ProcessSemaphoreRelease();
    void ProcessSemaphoreTriggerMethod();
    [[nodiscard]] bool ExecuteMethodOnEngine(u32 method);

    /// Mapping of command subchannels to their bound engine ids
    std::array<EngineID, 8> bound_engines{};

    enum class GpuSemaphoreOperation {
        AcquireEqual = 0x1,
        WriteLong = 0x2,
        AcquireGequal = 0x4,
        AcquireMask = 0x8,
    };

#define ASSERT_REG_POSITION(field_name, position)                                                  \
    static_assert(offsetof(Regs, field_name) == position * 4,                                      \
                  "Field " #field_name " has invalid position")

    ASSERT_REG_POSITION(semaphore_address, 0x4);
    ASSERT_REG_POSITION(semaphore_sequence, 0x6);
    ASSERT_REG_POSITION(semaphore_trigger, 0x7);
    ASSERT_REG_POSITION(reference_count, 0x14);
    ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
    ASSERT_REG_POSITION(semaphore_release, 0x1B);
    ASSERT_REG_POSITION(fence_value, 0x1C);
    ASSERT_REG_POSITION(fence_action, 0x1D);

    ASSERT_REG_POSITION(acquire_mode, 0x100);
    ASSERT_REG_POSITION(acquire_source, 0x101);
    ASSERT_REG_POSITION(acquire_active, 0x102);
    ASSERT_REG_POSITION(acquire_timeout, 0x103);
    ASSERT_REG_POSITION(acquire_value, 0x104);

#undef ASSERT_REG_POSITION
};

} // namespace Tegra::Engines
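As a worked illustration of the FenceAction union above: op occupies bit 0 and syncpoint_id occupies bits 8..31 per the BitField declarations, so a raw word can be packed by hand. The helper below is hypothetical, not part of the commit.

    // Hypothetical helper matching the BitField layout of Puller::FenceAction.
    constexpr u32 BuildFenceAction(Tegra::Engines::Puller::FenceOperation op, u32 syncpoint_id) {
        return static_cast<u32>(op) | (syncpoint_id << 8);
    }
    // FenceOperation::Increment (1) on syncpoint 5 packs to 0x501.
    static_assert(BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Increment, 5) == 0x501);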
@@ -4,40 +4,24 @@
 #pragma once
 
 #include <algorithm>
+#include <cstring>
+#include <deque>
+#include <functional>
+#include <memory>
 #include <queue>
 
 #include "common/common_types.h"
 #include "video_core/delayed_destruction_ring.h"
 #include "video_core/gpu.h"
-#include "video_core/memory_manager.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"
 #include "video_core/rasterizer_interface.h"
 
 namespace VideoCommon {
 
 class FenceBase {
 public:
-    explicit FenceBase(u32 payload_, bool is_stubbed_)
-        : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {}
-
-    explicit FenceBase(GPUVAddr address_, u32 payload_, bool is_stubbed_)
-        : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {}
-
-    GPUVAddr GetAddress() const {
-        return address;
-    }
-
-    u32 GetPayload() const {
-        return payload;
-    }
-
-    bool IsSemaphore() const {
-        return is_semaphore;
-    }
-
-private:
-    GPUVAddr address;
-    u32 payload;
-    bool is_semaphore;
+    explicit FenceBase(bool is_stubbed_) : is_stubbed{is_stubbed_} {}
 
 protected:
     bool is_stubbed;
@@ -57,30 +41,28 @@ public:
         buffer_cache.AccumulateFlushes();
     }
 
-    void SignalSemaphore(GPUVAddr addr, u32 value) {
+    void SyncOperation(std::function<void()>&& func) {
+        uncommitted_operations.emplace_back(std::move(func));
+    }
+
+    void SignalFence(std::function<void()>&& func) {
         TryReleasePendingFences();
         const bool should_flush = ShouldFlush();
         CommitAsyncFlushes();
-        TFence new_fence = CreateFence(addr, value, !should_flush);
+        uncommitted_operations.emplace_back(std::move(func));
+        CommitOperations();
+        TFence new_fence = CreateFence(!should_flush);
         fences.push(new_fence);
         QueueFence(new_fence);
         if (should_flush) {
             rasterizer.FlushCommands();
         }
-        rasterizer.SyncGuestHost();
     }
 
     void SignalSyncPoint(u32 value) {
-        TryReleasePendingFences();
-        const bool should_flush = ShouldFlush();
-        CommitAsyncFlushes();
-        TFence new_fence = CreateFence(value, !should_flush);
-        fences.push(new_fence);
-        QueueFence(new_fence);
-        if (should_flush) {
-            rasterizer.FlushCommands();
-        }
-        rasterizer.SyncGuestHost();
+        syncpoint_manager.IncrementGuest(value);
+        std::function<void()> func([this, value] { syncpoint_manager.IncrementHost(value); });
+        SignalFence(std::move(func));
     }
 
     void WaitPendingFences() {
@@ -90,11 +72,10 @@ public:
             WaitFence(current_fence);
         }
         PopAsyncFlushes();
-        if (current_fence->IsSemaphore()) {
-            gpu_memory.template Write<u32>(current_fence->GetAddress(),
-                                           current_fence->GetPayload());
-        } else {
-            gpu.IncrementSyncPoint(current_fence->GetPayload());
+        auto operations = std::move(pending_operations.front());
+        pending_operations.pop_front();
+        for (auto& operation : operations) {
+            operation();
         }
         PopFence();
     }
@@ -104,16 +85,14 @@ protected:
     explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
                           TTextureCache& texture_cache_, TTBufferCache& buffer_cache_,
                           TQueryCache& query_cache_)
-        : rasterizer{rasterizer_}, gpu{gpu_}, gpu_memory{gpu.MemoryManager()},
+        : rasterizer{rasterizer_}, gpu{gpu_}, syncpoint_manager{gpu.Host1x().GetSyncpointManager()},
          texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {}
 
     virtual ~FenceManager() = default;
 
-    /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is
-    /// true
-    virtual TFence CreateFence(u32 value, bool is_stubbed) = 0;
-    /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true
-    virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0;
+    /// Creates a Fence Interface, does not create a backend fence if 'is_stubbed' is true
+    virtual TFence CreateFence(bool is_stubbed) = 0;
    /// Queues a fence into the backend if the fence isn't stubbed.
     virtual void QueueFence(TFence& fence) = 0;
     /// Notifies that the backend fence has been signaled/reached in host GPU.
@@ -123,7 +102,7 @@ protected:
     VideoCore::RasterizerInterface& rasterizer;
     Tegra::GPU& gpu;
-    Tegra::MemoryManager& gpu_memory;
+    Tegra::Host1x::SyncpointManager& syncpoint_manager;
     TTextureCache& texture_cache;
     TTBufferCache& buffer_cache;
     TQueryCache& query_cache;
@@ -136,11 +115,10 @@ private:
             return;
         }
         PopAsyncFlushes();
-        if (current_fence->IsSemaphore()) {
-            gpu_memory.template Write<u32>(current_fence->GetAddress(),
-                                           current_fence->GetPayload());
-        } else {
-            gpu.IncrementSyncPoint(current_fence->GetPayload());
+        auto operations = std::move(pending_operations.front());
+        pending_operations.pop_front();
+        for (auto& operation : operations) {
+            operation();
         }
         PopFence();
     }
@@ -159,16 +137,20 @@ private:
     }
 
     void PopAsyncFlushes() {
-        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-        texture_cache.PopAsyncFlushes();
-        buffer_cache.PopAsyncFlushes();
+        {
+            std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+            texture_cache.PopAsyncFlushes();
+            buffer_cache.PopAsyncFlushes();
+        }
         query_cache.PopAsyncFlushes();
     }
 
     void CommitAsyncFlushes() {
-        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
-        texture_cache.CommitAsyncFlushes();
-        buffer_cache.CommitAsyncFlushes();
+        {
+            std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+            texture_cache.CommitAsyncFlushes();
+            buffer_cache.CommitAsyncFlushes();
+        }
         query_cache.CommitAsyncFlushes();
     }
@@ -177,7 +159,13 @@ private:
         fences.pop();
     }
 
+    void CommitOperations() {
+        pending_operations.emplace_back(std::move(uncommitted_operations));
+    }
+
     std::queue<TFence> fences;
+    std::deque<std::function<void()>> uncommitted_operations;
+    std::deque<std::deque<std::function<void()>>> pending_operations;
 
     DelayedDestructionRing<TFence, 6> delayed_destruction_ring;
 };
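To make the reworked fence flow concrete, a hedged usage sketch (the caller code and lambda bodies are illustrative, not from this commit): SyncOperation only queues deferred work, SignalFence commits the queued batch and ties it to a freshly created fence, and draining the fence queue runs each batch in submission order.

    fence_manager.SyncOperation([] { /* deferred guest-visible write */ });
    fence_manager.SignalFence([] { /* runs once the backend fence is reached */ });
    fence_manager.WaitPendingFences(); // pops fences, executing each pending batch in order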
||||||
|
|
|
@ -14,10 +14,11 @@
|
||||||
#include "core/core.h"
|
#include "core/core.h"
|
||||||
#include "core/core_timing.h"
|
#include "core/core_timing.h"
|
||||||
#include "core/frontend/emu_window.h"
|
#include "core/frontend/emu_window.h"
|
||||||
#include "core/hardware_interrupt_manager.h"
|
|
||||||
#include "core/hle/service/nvdrv/nvdata.h"
|
#include "core/hle/service/nvdrv/nvdata.h"
|
||||||
#include "core/perf_stats.h"
|
#include "core/perf_stats.h"
|
||||||
#include "video_core/cdma_pusher.h"
|
#include "video_core/cdma_pusher.h"
|
||||||
|
#include "video_core/control/channel_state.h"
|
||||||
|
#include "video_core/control/scheduler.h"
|
||||||
#include "video_core/dma_pusher.h"
|
#include "video_core/dma_pusher.h"
|
||||||
#include "video_core/engines/fermi_2d.h"
|
#include "video_core/engines/fermi_2d.h"
|
||||||
#include "video_core/engines/kepler_compute.h"
|
#include "video_core/engines/kepler_compute.h"
|
||||||
|
@ -26,75 +27,64 @@
|
||||||
#include "video_core/engines/maxwell_dma.h"
|
#include "video_core/engines/maxwell_dma.h"
|
||||||
#include "video_core/gpu.h"
|
#include "video_core/gpu.h"
|
||||||
#include "video_core/gpu_thread.h"
|
#include "video_core/gpu_thread.h"
|
||||||
|
#include "video_core/host1x/host1x.h"
|
||||||
|
#include "video_core/host1x/syncpoint_manager.h"
|
||||||
#include "video_core/memory_manager.h"
|
#include "video_core/memory_manager.h"
|
||||||
#include "video_core/renderer_base.h"
|
#include "video_core/renderer_base.h"
|
||||||
#include "video_core/shader_notify.h"
|
#include "video_core/shader_notify.h"
|
||||||
|
|
||||||
namespace Tegra {
|
namespace Tegra {
|
||||||
|
|
||||||
MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
|
|
||||||
|
|
||||||
struct GPU::Impl {
|
struct GPU::Impl {
|
||||||
explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_)
|
explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_)
|
||||||
: gpu{gpu_}, system{system_}, memory_manager{std::make_unique<Tegra::MemoryManager>(
|
: gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_},
|
||||||
system)},
|
|
||||||
dma_pusher{std::make_unique<Tegra::DmaPusher>(system, gpu)}, use_nvdec{use_nvdec_},
|
|
||||||
maxwell_3d{std::make_unique<Engines::Maxwell3D>(system, *memory_manager)},
|
|
||||||
fermi_2d{std::make_unique<Engines::Fermi2D>()},
|
|
||||||
kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)},
|
|
||||||
maxwell_dma{std::make_unique<Engines::MaxwellDMA>(system, *memory_manager)},
|
|
||||||
kepler_memory{std::make_unique<Engines::KeplerMemory>(system, *memory_manager)},
|
|
||||||
shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_},
|
shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_},
|
||||||
gpu_thread{system_, is_async_} {}
|
gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {}
|
||||||
|
|
||||||
~Impl() = default;
|
~Impl() = default;
|
||||||
|
|
||||||
|
std::shared_ptr<Control::ChannelState> CreateChannel(s32 channel_id) {
|
||||||
|
auto channel_state = std::make_shared<Tegra::Control::ChannelState>(channel_id);
|
||||||
|
channels.emplace(channel_id, channel_state);
|
||||||
|
scheduler->DeclareChannel(channel_state);
|
||||||
|
return channel_state;
|
||||||
|
}
|
||||||
|
|
||||||
|
void BindChannel(s32 channel_id) {
|
||||||
|
if (bound_channel == channel_id) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto it = channels.find(channel_id);
|
||||||
|
ASSERT(it != channels.end());
|
||||||
|
bound_channel = channel_id;
|
||||||
|
current_channel = it->second.get();
|
||||||
|
|
||||||
|
rasterizer->BindChannel(*current_channel);
|
||||||
|
}
|
||||||
|
|
||||||
|
std::shared_ptr<Control::ChannelState> AllocateChannel() {
|
||||||
|
return CreateChannel(new_channel_id++);
|
||||||
|
}
|
||||||
|
|
||||||
|
void InitChannel(Control::ChannelState& to_init) {
|
||||||
|
to_init.Init(system, gpu);
|
||||||
|
to_init.BindRasterizer(rasterizer);
|
||||||
|
rasterizer->InitializeChannel(to_init);
|
||||||
|
}
|
||||||
|
|
||||||
|
void InitAddressSpace(Tegra::MemoryManager& memory_manager) {
|
||||||
|
memory_manager.BindRasterizer(rasterizer);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ReleaseChannel(Control::ChannelState& to_release) {
|
||||||
|
UNIMPLEMENTED();
|
||||||
|
}
|
||||||
|
|
||||||
/// Binds a renderer to the GPU.
|
/// Binds a renderer to the GPU.
|
||||||
void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
|
void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
|
||||||
renderer = std::move(renderer_);
|
renderer = std::move(renderer_);
|
||||||
rasterizer = renderer->ReadRasterizer();
|
rasterizer = renderer->ReadRasterizer();
|
||||||
|
host1x.MemoryManager().BindRasterizer(rasterizer);
|
||||||
memory_manager->BindRasterizer(rasterizer);
|
|
||||||
maxwell_3d->BindRasterizer(rasterizer);
|
|
||||||
fermi_2d->BindRasterizer(rasterizer);
|
|
||||||
kepler_compute->BindRasterizer(rasterizer);
|
|
||||||
kepler_memory->BindRasterizer(rasterizer);
|
|
||||||
maxwell_dma->BindRasterizer(rasterizer);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Calls a GPU method.
|
|
||||||
void CallMethod(const GPU::MethodCall& method_call) {
|
|
||||||
LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method,
|
|
||||||
method_call.subchannel);
|
|
||||||
|
|
||||||
ASSERT(method_call.subchannel < bound_engines.size());
|
|
||||||
|
|
||||||
if (ExecuteMethodOnEngine(method_call.method)) {
|
|
||||||
CallEngineMethod(method_call);
|
|
||||||
} else {
|
|
||||||
CallPullerMethod(method_call);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Calls a GPU multivalue method.
|
|
||||||
void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
|
|
||||||
u32 methods_pending) {
|
|
||||||
LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
|
|
||||||
|
|
||||||
ASSERT(subchannel < bound_engines.size());
|
|
||||||
|
|
||||||
if (ExecuteMethodOnEngine(method)) {
|
|
||||||
CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
|
|
||||||
} else {
|
|
||||||
for (std::size_t i = 0; i < amount; i++) {
|
|
||||||
CallPullerMethod(GPU::MethodCall{
|
|
||||||
method,
|
|
||||||
base_start[i],
|
|
||||||
subchannel,
|
|
||||||
methods_pending - static_cast<u32>(i),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Flush all current written commands into the host GPU for execution.
|
/// Flush all current written commands into the host GPU for execution.
|
||||||
|
@ -103,85 +93,82 @@ struct GPU::Impl {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Synchronizes CPU writes with Host GPU memory.
|
/// Synchronizes CPU writes with Host GPU memory.
|
||||||
void SyncGuestHost() {
|
void InvalidateGPUCache() {
|
||||||
rasterizer->SyncGuestHost();
|
rasterizer->InvalidateGPUCache();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Signal the ending of command list.
|
/// Signal the ending of command list.
|
||||||
void OnCommandListEnd() {
|
void OnCommandListEnd() {
|
||||||
if (is_async) {
|
|
||||||
// This command only applies to asynchronous GPU mode
|
|
||||||
gpu_thread.OnCommandListEnd();
|
gpu_thread.OnCommandListEnd();
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
/// Request a host GPU memory flush from the CPU.
|
/// Request a host GPU memory flush from the CPU.
|
||||||
[[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size) {
|
template <typename Func>
|
||||||
std::unique_lock lck{flush_request_mutex};
|
[[nodiscard]] u64 RequestSyncOperation(Func&& action) {
|
||||||
const u64 fence = ++last_flush_fence;
|
std::unique_lock lck{sync_request_mutex};
|
||||||
flush_requests.emplace_back(fence, addr, size);
|
const u64 fence = ++last_sync_fence;
|
||||||
|
sync_requests.emplace_back(action);
|
||||||
return fence;
|
return fence;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Obtains current flush request fence id.
|
/// Obtains current flush request fence id.
|
||||||
[[nodiscard]] u64 CurrentFlushRequestFence() const {
|
[[nodiscard]] u64 CurrentSyncRequestFence() const {
|
||||||
return current_flush_fence.load(std::memory_order_relaxed);
|
return current_sync_fence.load(std::memory_order_relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
void WaitForSyncOperation(const u64 fence) {
|
||||||
|
std::unique_lock lck{sync_request_mutex};
|
||||||
|
sync_request_cv.wait(lck, [this, fence] { return CurrentSyncRequestFence() >= fence; });
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Tick pending requests within the GPU.
|
/// Tick pending requests within the GPU.
|
||||||
void TickWork() {
|
void TickWork() {
|
||||||
std::unique_lock lck{flush_request_mutex};
|
std::unique_lock lck{sync_request_mutex};
|
||||||
while (!flush_requests.empty()) {
|
while (!sync_requests.empty()) {
|
||||||
auto& request = flush_requests.front();
|
auto request = std::move(sync_requests.front());
|
||||||
const u64 fence = request.fence;
|
sync_requests.pop_front();
|
||||||
const VAddr addr = request.addr;
|
sync_request_mutex.unlock();
|
||||||
const std::size_t size = request.size;
|
request();
|
||||||
flush_requests.pop_front();
|
current_sync_fence.fetch_add(1, std::memory_order_release);
|
||||||
flush_request_mutex.unlock();
|
sync_request_mutex.lock();
|
||||||
rasterizer->FlushRegion(addr, size);
|
sync_request_cv.notify_all();
|
||||||
current_flush_fence.store(fence);
|
|
||||||
flush_request_mutex.lock();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a reference to the Maxwell3D GPU engine.
|
/// Returns a reference to the Maxwell3D GPU engine.
|
||||||
[[nodiscard]] Engines::Maxwell3D& Maxwell3D() {
|
[[nodiscard]] Engines::Maxwell3D& Maxwell3D() {
|
||||||
return *maxwell_3d;
|
ASSERT(current_channel);
|
||||||
|
return *current_channel->maxwell_3d;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a const reference to the Maxwell3D GPU engine.
|
/// Returns a const reference to the Maxwell3D GPU engine.
|
||||||
[[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const {
|
[[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const {
|
||||||
return *maxwell_3d;
|
ASSERT(current_channel);
|
||||||
|
return *current_channel->maxwell_3d;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a reference to the KeplerCompute GPU engine.
|
/// Returns a reference to the KeplerCompute GPU engine.
|
||||||
[[nodiscard]] Engines::KeplerCompute& KeplerCompute() {
|
[[nodiscard]] Engines::KeplerCompute& KeplerCompute() {
|
||||||
return *kepler_compute;
|
ASSERT(current_channel);
|
||||||
|
return *current_channel->kepler_compute;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a reference to the KeplerCompute GPU engine.
|
/// Returns a reference to the KeplerCompute GPU engine.
|
||||||
[[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const {
|
[[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const {
|
||||||
return *kepler_compute;
|
ASSERT(current_channel);
|
||||||
}
|
return *current_channel->kepler_compute;
|
||||||
|
|
||||||
/// Returns a reference to the GPU memory manager.
|
|
||||||
[[nodiscard]] Tegra::MemoryManager& MemoryManager() {
|
|
||||||
return *memory_manager;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a const reference to the GPU memory manager.
|
|
||||||
[[nodiscard]] const Tegra::MemoryManager& MemoryManager() const {
|
|
||||||
return *memory_manager;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a reference to the GPU DMA pusher.
|
/// Returns a reference to the GPU DMA pusher.
|
||||||
[[nodiscard]] Tegra::DmaPusher& DmaPusher() {
|
[[nodiscard]] Tegra::DmaPusher& DmaPusher() {
|
||||||
return *dma_pusher;
|
ASSERT(current_channel);
|
||||||
|
return *current_channel->dma_pusher;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a const reference to the GPU DMA pusher.
|
/// Returns a const reference to the GPU DMA pusher.
|
||||||
[[nodiscard]] const Tegra::DmaPusher& DmaPusher() const {
|
[[nodiscard]] const Tegra::DmaPusher& DmaPusher() const {
|
||||||
return *dma_pusher;
|
ASSERT(current_channel);
|
||||||
|
return *current_channel->dma_pusher;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a reference to the underlying renderer.
|
/// Returns a reference to the underlying renderer.
|
||||||
|
@ -204,77 +191,6 @@ struct GPU::Impl {
|
||||||
return *shader_notify;
|
return *shader_notify;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
|
|
||||||
void WaitFence(u32 syncpoint_id, u32 value) {
|
|
||||||
// Synced GPU, is always in sync
|
|
||||||
if (!is_async) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (syncpoint_id == UINT32_MAX) {
|
|
||||||
// TODO: Research what this does.
|
|
||||||
LOG_ERROR(HW_GPU, "Waiting for syncpoint -1 not implemented");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
MICROPROFILE_SCOPE(GPU_wait);
|
|
||||||
std::unique_lock lock{sync_mutex};
|
|
||||||
sync_cv.wait(lock, [=, this] {
|
|
||||||
if (shutting_down.load(std::memory_order_relaxed)) {
|
|
||||||
// We're shutting down, ensure no threads continue to wait for the next syncpoint
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return syncpoints.at(syncpoint_id).load() >= value;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
void IncrementSyncPoint(u32 syncpoint_id) {
|
|
||||||
auto& syncpoint = syncpoints.at(syncpoint_id);
|
|
||||||
syncpoint++;
|
|
||||||
std::scoped_lock lock{sync_mutex};
|
|
||||||
sync_cv.notify_all();
|
|
||||||
auto& interrupt = syncpt_interrupts.at(syncpoint_id);
|
|
||||||
if (!interrupt.empty()) {
|
|
||||||
u32 value = syncpoint.load();
|
|
||||||
auto it = interrupt.begin();
|
|
||||||
while (it != interrupt.end()) {
|
|
||||||
if (value >= *it) {
|
|
||||||
TriggerCpuInterrupt(syncpoint_id, *it);
|
|
||||||
it = interrupt.erase(it);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
it++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
[[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const {
|
|
||||||
return syncpoints.at(syncpoint_id).load();
|
|
||||||
}
|
|
||||||
|
|
||||||
void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
|
|
||||||
std::scoped_lock lock{sync_mutex};
|
|
||||||
auto& interrupt = syncpt_interrupts.at(syncpoint_id);
|
|
||||||
bool contains = std::any_of(interrupt.begin(), interrupt.end(),
|
|
||||||
[value](u32 in_value) { return in_value == value; });
|
|
||||||
if (contains) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
interrupt.emplace_back(value);
|
|
||||||
}
|
|
||||||
|
|
||||||
[[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
|
|
||||||
std::scoped_lock lock{sync_mutex};
|
|
||||||
auto& interrupt = syncpt_interrupts.at(syncpoint_id);
|
|
||||||
const auto iter =
|
|
||||||
std::find_if(interrupt.begin(), interrupt.end(),
|
|
||||||
[value](u32 interrupt_value) { return value == interrupt_value; });
|
|
||||||
|
|
||||||
if (iter == interrupt.end()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
interrupt.erase(iter);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
[[nodiscard]] u64 GetTicks() const {
|
[[nodiscard]] u64 GetTicks() const {
|
||||||
// This values were reversed engineered by fincs from NVN
|
// This values were reversed engineered by fincs from NVN
|
||||||
// The gpu clock is reported in units of 385/625 nanoseconds
|
// The gpu clock is reported in units of 385/625 nanoseconds
|
||||||
|
@ -306,7 +222,7 @@ struct GPU::Impl {
|
||||||
/// This can be used to launch any necessary threads and register any necessary
|
/// This can be used to launch any necessary threads and register any necessary
|
||||||
/// core timing events.
|
/// core timing events.
|
||||||
void Start() {
|
void Start() {
|
||||||
gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher);
|
gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler);
|
||||||
cpu_context = renderer->GetRenderWindow().CreateSharedContext();
|
cpu_context = renderer->GetRenderWindow().CreateSharedContext();
|
||||||
cpu_context->MakeCurrent();
|
cpu_context->MakeCurrent();
|
||||||
}
|
}
|
||||||
|
@ -328,8 +244,8 @@ struct GPU::Impl {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Push GPU command entries to be processed
|
/// Push GPU command entries to be processed
|
||||||
void PushGPUEntries(Tegra::CommandList&& entries) {
|
void PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
|
||||||
gpu_thread.SubmitList(std::move(entries));
|
gpu_thread.SubmitList(channel, std::move(entries));
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Push GPU command buffer entries to be processed
|
/// Push GPU command buffer entries to be processed
|
||||||
|
@ -339,7 +255,7 @@ struct GPU::Impl {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!cdma_pushers.contains(id)) {
|
if (!cdma_pushers.contains(id)) {
|
||||||
cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(gpu));
|
cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(host1x));
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubmitCommandBuffer would make the nvdec operations async, this is not currently working
|
// SubmitCommandBuffer would make the nvdec operations async, this is not currently working
|
||||||
|
@ -376,308 +292,55 @@ struct GPU::Impl {
|
||||||
gpu_thread.FlushAndInvalidateRegion(addr, size);
|
gpu_thread.FlushAndInvalidateRegion(addr, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const {
|
void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
|
||||||
auto& interrupt_manager = system.InterruptManager();
|
std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) {
|
||||||
interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value);
|
size_t current_request_counter{};
|
||||||
}
|
{
|
||||||
|
std::unique_lock<std::mutex> lk(request_swap_mutex);
|
||||||
void ProcessBindMethod(const GPU::MethodCall& method_call) {
|
if (free_swap_counters.empty()) {
|
||||||
// Bind the current subchannel to the desired engine id.
|
current_request_counter = request_swap_counters.size();
|
||||||
LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
|
request_swap_counters.emplace_back(num_fences);
|
||||||
method_call.argument);
|
|
||||||
const auto engine_id = static_cast<EngineID>(method_call.argument);
|
|
||||||
bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id);
|
|
||||||
switch (engine_id) {
|
|
||||||
case EngineID::FERMI_TWOD_A:
|
|
||||||
dma_pusher->BindSubchannel(fermi_2d.get(), method_call.subchannel);
|
|
||||||
break;
|
|
||||||
case EngineID::MAXWELL_B:
|
|
||||||
dma_pusher->BindSubchannel(maxwell_3d.get(), method_call.subchannel);
|
|
||||||
break;
|
|
||||||
case EngineID::KEPLER_COMPUTE_B:
|
|
||||||
dma_pusher->BindSubchannel(kepler_compute.get(), method_call.subchannel);
|
|
||||||
break;
|
|
||||||
case EngineID::MAXWELL_DMA_COPY_A:
|
|
||||||
dma_pusher->BindSubchannel(maxwell_dma.get(), method_call.subchannel);
|
|
||||||
break;
|
|
||||||
case EngineID::KEPLER_INLINE_TO_MEMORY_B:
|
|
||||||
dma_pusher->BindSubchannel(kepler_memory.get(), method_call.subchannel);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ProcessFenceActionMethod() {
|
|
||||||
switch (regs.fence_action.op) {
|
|
||||||
case GPU::FenceOperation::Acquire:
|
|
||||||
WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
|
|
||||||
break;
|
|
||||||
case GPU::FenceOperation::Increment:
|
|
||||||
IncrementSyncPoint(regs.fence_action.syncpoint_id);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void ProcessWaitForInterruptMethod() {
|
|
||||||
// TODO(bunnei) ImplementMe
|
|
||||||
LOG_WARNING(HW_GPU, "(STUBBED) called");
|
|
||||||
}
|
|
||||||
|
|
||||||
void ProcessSemaphoreTriggerMethod() {
|
|
||||||
const auto semaphoreOperationMask = 0xF;
|
|
||||||
const auto op =
|
|
||||||
static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask);
|
|
||||||
if (op == GpuSemaphoreOperation::WriteLong) {
|
|
||||||
struct Block {
|
|
||||||
u32 sequence;
|
|
||||||
u32 zeros = 0;
|
|
||||||
u64 timestamp;
|
|
||||||
};
|
|
||||||
|
|
||||||
Block block{};
|
|
||||||
block.sequence = regs.semaphore_sequence;
|
|
||||||
// TODO(Kmather73): Generate a real GPU timestamp and write it here instead of
|
|
||||||
// CoreTiming
|
|
||||||
block.timestamp = GetTicks();
|
|
||||||
memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block,
|
|
||||||
sizeof(block));
|
|
||||||
} else {
|
} else {
|
||||||
const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())};
|
current_request_counter = free_swap_counters.front();
|
||||||
if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) ||
|
request_swap_counters[current_request_counter] = num_fences;
|
||||||
(op == GpuSemaphoreOperation::AcquireGequal &&
|
free_swap_counters.pop_front();
|
||||||
static_cast<s32>(word - regs.semaphore_sequence) > 0) ||
|
|
||||||
(op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) {
|
|
||||||
// Nothing to do in this case
|
|
||||||
} else {
|
|
||||||
regs.acquire_source = true;
|
|
||||||
regs.acquire_value = regs.semaphore_sequence;
|
|
||||||
if (op == GpuSemaphoreOperation::AcquireEqual) {
|
|
||||||
regs.acquire_active = true;
|
|
||||||
regs.acquire_mode = false;
|
|
||||||
} else if (op == GpuSemaphoreOperation::AcquireGequal) {
|
|
||||||
regs.acquire_active = true;
|
|
||||||
regs.acquire_mode = true;
|
|
||||||
} else if (op == GpuSemaphoreOperation::AcquireMask) {
|
|
||||||
// TODO(kemathe) The acquire mask operation waits for a value that, ANDed with
|
|
||||||
// semaphore_sequence, gives a non-0 result
|
|
||||||
LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented");
|
|
||||||
} else {
|
|
||||||
LOG_ERROR(HW_GPU, "Invalid semaphore operation");
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
const auto wait_fence =
|
||||||
|
RequestSyncOperation([this, current_request_counter, framebuffer, fences, num_fences] {
|
||||||
|
auto& syncpoint_manager = host1x.GetSyncpointManager();
|
||||||
|
if (num_fences == 0) {
|
||||||
|
renderer->SwapBuffers(framebuffer);
|
||||||
}
|
}
|
||||||
|
const auto executer = [this, current_request_counter,
|
||||||
|
framebuffer_copy = *framebuffer]() {
|
||||||
|
{
|
||||||
|
std::unique_lock<std::mutex> lk(request_swap_mutex);
|
||||||
|
if (--request_swap_counters[current_request_counter] != 0) {
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
free_swap_counters.push_back(current_request_counter);
|
||||||
void ProcessSemaphoreRelease() {
|
|
||||||
memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(),
|
|
||||||
regs.semaphore_release);
|
|
||||||
}
|
}
|
||||||
|
renderer->SwapBuffers(&framebuffer_copy);
|
||||||
void ProcessSemaphoreAcquire() {
|
|
||||||
const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress());
|
|
||||||
const auto value = regs.semaphore_acquire;
|
|
||||||
if (word != value) {
|
|
||||||
regs.acquire_active = true;
|
|
||||||
regs.acquire_value = value;
|
|
||||||
// TODO(kemathe73) figure out how to do the acquire_timeout
|
|
||||||
regs.acquire_mode = false;
|
|
||||||
regs.acquire_source = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Calls a GPU puller method.
|
|
||||||
void CallPullerMethod(const GPU::MethodCall& method_call) {
|
|
||||||
regs.reg_array[method_call.method] = method_call.argument;
|
|
||||||
const auto method = static_cast<BufferMethods>(method_call.method);
|
|
||||||
|
|
||||||
switch (method) {
|
|
||||||
case BufferMethods::BindObject: {
|
|
||||||
ProcessBindMethod(method_call);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case BufferMethods::Nop:
|
|
||||||
case BufferMethods::SemaphoreAddressHigh:
|
|
||||||
case BufferMethods::SemaphoreAddressLow:
|
|
||||||
case BufferMethods::SemaphoreSequence:
|
|
||||||
break;
|
|
||||||
case BufferMethods::UnkCacheFlush:
|
|
||||||
rasterizer->SyncGuestHost();
|
|
||||||
break;
|
|
||||||
case BufferMethods::WrcacheFlush:
|
|
||||||
rasterizer->SignalReference();
|
|
||||||
break;
|
|
||||||
case BufferMethods::FenceValue:
|
|
||||||
break;
|
|
||||||
case BufferMethods::RefCnt:
|
|
||||||
rasterizer->SignalReference();
|
|
||||||
break;
|
|
||||||
case BufferMethods::FenceAction:
|
|
||||||
ProcessFenceActionMethod();
|
|
||||||
break;
|
|
||||||
case BufferMethods::WaitForInterrupt:
|
|
||||||
rasterizer->WaitForIdle();
|
|
||||||
break;
|
|
||||||
case BufferMethods::SemaphoreTrigger: {
|
|
||||||
ProcessSemaphoreTriggerMethod();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case BufferMethods::NotifyIntr: {
|
|
||||||
// TODO(Kmather73): Research and implement this method.
|
|
||||||
LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case BufferMethods::Unk28: {
|
|
||||||
// TODO(Kmather73): Research and implement this method.
|
|
||||||
LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case BufferMethods::SemaphoreAcquire: {
|
|
||||||
ProcessSemaphoreAcquire();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case BufferMethods::SemaphoreRelease: {
|
|
||||||
ProcessSemaphoreRelease();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
case BufferMethods::Yield: {
|
|
||||||
// TODO(Kmather73): Research and implement this method.
|
|
||||||
LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Calls a GPU engine method.
|
|
||||||
void CallEngineMethod(const GPU::MethodCall& method_call) {
|
|
||||||
const EngineID engine = bound_engines[method_call.subchannel];
|
|
||||||
|
|
||||||
switch (engine) {
|
|
||||||
case EngineID::FERMI_TWOD_A:
|
|
||||||
fermi_2d->CallMethod(method_call.method, method_call.argument,
|
|
||||||
method_call.IsLastCall());
|
|
||||||
break;
|
|
||||||
case EngineID::MAXWELL_B:
|
|
||||||
maxwell_3d->CallMethod(method_call.method, method_call.argument,
|
|
||||||
method_call.IsLastCall());
|
|
||||||
break;
|
|
||||||
case EngineID::KEPLER_COMPUTE_B:
|
|
||||||
kepler_compute->CallMethod(method_call.method, method_call.argument,
|
|
||||||
method_call.IsLastCall());
|
|
||||||
break;
|
|
||||||
case EngineID::MAXWELL_DMA_COPY_A:
|
|
||||||
maxwell_dma->CallMethod(method_call.method, method_call.argument,
|
|
||||||
method_call.IsLastCall());
|
|
||||||
break;
|
|
||||||
case EngineID::KEPLER_INLINE_TO_MEMORY_B:
|
|
||||||
kepler_memory->CallMethod(method_call.method, method_call.argument,
|
|
||||||
method_call.IsLastCall());
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
UNIMPLEMENTED_MSG("Unimplemented engine");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Calls a GPU engine multivalue method.
|
|
||||||
void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
|
|
||||||
u32 methods_pending) {
|
|
||||||
const EngineID engine = bound_engines[subchannel];
|
|
||||||
|
|
||||||
switch (engine) {
|
|
||||||
case EngineID::FERMI_TWOD_A:
|
|
||||||
fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
|
|
||||||
break;
|
|
||||||
case EngineID::MAXWELL_B:
|
|
||||||
maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
|
|
||||||
break;
|
|
||||||
case EngineID::KEPLER_COMPUTE_B:
|
|
||||||
kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
|
|
||||||
break;
|
|
||||||
case EngineID::MAXWELL_DMA_COPY_A:
|
|
||||||
maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
|
|
||||||
break;
|
|
||||||
case EngineID::KEPLER_INLINE_TO_MEMORY_B:
|
|
||||||
kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
UNIMPLEMENTED_MSG("Unimplemented engine");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Determines where the method should be executed.
|
|
||||||
[[nodiscard]] bool ExecuteMethodOnEngine(u32 method) {
|
|
||||||
const auto buffer_method = static_cast<BufferMethods>(method);
|
|
||||||
return buffer_method >= BufferMethods::NonPullerMethods;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct Regs {
|
|
||||||
static constexpr size_t NUM_REGS = 0x40;
|
|
||||||
|
|
||||||
union {
|
|
||||||
struct {
|
|
||||||
INSERT_PADDING_WORDS_NOINIT(0x4);
|
|
||||||
struct {
|
|
||||||
u32 address_high;
|
|
||||||
u32 address_low;
|
|
||||||
|
|
||||||
[[nodiscard]] GPUVAddr SemaphoreAddress() const {
|
|
||||||
return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
|
|
||||||
address_low);
|
|
||||||
}
|
|
||||||
} semaphore_address;
|
|
||||||
|
|
||||||
u32 semaphore_sequence;
|
|
||||||
u32 semaphore_trigger;
|
|
||||||
INSERT_PADDING_WORDS_NOINIT(0xC);
|
|
||||||
|
|
||||||
// The pusher and the puller share the reference counter, the pusher only has read
|
|
||||||
// access
|
|
||||||
u32 reference_count;
|
|
||||||
INSERT_PADDING_WORDS_NOINIT(0x5);
|
|
||||||
|
|
||||||
u32 semaphore_acquire;
|
|
||||||
u32 semaphore_release;
|
|
||||||
u32 fence_value;
|
|
||||||
GPU::FenceAction fence_action;
|
|
||||||
INSERT_PADDING_WORDS_NOINIT(0xE2);
|
|
||||||
|
|
||||||
// Puller state
|
|
||||||
u32 acquire_mode;
|
|
||||||
u32 acquire_source;
|
|
||||||
u32 acquire_active;
|
|
||||||
u32 acquire_timeout;
|
|
||||||
u32 acquire_value;
|
|
||||||
};
|
};
|
||||||
std::array<u32, NUM_REGS> reg_array;
|
for (size_t i = 0; i < num_fences; i++) {
|
||||||
};
|
syncpoint_manager.RegisterGuestAction(fences[i].id, fences[i].value, executer);
|
||||||
} regs{};
|
}
|
||||||
|
});
|
||||||
|
gpu_thread.TickGPU();
|
||||||
|
WaitForSyncOperation(wait_fence);
|
||||||
|
}
|
||||||
|
|
||||||
GPU& gpu;
|
GPU& gpu;
|
||||||
Core::System& system;
|
Core::System& system;
|
||||||
std::unique_ptr<Tegra::MemoryManager> memory_manager;
|
Host1x::Host1x& host1x;
|
||||||
std::unique_ptr<Tegra::DmaPusher> dma_pusher;
|
|
||||||
std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers;
|
std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers;
|
||||||
std::unique_ptr<VideoCore::RendererBase> renderer;
|
std::unique_ptr<VideoCore::RendererBase> renderer;
|
||||||
VideoCore::RasterizerInterface* rasterizer = nullptr;
|
VideoCore::RasterizerInterface* rasterizer = nullptr;
|
||||||
const bool use_nvdec;
|
const bool use_nvdec;
|
||||||
|
|
||||||
/// Mapping of command subchannels to their bound engine ids
|
s32 new_channel_id{1};
|
||||||
std::array<EngineID, 8> bound_engines{};
|
|
||||||
/// 3D engine
|
|
||||||
std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
|
|
||||||
/// 2D engine
|
|
||||||
std::unique_ptr<Engines::Fermi2D> fermi_2d;
|
|
||||||
/// Compute engine
|
|
||||||
std::unique_ptr<Engines::KeplerCompute> kepler_compute;
|
|
||||||
/// DMA engine
|
|
||||||
std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
|
|
||||||
/// Inline memory engine
|
|
||||||
std::unique_ptr<Engines::KeplerMemory> kepler_memory;
|
|
||||||
/// Shader build notifier
|
/// Shader build notifier
|
||||||
std::unique_ptr<VideoCore::ShaderNotify> shader_notify;
|
std::unique_ptr<VideoCore::ShaderNotify> shader_notify;
|
||||||
/// When true, we are about to shut down emulation session, so terminate outstanding tasks
|
/// When true, we are about to shut down emulation session, so terminate outstanding tasks
|
||||||
|
@@ -692,51 +355,25 @@ struct GPU::Impl {

     std::condition_variable sync_cv;

-    struct FlushRequest {
-        explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_)
-            : fence{fence_}, addr{addr_}, size{size_} {}
-        u64 fence;
-        VAddr addr;
-        std::size_t size;
-    };
-
-    std::list<FlushRequest> flush_requests;
-    std::atomic<u64> current_flush_fence{};
-    u64 last_flush_fence{};
-    std::mutex flush_request_mutex;
+    std::list<std::function<void(void)>> sync_requests;
+    std::atomic<u64> current_sync_fence{};
+    u64 last_sync_fence{};
+    std::mutex sync_request_mutex;
+    std::condition_variable sync_request_cv;

     const bool is_async;

     VideoCommon::GPUThread::ThreadManager gpu_thread;
     std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context;

-#define ASSERT_REG_POSITION(field_name, position) \
-    static_assert(offsetof(Regs, field_name) == position * 4, \
-                  "Field " #field_name " has invalid position")
-
-    ASSERT_REG_POSITION(semaphore_address, 0x4);
-    ASSERT_REG_POSITION(semaphore_sequence, 0x6);
-    ASSERT_REG_POSITION(semaphore_trigger, 0x7);
-    ASSERT_REG_POSITION(reference_count, 0x14);
-    ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
-    ASSERT_REG_POSITION(semaphore_release, 0x1B);
-    ASSERT_REG_POSITION(fence_value, 0x1C);
-    ASSERT_REG_POSITION(fence_action, 0x1D);
-
-    ASSERT_REG_POSITION(acquire_mode, 0x100);
-    ASSERT_REG_POSITION(acquire_source, 0x101);
-    ASSERT_REG_POSITION(acquire_active, 0x102);
-    ASSERT_REG_POSITION(acquire_timeout, 0x103);
-    ASSERT_REG_POSITION(acquire_value, 0x104);
-
-#undef ASSERT_REG_POSITION
-
-    enum class GpuSemaphoreOperation {
-        AcquireEqual = 0x1,
-        WriteLong = 0x2,
-        AcquireGequal = 0x4,
-        AcquireMask = 0x8,
-    };
+    std::unique_ptr<Tegra::Control::Scheduler> scheduler;
+    std::unordered_map<s32, std::shared_ptr<Tegra::Control::ChannelState>> channels;
+    Tegra::Control::ChannelState* current_channel;
+    s32 bound_channel{-1};
+
+    std::deque<size_t> free_swap_counters;
+    std::deque<size_t> request_swap_counters;
+    std::mutex request_swap_mutex;
 };

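The flush-specific plumbing above collapses into a generic fence-plus-callback queue. Below is a minimal sketch of how members like sync_requests, current_sync_fence, sync_request_mutex and sync_request_cv plausibly cooperate; the class and method names (SyncRequestQueue, Submit, Tick, WaitFor) are invented for illustration and are not part of this commit.

// Hypothetical, self-contained sketch of the fence/callback pattern implied
// by the members above. Names here are illustrative only.
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <list>
#include <mutex>

class SyncRequestQueue {
public:
    // Producer side: enqueue a callback and hand back the fence value that
    // identifies its completion.
    uint64_t Submit(std::function<void()> action) {
        std::scoped_lock lock{mutex};
        const uint64_t fence = ++current_fence;
        requests.emplace_back(std::move(action));
        return fence;
    }

    // GPU-thread side: drain pending callbacks, then publish the fence value
    // that was current when the queue was drained.
    void Tick() {
        std::list<std::function<void()>> to_run;
        uint64_t served;
        {
            std::scoped_lock lock{mutex};
            to_run.swap(requests);
            served = current_fence.load();
        }
        for (auto& action : to_run) {
            action();
        }
        {
            std::scoped_lock lock{mutex};
            last_fence = std::max(last_fence, served);
        }
        cv.notify_all();
    }

    // Caller side: block until the fence obtained from Submit has been served.
    void WaitFor(uint64_t fence) {
        std::unique_lock lock{mutex};
        cv.wait(lock, [&] { return last_fence >= fence; });
    }

private:
    std::list<std::function<void()>> requests;
    std::atomic<uint64_t> current_fence{};
    uint64_t last_fence{};
    std::mutex mutex;
    std::condition_variable cv;
};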
 GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
@@ -744,25 +381,36 @@ GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)

 GPU::~GPU() = default;

+std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() {
+    return impl->AllocateChannel();
+}
+
+void GPU::InitChannel(Control::ChannelState& to_init) {
+    impl->InitChannel(to_init);
+}
+
+void GPU::BindChannel(s32 channel_id) {
+    impl->BindChannel(channel_id);
+}
+
+void GPU::ReleaseChannel(Control::ChannelState& to_release) {
+    impl->ReleaseChannel(to_release);
+}
+
+void GPU::InitAddressSpace(Tegra::MemoryManager& memory_manager) {
+    impl->InitAddressSpace(memory_manager);
+}
+
 void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) {
     impl->BindRenderer(std::move(renderer));
 }

-void GPU::CallMethod(const MethodCall& method_call) {
-    impl->CallMethod(method_call);
-}
-
-void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
-                          u32 methods_pending) {
-    impl->CallMultiMethod(method, subchannel, base_start, amount, methods_pending);
-}
-
 void GPU::FlushCommands() {
     impl->FlushCommands();
 }

-void GPU::SyncGuestHost() {
-    impl->SyncGuestHost();
+void GPU::InvalidateGPUCache() {
+    impl->InvalidateGPUCache();
 }

 void GPU::OnCommandListEnd() {
@@ -770,17 +418,32 @@ void GPU::OnCommandListEnd() {
 }

 u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
-    return impl->RequestFlush(addr, size);
+    return impl->RequestSyncOperation(
+        [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); });
 }

-u64 GPU::CurrentFlushRequestFence() const {
-    return impl->CurrentFlushRequestFence();
+u64 GPU::CurrentSyncRequestFence() const {
+    return impl->CurrentSyncRequestFence();
+}
+
+void GPU::WaitForSyncOperation(u64 fence) {
+    return impl->WaitForSyncOperation(fence);
 }

 void GPU::TickWork() {
     impl->TickWork();
 }

+/// Gets a mutable reference to the Host1x interface
+Host1x::Host1x& GPU::Host1x() {
+    return impl->host1x;
+}
+
+/// Gets an immutable reference to the Host1x interface.
+const Host1x::Host1x& GPU::Host1x() const {
+    return impl->host1x;
+}
+
 Engines::Maxwell3D& GPU::Maxwell3D() {
     return impl->Maxwell3D();
 }
@@ -797,14 +460,6 @@ const Engines::KeplerCompute& GPU::KeplerCompute() const {
     return impl->KeplerCompute();
 }

-Tegra::MemoryManager& GPU::MemoryManager() {
-    return impl->MemoryManager();
-}
-
-const Tegra::MemoryManager& GPU::MemoryManager() const {
-    return impl->MemoryManager();
-}
-
 Tegra::DmaPusher& GPU::DmaPusher() {
     return impl->DmaPusher();
 }
@@ -829,24 +484,9 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const {
     return impl->ShaderNotify();
 }

-void GPU::WaitFence(u32 syncpoint_id, u32 value) {
-    impl->WaitFence(syncpoint_id, value);
-}
-
-void GPU::IncrementSyncPoint(u32 syncpoint_id) {
-    impl->IncrementSyncPoint(syncpoint_id);
-}
-
-u32 GPU::GetSyncpointValue(u32 syncpoint_id) const {
-    return impl->GetSyncpointValue(syncpoint_id);
-}
-
-void GPU::RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
-    impl->RegisterSyncptInterrupt(syncpoint_id, value);
-}
-
-bool GPU::CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
-    return impl->CancelSyncptInterrupt(syncpoint_id, value);
+void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+                             std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) {
+    impl->RequestSwapBuffers(framebuffer, fences, num_fences);
 }

 u64 GPU::GetTicks() const {
@@ -881,8 +521,8 @@ void GPU::ReleaseContext() {
     impl->ReleaseContext();
 }

-void GPU::PushGPUEntries(Tegra::CommandList&& entries) {
-    impl->PushGPUEntries(std::move(entries));
+void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
+    impl->PushGPUEntries(channel, std::move(entries));
 }

 void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) {
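Caller-side, the reworked flush path pairs the fence returned by RequestFlush with WaitForSyncOperation, which is exactly what ThreadManager::FlushRegion does further down in this commit. A small sketch; the free function FlushAndWait is hypothetical and only restates that pairing:

// Sketch only. Both RequestFlush and WaitForSyncOperation are declared in
// video_core/gpu.h above; VAddr and u64 come from common/common_types.h.
void FlushAndWait(Tegra::GPU& gpu, VAddr addr, std::size_t size) {
    const u64 fence = gpu.RequestFlush(addr, size); // enqueue the flush callback
    gpu.WaitForSyncOperation(fence);                // block until it has run
}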
@@ -89,73 +89,58 @@ class Maxwell3D;
 class KeplerCompute;
 } // namespace Engines

-enum class EngineID {
-    FERMI_TWOD_A = 0x902D, // 2D Engine
-    MAXWELL_B = 0xB197,    // 3D Engine
-    KEPLER_COMPUTE_B = 0xB1C0,
-    KEPLER_INLINE_TO_MEMORY_B = 0xA140,
-    MAXWELL_DMA_COPY_A = 0xB0B5,
-};
+namespace Control {
+struct ChannelState;
+}
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x

 class MemoryManager;

 class GPU final {
 public:
-    struct MethodCall {
-        u32 method{};
-        u32 argument{};
-        u32 subchannel{};
-        u32 method_count{};
-
-        explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
-            : method(method_), argument(argument_), subchannel(subchannel_),
-              method_count(method_count_) {}
-
-        [[nodiscard]] bool IsLastCall() const {
-            return method_count <= 1;
-        }
-    };
-
-    enum class FenceOperation : u32 {
-        Acquire = 0,
-        Increment = 1,
-    };
-
-    union FenceAction {
-        u32 raw;
-        BitField<0, 1, FenceOperation> op;
-        BitField<8, 24, u32> syncpoint_id;
-    };
-
     explicit GPU(Core::System& system, bool is_async, bool use_nvdec);
     ~GPU();

     /// Binds a renderer to the GPU.
     void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer);

-    /// Calls a GPU method.
-    void CallMethod(const MethodCall& method_call);
-
-    /// Calls a GPU multivalue method.
-    void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
-                         u32 methods_pending);
-
     /// Flush all current written commands into the host GPU for execution.
     void FlushCommands();
     /// Synchronizes CPU writes with Host GPU memory.
-    void SyncGuestHost();
+    void InvalidateGPUCache();
     /// Signal the ending of command list.
     void OnCommandListEnd();

+    std::shared_ptr<Control::ChannelState> AllocateChannel();
+
+    void InitChannel(Control::ChannelState& to_init);
+
+    void BindChannel(s32 channel_id);
+
+    void ReleaseChannel(Control::ChannelState& to_release);
+
+    void InitAddressSpace(Tegra::MemoryManager& memory_manager);
+
     /// Request a host GPU memory flush from the CPU.
     [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);

     /// Obtains current flush request fence id.
-    [[nodiscard]] u64 CurrentFlushRequestFence() const;
+    [[nodiscard]] u64 CurrentSyncRequestFence() const;
+
+    void WaitForSyncOperation(u64 fence);

     /// Tick pending requests within the GPU.
     void TickWork();

+    /// Gets a mutable reference to the Host1x interface
+    [[nodiscard]] Host1x::Host1x& Host1x();
+
+    /// Gets an immutable reference to the Host1x interface.
+    [[nodiscard]] const Host1x::Host1x& Host1x() const;
+
     /// Returns a reference to the Maxwell3D GPU engine.
     [[nodiscard]] Engines::Maxwell3D& Maxwell3D();

@@ -168,12 +153,6 @@ public:
     /// Returns a reference to the KeplerCompute GPU engine.
     [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const;

-    /// Returns a reference to the GPU memory manager.
-    [[nodiscard]] Tegra::MemoryManager& MemoryManager();
-
-    /// Returns a const reference to the GPU memory manager.
-    [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const;
-
     /// Returns a reference to the GPU DMA pusher.
     [[nodiscard]] Tegra::DmaPusher& DmaPusher();

@@ -192,17 +171,6 @@ public:
     /// Returns a const reference to the shader notifier.
     [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;

-    /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value);
-
-    void IncrementSyncPoint(u32 syncpoint_id);
-
-    [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const;
-
-    void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);
-
-    [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value);
-
     [[nodiscard]] u64 GetTicks() const;

     [[nodiscard]] bool IsAsync() const;
@@ -211,6 +179,9 @@ public:

     void RendererFrameEndNotify();

+    void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+                            std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences);
+
     /// Performs any additional setup necessary in order to begin GPU emulation.
     /// This can be used to launch any necessary threads and register any necessary
     /// core timing events.
@@ -226,7 +197,7 @@ public:
     void ReleaseContext();

     /// Push GPU command entries to be processed
-    void PushGPUEntries(Tegra::CommandList&& entries);
+    void PushGPUEntries(s32 channel, Tegra::CommandList&& entries);

     /// Push GPU command buffer entries to be processed
     void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries);
@@ -248,7 +219,7 @@ public:

 private:
     struct Impl;
-    std::unique_ptr<Impl> impl;
+    mutable std::unique_ptr<Impl> impl;
 };

 } // namespace Tegra
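A hedged usage sketch of the channel surface declared above. The call ordering is inferred from the declarations; SubmitWork is a hypothetical caller, and channel creation via AllocateChannel/InitChannel is assumed to have happened at device-open time:

// Sketch only: submit a command list against an already-initialized channel.
// BindChannel and PushGPUEntries are declared in video_core/gpu.h above.
void SubmitWork(Tegra::GPU& gpu, s32 channel_id, Tegra::CommandList&& entries) {
    gpu.BindChannel(channel_id);                        // make the channel current
    gpu.PushGPUEntries(channel_id, std::move(entries)); // route the list to its channel
}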
@@ -8,6 +8,7 @@
 #include "common/thread.h"
 #include "core/core.h"
 #include "core/frontend/emu_window.h"
+#include "video_core/control/scheduler.h"
 #include "video_core/dma_pusher.h"
 #include "video_core/gpu.h"
 #include "video_core/gpu_thread.h"
@@ -18,7 +19,7 @@ namespace VideoCommon::GPUThread {
 /// Runs the GPU thread
 static void RunThread(std::stop_token stop_token, Core::System& system,
                       VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
-                      Tegra::DmaPusher& dma_pusher, SynchState& state) {
+                      Tegra::Control::Scheduler& scheduler, SynchState& state) {
     std::string name = "yuzu:GPU";
     MicroProfileOnThreadCreate(name.c_str());
     SCOPE_EXIT({ MicroProfileOnThreadExit(); });
@@ -37,8 +38,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
             break;
         }
         if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
-            dma_pusher.Push(std::move(submit_list->entries));
-            dma_pusher.DispatchCalls();
+            scheduler.Push(submit_list->channel, std::move(submit_list->entries));
         } else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) {
             renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
         } else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) {
@@ -69,14 +69,14 @@ ThreadManager::~ThreadManager() = default;

 void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
                                 Core::Frontend::GraphicsContext& context,
-                                Tegra::DmaPusher& dma_pusher) {
+                                Tegra::Control::Scheduler& scheduler) {
     rasterizer = renderer.ReadRasterizer();
     thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context),
-                          std::ref(dma_pusher), std::ref(state));
+                          std::ref(scheduler), std::ref(state));
 }

-void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
-    PushCommand(SubmitListCommand(std::move(entries)));
+void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) {
+    PushCommand(SubmitListCommand(channel, std::move(entries)));
 }

 void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
@@ -94,8 +94,12 @@ void ThreadManager::FlushRegion(VAddr addr, u64 size) {
     }
     auto& gpu = system.GPU();
     u64 fence = gpu.RequestFlush(addr, size);
-    PushCommand(GPUTickCommand(), true);
-    ASSERT(fence <= gpu.CurrentFlushRequestFence());
+    TickGPU();
+    gpu.WaitForSyncOperation(fence);
+}
+
+void ThreadManager::TickGPU() {
+    PushCommand(GPUTickCommand());
 }

 void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
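The single dma_pusher argument is replaced by a per-channel scheduler. What Scheduler::Push does internally is not part of this hunk (it lives in video_core/control/scheduler.cpp); a speculative, self-contained sketch of the routing it implies, with all names invented:

// Hypothetical mini-scheduler: one DMA pusher per channel id, submissions
// routed by id and serialized by a lock. This is an illustration of the
// shape of the change, not the real Tegra::Control::Scheduler.
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <utility>

using s32 = std::int32_t;

template <typename CommandList, typename DmaPusher>
class MiniScheduler {
public:
    void DeclareChannel(s32 channel_id, std::shared_ptr<DmaPusher> pusher) {
        std::scoped_lock lock{mutex};
        channels.emplace(channel_id, std::move(pusher));
    }

    // Route a command list to the pusher owned by the target channel and
    // dispatch it while holding the scheduler lock.
    void Push(s32 channel_id, CommandList&& entries) {
        std::scoped_lock lock{mutex};
        auto& pusher = channels.at(channel_id);
        pusher->Push(std::move(entries));
        pusher->DispatchCalls();
    }

private:
    std::mutex mutex;
    std::map<s32, std::shared_ptr<DmaPusher>> channels;
};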
@@ -15,7 +15,9 @@

 namespace Tegra {
 struct FramebufferConfig;
-class DmaPusher;
+namespace Control {
+class Scheduler;
+}
 } // namespace Tegra

 namespace Core {
@@ -34,8 +36,10 @@ namespace VideoCommon::GPUThread {

 /// Command to signal to the GPU thread that a command list is ready for processing
 struct SubmitListCommand final {
-    explicit SubmitListCommand(Tegra::CommandList&& entries_) : entries{std::move(entries_)} {}
+    explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_)
+        : channel{channel_}, entries{std::move(entries_)} {}

+    s32 channel;
     Tegra::CommandList entries;
 };

@@ -112,10 +116,10 @@ public:

     /// Creates and starts the GPU thread.
     void StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
-                     Tegra::DmaPusher& dma_pusher);
+                     Tegra::Control::Scheduler& scheduler);

     /// Push GPU command entries to be processed
-    void SubmitList(Tegra::CommandList&& entries);
+    void SubmitList(s32 channel, Tegra::CommandList&& entries);

     /// Swap buffers (render frame)
     void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
@@ -131,6 +135,8 @@ public:

     void OnCommandListEnd();

+    void TickGPU();
+
 private:
     /// Pushes a command to be executed by the GPU thread
     u64 PushCommand(CommandData&& command_data, bool block = false);
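RunThread above relies on the C++20 std::jthread/std::stop_token lifetime pattern: the jthread passes a stop token to its callable, and its destructor requests stop and joins automatically. A reduced illustration (Worker is a made-up name):

// Minimal sketch of the jthread pattern ThreadManager uses. The loop body
// stands in for popping commands from SynchState.
#include <stop_token>
#include <thread>

struct Worker {
    Worker() {
        thread = std::jthread([](std::stop_token stop_token) {
            while (!stop_token.stop_requested()) {
                // ... pop one command from the shared queue and execute it ...
            }
        });
    }
    // ~Worker() implicitly calls thread.request_stop() and then joins.
    std::jthread thread;
};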
src/video_core/host1x/codecs/codec.cpp (new executable file, 310 lines)
@@ -0,0 +1,310 @@
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <algorithm>
+#include <fstream>
+#include <vector>
+#include "common/assert.h"
+#include "common/settings.h"
+#include "video_core/host1x/codecs/codec.h"
+#include "video_core/host1x/codecs/h264.h"
+#include "video_core/host1x/codecs/vp8.h"
+#include "video_core/host1x/codecs/vp9.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/memory_manager.h"
+
+extern "C" {
+#include <libavutil/opt.h>
+#ifdef LIBVA_FOUND
+// for querying VAAPI driver information
+#include <libavutil/hwcontext_vaapi.h>
+#endif
+}
+
+namespace Tegra {
+namespace {
+constexpr AVPixelFormat PREFERRED_GPU_FMT = AV_PIX_FMT_NV12;
+constexpr AVPixelFormat PREFERRED_CPU_FMT = AV_PIX_FMT_YUV420P;
+constexpr std::array PREFERRED_GPU_DECODERS = {
+    AV_HWDEVICE_TYPE_CUDA,
+#ifdef _WIN32
+    AV_HWDEVICE_TYPE_D3D11VA,
+    AV_HWDEVICE_TYPE_DXVA2,
+#elif defined(__unix__)
+    AV_HWDEVICE_TYPE_VAAPI,
+    AV_HWDEVICE_TYPE_VDPAU,
+#endif
+    // last resort for Linux Flatpak (w/ NVIDIA)
+    AV_HWDEVICE_TYPE_VULKAN,
+};
+
+void AVPacketDeleter(AVPacket* ptr) {
+    av_packet_free(&ptr);
+}
+
+using AVPacketPtr = std::unique_ptr<AVPacket, decltype(&AVPacketDeleter)>;
+
+AVPixelFormat GetGpuFormat(AVCodecContext* av_codec_ctx, const AVPixelFormat* pix_fmts) {
+    for (const AVPixelFormat* p = pix_fmts; *p != AV_PIX_FMT_NONE; ++p) {
+        if (*p == av_codec_ctx->pix_fmt) {
+            return av_codec_ctx->pix_fmt;
+        }
+    }
+    LOG_INFO(Service_NVDRV, "Could not find compatible GPU AV format, falling back to CPU");
+    av_buffer_unref(&av_codec_ctx->hw_device_ctx);
+    av_codec_ctx->pix_fmt = PREFERRED_CPU_FMT;
+    return PREFERRED_CPU_FMT;
+}
+
+// List all the currently available hwcontext in ffmpeg
+std::vector<AVHWDeviceType> ListSupportedContexts() {
+    std::vector<AVHWDeviceType> contexts{};
+    AVHWDeviceType current_device_type = AV_HWDEVICE_TYPE_NONE;
+    do {
+        current_device_type = av_hwdevice_iterate_types(current_device_type);
+        contexts.push_back(current_device_type);
+    } while (current_device_type != AV_HWDEVICE_TYPE_NONE);
+    return contexts;
+}
+
+} // namespace
+
+void AVFrameDeleter(AVFrame* ptr) {
+    av_frame_free(&ptr);
+}
+
+Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs)
+    : host1x(host1x_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(host1x)),
+      vp8_decoder(std::make_unique<Decoder::VP8>(host1x)),
+      vp9_decoder(std::make_unique<Decoder::VP9>(host1x)) {}
+
+Codec::~Codec() {
+    if (!initialized) {
+        return;
+    }
+    // Free libav memory
+    avcodec_free_context(&av_codec_ctx);
+    av_buffer_unref(&av_gpu_decoder);
+}
+
+bool Codec::CreateGpuAvDevice() {
+    static constexpr auto HW_CONFIG_METHOD = AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX;
+    static const auto supported_contexts = ListSupportedContexts();
+    for (const auto& type : PREFERRED_GPU_DECODERS) {
+        if (std::none_of(supported_contexts.begin(), supported_contexts.end(),
+                         [&type](const auto& context) { return context == type; })) {
+            LOG_DEBUG(Service_NVDRV, "{} explicitly unsupported", av_hwdevice_get_type_name(type));
+            continue;
+        }
+        // Avoid memory leak from not cleaning up after av_hwdevice_ctx_create
+        av_buffer_unref(&av_gpu_decoder);
+        const int hwdevice_res = av_hwdevice_ctx_create(&av_gpu_decoder, type, nullptr, nullptr, 0);
+        if (hwdevice_res < 0) {
+            LOG_DEBUG(Service_NVDRV, "{} av_hwdevice_ctx_create failed {}",
+                      av_hwdevice_get_type_name(type), hwdevice_res);
+            continue;
+        }
+#ifdef LIBVA_FOUND
+        if (type == AV_HWDEVICE_TYPE_VAAPI) {
+            // we need to determine if this is an impersonated VAAPI driver
+            AVHWDeviceContext* hwctx =
+                static_cast<AVHWDeviceContext*>(static_cast<void*>(av_gpu_decoder->data));
+            AVVAAPIDeviceContext* vactx = static_cast<AVVAAPIDeviceContext*>(hwctx->hwctx);
+            const char* vendor_name = vaQueryVendorString(vactx->display);
+            if (strstr(vendor_name, "VDPAU backend")) {
+                // VDPAU impersonated VAAPI impl's are super buggy, we need to skip them
+                LOG_DEBUG(Service_NVDRV, "Skipping vdapu impersonated VAAPI driver");
+                continue;
+            } else {
+                // according to some user testing, certain vaapi driver (Intel?) could be buggy
+                // so let's log the driver name which may help the developers/supporters
+                LOG_DEBUG(Service_NVDRV, "Using VAAPI driver: {}", vendor_name);
+            }
+        }
+#endif
+        for (int i = 0;; i++) {
+            const AVCodecHWConfig* config = avcodec_get_hw_config(av_codec, i);
+            if (!config) {
+                LOG_DEBUG(Service_NVDRV, "{} decoder does not support device type {}.",
+                          av_codec->name, av_hwdevice_get_type_name(type));
+                break;
+            }
+            if ((config->methods & HW_CONFIG_METHOD) != 0 && config->device_type == type) {
+#if defined(__unix__)
+                // Some linux decoding backends are reported to crash with this config method
+                // TODO(ameerj): Properly support this method
+                if ((config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX) != 0) {
+                    // skip zero-copy decoders, we don't currently support them
+                    LOG_DEBUG(Service_NVDRV, "Skipping decoder {} with unsupported capability {}.",
+                              av_hwdevice_get_type_name(type), config->methods);
+                    continue;
+                }
+#endif
+                LOG_INFO(Service_NVDRV, "Using {} GPU decoder", av_hwdevice_get_type_name(type));
+                av_codec_ctx->pix_fmt = config->pix_fmt;
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+void Codec::InitializeAvCodecContext() {
+    av_codec_ctx = avcodec_alloc_context3(av_codec);
+    av_opt_set(av_codec_ctx->priv_data, "tune", "zerolatency", 0);
+}
+
+void Codec::InitializeGpuDecoder() {
+    if (!CreateGpuAvDevice()) {
+        av_buffer_unref(&av_gpu_decoder);
+        return;
+    }
+    auto* hw_device_ctx = av_buffer_ref(av_gpu_decoder);
+    ASSERT_MSG(hw_device_ctx, "av_buffer_ref failed");
+    av_codec_ctx->hw_device_ctx = hw_device_ctx;
+    av_codec_ctx->get_format = GetGpuFormat;
+}
+
+void Codec::Initialize() {
+    const AVCodecID codec = [&] {
+        switch (current_codec) {
+        case Host1x::NvdecCommon::VideoCodec::H264:
+            return AV_CODEC_ID_H264;
+        case Host1x::NvdecCommon::VideoCodec::VP8:
+            return AV_CODEC_ID_VP8;
+        case Host1x::NvdecCommon::VideoCodec::VP9:
+            return AV_CODEC_ID_VP9;
+        default:
+            UNIMPLEMENTED_MSG("Unknown codec {}", current_codec);
+            return AV_CODEC_ID_NONE;
+        }
+    }();
+    av_codec = avcodec_find_decoder(codec);
+
+    InitializeAvCodecContext();
+    if (Settings::values.nvdec_emulation.GetValue() == Settings::NvdecEmulation::GPU) {
+        InitializeGpuDecoder();
+    }
+    if (const int res = avcodec_open2(av_codec_ctx, av_codec, nullptr); res < 0) {
+        LOG_ERROR(Service_NVDRV, "avcodec_open2() Failed with result {}", res);
+        avcodec_free_context(&av_codec_ctx);
+        av_buffer_unref(&av_gpu_decoder);
+        return;
+    }
+    if (!av_codec_ctx->hw_device_ctx) {
+        LOG_INFO(Service_NVDRV, "Using FFmpeg software decoding");
+    }
+    initialized = true;
+}
+
+void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) {
+    if (current_codec != codec) {
+        current_codec = codec;
+        LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName());
+    }
+}
+
+void Codec::Decode() {
+    const bool is_first_frame = !initialized;
+    if (is_first_frame) {
+        Initialize();
+    }
+    if (!initialized) {
+        return;
+    }
+    bool vp9_hidden_frame = false;
+    const auto& frame_data = [&]() {
+        switch (current_codec) {
+        case Tegra::Host1x::NvdecCommon::VideoCodec::H264:
+            return h264_decoder->ComposeFrame(state, is_first_frame);
+        case Tegra::Host1x::NvdecCommon::VideoCodec::VP8:
+            return vp8_decoder->ComposeFrame(state);
+        case Tegra::Host1x::NvdecCommon::VideoCodec::VP9:
+            vp9_decoder->ComposeFrame(state);
+            vp9_hidden_frame = vp9_decoder->WasFrameHidden();
+            return vp9_decoder->GetFrameBytes();
+        default:
+            ASSERT(false);
+            return std::vector<u8>{};
+        }
+    }();
+    AVPacketPtr packet{av_packet_alloc(), AVPacketDeleter};
+    if (!packet) {
+        LOG_ERROR(Service_NVDRV, "av_packet_alloc failed");
+        return;
+    }
+    packet->data = const_cast<u8*>(frame_data.data());
+    packet->size = static_cast<s32>(frame_data.size());
+    if (const int res = avcodec_send_packet(av_codec_ctx, packet.get()); res != 0) {
+        LOG_DEBUG(Service_NVDRV, "avcodec_send_packet error {}", res);
+        return;
+    }
+    // Only receive/store visible frames
+    if (vp9_hidden_frame) {
+        return;
+    }
+    AVFramePtr initial_frame{av_frame_alloc(), AVFrameDeleter};
+    AVFramePtr final_frame{nullptr, AVFrameDeleter};
+    ASSERT_MSG(initial_frame, "av_frame_alloc initial_frame failed");
+    if (const int ret = avcodec_receive_frame(av_codec_ctx, initial_frame.get()); ret) {
+        LOG_DEBUG(Service_NVDRV, "avcodec_receive_frame error {}", ret);
+        return;
+    }
+    if (initial_frame->width == 0 || initial_frame->height == 0) {
+        LOG_WARNING(Service_NVDRV, "Zero width or height in frame");
+        return;
+    }
+    if (av_codec_ctx->hw_device_ctx) {
+        final_frame = AVFramePtr{av_frame_alloc(), AVFrameDeleter};
+        ASSERT_MSG(final_frame, "av_frame_alloc final_frame failed");
+        // Can't use AV_PIX_FMT_YUV420P and share code with software decoding in vic.cpp
+        // because Intel drivers crash unless using AV_PIX_FMT_NV12
+        final_frame->format = PREFERRED_GPU_FMT;
+        const int ret = av_hwframe_transfer_data(final_frame.get(), initial_frame.get(), 0);
+        ASSERT_MSG(!ret, "av_hwframe_transfer_data error {}", ret);
+    } else {
+        final_frame = std::move(initial_frame);
+    }
+    if (final_frame->format != PREFERRED_CPU_FMT && final_frame->format != PREFERRED_GPU_FMT) {
+        UNIMPLEMENTED_MSG("Unexpected video format: {}", final_frame->format);
+        return;
+    }
+    av_frames.push(std::move(final_frame));
+    if (av_frames.size() > 10) {
+        LOG_TRACE(Service_NVDRV, "av_frames.push overflow dropped frame");
+        av_frames.pop();
+    }
+}
+
+AVFramePtr Codec::GetCurrentFrame() {
+    // Sometimes VIC will request more frames than have been decoded.
+    // in this case, return a nullptr and don't overwrite previous frame data
+    if (av_frames.empty()) {
+        return AVFramePtr{nullptr, AVFrameDeleter};
+    }
+    AVFramePtr frame = std::move(av_frames.front());
+    av_frames.pop();
+    return frame;
+}
+
+Host1x::NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
+    return current_codec;
+}
+
+std::string_view Codec::GetCurrentCodecName() const {
+    switch (current_codec) {
+    case Host1x::NvdecCommon::VideoCodec::None:
+        return "None";
+    case Host1x::NvdecCommon::VideoCodec::H264:
+        return "H264";
+    case Host1x::NvdecCommon::VideoCodec::VP8:
+        return "VP8";
+    case Host1x::NvdecCommon::VideoCodec::H265:
+        return "H265";
+    case Host1x::NvdecCommon::VideoCodec::VP9:
+        return "VP9";
+    default:
+        return "Unknown";
+    }
+}
+} // namespace Tegra
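The AVPacketPtr/AVFramePtr idiom above is worth calling out: wrap a C allocate/free pair in std::unique_ptr with a custom deleter so early returns cannot leak. Reduced to its core, using the same FFmpeg calls the file uses:

// Standalone sketch of the custom-deleter pattern from codec.cpp.
extern "C" {
#include <libavcodec/avcodec.h>
}
#include <memory>

void PacketDeleter(AVPacket* ptr) {
    av_packet_free(&ptr); // av_packet_free takes AVPacket** and nulls the pointer
}
using PacketPtr = std::unique_ptr<AVPacket, decltype(&PacketDeleter)>;

PacketPtr MakePacket() {
    // Freed automatically on every exit path once wrapped.
    return PacketPtr{av_packet_alloc(), PacketDeleter};
}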
src/video_core/host1x/codecs/codec.h (new executable file, 84 lines)
@@ -0,0 +1,84 @@
+// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <memory>
+#include <string_view>
+#include <queue>
+#include "common/common_types.h"
+#include "video_core/host1x/nvdec_common.h"
+
+extern "C" {
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion"
+#endif
+#include <libavcodec/avcodec.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+namespace Tegra {
+
+void AVFrameDeleter(AVFrame* ptr);
+using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>;
+
+namespace Decoder {
+class H264;
+class VP8;
+class VP9;
+} // namespace Decoder
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
+class Codec {
+public:
+    explicit Codec(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs);
+    ~Codec();
+
+    /// Initialize the codec, returning success or failure
+    void Initialize();
+
+    /// Sets NVDEC video stream codec
+    void SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec);
+
+    /// Call decoders to construct headers, decode AVFrame with ffmpeg
+    void Decode();
+
+    /// Returns next decoded frame
+    [[nodiscard]] AVFramePtr GetCurrentFrame();
+
+    /// Returns the value of current_codec
+    [[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const;
+
+    /// Return name of the current codec
+    [[nodiscard]] std::string_view GetCurrentCodecName() const;
+
+private:
+    void InitializeAvCodecContext();
+
+    void InitializeGpuDecoder();
+
+    bool CreateGpuAvDevice();
+
+    bool initialized{};
+    Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None};
+
+    const AVCodec* av_codec{nullptr};
+    AVCodecContext* av_codec_ctx{nullptr};
+    AVBufferRef* av_gpu_decoder{nullptr};
+
+    Host1x::Host1x& host1x;
+    const Host1x::NvdecCommon::NvdecRegisters& state;
+    std::unique_ptr<Decoder::H264> h264_decoder;
+    std::unique_ptr<Decoder::VP8> vp8_decoder;
+    std::unique_ptr<Decoder::VP9> vp9_decoder;
+
+    std::queue<AVFramePtr> av_frames{};
+};
+
+} // namespace Tegra
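A hedged usage sketch of the Codec interface declared above, as the nvdec device plausibly drives it; the caller context (a constructed Codec bound to live Host1x registers) is assumed, not shown in this commit:

// Sketch only: select a codec, decode one frame, then pull it for VIC.
#include "video_core/host1x/codecs/codec.h"

void DecodeOneFrame(Tegra::Codec& codec) {
    codec.SetTargetCodec(Tegra::Host1x::NvdecCommon::VideoCodec::H264);
    codec.Decode(); // composes the stream headers and feeds FFmpeg
    if (Tegra::AVFramePtr frame = codec.GetCurrentFrame()) {
        // Frame is NV12 on the GPU path or YUV420P on the CPU path.
    }
}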
src/video_core/host1x/codecs/h264.cpp (new executable file, 278 lines)
@@ -0,0 +1,278 @@
+// SPDX-FileCopyrightText: Ryujinx Team and Contributors
+// SPDX-License-Identifier: MIT
+
+#include <array>
+#include <bit>
+
+#include "common/settings.h"
+#include "video_core/host1x/codecs/h264.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/memory_manager.h"
+
+namespace Tegra::Decoder {
+namespace {
+// ZigZag LUTs from libavcodec.
+constexpr std::array<u8, 64> zig_zag_direct{
+    0,  1,  8,  16, 9,  2,  3,  10, 17, 24, 32, 25, 18, 11, 4,  5,  12, 19, 26, 33, 40, 48,
+    41, 34, 27, 20, 13, 6,  7,  14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23,
+    30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63,
+};
+
+constexpr std::array<u8, 16> zig_zag_scan{
+    0 + 0 * 4, 1 + 0 * 4, 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 2 + 0 * 4, 3 + 0 * 4, 2 + 1 * 4,
+    1 + 2 * 4, 0 + 3 * 4, 1 + 3 * 4, 2 + 2 * 4, 3 + 1 * 4, 3 + 2 * 4, 2 + 3 * 4, 3 + 3 * 4,
+};
+} // Anonymous namespace
+
+H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {}
+
+H264::~H264() = default;
+
+const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state,
+                                          bool is_first_frame) {
+    H264DecoderContext context;
+    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context,
+                                     sizeof(H264DecoderContext));
+
+    const s64 frame_number = context.h264_parameter_set.frame_number.Value();
+    if (!is_first_frame && frame_number != 0) {
+        frame.resize(context.stream_len);
+        host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
+        return frame;
+    }
+
+    // Encode header
+    H264BitWriter writer{};
+    writer.WriteU(1, 24);
+    writer.WriteU(0, 1);
+    writer.WriteU(3, 2);
+    writer.WriteU(7, 5);
+    writer.WriteU(100, 8);
+    writer.WriteU(0, 8);
+    writer.WriteU(31, 8);
+    writer.WriteUe(0);
+    const u32 chroma_format_idc =
+        static_cast<u32>(context.h264_parameter_set.chroma_format_idc.Value());
+    writer.WriteUe(chroma_format_idc);
+    if (chroma_format_idc == 3) {
+        writer.WriteBit(false);
+    }
+
+    writer.WriteUe(0);
+    writer.WriteUe(0);
+    writer.WriteBit(false); // QpprimeYZeroTransformBypassFlag
+    writer.WriteBit(false); // Scaling matrix present flag
+
+    writer.WriteUe(static_cast<u32>(context.h264_parameter_set.log2_max_frame_num_minus4.Value()));
+
+    const auto order_cnt_type =
+        static_cast<u32>(context.h264_parameter_set.pic_order_cnt_type.Value());
+    writer.WriteUe(order_cnt_type);
+    if (order_cnt_type == 0) {
+        writer.WriteUe(context.h264_parameter_set.log2_max_pic_order_cnt_lsb_minus4);
+    } else if (order_cnt_type == 1) {
+        writer.WriteBit(context.h264_parameter_set.delta_pic_order_always_zero_flag != 0);
+
+        writer.WriteSe(0);
+        writer.WriteSe(0);
+        writer.WriteUe(0);
+    }
+
+    const s32 pic_height = context.h264_parameter_set.frame_height_in_map_units /
+                           (context.h264_parameter_set.frame_mbs_only_flag ? 1 : 2);
+
+    // TODO (ameerj): Where do we get this number, it seems to be particular for each stream
+    const auto nvdec_decoding = Settings::values.nvdec_emulation.GetValue();
+    const bool uses_gpu_decoding = nvdec_decoding == Settings::NvdecEmulation::GPU;
+    const u32 max_num_ref_frames = uses_gpu_decoding ? 6u : 16u;
+    writer.WriteUe(max_num_ref_frames);
+    writer.WriteBit(false);
+    writer.WriteUe(context.h264_parameter_set.pic_width_in_mbs - 1);
+    writer.WriteUe(pic_height - 1);
+    writer.WriteBit(context.h264_parameter_set.frame_mbs_only_flag != 0);
+
+    if (!context.h264_parameter_set.frame_mbs_only_flag) {
+        writer.WriteBit(context.h264_parameter_set.flags.mbaff_frame.Value() != 0);
+    }
+
+    writer.WriteBit(context.h264_parameter_set.flags.direct_8x8_inference.Value() != 0);
+    writer.WriteBit(false); // Frame cropping flag
+    writer.WriteBit(false); // VUI parameter present flag
+
+    writer.End();
+
+    // H264 PPS
+    writer.WriteU(1, 24);
+    writer.WriteU(0, 1);
+    writer.WriteU(3, 2);
+    writer.WriteU(8, 5);
+
+    writer.WriteUe(0);
+    writer.WriteUe(0);
+
+    writer.WriteBit(context.h264_parameter_set.entropy_coding_mode_flag != 0);
+    writer.WriteBit(false);
+    writer.WriteUe(0);
+    writer.WriteUe(context.h264_parameter_set.num_refidx_l0_default_active);
+    writer.WriteUe(context.h264_parameter_set.num_refidx_l1_default_active);
+    writer.WriteBit(context.h264_parameter_set.flags.weighted_pred.Value() != 0);
+    writer.WriteU(static_cast<s32>(context.h264_parameter_set.weighted_bipred_idc.Value()), 2);
+    s32 pic_init_qp = static_cast<s32>(context.h264_parameter_set.pic_init_qp_minus26.Value());
+    writer.WriteSe(pic_init_qp);
+    writer.WriteSe(0);
+    s32 chroma_qp_index_offset =
+        static_cast<s32>(context.h264_parameter_set.chroma_qp_index_offset.Value());
+
+    writer.WriteSe(chroma_qp_index_offset);
+    writer.WriteBit(context.h264_parameter_set.deblocking_filter_control_present_flag != 0);
+    writer.WriteBit(context.h264_parameter_set.flags.constrained_intra_pred.Value() != 0);
+    writer.WriteBit(context.h264_parameter_set.redundant_pic_cnt_present_flag != 0);
+    writer.WriteBit(context.h264_parameter_set.transform_8x8_mode_flag != 0);
+
+    writer.WriteBit(true);
+
+    for (s32 index = 0; index < 6; index++) {
+        writer.WriteBit(true);
+        std::span<const u8> matrix{context.weight_scale};
+        writer.WriteScalingList(matrix, index * 16, 16);
+    }
+
+    if (context.h264_parameter_set.transform_8x8_mode_flag) {
+        for (s32 index = 0; index < 2; index++) {
+            writer.WriteBit(true);
+            std::span<const u8> matrix{context.weight_scale_8x8};
+            writer.WriteScalingList(matrix, index * 64, 64);
+        }
+    }
+
+    s32 chroma_qp_index_offset2 =
+        static_cast<s32>(context.h264_parameter_set.second_chroma_qp_index_offset.Value());
+
+    writer.WriteSe(chroma_qp_index_offset2);
+
+    writer.End();
+
+    const auto& encoded_header = writer.GetByteArray();
+    frame.resize(encoded_header.size() + context.stream_len);
+    std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());
+
+    host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
+                                     frame.data() + encoded_header.size(), context.stream_len);
+
+    return frame;
+}
+
+H264BitWriter::H264BitWriter() = default;
+
+H264BitWriter::~H264BitWriter() = default;
+
+void H264BitWriter::WriteU(s32 value, s32 value_sz) {
+    WriteBits(value, value_sz);
+}
+
+void H264BitWriter::WriteSe(s32 value) {
+    WriteExpGolombCodedInt(value);
+}
+
+void H264BitWriter::WriteUe(u32 value) {
+    WriteExpGolombCodedUInt(value);
+}
+
+void H264BitWriter::End() {
+    WriteBit(true);
+    Flush();
+}
+
+void H264BitWriter::WriteBit(bool state) {
+    WriteBits(state ? 1 : 0, 1);
+}
+
+void H264BitWriter::WriteScalingList(std::span<const u8> list, s32 start, s32 count) {
+    std::vector<u8> scan(count);
+    if (count == 16) {
+        std::memcpy(scan.data(), zig_zag_scan.data(), scan.size());
+    } else {
+        std::memcpy(scan.data(), zig_zag_direct.data(), scan.size());
+    }
+    u8 last_scale = 8;
+
+    for (s32 index = 0; index < count; index++) {
+        const u8 value = list[start + scan[index]];
+        const s32 delta_scale = static_cast<s32>(value - last_scale);
+
+        WriteSe(delta_scale);
+
+        last_scale = value;
+    }
+}
+
+std::vector<u8>& H264BitWriter::GetByteArray() {
+    return byte_array;
+}
+
+const std::vector<u8>& H264BitWriter::GetByteArray() const {
+    return byte_array;
+}
+
+void H264BitWriter::WriteBits(s32 value, s32 bit_count) {
+    s32 value_pos = 0;
+
+    s32 remaining = bit_count;
+
+    while (remaining > 0) {
+        s32 copy_size = remaining;
+
+        const s32 free_bits = GetFreeBufferBits();
+
+        if (copy_size > free_bits) {
+            copy_size = free_bits;
+        }
+
+        const s32 mask = (1 << copy_size) - 1;
+
+        const s32 src_shift = (bit_count - value_pos) - copy_size;
+        const s32 dst_shift = (buffer_size - buffer_pos) - copy_size;
+
+        buffer |= ((value >> src_shift) & mask) << dst_shift;
+
+        value_pos += copy_size;
+        buffer_pos += copy_size;
+        remaining -= copy_size;
+    }
+}
+
+void H264BitWriter::WriteExpGolombCodedInt(s32 value) {
+    const s32 sign = value <= 0 ? 0 : 1;
+    if (value < 0) {
+        value = -value;
+    }
+    value = (value << 1) - sign;
+    WriteExpGolombCodedUInt(value);
+}
+
+void H264BitWriter::WriteExpGolombCodedUInt(u32 value) {
+    const s32 size = 32 - std::countl_zero(value + 1);
+    WriteBits(1, size);
+
+    value -= (1U << (size - 1)) - 1;
+    WriteBits(static_cast<s32>(value), size - 1);
+}
+
+s32 H264BitWriter::GetFreeBufferBits() {
+    if (buffer_pos == buffer_size) {
+        Flush();
+    }
+
+    return buffer_size - buffer_pos;
+}
+
+void H264BitWriter::Flush() {
+    if (buffer_pos == 0) {
+        return;
+    }
+    byte_array.push_back(static_cast<u8>(buffer));
+
+    buffer = 0;
+    buffer_pos = 0;
+}
+} // namespace Tegra::Decoder
src/video_core/host1x/codecs/h264.h (new executable file, 177 lines)
@@ -0,0 +1,177 @@
+// SPDX-FileCopyrightText: Ryujinx Team and Contributors
+// SPDX-License-Identifier: MIT
+
+#pragma once
+
+#include <span>
+#include <vector>
+#include "common/bit_field.h"
+#include "common/common_funcs.h"
+#include "common/common_types.h"
+#include "video_core/host1x/nvdec_common.h"
+
+namespace Tegra {
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
+namespace Decoder {
+
+class H264BitWriter {
+public:
+    H264BitWriter();
+    ~H264BitWriter();
+
+    /// The following Write methods are based on clause 9.1 in the H.264 specification.
+    /// WriteSe and WriteUe write in the Exp-Golomb-coded syntax
+    void WriteU(s32 value, s32 value_sz);
+    void WriteSe(s32 value);
+    void WriteUe(u32 value);
+
+    /// Finalize the bitstream
+    void End();
+
+    /// append a bit to the stream, equivalent value to the state parameter
+    void WriteBit(bool state);
+
+    /// Based on section 7.3.2.1.1.1 and Table 7-4 in the H.264 specification
+    /// Writes the scaling matrices of the sream
+    void WriteScalingList(std::span<const u8> list, s32 start, s32 count);
+
+    /// Return the bitstream as a vector.
+    [[nodiscard]] std::vector<u8>& GetByteArray();
+    [[nodiscard]] const std::vector<u8>& GetByteArray() const;
+
+private:
+    void WriteBits(s32 value, s32 bit_count);
+    void WriteExpGolombCodedInt(s32 value);
+    void WriteExpGolombCodedUInt(u32 value);
+    [[nodiscard]] s32 GetFreeBufferBits();
+    void Flush();
+
+    s32 buffer_size{8};
+
+    s32 buffer{};
+    s32 buffer_pos{};
+    std::vector<u8> byte_array;
+};
+
+class H264 {
+public:
+    explicit H264(Host1x::Host1x& host1x);
+    ~H264();
+
+    /// Compose the H264 frame for FFmpeg decoding
+    [[nodiscard]] const std::vector<u8>& ComposeFrame(
+        const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame = false);
+
+private:
+    std::vector<u8> frame;
+    Host1x::Host1x& host1x;
+
+    struct H264ParameterSet {
+        s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00
+        s32 delta_pic_order_always_zero_flag;  ///< 0x04
+        s32 frame_mbs_only_flag;               ///< 0x08
+        u32 pic_width_in_mbs;                  ///< 0x0C
+        u32 frame_height_in_map_units;         ///< 0x10
+        union {                                ///< 0x14
+            BitField<0, 2, u32> tile_format;
+            BitField<2, 3, u32> gob_height;
+        };
+        u32 entropy_coding_mode_flag;               ///< 0x18
+        s32 pic_order_present_flag;                 ///< 0x1C
+        s32 num_refidx_l0_default_active;           ///< 0x20
+        s32 num_refidx_l1_default_active;           ///< 0x24
+        s32 deblocking_filter_control_present_flag; ///< 0x28
+        s32 redundant_pic_cnt_present_flag;         ///< 0x2C
+        u32 transform_8x8_mode_flag;                ///< 0x30
+        u32 pitch_luma;                             ///< 0x34
+        u32 pitch_chroma;                           ///< 0x38
+        u32 luma_top_offset;                        ///< 0x3C
+        u32 luma_bot_offset;                        ///< 0x40
+        u32 luma_frame_offset;                      ///< 0x44
+        u32 chroma_top_offset;                      ///< 0x48
+        u32 chroma_bot_offset;                      ///< 0x4C
+        u32 chroma_frame_offset;                    ///< 0x50
+        u32 hist_buffer_size;                       ///< 0x54
+        union {                                     ///< 0x58
+            union {
+                BitField<0, 1, u64> mbaff_frame;
+                BitField<1, 1, u64> direct_8x8_inference;
+                BitField<2, 1, u64> weighted_pred;
+                BitField<3, 1, u64> constrained_intra_pred;
+                BitField<4, 1, u64> ref_pic;
+                BitField<5, 1, u64> field_pic;
+                BitField<6, 1, u64> bottom_field;
+                BitField<7, 1, u64> second_field;
+            } flags;
+            BitField<8, 4, u64> log2_max_frame_num_minus4;
+            BitField<12, 2, u64> chroma_format_idc;
+            BitField<14, 2, u64> pic_order_cnt_type;
+            BitField<16, 6, s64> pic_init_qp_minus26;
+            BitField<22, 5, s64> chroma_qp_index_offset;
+            BitField<27, 5, s64> second_chroma_qp_index_offset;
+            BitField<32, 2, u64> weighted_bipred_idc;
+            BitField<34, 7, u64> curr_pic_idx;
+            BitField<41, 5, u64> curr_col_idx;
+            BitField<46, 16, u64> frame_number;
+            BitField<62, 1, u64> frame_surfaces;
+            BitField<63, 1, u64> output_memory_layout;
+        };
+    };
+    static_assert(sizeof(H264ParameterSet) == 0x60, "H264ParameterSet is an invalid size");
+
+    struct H264DecoderContext {
+        INSERT_PADDING_WORDS_NOINIT(18);       ///< 0x0000
+        u32 stream_len;                        ///< 0x0048
+        INSERT_PADDING_WORDS_NOINIT(3);        ///< 0x004C
+        H264ParameterSet h264_parameter_set;   ///< 0x0058
+        INSERT_PADDING_WORDS_NOINIT(66);       ///< 0x00B8
+        std::array<u8, 0x60> weight_scale;     ///< 0x01C0
+        std::array<u8, 0x80> weight_scale_8x8; ///< 0x0220
+    };
+    static_assert(sizeof(H264DecoderContext) == 0x2A0, "H264DecoderContext is an invalid size");
+
+#define ASSERT_POSITION(field_name, position) \
+    static_assert(offsetof(H264ParameterSet, field_name) == position, \
+                  "Field " #field_name " has invalid position")
+
+    ASSERT_POSITION(log2_max_pic_order_cnt_lsb_minus4, 0x00);
+    ASSERT_POSITION(delta_pic_order_always_zero_flag, 0x04);
+    ASSERT_POSITION(frame_mbs_only_flag, 0x08);
+    ASSERT_POSITION(pic_width_in_mbs, 0x0C);
+    ASSERT_POSITION(frame_height_in_map_units, 0x10);
+    ASSERT_POSITION(tile_format, 0x14);
+    ASSERT_POSITION(entropy_coding_mode_flag, 0x18);
+    ASSERT_POSITION(pic_order_present_flag, 0x1C);
+    ASSERT_POSITION(num_refidx_l0_default_active, 0x20);
+    ASSERT_POSITION(num_refidx_l1_default_active, 0x24);
+    ASSERT_POSITION(deblocking_filter_control_present_flag, 0x28);
+    ASSERT_POSITION(redundant_pic_cnt_present_flag, 0x2C);
+    ASSERT_POSITION(transform_8x8_mode_flag, 0x30);
+    ASSERT_POSITION(pitch_luma, 0x34);
+    ASSERT_POSITION(pitch_chroma, 0x38);
+    ASSERT_POSITION(luma_top_offset, 0x3C);
+    ASSERT_POSITION(luma_bot_offset, 0x40);
+    ASSERT_POSITION(luma_frame_offset, 0x44);
+    ASSERT_POSITION(chroma_top_offset, 0x48);
+    ASSERT_POSITION(chroma_bot_offset, 0x4C);
+    ASSERT_POSITION(chroma_frame_offset, 0x50);
+    ASSERT_POSITION(hist_buffer_size, 0x54);
+    ASSERT_POSITION(flags, 0x58);
+#undef ASSERT_POSITION
+
+#define ASSERT_POSITION(field_name, position) \
+    static_assert(offsetof(H264DecoderContext, field_name) == position, \
+                  "Field " #field_name " has invalid position")
+
+    ASSERT_POSITION(stream_len, 0x48);
+    ASSERT_POSITION(h264_parameter_set, 0x58);
+    ASSERT_POSITION(weight_scale, 0x1C0);
+#undef ASSERT_POSITION
+};
+
+} // namespace Decoder
+} // namespace Tegra
53
src/video_core/host1x/codecs/vp8.cpp
Executable file
53
src/video_core/host1x/codecs/vp8.cpp
Executable file
|
@ -0,0 +1,53 @@
|
||||||
|
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <vector>

#include "video_core/host1x/codecs/vp8.h"
#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"

namespace Tegra::Decoder {
VP8::VP8(Host1x::Host1x& host1x_) : host1x{host1x_} {}

VP8::~VP8() = default;

const std::vector<u8>& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
    VP8PictureInfo info;
    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));

    const bool is_key_frame = info.key_frame == 1u;
    const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size);
    const size_t header_size = is_key_frame ? 10u : 3u;
    frame.resize(header_size + bitstream_size);

    // Based on page 30 of the VP8 specification.
    // https://datatracker.ietf.org/doc/rfc6386/
    frame[0] = is_key_frame ? 0u : 1u; // 1-bit frame type (0: key frame, 1: inter frame).
    frame[0] |= static_cast<u8>((info.version & 7u) << 1u); // 3-bit version number
    frame[0] |= static_cast<u8>(1u << 4u);                  // 1-bit show_frame flag

    // The next 19 bits are the first partition size
    frame[0] |= static_cast<u8>((info.first_part_size & 7u) << 5u);
    frame[1] = static_cast<u8>((info.first_part_size & 0x7f8u) >> 3u);
    frame[2] = static_cast<u8>((info.first_part_size & 0x7f800u) >> 11u);

    if (is_key_frame) {
        frame[3] = 0x9du;
        frame[4] = 0x01u;
        frame[5] = 0x2au;
        // TODO(ameerj): Horizontal/Vertical Scale
        // 16 bits: (2 bits Horizontal Scale << 14) | Width (14 bits)
        frame[6] = static_cast<u8>(info.frame_width & 0xff);
        frame[7] = static_cast<u8>(((info.frame_width >> 8) & 0x3f));
        // 16 bits: (2 bits Vertical Scale << 14) | Height (14 bits)
        frame[8] = static_cast<u8>(info.frame_height & 0xff);
        frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f));
    }
    const u64 bitstream_offset = state.frame_bitstream_offset;
    host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);

    return frame;
}
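
// Worked example (illustrative only): the 19-bit first_part_size is split as
// bits 0-2 into frame[0] (above bit 5), bits 3-10 into frame[1], and bits
// 11-18 into frame[2], so for first_part_size == 0x1234 the tag bytes carry
// (0x1234 & 7), (0x1234 >> 3) & 0xff, and (0x1234 >> 11) & 0xff respectively.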

} // namespace Tegra::Decoder
78
src/video_core/host1x/codecs/vp8.h
Executable file
@@ -0,0 +1,78 @@
// SPDX-FileCopyrightText: Copyright 2021 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <vector>

#include "common/common_funcs.h"
#include "common/common_types.h"
#include "video_core/host1x/nvdec_common.h"

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

namespace Decoder {

class VP8 {
public:
    explicit VP8(Host1x::Host1x& host1x);
    ~VP8();

    /// Compose the VP8 frame for FFmpeg decoding
    [[nodiscard]] const std::vector<u8>& ComposeFrame(
        const Host1x::NvdecCommon::NvdecRegisters& state);

private:
    std::vector<u8> frame;
    Host1x::Host1x& host1x;

    struct VP8PictureInfo {
        INSERT_PADDING_WORDS_NOINIT(14);
        u16 frame_width;  // actual frame width
        u16 frame_height; // actual frame height
        u8 key_frame;
        u8 version;
        union {
            u8 raw;
            BitField<0, 2, u8> tile_format;
            BitField<2, 3, u8> gob_height;
            BitField<5, 3, u8> reserved_surface_format;
|
||||||
|
};
|
||||||
|
u8 error_conceal_on; // 1: error conceal on; 0: off
|
||||||
|
u32 first_part_size; // the size of first partition(frame header and mb header partition)
|
||||||
|
u32 hist_buffer_size; // in units of 256
|
||||||
|
u32 vld_buffer_size; // in units of 1
|
||||||
|
// Current frame buffers
|
||||||
|
std::array<u32, 2> frame_stride; // [y_c]
|
||||||
|
u32 luma_top_offset; // offset of luma top field in units of 256
|
||||||
|
u32 luma_bot_offset; // offset of luma bottom field in units of 256
|
||||||
|
u32 luma_frame_offset; // offset of luma frame in units of 256
|
||||||
|
u32 chroma_top_offset; // offset of chroma top field in units of 256
|
||||||
|
u32 chroma_bot_offset; // offset of chroma bottom field in units of 256
|
||||||
|
u32 chroma_frame_offset; // offset of chroma frame in units of 256
|
||||||
|
|
||||||
|
INSERT_PADDING_BYTES_NOINIT(0x1c); // NvdecDisplayParams
|
||||||
|
|
||||||
|
// Decode picture buffer related
|
||||||
|
s8 current_output_memory_layout;
|
||||||
|
// output NV12/NV24 setting. index 0: golden; 1: altref; 2: last
|
||||||
|
std::array<s8, 3> output_memory_layout;
|
||||||
|
|
||||||
|
u8 segmentation_feature_data_update;
|
||||||
|
INSERT_PADDING_BYTES_NOINIT(3);
|
||||||
|
|
||||||
|
// ucode return result
|
||||||
|
u32 result_value;
|
||||||
|
std::array<u32, 8> partition_offset;
|
||||||
|
INSERT_PADDING_WORDS_NOINIT(3);
|
||||||
|
};
    static_assert(sizeof(VP8PictureInfo) == 0xc0, "VP8PictureInfo is an invalid size");
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace Decoder
|
||||||
|
} // namespace Tegra
|
947
src/video_core/host1x/codecs/vp9.cpp
Executable file
@@ -0,0 +1,947 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm> // for std::copy
#include <numeric>
#include "common/assert.h"
#include "video_core/host1x/codecs/vp9.h"
#include "video_core/host1x/host1x.h"
#include "video_core/memory_manager.h"

namespace Tegra::Decoder {
namespace {
constexpr u32 diff_update_probability = 252;
constexpr u32 frame_sync_code = 0x498342;

// Default compressed header probabilities once frame context resets
constexpr Vp9EntropyProbs default_probs{
    .y_mode_prob{
        65, 32, 18, 144, 162, 194, 41, 51, 98, 132, 68, 18, 165, 217, 196, 45, 40, 78,
        173, 80, 19, 176, 240, 193, 64, 35, 46, 221, 135, 38, 194, 248, 121, 96, 85, 29,
    },
    .partition_prob{
        199, 122, 141, 0, 147, 63, 159, 0, 148, 133, 118, 0, 121, 104, 114, 0,
        174, 73, 87, 0, 92, 41, 83, 0, 82, 99, 50, 0, 53, 39, 39, 0,
        177, 58, 59, 0, 68, 26, 63, 0, 52, 79, 25, 0, 17, 14, 12, 0,
        222, 34, 30, 0, 72, 16, 44, 0, 58, 32, 12, 0, 10, 7, 6, 0,
    },
    .coef_probs{
        195, 29, 183, 84, 49, 136, 8, 42, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        31, 107, 169, 35, 99, 159, 17, 82, 140, 8, 66, 114, 2, 44, 76, 1, 19, 32,
        40, 132, 201, 29, 114, 187, 13, 91, 157, 7, 75, 127, 3, 58, 95, 1, 28, 47,
        69, 142, 221, 42, 122, 201, 15, 91, 159, 6, 67, 121, 1, 42, 77, 1, 17, 31,
        102, 148, 228, 67, 117, 204, 17, 82, 154, 6, 59, 114, 2, 39, 75, 1, 15, 29,
        156, 57, 233, 119, 57, 212, 58, 48, 163, 29, 40, 124, 12, 30, 81, 3, 12, 31,
        191, 107, 226, 124, 117, 204, 25, 99, 155, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        29, 148, 210, 37, 126, 194, 8, 93, 157, 2, 68, 118, 1, 39, 69, 1, 17, 33,
        41, 151, 213, 27, 123, 193, 3, 82, 144, 1, 58, 105, 1, 32, 60, 1, 13, 26,
        59, 159, 220, 23, 126, 198, 4, 88, 151, 1, 66, 114, 1, 38, 71, 1, 18, 34,
        114, 136, 232, 51, 114, 207, 11, 83, 155, 3, 56, 105, 1, 33, 65, 1, 17, 34,
        149, 65, 234, 121, 57, 215, 61, 49, 166, 28, 36, 114, 12, 25, 76, 3, 16, 42,
        214, 49, 220, 132, 63, 188, 42, 65, 137, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        85, 137, 221, 104, 131, 216, 49, 111, 192, 21, 87, 155, 2, 49, 87, 1, 16, 28,
        89, 163, 230, 90, 137, 220, 29, 100, 183, 10, 70, 135, 2, 42, 81, 1, 17, 33,
        108, 167, 237, 55, 133, 222, 15, 97, 179, 4, 72, 135, 1, 45, 85, 1, 19, 38,
        124, 146, 240, 66, 124, 224, 17, 88, 175, 4, 58, 122, 1, 36, 75, 1, 18, 37,
        141, 79, 241, 126, 70, 227, 66, 58, 182, 30, 44, 136, 12, 34, 96, 2, 20, 47,
        229, 99, 249, 143, 111, 235, 46, 109, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        82, 158, 236, 94, 146, 224, 25, 117, 191, 9, 87, 149, 3, 56, 99, 1, 33, 57,
        83, 167, 237, 68, 145, 222, 10, 103, 177, 2, 72, 131, 1, 41, 79, 1, 20, 39,
        99, 167, 239, 47, 141, 224, 10, 104, 178, 2, 73, 133, 1, 44, 85, 1, 22, 47,
        127, 145, 243, 71, 129, 228, 17, 93, 177, 3, 61, 124, 1, 41, 84, 1, 21, 52,
        157, 78, 244, 140, 72, 231, 69, 58, 184, 31, 44, 137, 14, 38, 105, 8, 23, 61,
        125, 34, 187, 52, 41, 133, 6, 31, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        37, 109, 153, 51, 102, 147, 23, 87, 128, 8, 67, 101, 1, 41, 63, 1, 19, 29,
        31, 154, 185, 17, 127, 175, 6, 96, 145, 2, 73, 114, 1, 51, 82, 1, 28, 45,
        23, 163, 200, 10, 131, 185, 2, 93, 148, 1, 67, 111, 1, 41, 69, 1, 14, 24,
        29, 176, 217, 12, 145, 201, 3, 101, 156, 1, 69, 111, 1, 39, 63, 1, 14, 23,
        57, 192, 233, 25, 154, 215, 6, 109, 167, 3, 78, 118, 1, 48, 69, 1, 21, 29,
        202, 105, 245, 108, 106, 216, 18, 90, 144, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        33, 172, 219, 64, 149, 206, 14, 117, 177, 5, 90, 141, 2, 61, 95, 1, 37, 57,
        33, 179, 220, 11, 140, 198, 1, 89, 148, 1, 60, 104, 1, 33, 57, 1, 12, 21,
        30, 181, 221, 8, 141, 198, 1, 87, 145, 1, 58, 100, 1, 31, 55, 1, 12, 20,
        32, 186, 224, 7, 142, 198, 1, 86, 143, 1, 58, 100, 1, 31, 55, 1, 12, 22,
        57, 192, 227, 20, 143, 204, 3, 96, 154, 1, 68, 112, 1, 42, 69, 1, 19, 32,
        212, 35, 215, 113, 47, 169, 29, 48, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        74, 129, 203, 106, 120, 203, 49, 107, 178, 19, 84, 144, 4, 50, 84, 1, 15, 25,
        71, 172, 217, 44, 141, 209, 15, 102, 173, 6, 76, 133, 2, 51, 89, 1, 24, 42,
        64, 185, 231, 31, 148, 216, 8, 103, 175, 3, 74, 131, 1, 46, 81, 1, 18, 30,
        65, 196, 235, 25, 157, 221, 5, 105, 174, 1, 67, 120, 1, 38, 69, 1, 15, 30,
        65, 204, 238, 30, 156, 224, 7, 107, 177, 2, 70, 124, 1, 42, 73, 1, 18, 34,
        225, 86, 251, 144, 104, 235, 42, 99, 181, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        85, 175, 239, 112, 165, 229, 29, 136, 200, 12, 103, 162, 6, 77, 123, 2, 53, 84,
        75, 183, 239, 30, 155, 221, 3, 106, 171, 1, 74, 128, 1, 44, 76, 1, 17, 28,
        73, 185, 240, 27, 159, 222, 2, 107, 172, 1, 75, 127, 1, 42, 73, 1, 17, 29,
        62, 190, 238, 21, 159, 222, 2, 107, 172, 1, 72, 122, 1, 40, 71, 1, 18, 32,
        61, 199, 240, 27, 161, 226, 4, 113, 180, 1, 76, 129, 1, 46, 80, 1, 23, 41,
        7, 27, 153, 5, 30, 95, 1, 16, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        50, 75, 127, 57, 75, 124, 27, 67, 108, 10, 54, 86, 1, 33, 52, 1, 12, 18,
        43, 125, 151, 26, 108, 148, 7, 83, 122, 2, 59, 89, 1, 38, 60, 1, 17, 27,
        23, 144, 163, 13, 112, 154, 2, 75, 117, 1, 50, 81, 1, 31, 51, 1, 14, 23,
        18, 162, 185, 6, 123, 171, 1, 78, 125, 1, 51, 86, 1, 31, 54, 1, 14, 23,
        15, 199, 227, 3, 150, 204, 1, 91, 146, 1, 55, 95, 1, 30, 53, 1, 11, 20,
        19, 55, 240, 19, 59, 196, 3, 52, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        41, 166, 207, 104, 153, 199, 31, 123, 181, 14, 101, 152, 5, 72, 106, 1, 36, 52,
        35, 176, 211, 12, 131, 190, 2, 88, 144, 1, 60, 101, 1, 36, 60, 1, 16, 28,
        28, 183, 213, 8, 134, 191, 1, 86, 142, 1, 56, 96, 1, 30, 53, 1, 12, 20,
        20, 190, 215, 4, 135, 192, 1, 84, 139, 1, 53, 91, 1, 28, 49, 1, 11, 20,
        13, 196, 216, 2, 137, 192, 1, 86, 143, 1, 57, 99, 1, 32, 56, 1, 13, 24,
        211, 29, 217, 96, 47, 156, 22, 43, 87, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        78, 120, 193, 111, 116, 186, 46, 102, 164, 15, 80, 128, 2, 49, 76, 1, 18, 28,
        71, 161, 203, 42, 132, 192, 10, 98, 150, 3, 69, 109, 1, 44, 70, 1, 18, 29,
        57, 186, 211, 30, 140, 196, 4, 93, 146, 1, 62, 102, 1, 38, 65, 1, 16, 27,
        47, 199, 217, 14, 145, 196, 1, 88, 142, 1, 57, 98, 1, 36, 62, 1, 15, 26,
        26, 219, 229, 5, 155, 207, 1, 94, 151, 1, 60, 104, 1, 36, 62, 1, 16, 28,
        233, 29, 248, 146, 47, 220, 43, 52, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        100, 163, 232, 179, 161, 222, 63, 142, 204, 37, 113, 174, 26, 89, 137, 18, 68, 97,
        85, 181, 230, 32, 146, 209, 7, 100, 164, 3, 71, 121, 1, 45, 77, 1, 18, 30,
        65, 187, 230, 20, 148, 207, 2, 97, 159, 1, 68, 116, 1, 40, 70, 1, 14, 29,
        40, 194, 227, 8, 147, 204, 1, 94, 155, 1, 65, 112, 1, 39, 66, 1, 14, 26,
        16, 208, 228, 3, 151, 207, 1, 98, 160, 1, 67, 117, 1, 41, 74, 1, 17, 31,
        17, 38, 140, 7, 34, 80, 1, 17, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        37, 75, 128, 41, 76, 128, 26, 66, 116, 12, 52, 94, 2, 32, 55, 1, 10, 16,
        50, 127, 154, 37, 109, 152, 16, 82, 121, 5, 59, 85, 1, 35, 54, 1, 13, 20,
        40, 142, 167, 17, 110, 157, 2, 71, 112, 1, 44, 72, 1, 27, 45, 1, 11, 17,
        30, 175, 188, 9, 124, 169, 1, 74, 116, 1, 48, 78, 1, 30, 49, 1, 11, 18,
        10, 222, 223, 2, 150, 194, 1, 83, 128, 1, 48, 79, 1, 27, 45, 1, 11, 17,
        36, 41, 235, 29, 36, 193, 10, 27, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        85, 165, 222, 177, 162, 215, 110, 135, 195, 57, 113, 168, 23, 83, 120, 10, 49, 61,
        85, 190, 223, 36, 139, 200, 5, 90, 146, 1, 60, 103, 1, 38, 65, 1, 18, 30,
        72, 202, 223, 23, 141, 199, 2, 86, 140, 1, 56, 97, 1, 36, 61, 1, 16, 27,
        55, 218, 225, 13, 145, 200, 1, 86, 141, 1, 57, 99, 1, 35, 61, 1, 13, 22,
        15, 235, 212, 1, 132, 184, 1, 84, 139, 1, 57, 97, 1, 34, 56, 1, 14, 23,
        181, 21, 201, 61, 37, 123, 10, 38, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        47, 106, 172, 95, 104, 173, 42, 93, 159, 18, 77, 131, 4, 50, 81, 1, 17, 23,
        62, 147, 199, 44, 130, 189, 28, 102, 154, 18, 75, 115, 2, 44, 65, 1, 12, 19,
        55, 153, 210, 24, 130, 194, 3, 93, 146, 1, 61, 97, 1, 31, 50, 1, 10, 16,
        49, 186, 223, 17, 148, 204, 1, 96, 142, 1, 53, 83, 1, 26, 44, 1, 11, 17,
        13, 217, 212, 2, 136, 180, 1, 78, 124, 1, 50, 83, 1, 29, 49, 1, 14, 23,
        197, 13, 247, 82, 17, 222, 25, 17, 162, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        126, 186, 247, 234, 191, 243, 176, 177, 234, 104, 158, 220, 66, 128, 186, 55, 90, 137,
        111, 197, 242, 46, 158, 219, 9, 104, 171, 2, 65, 125, 1, 44, 80, 1, 17, 91,
        104, 208, 245, 39, 168, 224, 3, 109, 162, 1, 79, 124, 1, 50, 102, 1, 43, 102,
        84, 220, 246, 31, 177, 231, 2, 115, 180, 1, 79, 134, 1, 55, 77, 1, 60, 79,
        43, 243, 240, 8, 180, 217, 1, 115, 166, 1, 84, 121, 1, 51, 67, 1, 16, 6,
    },
    .switchable_interp_prob{235, 162, 36, 255, 34, 3, 149, 144},
    .inter_mode_prob{
        2, 173, 34, 0, 7, 145, 85, 0, 7, 166, 63, 0, 7, 94,
        66, 0, 8, 64, 46, 0, 17, 81, 31, 0, 25, 29, 30, 0,
    },
    .intra_inter_prob{9, 102, 187, 225},
    .comp_inter_prob{9, 102, 187, 225, 0},
    .single_ref_prob{33, 16, 77, 74, 142, 142, 172, 170, 238, 247},
    .comp_ref_prob{50, 126, 123, 221, 226},
    .tx_32x32_prob{3, 136, 37, 5, 52, 13},
    .tx_16x16_prob{20, 152, 15, 101},
    .tx_8x8_prob{100, 66},
    .skip_probs{192, 128, 64},
    .joints{32, 64, 96},
    .sign{128, 128},
    .classes{
        224, 144, 192, 168, 192, 176, 192, 198, 198, 245,
        216, 128, 176, 160, 176, 176, 192, 198, 198, 208,
    },
    .class_0{216, 208},
    .prob_bits{
        136, 140, 148, 160, 176, 192, 224, 234, 234, 240,
        136, 140, 148, 160, 176, 192, 224, 234, 234, 240,
    },
    .class_0_fr{128, 128, 64, 96, 112, 64, 128, 128, 64, 96, 112, 64},
    .fr{64, 96, 64, 64, 96, 64},
    .class_0_hp{160, 160},
    .high_precision{128, 128},
};

constexpr std::array<u8, 256> norm_lut{
    0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

constexpr std::array<u8, 254> map_lut{
    20, 21, 22, 23, 24, 25, 0, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
    1, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 2, 50, 51, 52, 53, 54,
    55, 56, 57, 58, 59, 60, 61, 3, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
    73, 4, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 5, 86, 87, 88, 89,
    90, 91, 92, 93, 94, 95, 96, 97, 6, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
    108, 109, 7, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 8, 122, 123, 124,
    125, 126, 127, 128, 129, 130, 131, 132, 133, 9, 134, 135, 136, 137, 138, 139, 140, 141, 142,
    143, 144, 145, 10, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 11, 158, 159,
    160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 12, 170, 171, 172, 173, 174, 175, 176, 177,
    178, 179, 180, 181, 13, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 14, 194,
    195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 15, 206, 207, 208, 209, 210, 211, 212,
    213, 214, 215, 216, 217, 16, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 17,
    230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 18, 242, 243, 244, 245, 246, 247,
    248, 249, 250, 251, 252, 253, 19,
};

// 6.2.14 Tile size calculation

[[nodiscard]] s32 CalcMinLog2TileCols(s32 frame_width) {
    const s32 sb64_cols = (frame_width + 63) / 64;
    s32 min_log2 = 0;

    while ((64 << min_log2) < sb64_cols) {
        min_log2++;
    }

    return min_log2;
}

[[nodiscard]] s32 CalcMaxLog2TileCols(s32 frame_width) {
    const s32 sb64_cols = (frame_width + 63) / 64;
    s32 max_log2 = 1;

    while ((sb64_cols >> max_log2) >= 4) {
        max_log2++;
    }

    return max_log2 - 1;
}
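
// Example (illustrative): a 1920-pixel-wide frame has (1920 + 63) / 64 = 30
// 64x64 superblock columns, so CalcMinLog2TileCols returns 0 and
// CalcMaxLog2TileCols returns 2, i.e. between 1 and 4 tile columns.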

// Recenters probability. Based on section 6.3.6 of VP9 Specification
[[nodiscard]] s32 RecenterNonNeg(s32 new_prob, s32 old_prob) {
    if (new_prob > old_prob * 2) {
        return new_prob;
    }

    if (new_prob >= old_prob) {
        return (new_prob - old_prob) * 2;
    }

    return (old_prob - new_prob) * 2 - 1;
}
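
// Worked example (illustrative): with old_prob = 100, new_prob = 102 recenters
// to (102 - 100) * 2 = 4 and new_prob = 98 to (100 - 98) * 2 - 1 = 3, so small
// deltas map to small values regardless of sign; anything above 2 * old_prob
// is passed through unchanged.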

// Adjusts old_prob depending on new_prob. Based on section 6.3.5 of VP9 Specification
[[nodiscard]] s32 RemapProbability(s32 new_prob, s32 old_prob) {
    new_prob--;
    old_prob--;

    std::size_t index{};

    if (old_prob * 2 <= 0xff) {
        index = static_cast<std::size_t>(std::max(0, RecenterNonNeg(new_prob, old_prob) - 1));
    } else {
        index = static_cast<std::size_t>(
            std::max(0, RecenterNonNeg(0xff - 1 - new_prob, 0xff - 1 - old_prob) - 1));
    }

    return static_cast<s32>(map_lut[index]);
}
} // Anonymous namespace

VP9::VP9(Host1x::Host1x& host1x_) : host1x{host1x_} {}

VP9::~VP9() = default;

void VP9::WriteProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob) {
    const bool update = new_prob != old_prob;

    writer.Write(update, diff_update_probability);

    if (update) {
        WriteProbabilityDelta(writer, new_prob, old_prob);
    }
}

template <typename T, std::size_t N>
void VP9::WriteProbabilityUpdate(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
                                 const std::array<T, N>& old_prob) {
    for (std::size_t offset = 0; offset < new_prob.size(); ++offset) {
        WriteProbabilityUpdate(writer, new_prob[offset], old_prob[offset]);
    }
}

template <typename T, std::size_t N>
void VP9::WriteProbabilityUpdateAligned4(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
                                         const std::array<T, N>& old_prob) {
    for (std::size_t offset = 0; offset < new_prob.size(); offset += 4) {
        WriteProbabilityUpdate(writer, new_prob[offset + 0], old_prob[offset + 0]);
        WriteProbabilityUpdate(writer, new_prob[offset + 1], old_prob[offset + 1]);
        WriteProbabilityUpdate(writer, new_prob[offset + 2], old_prob[offset + 2]);
    }
}
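
// Note: the offset + 3 element of each group is intentionally not written;
// every fourth byte in these tables appears to be alignment padding (see the
// zero columns in default_probs.partition_prob and inter_mode_prob above), so
// only three probabilities per group reach the bitstream.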

void VP9::WriteProbabilityDelta(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob) {
    const int delta = RemapProbability(new_prob, old_prob);

    EncodeTermSubExp(writer, delta);
}

void VP9::EncodeTermSubExp(VpxRangeEncoder& writer, s32 value) {
    if (WriteLessThan(writer, value, 16)) {
        writer.Write(value, 4);
    } else if (WriteLessThan(writer, value, 32)) {
        writer.Write(value - 16, 4);
    } else if (WriteLessThan(writer, value, 64)) {
        writer.Write(value - 32, 5);
    } else {
        value -= 64;

        constexpr s32 size = 8;

        const s32 mask = (1 << size) - 191;

        const s32 delta = value - mask;

        if (delta < 0) {
            writer.Write(value, size - 1);
        } else {
            writer.Write(delta / 2 + mask, size - 1);
            writer.Write(delta & 1, 1);
        }
    }
}
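
// Encoding ranges (sketch): values 0-15 are written directly in 4 bits,
// 16-31 and 32-63 as 4- and 5-bit offsets from their range base, and
// everything above 63 with the split two-part tail handled above; this is
// the inverse of the "decode term subexp" ranges in section 6.3.4 of the
// VP9 specification.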

bool VP9::WriteLessThan(VpxRangeEncoder& writer, s32 value, s32 test) {
    const bool is_lt = value < test;
    writer.Write(!is_lt);
    return is_lt;
}

void VP9::WriteCoefProbabilityUpdate(VpxRangeEncoder& writer, s32 tx_mode,
                                     const std::array<u8, 1728>& new_prob,
                                     const std::array<u8, 1728>& old_prob) {
    constexpr u32 block_bytes = 2 * 2 * 6 * 6 * 3;

    const auto needs_update = [&](u32 base_index) {
        return !std::equal(new_prob.begin() + base_index,
                           new_prob.begin() + base_index + block_bytes,
                           old_prob.begin() + base_index);
    };

    for (u32 block_index = 0; block_index < 4; block_index++) {
        const u32 base_index = block_index * block_bytes;
        const bool update = needs_update(base_index);
        writer.Write(update);

        if (update) {
            u32 index = base_index;
            for (s32 i = 0; i < 2; i++) {
                for (s32 j = 0; j < 2; j++) {
                    for (s32 k = 0; k < 6; k++) {
                        for (s32 l = 0; l < 6; l++) {
                            if (k != 0 || l < 3) {
                                WriteProbabilityUpdate(writer, new_prob[index + 0],
                                                       old_prob[index + 0]);
                                WriteProbabilityUpdate(writer, new_prob[index + 1],
                                                       old_prob[index + 1]);
                                WriteProbabilityUpdate(writer, new_prob[index + 2],
                                                       old_prob[index + 2]);
                            }
                            index += 3;
                        }
                    }
                }
            }
        }
        if (block_index == static_cast<u32>(tx_mode)) {
            break;
        }
    }
}

void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob) {
    const bool update = new_prob != old_prob;
    writer.Write(update, diff_update_probability);

    if (update) {
        writer.Write(new_prob >> 1, 7);
    }
}

Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) {
    PictureInfo picture_info;
    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
    Vp9PictureInfo vp9_info = picture_info.Convert();

    InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy);

    // surface_luma_offset[0:3] contains the address of the reference frame offsets in the
    // following order: last, golden, altref, current.
    std::copy(state.surface_luma_offset.begin(), state.surface_luma_offset.begin() + 4,
              vp9_info.frame_offsets.begin());

    return vp9_info;
}

void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) {
    EntropyProbs entropy;
    host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
    entropy.Convert(dst);
}

Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
    Vp9FrameContainer current_frame{};
    {
        // gpu.SyncGuestHost(); epic, why?
        current_frame.info = GetVp9PictureInfo(state);
        current_frame.bit_stream.resize(current_frame.info.bitstream_size);
        host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
                                         current_frame.bit_stream.data(),
                                         current_frame.info.bitstream_size);
    }
    if (!next_frame.bit_stream.empty()) {
        Vp9FrameContainer temp{
            .info = current_frame.info,
            .bit_stream = std::move(current_frame.bit_stream),
        };
        next_frame.info.show_frame = current_frame.info.last_frame_shown;
        current_frame.info = next_frame.info;
        current_frame.bit_stream = std::move(next_frame.bit_stream);
        next_frame = std::move(temp);
    } else {
        next_frame.info = current_frame.info;
        next_frame.bit_stream = current_frame.bit_stream;
    }
    return current_frame;
}
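
// Note: frames are queued with a one-frame delay; the previously buffered
// frame is returned for decoding while the frame just read from guest memory
// takes its place in next_frame. This appears to exist so hidden
// (show_frame == 0) frames can be reordered before reaching the decoder.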

std::vector<u8> VP9::ComposeCompressedHeader() {
    VpxRangeEncoder writer{};
    const bool update_probs = !current_frame_info.is_key_frame && current_frame_info.show_frame;
    if (!current_frame_info.lossless) {
        if (static_cast<u32>(current_frame_info.transform_mode) >= 3) {
            writer.Write(3, 2);
            writer.Write(current_frame_info.transform_mode == 4);
        } else {
            writer.Write(current_frame_info.transform_mode, 2);
        }
    }

    if (current_frame_info.transform_mode == 4) {
        // tx_mode_probs() in the spec
        WriteProbabilityUpdate(writer, current_frame_info.entropy.tx_8x8_prob,
                               prev_frame_probs.tx_8x8_prob);
        WriteProbabilityUpdate(writer, current_frame_info.entropy.tx_16x16_prob,
                               prev_frame_probs.tx_16x16_prob);
        WriteProbabilityUpdate(writer, current_frame_info.entropy.tx_32x32_prob,
                               prev_frame_probs.tx_32x32_prob);
        if (update_probs) {
            prev_frame_probs.tx_8x8_prob = current_frame_info.entropy.tx_8x8_prob;
            prev_frame_probs.tx_16x16_prob = current_frame_info.entropy.tx_16x16_prob;
            prev_frame_probs.tx_32x32_prob = current_frame_info.entropy.tx_32x32_prob;
        }
    }
    // read_coef_probs() in the spec
    WriteCoefProbabilityUpdate(writer, current_frame_info.transform_mode,
                               current_frame_info.entropy.coef_probs, prev_frame_probs.coef_probs);
    // read_skip_probs() in the spec
    WriteProbabilityUpdate(writer, current_frame_info.entropy.skip_probs,
                           prev_frame_probs.skip_probs);

    if (update_probs) {
        prev_frame_probs.coef_probs = current_frame_info.entropy.coef_probs;
        prev_frame_probs.skip_probs = current_frame_info.entropy.skip_probs;
    }

    if (!current_frame_info.intra_only) {
        // read_inter_probs() in the spec
        WriteProbabilityUpdateAligned4(writer, current_frame_info.entropy.inter_mode_prob,
                                       prev_frame_probs.inter_mode_prob);

        if (current_frame_info.interp_filter == 4) {
            // read_interp_filter_probs() in the spec
            WriteProbabilityUpdate(writer, current_frame_info.entropy.switchable_interp_prob,
                                   prev_frame_probs.switchable_interp_prob);
            if (update_probs) {
                prev_frame_probs.switchable_interp_prob =
                    current_frame_info.entropy.switchable_interp_prob;
            }
        }

        // read_is_inter_probs() in the spec
        WriteProbabilityUpdate(writer, current_frame_info.entropy.intra_inter_prob,
                               prev_frame_probs.intra_inter_prob);

        // frame_reference_mode() in the spec
        if ((current_frame_info.ref_frame_sign_bias[1] & 1) !=
                (current_frame_info.ref_frame_sign_bias[2] & 1) ||
            (current_frame_info.ref_frame_sign_bias[1] & 1) !=
                (current_frame_info.ref_frame_sign_bias[3] & 1)) {
            if (current_frame_info.reference_mode >= 1) {
                writer.Write(1, 1);
                writer.Write(current_frame_info.reference_mode == 2);
            } else {
                writer.Write(0, 1);
            }
        }

        // frame_reference_mode_probs() in the spec
        if (current_frame_info.reference_mode == 2) {
            WriteProbabilityUpdate(writer, current_frame_info.entropy.comp_inter_prob,
                                   prev_frame_probs.comp_inter_prob);
            if (update_probs) {
                prev_frame_probs.comp_inter_prob = current_frame_info.entropy.comp_inter_prob;
            }
        }

        if (current_frame_info.reference_mode != 1) {
            WriteProbabilityUpdate(writer, current_frame_info.entropy.single_ref_prob,
                                   prev_frame_probs.single_ref_prob);
            if (update_probs) {
                prev_frame_probs.single_ref_prob = current_frame_info.entropy.single_ref_prob;
            }
        }

        if (current_frame_info.reference_mode != 0) {
            WriteProbabilityUpdate(writer, current_frame_info.entropy.comp_ref_prob,
                                   prev_frame_probs.comp_ref_prob);
            if (update_probs) {
                prev_frame_probs.comp_ref_prob = current_frame_info.entropy.comp_ref_prob;
            }
        }

        // read_y_mode_probs
        for (std::size_t index = 0; index < current_frame_info.entropy.y_mode_prob.size();
             ++index) {
            WriteProbabilityUpdate(writer, current_frame_info.entropy.y_mode_prob[index],
                                   prev_frame_probs.y_mode_prob[index]);
        }

        // read_partition_probs
        WriteProbabilityUpdateAligned4(writer, current_frame_info.entropy.partition_prob,
                                       prev_frame_probs.partition_prob);

        // mv_probs
        for (s32 i = 0; i < 3; i++) {
            WriteMvProbabilityUpdate(writer, current_frame_info.entropy.joints[i],
                                     prev_frame_probs.joints[i]);
        }
        if (update_probs) {
            prev_frame_probs.inter_mode_prob = current_frame_info.entropy.inter_mode_prob;
            prev_frame_probs.intra_inter_prob = current_frame_info.entropy.intra_inter_prob;
            prev_frame_probs.y_mode_prob = current_frame_info.entropy.y_mode_prob;
            prev_frame_probs.partition_prob = current_frame_info.entropy.partition_prob;
            prev_frame_probs.joints = current_frame_info.entropy.joints;
        }

        for (s32 i = 0; i < 2; i++) {
            WriteMvProbabilityUpdate(writer, current_frame_info.entropy.sign[i],
                                     prev_frame_probs.sign[i]);
            for (s32 j = 0; j < 10; j++) {
                const int index = i * 10 + j;
                WriteMvProbabilityUpdate(writer, current_frame_info.entropy.classes[index],
                                         prev_frame_probs.classes[index]);
            }
            WriteMvProbabilityUpdate(writer, current_frame_info.entropy.class_0[i],
                                     prev_frame_probs.class_0[i]);

            for (s32 j = 0; j < 10; j++) {
                const int index = i * 10 + j;
                WriteMvProbabilityUpdate(writer, current_frame_info.entropy.prob_bits[index],
                                         prev_frame_probs.prob_bits[index]);
            }
        }

        for (s32 i = 0; i < 2; i++) {
            for (s32 j = 0; j < 2; j++) {
                for (s32 k = 0; k < 3; k++) {
                    const int index = i * 2 * 3 + j * 3 + k;
                    WriteMvProbabilityUpdate(writer, current_frame_info.entropy.class_0_fr[index],
                                             prev_frame_probs.class_0_fr[index]);
                }
            }

            for (s32 j = 0; j < 3; j++) {
                const int index = i * 3 + j;
                WriteMvProbabilityUpdate(writer, current_frame_info.entropy.fr[index],
                                         prev_frame_probs.fr[index]);
            }
        }

        if (current_frame_info.allow_high_precision_mv) {
            for (s32 index = 0; index < 2; index++) {
                WriteMvProbabilityUpdate(writer, current_frame_info.entropy.class_0_hp[index],
                                         prev_frame_probs.class_0_hp[index]);
                WriteMvProbabilityUpdate(writer, current_frame_info.entropy.high_precision[index],
                                         prev_frame_probs.high_precision[index]);
            }
        }

        // save previous probs
        if (update_probs) {
            prev_frame_probs.sign = current_frame_info.entropy.sign;
            prev_frame_probs.classes = current_frame_info.entropy.classes;
            prev_frame_probs.class_0 = current_frame_info.entropy.class_0;
            prev_frame_probs.prob_bits = current_frame_info.entropy.prob_bits;
            prev_frame_probs.class_0_fr = current_frame_info.entropy.class_0_fr;
            prev_frame_probs.fr = current_frame_info.entropy.fr;
            prev_frame_probs.class_0_hp = current_frame_info.entropy.class_0_hp;
            prev_frame_probs.high_precision = current_frame_info.entropy.high_precision;
        }
    }
    writer.End();
    return writer.GetBuffer();
}

VpxBitStreamWriter VP9::ComposeUncompressedHeader() {
    VpxBitStreamWriter uncomp_writer{};

    uncomp_writer.WriteU(2, 2);                                      // Frame marker.
    uncomp_writer.WriteU(0, 2);                                      // Profile.
    uncomp_writer.WriteBit(false);                                   // Show existing frame.
    uncomp_writer.WriteBit(!current_frame_info.is_key_frame);        // Frame type (0: key frame).
    uncomp_writer.WriteBit(current_frame_info.show_frame);           // Show frame.
    uncomp_writer.WriteBit(current_frame_info.error_resilient_mode); // Error resilience.

    if (current_frame_info.is_key_frame) {
        uncomp_writer.WriteU(frame_sync_code, 24);
        uncomp_writer.WriteU(0, 3); // Color space.
        uncomp_writer.WriteU(0, 1); // Color range.
        uncomp_writer.WriteU(current_frame_info.frame_size.width - 1, 16);
        uncomp_writer.WriteU(current_frame_info.frame_size.height - 1, 16);
        uncomp_writer.WriteBit(false); // Render and frame size different.

        // Reset context
        prev_frame_probs = default_probs;
        swap_ref_indices = false;
        loop_filter_ref_deltas.fill(0);
        loop_filter_mode_deltas.fill(0);
        frame_ctxs.fill(default_probs);

        // intra only, meaning the frame can be recreated with no other references
        current_frame_info.intra_only = true;
    } else {
        if (!current_frame_info.show_frame) {
            uncomp_writer.WriteBit(current_frame_info.intra_only);
        } else {
            current_frame_info.intra_only = false;
        }
        if (!current_frame_info.error_resilient_mode) {
            uncomp_writer.WriteU(0, 2); // Reset frame context.
        }
        const auto& curr_offsets = current_frame_info.frame_offsets;
        const auto& next_offsets = next_frame.info.frame_offsets;
        const bool ref_frames_different = curr_offsets[1] != curr_offsets[2];
        const bool next_references_swap =
            (next_offsets[1] == curr_offsets[2]) || (next_offsets[2] == curr_offsets[1]);
        const bool needs_ref_swap = ref_frames_different && next_references_swap;
        if (needs_ref_swap) {
            swap_ref_indices = !swap_ref_indices;
        }
        union {
            u32 raw;
            BitField<0, 1, u32> refresh_last;
            BitField<1, 2, u32> refresh_golden;
            BitField<2, 1, u32> refresh_alt;
        } refresh_frame_flags;

        refresh_frame_flags.raw = 0;
        for (u32 index = 0; index < 3; ++index) {
            // Refresh indices that use the current frame as an index
            if (curr_offsets[3] == next_offsets[index]) {
                refresh_frame_flags.raw |= 1u << index;
            }
        }
        if (swap_ref_indices) {
            const u32 temp = refresh_frame_flags.refresh_golden;
            refresh_frame_flags.refresh_golden.Assign(refresh_frame_flags.refresh_alt.Value());
            refresh_frame_flags.refresh_alt.Assign(temp);
        }
        if (current_frame_info.intra_only) {
            uncomp_writer.WriteU(frame_sync_code, 24);
            uncomp_writer.WriteU(refresh_frame_flags.raw, 8);
            uncomp_writer.WriteU(current_frame_info.frame_size.width - 1, 16);
            uncomp_writer.WriteU(current_frame_info.frame_size.height - 1, 16);
            uncomp_writer.WriteBit(false); // Render and frame size different.
        } else {
            const bool swap_indices = needs_ref_swap ^ swap_ref_indices;
            const auto ref_frame_index = swap_indices ? std::array{0, 2, 1} : std::array{0, 1, 2};
            uncomp_writer.WriteU(refresh_frame_flags.raw, 8);
            for (size_t index = 1; index < 4; index++) {
                uncomp_writer.WriteU(ref_frame_index[index - 1], 3);
                uncomp_writer.WriteU(current_frame_info.ref_frame_sign_bias[index], 1);
            }
            uncomp_writer.WriteBit(true);  // Frame size with refs.
            uncomp_writer.WriteBit(false); // Render and frame size different.
            uncomp_writer.WriteBit(current_frame_info.allow_high_precision_mv);
            uncomp_writer.WriteBit(current_frame_info.interp_filter == 4);

            if (current_frame_info.interp_filter != 4) {
                uncomp_writer.WriteU(current_frame_info.interp_filter, 2);
            }
        }
    }

    if (!current_frame_info.error_resilient_mode) {
        uncomp_writer.WriteBit(true); // Refresh frame context. TODO: where does this come from?
        uncomp_writer.WriteBit(true); // Frame parallel decoding mode.
    }

    int frame_ctx_idx = 0;
    if (!current_frame_info.show_frame) {
        frame_ctx_idx = 1;
    }

    uncomp_writer.WriteU(frame_ctx_idx, 2);       // Frame context index.
    prev_frame_probs = frame_ctxs[frame_ctx_idx]; // reference probabilities for compressed header
    frame_ctxs[frame_ctx_idx] = current_frame_info.entropy;

    uncomp_writer.WriteU(current_frame_info.first_level, 6);
    uncomp_writer.WriteU(current_frame_info.sharpness_level, 3);
    uncomp_writer.WriteBit(current_frame_info.mode_ref_delta_enabled);

    if (current_frame_info.mode_ref_delta_enabled) {
        // check if ref deltas are different, update accordingly
        std::array<bool, 4> update_loop_filter_ref_deltas;
        std::array<bool, 2> update_loop_filter_mode_deltas;

        bool loop_filter_delta_update = false;

        for (std::size_t index = 0; index < current_frame_info.ref_deltas.size(); index++) {
            const s8 old_deltas = loop_filter_ref_deltas[index];
            const s8 new_deltas = current_frame_info.ref_deltas[index];
            const bool differing_delta = old_deltas != new_deltas;

            update_loop_filter_ref_deltas[index] = differing_delta;
            loop_filter_delta_update |= differing_delta;
        }

        for (std::size_t index = 0; index < current_frame_info.mode_deltas.size(); index++) {
            const s8 old_deltas = loop_filter_mode_deltas[index];
            const s8 new_deltas = current_frame_info.mode_deltas[index];
            const bool differing_delta = old_deltas != new_deltas;

            update_loop_filter_mode_deltas[index] = differing_delta;
            loop_filter_delta_update |= differing_delta;
        }

        uncomp_writer.WriteBit(loop_filter_delta_update);

        if (loop_filter_delta_update) {
            for (std::size_t index = 0; index < current_frame_info.ref_deltas.size(); index++) {
                uncomp_writer.WriteBit(update_loop_filter_ref_deltas[index]);

                if (update_loop_filter_ref_deltas[index]) {
                    uncomp_writer.WriteS(current_frame_info.ref_deltas[index], 6);
                }
            }

            for (std::size_t index = 0; index < current_frame_info.mode_deltas.size(); index++) {
                uncomp_writer.WriteBit(update_loop_filter_mode_deltas[index]);

                if (update_loop_filter_mode_deltas[index]) {
                    uncomp_writer.WriteS(current_frame_info.mode_deltas[index], 6);
                }
            }
            // save new deltas
            loop_filter_ref_deltas = current_frame_info.ref_deltas;
            loop_filter_mode_deltas = current_frame_info.mode_deltas;
        }
    }

    uncomp_writer.WriteU(current_frame_info.base_q_index, 8);

    uncomp_writer.WriteDeltaQ(current_frame_info.y_dc_delta_q);
    uncomp_writer.WriteDeltaQ(current_frame_info.uv_dc_delta_q);
    uncomp_writer.WriteDeltaQ(current_frame_info.uv_ac_delta_q);

    ASSERT(!current_frame_info.segment_enabled);
    uncomp_writer.WriteBit(false); // Segmentation enabled (TODO).

    const s32 min_tile_cols_log2 = CalcMinLog2TileCols(current_frame_info.frame_size.width);
    const s32 max_tile_cols_log2 = CalcMaxLog2TileCols(current_frame_info.frame_size.width);

    const s32 tile_cols_log2_diff = current_frame_info.log2_tile_cols - min_tile_cols_log2;
    const s32 tile_cols_log2_inc_mask = (1 << tile_cols_log2_diff) - 1;

    // If it's less than the maximum, we need to add an extra 0 on the bitstream
    // to indicate that it should stop reading.
    if (current_frame_info.log2_tile_cols < max_tile_cols_log2) {
        uncomp_writer.WriteU(tile_cols_log2_inc_mask << 1, tile_cols_log2_diff + 1);
    } else {
        uncomp_writer.WriteU(tile_cols_log2_inc_mask, tile_cols_log2_diff);
    }
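
    // Example (illustrative): with min = 0, max = 2 and log2_tile_cols = 1,
    // tile_cols_log2_diff is 1 and the increment mask is 0b1; since 1 < max,
    // the value 0b10 is written in two bits: a 1 for the single increment and
    // a trailing 0 telling the decoder to stop incrementing.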

    const bool tile_rows_log2_is_nonzero = current_frame_info.log2_tile_rows != 0;

    uncomp_writer.WriteBit(tile_rows_log2_is_nonzero);

    if (tile_rows_log2_is_nonzero) {
        uncomp_writer.WriteBit(current_frame_info.log2_tile_rows > 1);
    }

    return uncomp_writer;
}

void VP9::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
    std::vector<u8> bitstream;
    {
        Vp9FrameContainer curr_frame = GetCurrentFrame(state);
        current_frame_info = curr_frame.info;
        bitstream = std::move(curr_frame.bit_stream);
    }
    // The uncompressed header routine sets PrevProb parameters needed for the compressed header
    auto uncomp_writer = ComposeUncompressedHeader();
    std::vector<u8> compressed_header = ComposeCompressedHeader();

    uncomp_writer.WriteU(static_cast<s32>(compressed_header.size()), 16);
    uncomp_writer.Flush();
    std::vector<u8> uncompressed_header = uncomp_writer.GetByteArray();

    // Write headers and frame to buffer
    frame.resize(uncompressed_header.size() + compressed_header.size() + bitstream.size());
    std::copy(uncompressed_header.begin(), uncompressed_header.end(), frame.begin());
    std::copy(compressed_header.begin(), compressed_header.end(),
              frame.begin() + uncompressed_header.size());
    std::copy(bitstream.begin(), bitstream.end(),
              frame.begin() + uncompressed_header.size() + compressed_header.size());
}

VpxRangeEncoder::VpxRangeEncoder() {
    Write(false);
}

VpxRangeEncoder::~VpxRangeEncoder() = default;

void VpxRangeEncoder::Write(s32 value, s32 value_size) {
    for (s32 bit = value_size - 1; bit >= 0; bit--) {
        Write(((value >> bit) & 1) != 0);
    }
}

void VpxRangeEncoder::Write(bool bit) {
    Write(bit, half_probability);
}

void VpxRangeEncoder::Write(bool bit, s32 probability) {
    u32 local_range = range;
    const u32 split = 1 + (((local_range - 1) * static_cast<u32>(probability)) >> 8);
    local_range = split;

    if (bit) {
        low_value += split;
        local_range = range - split;
    }

    s32 shift = static_cast<s32>(norm_lut[local_range]);
    local_range <<= shift;
    count += shift;

    if (count >= 0) {
        const s32 offset = shift - count;

        if (((low_value << (offset - 1)) >> 31) != 0) {
            const s32 current_pos = static_cast<s32>(base_stream.GetPosition());
            base_stream.Seek(-1, Common::SeekOrigin::FromCurrentPos);
            while (PeekByte() == 0xff) {
                base_stream.WriteByte(0);

                base_stream.Seek(-2, Common::SeekOrigin::FromCurrentPos);
            }
            base_stream.WriteByte(static_cast<u8>((PeekByte() + 1)));
            base_stream.Seek(current_pos, Common::SeekOrigin::SetOrigin);
        }
        base_stream.WriteByte(static_cast<u8>((low_value >> (24 - offset))));

        low_value <<= offset;
        shift = count;
        low_value &= 0xffffff;
        count -= 8;
    }

    low_value <<= shift;
    range = local_range;
}
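
// The 0xff walk-back in Write() above is carry propagation: when adding the
// split overflows low_value past 24 bits, every already-emitted 0xff byte
// becomes 0x00 and the first earlier non-0xff byte is incremented, the usual
// carry treatment in byte-oriented range coders.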

void VpxRangeEncoder::End() {
    for (std::size_t index = 0; index < 32; ++index) {
        Write(false);
    }
}

u8 VpxRangeEncoder::PeekByte() {
    const u8 value = base_stream.ReadByte();
    base_stream.Seek(-1, Common::SeekOrigin::FromCurrentPos);

    return value;
}

VpxBitStreamWriter::VpxBitStreamWriter() = default;

VpxBitStreamWriter::~VpxBitStreamWriter() = default;

void VpxBitStreamWriter::WriteU(u32 value, u32 value_size) {
    WriteBits(value, value_size);
}

void VpxBitStreamWriter::WriteS(s32 value, u32 value_size) {
    const bool sign = value < 0;
    if (sign) {
        value = -value;
    }

    WriteBits(static_cast<u32>(value << 1) | (sign ? 1 : 0), value_size + 1);
}

void VpxBitStreamWriter::WriteDeltaQ(u32 value) {
    const bool delta_coded = value != 0;
    WriteBit(delta_coded);

    if (delta_coded) {
        WriteBits(value, 4);
    }
}
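
// Example (illustrative): WriteDeltaQ(0) emits the single bit 0 (delta not
// coded), while WriteDeltaQ(5) emits a 1 followed by the four bits 0101.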

void VpxBitStreamWriter::WriteBits(u32 value, u32 bit_count) {
    s32 value_pos = 0;
    s32 remaining = bit_count;

    while (remaining > 0) {
        s32 copy_size = remaining;

        const s32 free = GetFreeBufferBits();

        if (copy_size > free) {
            copy_size = free;
        }

        const s32 mask = (1 << copy_size) - 1;

        const s32 src_shift = (bit_count - value_pos) - copy_size;
        const s32 dst_shift = (buffer_size - buffer_pos) - copy_size;

        buffer |= ((value >> src_shift) & mask) << dst_shift;

        value_pos += copy_size;
        buffer_pos += copy_size;
        remaining -= copy_size;
    }
}
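
// Example (illustrative): writing the 10-bit value 0b11'0000'1111 first
// copies the top 8 bits (0b1100'0011) into the byte buffer, flushes that
// byte once it is full, and leaves the low 2 bits pending in the buffer for
// the next write or Flush().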

void VpxBitStreamWriter::WriteBit(bool state) {
    WriteBits(state ? 1 : 0, 1);
}

s32 VpxBitStreamWriter::GetFreeBufferBits() {
    if (buffer_pos == buffer_size) {
        Flush();
    }

    return buffer_size - buffer_pos;
}

void VpxBitStreamWriter::Flush() {
    if (buffer_pos == 0) {
        return;
    }
    byte_array.push_back(static_cast<u8>(buffer));
    buffer = 0;
    buffer_pos = 0;
}

std::vector<u8>& VpxBitStreamWriter::GetByteArray() {
    return byte_array;
}

const std::vector<u8>& VpxBitStreamWriter::GetByteArray() const {
    return byte_array;
}

} // namespace Tegra::Decoder
198
src/video_core/host1x/codecs/vp9.h
Executable file
@@ -0,0 +1,198 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <vector>

#include "common/common_types.h"
#include "common/stream.h"
#include "video_core/host1x/codecs/vp9_types.h"
#include "video_core/host1x/nvdec_common.h"

namespace Tegra {

namespace Host1x {
class Host1x;
} // namespace Host1x

namespace Decoder {

/// The VpxRangeEncoder and VpxBitStreamWriter classes are used to compose the
/// VP9 header bitstreams.

class VpxRangeEncoder {
public:
    VpxRangeEncoder();
    ~VpxRangeEncoder();

    VpxRangeEncoder(const VpxRangeEncoder&) = delete;
    VpxRangeEncoder& operator=(const VpxRangeEncoder&) = delete;

    VpxRangeEncoder(VpxRangeEncoder&&) = default;
    VpxRangeEncoder& operator=(VpxRangeEncoder&&) = default;

    /// Writes the rightmost value_size bits from value into the stream
    void Write(s32 value, s32 value_size);

    /// Writes a single bit with half probability
    void Write(bool bit);

    /// Writes a bit to the base_stream encoded with the given probability
    void Write(bool bit, s32 probability);

    /// Signals the end of the bitstream
    void End();

    [[nodiscard]] std::vector<u8>& GetBuffer() {
        return base_stream.GetBuffer();
    }

    [[nodiscard]] const std::vector<u8>& GetBuffer() const {
        return base_stream.GetBuffer();
    }

private:
    u8 PeekByte();
    Common::Stream base_stream{};
    u32 low_value{};
    u32 range{0xff};
    s32 count{-24};
    s32 half_probability{128};
};
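
// Minimal usage sketch (illustrative only); probabilities are eight-bit
// values where 128 means both outcomes are equally likely:
//
//   VpxRangeEncoder enc;
//   enc.Write(true, 192); // one bit with P(false) = 192/256
//   enc.Write(false);     // one bit at half probability
//   enc.End();            // terminate the range-coded stream
//   const std::vector<u8>& bytes = enc.GetBuffer();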
|
||||||
|
|
||||||
|
class VpxBitStreamWriter {
|
||||||
|
public:
|
||||||
|
VpxBitStreamWriter();
|
||||||
|
~VpxBitStreamWriter();
|
||||||
|
|
||||||
|
VpxBitStreamWriter(const VpxBitStreamWriter&) = delete;
|
||||||
|
VpxBitStreamWriter& operator=(const VpxBitStreamWriter&) = delete;
|
||||||
|
|
||||||
|
VpxBitStreamWriter(VpxBitStreamWriter&&) = default;
|
||||||
|
    VpxBitStreamWriter& operator=(VpxBitStreamWriter&&) = default;

    /// Write an unsigned integer value
    void WriteU(u32 value, u32 value_size);

    /// Write a signed integer value
    void WriteS(s32 value, u32 value_size);

    /// Based on 6.2.10 of VP9 Spec, writes a delta coded value
    void WriteDeltaQ(u32 value);

    /// Write a single bit.
    void WriteBit(bool state);

    /// Pushes the current buffer into byte_array and resets the buffer
    void Flush();

    /// Returns byte_array
    [[nodiscard]] std::vector<u8>& GetByteArray();

    /// Returns const byte_array
    [[nodiscard]] const std::vector<u8>& GetByteArray() const;

private:
    /// Write bit_count bits from value into buffer
    void WriteBits(u32 value, u32 bit_count);

    /// Gets next available position in buffer, invokes Flush() if buffer is full
    s32 GetFreeBufferBits();

    s32 buffer_size{8};

    s32 buffer{};
    s32 buffer_pos{};
    std::vector<u8> byte_array;
};

class VP9 {
public:
    explicit VP9(Host1x::Host1x& host1x);
    ~VP9();

    VP9(const VP9&) = delete;
    VP9& operator=(const VP9&) = delete;

    VP9(VP9&&) = default;
    VP9& operator=(VP9&&) = delete;

    /// Composes the VP9 frame from the GPU state information.
    /// Based on the official VP9 spec documentation
    void ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state);

    /// Returns true if the most recent frame was a hidden frame.
    [[nodiscard]] bool WasFrameHidden() const {
        return !current_frame_info.show_frame;
    }

    /// Returns a const reference to the composed frame data.
    [[nodiscard]] const std::vector<u8>& GetFrameBytes() const {
        return frame;
    }

private:
    /// Generates compressed header probability updates in the bitstream writer
    template <typename T, std::size_t N>
    void WriteProbabilityUpdate(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
                                const std::array<T, N>& old_prob);

    /// Generates compressed header probability updates in the bitstream writer.
    /// If probs are not equal, WriteProbabilityDelta is invoked
    void WriteProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);

    /// Generates compressed header probability deltas in the bitstream writer
    void WriteProbabilityDelta(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);

    /// Inverse of 6.3.4 Decode term subexp
    void EncodeTermSubExp(VpxRangeEncoder& writer, s32 value);

    /// Writes if the value is less than the test value
    bool WriteLessThan(VpxRangeEncoder& writer, s32 value, s32 test);

    /// Writes probability updates for the Coef probabilities
    void WriteCoefProbabilityUpdate(VpxRangeEncoder& writer, s32 tx_mode,
                                    const std::array<u8, 1728>& new_prob,
                                    const std::array<u8, 1728>& old_prob);

    /// Write probabilities for 4-byte aligned structures
    template <typename T, std::size_t N>
    void WriteProbabilityUpdateAligned4(VpxRangeEncoder& writer, const std::array<T, N>& new_prob,
                                        const std::array<T, N>& old_prob);

    /// Write motion vector probability updates. 6.3.17 in the spec
    void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);

    /// Returns VP9 information from NVDEC provided offset and size
    [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(
        const Host1x::NvdecCommon::NvdecRegisters& state);

    /// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct
    void InsertEntropy(u64 offset, Vp9EntropyProbs& dst);

    /// Returns frame to be decoded after buffering
    [[nodiscard]] Vp9FrameContainer GetCurrentFrame(
        const Host1x::NvdecCommon::NvdecRegisters& state);

    /// Use NVDEC provided information to compose the headers for the current frame
    [[nodiscard]] std::vector<u8> ComposeCompressedHeader();
    [[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader();

    Host1x::Host1x& host1x;
    std::vector<u8> frame;

    std::array<s8, 4> loop_filter_ref_deltas{};
    std::array<s8, 2> loop_filter_mode_deltas{};

    Vp9FrameContainer next_frame{};
    std::array<Vp9EntropyProbs, 4> frame_ctxs{};
    bool swap_ref_indices{};

    Vp9PictureInfo current_frame_info{};
    Vp9EntropyProbs prev_frame_probs{};
};

} // namespace Decoder
} // namespace Tegra
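Taken together, VpxBitStreamWriter is a byte-oriented bit packer. A minimal usage sketch, assuming WriteU packs value_size bits most-significant-first and Flush() commits the final partial byte into byte_array (both inferred from the declarations above, not verified against the implementation):

    VpxBitStreamWriter writer;
    writer.WriteU(2, 2);    // a 2-bit unsigned field
    writer.WriteBit(true);  // a single flag bit
    writer.WriteDeltaQ(0);  // delta-coded value per VP9 spec 6.2.10
    writer.Flush();         // commit the partially filled byte into byte_array
    const std::vector<u8>& bytes = writer.GetByteArray();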
305
src/video_core/host1x/codecs/vp9_types.h
Executable file
@@ -0,0 +1,305 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>
#include <vector>
#include "common/common_funcs.h"
#include "common/common_types.h"

namespace Tegra {

namespace Decoder {
struct Vp9FrameDimensions {
    s16 width;
    s16 height;
    s16 luma_pitch;
    s16 chroma_pitch;
};
static_assert(sizeof(Vp9FrameDimensions) == 0x8, "Vp9FrameDimensions is an invalid size");

enum class FrameFlags : u32 {
    IsKeyFrame = 1 << 0,
    LastFrameIsKeyFrame = 1 << 1,
    FrameSizeChanged = 1 << 2,
    ErrorResilientMode = 1 << 3,
    LastShowFrame = 1 << 4,
    IntraOnly = 1 << 5,
};
DECLARE_ENUM_FLAG_OPERATORS(FrameFlags)

enum class TxSize {
    Tx4x4 = 0,   // 4x4 transform
    Tx8x8 = 1,   // 8x8 transform
    Tx16x16 = 2, // 16x16 transform
    Tx32x32 = 3, // 32x32 transform
    TxSizes = 4
};

enum class TxMode {
    Only4X4 = 0,      // Only 4x4 transform used
    Allow8X8 = 1,     // Allow block transform size up to 8x8
    Allow16X16 = 2,   // Allow block transform size up to 16x16
    Allow32X32 = 3,   // Allow block transform size up to 32x32
    TxModeSelect = 4, // Transform specified for each block
    TxModes = 5
};

struct Segmentation {
    u8 enabled;
    u8 update_map;
    u8 temporal_update;
    u8 abs_delta;
    std::array<u32, 8> feature_mask;
    std::array<std::array<s16, 4>, 8> feature_data;
};
static_assert(sizeof(Segmentation) == 0x64, "Segmentation is an invalid size");

struct LoopFilter {
    u8 mode_ref_delta_enabled;
    std::array<s8, 4> ref_deltas;
    std::array<s8, 2> mode_deltas;
};
static_assert(sizeof(LoopFilter) == 0x7, "LoopFilter is an invalid size");

struct Vp9EntropyProbs {
    std::array<u8, 36> y_mode_prob;           ///< 0x0000
    std::array<u8, 64> partition_prob;        ///< 0x0024
    std::array<u8, 1728> coef_probs;          ///< 0x0064
    std::array<u8, 8> switchable_interp_prob; ///< 0x0724
    std::array<u8, 28> inter_mode_prob;       ///< 0x072C
    std::array<u8, 4> intra_inter_prob;       ///< 0x0748
    std::array<u8, 5> comp_inter_prob;        ///< 0x074C
    std::array<u8, 10> single_ref_prob;       ///< 0x0751
    std::array<u8, 5> comp_ref_prob;          ///< 0x075B
    std::array<u8, 6> tx_32x32_prob;          ///< 0x0760
    std::array<u8, 4> tx_16x16_prob;          ///< 0x0766
    std::array<u8, 2> tx_8x8_prob;            ///< 0x076A
    std::array<u8, 3> skip_probs;             ///< 0x076C
    std::array<u8, 3> joints;                 ///< 0x076F
    std::array<u8, 2> sign;                   ///< 0x0772
    std::array<u8, 20> classes;               ///< 0x0774
    std::array<u8, 2> class_0;                ///< 0x0788
    std::array<u8, 20> prob_bits;             ///< 0x078A
    std::array<u8, 12> class_0_fr;            ///< 0x079E
    std::array<u8, 6> fr;                     ///< 0x07AA
    std::array<u8, 2> class_0_hp;             ///< 0x07B0
    std::array<u8, 2> high_precision;         ///< 0x07B2
};
static_assert(sizeof(Vp9EntropyProbs) == 0x7B4, "Vp9EntropyProbs is an invalid size");

struct Vp9PictureInfo {
    u32 bitstream_size;
    std::array<u64, 4> frame_offsets;
    std::array<s8, 4> ref_frame_sign_bias;
    s32 base_q_index;
    s32 y_dc_delta_q;
    s32 uv_dc_delta_q;
    s32 uv_ac_delta_q;
    s32 transform_mode;
    s32 interp_filter;
    s32 reference_mode;
    s32 log2_tile_cols;
    s32 log2_tile_rows;
    std::array<s8, 4> ref_deltas;
    std::array<s8, 2> mode_deltas;
    Vp9EntropyProbs entropy;
    Vp9FrameDimensions frame_size;
    u8 first_level;
    u8 sharpness_level;
    bool is_key_frame;
    bool intra_only;
    bool last_frame_was_key;
    bool error_resilient_mode;
    bool last_frame_shown;
    bool show_frame;
    bool lossless;
    bool allow_high_precision_mv;
    bool segment_enabled;
    bool mode_ref_delta_enabled;
};

struct Vp9FrameContainer {
    Vp9PictureInfo info{};
    std::vector<u8> bit_stream;
};

struct PictureInfo {
    INSERT_PADDING_WORDS_NOINIT(12);       ///< 0x00
    u32 bitstream_size;                    ///< 0x30
    INSERT_PADDING_WORDS_NOINIT(5);        ///< 0x34
    Vp9FrameDimensions last_frame_size;    ///< 0x48
    Vp9FrameDimensions golden_frame_size;  ///< 0x50
    Vp9FrameDimensions alt_frame_size;     ///< 0x58
    Vp9FrameDimensions current_frame_size; ///< 0x60
    FrameFlags vp9_flags;                  ///< 0x68
    std::array<s8, 4> ref_frame_sign_bias; ///< 0x6C
    u8 first_level;                        ///< 0x70
    u8 sharpness_level;                    ///< 0x71
    u8 base_q_index;                       ///< 0x72
    u8 y_dc_delta_q;                       ///< 0x73
    u8 uv_ac_delta_q;                      ///< 0x74
    u8 uv_dc_delta_q;                      ///< 0x75
    u8 lossless;                           ///< 0x76
    u8 tx_mode;                            ///< 0x77
    u8 allow_high_precision_mv;            ///< 0x78
    u8 interp_filter;                      ///< 0x79
    u8 reference_mode;                     ///< 0x7A
    INSERT_PADDING_BYTES_NOINIT(3);        ///< 0x7B
    u8 log2_tile_cols;                     ///< 0x7E
    u8 log2_tile_rows;                     ///< 0x7F
    Segmentation segmentation;             ///< 0x80
    LoopFilter loop_filter;                ///< 0xE4
    INSERT_PADDING_BYTES_NOINIT(21);       ///< 0xEB

    [[nodiscard]] Vp9PictureInfo Convert() const {
        return {
            .bitstream_size = bitstream_size,
            .frame_offsets{},
            .ref_frame_sign_bias = ref_frame_sign_bias,
            .base_q_index = base_q_index,
            .y_dc_delta_q = y_dc_delta_q,
            .uv_dc_delta_q = uv_dc_delta_q,
            .uv_ac_delta_q = uv_ac_delta_q,
            .transform_mode = tx_mode,
            .interp_filter = interp_filter,
            .reference_mode = reference_mode,
            .log2_tile_cols = log2_tile_cols,
            .log2_tile_rows = log2_tile_rows,
            .ref_deltas = loop_filter.ref_deltas,
            .mode_deltas = loop_filter.mode_deltas,
            .entropy{},
            .frame_size = current_frame_size,
            .first_level = first_level,
            .sharpness_level = sharpness_level,
            .is_key_frame = True(vp9_flags & FrameFlags::IsKeyFrame),
            .intra_only = True(vp9_flags & FrameFlags::IntraOnly),
            .last_frame_was_key = True(vp9_flags & FrameFlags::LastFrameIsKeyFrame),
            .error_resilient_mode = True(vp9_flags & FrameFlags::ErrorResilientMode),
            .last_frame_shown = True(vp9_flags & FrameFlags::LastShowFrame),
            .show_frame = true,
            .lossless = lossless != 0,
            .allow_high_precision_mv = allow_high_precision_mv != 0,
            .segment_enabled = segmentation.enabled != 0,
            .mode_ref_delta_enabled = loop_filter.mode_ref_delta_enabled != 0,
        };
    }
};
static_assert(sizeof(PictureInfo) == 0x100, "PictureInfo is an invalid size");

struct EntropyProbs {
    INSERT_PADDING_BYTES_NOINIT(1024);                 ///< 0x0000
    std::array<u8, 28> inter_mode_prob;                ///< 0x0400
    std::array<u8, 4> intra_inter_prob;                ///< 0x041C
    INSERT_PADDING_BYTES_NOINIT(80);                   ///< 0x0420
    std::array<u8, 2> tx_8x8_prob;                     ///< 0x0470
    std::array<u8, 4> tx_16x16_prob;                   ///< 0x0472
    std::array<u8, 6> tx_32x32_prob;                   ///< 0x0476
    std::array<u8, 4> y_mode_prob_e8;                  ///< 0x047C
    std::array<std::array<u8, 8>, 4> y_mode_prob_e0e7; ///< 0x0480
    INSERT_PADDING_BYTES_NOINIT(64);                   ///< 0x04A0
    std::array<u8, 64> partition_prob;                 ///< 0x04E0
    INSERT_PADDING_BYTES_NOINIT(10);                   ///< 0x0520
    std::array<u8, 8> switchable_interp_prob;          ///< 0x052A
    std::array<u8, 5> comp_inter_prob;                 ///< 0x0532
    std::array<u8, 3> skip_probs;                      ///< 0x0537
    INSERT_PADDING_BYTES_NOINIT(1);                    ///< 0x053A
    std::array<u8, 3> joints;                          ///< 0x053B
    std::array<u8, 2> sign;                            ///< 0x053E
    std::array<u8, 2> class_0;                         ///< 0x0540
    std::array<u8, 6> fr;                              ///< 0x0542
    std::array<u8, 2> class_0_hp;                      ///< 0x0548
    std::array<u8, 2> high_precision;                  ///< 0x054A
    std::array<u8, 20> classes;                        ///< 0x054C
    std::array<u8, 12> class_0_fr;                     ///< 0x0560
    std::array<u8, 20> pred_bits;                      ///< 0x056C
    std::array<u8, 10> single_ref_prob;                ///< 0x0580
    std::array<u8, 5> comp_ref_prob;                   ///< 0x058A
    INSERT_PADDING_BYTES_NOINIT(17);                   ///< 0x058F
    std::array<u8, 2304> coef_probs;                   ///< 0x05A0

    void Convert(Vp9EntropyProbs& fc) {
        fc.inter_mode_prob = inter_mode_prob;
        fc.intra_inter_prob = intra_inter_prob;
        fc.tx_8x8_prob = tx_8x8_prob;
        fc.tx_16x16_prob = tx_16x16_prob;
        fc.tx_32x32_prob = tx_32x32_prob;

        for (std::size_t i = 0; i < 4; i++) {
            for (std::size_t j = 0; j < 9; j++) {
                fc.y_mode_prob[j + 9 * i] = j < 8 ? y_mode_prob_e0e7[i][j] : y_mode_prob_e8[i];
            }
        }

        fc.partition_prob = partition_prob;
        fc.switchable_interp_prob = switchable_interp_prob;
        fc.comp_inter_prob = comp_inter_prob;
        fc.skip_probs = skip_probs;
        fc.joints = joints;
        fc.sign = sign;
        fc.class_0 = class_0;
        fc.fr = fr;
        fc.class_0_hp = class_0_hp;
        fc.high_precision = high_precision;
        fc.classes = classes;
        fc.class_0_fr = class_0_fr;
        fc.prob_bits = pred_bits;
        fc.single_ref_prob = single_ref_prob;
        fc.comp_ref_prob = comp_ref_prob;

        // Skip the 4th element as it goes unused
        for (std::size_t i = 0; i < coef_probs.size(); i += 4) {
            const std::size_t j = i - i / 4;
            fc.coef_probs[j] = coef_probs[i];
            fc.coef_probs[j + 1] = coef_probs[i + 1];
            fc.coef_probs[j + 2] = coef_probs[i + 2];
        }
    }
};
static_assert(sizeof(EntropyProbs) == 0xEA0, "EntropyProbs is an invalid size");

enum class Ref { Last, Golden, AltRef };

struct RefPoolElement {
    s64 frame{};
    Ref ref{};
    bool refresh{};
};

#define ASSERT_POSITION(field_name, position)                                                      \
    static_assert(offsetof(Vp9EntropyProbs, field_name) == position,                               \
                  "Field " #field_name " has invalid position")

ASSERT_POSITION(partition_prob, 0x0024);
ASSERT_POSITION(switchable_interp_prob, 0x0724);
ASSERT_POSITION(sign, 0x0772);
ASSERT_POSITION(class_0_fr, 0x079E);
ASSERT_POSITION(high_precision, 0x07B2);
#undef ASSERT_POSITION

#define ASSERT_POSITION(field_name, position)                                                      \
    static_assert(offsetof(PictureInfo, field_name) == position,                                   \
                  "Field " #field_name " has invalid position")

ASSERT_POSITION(bitstream_size, 0x30);
ASSERT_POSITION(last_frame_size, 0x48);
ASSERT_POSITION(first_level, 0x70);
ASSERT_POSITION(segmentation, 0x80);
ASSERT_POSITION(loop_filter, 0xE4);
#undef ASSERT_POSITION

#define ASSERT_POSITION(field_name, position)                                                      \
    static_assert(offsetof(EntropyProbs, field_name) == position,                                  \
                  "Field " #field_name " has invalid position")

ASSERT_POSITION(inter_mode_prob, 0x400);
ASSERT_POSITION(tx_8x8_prob, 0x470);
ASSERT_POSITION(partition_prob, 0x4E0);
ASSERT_POSITION(class_0, 0x540);
ASSERT_POSITION(class_0_fr, 0x560);
ASSERT_POSITION(coef_probs, 0x5A0);
#undef ASSERT_POSITION

} // namespace Decoder
} // namespace Tegra
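One detail of EntropyProbs::Convert() above deserves a worked example: NVDEC pads coefficient probabilities to four bytes per group (2304 = 576 * 4), while Vp9EntropyProbs stores them packed, three per group (1728 = 576 * 3). For the multiples of four the loop visits, j = i - i / 4 is exactly j = 3 * (i / 4), so each padded source group lands in the matching packed destination group:

    // i (src, stride 4) -> j (dst, stride 3)
    // i = 0 -> j = 0;  i = 4 -> j = 3;  i = 8 -> j = 6;  ...  i = 2300 -> j = 1725
    // each iteration copies src[i..i+2] and drops src[i+3], the unused pad byte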
34
src/video_core/host1x/control.cpp
Executable file
@@ -0,0 +1,34 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv3 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "video_core/host1x/control.h"
#include "video_core/host1x/host1x.h"

namespace Tegra::Host1x {

Control::Control(Host1x& host1x_) : host1x(host1x_) {}

Control::~Control() = default;

void Control::ProcessMethod(Method method, u32 argument) {
    switch (method) {
    case Method::LoadSyncptPayload32:
        syncpoint_value = argument;
        break;
    case Method::WaitSyncpt:
    case Method::WaitSyncpt32:
        Execute(argument);
        break;
    default:
        UNIMPLEMENTED_MSG("Control method 0x{:X}", static_cast<u32>(method));
        break;
    }
}

void Control::Execute(u32 data) {
    host1x.GetSyncpointManager().WaitHost(data, syncpoint_value);
}

} // namespace Tegra::Host1x
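A hedged sketch of the two-step protocol ProcessMethod() implements: the wait threshold is latched first, then the wait method supplies the syncpoint id (method ids as declared in Control::Method in the header below; host1x stands in for a real Host1x instance):

    Control control{host1x};
    control.ProcessMethod(Control::Method::LoadSyncptPayload32, 42); // latch threshold 42
    control.ProcessMethod(Control::Method::WaitSyncpt, 5);           // block until syncpoint 5 >= 42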
41
src/video_core/host1x/control.h
Executable file
@@ -0,0 +1,41 @@
// SPDX-FileCopyrightText: 2021 yuzu emulator team and Skyline Team and Contributors
// (https://github.com/skyline-emu/)
// SPDX-License-Identifier: GPL-3.0-or-later
// Licensed under GPLv3 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Tegra {

namespace Host1x {

class Host1x;
class Nvdec;

class Control {
public:
    enum class Method : u32 {
        WaitSyncpt = 0x8,
        LoadSyncptPayload32 = 0x4e,
        WaitSyncpt32 = 0x50,
    };

    explicit Control(Host1x& host1x);
    ~Control();

    /// Writes the method into the state, invoking Execute() if encountered
    void ProcessMethod(Method method, u32 argument);

private:
    /// For Host1x, execute is waiting on a syncpoint previously written into the state
    void Execute(u32 data);

    u32 syncpoint_value{};
    Host1x& host1x;
};

} // namespace Host1x

} // namespace Tegra
18
src/video_core/host1x/host1x.cpp
Executable file
@@ -0,0 +1,18 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv3 or any later version
// Refer to the license.txt file included.

#include "core/core.h"
#include "video_core/host1x/host1x.h"

namespace Tegra {

namespace Host1x {

Host1x::Host1x(Core::System& system_)
    : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12},
      allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {}

} // namespace Host1x

} // namespace Tegra
58
src/video_core/host1x/host1x.h
Executable file
@@ -0,0 +1,58 @@
// Copyright 2022 yuzu Emulator Project
// Licensed under GPLv3 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>

#include "common/address_space.h"
#include "common/common_types.h"
#include "video_core/host1x/syncpoint_manager.h"
#include "video_core/memory_manager.h"

namespace Core {
class System;
} // namespace Core

namespace Tegra {

namespace Host1x {

class Host1x {
public:
    explicit Host1x(Core::System& system);

    SyncpointManager& GetSyncpointManager() {
        return syncpoint_manager;
    }

    const SyncpointManager& GetSyncpointManager() const {
        return syncpoint_manager;
    }

    Tegra::MemoryManager& MemoryManager() {
        return memory_manager;
    }

    const Tegra::MemoryManager& MemoryManager() const {
        return memory_manager;
    }

    Common::FlatAllocator<u32, 0, 32>& Allocator() {
        return *allocator;
    }

    const Common::FlatAllocator<u32, 0, 32>& Allocator() const {
        return *allocator;
    }

private:
    Core::System& system;
    SyncpointManager syncpoint_manager;
    Tegra::MemoryManager memory_manager;
    std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator;
};

} // namespace Host1x

} // namespace Tegra
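Host1x above is a plain aggregate of the video subsystem's shared state. A sketch of client access, assuming the MemoryManager arguments (32, 12) are address-space bits and page bits; that reading is inferred from the 1 << 12 allocator size in the constructor, not stated in this diff:

    Tegra::Host1x::Host1x host1x{system};
    auto& syncpoints = host1x.GetSyncpointManager(); // guest/host syncpoint state
    auto& gmmu = host1x.MemoryManager();             // 32-bit address space, 4 KiB pages (assumed)
    auto& iova_alloc = host1x.Allocator();           // FlatAllocator over the same 32-bit range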
48
src/video_core/host1x/nvdec.cpp
Executable file
@@ -0,0 +1,48 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include "common/assert.h"
#include "video_core/host1x/host1x.h"
#include "video_core/host1x/nvdec.h"

namespace Tegra::Host1x {

#define NVDEC_REG_INDEX(field_name)                                                                \
    (offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64))

Nvdec::Nvdec(Host1x& host1x_)
    : host1x(host1x_), state{}, codec(std::make_unique<Codec>(host1x, state)) {}

Nvdec::~Nvdec() = default;

void Nvdec::ProcessMethod(u32 method, u32 argument) {
    state.reg_array[method] = static_cast<u64>(argument) << 8;

    switch (method) {
    case NVDEC_REG_INDEX(set_codec_id):
        codec->SetTargetCodec(static_cast<NvdecCommon::VideoCodec>(argument));
        break;
    case NVDEC_REG_INDEX(execute):
        Execute();
        break;
    }
}

AVFramePtr Nvdec::GetFrame() {
    return codec->GetCurrentFrame();
}

void Nvdec::Execute() {
    switch (codec->GetCurrentCodec()) {
    case NvdecCommon::VideoCodec::H264:
    case NvdecCommon::VideoCodec::VP8:
    case NvdecCommon::VideoCodec::VP9:
        codec->Decode();
        break;
    default:
        UNIMPLEMENTED_MSG("Codec {}", codec->GetCurrentCodecName());
        break;
    }
}

} // namespace Tegra::Host1x
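Two details of ProcessMethod() above can be checked by arithmetic. NVDEC_REG_INDEX converts a field's byte offset into its u64 register index, so the method ids line up with the ASSERT_REG_POSITION values in nvdec_common.h; and the << 8 shift widens the 32-bit argument into a 40-bit value, which for the offset registers suggests arguments arrive in 256-byte units (an inference from the shift itself, not stated in this diff):

    static_assert(NVDEC_REG_INDEX(set_codec_id) == 0x80); // 0x400 / sizeof(u64)
    static_assert(NVDEC_REG_INDEX(execute) == 0xC0);      // 0x600 / sizeof(u64)
    // e.g. argument 0x0012'3456 is stored as 0x0012'3456 << 8 = 0x1234'5600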
39
src/video_core/host1x/nvdec.h
Executable file
@@ -0,0 +1,39 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <memory>
#include <vector>
#include "common/common_types.h"
#include "video_core/host1x/codecs/codec.h"

namespace Tegra {

namespace Host1x {

class Host1x;

class Nvdec {
public:
    explicit Nvdec(Host1x& host1x);
    ~Nvdec();

    /// Writes the method into the state, invoking Execute() if encountered
    void ProcessMethod(u32 method, u32 argument);

    /// Return most recently decoded frame
    [[nodiscard]] AVFramePtr GetFrame();

private:
    /// Invoke codec to decode a frame
    void Execute();

    Host1x& host1x;
    NvdecCommon::NvdecRegisters state;
    std::unique_ptr<Codec> codec;
};

} // namespace Host1x

} // namespace Tegra
97
src/video_core/host1x/nvdec_common.h
Executable file
@@ -0,0 +1,97 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <array>

#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"

namespace Tegra::Host1x::NvdecCommon {

enum class VideoCodec : u64 {
    None = 0x0,
    H264 = 0x3,
    VP8 = 0x5,
    H265 = 0x7,
    VP9 = 0x9,
};

// NVDEC should use a 32-bit address space, but it is mapped to 64-bit;
// doubling the sizes here compensates for that.
struct NvdecRegisters {
    static constexpr std::size_t NUM_REGS = 0x178;

    union {
        struct {
            INSERT_PADDING_WORDS_NOINIT(256);  ///< 0x0000
            VideoCodec set_codec_id;           ///< 0x0400
            INSERT_PADDING_WORDS_NOINIT(126);  ///< 0x0408
            u64 execute;                       ///< 0x0600
            INSERT_PADDING_WORDS_NOINIT(126);  ///< 0x0608
            struct {                           ///< 0x0800
                union {
                    BitField<0, 3, VideoCodec> codec;
                    BitField<4, 1, u64> gp_timer_on;
                    BitField<13, 1, u64> mb_timer_on;
                    BitField<14, 1, u64> intra_frame_pslc;
                    BitField<17, 1, u64> all_intra_frame;
                };
            } control_params;
            u64 picture_info_offset;                   ///< 0x0808
            u64 frame_bitstream_offset;                ///< 0x0810
            u64 frame_number;                          ///< 0x0818
            u64 h264_slice_data_offsets;               ///< 0x0820
            u64 h264_mv_dump_offset;                   ///< 0x0828
            INSERT_PADDING_WORDS_NOINIT(6);            ///< 0x0830
            u64 frame_stats_offset;                    ///< 0x0848
            u64 h264_last_surface_luma_offset;         ///< 0x0850
            u64 h264_last_surface_chroma_offset;       ///< 0x0858
            std::array<u64, 17> surface_luma_offset;   ///< 0x0860
            std::array<u64, 17> surface_chroma_offset; ///< 0x08E8
            INSERT_PADDING_WORDS_NOINIT(68);           ///< 0x0970
            u64 vp8_prob_data_offset;                  ///< 0x0A80
            u64 vp8_header_partition_buf_offset;       ///< 0x0A88
            INSERT_PADDING_WORDS_NOINIT(60);           ///< 0x0A90
            u64 vp9_entropy_probs_offset;              ///< 0x0B80
            u64 vp9_backward_updates_offset;           ///< 0x0B88
            u64 vp9_last_frame_segmap_offset;          ///< 0x0B90
            u64 vp9_curr_frame_segmap_offset;          ///< 0x0B98
            INSERT_PADDING_WORDS_NOINIT(2);            ///< 0x0BA0
            u64 vp9_last_frame_mvs_offset;             ///< 0x0BA8
            u64 vp9_curr_frame_mvs_offset;             ///< 0x0BB0
            INSERT_PADDING_WORDS_NOINIT(2);            ///< 0x0BB8
        };
        std::array<u64, NUM_REGS> reg_array;
    };
};
static_assert(sizeof(NvdecRegisters) == (0xBC0), "NvdecRegisters is incorrect size");

#define ASSERT_REG_POSITION(field_name, position)                                                  \
    static_assert(offsetof(NvdecRegisters, field_name) == position * sizeof(u64),                  \
                  "Field " #field_name " has invalid position")

ASSERT_REG_POSITION(set_codec_id, 0x80);
ASSERT_REG_POSITION(execute, 0xC0);
ASSERT_REG_POSITION(control_params, 0x100);
ASSERT_REG_POSITION(picture_info_offset, 0x101);
ASSERT_REG_POSITION(frame_bitstream_offset, 0x102);
ASSERT_REG_POSITION(frame_number, 0x103);
ASSERT_REG_POSITION(h264_slice_data_offsets, 0x104);
ASSERT_REG_POSITION(frame_stats_offset, 0x109);
ASSERT_REG_POSITION(h264_last_surface_luma_offset, 0x10A);
ASSERT_REG_POSITION(h264_last_surface_chroma_offset, 0x10B);
ASSERT_REG_POSITION(surface_luma_offset, 0x10C);
ASSERT_REG_POSITION(surface_chroma_offset, 0x11D);
ASSERT_REG_POSITION(vp8_prob_data_offset, 0x150);
ASSERT_REG_POSITION(vp8_header_partition_buf_offset, 0x151);
ASSERT_REG_POSITION(vp9_entropy_probs_offset, 0x170);
ASSERT_REG_POSITION(vp9_backward_updates_offset, 0x171);
ASSERT_REG_POSITION(vp9_last_frame_segmap_offset, 0x172);
ASSERT_REG_POSITION(vp9_curr_frame_segmap_offset, 0x173);
ASSERT_REG_POSITION(vp9_last_frame_mvs_offset, 0x175);
ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176);

#undef ASSERT_REG_POSITION

} // namespace Tegra::Host1x::NvdecCommon
50
src/video_core/host1x/sync_manager.cpp
Executable file
@@ -0,0 +1,50 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT

#include <algorithm>
#include "sync_manager.h"
#include "video_core/host1x/host1x.h"
#include "video_core/host1x/syncpoint_manager.h"

namespace Tegra {
namespace Host1x {

SyncptIncrManager::SyncptIncrManager(Host1x& host1x_) : host1x(host1x_) {}
SyncptIncrManager::~SyncptIncrManager() = default;

void SyncptIncrManager::Increment(u32 id) {
    increments.emplace_back(0, 0, id, true);
    IncrementAllDone();
}

u32 SyncptIncrManager::IncrementWhenDone(u32 class_id, u32 id) {
    const u32 handle = current_id++;
    increments.emplace_back(handle, class_id, id);
    return handle;
}

void SyncptIncrManager::SignalDone(u32 handle) {
    const auto done_incr =
        std::find_if(increments.begin(), increments.end(),
                     [handle](const SyncptIncr& incr) { return incr.id == handle; });
    if (done_incr != increments.cend()) {
        done_incr->complete = true;
    }
    IncrementAllDone();
}

void SyncptIncrManager::IncrementAllDone() {
    std::size_t done_count = 0;
    for (; done_count < increments.size(); ++done_count) {
        if (!increments[done_count].complete) {
            break;
        }
        auto& syncpoint_manager = host1x.GetSyncpointManager();
        syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id);
        syncpoint_manager.IncrementHost(increments[done_count].syncpt_id);
    }
    increments.erase(increments.begin(), increments.begin() + done_count);
}

} // namespace Host1x
} // namespace Tegra
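The loop in IncrementAllDone() above only retires the leading run of completed entries, giving in-order completion semantics. A hedged usage sketch (host1x and class_id are placeholders):

    SyncptIncrManager incrs{host1x};
    const u32 a = incrs.IncrementWhenDone(class_id, 5); // queued first
    const u32 b = incrs.IncrementWhenDone(class_id, 5); // queued second
    incrs.SignalDone(b); // b is complete but stuck behind a: syncpoint 5 unchanged
    incrs.SignalDone(a); // a completes the leading run: syncpoint 5 incremented twice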
53
src/video_core/host1x/sync_manager.h
Executable file
@@ -0,0 +1,53 @@
// SPDX-FileCopyrightText: Ryujinx Team and Contributors
// SPDX-License-Identifier: MIT

#pragma once

#include <mutex>
#include <vector>
#include "common/common_types.h"

namespace Tegra {

namespace Host1x {

class Host1x;

struct SyncptIncr {
    u32 id;
    u32 class_id;
    u32 syncpt_id;
    bool complete;

    SyncptIncr(u32 id_, u32 class_id_, u32 syncpt_id_, bool done = false)
        : id(id_), class_id(class_id_), syncpt_id(syncpt_id_), complete(done) {}
};

class SyncptIncrManager {
public:
    explicit SyncptIncrManager(Host1x& host1x);
    ~SyncptIncrManager();

    /// Add syncpoint id and increment all
    void Increment(u32 id);

    /// Returns a handle to increment later
    u32 IncrementWhenDone(u32 class_id, u32 id);

    /// Marks the increment with the given handle as complete, then increments all done
    void SignalDone(u32 handle);

    /// Increment all sequential pending increments that are already done.
    void IncrementAllDone();

private:
    std::vector<SyncptIncr> increments;
    std::mutex increment_lock;
    u32 current_id{};

    Host1x& host1x;
};

} // namespace Host1x

} // namespace Tegra
97
src/video_core/host1x/syncpoint_manager.cpp
Executable file
@@ -0,0 +1,97 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv3 or any later version
// Refer to the license.txt file included.

#include "common/microprofile.h"
#include "video_core/host1x/syncpoint_manager.h"

namespace Tegra {

namespace Host1x {

MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));

SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
    std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value,
    std::function<void(void)>& action) {
    if (syncpoint.load(std::memory_order_acquire) >= expected_value) {
        action();
        return {};
    }

    std::unique_lock<std::mutex> lk(guard);
    if (syncpoint.load(std::memory_order_relaxed) >= expected_value) {
        action();
        return {};
    }
    auto it = action_storage.begin();
    while (it != action_storage.end()) {
        if (it->expected_value >= expected_value) {
            break;
        }
        ++it;
    }
    return action_storage.emplace(it, expected_value, action);
}

void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
                                        ActionHandle& handle) {
    std::unique_lock<std::mutex> lk(guard);
    action_storage.erase(handle);
}

void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) {
    DeregisterAction(guest_action_storage[syncpoint_id], handle);
}

void SyncpointManager::DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle) {
    DeregisterAction(host_action_storage[syncpoint_id], handle);
}

void SyncpointManager::IncrementGuest(u32 syncpoint_id) {
    Increment(syncpoints_guest[syncpoint_id], wait_guest_cv, guest_action_storage[syncpoint_id]);
}

void SyncpointManager::IncrementHost(u32 syncpoint_id) {
    Increment(syncpoints_host[syncpoint_id], wait_host_cv, host_action_storage[syncpoint_id]);
}

void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) {
    Wait(syncpoints_guest[syncpoint_id], wait_guest_cv, expected_value);
}

void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) {
    MICROPROFILE_SCOPE(GPU_wait);
    Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value);
}

void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
                                 std::list<RegisteredAction>& action_storage) {
    auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1};

    std::unique_lock<std::mutex> lk(guard);
    auto it = action_storage.begin();
    while (it != action_storage.end()) {
        if (it->expected_value > new_value) {
            break;
        }
        it->action();
        it = action_storage.erase(it);
    }
    wait_cv.notify_all();
}

void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
                            u32 expected_value) {
    const auto pred = [&]() { return syncpoint.load(std::memory_order_acquire) >= expected_value; };
    if (pred()) {
        return;
    }

    std::unique_lock<std::mutex> lk(guard);
    wait_cv.wait(lk, pred);
}

} // namespace Host1x

} // namespace Tegra
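RegisterAction() above is a double-checked pattern: a lock-free fast path when the syncpoint has already passed expected_value, then a re-check under the mutex so an Increment() racing between the two loads cannot strand the action. A usage sketch of the public wrappers declared in the header below:

    auto& sp = host1x.GetSyncpointManager();
    // Fires immediately if host syncpoint 3 already reached 100; otherwise queued
    // (sorted by expected value) and run by IncrementHost() when the value is hit.
    auto handle = sp.RegisterHostAction(3, 100, [] { /* completion work */ });
    // Only valid while the action is still pending:
    sp.DeregisterHostAction(3, handle);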
99
src/video_core/host1x/syncpoint_manager.h
Executable file
@@ -0,0 +1,99 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv3 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <list>
#include <mutex>

#include "common/common_types.h"

namespace Tegra {

namespace Host1x {

class SyncpointManager {
public:
    u32 GetGuestSyncpointValue(u32 id) {
        return syncpoints_guest[id].load(std::memory_order_acquire);
    }

    u32 GetHostSyncpointValue(u32 id) {
        return syncpoints_host[id].load(std::memory_order_acquire);
    }

    struct RegisteredAction {
        RegisteredAction(u32 expected_value_, std::function<void(void)>& action_)
            : expected_value{expected_value_}, action{action_} {}
        u32 expected_value;
        std::function<void(void)> action;
    };
    using ActionHandle = std::list<RegisteredAction>::iterator;

    template <typename Func>
    ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
        std::function<void(void)> func(action);
        return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id],
                              expected_value, func);
    }

    template <typename Func>
    ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
        std::function<void(void)> func(action);
        return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id],
                              expected_value, func);
    }

    void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle);

    void DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle);

    void IncrementGuest(u32 syncpoint_id);

    void IncrementHost(u32 syncpoint_id);

    void WaitGuest(u32 syncpoint_id, u32 expected_value);

    void WaitHost(u32 syncpoint_id, u32 expected_value);

    bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) {
        return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
    }

    bool IsReadyHost(u32 syncpoint_id, u32 expected_value) {
        return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
    }

private:
    void Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
                   std::list<RegisteredAction>& action_storage);

    ActionHandle RegisterAction(std::atomic<u32>& syncpoint,
                                std::list<RegisteredAction>& action_storage, u32 expected_value,
                                std::function<void(void)>& action);

    void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle);

    void Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, u32 expected_value);

    static constexpr size_t NUM_MAX_SYNCPOINTS = 192;

    std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_guest{};
    std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_host{};

    std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> guest_action_storage;
    std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> host_action_storage;

    std::mutex guard;
    std::condition_variable wait_guest_cv;
    std::condition_variable wait_host_cv;
};

} // namespace Host1x

} // namespace Tegra
244
src/video_core/host1x/vic.cpp
Executable file
@@ -0,0 +1,244 @@
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <array>

extern "C" {
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
#include <libswscale/swscale.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
}

#include "common/assert.h"
#include "common/bit_field.h"
#include "common/logging/log.h"

#include "video_core/engines/maxwell_3d.h"
#include "video_core/host1x/host1x.h"
#include "video_core/host1x/nvdec.h"
#include "video_core/host1x/vic.h"
#include "video_core/memory_manager.h"
#include "video_core/textures/decoders.h"

namespace Tegra {

namespace Host1x {

namespace {
enum class VideoPixelFormat : u64_le {
    RGBA8 = 0x1f,
    BGRA8 = 0x20,
    RGBX8 = 0x23,
    YUV420 = 0x44,
};
} // Anonymous namespace

union VicConfig {
    u64_le raw{};
    BitField<0, 7, VideoPixelFormat> pixel_format;
    BitField<7, 2, u64_le> chroma_loc_horiz;
    BitField<9, 2, u64_le> chroma_loc_vert;
    BitField<11, 4, u64_le> block_linear_kind;
    BitField<15, 4, u64_le> block_linear_height_log2;
    BitField<32, 14, u64_le> surface_width_minus1;
    BitField<46, 14, u64_le> surface_height_minus1;
};

Vic::Vic(Host1x& host1x_, std::shared_ptr<Nvdec> nvdec_processor_)
    : host1x(host1x_),
      nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}

Vic::~Vic() = default;

void Vic::ProcessMethod(Method method, u32 argument) {
    LOG_DEBUG(HW_GPU, "Vic method 0x{:X}", static_cast<u32>(method));
    const u64 arg = static_cast<u64>(argument) << 8;
    switch (method) {
    case Method::Execute:
        Execute();
        break;
    case Method::SetConfigStructOffset:
        config_struct_address = arg;
        break;
    case Method::SetOutputSurfaceLumaOffset:
        output_surface_luma_address = arg;
        break;
    case Method::SetOutputSurfaceChromaOffset:
        output_surface_chroma_address = arg;
        break;
    default:
        break;
    }
}

void Vic::Execute() {
    if (output_surface_luma_address == 0) {
        LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
        return;
    }
    const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
    const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
    const auto* frame = frame_ptr.get();
    if (!frame) {
        return;
    }
    const u64 surface_width = config.surface_width_minus1 + 1;
    const u64 surface_height = config.surface_height_minus1 + 1;
    if (static_cast<u64>(frame->width) != surface_width ||
        static_cast<u64>(frame->height) != surface_height) {
        // TODO: Properly support multiple video streams with differing frame dimensions
        LOG_WARNING(Service_NVDRV, "Frame dimensions {}x{} don't match surface dimensions {}x{}",
                    frame->width, frame->height, surface_width, surface_height);
    }
    switch (config.pixel_format) {
    case VideoPixelFormat::RGBA8:
    case VideoPixelFormat::BGRA8:
    case VideoPixelFormat::RGBX8:
        WriteRGBFrame(frame, config);
        break;
    case VideoPixelFormat::YUV420:
        WriteYUVFrame(frame, config);
        break;
    default:
        UNIMPLEMENTED_MSG("Unknown video pixel format {:X}", config.pixel_format.Value());
        break;
    }
}

void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
    LOG_TRACE(Service_NVDRV, "Writing RGB Frame");

    if (!scaler_ctx || frame->width != scaler_width || frame->height != scaler_height) {
        const AVPixelFormat target_format = [pixel_format = config.pixel_format]() {
            switch (pixel_format) {
            case VideoPixelFormat::RGBA8:
                return AV_PIX_FMT_RGBA;
            case VideoPixelFormat::BGRA8:
                return AV_PIX_FMT_BGRA;
            case VideoPixelFormat::RGBX8:
                return AV_PIX_FMT_RGB0;
            default:
                return AV_PIX_FMT_RGBA;
            }
        }();

        sws_freeContext(scaler_ctx);
        // Frames are decoded into either YUV420 or NV12 formats. Convert to the desired RGB format
        scaler_ctx = sws_getContext(frame->width, frame->height,
                                    static_cast<AVPixelFormat>(frame->format), frame->width,
                                    frame->height, target_format, 0, nullptr, nullptr, nullptr);
        scaler_width = frame->width;
        scaler_height = frame->height;
        converted_frame_buffer.reset();
    }
    if (!converted_frame_buffer) {
        const size_t frame_size = frame->width * frame->height * 4;
        converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(frame_size)), av_free};
    }
    const std::array<int, 4> converted_stride{frame->width * 4, frame->height * 4, 0, 0};
    u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
    sws_scale(scaler_ctx, frame->data, frame->linesize, 0, frame->height, &converted_frame_buf_addr,
              converted_stride.data());

    // Use the minimum of surface/frame dimensions to avoid buffer overflow.
    const u32 surface_width = static_cast<u32>(config.surface_width_minus1) + 1;
    const u32 surface_height = static_cast<u32>(config.surface_height_minus1) + 1;
    const u32 width = std::min(surface_width, static_cast<u32>(frame->width));
    const u32 height = std::min(surface_height, static_cast<u32>(frame->height));
    const u32 blk_kind = static_cast<u32>(config.block_linear_kind);
    if (blk_kind != 0) {
        // Swizzle pitch linear to block linear
        const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
        const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
        luma_buffer.resize(size);
        std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height);
        Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
                                block_height, 0, width * 4);

        host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
    } else {
        // Send pitch linear frame
        const size_t linear_size = width * height * 4;
        host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
                                          linear_size);
    }
}

void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
    LOG_TRACE(Service_NVDRV, "Writing YUV420 Frame");

    const std::size_t surface_width = config.surface_width_minus1 + 1;
    const std::size_t surface_height = config.surface_height_minus1 + 1;
    const std::size_t aligned_width = (surface_width + 0xff) & ~0xffUL;
    // Use the minimum of surface/frame dimensions to avoid buffer overflow.
    const auto frame_width = std::min(surface_width, static_cast<size_t>(frame->width));
    const auto frame_height = std::min(surface_height, static_cast<size_t>(frame->height));

    const auto stride = static_cast<size_t>(frame->linesize[0]);

    luma_buffer.resize(aligned_width * surface_height);
    chroma_buffer.resize(aligned_width * surface_height / 2);

    // Populate luma buffer
    const u8* luma_src = frame->data[0];
    for (std::size_t y = 0; y < frame_height; ++y) {
        const std::size_t src = y * stride;
        const std::size_t dst = y * aligned_width;
        for (std::size_t x = 0; x < frame_width; ++x) {
            luma_buffer[dst + x] = luma_src[src + x];
        }
    }
    host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
                                      luma_buffer.size());

    // Chroma
    const std::size_t half_height = frame_height / 2;
    const auto half_stride = static_cast<size_t>(frame->linesize[1]);

    switch (frame->format) {
    case AV_PIX_FMT_YUV420P: {
        // Frame from FFmpeg software
        // Populate chroma buffer from both channels with interleaving.
        const std::size_t half_width = frame_width / 2;
        const u8* chroma_b_src = frame->data[1];
        const u8* chroma_r_src = frame->data[2];
        for (std::size_t y = 0; y < half_height; ++y) {
            const std::size_t src = y * half_stride;
            const std::size_t dst = y * aligned_width;

            for (std::size_t x = 0; x < half_width; ++x) {
                chroma_buffer[dst + x * 2] = chroma_b_src[src + x];
                chroma_buffer[dst + x * 2 + 1] = chroma_r_src[src + x];
            }
        }
        break;
    }
    case AV_PIX_FMT_NV12: {
        // Frame from VA-API hardware
        // This is already interleaved so just copy
        const u8* chroma_src = frame->data[1];
        for (std::size_t y = 0; y < half_height; ++y) {
            const std::size_t src = y * stride;
            const std::size_t dst = y * aligned_width;
            for (std::size_t x = 0; x < frame_width; ++x) {
                chroma_buffer[dst + x] = chroma_src[src + x];
            }
        }
        break;
    }
    default:
        ASSERT(false);
        break;
    }
    host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
                                      chroma_buffer.size());
}

} // namespace Host1x

} // namespace Tegra
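The surface math in WriteYUVFrame() above is worth one worked example. Each luma row is padded to a 256-byte pitch, and chroma is written as interleaved half-height rows (an NV12-style layout), so for a hypothetical 1000x720 surface:

    // aligned_width = (1000 + 0xff) & ~0xff = 1024 (256-byte row pitch)
    // luma bytes    = 1024 * 720        (one byte per pixel)
    // chroma bytes  = 1024 * 720 / 2    (interleaved U/V at half vertical resolution)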
Some files were not shown because too many files have changed in this diff