early-access version 3830
parent 8c4ecdc426
commit e62fb0263c
37 changed files with 380 additions and 2052 deletions
@@ -1,7 +1,7 @@
yuzu emulator early access
=============

This is the source code for early-access 3829.
This is the source code for early-access 3830.

## Legal Notice

@@ -129,17 +129,13 @@ void LogSettings() {
    log_path("DataStorage_SDMCDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::SDMCDir));
}

void UpdateGPUAccuracy() {
    values.current_gpu_accuracy = values.gpu_accuracy.GetValue();
}

bool IsGPULevelExtreme() {
    return values.current_gpu_accuracy == GpuAccuracy::Extreme;
    return values.gpu_accuracy.GetValue() == GpuAccuracy::Extreme;
}

bool IsGPULevelHigh() {
    return values.current_gpu_accuracy == GpuAccuracy::Extreme ||
           values.current_gpu_accuracy == GpuAccuracy::High;
    return values.gpu_accuracy.GetValue() == GpuAccuracy::Extreme ||
           values.gpu_accuracy.GetValue() == GpuAccuracy::High;
}

bool IsFastmemEnabled() {
@@ -307,7 +307,6 @@ struct Values {
        Specialization::Default,
        true,
        true};
    GpuAccuracy current_gpu_accuracy{GpuAccuracy::High};
    SwitchableSetting<AnisotropyMode, true> max_anisotropy{
        linkage, AnisotropyMode::Automatic, AnisotropyMode::Automatic, AnisotropyMode::X16,
        "max_anisotropy", Category::RendererAdvanced};
@@ -515,7 +514,6 @@ struct Values {

extern Values values;

void UpdateGPUAccuracy();
bool IsGPULevelExtreme();
bool IsGPULevelHigh();

@@ -95,12 +95,6 @@ add_library(video_core STATIC
    memory_manager.h
    precompiled_headers.h
    pte_kind.h
    query_cache/bank_base.h
    query_cache/query_base.h
    query_cache/query_cache_base.h
    query_cache/query_cache.h
    query_cache/query_stream.h
    query_cache/types.h
    query_cache.h
    rasterizer_accelerated.cpp
    rasterizer_accelerated.h
@@ -272,19 +272,13 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainBuffer(GPUVAddr gpu_addr,
    if (!cpu_addr) {
        return {&slot_buffers[NULL_BUFFER_ID], 0};
    }
    return ObtainCPUBuffer(*cpu_addr, size, sync_info, post_op);
}

template <class P>
std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainCPUBuffer(
    VAddr cpu_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) {
    const BufferId buffer_id = FindBuffer(cpu_addr, size);
    const BufferId buffer_id = FindBuffer(*cpu_addr, size);
    Buffer& buffer = slot_buffers[buffer_id];

    // synchronize op
    switch (sync_info) {
    case ObtainBufferSynchronize::FullSynchronize:
        SynchronizeBuffer(buffer, cpu_addr, size);
        SynchronizeBuffer(buffer, *cpu_addr, size);
        break;
    default:
        break;
@@ -292,10 +286,10 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainCPUBuffer(

    switch (post_op) {
    case ObtainBufferOperation::MarkAsWritten:
        MarkWrittenBuffer(buffer_id, cpu_addr, size);
        MarkWrittenBuffer(buffer_id, *cpu_addr, size);
        break;
    case ObtainBufferOperation::DiscardWrite: {
        IntervalType interval{cpu_addr, size};
        IntervalType interval{*cpu_addr, size};
        ClearDownload(interval);
        break;
    }
@@ -303,7 +297,7 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainCPUBuffer(
        break;
    }

    return {&buffer, buffer.Offset(cpu_addr)};
    return {&buffer, buffer.Offset(*cpu_addr)};
}

template <class P>
@@ -295,10 +295,6 @@ public:
    [[nodiscard]] std::pair<Buffer*, u32> ObtainBuffer(GPUVAddr gpu_addr, u32 size,
                                                       ObtainBufferSynchronize sync_info,
                                                       ObtainBufferOperation post_op);

    [[nodiscard]] std::pair<Buffer*, u32> ObtainCPUBuffer(VAddr gpu_addr, u32 size,
                                                          ObtainBufferSynchronize sync_info,
                                                          ObtainBufferOperation post_op);
    void FlushCachedWrites();

    /// Return true when there are uncommitted buffers to be downloaded
@@ -339,14 +335,6 @@ public:

    [[nodiscard]] std::pair<Buffer*, u32> GetDrawIndirectBuffer();

    template <typename Func>
    void BufferOperations(Func&& func) {
        do {
            channel_state->has_deleted_buffers = false;
            func();
        } while (channel_state->has_deleted_buffers);
    }

    std::recursive_mutex mutex;
    Runtime& runtime;

@@ -51,7 +51,7 @@ public:
    virtual void CreateChannel(Tegra::Control::ChannelState& channel);

    /// Bind a channel for execution.
    virtual void BindToChannel(s32 id);
    void BindToChannel(s32 id);

    /// Erase channel's state.
    void EraseChannel(s32 id);
@@ -46,7 +46,6 @@ public:
};

struct IndirectParams {
    bool is_byte_count;
    bool is_indexed;
    bool include_count;
    GPUVAddr count_start_address;
@@ -20,6 +20,8 @@

namespace Tegra::Engines {

using VideoCore::QueryType;

/// First register id that is actually a Macro call.
constexpr u32 MacroRegistersStart = 0xE00;

@@ -494,21 +496,27 @@ void Maxwell3D::StampQueryResult(u64 payload, bool long_query) {
}

void Maxwell3D::ProcessQueryGet() {
    VideoCommon::QueryPropertiesFlags flags{};
    if (regs.report_semaphore.query.short_query == 0) {
        flags |= VideoCommon::QueryPropertiesFlags::HasTimeout;
    }
    const GPUVAddr sequence_address{regs.report_semaphore.Address()};
    const VideoCommon::QueryType query_type =
        static_cast<VideoCommon::QueryType>(regs.report_semaphore.query.report.Value());
    const u32 payload = regs.report_semaphore.payload;
    const u32 subreport = regs.report_semaphore.query.sub_report;
    switch (regs.report_semaphore.query.operation) {
    case Regs::ReportSemaphore::Operation::Release:
        if (regs.report_semaphore.query.short_query != 0) {
            flags |= VideoCommon::QueryPropertiesFlags::IsAFence;
            const GPUVAddr sequence_address{regs.report_semaphore.Address()};
            const u32 payload = regs.report_semaphore.payload;
            std::function<void()> operation([this, sequence_address, payload] {
                memory_manager.Write<u32>(sequence_address, payload);
            });
            rasterizer->SignalFence(std::move(operation));
        } else {
            struct LongQueryResult {
                u64_le value;
                u64_le timestamp;
            };
            const GPUVAddr sequence_address{regs.report_semaphore.Address()};
            const u32 payload = regs.report_semaphore.payload;
            [this, sequence_address, payload] {
                memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
                memory_manager.Write<u64>(sequence_address, payload);
            }();
        }
        rasterizer->Query(sequence_address, query_type, flags, payload, subreport);
        break;
    case Regs::ReportSemaphore::Operation::Acquire:
        // TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that
@@ -516,7 +524,11 @@ void Maxwell3D::ProcessQueryGet() {
        UNIMPLEMENTED_MSG("Unimplemented query operation ACQUIRE");
        break;
    case Regs::ReportSemaphore::Operation::ReportOnly:
        rasterizer->Query(sequence_address, query_type, flags, payload, subreport);
        if (const std::optional<u64> result = GetQueryResult()) {
            // If the query returns an empty optional it means it's cached and deferred.
            // In this case we have a non-empty result, so we stamp it immediately.
            StampQueryResult(*result, regs.report_semaphore.query.short_query == 0);
        }
        break;
    case Regs::ReportSemaphore::Operation::Trap:
        UNIMPLEMENTED_MSG("Unimplemented query operation TRAP");
@@ -528,10 +540,6 @@ void Maxwell3D::ProcessQueryGet() {
    }
}

void Maxwell3D::ProcessQueryCondition() {
    if (rasterizer->AccelerateConditionalRendering()) {
        execute_on = true;
        return;
    }
    const GPUVAddr condition_address{regs.render_enable.Address()};
    switch (regs.render_enable_override) {
    case Regs::RenderEnable::Override::AlwaysRender:
@@ -541,6 +549,10 @@ void Maxwell3D::ProcessQueryCondition() {
        execute_on = false;
        break;
    case Regs::RenderEnable::Override::UseRenderEnable: {
        if (rasterizer->AccelerateConditionalRendering()) {
            execute_on = true;
            return;
        }
        switch (regs.render_enable.mode) {
        case Regs::RenderEnable::Mode::True: {
            execute_on = true;
@@ -582,9 +594,15 @@ void Maxwell3D::ProcessQueryCondition() {
}

void Maxwell3D::ProcessCounterReset() {
#if ANDROID
    if (!Settings::IsGPULevelHigh()) {
        // This is problematic on Android, disable on GPU Normal.
        return;
    }
#endif
    switch (regs.clear_report_value) {
    case Regs::ClearReport::ZPassPixelCount:
        rasterizer->ResetCounter(VideoCommon::QueryType::ZPassPixelCount64);
        rasterizer->ResetCounter(QueryType::SamplesPassed);
        break;
    default:
        LOG_DEBUG(Render_OpenGL, "Unimplemented counter reset={}", regs.clear_report_value);
@@ -598,6 +616,28 @@ void Maxwell3D::ProcessSyncPoint() {
    rasterizer->SignalSyncPoint(sync_point);
}

std::optional<u64> Maxwell3D::GetQueryResult() {
    switch (regs.report_semaphore.query.report) {
    case Regs::ReportSemaphore::Report::Payload:
        return regs.report_semaphore.payload;
    case Regs::ReportSemaphore::Report::ZPassPixelCount64:
#if ANDROID
        if (!Settings::IsGPULevelHigh()) {
            // This is problematic on Android, disable on GPU Normal.
            return 120;
        }
#endif
        // Deferred.
        rasterizer->Query(regs.report_semaphore.Address(), QueryType::SamplesPassed,
                          system.GPU().GetTicks());
        return std::nullopt;
    default:
        LOG_DEBUG(HW_GPU, "Unimplemented query report type {}",
                  regs.report_semaphore.query.report.Value());
        return 1;
    }
}

void Maxwell3D::ProcessCBBind(size_t stage_index) {
    // Bind the buffer currently in CB_ADDRESS to the specified index in the desired shader
    // stage.
@@ -3182,6 +3182,9 @@ private:
    /// Handles writes to syncing register.
    void ProcessSyncPoint();

    /// Returns a query's value or an empty object if the value will be deferred through a cache.
    std::optional<u64> GetQueryResult();

    void RefreshParametersImpl();

    bool IsMethodExecutable(u32 method);
@@ -361,17 +361,21 @@ void MaxwellDMA::ReleaseSemaphore() {
    const auto type = regs.launch_dma.semaphore_type;
    const GPUVAddr address = regs.semaphore.address;
    const u32 payload = regs.semaphore.payload;
    VideoCommon::QueryPropertiesFlags flags{VideoCommon::QueryPropertiesFlags::IsAFence};
    switch (type) {
    case LaunchDMA::SemaphoreType::NONE:
        break;
    case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: {
        rasterizer->Query(address, VideoCommon::QueryType::Payload, flags, payload, 0);
        std::function<void()> operation(
            [this, address, payload] { memory_manager.Write<u32>(address, payload); });
        rasterizer->SignalFence(std::move(operation));
        break;
    }
    case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: {
        rasterizer->Query(address, VideoCommon::QueryType::Payload,
                          flags | VideoCommon::QueryPropertiesFlags::HasTimeout, payload, 0);
        std::function<void()> operation([this, address, payload] {
            memory_manager.Write<u64>(address + sizeof(u64), system.GPU().GetTicks());
            memory_manager.Write<u64>(address, payload);
        });
        rasterizer->SignalFence(std::move(operation));
        break;
    }
    default:
@@ -77,8 +77,10 @@ void Puller::ProcessSemaphoreTriggerMethod() {
    if (op == GpuSemaphoreOperation::WriteLong) {
        const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
        const u32 payload = regs.semaphore_sequence;
        rasterizer->Query(sequence_address, VideoCommon::QueryType::Payload,
                          VideoCommon::QueryPropertiesFlags::HasTimeout, payload, 0);
        [this, sequence_address, payload] {
            memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks());
            memory_manager.Write<u64>(sequence_address, payload);
        }();
    } else {
        do {
            const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())};
@@ -113,8 +115,10 @@ void Puller::ProcessSemaphoreTriggerMethod() {
void Puller::ProcessSemaphoreRelease() {
    const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
    const u32 payload = regs.semaphore_release;
    rasterizer->Query(sequence_address, VideoCommon::QueryType::Payload,
                      VideoCommon::QueryPropertiesFlags::IsAFence, payload, 0);
    std::function<void()> operation([this, sequence_address, payload] {
        memory_manager.Write<u32>(sequence_address, payload);
    });
    rasterizer->SignalFence(std::move(operation));
}

void Puller::ProcessSemaphoreAcquire() {
@@ -123,6 +127,7 @@ void Puller::ProcessSemaphoreAcquire() {
    while (word != value) {
        regs.acquire_active = true;
        regs.acquire_value = value;
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
        rasterizer->ReleaseFences();
        word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
        // TODO(kemathe73) figure out how to do the acquire_timeout
@@ -55,9 +55,6 @@ public:

    // Unlike other fences, this one doesn't
    void SignalOrdering() {
        if constexpr (!can_async_check) {
            TryReleasePendingFences<false>();
        }
        std::scoped_lock lock{buffer_cache.mutex};
        buffer_cache.AccumulateFlushes();
    }
@@ -107,25 +104,9 @@ public:
        SignalFence(std::move(func));
    }

    void WaitPendingFences([[maybe_unused]] bool force) {
    void WaitPendingFences() {
        if constexpr (!can_async_check) {
            TryReleasePendingFences<true>();
        } else {
            if (!force) {
                return;
            }
            std::mutex wait_mutex;
            std::condition_variable wait_cv;
            std::atomic<bool> wait_finished{};
            std::function<void()> func([&] {
                std::scoped_lock lk(wait_mutex);
                wait_finished.store(true, std::memory_order_relaxed);
                wait_cv.notify_all();
            });
            SignalFence(std::move(func));
            std::unique_lock lk(wait_mutex);
            wait_cv.wait(
                lk, [&wait_finished] { return wait_finished.load(std::memory_order_relaxed); });
        }
    }

@@ -102,8 +102,7 @@ struct GPU::Impl {

    /// Signal the ending of command list.
    void OnCommandListEnd() {
        rasterizer->ReleaseFences(false);
        Settings::UpdateGPUAccuracy();
        rasterizer->ReleaseFences();
    }

    /// Request a host GPU memory flush from the CPU.
@@ -221,7 +220,6 @@
    /// This can be used to launch any necessary threads and register any necessary
    /// core timing events.
    void Start() {
        Settings::UpdateGPUAccuracy();
        gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler);
    }

@@ -41,9 +41,6 @@ set(SHADER_FILES
    pitch_unswizzle.comp
    present_bicubic.frag
    present_gaussian.frag
    queries_prefix_scan_sum.comp
    queries_prefix_scan_sum_nosubgroups.comp
    resolve_conditional_render.comp
    smaa_edge_detection.vert
    smaa_edge_detection.frag
    smaa_blending_weight_calculation.vert
@@ -73,7 +70,6 @@ if ("${GLSLANGVALIDATOR}" STREQUAL "GLSLANGVALIDATOR-NOTFOUND")
endif()

set(GLSL_FLAGS "")
set(SPIR_V_VERSION "spirv1.3")
set(QUIET_FLAG "--quiet")

set(SHADER_INCLUDE ${CMAKE_CURRENT_BINARY_DIR}/include)
@@ -127,7 +123,7 @@ foreach(FILENAME IN ITEMS ${SHADER_FILES})
    OUTPUT
        ${SPIRV_HEADER_FILE}
    COMMAND
        ${GLSLANGVALIDATOR} -V ${QUIET_FLAG} -I"${FIDELITYFX_INCLUDE_DIR}" ${GLSL_FLAGS} --variable-name ${SPIRV_VARIABLE_NAME} -o ${SPIRV_HEADER_FILE} ${SOURCE_FILE} --target-env ${SPIR_V_VERSION}
        ${GLSLANGVALIDATOR} -V ${QUIET_FLAG} -I"${FIDELITYFX_INCLUDE_DIR}" ${GLSL_FLAGS} --variable-name ${SPIRV_VARIABLE_NAME} -o ${SPIRV_HEADER_FILE} ${SOURCE_FILE}
    MAIN_DEPENDENCY
        ${SOURCE_FILE}
)
@@ -67,7 +67,6 @@ public:
        }

        auto& params = maxwell3d.draw_manager->GetIndirectParams();
        params.is_byte_count = false;
        params.is_indexed = false;
        params.include_count = false;
        params.count_start_address = 0;
@@ -162,7 +161,6 @@ public:
            0, 0x644, Maxwell3D::HLEReplacementAttributeType::BaseInstance);
        }
        auto& params = maxwell3d.draw_manager->GetIndirectParams();
        params.is_byte_count = false;
        params.is_indexed = true;
        params.include_count = false;
        params.count_start_address = 0;
@@ -258,7 +256,6 @@ public:
        const u32 estimate = static_cast<u32>(maxwell3d.EstimateIndexBufferSize());
        maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
        auto& params = maxwell3d.draw_manager->GetIndirectParams();
        params.is_byte_count = false;
        params.is_indexed = true;
        params.include_count = true;
        params.count_start_address = maxwell3d.GetMacroAddress(4);
@@ -322,47 +319,6 @@ private:
    }
};

class HLE_DrawIndirectByteCount final : public HLEMacroImpl {
public:
    explicit HLE_DrawIndirectByteCount(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}

    void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
        auto topology = static_cast<Maxwell3D::Regs::PrimitiveTopology>(parameters[0] & 0xFFFFU);
        if (!maxwell3d.AnyParametersDirty() || !IsTopologySafe(topology)) {
            Fallback(parameters);
            return;
        }

        auto& params = maxwell3d.draw_manager->GetIndirectParams();
        params.is_byte_count = true;
        params.is_indexed = false;
        params.include_count = false;
        params.count_start_address = 0;
        params.indirect_start_address = maxwell3d.GetMacroAddress(2);
        params.buffer_size = 4;
        params.max_draw_counts = 1;
        params.stride = parameters[1];
        maxwell3d.regs.draw.begin = parameters[0];
        maxwell3d.regs.draw_auto_stride = parameters[1];
        maxwell3d.regs.draw_auto_byte_count = parameters[2];

        maxwell3d.draw_manager->DrawArrayIndirect(topology);
    }

private:
    void Fallback(const std::vector<u32>& parameters) {
        maxwell3d.RefreshParameters();

        maxwell3d.regs.draw.begin = parameters[0];
        maxwell3d.regs.draw_auto_stride = parameters[1];
        maxwell3d.regs.draw_auto_byte_count = parameters[2];

        maxwell3d.draw_manager->DrawArray(
            maxwell3d.regs.draw.topology, 0,
            maxwell3d.regs.draw_auto_byte_count / maxwell3d.regs.draw_auto_stride, 0, 1);
    }
};

class HLE_C713C83D8F63CCF3 final : public HLEMacroImpl {
public:
    explicit HLE_C713C83D8F63CCF3(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}

@@ -580,11 +536,6 @@ HLEMacro::HLEMacro(Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} {
                         [](Maxwell3D& maxwell3d__) -> std::unique_ptr<CachedMacro> {
                             return std::make_unique<HLE_TransformFeedbackSetup>(maxwell3d__);
                         }));
    builders.emplace(0xB5F74EDB717278ECULL,
                     std::function<std::unique_ptr<CachedMacro>(Maxwell3D&)>(
                         [](Maxwell3D& maxwell3d__) -> std::unique_ptr<CachedMacro> {
                             return std::make_unique<HLE_DrawIndirectByteCount>(maxwell3d__);
                         }));
}

HLEMacro::~HLEMacro() = default;
@@ -25,13 +25,6 @@
#include "video_core/rasterizer_interface.h"
#include "video_core/texture_cache/slot_vector.h"

namespace VideoCore {
enum class QueryType {
    SamplesPassed,
};
constexpr std::size_t NumQueryTypes = 1;
} // namespace VideoCore

namespace VideoCommon {

using AsyncJobId = SlotId;
@@ -105,10 +98,10 @@ private:
};

template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
class QueryCacheLegacy : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
    explicit QueryCacheLegacy(VideoCore::RasterizerInterface& rasterizer_,
                              Core::Memory::Memory& cpu_memory_)
    explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
                            Core::Memory::Memory& cpu_memory_)
        : rasterizer{rasterizer_},
          // Use reinterpret_cast instead of static_cast as workaround for
          // UBSan bug (https://github.com/llvm/llvm-project/issues/59060)
@@ -12,7 +12,6 @@
#include "video_core/cache_types.h"
#include "video_core/engines/fermi_2d.h"
#include "video_core/gpu.h"
#include "video_core/query_cache/types.h"
#include "video_core/rasterizer_download_area.h"

namespace Tegra {
@@ -27,6 +26,11 @@ struct ChannelState;

namespace VideoCore {

enum class QueryType {
    SamplesPassed,
};
constexpr std::size_t NumQueryTypes = 1;

enum class LoadCallbackStage {
    Prepare,
    Build,
@@ -54,11 +58,10 @@ public:
    virtual void DispatchCompute() = 0;

    /// Resets the counter of a query
    virtual void ResetCounter(VideoCommon::QueryType type) = 0;
    virtual void ResetCounter(QueryType type) = 0;

    /// Records a GPU query and caches it
    virtual void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
                       VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) = 0;
    virtual void Query(GPUVAddr gpu_addr, QueryType type, std::optional<u64> timestamp) = 0;

    /// Signal an uniform buffer binding
    virtual void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
@@ -80,7 +83,7 @@ public:
    virtual void SignalReference() = 0;

    /// Release all pending fences.
    virtual void ReleaseFences(bool force = true) = 0;
    virtual void ReleaseFences() = 0;

    /// Notify rasterizer that all caches should be flushed to Switch memory
    virtual void FlushAll() = 0;
@@ -26,18 +26,16 @@ void RasterizerNull::Draw(bool is_indexed, u32 instance_count) {}
void RasterizerNull::DrawTexture() {}
void RasterizerNull::Clear(u32 layer_count) {}
void RasterizerNull::DispatchCompute() {}
void RasterizerNull::ResetCounter(VideoCommon::QueryType type) {}
void RasterizerNull::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
                           VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) {
void RasterizerNull::ResetCounter(VideoCore::QueryType type) {}
void RasterizerNull::Query(GPUVAddr gpu_addr, VideoCore::QueryType type,
                           std::optional<u64> timestamp) {
    if (!gpu_memory) {
        return;
    }
    if (True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout)) {
        u64 ticks = m_gpu.GetTicks();
        gpu_memory->Write<u64>(gpu_addr + 8, ticks);
        gpu_memory->Write<u64>(gpu_addr, static_cast<u64>(payload));
    } else {
        gpu_memory->Write<u32>(gpu_addr, payload);

    gpu_memory->Write(gpu_addr, u64{0});
    if (timestamp) {
        gpu_memory->Write(gpu_addr + 8, *timestamp);
    }
}
void RasterizerNull::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
@@ -76,7 +74,7 @@ void RasterizerNull::SignalSyncPoint(u32 value) {
    syncpoint_manager.IncrementHost(value);
}
void RasterizerNull::SignalReference() {}
void RasterizerNull::ReleaseFences(bool) {}
void RasterizerNull::ReleaseFences() {}
void RasterizerNull::FlushAndInvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
void RasterizerNull::WaitForIdle() {}
void RasterizerNull::FragmentBarrier() {}
@@ -42,9 +42,8 @@ public:
    void DrawTexture() override;
    void Clear(u32 layer_count) override;
    void DispatchCompute() override;
    void ResetCounter(VideoCommon::QueryType type) override;
    void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
               VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override;
    void ResetCounter(VideoCore::QueryType type) override;
    void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
    void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
    void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
    void FlushAll() override;
@@ -64,7 +63,7 @@ public:
    void SyncOperation(std::function<void()>&& func) override;
    void SignalSyncPoint(u32 value) override;
    void SignalReference() override;
    void ReleaseFences(bool force) override;
    void ReleaseFences() override;
    void FlushAndInvalidateRegion(
        VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
    void WaitForIdle() override;
@@ -27,7 +27,7 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {
} // Anonymous namespace

QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_)
    : QueryCacheLegacy(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {}
    : QueryCacheBase(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {}

QueryCache::~QueryCache() = default;

@@ -26,7 +26,7 @@ class RasterizerOpenGL;
using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;

class QueryCache final
    : public VideoCommon::QueryCacheLegacy<QueryCache, CachedQuery, CounterStream, HostCounter> {
    : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
public:
    explicit QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_);
    ~QueryCache();
@@ -385,39 +385,13 @@ void RasterizerOpenGL::DispatchCompute() {
    has_written_global_memory |= pipeline->WritesGlobalMemory();
}

void RasterizerOpenGL::ResetCounter(VideoCommon::QueryType type) {
    if (type == VideoCommon::QueryType::ZPassPixelCount64) {
        query_cache.ResetCounter(VideoCore::QueryType::SamplesPassed);
    }
void RasterizerOpenGL::ResetCounter(VideoCore::QueryType type) {
    query_cache.ResetCounter(type);
}

void RasterizerOpenGL::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
                             VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) {
    if (type == VideoCommon::QueryType::ZPassPixelCount64) {
        if (True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout)) {
            query_cache.Query(gpu_addr, VideoCore::QueryType::SamplesPassed, {gpu.GetTicks()});
        } else {
            query_cache.Query(gpu_addr, VideoCore::QueryType::SamplesPassed, std::nullopt);
        }
        return;
    }
    if (type != VideoCommon::QueryType::Payload) {
        payload = 1u;
    }
    std::function<void()> func([this, gpu_addr, flags, memory_manager = gpu_memory, payload]() {
        if (True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout)) {
            u64 ticks = gpu.GetTicks();
            memory_manager->Write<u64>(gpu_addr + 8, ticks);
            memory_manager->Write<u64>(gpu_addr, static_cast<u64>(payload));
        } else {
            memory_manager->Write<u32>(gpu_addr, payload);
        }
    });
    if (True(flags & VideoCommon::QueryPropertiesFlags::IsAFence)) {
        SignalFence(std::move(func));
        return;
    }
    func();
void RasterizerOpenGL::Query(GPUVAddr gpu_addr, VideoCore::QueryType type,
                             std::optional<u64> timestamp) {
    query_cache.Query(gpu_addr, type, timestamp);
}

void RasterizerOpenGL::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
@@ -588,8 +562,8 @@ void RasterizerOpenGL::SignalReference() {
    fence_manager.SignalOrdering();
}

void RasterizerOpenGL::ReleaseFences(bool force) {
    fence_manager.WaitPendingFences(force);
void RasterizerOpenGL::ReleaseFences() {
    fence_manager.WaitPendingFences();
}

void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size,
@@ -86,9 +86,8 @@ public:
    void DrawTexture() override;
    void Clear(u32 layer_count) override;
    void DispatchCompute() override;
    void ResetCounter(VideoCommon::QueryType type) override;
    void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
               VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override;
    void ResetCounter(VideoCore::QueryType type) override;
    void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
    void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
    void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
    void FlushAll() override;
@@ -108,7 +107,7 @@ public:
    void SyncOperation(std::function<void()>&& func) override;
    void SignalSyncPoint(u32 value) override;
    void SignalReference() override;
    void ReleaseFences(bool force = true) override;
    void ReleaseFences() override;
    void FlushAndInvalidateRegion(
        VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
    void WaitForIdle() override;
@@ -61,9 +61,6 @@ vk::Buffer CreateBuffer(const Device& device, const MemoryAllocator& memory_allocator,
    if (device.IsExtTransformFeedbackSupported()) {
        flags |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
    }
    if (device.IsExtConditionalRendering()) {
        flags |= VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT;
    }
    const VkBufferCreateInfo buffer_ci = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .pNext = nullptr,
@@ -12,9 +12,6 @@
#include "common/common_types.h"
#include "common/div_ceil.h"
#include "video_core/host_shaders/astc_decoder_comp_spv.h"
#include "video_core/host_shaders/queries_prefix_scan_sum_comp_spv.h"
#include "video_core/host_shaders/queries_prefix_scan_sum_nosubgroups_comp_spv.h"
#include "video_core/host_shaders/resolve_conditional_render_comp_spv.h"
#include "video_core/host_shaders/vulkan_quad_indexed_comp_spv.h"
#include "video_core/host_shaders/vulkan_uint8_comp_spv.h"
#include "video_core/renderer_vulkan/vk_compute_pass.h"
@@ -60,30 +57,6 @@ constexpr std::array<VkDescriptorSetLayoutBinding, 2> INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS{{
    },
}};

constexpr std::array<VkDescriptorSetLayoutBinding, 3> QUERIES_SCAN_DESCRIPTOR_SET_BINDINGS{{
    {
        .binding = 0,
        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
        .descriptorCount = 1,
        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
        .pImmutableSamplers = nullptr,
    },
    {
        .binding = 1,
        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
        .descriptorCount = 1,
        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
        .pImmutableSamplers = nullptr,
    },
    {
        .binding = 2,
        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
        .descriptorCount = 1,
        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
        .pImmutableSamplers = nullptr,
    },
}};

constexpr DescriptorBankInfo INPUT_OUTPUT_BANK_INFO{
    .uniform_buffers = 0,
    .storage_buffers = 2,
@@ -94,16 +67,6 @@ constexpr DescriptorBankInfo INPUT_OUTPUT_BANK_INFO{
    .score = 2,
};

constexpr DescriptorBankInfo QUERIES_SCAN_BANK_INFO{
    .uniform_buffers = 0,
    .storage_buffers = 3,
    .texture_buffers = 0,
    .image_buffers = 0,
    .textures = 0,
    .images = 0,
    .score = 3,
};

constexpr std::array<VkDescriptorSetLayoutBinding, ASTC_NUM_BINDINGS> ASTC_DESCRIPTOR_SET_BINDINGS{{
    {
        .binding = ASTC_BINDING_INPUT_BUFFER,
@@ -140,15 +103,6 @@ constexpr VkDescriptorUpdateTemplateEntry INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE{
    .stride = sizeof(DescriptorUpdateEntry),
};

constexpr VkDescriptorUpdateTemplateEntry QUERIES_SCAN_DESCRIPTOR_UPDATE_TEMPLATE{
    .dstBinding = 0,
    .dstArrayElement = 0,
    .descriptorCount = 3,
    .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
    .offset = 0,
    .stride = sizeof(DescriptorUpdateEntry),
};

constexpr std::array<VkDescriptorUpdateTemplateEntry, ASTC_NUM_BINDINGS>
    ASTC_PASS_DESCRIPTOR_UPDATE_TEMPLATE_ENTRY{{
        {
@@ -177,19 +131,13 @@ struct AstcPushConstants {
    u32 block_height;
    u32 block_height_mask;
};

struct QueriesPrefixScanPushConstants {
    u32 max_accumulation_base;
    u32 accumulation_limit;
};
} // Anonymous namespace

ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,
                         vk::Span<VkDescriptorSetLayoutBinding> bindings,
                         vk::Span<VkDescriptorUpdateTemplateEntry> templates,
                         const DescriptorBankInfo& bank_info,
                         vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code,
                         std::optional<u32> optional_subgroup_size)
                         vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code)
    : device{device_} {
    descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout({
        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
@@ -230,19 +178,13 @@ ComputePass::ComputePass(const Device& device_, DescriptorPool& descriptor_pool,
        .pCode = code.data(),
    });
    device.SaveShader(code);
    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
        .pNext = nullptr,
        .requiredSubgroupSize = optional_subgroup_size ? *optional_subgroup_size : 32U,
    };
    bool use_setup_size = device.IsExtSubgroupSizeControlSupported() && optional_subgroup_size;
    pipeline = device.GetLogical().CreateComputePipeline({
        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .stage{
            .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
            .pNext = use_setup_size ? &subgroup_size_ci : nullptr,
            .pNext = nullptr,
            .flags = 0,
            .stage = VK_SHADER_STAGE_COMPUTE_BIT,
            .module = *module,
@@ -360,114 +302,6 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
    return {staging.buffer, staging.offset};
}

ConditionalRenderingResolvePass::ConditionalRenderingResolvePass(
    const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
    ComputePassDescriptorQueue& compute_pass_descriptor_queue_)
    : ComputePass(device_, descriptor_pool_, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
                  INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO, nullptr,
                  RESOLVE_CONDITIONAL_RENDER_COMP_SPV),
      scheduler{scheduler_}, compute_pass_descriptor_queue{compute_pass_descriptor_queue_} {}

void ConditionalRenderingResolvePass::Resolve(VkBuffer dst_buffer, VkBuffer src_buffer,
                                              u32 src_offset, bool compare_to_zero) {
    const size_t compare_size = compare_to_zero ? 8 : 24;

    compute_pass_descriptor_queue.Acquire();
    compute_pass_descriptor_queue.AddBuffer(src_buffer, src_offset, compare_size);
    compute_pass_descriptor_queue.AddBuffer(dst_buffer, 0, sizeof(u32));
    const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()};

    scheduler.RequestOutsideRenderPassOperationContext();
    scheduler.Record([this, descriptor_data](vk::CommandBuffer cmdbuf) {
        static constexpr VkMemoryBarrier read_barrier{
            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
            .pNext = nullptr,
            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT,
            .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
        };
        static constexpr VkMemoryBarrier write_barrier{
            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
            .pNext = nullptr,
            .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
            .dstAccessMask = VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT,
        };
        const VkDescriptorSet set = descriptor_allocator.Commit();
        device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);

        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                               VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, read_barrier);
        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
        cmdbuf.Dispatch(1, 1, 1);
        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                               VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, 0, write_barrier);
    });
}

QueriesPrefixScanPass::QueriesPrefixScanPass(
    const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
    ComputePassDescriptorQueue& compute_pass_descriptor_queue_)
    : ComputePass(
          device_, descriptor_pool_, QUERIES_SCAN_DESCRIPTOR_SET_BINDINGS,
          QUERIES_SCAN_DESCRIPTOR_UPDATE_TEMPLATE, QUERIES_SCAN_BANK_INFO,
          COMPUTE_PUSH_CONSTANT_RANGE<sizeof(QueriesPrefixScanPushConstants)>,
          device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_BASIC_BIT) &&
                  device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_ARITHMETIC_BIT) &&
                  device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_SHUFFLE_BIT) &&
                  device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT)
              ? std::span<const u32>(QUERIES_PREFIX_SCAN_SUM_COMP_SPV)
              : std::span<const u32>(QUERIES_PREFIX_SCAN_SUM_NOSUBGROUPS_COMP_SPV),
          {32}),
      scheduler{scheduler_}, compute_pass_descriptor_queue{compute_pass_descriptor_queue_} {}

void QueriesPrefixScanPass::Run(VkBuffer accumulation_buffer, VkBuffer dst_buffer,
                                VkBuffer src_buffer, size_t number_of_sums,
                                size_t max_accumulation_limit) {
    size_t aligned_runs = Common::AlignUp(number_of_sums, 32);

    compute_pass_descriptor_queue.Acquire();
    compute_pass_descriptor_queue.AddBuffer(src_buffer, 0, aligned_runs * sizeof(u64));
    compute_pass_descriptor_queue.AddBuffer(dst_buffer, 0, aligned_runs * sizeof(u64));
    compute_pass_descriptor_queue.AddBuffer(accumulation_buffer, 0, sizeof(u64));
    const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()};

    scheduler.RequestOutsideRenderPassOperationContext();
    scheduler.Record([this, descriptor_data, max_accumulation_limit, number_of_sums,
                      aligned_runs](vk::CommandBuffer cmdbuf) {
        static constexpr VkMemoryBarrier read_barrier{
            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
            .pNext = nullptr,
            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
            .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
        };
        static constexpr VkMemoryBarrier write_barrier{
            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
            .pNext = nullptr,
            .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
            .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT |
                             VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
                             VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT |
                             VK_ACCESS_UNIFORM_READ_BIT |
                             VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT,
        };
        const QueriesPrefixScanPushConstants uniforms{
            .max_accumulation_base = static_cast<u32>(max_accumulation_limit),
            .accumulation_limit = static_cast<u32>(number_of_sums - 1),
        };
        const VkDescriptorSet set = descriptor_allocator.Commit();
        device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);

        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                               VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, read_barrier);
        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
        cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
        cmdbuf.Dispatch(static_cast<u32>(aligned_runs / 32U), 1, 1);
        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                               VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, 0, write_barrier);
    });
}

ASTCDecoderPass::ASTCDecoderPass(const Device& device_, Scheduler& scheduler_,
                                 DescriptorPool& descriptor_pool_,
                                 StagingBufferPool& staging_buffer_pool_,
@@ -3,7 +3,6 @@

#pragma once

#include <optional>
#include <span>
#include <utility>

@@ -32,8 +31,7 @@ public:
                         vk::Span<VkDescriptorSetLayoutBinding> bindings,
                         vk::Span<VkDescriptorUpdateTemplateEntry> templates,
                         const DescriptorBankInfo& bank_info,
                         vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code,
                         std::optional<u32> optional_subgroup_size = std::nullopt);
                         vk::Span<VkPushConstantRange> push_constants, std::span<const u32> code);
    ~ComputePass();

protected:
@@ -84,33 +82,6 @@ private:
    ComputePassDescriptorQueue& compute_pass_descriptor_queue;
};

class ConditionalRenderingResolvePass final : public ComputePass {
public:
    explicit ConditionalRenderingResolvePass(
        const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
        ComputePassDescriptorQueue& compute_pass_descriptor_queue_);

    void Resolve(VkBuffer dst_buffer, VkBuffer src_buffer, u32 src_offset, bool compare_to_zero);

private:
    Scheduler& scheduler;
    ComputePassDescriptorQueue& compute_pass_descriptor_queue;
};

class QueriesPrefixScanPass final : public ComputePass {
public:
    explicit QueriesPrefixScanPass(const Device& device_, Scheduler& scheduler_,
                                   DescriptorPool& descriptor_pool_,
                                   ComputePassDescriptorQueue& compute_pass_descriptor_queue_);

    void Run(VkBuffer accumulation_buffer, VkBuffer dst_buffer, VkBuffer src_buffer,
             size_t number_of_sums, size_t max_accumulation_limit);

private:
    Scheduler& scheduler;
    ComputePassDescriptorQueue& compute_pass_descriptor_queue;
};

class ASTCDecoderPass final : public ComputePass {
public:
    explicit ASTCDecoderPass(const Device& device_, Scheduler& scheduler_,
@@ -7,7 +7,6 @@

#include "video_core/fence_manager.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_query_cache.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"

namespace Core {
@@ -21,6 +20,7 @@ class RasterizerInterface;
namespace Vulkan {

class Device;
class QueryCache;
class Scheduler;

class InnerFence : public VideoCommon::FenceBase {
(File diff suppressed because it is too large.)
@@ -1,75 +1,101 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-3.0-or-later
// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#pragma once

#include <cstddef>
#include <memory>
#include <utility>
#include <vector>

#include "video_core/query_cache/query_cache_base.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "common/common_types.h"
#include "video_core/query_cache.h"
#include "video_core/renderer_vulkan/vk_resource_pool.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace VideoCore {
class RasterizerInterface;
}

namespace VideoCommon {
class StreamerInterface;
}

namespace Vulkan {

class CachedQuery;
class Device;
class HostCounter;
class QueryCache;
class Scheduler;
class StagingBufferPool;

struct QueryCacheRuntimeImpl;
using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;

class QueryCacheRuntime {
class QueryPool final : public ResourcePool {
public:
    explicit QueryCacheRuntime(VideoCore::RasterizerInterface* rasterizer,
                               Core::Memory::Memory& cpu_memory_,
                               Vulkan::BufferCache& buffer_cache_, const Device& device_,
                               const MemoryAllocator& memory_allocator_, Scheduler& scheduler_,
                               StagingBufferPool& staging_pool_,
                               ComputePassDescriptorQueue& compute_pass_descriptor_queue,
                               DescriptorPool& descriptor_pool);
    ~QueryCacheRuntime();
    explicit QueryPool(const Device& device, Scheduler& scheduler, VideoCore::QueryType type);
    ~QueryPool() override;

    template <typename SyncValuesType>
    void SyncValues(std::span<SyncValuesType> values, VkBuffer base_src_buffer = nullptr);
    std::pair<VkQueryPool, u32> Commit();

    void Barriers(bool is_prebarrier);
    void Reserve(std::pair<VkQueryPool, u32> query);

    void EndHostConditionalRendering();

    void PauseHostConditionalRendering();

    void ResumeHostConditionalRendering();

    bool HostConditionalRenderingCompareValue(VideoCommon::LookupData object_1, bool qc_dirty);

    bool HostConditionalRenderingCompareValues(VideoCommon::LookupData object_1,
                                               VideoCommon::LookupData object_2, bool qc_dirty,
                                               bool equal_check);

    VideoCommon::StreamerInterface* GetStreamerInterface(VideoCommon::QueryType query_type);

    void Bind3DEngine(Tegra::Engines::Maxwell3D* maxwell3d);

    template <typename Func>
    void View3DRegs(Func&& func);
protected:
    void Allocate(std::size_t begin, std::size_t end) override;

private:
    void HostConditionalRenderingCompareValueImpl(VideoCommon::LookupData object, bool is_equal);
    void HostConditionalRenderingCompareBCImpl(VAddr address, bool is_equal);
    friend struct QueryCacheRuntimeImpl;
    std::unique_ptr<QueryCacheRuntimeImpl> impl;
    static constexpr std::size_t GROW_STEP = 512;

    const Device& device;
    const VideoCore::QueryType type;

    std::vector<vk::QueryPool> pools;
    std::vector<bool> usage;
};

struct QueryCacheParams {
    using RuntimeType = typename Vulkan::QueryCacheRuntime;
class QueryCache final
    : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
public:
    explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_,
                        Core::Memory::Memory& cpu_memory_, const Device& device_,
                        Scheduler& scheduler_);
    ~QueryCache();

    std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);

    void Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query);

    const Device& GetDevice() const noexcept {
        return device;
    }

    Scheduler& GetScheduler() const noexcept {
        return scheduler;
    }

private:
    const Device& device;
    Scheduler& scheduler;
    std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
};

using QueryCache = VideoCommon::QueryCacheBase<QueryCacheParams>;
class HostCounter final : public VideoCommon::HostCounterBase<QueryCache, HostCounter> {
public:
    explicit HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency_,
                         VideoCore::QueryType type_);
    ~HostCounter();

    void EndQuery();

private:
    u64 BlockingQuery(bool async = false) const override;

    QueryCache& cache;
    const VideoCore::QueryType type;
    const std::pair<VkQueryPool, u32> query;
    const u64 tick;
};

class CachedQuery : public VideoCommon::CachedQueryBase<HostCounter> {
public:
    explicit CachedQuery(QueryCache&, VideoCore::QueryType, VAddr cpu_addr_, u8* host_ptr_)
        : CachedQueryBase{cpu_addr_, host_ptr_} {}
};

} // namespace Vulkan
@ -24,7 +24,6 @@
|
|||
#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
|
||||
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
|
||||
#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_query_cache.h"
|
||||
#include "video_core/renderer_vulkan/vk_rasterizer.h"
|
||||
#include "video_core/renderer_vulkan/vk_scheduler.h"
|
||||
#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
|
||||
|
@ -171,11 +170,9 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
|
|||
buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
|
||||
guest_descriptor_queue, compute_pass_descriptor_queue, descriptor_pool),
|
||||
buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
|
||||
query_cache_runtime(this, cpu_memory_, buffer_cache, device, memory_allocator, scheduler,
|
||||
staging_pool, compute_pass_descriptor_queue, descriptor_pool),
|
||||
query_cache(gpu, *this, cpu_memory_, query_cache_runtime),
|
||||
pipeline_cache(*this, device, scheduler, descriptor_pool, guest_descriptor_queue,
|
||||
render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
|
||||
query_cache{*this, cpu_memory_, device, scheduler},
|
||||
accelerate_dma(buffer_cache, texture_cache, scheduler),
|
||||
fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
|
||||
wfi_event(device.GetLogical().CreateEvent()) {
|
||||
|
@ -192,7 +189,14 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
|
|||
FlushWork();
|
||||
gpu_memory->FlushCaching();
|
||||
|
||||
query_cache.NotifySegment(true);
|
||||
#if ANDROID
|
||||
if (Settings::IsGPULevelHigh()) {
|
||||
// This is problematic on Android, disable on GPU Normal.
|
||||
query_cache.UpdateCounters();
|
||||
}
|
||||
#else
|
||||
query_cache.UpdateCounters();
|
||||
#endif
|
||||
|
||||
GraphicsPipeline* const pipeline{pipeline_cache.CurrentGraphicsPipeline()};
|
||||
if (!pipeline) {
|
||||
|
@ -203,12 +207,13 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
|
|||
pipeline->SetEngine(maxwell3d, gpu_memory);
|
||||
pipeline->Configure(is_indexed);
|
||||
|
||||
BeginTransformFeedback();
|
||||
|
||||
UpdateDynamicStates();
|
||||
|
||||
HandleTransformFeedback();
|
||||
query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64,
|
||||
maxwell3d->regs.zpass_pixel_count_enable);
|
||||
draw_func();
|
||||
|
||||
EndTransformFeedback();
|
||||
}
|
||||
|
||||
void RasterizerVulkan::Draw(bool is_indexed, u32 instance_count) {
|
||||
|
@ -236,14 +241,6 @@ void RasterizerVulkan::DrawIndirect() {
|
|||
const auto indirect_buffer = buffer_cache.GetDrawIndirectBuffer();
|
||||
const auto& buffer = indirect_buffer.first;
|
||||
const auto& offset = indirect_buffer.second;
|
||||
if (params.is_byte_count) {
|
||||
scheduler.Record([buffer_obj = buffer->Handle(), offset,
|
||||
stride = params.stride](vk::CommandBuffer cmdbuf) {
|
||||
cmdbuf.DrawIndirectByteCountEXT(1, 0, buffer_obj, offset, 0,
|
||||
static_cast<u32>(stride));
|
||||
});
|
||||
return;
|
||||
}
|
||||
if (params.include_count) {
|
||||
const auto count = buffer_cache.GetDrawIndirectCount();
|
||||
const auto& draw_buffer = count.first;
|
||||
|
@ -283,15 +280,20 @@ void RasterizerVulkan::DrawTexture() {
|
|||
SCOPE_EXIT({ gpu.TickWork(); });
|
||||
FlushWork();
|
||||
|
||||
query_cache.NotifySegment(true);
|
||||
#if ANDROID
|
||||
if (Settings::IsGPULevelHigh()) {
|
||||
// This is problematic on Android, disable on GPU Normal.
|
||||
query_cache.UpdateCounters();
|
||||
}
|
||||
#else
|
||||
query_cache.UpdateCounters();
|
||||
#endif
|
||||
|
||||
texture_cache.SynchronizeGraphicsDescriptors();
|
||||
texture_cache.UpdateRenderTargets(false);
|
||||
|
||||
UpdateDynamicStates();
|
||||
|
||||
query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64,
|
||||
maxwell3d->regs.zpass_pixel_count_enable);
|
||||
const auto& draw_texture_state = maxwell3d->draw_manager->GetDrawTextureState();
|
||||
const auto& sampler = texture_cache.GetGraphicsSampler(draw_texture_state.src_sampler);
|
||||
const auto& texture = texture_cache.GetImageView(draw_texture_state.src_texture);
|
||||
|
@ -314,9 +316,14 @@ void RasterizerVulkan::Clear(u32 layer_count) {
|
|||
FlushWork();
|
||||
gpu_memory->FlushCaching();
|
||||
|
||||
query_cache.NotifySegment(true);
|
||||
query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64,
|
||||
maxwell3d->regs.zpass_pixel_count_enable);
|
||||
#if ANDROID
|
||||
if (Settings::IsGPULevelHigh()) {
|
||||
// This is problematic on Android, disable on GPU Normal.
|
||||
query_cache.UpdateCounters();
|
||||
}
|
||||
#else
|
||||
query_cache.UpdateCounters();
|
||||
#endif
|
||||
|
||||
auto& regs = maxwell3d->regs;
|
||||
const bool use_color = regs.clear_surface.R || regs.clear_surface.G || regs.clear_surface.B ||
|
||||
|
@ -461,13 +468,13 @@ void RasterizerVulkan::DispatchCompute() {
|
|||
scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); });
|
||||
}
|
||||
|
||||
void RasterizerVulkan::ResetCounter(VideoCommon::QueryType type) {
|
||||
query_cache.CounterReset(type);
|
||||
void RasterizerVulkan::ResetCounter(VideoCore::QueryType type) {
|
||||
query_cache.ResetCounter(type);
|
||||
}
|
||||
|
||||
void RasterizerVulkan::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
|
||||
VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) {
|
||||
query_cache.CounterReport(gpu_addr, type, flags, payload, subreport);
|
||||
void RasterizerVulkan::Query(GPUVAddr gpu_addr, VideoCore::QueryType type,
|
||||
std::optional<u64> timestamp) {
|
||||
query_cache.Query(gpu_addr, type, timestamp);
|
||||
}
|
||||
|
||||
void RasterizerVulkan::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
|
||||
|
@ -648,8 +655,8 @@ void RasterizerVulkan::SignalReference() {
|
|||
fence_manager.SignalReference();
|
||||
}
|
||||
|
||||
void RasterizerVulkan::ReleaseFences(bool force) {
|
||||
fence_manager.WaitPendingFences(force);
|
||||
void RasterizerVulkan::ReleaseFences() {
|
||||
fence_manager.WaitPendingFences();
|
||||
}
|
||||
|
||||
void RasterizerVulkan::FlushAndInvalidateRegion(VAddr addr, u64 size,
|
||||
|
@@ -673,8 +680,6 @@ void RasterizerVulkan::WaitForIdle() {
         flags |= VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT;
     }
 
-    query_cache.NotifyWFI();
-
     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([event = *wfi_event, flags](vk::CommandBuffer cmdbuf) {
         cmdbuf.SetEvent(event, flags);

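The removed NotifyWFI was the reworked cache's hook for guest wait-for-idle; the surviving code still records a VkEvent signal so the host can tell when the GPU has drained the listed pipeline stages. In raw Vulkan that mechanism looks roughly like this (error handling elided; device and cmdbuf assumed valid):

    #include <vulkan/vulkan.h>

    // Sketch: create an event, record a GPU-side signal, poll the host
    // side. The event flips to VK_EVENT_SET once all prior work clears
    // the named pipeline stages.
    bool RecordAndPollWfi(VkDevice device, VkCommandBuffer cmdbuf) {
        VkEventCreateInfo event_ci{};
        event_ci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;

        VkEvent wfi_event{};
        vkCreateEvent(device, &event_ci, nullptr, &wfi_event);

        // GPU timeline: set the event at this point in the stream.
        vkCmdSetEvent(cmdbuf, wfi_event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);

        // Host timeline (after submit): has the GPU reached the event?
        return vkGetEventStatus(device, wfi_event) == VK_EVENT_SET;
    }
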
@@ -718,7 +723,19 @@ void RasterizerVulkan::TickFrame() {
 
 bool RasterizerVulkan::AccelerateConditionalRendering() {
     gpu_memory->FlushCaching();
-    return query_cache.AccelerateHostConditionalRendering();
+    if (Settings::IsGPULevelHigh()) {
+        // TODO(Blinkhawk): Reimplement Host conditional rendering.
+        return false;
+    }
+    // Medium / Low Hack: stub any checks on queries written into the buffer cache.
+    const GPUVAddr condition_address{maxwell3d->regs.render_enable.Address()};
+    Maxwell::ReportSemaphore::Compare cmp;
+    if (gpu_memory->IsMemoryDirty(condition_address, sizeof(cmp),
+                                  VideoCommon::CacheType::BufferCache |
+                                  VideoCommon::CacheType::QueryCache)) {
+        return true;
+    }
+    return false;
 }
 
 bool RasterizerVulkan::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Surface& src,

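The restored AccelerateConditionalRendering gives up on true host-side predicate evaluation (hence the TODO) and falls back to a heuristic: if the semaphore the guest will compare against is still dirty in the buffer or query caches, report the render-enable check as handled rather than forcing a flush and readback. Distilled, with hypothetical types standing in for yuzu's:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-ins; field layout is illustrative only.
    struct GpuMemory {
        bool IsMemoryDirty(std::uint64_t addr, std::size_t size) const;
    };
    struct SemaphoreCompare {
        std::uint64_t words[2];
    };

    bool AccelerateConditionalRendering(const GpuMemory& gpu_memory,
                                        std::uint64_t condition_address) {
        // Dirty means a GPU-side write to the condition word is still
        // pending; trust the GPU and skip the host-side readback.
        return gpu_memory.IsMemoryDirty(condition_address, sizeof(SemaphoreCompare));
    }
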
@@ -764,7 +781,6 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
     if (!image_view) {
         return false;
     }
-    query_cache.NotifySegment(false);
     screen_info.image = image_view->ImageHandle();
     screen_info.image_view = image_view->Handle(Shader::TextureType::Color2D);
     screen_info.width = image_view->size.width;

@@ -903,18 +919,31 @@ void RasterizerVulkan::UpdateDynamicStates() {
     }
 }
 
-void RasterizerVulkan::HandleTransformFeedback() {
+void RasterizerVulkan::BeginTransformFeedback() {
     const auto& regs = maxwell3d->regs;
+    if (regs.transform_feedback_enabled == 0) {
+        return;
+    }
     if (!device.IsExtTransformFeedbackSupported()) {
         LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported");
         return;
     }
-    query_cache.CounterEnable(VideoCommon::QueryType::StreamingByteCount,
-                              regs.transform_feedback_enabled);
-    if (regs.transform_feedback_enabled != 0) {
-        UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) ||
-                         regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation));
+    UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) ||
+                     regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation));
+    scheduler.Record(
+        [](vk::CommandBuffer cmdbuf) { cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr); });
 }
 
+void RasterizerVulkan::EndTransformFeedback() {
+    const auto& regs = maxwell3d->regs;
+    if (regs.transform_feedback_enabled == 0) {
+        return;
+    }
+    if (!device.IsExtTransformFeedbackSupported()) {
+        return;
+    }
+    scheduler.Record(
+        [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
+}
 
 void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs) {

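HandleTransformFeedback is split back into the older BeginTransformFeedback/EndTransformFeedback pair, which maps directly onto VK_EXT_transform_feedback: draws whose vertex output should be captured are bracketed by the begin/end commands, here with no counter buffers (the 0, 0, nullptr, nullptr arguments). The raw bracket looks roughly like this (assumes the EXT entry points are loaded; pipeline and vertex-buffer setup elided):

    #include <vulkan/vulkan.h>

    // Sketch: capture a draw's vertex output into a transform feedback
    // buffer. `cmdbuf` must be inside a render pass and the device must
    // enable VK_EXT_transform_feedback.
    void DrawWithTransformFeedback(VkCommandBuffer cmdbuf, VkBuffer xfb_buffer,
                                   VkDeviceSize xfb_size) {
        const VkDeviceSize offset = 0;
        // Bind the buffer that receives the captured vertices.
        vkCmdBindTransformFeedbackBuffersEXT(cmdbuf, 0, 1, &xfb_buffer, &offset, &xfb_size);

        // No counter buffers, matching the calls in the hunk above.
        vkCmdBeginTransformFeedbackEXT(cmdbuf, 0, 0, nullptr, nullptr);
        vkCmdDraw(cmdbuf, 3, 1, 0, 0);
        vkCmdEndTransformFeedbackEXT(cmdbuf, 0, 0, nullptr, nullptr);
    }
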
@@ -84,9 +84,8 @@ public:
     void DrawTexture() override;
     void Clear(u32 layer_count) override;
     void DispatchCompute() override;
-    void ResetCounter(VideoCommon::QueryType type) override;
-    void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
-               VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override;
+    void ResetCounter(VideoCore::QueryType type) override;
+    void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
     void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
     void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
     void FlushAll() override;

@@ -107,7 +106,7 @@ public:
     void SyncOperation(std::function<void()>&& func) override;
     void SignalSyncPoint(u32 value) override;
     void SignalReference() override;
-    void ReleaseFences(bool force = true) override;
+    void ReleaseFences() override;
     void FlushAndInvalidateRegion(
         VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     void WaitForIdle() override;

@@ -147,7 +146,9 @@ private:
 
     void UpdateDynamicStates();
 
-    void HandleTransformFeedback();
+    void BeginTransformFeedback();
+
+    void EndTransformFeedback();
 
     void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs);
     void UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs);

@@ -194,9 +195,8 @@ private:
     TextureCache texture_cache;
     BufferCacheRuntime buffer_cache_runtime;
     BufferCache buffer_cache;
-    QueryCacheRuntime query_cache_runtime;
-    QueryCache query_cache;
     PipelineCache pipeline_cache;
+    QueryCache query_cache;
     AccelerateDMA accelerate_dma;
     FenceManager fence_manager;
 
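Moving the single QueryCache member below PipelineCache is not just cosmetic: C++ initializes non-static data members in declaration order (and destroys them in reverse), so a member that depends on another at construction time must be declared after it. A minimal illustration of the rule:

    // Sketch: members initialize top-to-bottom regardless of the order
    // written in a constructor's init list, so dependents come later.
    #include <iostream>

    struct BufferCache {
        BufferCache() { std::cout << "BufferCache\n"; }
    };

    struct QueryCache {
        explicit QueryCache(BufferCache&) { std::cout << "QueryCache\n"; }
    };

    struct Rasterizer {
        BufferCache buffer_cache; // must be declared first: QueryCache uses it
        QueryCache query_cache{buffer_cache};
    };

    int main() {
        Rasterizer rasterizer; // prints BufferCache, then QueryCache
    }
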
@@ -243,10 +243,10 @@ void Scheduler::AllocateNewContext() {
 #if ANDROID
         if (Settings::IsGPULevelHigh()) {
             // This is problematic on Android, disable on GPU Normal.
-            query_cache->NotifySegment(true);
+            query_cache->UpdateCounters();
         }
 #else
-        query_cache->NotifySegment(true);
+        query_cache->UpdateCounters();
 #endif
     }
 }

@@ -261,12 +261,11 @@ void Scheduler::EndPendingOperations() {
 #if ANDROID
     if (Settings::IsGPULevelHigh()) {
         // This is problematic on Android, disable on GPU Normal.
-        // query_cache->DisableStreams();
+        query_cache->DisableStreams();
     }
 #else
-    // query_cache->DisableStreams();
+    query_cache->DisableStreams();
 #endif
-    query_cache->NotifySegment(false);
     EndRenderPass();
 }
 
@@ -17,11 +17,6 @@
 #include "video_core/renderer_vulkan/vk_master_semaphore.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
-namespace VideoCommon {
-template <typename Trait>
-class QueryCacheBase;
-}
-
 namespace Vulkan {
 
 class CommandPool;

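The deleted block is the forward declaration the reworked scheduler used to avoid including the full query-cache header; a header only needs the complete definition if it calls members or stores the type by value. The pattern in isolation (same names as the diff, reduced to a self-contained header sketch):

    // Sketch: forward-declare a class template so a header can hold a
    // pointer to it without pulling in its definition.
    namespace VideoCommon {
    template <typename Trait>
    class QueryCacheBase; // declaration only; no #include required
    }

    struct QueryCacheParams; // trait type, also only declared

    class Scheduler {
    public:
        void SetQueryCache(VideoCommon::QueryCacheBase<QueryCacheParams>& cache) {
            query_cache = &cache;
        }

    private:
        // Pointers/references to incomplete types are fine; calling
        // members would need the full definition (in the .cpp file).
        VideoCommon::QueryCacheBase<QueryCacheParams>* query_cache = nullptr;
    };
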
@@ -29,8 +24,7 @@ class Device;
 class Framebuffer;
 class GraphicsPipeline;
 class StateTracker;
-
-struct QueryCacheParams;
+class QueryCache;
 
 /// The scheduler abstracts command buffer and fence management with an interface that's able to do
 /// OpenGL-like operations on Vulkan command buffers.

@@ -69,7 +63,7 @@ public:
     void InvalidateState();
 
     /// Assigns the query cache.
-    void SetQueryCache(VideoCommon::QueryCacheBase<QueryCacheParams>& query_cache_) {
+    void SetQueryCache(QueryCache& query_cache_) {
         query_cache = &query_cache_;
     }
 
@@ -225,7 +219,7 @@ private:
     std::unique_ptr<MasterSemaphore> master_semaphore;
     std::unique_ptr<CommandPool> command_pool;
 
-    VideoCommon::QueryCacheBase<QueryCacheParams>* query_cache = nullptr;
+    QueryCache* query_cache = nullptr;
 
     vk::CommandBuffer current_cmdbuf;

@@ -60,7 +60,6 @@ VK_DEFINE_HANDLE(VmaAllocator)
 
 // Define miscellaneous extensions which may be used by the implementation here.
 #define FOR_EACH_VK_EXTENSION(EXTENSION) \
-    EXTENSION(EXT, CONDITIONAL_RENDERING, conditional_rendering) \
     EXTENSION(EXT, CONSERVATIVE_RASTERIZATION, conservative_rasterization) \
     EXTENSION(EXT, DEPTH_RANGE_UNRESTRICTED, depth_range_unrestricted) \
     EXTENSION(EXT, MEMORY_BUDGET, memory_budget) \

@@ -93,7 +92,6 @@ VK_DEFINE_HANDLE(VmaAllocator)
 
 // Define extensions where the absence of the extension may result in a degraded experience.
 #define FOR_EACH_VK_RECOMMENDED_EXTENSION(EXTENSION_NAME) \
-    EXTENSION_NAME(VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME) \
     EXTENSION_NAME(VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME) \
     EXTENSION_NAME(VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME) \
     EXTENSION_NAME(VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME) \

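Both extension tables are X-macros: the list is written once and expanded with different per-entry macros elsewhere (boolean flags, name strings, and so on), so deleting CONDITIONAL_RENDERING from the list removes it from every expansion at once. A reduced example of the technique (simplified, not yuzu's exact macros):

    // Sketch: one X-macro list expanded twice, once into bool flags and
    // once into the extension-name strings used to enable them.
    #include <cstdio>

    #define FOR_EACH_EXTENSION(EXTENSION)                                      \
        EXTENSION(EXT, CONSERVATIVE_RASTERIZATION, conservative_rasterization) \
        EXTENSION(EXT, MEMORY_BUDGET, memory_budget)

    struct Extensions {
    #define FLAG(prefix, capital_name, name) bool name = false;
        FOR_EACH_EXTENSION(FLAG)
    #undef FLAG
    };

    static const char* const kExtensionNames[] = {
    #define NAME(prefix, capital_name, name) "VK_" #prefix "_" #capital_name,
        FOR_EACH_EXTENSION(NAME)
    #undef NAME
    };

    int main() {
        for (const char* name : kExtensionNames) {
            std::printf("%s\n", name); // VK_EXT_CONSERVATIVE_RASTERIZATION, ...
        }
    }
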
@@ -530,10 +528,6 @@ public:
         return extensions.shader_atomic_int64;
     }
 
-    bool IsExtConditionalRendering() const {
-        return extensions.conditional_rendering;
-    }
-
     bool HasTimelineSemaphore() const;
 
     /// Returns the minimum supported version of SPIR-V.

@@ -75,7 +75,6 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
     X(vkBeginCommandBuffer);
     X(vkBindBufferMemory);
     X(vkBindImageMemory);
-    X(vkCmdBeginConditionalRenderingEXT);
     X(vkCmdBeginQuery);
     X(vkCmdBeginRenderPass);
     X(vkCmdBeginTransformFeedbackEXT);

@@ -92,7 +91,6 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
     X(vkCmdCopyBufferToImage);
     X(vkCmdCopyImage);
    X(vkCmdCopyImageToBuffer);
-    X(vkCmdCopyQueryPoolResults);
     X(vkCmdDispatch);
     X(vkCmdDraw);
     X(vkCmdDrawIndexed);

@@ -100,8 +98,6 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
     X(vkCmdDrawIndexedIndirect);
     X(vkCmdDrawIndirectCount);
     X(vkCmdDrawIndexedIndirectCount);
-    X(vkCmdDrawIndirectByteCountEXT);
-    X(vkCmdEndConditionalRenderingEXT);
     X(vkCmdEndQuery);
     X(vkCmdEndRenderPass);
     X(vkCmdEndTransformFeedbackEXT);

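Each removed X(...) entry drops one entry point from the dynamic loader: the table resolves every listed name through vkGetDeviceProcAddr into the same-named PFN_ member of DeviceDispatch. The shape of that mechanism, simplified to two entries (a sketch of the idea, not yuzu's exact macro):

    #include <vulkan/vulkan.h>

    // Sketch: resolve device-level entry points into same-named members
    // of a dispatch table, mirroring the X(...) table above.
    struct DeviceDispatch {
        PFN_vkCmdDraw vkCmdDraw{};
        PFN_vkCmdDispatch vkCmdDispatch{};
    };

    void Load(VkDevice device, DeviceDispatch& dld) noexcept {
    #define X(name) dld.name = reinterpret_cast<PFN_##name>(vkGetDeviceProcAddr(device, #name))
        X(vkCmdDraw);
        X(vkCmdDispatch);
    #undef X
    }
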
@@ -185,7 +185,6 @@ struct DeviceDispatch : InstanceDispatch {
     PFN_vkBeginCommandBuffer vkBeginCommandBuffer{};
     PFN_vkBindBufferMemory vkBindBufferMemory{};
     PFN_vkBindImageMemory vkBindImageMemory{};
-    PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT{};
     PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT{};
     PFN_vkCmdBeginQuery vkCmdBeginQuery{};
     PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass{};

@@ -203,7 +202,6 @@ struct DeviceDispatch : InstanceDispatch {
     PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage{};
     PFN_vkCmdCopyImage vkCmdCopyImage{};
     PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer{};
-    PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults{};
     PFN_vkCmdDispatch vkCmdDispatch{};
     PFN_vkCmdDraw vkCmdDraw{};
     PFN_vkCmdDrawIndexed vkCmdDrawIndexed{};

@@ -211,8 +209,6 @@ struct DeviceDispatch : InstanceDispatch {
     PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect{};
     PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount{};
     PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount{};
-    PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT{};
-    PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT{};
     PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT{};
     PFN_vkCmdEndQuery vkCmdEndQuery{};
     PFN_vkCmdEndRenderPass vkCmdEndRenderPass{};

@@ -1185,13 +1181,6 @@ public:
                           count_offset, draw_count, stride);
     }
 
-    void DrawIndirectByteCountEXT(u32 instance_count, u32 first_instance, VkBuffer counter_buffer,
-                                  VkDeviceSize counter_buffer_offset, u32 counter_offset,
-                                  u32 stride) {
-        dld->vkCmdDrawIndirectByteCountEXT(handle, instance_count, first_instance, counter_buffer,
-                                           counter_buffer_offset, counter_offset, stride);
-    }
-
     void ClearAttachments(Span<VkClearAttachment> attachments,
                           Span<VkClearRect> rects) const noexcept {
         dld->vkCmdClearAttachments(handle, attachments.size(), attachments.data(), rects.size(),

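The removed wrapper forwarded to vkCmdDrawIndirectByteCountEXT, the transform-feedback companion draw: the vertex count is derived on the GPU from the byte counter a previous vkCmdEndTransformFeedbackEXT wrote into a counter buffer, so the CPU never reads it back. Raw usage (EXT entry point assumed loaded):

    #include <vulkan/vulkan.h>

    // Sketch: draw the vertices captured by an earlier transform
    // feedback pass, sized by the GPU-side byte counter.
    void DrawCaptured(VkCommandBuffer cmdbuf, VkBuffer counter_buffer,
                      uint32_t vertex_stride) {
        vkCmdDrawIndirectByteCountEXT(cmdbuf,
                                      /*instanceCount=*/1,
                                      /*firstInstance=*/0,
                                      counter_buffer,
                                      /*counterBufferOffset=*/0,
                                      /*counterOffset=*/0,
                                      vertex_stride);
    }
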
@@ -1276,13 +1265,6 @@ public:
                           regions.data());
     }
 
-    void CopyQueryPoolResults(VkQueryPool query_pool, u32 first_query, u32 query_count,
-                              VkBuffer dst_buffer, VkDeviceSize dst_offset, VkDeviceSize stride,
-                              VkQueryResultFlags flags) const noexcept {
-        dld->vkCmdCopyQueryPoolResults(handle, query_pool, first_query, query_count, dst_buffer,
-                                       dst_offset, stride, flags);
-    }
-
     void FillBuffer(VkBuffer dst_buffer, VkDeviceSize dst_offset, VkDeviceSize size,
                     u32 data) const noexcept {
         dld->vkCmdFillBuffer(handle, dst_buffer, dst_offset, size, data);

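The removed CopyQueryPoolResults wrapper exposed vkCmdCopyQueryPoolResults, which the reworked query cache used to resolve query results straight into a VkBuffer on the GPU timeline, avoiding a CPU round trip. Typical usage:

    #include <vulkan/vulkan.h>

    // Sketch: resolve a range of 64-bit query results into a buffer on
    // the GPU, waiting until each result becomes available.
    void ResolveQueries(VkCommandBuffer cmdbuf, VkQueryPool pool, VkBuffer dst) {
        vkCmdCopyQueryPoolResults(cmdbuf, pool,
                                  /*firstQuery=*/0, /*queryCount=*/16,
                                  dst, /*dstOffset=*/0,
                                  /*stride=*/sizeof(uint64_t),
                                  VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
    }
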
@@ -1461,15 +1443,6 @@ public:
                                       counter_buffers, counter_buffer_offsets);
     }
 
-    void BeginConditionalRenderingEXT(
-        const VkConditionalRenderingBeginInfoEXT& info) const noexcept {
-        dld->vkCmdBeginConditionalRenderingEXT(handle, &info);
-    }
-
-    void EndConditionalRenderingEXT() const noexcept {
-        dld->vkCmdEndConditionalRenderingEXT(handle);
-    }
-
     void BeginDebugUtilsLabelEXT(const char* label, std::span<float, 4> color) const noexcept {
         const VkDebugUtilsLabelEXT label_info{
             .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,

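Finally, the BeginConditionalRenderingEXT/EndConditionalRenderingEXT wrappers go away along with the rest of the VK_EXT_conditional_rendering plumbing; after this change the predicate is handled host-side, as in AccelerateConditionalRendering above. For reference, the raw bracket predicates the enclosed draws on a 32-bit value stored in a buffer (EXT entry points assumed loaded):

    #include <vulkan/vulkan.h>

    // Sketch: the GPU skips the bracketed draws when the 32-bit value at
    // `offset` in `predicate` is zero.
    void PredicatedDraw(VkCommandBuffer cmdbuf, VkBuffer predicate, VkDeviceSize offset) {
        VkConditionalRenderingBeginInfoEXT info{};
        info.sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT;
        info.buffer = predicate;
        info.offset = offset;
        info.flags = 0; // or VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT

        vkCmdBeginConditionalRenderingEXT(cmdbuf, &info);
        vkCmdDraw(cmdbuf, 3, 1, 0, 0);
        vkCmdEndConditionalRenderingEXT(cmdbuf);
    }
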