early-access version 2765

parent 43bf9efaf8
commit a3a83b9639
83 changed files with 264 additions and 312 deletions

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============

-This is the source code for early-access 2764.
+This is the source code for early-access 2765.

 ## Legal Notice

@@ -429,7 +429,7 @@ void CommandGenerator::GenerateDataSourceCommand(ServerVoiceInfo& voice_info, Vo
 in_params.node_id);
 break;
 default:
-UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
+ASSERT_MSG(false, "Unimplemented sample format={}", in_params.sample_format);
 }
 }
 }

@@ -1312,7 +1312,7 @@ void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, std::s
 samples_to_read - samples_read, channel, temp_mix_offset);
 break;
 default:
-UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
+ASSERT_MSG(false, "Unimplemented sample format={}", in_params.sample_format);
 }

 temp_mix_offset += samples_decoded;

@@ -50,7 +50,7 @@ EffectBase* EffectContext::RetargetEffect(std::size_t i, EffectType effect) {
 effects[i] = std::make_unique<EffectBiquadFilter>();
 break;
 default:
-UNREACHABLE_MSG("Unimplemented effect {}", effect);
+ASSERT_MSG(false, "Unimplemented effect {}", effect);
 effects[i] = std::make_unique<EffectStubbed>();
 }
 return GetInfo(i);

@@ -104,7 +104,7 @@ void EffectI3dl2Reverb::Update(EffectInfo::InParams& in_params) {
 auto& params = GetParams();
 const auto* reverb_params = reinterpret_cast<I3dl2ReverbParams*>(in_params.raw.data());
 if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
-UNREACHABLE_MSG("Invalid reverb max channel count {}", reverb_params->max_channels);
+ASSERT_MSG(false, "Invalid reverb max channel count {}", reverb_params->max_channels);
 return;
 }

@@ -483,7 +483,7 @@ bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
 // Add more work
 index_stack.push(j);
 } else if (node_state == NodeStates::State::InFound) {
-UNREACHABLE_MSG("Node start marked as found");
+ASSERT_MSG(false, "Node start marked as found");
 ResetState();
 return false;
 }

@@ -114,7 +114,7 @@ void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
 in_params.current_playstate = ServerPlayState::Play;
 break;
 default:
-UNREACHABLE_MSG("Unknown playstate {}", voice_in.play_state);
+ASSERT_MSG(false, "Unknown playstate {}", voice_in.play_state);
 break;
 }

@@ -410,7 +410,7 @@ bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
 return in_params.should_depop;
 }
 default:
-UNREACHABLE_MSG("Invalid playstate {}", in_params.current_playstate);
+ASSERT_MSG(false, "Invalid playstate {}", in_params.current_playstate);
 }

 return false;
@@ -6,8 +6,17 @@

 #include "common/settings.h"

-void assert_handle_failure() {
+void assert_check_condition(bool cond, std::function<void()>&& on_failure) {
+if (!cond) [[unlikely]] {
+on_failure();
+
 if (Settings::values.use_debug_asserts) {
 Crash();
 }
+}
 }
+
+[[noreturn]] void unreachable_impl() {
+Crash();
+throw std::runtime_error("Unreachable code");
+}
@@ -4,49 +4,59 @@

#pragma once

#include <functional>

#include "common/logging/log.h"

// Sometimes we want to try to continue even after hitting an assert.
// However touching this file yields a global recompilation as this header is included almost
// everywhere. So let's just move the handling of the failed assert to a single cpp file.
void assert_handle_failure();

// For asserts we'd like to keep all the junk executed when an assert happens away from the
// important code in the function. One way of doing this is to put all the relevant code inside a
// lambda and force the compiler to not inline it. Unfortunately, MSVC seems to have no syntax to
// specify __declspec on lambda functions, so what we do instead is define a noinline wrapper
// template that calls the lambda. This seems to generate an extra instruction at the call-site
// compared to the ideal implementation (which wouldn't support ASSERT_MSG parameters), but is good
// enough for our purposes.
template <typename Fn>
#if defined(_MSC_VER)
[[msvc::noinline]]
#elif defined(__GNUC__)
[[gnu::cold, gnu::noinline]]
#endif
static void
assert_noinline_call(const Fn& fn) {
fn();
assert_handle_failure();
}
// lambda and force the compiler to not inline it.
void assert_check_condition(bool cond, std::function<void()>&& on_failure);

[[noreturn]] void unreachable_impl();

#define ASSERT(_a_) \
do \
do { \
if (std::is_constant_evaluated()) { \
if (!(_a_)) { \
assert_noinline_call([] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
/* Will trigger compile error here */ \
assert_check_condition(bool(_a_), \
[] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
} \
while (0)
} else { \
assert_check_condition(bool(_a_), [] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
} \
} while (0)

#define ASSERT_MSG(_a_, ...) \
do \
do { \
if (std::is_constant_evaluated()) { \
if (!(_a_)) { \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
/* Will trigger compile error here */ \
assert_check_condition(bool(_a_), \
[] { LOG_CRITICAL(Debug, "Assertion Failed!"); }); \
} \
while (0)
} else { \
assert_check_condition( \
bool(_a_), [&] { LOG_CRITICAL(Debug, "Assertion Failed!\n" __VA_ARGS__); }); \
} \
} while (0)

#define UNREACHABLE() \
do { \
LOG_CRITICAL(Debug, "Unreachable code!"); \
unreachable_impl(); \
} while (0)

#define UNREACHABLE() assert_noinline_call([] { LOG_CRITICAL(Debug, "Unreachable code!"); })
#define UNREACHABLE_MSG(...) \
assert_noinline_call([&] { LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); })
do { \
LOG_CRITICAL(Debug, "Unreachable code!\n" __VA_ARGS__); \
unreachable_impl(); \
} while (0)

#ifdef _DEBUG
#define DEBUG_ASSERT(_a_) ASSERT(_a_)
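Taken together, the assert.cpp and assert.h hunks above replace the old noinline-lambda wrapper with an out-of-line `assert_check_condition(bool, std::function<void()>&&)` plus a `[[noreturn]] unreachable_impl()`, which is why the rest of this commit rewrites reachable `UNREACHABLE*()` call sites as `ASSERT(false)` / `ASSERT_MSG(false, ...)`. A minimal, self-contained sketch of the same flow (a sketch only: `Settings`, `Crash()`, and the logging macro are simplified stand-ins, not the yuzu implementations):

```cpp
// Minimal sketch of the reworked assert flow, outside the yuzu tree.
// Assumptions: Settings, Crash(), and the logging macros are stand-ins.
#include <cstdlib>
#include <functional>
#include <iostream>
#include <stdexcept>

namespace Settings {
inline bool use_debug_asserts = false; // stands in for Settings::values.use_debug_asserts
} // namespace Settings

inline void Crash() {
    std::abort();
}

// Failed asserts funnel into one out-of-line helper: the call site only pays for
// a bool check, and the failure path (log + optional crash) lives in one place.
inline void assert_check_condition(bool cond, std::function<void()>&& on_failure) {
    if (!cond) [[unlikely]] {
        on_failure();
        if (Settings::use_debug_asserts) {
            Crash();
        }
    }
}

// Truly unreachable paths still terminate unconditionally.
[[noreturn]] inline void unreachable_impl() {
    Crash();
    throw std::runtime_error("Unreachable code");
}

#define ASSERT_MSG(_a_, _msg_) \
    assert_check_condition(bool(_a_), [&] { std::cerr << "Assertion Failed! " << (_msg_) << '\n'; })

#define UNREACHABLE_MSG(_msg_)                                \
    do {                                                      \
        std::cerr << "Unreachable code! " << (_msg_) << '\n'; \
        unreachable_impl();                                   \
    } while (0)

int main() {
    const int sample_format = 3; // pretend this came from guest data
    switch (sample_format) {
    case 0:
        break;
    default:
        // Reachable-but-unexpected input: log and keep going, which is exactly why
        // call sites below switch from UNREACHABLE_MSG(...) to ASSERT_MSG(false, ...).
        ASSERT_MSG(false, "Unimplemented sample format");
        break;
    }
    return 0;
}
```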
@@ -147,7 +147,7 @@ void UpdateRescalingInfo() {
 info.down_shift = 0;
 break;
 default:
-UNREACHABLE();
+ASSERT(false);
 info.up_scale = 1;
 info.down_shift = 0;
 }
@@ -16,7 +16,8 @@

 namespace Core {

-CpuManager::CpuManager(System& system_) : system{system_} {}
+CpuManager::CpuManager(System& system_)
+: pause_barrier{std::make_unique<Common::Barrier>(1)}, system{system_} {}
 CpuManager::~CpuManager() = default;

 void CpuManager::ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager,

@@ -30,8 +31,10 @@ void CpuManager::Initialize() {
for (std::size_t core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
core_data[core].host_thread = std::jthread(ThreadStart, std::ref(*this), core);
}
pause_barrier = std::make_unique<Common::Barrier>(Core::Hardware::NUM_CPU_CORES + 1);
} else {
core_data[0].host_thread = std::jthread(ThreadStart, std::ref(*this), 0);
pause_barrier = std::make_unique<Common::Barrier>(2);
}
}

@@ -138,51 +141,14 @@ void CpuManager::MultiCoreRunSuspendThread() {
auto core = kernel.CurrentPhysicalCoreIndex();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
current_thread->DisableDispatch();

Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.CurrentPhysicalCoreIndex());
scheduler.RescheduleCurrentCore();
}
}

void CpuManager::MultiCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
all_not_barrier = true;
for (const auto& data : core_data) {
all_not_barrier &= !data.is_running.load() && data.initialized.load();
}
}
for (auto& data : core_data) {
data.enter_barrier->Set();
}
if (paused_state.load()) {
bool all_barrier = false;
while (!all_barrier) {
all_barrier = true;
for (const auto& data : core_data) {
all_barrier &= data.is_paused.load() && data.initialized.load();
}
}
for (auto& data : core_data) {
data.exit_barrier->Set();
}
}
} else {
/// Wait until all cores are paused.
bool all_barrier = false;
while (!all_barrier) {
all_barrier = true;
for (const auto& data : core_data) {
all_barrier &= data.is_paused.load() && data.initialized.load();
}
}
/// Don't release the barrier
}
paused_state = paused;
}

///////////////////////////////////////////////////////////////////////////////
/// SingleCore ///
///////////////////////////////////////////////////////////////////////////////

@@ -235,8 +201,9 @@ void CpuManager::SingleCoreRunSuspendThread() {
auto core = kernel.GetCurrentHostThreadID();
auto& scheduler = *kernel.CurrentScheduler();
Kernel::KThread* current_thread = scheduler.GetCurrentThread();
current_thread->DisableDispatch();

Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[0].host_context);
ASSERT(scheduler.ContextSwitchPending());
ASSERT(core == kernel.GetCurrentHostThreadID());
scheduler.RescheduleCurrentCore();
}

@@ -274,37 +241,21 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
}
}

void CpuManager::SingleCorePause(bool paused) {
if (!paused) {
bool all_not_barrier = false;
while (!all_not_barrier) {
all_not_barrier = !core_data[0].is_running.load() && core_data[0].initialized.load();
}
core_data[0].enter_barrier->Set();
if (paused_state.load()) {
bool all_barrier = false;
while (!all_barrier) {
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
core_data[0].exit_barrier->Set();
}
} else {
/// Wait until all cores are paused.
bool all_barrier = false;
while (!all_barrier) {
all_barrier = core_data[0].is_paused.load() && core_data[0].initialized.load();
}
/// Don't release the barrier
}
paused_state = paused;
void CpuManager::Pause(bool paused) {
std::scoped_lock lk{pause_lock};

if (pause_state == paused) {
return;
}

void CpuManager::Pause(bool paused) {
if (is_multicore) {
MultiCorePause(paused);
} else {
SingleCorePause(paused);
}
// Set the new state
pause_state.store(paused);

// Wake up any waiting threads
pause_state.notify_all();

// Wait for all threads to successfully change state before returning
pause_barrier->Sync();
}

void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {

@@ -320,27 +271,29 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
Common::SetCurrentThreadName(name.c_str());
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
auto& data = core_data[core];
data.enter_barrier = std::make_unique<Common::Event>();
data.exit_barrier = std::make_unique<Common::Event>();
data.host_context = Common::Fiber::ThreadToFiber();
data.is_running = false;
data.initialized = true;
const bool sc_sync = !is_async_gpu && !is_multicore;
bool sc_sync_first_use = sc_sync;

// Cleanup
SCOPE_EXIT({
data.host_context->Exit();
data.enter_barrier.reset();
data.exit_barrier.reset();
data.initialized = false;
MicroProfileOnThreadExit();
});

/// Running
while (running_mode) {
data.is_running = false;
data.enter_barrier->Wait();
if (pause_state.load(std::memory_order_relaxed)) {
// Wait for caller to acknowledge pausing
pause_barrier->Sync();

// Wait until unpaused
pause_state.wait(true, std::memory_order_relaxed);

// Wait for caller to acknowledge unpausing
pause_barrier->Sync();
}

if (sc_sync_first_use) {
system.GPU().ObtainContext();
sc_sync_first_use = false;

@@ -352,12 +305,7 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
}

auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
data.is_running = true;
Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
data.is_running = false;
data.is_paused = true;
data.exit_barrier->Wait();
data.is_paused = false;
}
}

@@ -69,13 +69,11 @@ private:
 void MultiCoreRunGuestLoop();
 void MultiCoreRunIdleThread();
 void MultiCoreRunSuspendThread();
-void MultiCorePause(bool paused);

 void SingleCoreRunGuestThread();
 void SingleCoreRunGuestLoop();
 void SingleCoreRunIdleThread();
 void SingleCoreRunSuspendThread();
-void SingleCorePause(bool paused);

 static void ThreadStart(std::stop_token stop_token, CpuManager& cpu_manager, std::size_t core);

@@ -83,16 +81,13 @@ private:

 struct CoreData {
 std::shared_ptr<Common::Fiber> host_context;
-std::unique_ptr<Common::Event> enter_barrier;
-std::unique_ptr<Common::Event> exit_barrier;
-std::atomic<bool> is_running;
-std::atomic<bool> is_paused;
-std::atomic<bool> initialized;
 std::jthread host_thread;
 };

 std::atomic<bool> running_mode{};
-std::atomic<bool> paused_state{};
+std::atomic<bool> pause_state{};
+std::unique_ptr<Common::Barrier> pause_barrier{};
+std::mutex pause_lock{};

 std::array<CoreData, Core::Hardware::NUM_CPU_CORES> core_data{};

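The CpuManager rework above drops the per-core `enter_barrier`/`exit_barrier` events and the separate `MultiCorePause`/`SingleCorePause` paths in favour of one `std::atomic<bool> pause_state` (driven by C++20 `wait`/`notify_all`) plus a shared `pause_barrier` that the pausing thread and every core thread synchronize on. A standalone sketch of that handshake, using `std::barrier` in place of yuzu's `Common::Barrier` and trivial worker loops in place of the emulated cores (illustrative only, not the CpuManager code):

```cpp
// Standalone sketch of the pause handshake: atomic flag + wait/notify + barrier.
// Assumptions: std::barrier replaces Common::Barrier; two workers replace the cores.
#include <atomic>
#include <barrier>
#include <chrono>
#include <thread>
#include <vector>

std::atomic<bool> pause_state{false};
std::barrier<> pause_barrier{3}; // 2 workers + the controlling thread

void worker_loop(std::stop_token stop) {
    while (!stop.stop_requested()) {
        if (pause_state.load(std::memory_order_relaxed)) {
            pause_barrier.arrive_and_wait();                   // acknowledge pausing
            pause_state.wait(true, std::memory_order_relaxed); // sleep until unpaused
            pause_barrier.arrive_and_wait();                   // acknowledge unpausing
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(1)); // "run guest" stand-in
    }
}

void pause(bool paused) {
    if (pause_state.load() == paused) {
        return;
    }
    pause_state.store(paused);       // set the new state
    pause_state.notify_all();        // wake any workers sleeping in wait()
    pause_barrier.arrive_and_wait(); // wait until every worker has acknowledged
}

int main() {
    std::vector<std::jthread> workers;
    for (int i = 0; i < 2; ++i) {
        workers.emplace_back(worker_loop);
    }
    pause(true);
    pause(false);
    return 0;
}
```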
@@ -140,7 +140,6 @@ u64 GetSignatureTypeDataSize(SignatureType type) {
 return 0x3C;
 }
-UNREACHABLE();
 return 0;
 }

 u64 GetSignatureTypePaddingSize(SignatureType type) {

@@ -155,7 +154,6 @@ u64 GetSignatureTypePaddingSize(SignatureType type) {
 return 0x40;
 }
-UNREACHABLE();
 return 0;
 }

 SignatureType Ticket::GetSignatureType() const {
@@ -419,7 +419,7 @@ std::optional<Core::Crypto::Key128> NCA::GetKeyAreaKey(NCASectionCryptoType type
 Core::Crypto::Mode::ECB);
 cipher.Transcode(key_area.data(), key_area.size(), key_area.data(), Core::Crypto::Op::Decrypt);

-Core::Crypto::Key128 out;
+Core::Crypto::Key128 out{};
 if (type == NCASectionCryptoType::XTS) {
 std::copy(key_area.begin(), key_area.begin() + 0x10, out.begin());
 } else if (type == NCASectionCryptoType::CTR || type == NCASectionCryptoType::BKTR) {
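The only change in this hunk is value-initializing the output key (`Key128 out{};` instead of `Key128 out;`), so its bytes start zeroed rather than indeterminate. A two-line illustration of the difference, assuming `Key128` is a `std::array<u8, 16>` as in yuzu's crypto headers:

```cpp
// Assumption: Key128 is std::array<u8, 16> (yuzu's crypto key type).
#include <array>
#include <cstdint>

using u8 = std::uint8_t;
using Key128 = std::array<u8, 16>;

int main() {
    Key128 a;     // default-initialized: element values are indeterminate
    Key128 b{};   // value-initialized: all 16 bytes are zero
    a.fill(0);    // 'a' must be written explicitly before it is read
    return b[15]; // always 0
}
```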
@ -50,7 +50,7 @@ std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockTyp
|
|||
low = mid + 1;
|
||||
}
|
||||
}
|
||||
UNREACHABLE_MSG("Offset could not be found in BKTR block.");
|
||||
ASSERT_MSG(false, "Offset could not be found in BKTR block.");
|
||||
return {0, 0};
|
||||
}
|
||||
} // Anonymous namespace
|
||||
|
|
|
@ -108,7 +108,7 @@ ContentRecordType GetCRTypeFromNCAType(NCAContentType type) {
|
|||
// TODO(DarkLordZach): Peek at NCA contents to differentiate Manual and Legal.
|
||||
return ContentRecordType::HtmlDocument;
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid NCAContentType={:02X}", type);
|
||||
ASSERT_MSG(false, "Invalid NCAContentType={:02X}", type);
|
||||
return ContentRecordType{};
|
||||
}
|
||||
}
|
||||
|
|
|
@ -144,7 +144,7 @@ VirtualFile RealVfsFilesystem::MoveFile(std::string_view old_path_, std::string_
|
|||
LOG_ERROR(Service_FS, "Failed to open path {} in order to re-cache it", new_path);
|
||||
}
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
|
|
@ -65,7 +65,7 @@ void DefaultControllerApplet::ReconfigureControllers(std::function<void()> callb
|
|||
controller->SetNpadStyleIndex(Core::HID::NpadStyleIndex::Handheld);
|
||||
controller->Connect(true);
|
||||
} else {
|
||||
UNREACHABLE_MSG("Unable to add a new controller based on the given parameters!");
|
||||
ASSERT_MSG(false, "Unable to add a new controller based on the given parameters!");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ EmulatedController* HIDCore::GetEmulatedController(NpadIdType npad_id_type) {
|
|||
return handheld.get();
|
||||
case NpadIdType::Invalid:
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid NpadIdType={}", npad_id_type);
|
||||
ASSERT_MSG(false, "Invalid NpadIdType={}", npad_id_type);
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
@ -77,7 +77,7 @@ const EmulatedController* HIDCore::GetEmulatedController(NpadIdType npad_id_type
|
|||
return handheld.get();
|
||||
case NpadIdType::Invalid:
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid NpadIdType={}", npad_id_type);
|
||||
ASSERT_MSG(false, "Invalid NpadIdType={}", npad_id_type);
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -141,7 +141,7 @@ public:
|
|||
if (index < DomainHandlerCount()) {
|
||||
domain_handlers[index] = nullptr;
|
||||
} else {
|
||||
UNREACHABLE_MSG("Unexpected handler index {}", index);
|
||||
ASSERT_MSG(false, "Unexpected handler index {}", index);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -244,7 +244,7 @@ void InitializeSlabHeaps(Core::System& system, KMemoryLayout& memory_layout) {
|
|||
FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
|
||||
// If we somehow get an invalid type, abort.
|
||||
default:
|
||||
UNREACHABLE_MSG("Unknown slab type: {}", slab_types[i]);
|
||||
ASSERT_MSG(false, "Unknown slab type: {}", slab_types[i]);
|
||||
}
|
||||
|
||||
// If we've hit the end of a gap, free it.
|
||||
|
|
|
@ -35,7 +35,7 @@ public:
|
|||
case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
|
||||
return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return ResultUnknown;
|
||||
}
|
||||
|
||||
|
@ -49,7 +49,7 @@ public:
|
|||
case Svc::ArbitrationType::WaitIfEqual:
|
||||
return WaitIfEqual(addr, value, timeout);
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return ResultUnknown;
|
||||
}
|
||||
|
||||
|
|
|
@ -84,7 +84,7 @@ u64 KAddressSpaceInfo::GetAddressSpaceStart(std::size_t width, Type type) {
|
|||
ASSERT(IsAllowedIndexForAddress(AddressSpaceIndices39Bit[index]));
|
||||
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].address;
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -101,7 +101,7 @@ std::size_t KAddressSpaceInfo::GetAddressSpaceSize(std::size_t width, Type type)
|
|||
ASSERT(IsAllowed39BitType(type));
|
||||
return AddressSpaceInfos[AddressSpaceIndices39Bit[index]].size;
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -18,7 +18,7 @@ namespace Kernel {
 class KernelCore;
 class KProcess;

-#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
+#define KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, ATTRIBUTE) \
 \
 private: \
 friend class ::Kernel::KClassTokenGenerator; \

@@ -40,16 +40,19 @@ public:
 static constexpr const char* GetStaticTypeName() { \
 return TypeName; \
 } \
-virtual TypeObj GetTypeObj() const { \
+virtual TypeObj GetTypeObj() ATTRIBUTE { \
 return GetStaticTypeObj(); \
 } \
-virtual const char* GetTypeName() const { \
+virtual const char* GetTypeName() ATTRIBUTE { \
 return GetStaticTypeName(); \
 } \
 \
 private: \
 constexpr bool operator!=(const TypeObj& rhs)

+#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
+KERNEL_AUTOOBJECT_TRAITS_IMPL(CLASS, BASE_CLASS, const override)

 class KAutoObject {
 protected:
 class TypeObj {

@@ -82,7 +85,7 @@ protected:
 };

 private:
-KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
+KERNEL_AUTOOBJECT_TRAITS_IMPL(KAutoObject, KAutoObject, const);

 public:
 explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
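The new `KERNEL_AUTOOBJECT_TRAITS_IMPL` takes the trailing specifier as a macro argument so the root `KAutoObject` can declare the virtuals with plain `const`, while derived classes keep getting `const override` through the `KERNEL_AUTOOBJECT_TRAITS` wrapper. A small illustrative macro (not the yuzu one) showing the same idea:

```cpp
// Illustrative sketch of passing the specifier ("const" vs "const override")
// into a traits macro. Names here are hypothetical, not kernel code.
#include <iostream>

#define OBJECT_TRAITS_IMPL(CLASS, ATTRIBUTE)          \
public:                                               \
    static const char* GetStaticTypeName() {          \
        return #CLASS;                                \
    }                                                 \
    virtual const char* GetTypeName() ATTRIBUTE {     \
        return GetStaticTypeName();                   \
    }

// Derived classes always override, so bake in `const override`.
#define OBJECT_TRAITS(CLASS) OBJECT_TRAITS_IMPL(CLASS, const override)

class AutoObject {
    // The root class declares (does not override), so it passes plain `const`.
    OBJECT_TRAITS_IMPL(AutoObject, const)
public:
    virtual ~AutoObject() = default;
};

class Thread : public AutoObject {
    OBJECT_TRAITS(Thread)
};

int main() {
    Thread t;
    AutoObject& obj = t;
    std::cout << obj.GetTypeName() << '\n'; // prints "Thread" via virtual dispatch
    return 0;
}
```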
@ -49,6 +49,7 @@ private:
|
|||
}
|
||||
}
|
||||
}
|
||||
UNREACHABLE();
|
||||
}();
|
||||
|
||||
template <typename T>
|
||||
|
|
|
@ -29,7 +29,7 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
|
|||
} else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
|
||||
return KMemoryManager::Pool::SystemNonSecure;
|
||||
} else {
|
||||
UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
|
||||
ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
|
|
@ -35,7 +35,7 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
|
|||
case FileSys::ProgramAddressSpaceType::Is39Bit:
|
||||
return 39;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
@ -128,7 +128,7 @@ ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_
|
|||
const std::size_t needed_size{
|
||||
(alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
|
||||
if (alloc_size < needed_size) {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return ResultOutOfMemory;
|
||||
}
|
||||
|
||||
|
@ -1341,7 +1341,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
|
|||
new_state = KMemoryState::AliasCodeData;
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1734,9 +1734,7 @@ void KPageTable::AddRegionToPages(VAddr start, std::size_t num_pages,
|
|||
VAddr addr{start};
|
||||
while (addr < start + (num_pages * PageSize)) {
|
||||
const PAddr paddr{GetPhysicalAddr(addr)};
|
||||
if (!paddr) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
ASSERT(paddr != 0);
|
||||
page_linked_list.AddBlock(paddr, 1);
|
||||
addr += PageSize;
|
||||
}
|
||||
|
@ -1767,7 +1765,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, const KPageLin
|
|||
system.Memory().MapMemoryRegion(page_table_impl, addr, size, node.GetAddress());
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
|
||||
addr += size;
|
||||
|
@ -1798,7 +1796,7 @@ ResultCode KPageTable::Operate(VAddr addr, std::size_t num_pages, KMemoryPermiss
|
|||
case OperationType::ChangePermissionsAndRefresh:
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
@ -1835,7 +1833,6 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
|
|||
return code_region_start;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1871,7 +1868,6 @@ std::size_t KPageTable::GetRegionSize(KMemoryState state) const {
|
|||
return code_region_end - code_region_start;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -60,7 +60,7 @@ ResultCode KPort::EnqueueSession(KServerSession* session) {
|
|||
if (auto session_ptr = server.GetSessionRequestHandler().lock()) {
|
||||
session_ptr->ClientConnected(server.AcceptSession());
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
|
||||
return ResultSuccess;
|
||||
|
|
|
@ -350,7 +350,7 @@ ResultCode KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata,
|
|||
break;
|
||||
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
|
||||
// Create TLS region
|
||||
|
|
|
@ -97,13 +97,13 @@ ResultCode KServerSession::HandleDomainSyncRequest(Kernel::HLERequestContext& co
|
|||
"object_id {} is too big! This probably means a recent service call "
|
||||
"to {} needed to return a new interface!",
|
||||
object_id, name);
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return ResultSuccess; // Ignore error if asserts are off
|
||||
}
|
||||
if (auto strong_ptr = manager->DomainHandler(object_id - 1).lock()) {
|
||||
return strong_ptr->HandleSyncRequest(*this, context);
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return ResultSuccess;
|
||||
}
|
||||
|
||||
|
|
|
@ -133,7 +133,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
|
|||
UNIMPLEMENTED();
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE_MSG("KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
|
||||
ASSERT_MSG(false, "KThread::Initialize: Unknown ThreadType {}", static_cast<u32>(type));
|
||||
break;
|
||||
}
|
||||
thread_type = type;
|
||||
|
|
|
@ -252,6 +252,7 @@ struct KernelCore::Impl {
|
|||
core_id)
|
||||
.IsSuccess());
|
||||
suspend_threads[core_id]->SetName(fmt::format("SuspendThread:{}", core_id));
|
||||
suspend_threads[core_id]->DisableDispatch();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1073,9 +1074,6 @@ void KernelCore::Suspend(bool in_suspention) {
|
|||
impl->suspend_threads[core_id]->SetState(state);
|
||||
impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
|
||||
ThreadWaitReasonForDebugging::Suspended);
|
||||
if (!should_suspend) {
|
||||
impl->suspend_threads[core_id]->DisableDispatch();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1876,7 +1876,7 @@ static void SleepThread(Core::System& system, s64 nanoseconds) {
|
|||
KScheduler::YieldToAnyThread(kernel);
|
||||
} else {
|
||||
// Nintendo does nothing at all if an otherwise invalid value is passed.
|
||||
UNREACHABLE_MSG("Unimplemented sleep yield type '{:016X}'!", nanoseconds);
|
||||
ASSERT_MSG(false, "Unimplemented sleep yield type '{:016X}'!", nanoseconds);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -178,7 +178,7 @@ ResultCode Controller::GetStatus() const {
|
|||
}
|
||||
|
||||
void Controller::ExecuteInteractive() {
|
||||
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
|
||||
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
|
||||
}
|
||||
|
||||
void Controller::Execute() {
|
||||
|
|
|
@ -156,7 +156,7 @@ ResultCode Error::GetStatus() const {
|
|||
}
|
||||
|
||||
void Error::ExecuteInteractive() {
|
||||
UNREACHABLE_MSG("Unexpected interactive applet data!");
|
||||
ASSERT_MSG(false, "Unexpected interactive applet data!");
|
||||
}
|
||||
|
||||
void Error::Execute() {
|
||||
|
|
|
@ -76,7 +76,7 @@ ResultCode Auth::GetStatus() const {
|
|||
}
|
||||
|
||||
void Auth::ExecuteInteractive() {
|
||||
UNREACHABLE_MSG("Unexpected interactive applet data.");
|
||||
ASSERT_MSG(false, "Unexpected interactive applet data.");
|
||||
}
|
||||
|
||||
void Auth::Execute() {
|
||||
|
@ -175,7 +175,7 @@ ResultCode PhotoViewer::GetStatus() const {
|
|||
}
|
||||
|
||||
void PhotoViewer::ExecuteInteractive() {
|
||||
UNREACHABLE_MSG("Unexpected interactive applet data.");
|
||||
ASSERT_MSG(false, "Unexpected interactive applet data.");
|
||||
}
|
||||
|
||||
void PhotoViewer::Execute() {
|
||||
|
|
|
@ -67,7 +67,7 @@ ResultCode MiiEdit::GetStatus() const {
|
|||
}
|
||||
|
||||
void MiiEdit::ExecuteInteractive() {
|
||||
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
|
||||
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
|
||||
}
|
||||
|
||||
void MiiEdit::Execute() {
|
||||
|
|
|
@ -44,7 +44,7 @@ ResultCode ProfileSelect::GetStatus() const {
|
|||
}
|
||||
|
||||
void ProfileSelect::ExecuteInteractive() {
|
||||
UNREACHABLE_MSG("Attempted to call interactive execution on non-interactive applet.");
|
||||
ASSERT_MSG(false, "Attempted to call interactive execution on non-interactive applet.");
|
||||
}
|
||||
|
||||
void ProfileSelect::Execute() {
|
||||
|
|
|
@ -71,7 +71,7 @@ void SoftwareKeyboard::Initialize() {
|
|||
InitializeBackground(applet_mode);
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid LibraryAppletMode={}", applet_mode);
|
||||
ASSERT_MSG(false, "Invalid LibraryAppletMode={}", applet_mode);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -279,7 +279,7 @@ void WebBrowser::Initialize() {
|
|||
InitializeLobby();
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid ShimKind={}", web_arg_header.shim_kind);
|
||||
ASSERT_MSG(false, "Invalid ShimKind={}", web_arg_header.shim_kind);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -320,7 +320,7 @@ void WebBrowser::Execute() {
|
|||
ExecuteLobby();
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid ShimKind={}", web_arg_header.shim_kind);
|
||||
ASSERT_MSG(false, "Invalid ShimKind={}", web_arg_header.shim_kind);
|
||||
WebBrowserExit(WebExitReason::EndButtonPressed);
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -899,7 +899,7 @@ void FSP_SRV::OpenSaveDataFileSystem(Kernel::HLERequestContext& ctx) {
|
|||
case FileSys::SaveDataSpaceId::TemporaryStorage:
|
||||
case FileSys::SaveDataSpaceId::ProperSystem:
|
||||
case FileSys::SaveDataSpaceId::SafeMode:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
|
||||
auto filesystem = std::make_shared<IFileSystem>(system, std::move(dir.Unwrap()),
|
||||
|
|
|
@@ -61,6 +61,7 @@ void Controller_Gesture::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
 }

 last_update_timestamp = shared_memory->gesture_lifo.timestamp;
+UpdateGestureSharedMemory(gesture, time_difference);
 }

 void Controller_Gesture::ReadTouchInput() {

@@ -94,8 +95,7 @@ bool Controller_Gesture::ShouldUpdateGesture(const GestureProperties& gesture,
 return false;
 }

-void Controller_Gesture::UpdateGestureSharedMemory(u8* data, std::size_t size,
-GestureProperties& gesture,
+void Controller_Gesture::UpdateGestureSharedMemory(GestureProperties& gesture,
 f32 time_difference) {
 GestureType type = GestureType::Idle;
 GestureAttribute attributes{};

@@ -107,8 +107,7 @@ private:
 bool ShouldUpdateGesture(const GestureProperties& gesture, f32 time_difference);

 // Updates the shared memory to the next state
-void UpdateGestureSharedMemory(u8* data, std::size_t size, GestureProperties& gesture,
-f32 time_difference);
+void UpdateGestureSharedMemory(GestureProperties& gesture, f32 time_difference);

 // Initializes new gesture
 void NewGesture(GestureProperties& gesture, GestureType& type, GestureAttribute& attributes);
@ -160,7 +160,7 @@ void Controller_NPad::InitNewlyAddedController(Core::HID::NpadIdType npad_id) {
|
|||
shared_memory->system_properties.raw = 0;
|
||||
switch (controller_type) {
|
||||
case Core::HID::NpadStyleIndex::None:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
break;
|
||||
case Core::HID::NpadStyleIndex::ProController:
|
||||
shared_memory->style_tag.fullkey.Assign(1);
|
||||
|
@ -422,7 +422,7 @@ void Controller_NPad::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
|
|||
libnx_state.connection_status.is_connected.Assign(1);
|
||||
switch (controller_type) {
|
||||
case Core::HID::NpadStyleIndex::None:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
break;
|
||||
case Core::HID::NpadStyleIndex::ProController:
|
||||
case Core::HID::NpadStyleIndex::NES:
|
||||
|
@ -597,7 +597,7 @@ void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing
|
|||
|
||||
switch (controller_type) {
|
||||
case Core::HID::NpadStyleIndex::None:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
break;
|
||||
case Core::HID::NpadStyleIndex::ProController:
|
||||
case Core::HID::NpadStyleIndex::Pokeball:
|
||||
|
@ -856,7 +856,7 @@ void Controller_NPad::VibrateController(
|
|||
}
|
||||
|
||||
if (vibration_device_handle.device_index == Core::HID::DeviceIndex::None) {
|
||||
UNREACHABLE_MSG("DeviceIndex should never be None!");
|
||||
ASSERT_MSG(false, "DeviceIndex should never be None!");
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -1441,7 +1441,7 @@ void Hid::GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx) {
|
|||
break;
|
||||
case Core::HID::DeviceIndex::None:
|
||||
default:
|
||||
UNREACHABLE_MSG("DeviceIndex should never be None!");
|
||||
ASSERT_MSG(false, "DeviceIndex should never be None!");
|
||||
vibration_device_info.position = Core::HID::VibrationDevicePosition::None;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -347,7 +347,7 @@ public:
|
|||
}
|
||||
|
||||
if (!succeeded) {
|
||||
UNREACHABLE_MSG("Out of address space!");
|
||||
ASSERT_MSG(false, "Out of address space!");
|
||||
return Kernel::ResultOutOfMemory;
|
||||
}
|
||||
|
||||
|
|
|
@ -290,7 +290,7 @@ MiiStoreData BuildRandomStoreData(Age age, Gender gender, Race race, const Commo
|
|||
u8 glasses_type{};
|
||||
while (glasses_type_start < glasses_type_info.values[glasses_type]) {
|
||||
if (++glasses_type >= glasses_type_info.values_count) {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@ u32 SyncpointManager::AllocateSyncpoint() {
|
|||
return syncpoint_id;
|
||||
}
|
||||
}
|
||||
UNREACHABLE_MSG("No more available syncpoints!");
|
||||
ASSERT_MSG(false, "No more available syncpoints!");
|
||||
return {};
|
||||
}
|
||||
|
||||
|
|
|
@ -659,7 +659,7 @@ Status BufferQueueProducer::Query(NativeWindow what, s32* out_value) {
|
|||
value = core->consumer_usage_bit;
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return Status::BadValue;
|
||||
}
|
||||
|
||||
|
|
|
@@ -48,12 +48,12 @@ ResultCode StandardUserSystemClockCore::GetClockContext(Core::System& system,
 }

 ResultCode StandardUserSystemClockCore::Flush(const SystemClockContext&) {
-UNREACHABLE();
+UNIMPLEMENTED();
 return ERROR_NOT_IMPLEMENTED;
 }

 ResultCode StandardUserSystemClockCore::SetClockContext(const SystemClockContext&) {
-UNREACHABLE();
+UNIMPLEMENTED();
 return ERROR_NOT_IMPLEMENTED;
 }

@ -111,7 +111,7 @@ struct TimeManager::Impl final {
|
|||
FileSys::VirtualFile& vfs_file) {
|
||||
if (time_zone_content_manager.GetTimeZoneManager().SetDeviceLocationNameWithTimeZoneRule(
|
||||
location_name, vfs_file) != ResultSuccess) {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -155,7 +155,7 @@ struct TimeManager::Impl final {
|
|||
} else {
|
||||
if (standard_local_system_clock_core.SetCurrentTime(system_, posix_time) !=
|
||||
ResultSuccess) {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -170,7 +170,7 @@ struct TimeManager::Impl final {
|
|||
|
||||
if (standard_network_system_clock_core.SetSystemClockContext(clock_context) !=
|
||||
ResultSuccess) {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -183,7 +183,7 @@ struct TimeManager::Impl final {
|
|||
Clock::SteadyClockTimePoint steady_clock_time_point) {
|
||||
if (standard_user_system_clock_core.SetAutomaticCorrectionEnabled(
|
||||
system_, is_automatic_correction_enabled) != ResultSuccess) {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -203,7 +203,7 @@ struct TimeManager::Impl final {
|
|||
if (GetStandardLocalSystemClockCore()
|
||||
.SetCurrentTime(system_, timespan.ToSeconds())
|
||||
.IsError()) {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -279,7 +279,7 @@ static constexpr int TransitionTime(int year, Rule rule, int offset) {
|
|||
break;
|
||||
}
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
return value + rule.transition_time + offset;
|
||||
}
|
||||
|
|
|
@ -25,7 +25,6 @@ u64 MemoryReadWidth(Core::Memory::Memory& memory, u32 width, VAddr addr) {
|
|||
return memory.Read64(addr);
|
||||
default:
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -58,7 +58,7 @@ public:
 [[nodiscard]] Stack Remove(Token token) const;

 private:
-boost::container::small_vector<StackEntry, 3> entries;
+std::vector<StackEntry> entries;
 };

 struct IndirectBranch {

@@ -976,12 +976,7 @@ private:
IR::AbstractSyntaxList& syntax_list;
bool uses_demote_to_helper{};

// TODO: C++20 Remove this when all compilers support constexpr std::vector
#if __cpp_lib_constexpr_vector >= 201907
static constexpr Flow::Block dummy_flow_block;
#else
const Flow::Block dummy_flow_block;
#endif
};
} // Anonymous namespace

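The lines dropped here are the usual feature-test dance around constexpr `std::vector` (P1004): keep a shared `static constexpr` dummy block only when `__cpp_lib_constexpr_vector` reports library support, otherwise fall back to a per-instance `const` member. A compressed sketch of that pattern, with illustrative names rather than the shader recompiler's types, assuming a C++20 toolchain:

```cpp
// Sketch of the removed feature-test guard; Block and TranslatePass are
// hypothetical stand-ins for the recompiler's Flow::Block and its user.
#include <vector>

struct Block {
    std::vector<int> code;
};

struct TranslatePass {
#if defined(__cpp_lib_constexpr_vector) && __cpp_lib_constexpr_vector >= 201907L
    // Library ships constexpr std::vector: one shared compile-time dummy (empty, so
    // no allocation needs to survive constant evaluation).
    static constexpr Block dummy_block{};
#else
    // Older standard libraries: ordinary per-instance member as a fallback.
    const Block dummy_block{};
#endif
};
```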
@ -224,7 +224,7 @@ void Codec::Decode() {
|
|||
vp9_hidden_frame = vp9_decoder->WasFrameHidden();
|
||||
return vp9_decoder->GetFrameBytes();
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return std::vector<u8>{};
|
||||
}
|
||||
}();
|
||||
|
|
|
@ -228,7 +228,7 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
|
|||
break;
|
||||
}
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
break;
|
||||
}
|
||||
gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
|
||||
|
|
|
@ -202,7 +202,7 @@ public:
|
|||
case Size::Size_11_11_10:
|
||||
return 3;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@ -238,7 +238,7 @@ public:
|
|||
case Size::Size_11_11_10:
|
||||
return 4;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@ -274,7 +274,7 @@ public:
|
|||
case Size::Size_11_11_10:
|
||||
return "11_11_10";
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
@ -296,7 +296,7 @@ public:
|
|||
case Type::Float:
|
||||
return "FLOAT";
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return {};
|
||||
}
|
||||
|
||||
|
@ -336,7 +336,7 @@ public:
|
|||
case 3:
|
||||
return {x3, y3};
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return {0, 0};
|
||||
}
|
||||
}
|
||||
|
@ -1193,7 +1193,7 @@ public:
|
|||
case IndexFormat::UnsignedInt:
|
||||
return 4;
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
|
|
@@ -62,7 +62,7 @@ void MaxwellDMA::Launch() {

 if (!is_src_pitch && !is_dst_pitch) {
 // If both the source and the destination are in block layout, assert.
-UNREACHABLE_MSG("Tiled->Tiled DMA transfers are not yet implemented");
+UNIMPLEMENTED_MSG("Tiled->Tiled DMA transfers are not yet implemented");
 return;
 }

@@ -260,7 +260,7 @@ void MaxwellDMA::ReleaseSemaphore() {
 memory_manager.Write<u64>(address + 8, system.GPU().GetTicks());
 break;
 default:
-UNREACHABLE_MSG("Unknown semaphore type: {}", static_cast<u32>(type.Value()));
+ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value()));
 }
 }

@ -50,7 +50,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
|
|||
} else if (const auto* invalidate = std::get_if<InvalidateRegionCommand>(&next.data)) {
|
||||
rasterizer->OnCPUWrite(invalidate->addr, invalidate->size);
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
state.signaled_fence.store(next.fence);
|
||||
if (next.block) {
|
||||
|
|
|
@ -71,7 +71,7 @@ void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
|
|||
}
|
||||
}
|
||||
if (!mid_method.has_value()) {
|
||||
UNREACHABLE_MSG("Macro 0x{0:x} was not uploaded", method);
|
||||
ASSERT_MSG(false, "Macro 0x{0:x} was not uploaded", method);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -308,7 +308,6 @@ bool MacroInterpreterImpl::EvaluateBranchCondition(Macro::BranchCondition cond,
|
|||
return value != 0;
|
||||
}
|
||||
UNREACHABLE();
|
||||
return true;
|
||||
}
|
||||
|
||||
Macro::Opcode MacroInterpreterImpl::GetOpcode() const {
|
||||
|
|
|
@ -67,7 +67,7 @@ void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
|
|||
ASSERT(it->first == gpu_addr);
|
||||
map_ranges.erase(it);
|
||||
} else {
|
||||
UNREACHABLE_MSG("Unmapping non-existent GPU address=0x{:x}", gpu_addr);
|
||||
ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr);
|
||||
}
|
||||
const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
|
||||
|
||||
|
@ -206,7 +206,7 @@ T MemoryManager::Read(GPUVAddr addr) const {
|
|||
return value;
|
||||
}
|
||||
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
|
||||
return {};
|
||||
}
|
||||
|
@ -219,7 +219,7 @@ void MemoryManager::Write(GPUVAddr addr, T data) {
|
|||
return;
|
||||
}
|
||||
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
|
||||
template u8 MemoryManager::Read<u8>(GPUVAddr addr) const;
|
||||
|
|
|
@ -48,7 +48,7 @@ GLenum Stage(size_t stage_index) {
|
|||
case 4:
|
||||
return GL_FRAGMENT_SHADER;
|
||||
}
|
||||
UNREACHABLE_MSG("{}", stage_index);
|
||||
ASSERT_MSG(false, "{}", stage_index);
|
||||
return GL_NONE;
|
||||
}
|
||||
|
||||
|
@ -65,7 +65,7 @@ GLenum AssemblyStage(size_t stage_index) {
|
|||
case 4:
|
||||
return GL_FRAGMENT_PROGRAM_NV;
|
||||
}
|
||||
UNREACHABLE_MSG("{}", stage_index);
|
||||
ASSERT_MSG(false, "{}", stage_index);
|
||||
return GL_NONE;
|
||||
}
|
||||
|
||||
|
|
|
@ -85,7 +85,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
|
|||
case Maxwell::TessellationPrimitive::Quads:
|
||||
return Shader::TessPrimitive::Quads;
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return Shader::TessPrimitive::Triangles;
|
||||
}();
|
||||
info.tess_spacing = [&] {
|
||||
|
@ -97,7 +97,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
|
|||
case Maxwell::TessellationSpacing::FractionalEven:
|
||||
return Shader::TessSpacing::FractionalEven;
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return Shader::TessSpacing::Equal;
|
||||
}();
|
||||
break;
|
||||
|
|
|
@ -83,7 +83,7 @@ GLenum ImageTarget(const VideoCommon::ImageInfo& info) {
|
|||
case ImageType::Buffer:
|
||||
return GL_TEXTURE_BUFFER;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid image type={}", info.type);
|
||||
ASSERT_MSG(false, "Invalid image type={}", info.type);
|
||||
return GL_NONE;
|
||||
}
|
||||
|
||||
|
@ -107,7 +107,7 @@ GLenum ImageTarget(Shader::TextureType type, int num_samples = 1) {
|
|||
case Shader::TextureType::Buffer:
|
||||
return GL_TEXTURE_BUFFER;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid image view type={}", type);
|
||||
ASSERT_MSG(false, "Invalid image view type={}", type);
|
||||
return GL_NONE;
|
||||
}
|
||||
|
||||
|
@ -119,7 +119,7 @@ GLenum TextureMode(PixelFormat format, bool is_first) {
|
|||
case PixelFormat::S8_UINT_D24_UNORM:
|
||||
return is_first ? GL_STENCIL_INDEX : GL_DEPTH_COMPONENT;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return GL_DEPTH_COMPONENT;
|
||||
}
|
||||
}
|
||||
|
@ -140,7 +140,7 @@ GLint Swizzle(SwizzleSource source) {
|
|||
case SwizzleSource::OneFloat:
|
||||
return GL_ONE;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid swizzle source={}", source);
|
||||
ASSERT_MSG(false, "Invalid swizzle source={}", source);
|
||||
return GL_NONE;
|
||||
}
|
||||
|
||||
|
@ -197,7 +197,7 @@ GLint ConvertA5B5G5R1_UNORM(SwizzleSource source) {
|
|||
case SwizzleSource::OneFloat:
|
||||
return GL_ONE;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid swizzle source={}", source);
|
||||
ASSERT_MSG(false, "Invalid swizzle source={}", source);
|
||||
return GL_NONE;
|
||||
}
|
||||
|
||||
|
@ -381,10 +381,10 @@ OGLTexture MakeImage(const VideoCommon::ImageInfo& info, GLenum gl_internal_form
|
|||
glTextureStorage3D(handle, gl_num_levels, gl_internal_format, width, height, depth);
|
||||
break;
|
||||
case GL_TEXTURE_BUFFER:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid target=0x{:x}", target);
|
||||
ASSERT_MSG(false, "Invalid target=0x{:x}", target);
|
||||
break;
|
||||
}
|
||||
return texture;
|
||||
|
@ -420,7 +420,7 @@ OGLTexture MakeImage(const VideoCommon::ImageInfo& info, GLenum gl_internal_form
|
|||
case Shader::ImageFormat::R32G32B32A32_UINT:
|
||||
return GL_RGBA32UI;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid image format={}", format);
|
||||
ASSERT_MSG(false, "Invalid image format={}", format);
|
||||
return GL_R32UI;
|
||||
}
|
||||
|
||||
|
@ -579,7 +579,7 @@ void TextureCacheRuntime::EmulateCopyImage(Image& dst, Image& src,
|
|||
} else if (IsPixelFormatBGR(dst.info.format) || IsPixelFormatBGR(src.info.format)) {
|
||||
format_conversion_pass.ConvertImage(dst, src, copies);
|
||||
} else {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -620,7 +620,7 @@ void TextureCacheRuntime::AccelerateImageUpload(Image& image, const ImageBufferM
|
|||
case ImageType::Linear:
|
||||
return util_shaders.PitchUpload(image, map, swizzles);
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -639,7 +639,7 @@ FormatProperties TextureCacheRuntime::FormatInfo(ImageType type, GLenum internal
|
|||
case ImageType::e3D:
|
||||
return format_properties[2].at(internal_format);
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return FormatProperties{};
|
||||
}
|
||||
}
|
||||
|
@ -888,7 +888,7 @@ void Image::CopyBufferToImage(const VideoCommon::BufferImageCopy& copy, size_t b
|
|||
}
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -924,7 +924,7 @@ void Image::CopyImageToBuffer(const VideoCommon::BufferImageCopy& copy, size_t b
|
|||
depth = copy.image_extent.depth;
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
}
|
||||
// Compressed formats don't have a pixel format or type
|
||||
const bool is_compressed = gl_format == GL_NONE;
|
||||
|
@ -950,7 +950,7 @@ void Image::Scale(bool up_scale) {
|
|||
case SurfaceType::DepthStencil:
|
||||
return GL_DEPTH_STENCIL_ATTACHMENT;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return GL_COLOR_ATTACHMENT0;
|
||||
}
|
||||
}();
|
||||
|
@ -965,7 +965,7 @@ void Image::Scale(bool up_scale) {
|
|||
case SurfaceType::DepthStencil:
|
||||
return GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return GL_COLOR_BUFFER_BIT;
|
||||
}
|
||||
}();
|
||||
|
@ -980,7 +980,7 @@ void Image::Scale(bool up_scale) {
|
|||
case SurfaceType::DepthStencil:
|
||||
return 3;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return 0;
|
||||
}
|
||||
}();
|
||||
|
@ -1045,7 +1045,7 @@ bool Image::ScaleUp(bool ignore) {
|
|||
return false;
|
||||
}
|
||||
if (info.type == ImageType::Linear) {
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return false;
|
||||
}
|
||||
flags |= ImageFlagBits::Rescaled;
|
||||
|
@ -1139,7 +1139,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
|
|||
UNIMPLEMENTED();
|
||||
break;
|
||||
case ImageViewType::Buffer:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
break;
|
||||
}
|
||||
switch (info.type) {
|
||||
|
@ -1319,7 +1319,7 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
|
|||
buffer_bits |= GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
buffer_bits |= GL_DEPTH_BUFFER_BIT;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -206,7 +206,7 @@ inline GLenum IndexFormat(Maxwell::IndexFormat index_format) {
|
|||
case Maxwell::IndexFormat::UnsignedInt:
|
||||
return GL_UNSIGNED_INT;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid index_format={}", index_format);
|
||||
ASSERT_MSG(false, "Invalid index_format={}", index_format);
|
||||
return {};
|
||||
}
|
||||
|
||||
|
@ -243,7 +243,7 @@ inline GLenum PrimitiveTopology(Maxwell::PrimitiveTopology topology) {
|
|||
case Maxwell::PrimitiveTopology::Patches:
|
||||
return GL_PATCHES;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid topology={}", topology);
|
||||
ASSERT_MSG(false, "Invalid topology={}", topology);
|
||||
return GL_POINTS;
|
||||
}
|
||||
|
||||
|
@ -271,7 +271,7 @@ inline GLenum TextureFilterMode(Tegra::Texture::TextureFilter filter_mode,
|
|||
}
|
||||
break;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid texture filter mode={} and mipmap filter mode={}", filter_mode,
|
||||
ASSERT_MSG(false, "Invalid texture filter mode={} and mipmap filter mode={}", filter_mode,
|
||||
mipmap_filter_mode);
|
||||
return GL_NEAREST;
|
||||
}
|
||||
|
@ -550,7 +550,7 @@ inline GLenum PolygonMode(Maxwell::PolygonMode polygon_mode) {
|
|||
case Maxwell::PolygonMode::Fill:
|
||||
return GL_FILL;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid polygon mode={}", polygon_mode);
|
||||
ASSERT_MSG(false, "Invalid polygon mode={}", polygon_mode);
|
||||
return GL_FILL;
|
||||
}
|
||||
|
||||
|
@ -563,7 +563,7 @@ inline GLenum ReductionFilter(Tegra::Texture::SamplerReduction filter) {
|
|||
case Tegra::Texture::SamplerReduction::Max:
|
||||
return GL_MAX;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid reduction filter={}", static_cast<int>(filter));
|
||||
ASSERT_MSG(false, "Invalid reduction filter={}", static_cast<int>(filter));
|
||||
return GL_WEIGHTED_AVERAGE_ARB;
|
||||
}
|
||||
|
||||
|
|
|
@ -79,7 +79,7 @@ const char* GetSource(GLenum source) {
|
|||
case GL_DEBUG_SOURCE_OTHER:
|
||||
return "OTHER";
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return "Unknown source";
|
||||
}
|
||||
}
|
||||
|
@ -101,7 +101,7 @@ const char* GetType(GLenum type) {
|
|||
case GL_DEBUG_TYPE_MARKER:
|
||||
return "MARKER";
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return "Unknown type";
|
||||
}
|
||||
}
|
||||
|
|
|
@ -282,7 +282,7 @@ GLenum StoreFormat(u32 bytes_per_block) {
|
|||
case 16:
|
||||
return GL_RGBA32UI;
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return GL_R8UI;
|
||||
}
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ VkFilter Filter(Tegra::Texture::TextureFilter filter) {
|
|||
case Tegra::Texture::TextureFilter::Linear:
|
||||
return VK_FILTER_LINEAR;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid sampler filter={}", filter);
|
||||
ASSERT_MSG(false, "Invalid sampler filter={}", filter);
|
||||
return {};
|
||||
}
|
||||
|
||||
|
@ -42,7 +42,7 @@ VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter
|
|||
case Tegra::Texture::TextureMipmapFilter::Linear:
|
||||
return VK_SAMPLER_MIPMAP_MODE_LINEAR;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid sampler mipmap mode={}", mipmap_filter);
|
||||
ASSERT_MSG(false, "Invalid sampler mipmap mode={}", mipmap_filter);
|
||||
return {};
|
||||
}
|
||||
|
||||
|
@ -70,7 +70,7 @@ VkSamplerAddressMode WrapMode(const Device& device, Tegra::Texture::WrapMode wra
|
|||
case Tegra::Texture::TextureFilter::Linear:
|
||||
return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
|
||||
case Tegra::Texture::WrapMode::MirrorOnceClampToEdge:
|
||||
return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
|
||||
|
@ -744,7 +744,7 @@ VkViewportCoordinateSwizzleNV ViewportSwizzle(Maxwell::ViewportSwizzle swizzle)
|
|||
case Maxwell::ViewportSwizzle::NegativeW:
|
||||
return VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid swizzle={}", swizzle);
|
||||
ASSERT_MSG(false, "Invalid swizzle={}", swizzle);
|
||||
return {};
|
||||
}
|
||||
|
||||
|
@ -757,7 +757,7 @@ VkSamplerReductionMode SamplerReduction(Tegra::Texture::SamplerReduction reducti
|
|||
case Tegra::Texture::SamplerReduction::Max:
|
||||
return VK_SAMPLER_REDUCTION_MODE_MAX_EXT;
|
||||
}
|
||||
UNREACHABLE_MSG("Invalid sampler mode={}", static_cast<int>(reduction));
|
||||
ASSERT_MSG(false, "Invalid sampler mode={}", static_cast<int>(reduction));
|
||||
return VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT;
|
||||
}
|
||||
|
||||
|
@ -780,7 +780,7 @@ VkSampleCountFlagBits MsaaMode(Tegra::Texture::MsaaMode msaa_mode) {
|
|||
case Tegra::Texture::MsaaMode::Msaa4x4:
|
||||
return VK_SAMPLE_COUNT_16_BIT;
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid msaa_mode={}", static_cast<int>(msaa_mode));
|
||||
ASSERT_MSG(false, "Invalid msaa_mode={}", static_cast<int>(msaa_mode));
|
||||
return VK_SAMPLE_COUNT_1_BIT;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -46,7 +46,7 @@ size_t BytesPerIndex(VkIndexType index_type) {
|
|||
case VK_INDEX_TYPE_UINT32:
|
||||
return 4;
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid index type={}", index_type);
|
||||
ASSERT_MSG(false, "Invalid index type={}", index_type);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
@ -366,7 +366,7 @@ void BufferCacheRuntime::ReserveQuadArrayLUT(u32 num_indices, bool wait_for_idle
|
|||
std::memcpy(staging_data, MakeQuadIndices<u32>(quad, first).data(), quad_size);
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
break;
|
||||
}
|
||||
staging_data += quad_size;
|
||||
|
|
|
@ -265,7 +265,7 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
|
|||
case Tegra::Engines::Maxwell3D::Regs::IndexFormat::UnsignedInt:
|
||||
return 2;
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return 2;
|
||||
}();
|
||||
const u32 input_size = num_vertices << index_shift;
|
||||
|
|
|
@ -174,7 +174,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
|
|||
case Maxwell::TessellationPrimitive::Quads:
|
||||
return Shader::TessPrimitive::Quads;
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return Shader::TessPrimitive::Triangles;
|
||||
}();
|
||||
info.tess_spacing = [&] {
|
||||
|
@ -187,7 +187,7 @@ Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> program
|
|||
case Maxwell::TessellationSpacing::FractionalEven:
|
||||
return Shader::TessSpacing::FractionalEven;
|
||||
}
|
||||
UNREACHABLE();
|
||||
ASSERT(false);
|
||||
return Shader::TessSpacing::Equal;
|
||||
}();
|
||||
break;
|
||||
|
|
|
@ -263,7 +263,7 @@ StagingBufferPool::StagingBuffersCache& StagingBufferPool::GetCache(MemoryUsage
|
|||
case MemoryUsage::Download:
|
||||
return download_cache;
|
||||
default:
|
||||
UNREACHABLE_MSG("Invalid memory usage={}", usage);
|
||||
ASSERT_MSG(false, "Invalid memory usage={}", usage);
|
||||
return upload_cache;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -70,7 +70,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
     case ImageType::Buffer:
         break;
     }
-    UNREACHABLE_MSG("Invalid image type={}", type);
+    ASSERT_MSG(false, "Invalid image type={}", type);
     return {};
 }

@@ -87,7 +87,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
     case 16:
         return VK_SAMPLE_COUNT_16_BIT;
     default:
-        UNREACHABLE_MSG("Invalid number of samples={}", num_samples);
+        ASSERT_MSG(false, "Invalid number of samples={}", num_samples);
         return VK_SAMPLE_COUNT_1_BIT;
     }
 }

@@ -107,7 +107,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
             usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
             break;
         default:
-            UNREACHABLE_MSG("Invalid surface type");
+            ASSERT_MSG(false, "Invalid surface type");
         }
     }
     if (info.storage) {

@@ -179,7 +179,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
     case VideoCore::Surface::SurfaceType::DepthStencil:
         return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
     default:
-        UNREACHABLE_MSG("Invalid surface type");
+        ASSERT_MSG(false, "Invalid surface type");
         return VkImageAspectFlags{};
     }
 }

@@ -221,7 +221,7 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
     case SwizzleSource::OneInt:
         return VK_COMPONENT_SWIZZLE_ONE;
     }
-    UNREACHABLE_MSG("Invalid swizzle={}", swizzle);
+    ASSERT_MSG(false, "Invalid swizzle={}", swizzle);
     return VK_COMPONENT_SWIZZLE_ZERO;
 }

@@ -242,10 +242,10 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
     case Shader::TextureType::ColorArrayCube:
         return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
     case Shader::TextureType::Buffer:
-        UNREACHABLE_MSG("Texture buffers can't be image views");
+        ASSERT_MSG(false, "Texture buffers can't be image views");
         return VK_IMAGE_VIEW_TYPE_1D;
     }
-    UNREACHABLE_MSG("Invalid image view type={}", type);
+    ASSERT_MSG(false, "Invalid image view type={}", type);
     return VK_IMAGE_VIEW_TYPE_2D;
 }

@@ -269,10 +269,10 @@ constexpr VkBorderColor ConvertBorderColor(const std::array<float, 4>& color) {
         UNIMPLEMENTED_MSG("Rect image view");
         return VK_IMAGE_VIEW_TYPE_2D;
     case VideoCommon::ImageViewType::Buffer:
-        UNREACHABLE_MSG("Texture buffers can't be image views");
+        ASSERT_MSG(false, "Texture buffers can't be image views");
         return VK_IMAGE_VIEW_TYPE_1D;
     }
-    UNREACHABLE_MSG("Invalid image view type={}", type);
+    ASSERT_MSG(false, "Invalid image view type={}", type);
     return VK_IMAGE_VIEW_TYPE_2D;
 }

@@ -644,7 +644,7 @@ struct RangedBarrierRange {
     case Shader::ImageFormat::R32G32B32A32_UINT:
         return VK_FORMAT_R32G32B32A32_UINT;
     }
-    UNREACHABLE_MSG("Invalid image format={}", format);
+    ASSERT_MSG(false, "Invalid image format={}", format);
     return VK_FORMAT_R32_UINT;
 }

@@ -1596,7 +1596,7 @@ ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewI
         UNIMPLEMENTED();
         break;
     case VideoCommon::ImageViewType::Buffer:
-        UNREACHABLE();
+        ASSERT(false);
         break;
     }
 }

@@ -1822,7 +1822,7 @@ void TextureCacheRuntime::AccelerateImageUpload(
     if (IsPixelFormatASTC(image.info.format)) {
         return astc_decoder_pass.Assemble(image, map, swizzles);
     }
-    UNREACHABLE();
+    ASSERT(false);
 }

 } // namespace Vulkan

@@ -280,7 +280,7 @@ GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
         stage_index = 4;
         break;
     default:
-        UNREACHABLE_MSG("Invalid program={}", program);
+        ASSERT_MSG(false, "Invalid program={}", program);
         break;
     }
     const u64 local_size{sph.LocalMemorySize()};

@@ -29,7 +29,7 @@ SurfaceTarget SurfaceTargetFromTextureType(Tegra::Texture::TextureType texture_t
         return SurfaceTarget::Texture2DArray;
     default:
         LOG_CRITICAL(HW_GPU, "Unimplemented texture_type={}", texture_type);
-        UNREACHABLE();
+        ASSERT(false);
         return SurfaceTarget::Texture2D;
     }
 }

@@ -48,7 +48,7 @@ bool SurfaceTargetIsLayered(SurfaceTarget target) {
         return true;
     default:
         LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", target);
-        UNREACHABLE();
+        ASSERT(false);
         return false;
     }
 }

@@ -67,7 +67,7 @@ bool SurfaceTargetIsArray(SurfaceTarget target) {
         return true;
     default:
         LOG_CRITICAL(HW_GPU, "Unimplemented surface_target={}", target);
-        UNREACHABLE();
+        ASSERT(false);
         return false;
     }
 }

@@ -94,7 +94,7 @@ ImageInfo::ImageInfo(const TICEntry& config) noexcept {
         resources.layers = 1;
         break;
     default:
-        UNREACHABLE_MSG("Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
+        ASSERT_MSG(false, "Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
         break;
     }
     if (type != ImageType::Linear) {

@@ -71,7 +71,7 @@ ImageViewInfo::ImageViewInfo(const TICEntry& config, s32 base_layer) noexcept
         range.extent.layers = config.Depth() * 6;
         break;
     default:
-        UNREACHABLE_MSG("Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
+        ASSERT_MSG(false, "Invalid texture_type={}", static_cast<int>(config.texture_type.Value()));
         break;
     }
 }

@@ -23,7 +23,7 @@ namespace VideoCommon {
     case 16:
         return {2, 2};
     }
-    UNREACHABLE_MSG("Invalid number of samples={}", num_samples);
+    ASSERT_MSG(false, "Invalid number of samples={}", num_samples);
     return {1, 1};
 }

@@ -47,7 +47,7 @@ namespace VideoCommon {
     case MsaaMode::Msaa4x4:
         return 16;
     }
-    UNREACHABLE_MSG("Invalid MSAA mode={}", static_cast<int>(msaa_mode));
+    ASSERT_MSG(false, "Invalid MSAA mode={}", static_cast<int>(msaa_mode));
     return 1;
 }

@@ -1485,13 +1485,13 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
         std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) {
         const auto page_it = selected_page_table.find(page);
         if (page_it == selected_page_table.end()) {
-            UNREACHABLE_MSG("Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
             return;
         }
         std::vector<ImageId>& image_ids = page_it->second;
         const auto vector_it = std::ranges::find(image_ids, image_id);
         if (vector_it == image_ids.end()) {
-            UNREACHABLE_MSG("Unregistering unregistered image in page=0x{:x}",
+            ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
                             page << PAGE_BITS);
             return;
         }

@@ -1504,13 +1504,13 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
     ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
         const auto page_it = page_table.find(page);
         if (page_it == page_table.end()) {
-            UNREACHABLE_MSG("Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
             return;
         }
         std::vector<ImageMapId>& image_map_ids = page_it->second;
         const auto vector_it = std::ranges::find(image_map_ids, map_id);
         if (vector_it == image_map_ids.end()) {
-            UNREACHABLE_MSG("Unregistering unregistered image in page=0x{:x}",
+            ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
                             page << PAGE_BITS);
             return;
         }

@@ -1532,7 +1532,7 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
     ForEachCPUPage(cpu_addr, size, [this, image_id](u64 page) {
         const auto page_it = page_table.find(page);
         if (page_it == page_table.end()) {
-            UNREACHABLE_MSG("Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
             return;
         }
         std::vector<ImageMapId>& image_map_ids = page_it->second;

@@ -1616,7 +1616,7 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
     const GPUVAddr gpu_addr = image.gpu_addr;
     const auto alloc_it = image_allocs_table.find(gpu_addr);
     if (alloc_it == image_allocs_table.end()) {
-        UNREACHABLE_MSG("Trying to delete an image alloc that does not exist in address 0x{:x}",
+        ASSERT_MSG(false, "Trying to delete an image alloc that does not exist in address 0x{:x}",
                         gpu_addr);
         return;
     }

@@ -1624,7 +1624,7 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
     std::vector<ImageId>& alloc_images = slot_image_allocs[alloc_id].images;
     const auto alloc_image_it = std::ranges::find(alloc_images, image_id);
     if (alloc_image_it == alloc_images.end()) {
-        UNREACHABLE_MSG("Trying to delete an image that does not exist");
+        ASSERT_MSG(false, "Trying to delete an image that does not exist");
         return;
     }
     ASSERT_MSG(False(image.flags & ImageFlagBits::Tracked), "Image was not untracked");

@@ -87,7 +87,7 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe
         BPP_CASE(16)
 #undef BPP_CASE
     default:
-        UNREACHABLE_MSG("Invalid bytes_per_pixel={}", bytes_per_pixel);
+        ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
     }
 }

@@ -209,7 +209,7 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32
         BPP_CASE(16)
 #undef BPP_CASE
     default:
-        UNREACHABLE_MSG("Invalid bytes_per_pixel={}", bytes_per_pixel);
+        ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
     }
 }

@@ -230,7 +230,7 @@ void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width,
         BPP_CASE(16)
 #undef BPP_CASE
     default:
-        UNREACHABLE_MSG("Invalid bytes_per_pixel={}", bytes_per_pixel);
+        ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
     }
 }

@@ -253,7 +253,7 @@ void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 widt
         BPP_CASE(16)
 #undef BPP_CASE
     default:
-        UNREACHABLE_MSG("Invalid bytes_per_pixel={}", bytes_per_pixel);
+        ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
     }
 }

@@ -738,7 +738,8 @@ VkFormat Device::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags
     // The wanted format is not supported by hardware, search for alternatives
     const VkFormat* alternatives = GetFormatAlternatives(wanted_format);
     if (alternatives == nullptr) {
-        UNREACHABLE_MSG("Format={} with usage={} and type={} has no defined alternatives and host "
+        ASSERT_MSG(false,
+                   "Format={} with usage={} and type={} has no defined alternatives and host "
                    "hardware does not support it",
                    wanted_format, wanted_usage, format_type);
         return wanted_format;

@@ -756,7 +757,8 @@ VkFormat Device::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags
     }

     // No alternatives found, panic
-    UNREACHABLE_MSG("Format={} with usage={} and type={} is not supported by the host hardware and "
+    ASSERT_MSG(false,
+               "Format={} with usage={} and type={} is not supported by the host hardware and "
                "doesn't support any of the alternatives",
                wanted_format, wanted_usage, format_type);
     return wanted_format;

@@ -49,7 +49,7 @@ struct Range {
         return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
     }
-    UNREACHABLE_MSG("Invalid memory usage={}", usage);
+    ASSERT_MSG(false, "Invalid memory usage={}", usage);
     return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
 }

@@ -325,7 +325,7 @@ VkMemoryPropertyFlags MemoryAllocator::MemoryPropertyFlags(u32 type_mask,
         // Remove device local, if it's not supported by the requested resource
         return MemoryPropertyFlags(type_mask, flags & ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
     }
-    UNREACHABLE_MSG("No compatible memory types found");
+    ASSERT_MSG(false, "No compatible memory types found");
     return 0;
 }

@@ -349,7 +349,7 @@ bool IsHostVisible(MemoryUsage usage) noexcept {
     case MemoryUsage::Download:
         return true;
     }
-    UNREACHABLE_MSG("Invalid memory usage={}", usage);
+    ASSERT_MSG(false, "Invalid memory usage={}", usage);
     return false;
 }

@@ -631,7 +631,7 @@ void QtControllerSelectorDialog::DisableUnsupportedPlayers() {
     switch (max_supported_players) {
     case 0:
     default:
-        UNREACHABLE();
+        ASSERT(false);
         return;
     case 1:
         ui->widgetSpacer->hide();

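All of the hunks above share one shape: a switch or lookup whose failure path raises a debug assertion with a message and then returns a conservative fallback so release builds keep running, instead of being declared unreachable. A minimal, self-contained sketch of that shape, using the standard assert macro and a hypothetical enum rather than the project's ASSERT_MSG macro and its real types:

#include <cassert>

// Hypothetical enum standing in for the real Tegra/Vulkan enums used in the hunks above.
enum class SampleMode { Single, Msaa4x4 };

// The failure path asserts in debug builds and falls back to a safe value otherwise.
int SampleCount(SampleMode mode) {
    switch (mode) {
    case SampleMode::Single:
        return 1;
    case SampleMode::Msaa4x4:
        return 16;
    default:
        assert(false && "Invalid sample mode"); // debug builds stop here
        return 1;                               // release builds continue with a fallback
    }
}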