early-access version 1995

This commit is contained in:
parent 1c11ae4a45
commit 66b92b0ba8

95 changed files with 18941 additions and 675 deletions

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============

-This is the source code for early-access 1994.
+This is the source code for early-access 1995.

 ## Legal Notice
5  externals/CMakeLists.txt (vendored)
@@ -7,7 +7,9 @@ include(DownloadExternals)
# xbyak
if (ARCHITECTURE_x86 OR ARCHITECTURE_x86_64)
    add_library(xbyak INTERFACE)
    target_include_directories(xbyak SYSTEM INTERFACE ./xbyak/xbyak)
    file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
    file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/xbyak/xbyak DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
    target_include_directories(xbyak SYSTEM INTERFACE ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
    target_compile_definitions(xbyak INTERFACE XBYAK_NO_OP_NAMES)
endif()

@@ -19,6 +21,7 @@ target_include_directories(catch-single-include INTERFACE catch/single_include)
if (ARCHITECTURE_x86_64)
    set(DYNARMIC_TESTS OFF)
    set(DYNARMIC_NO_BUNDLED_FMT ON)
    set(DYNARMIC_IGNORE_ASSERTS ON CACHE BOOL "" FORCE)
    add_subdirectory(dynarmic)
endif()
2  externals/dynarmic/.gitignore (vendored)

@@ -3,4 +3,4 @@ build/
build-*/
docs/Doxygen/
# Generated files
src/backend/x64/mig/
src/dynarmic/backend/x64/mig/
3  externals/dynarmic/CMakeLists.txt (vendored)

@@ -12,6 +12,7 @@ endif()
option(DYNARMIC_ENABLE_CPU_FEATURE_DETECTION "Turning this off causes dynarmic to assume the host CPU doesn't support anything later than SSE3" ON)
option(DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT "Enables support for systems that require W^X" OFF)
option(DYNARMIC_FATAL_ERRORS "Errors are fatal" OFF)
option(DYNARMIC_IGNORE_ASSERTS "Ignore asserts" OFF)
option(DYNARMIC_TESTS "Build tests" ${MASTER_PROJECT})
option(DYNARMIC_TESTS_USE_UNICORN "Enable fuzzing tests against unicorn" OFF)
option(DYNARMIC_USE_LLVM "Support disassembly of jitted x86_64 code using LLVM" OFF)

@@ -27,7 +28,7 @@ if (NOT CMAKE_BUILD_TYPE)
endif()

# Set hard requirements for C++
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
26  externals/dynarmic/docs/Design.md (vendored)

@@ -5,14 +5,14 @@ support for other versions of the ARM architecture, having an interpreter mode, a
for other architectures.

Users of this library interact with it primarily through the interface provided in
[`include/dynarmic`](../include/dynarmic). Users specify how dynarmic's CPU core interacts with
[`src/dynarmic/interface`](../src/dynarmic/interface). Users specify how dynarmic's CPU core interacts with
the rest of their system providing an implementation of the relevant `UserCallbacks` interface.
Users setup the CPU state using member functions of `Jit`, then call `Jit::Execute` to start CPU
execution. The callbacks defined on `UserCallbacks` may be called from dynamically generated code,
so users of the library should not depend on the stack being in a walkable state for unwinding.

* A32: [`Jit`](../include/dynarmic/A32/a32.h), [`UserCallbacks`](../include/dynarmic/A32/config.h)
* A64: [`Jit`](../include/dynarmic/A64/a64.h), [`UserCallbacks`](../include/dynarmic/A64/config.h)
* A32: [`Jit`](../src/dynarmic/interface/A32/a32.h), [`UserCallbacks`](../src/dynarmic/interface/A32/config.h)
* A64: [`Jit`](../src/dynarmic/interface/A64/a64.h), [`UserCallbacks`](../src/dynarmic/interface/A64/config.h)

Dynarmic reads instructions from memory by calling `UserCallbacks::MemoryReadCode`. These
instructions then pass through several stages:

@@ -26,19 +26,19 @@ instructions then pass through several stages:
Using the A32 frontend with the x64 backend as an example:

* Decoding is done by [double dispatch](https://en.wikipedia.org/wiki/Visitor_pattern) in
  [`src/frontend/A32/decoder/{arm.h,thumb16.h,thumb32.h}`](../src/frontend/A32/decoder/).
* Translation is done by the visitors in `src/frontend/A32/translate/translate_{arm,thumb}.cpp`.
  The function [`Translate`](../src/frontend/A32/translate/translate.h) takes a starting memory location,
  [`src/frontend/A32/decoder/{arm.h,thumb16.h,thumb32.h}`](../src/dynarmic/frontend/A32/decoder/).
* Translation is done by the visitors in [`src/dynarmic/frontend/A32/translate/translate_{arm,thumb}.cpp`](../src/dynarmic/frontend/A32/translate/).
  The function [`Translate`](../src/dynarmic/frontend/A32/translate/translate.h) takes a starting memory location,
  some CPU state, and memory reader callback and returns a basic block of IR.
* The IR can be found under [`src/frontend/ir/`](../src/frontend/ir/).
* Optimizations can be found under [`src/ir_opt/`](../src/ir_opt/).
* Emission is done by `EmitX64` which can be found in `src/backend_x64/emit_x64.{h,cpp}`.
* Execution is performed by calling `BlockOfCode::RunCode` in `src/backend_x64/block_of_code.{h,cpp}`.
* The IR can be found under [`src/frontend/ir/`](../src/dynarmic/ir/).
* Optimizations can be found under [`src/ir_opt/`](../src/dynarmic/ir/opt/).
* Emission is done by `EmitX64` which can be found in [`src/dynarmic/backend/x64/emit_x64.{h,cpp}`](../src/dynarmic/backend/x64/).
* Execution is performed by calling `BlockOfCode::RunCode` in [`src/dynarmic/backend/x64/block_of_code.{h,cpp}`](../src/dynarmic/backend/x64/).

## Decoder

The decoder is a double dispatch decoder. Each instruction is represented by a line in the relevant
instruction table. Here is an example line from [`arm.h`](../src/frontend/A32/decoder/arm.h):
instruction table. Here is an example line from [`arm.h`](../src/dynarmic/frontend/A32/decoder/arm.h):

    INST(&V::arm_ADC_imm, "ADC (imm)", "cccc0010101Snnnnddddrrrrvvvvvvvv")

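Each bitstring in the table marks bits that are fixed ('0'/'1') and bits that belong to operand fields (any other letter). As a rough, self-contained sketch of how such an entry can be matched against an instruction word (illustrative only; names such as `GetMaskExpect` are invented here and this is not dynarmic's actual decoder code):

```cpp
#include <cstdint>
#include <string_view>

struct MaskExpect {
    std::uint32_t mask = 0;    // 1 where the bitstring fixes a bit
    std::uint32_t expect = 0;  // required value of those fixed bits
};

constexpr MaskExpect GetMaskExpect(std::string_view bitstring) {
    MaskExpect m;
    for (const char c : bitstring) {
        m.mask <<= 1;
        m.expect <<= 1;
        if (c == '0' || c == '1') {
            m.mask |= 1;
            m.expect |= (c == '1');
        }
    }
    return m;
}

constexpr bool Matches(std::uint32_t instruction, std::string_view bitstring) {
    const MaskExpect m = GetMaskExpect(bitstring);
    return (instruction & m.mask) == m.expect;
}

// 0xE2A10002 encodes ADC r0, r1, #2 (cond = AL), so it matches the ADC (imm) entry above.
static_assert(Matches(0xE2A10002, "cccc0010101Snnnnddddrrrrvvvvvvvv"));
```

A table walk over such entries then dispatches to the member function named in the matching entry (here `&V::arm_ADC_imm`), which is the double-dispatch step described above.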
@@ -61,7 +61,7 @@ error results.
## Translator

The translator is a visitor that uses the decoder to decode instructions. The translator generates IR code with the
help of the [`IREmitter` class](../src/frontend/ir/ir_emitter.h). An example of a translation function follows:
help of the [`IREmitter` class](../src/dynarmic/ir/ir_emitter.h). An example of a translation function follows:

    bool ArmTranslatorVisitor::arm_ADC_imm(Cond cond, bool S, Reg n, Reg d, int rotate, Imm8 imm8) {
        u32 imm32 = ArmExpandImm(rotate, imm8);

@@ -107,7 +107,7 @@ function analyser in the medium-term future.
Dynarmic's intermediate representation is typed. Each microinstruction may take zero or more arguments and may
return zero or more arguments. A subset of the microinstructions available is documented below.

A complete list of microinstructions can be found in [src/frontend/ir/opcodes.inc](../src/frontend/ir/opcodes.inc).
A complete list of microinstructions can be found in [src/dynarmic/ir/opcodes.inc](../src/dynarmic/ir/opcodes.inc).

The below lists some commonly used microinstructions.
6  externals/dynarmic/externals/CMakeLists.txt (vendored)
@ -5,7 +5,7 @@
|
|||
# catch
|
||||
|
||||
add_library(catch INTERFACE)
|
||||
target_include_directories(catch INTERFACE $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/catch>)
|
||||
target_include_directories(catch INTERFACE $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/catch/include>)
|
||||
|
||||
# fmt
|
||||
|
||||
|
@ -36,7 +36,9 @@ endif()
|
|||
if (NOT TARGET xbyak)
|
||||
if (ARCHITECTURE STREQUAL "x86" OR ARCHITECTURE STREQUAL "x86_64")
|
||||
add_library(xbyak INTERFACE)
|
||||
target_include_directories(xbyak SYSTEM INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/xbyak/xbyak)
|
||||
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
|
||||
file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/xbyak/xbyak DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
|
||||
target_include_directories(xbyak SYSTEM INTERFACE ${CMAKE_CURRENT_BINARY_DIR}/xbyak/include)
|
||||
target_compile_definitions(xbyak INTERFACE XBYAK_NO_OP_NAMES)
|
||||
endif()
|
||||
endif()
|
||||
|
|
17802  externals/dynarmic/externals/catch/include/catch2/catch.hpp (vendored, executable file)
File diff suppressed because it is too large
1  externals/dynarmic/externals/mp/README.md (vendored)

@@ -103,6 +103,7 @@ Type traits not in the standard library.
* `mp::parameter_list`: Get a typelist of the parameter types
* `mp::get_parameter`: Get the type of a parameter by index
* `mp::equivalent_function_type`: Get an equivalent function type (for MFPs this does not include the class)
* `mp::equivalent_function_type_with_class`: Get an equivalent function type with explicit `this` argument (MFPs only)
* `mp::return_type`: Return type of the function
* `mp::class_type`: Only valid for member function pointer types. Gets the class the member function is associated with.

@@ -36,11 +36,15 @@ struct function_info<R(*)(As...)> : function_info<R(As...)> {};
template<class C, class R, class... As>
struct function_info<R(C::*)(As...)> : function_info<R(As...)> {
    using class_type = C;

    using equivalent_function_type_with_class = R(C*, As...);
};

template<class C, class R, class... As>
struct function_info<R(C::*)(As...) const> : function_info<R(As...)> {
    using class_type = C;

    using equivalent_function_type_with_class = R(C*, As...);
};

template<class F>

@@ -55,6 +59,9 @@ using get_parameter = typename function_info<F>::template parameter<I>::type;
template<class F>
using equivalent_function_type = typename function_info<F>::equivalent_function_type;

template<class F>
using equivalent_function_type_with_class = typename function_info<F>::equivalent_function_type_with_class;

template<class F>
using return_type = typename function_info<F>::return_type;
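The new `equivalent_function_type_with_class` alias turns a member-function-pointer type into a plain function type whose first parameter is the class pointer (the explicit `this`). A self-contained sketch of what the specializations above yield, using a simplified re-implementation rather than the mp headers themselves (the `Jit`/`Run` names are only for illustration):

```cpp
#include <type_traits>

// Minimal re-implementation mirroring the specializations shown above.
template<class F>
struct function_info;

template<class R, class... As>
struct function_info<R(As...)> {
    using return_type = R;
    using equivalent_function_type = R(As...);
};

template<class C, class R, class... As>
struct function_info<R (C::*)(As...)> : function_info<R(As...)> {
    using class_type = C;
    // The explicit `this` becomes the first parameter.
    using equivalent_function_type_with_class = R(C*, As...);
};

struct Jit {
    void Run(int ticks);
};

// For a member function pointer, the trait yields a free-function type that
// takes the object pointer explicitly.
static_assert(std::is_same_v<
    function_info<decltype(&Jit::Run)>::equivalent_function_type_with_class,
    void(Jit*, int)>);
```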
|
|
|
@ -381,6 +381,9 @@ endif()
|
|||
if (DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT)
|
||||
target_compile_definitions(dynarmic PRIVATE DYNARMIC_ENABLE_NO_EXECUTE_SUPPORT=1)
|
||||
endif()
|
||||
if (DYNARMIC_IGNORE_ASSERTS)
|
||||
target_compile_definitions(dynarmic PRIVATE DYNARMIC_IGNORE_ASSERTS=1)
|
||||
endif()
|
||||
if (CMAKE_SYSTEM_NAME STREQUAL "Windows")
|
||||
target_compile_definitions(dynarmic PRIVATE FMT_USE_WINDOWS_H=0)
|
||||
endif()
|
||||
|
|
|
@ -629,18 +629,10 @@ static void EmitSetFlag(BlockOfCode& code, A32EmitContext& ctx, IR::Inst* inst,
|
|||
}
|
||||
}
|
||||
|
||||
void A32EmitX64::EmitA32GetNFlag(A32EmitContext& ctx, IR::Inst* inst) {
|
||||
EmitGetFlag(code, ctx, inst, NZCV::x64_n_flag_bit);
|
||||
}
|
||||
|
||||
void A32EmitX64::EmitA32SetNFlag(A32EmitContext& ctx, IR::Inst* inst) {
|
||||
EmitSetFlag(code, ctx, inst, NZCV::x64_n_flag_bit);
|
||||
}
|
||||
|
||||
void A32EmitX64::EmitA32GetZFlag(A32EmitContext& ctx, IR::Inst* inst) {
|
||||
EmitGetFlag(code, ctx, inst, NZCV::x64_z_flag_bit);
|
||||
}
|
||||
|
||||
void A32EmitX64::EmitA32SetZFlag(A32EmitContext& ctx, IR::Inst* inst) {
|
||||
EmitSetFlag(code, ctx, inst, NZCV::x64_z_flag_bit);
|
||||
}
|
||||
|
@ -653,10 +645,6 @@ void A32EmitX64::EmitA32SetCFlag(A32EmitContext& ctx, IR::Inst* inst) {
|
|||
EmitSetFlag(code, ctx, inst, NZCV::x64_c_flag_bit);
|
||||
}
|
||||
|
||||
void A32EmitX64::EmitA32GetVFlag(A32EmitContext& ctx, IR::Inst* inst) {
|
||||
EmitGetFlag(code, ctx, inst, NZCV::x64_v_flag_bit);
|
||||
}
|
||||
|
||||
void A32EmitX64::EmitA32SetVFlag(A32EmitContext& ctx, IR::Inst* inst) {
|
||||
EmitSetFlag(code, ctx, inst, NZCV::x64_v_flag_bit);
|
||||
}
|
||||
|
|
|
@ -18,8 +18,8 @@
|
|||
#include "dynarmic/common/assert.h"
|
||||
#include "dynarmic/common/cast_util.h"
|
||||
#include "dynarmic/common/common_types.h"
|
||||
#include "dynarmic/common/llvm_disassemble.h"
|
||||
#include "dynarmic/common/scope_exit.h"
|
||||
#include "dynarmic/common/x64_disassemble.h"
|
||||
#include "dynarmic/frontend/A32/translate/translate.h"
|
||||
#include "dynarmic/interface/A32/a32.h"
|
||||
#include "dynarmic/interface/A32/context.h"
|
||||
|
@ -91,13 +91,6 @@ struct Jit::Impl {
|
|||
jit_state.exclusive_state = 0;
|
||||
}
|
||||
|
||||
std::string Disassemble(const IR::LocationDescriptor& descriptor) {
|
||||
auto block = GetBasicBlock(descriptor);
|
||||
std::string result = fmt::format("address: {}\nsize: {} bytes\n", block.entrypoint, block.size);
|
||||
result += Common::DisassembleX64(block.entrypoint, reinterpret_cast<const char*>(block.entrypoint) + block.size);
|
||||
return result;
|
||||
}
|
||||
|
||||
void PerformCacheInvalidation() {
|
||||
if (invalidate_entire_cache) {
|
||||
jit_state.ResetRSB();
|
||||
|
@ -324,8 +317,9 @@ void Jit::LoadContext(const Context& ctx) {
|
|||
impl->jit_state.TransferJitState(ctx.impl->jit_state, reset_rsb);
|
||||
}
|
||||
|
||||
std::string Jit::Disassemble() const {
|
||||
return Common::DisassembleX64(impl->block_of_code.GetCodeBegin(), impl->block_of_code.getCurr());
|
||||
void Jit::DumpDisassembly() const {
|
||||
const size_t size = (const char*)impl->block_of_code.getCurr() - (const char*)impl->block_of_code.GetCodeBegin();
|
||||
Common::DumpDisassembledX64(impl->block_of_code.GetCodeBegin(), size);
|
||||
}
|
||||
|
||||
} // namespace Dynarmic::A32
|
||||
|
|
|
@ -7,8 +7,6 @@
|
|||
|
||||
#include <array>
|
||||
|
||||
#include <xbyak.h>
|
||||
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
namespace Dynarmic::Backend::X64 {
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
#include "dynarmic/backend/x64/devirtualize.h"
|
||||
#include "dynarmic/backend/x64/jitstate_info.h"
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include "dynarmic/common/llvm_disassemble.h"
|
||||
#include "dynarmic/common/scope_exit.h"
|
||||
#include "dynarmic/common/x64_disassemble.h"
|
||||
#include "dynarmic/frontend/A64/translate/translate.h"
|
||||
#include "dynarmic/interface/A64/a64.h"
|
||||
#include "dynarmic/ir/basic_block.h"
|
||||
|
@ -199,8 +199,9 @@ public:
|
|||
return is_executing;
|
||||
}
|
||||
|
||||
std::string Disassemble() const {
|
||||
return Common::DisassembleX64(block_of_code.GetCodeBegin(), block_of_code.getCurr());
|
||||
void DumpDisassembly() const {
|
||||
const size_t size = (const char*)block_of_code.getCurr() - (const char*)block_of_code.GetCodeBegin();
|
||||
Common::DumpDisassembledX64(block_of_code.GetCodeBegin(), size);
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -397,8 +398,8 @@ bool Jit::IsExecuting() const {
|
|||
return impl->IsExecuting();
|
||||
}
|
||||
|
||||
std::string Jit::Disassemble() const {
|
||||
return impl->Disassemble();
|
||||
void Jit::DumpDisassembly() const {
|
||||
return impl->DumpDisassembly();
|
||||
}
|
||||
|
||||
} // namespace Dynarmic::A64
|
||||
|
|
|
@ -7,8 +7,6 @@
|
|||
|
||||
#include <array>
|
||||
|
||||
#include <xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/nzcv_util.h"
|
||||
#include "dynarmic/common/common_types.h"
|
||||
#include "dynarmic/frontend/A64/location_descriptor.h"
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <algorithm>
|
||||
#include <vector>
|
||||
|
||||
#include <xbyak.h>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/block_of_code.h"
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
#include <array>
|
||||
#include <cstring>
|
||||
|
||||
#include <xbyak.h>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/a32_jitstate.h"
|
||||
#include "dynarmic/backend/x64/abi.h"
|
||||
|
@ -258,8 +258,6 @@ void BlockOfCode::GenRunCode(std::function<void(BlockOfCode&)> rcp) {
|
|||
SwitchMxcsrOnEntry();
|
||||
jmp(ABI_PARAM2);
|
||||
|
||||
align();
|
||||
|
||||
// Dispatcher loop
|
||||
|
||||
Xbyak::Label return_to_caller, return_to_caller_mxcsr_already_exited;
|
||||
|
|
|
@ -10,8 +10,8 @@
|
|||
#include <memory>
|
||||
#include <type_traits>
|
||||
|
||||
#include <xbyak.h>
|
||||
#include <xbyak_util.h>
|
||||
#include <xbyak/xbyak.h>
|
||||
#include <xbyak/xbyak_util.h>
|
||||
|
||||
#include "dynarmic/backend/x64/abi.h"
|
||||
#include "dynarmic/backend/x64/callback.h"
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <functional>
|
||||
#include <vector>
|
||||
|
||||
#include <xbyak.h>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <map>
|
||||
#include <tuple>
|
||||
|
||||
#include <xbyak.h>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
|
||||
#include <tsl/robin_map.h>
|
||||
#include <tsl/robin_set.h>
|
||||
#include <xbyak_util.h>
|
||||
#include <xbyak/xbyak_util.h>
|
||||
|
||||
#include "dynarmic/backend/x64/exception_handler.h"
|
||||
#include "dynarmic/backend/x64/reg_alloc.h"
|
||||
|
|
|
@ -1283,6 +1283,72 @@ void EmitX64::EmitAnd64(EmitContext& ctx, IR::Inst* inst) {
|
|||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
}
|
||||
|
||||
void EmitX64::EmitAndNot32(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
|
||||
if (!args[0].IsImmediate() && !args[1].IsImmediate() && code.HasHostFeature(HostFeature::BMI1)) {
|
||||
Xbyak::Reg32 op_a = ctx.reg_alloc.UseGpr(args[0]).cvt32();
|
||||
Xbyak::Reg32 op_b = ctx.reg_alloc.UseGpr(args[1]).cvt32();
|
||||
Xbyak::Reg32 result = ctx.reg_alloc.ScratchGpr().cvt32();
|
||||
code.andn(result, op_b, op_a);
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
return;
|
||||
}
|
||||
|
||||
Xbyak::Reg32 result;
|
||||
if (args[1].IsImmediate()) {
|
||||
result = ctx.reg_alloc.ScratchGpr().cvt32();
|
||||
code.mov(result, u32(~args[1].GetImmediateU32()));
|
||||
} else {
|
||||
result = ctx.reg_alloc.UseScratchGpr(args[1]).cvt32();
|
||||
code.not_(result);
|
||||
}
|
||||
|
||||
if (args[0].IsImmediate()) {
|
||||
const u32 op_arg = args[0].GetImmediateU32();
|
||||
code.and_(result, op_arg);
|
||||
} else {
|
||||
OpArg op_arg = ctx.reg_alloc.UseOpArg(args[0]);
|
||||
op_arg.setBit(32);
|
||||
code.and_(result, *op_arg);
|
||||
}
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
}
|
||||
|
||||
void EmitX64::EmitAndNot64(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
|
||||
if (!args[0].IsImmediate() && !args[1].IsImmediate() && code.HasHostFeature(HostFeature::BMI1)) {
|
||||
Xbyak::Reg64 op_a = ctx.reg_alloc.UseGpr(args[0]);
|
||||
Xbyak::Reg64 op_b = ctx.reg_alloc.UseGpr(args[1]);
|
||||
Xbyak::Reg64 result = ctx.reg_alloc.ScratchGpr();
|
||||
code.andn(result, op_b, op_a);
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
return;
|
||||
}
|
||||
|
||||
Xbyak::Reg64 result;
|
||||
if (args[1].IsImmediate()) {
|
||||
result = ctx.reg_alloc.ScratchGpr();
|
||||
code.mov(result, ~args[1].GetImmediateU64());
|
||||
} else {
|
||||
result = ctx.reg_alloc.UseScratchGpr(args[1]);
|
||||
code.not_(result);
|
||||
}
|
||||
|
||||
if (args[0].FitsInImmediateS32()) {
|
||||
const u32 op_arg = u32(args[0].GetImmediateS32());
|
||||
code.and_(result, op_arg);
|
||||
} else {
|
||||
OpArg op_arg = ctx.reg_alloc.UseOpArg(args[0]);
|
||||
op_arg.setBit(64);
|
||||
code.and_(result, *op_arg);
|
||||
}
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
}
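The new `AndNot32`/`AndNot64` opcodes compute `a & ~b`; note the swapped operands in the BMI1 path above, since x86 `ANDN dest, src1, src2` computes `~src1 & src2`. A minimal reference model of the intended semantics (illustrative sketch, not part of the diff):

```cpp
#include <cstdint>

// Reference semantics for the new IR opcodes AndNot32/AndNot64: a & ~b.
// BMI1 ANDN computes ~src1 & src2, so andn(result, op_b, op_a) == op_a & ~op_b.
constexpr std::uint32_t AndNot32(std::uint32_t a, std::uint32_t b) {
    return a & ~b;
}

static_assert(AndNot32(0b1100, 0b1010) == 0b0100);
```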
|
||||
|
||||
void EmitX64::EmitEor32(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
|
||||
|
|
|
@ -766,12 +766,16 @@ static void EmitFPRecipEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
|
|||
const Xbyak::Xmm operand = ctx.reg_alloc.UseXmm(args[0]);
|
||||
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
|
||||
|
||||
if constexpr (fsize == 32) {
|
||||
code.rcpss(result, operand);
|
||||
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
|
||||
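// VRCP14SS/VRCP14SD give a reciprocal approximation with relative error <= 2^-14,
// avoiding the rcpss path (and the cvtsd2ss/cvtss2sd round trip for doubles) used below.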
FCODE(vrcp14s)(result, operand, operand);
|
||||
} else {
|
||||
code.cvtsd2ss(result, operand);
|
||||
code.rcpss(result, result);
|
||||
code.cvtss2sd(result, result);
|
||||
if constexpr (fsize == 32) {
|
||||
code.rcpss(result, operand);
|
||||
} else {
|
||||
code.cvtsd2ss(result, operand);
|
||||
code.rcpss(result, result);
|
||||
code.cvtss2sd(result, result);
|
||||
}
|
||||
}
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
|
@ -984,20 +988,22 @@ static void EmitFPRSqrtEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* i
|
|||
const Xbyak::Xmm operand = ctx.reg_alloc.UseXmm(args[0]);
|
||||
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
|
||||
|
||||
if constexpr (fsize == 32) {
|
||||
code.rsqrtss(result, operand);
|
||||
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
|
||||
FCODE(vrsqrt14s)(result, operand, operand);
|
||||
} else {
|
||||
code.cvtsd2ss(result, operand);
|
||||
code.rsqrtss(result, result);
|
||||
code.cvtss2sd(result, result);
|
||||
if constexpr (fsize == 32) {
|
||||
code.rsqrtss(result, operand);
|
||||
} else {
|
||||
code.cvtsd2ss(result, operand);
|
||||
code.rsqrtss(result, result);
|
||||
code.cvtss2sd(result, result);
|
||||
}
|
||||
}
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO: VRSQRT14SS implementation (AVX512F)
|
||||
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
|
||||
const Xbyak::Xmm operand = ctx.reg_alloc.UseXmm(args[0]);
|
||||
|
|
|
@ -165,7 +165,7 @@ void EmitX64::EmitVectorGetElement8(EmitContext& ctx, IR::Inst* inst) {
|
|||
if (code.HasHostFeature(HostFeature::SSE41)) {
|
||||
code.pextrb(dest, source, index);
|
||||
} else {
|
||||
code.pextrw(dest, source, index / 2);
|
||||
code.pextrw(dest, source, u8(index / 2));
|
||||
if (index % 2 == 1) {
|
||||
code.shr(dest, 8);
|
||||
} else {
|
||||
|
@ -439,6 +439,17 @@ void EmitX64::EmitVectorAnd(EmitContext& ctx, IR::Inst* inst) {
|
|||
EmitVectorOperation(code, ctx, inst, &Xbyak::CodeGenerator::pand);
|
||||
}
|
||||
|
||||
void EmitX64::EmitVectorAndNot(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
|
||||
const Xbyak::Xmm xmm_a = ctx.reg_alloc.UseXmm(args[0]);
|
||||
const Xbyak::Xmm xmm_b = ctx.reg_alloc.UseScratchXmm(args[1]);
|
||||
|
||||
code.pandn(xmm_b, xmm_a);
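// PANDN computes dest = ~dest & src, so this leaves xmm_b = xmm_a & ~xmm_b, i.e. AndNot(a, b).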
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, xmm_b);
|
||||
}
|
||||
|
||||
static void ArithmeticShiftRightByte(EmitContext& ctx, BlockOfCode& code, const Xbyak::Xmm& result, u8 shift_amount) {
|
||||
if (code.HasHostFeature(HostFeature::GFNI)) {
|
||||
const u64 shift_matrix = shift_amount < 8
|
||||
|
@ -741,6 +752,148 @@ void EmitX64::EmitVectorBroadcast64(EmitContext& ctx, IR::Inst* inst) {
|
|||
ctx.reg_alloc.DefineValue(inst, a);
|
||||
}
|
||||
|
||||
void EmitX64::EmitVectorBroadcastElementLower8(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
|
||||
ASSERT(args[1].IsImmediate());
|
||||
const u8 index = args[1].GetImmediateU8();
|
||||
ASSERT(index < 16);
|
||||
|
||||
if (index > 0) {
|
||||
code.psrldq(a, index);
|
||||
}
|
||||
|
||||
if (code.HasHostFeature(HostFeature::AVX2)) {
|
||||
code.vpbroadcastb(a, a);
|
||||
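// vmovq zero-extends: the upper 64 bits of a are cleared, leaving the broadcast byte only in the lower half.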
code.vmovq(a, a);
|
||||
} else if (code.HasHostFeature(HostFeature::SSSE3)) {
|
||||
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
|
||||
|
||||
code.pxor(tmp, tmp);
|
||||
code.pshufb(a, tmp);
|
||||
code.movq(a, a);
|
||||
} else {
|
||||
code.punpcklbw(a, a);
|
||||
code.pshuflw(a, a, 0);
|
||||
}
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, a);
|
||||
}
|
||||
|
||||
void EmitX64::EmitVectorBroadcastElementLower16(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
|
||||
ASSERT(args[1].IsImmediate());
|
||||
const u8 index = args[1].GetImmediateU8();
|
||||
ASSERT(index < 8);
|
||||
|
||||
if (index > 0) {
|
||||
code.psrldq(a, u8(index * 2));
|
||||
}
|
||||
|
||||
code.pshuflw(a, a, 0);
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, a);
|
||||
}
|
||||
|
||||
void EmitX64::EmitVectorBroadcastElementLower32(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
|
||||
ASSERT(args[1].IsImmediate());
|
||||
const u8 index = args[1].GetImmediateU8();
|
||||
ASSERT(index < 4);
|
||||
|
||||
if (index > 0) {
|
||||
code.psrldq(a, u8(index * 4));
|
||||
}
|
||||
|
||||
code.pshuflw(a, a, 0b01'00'01'00);
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, a);
|
||||
}
|
||||
|
||||
void EmitX64::EmitVectorBroadcastElement8(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
|
||||
ASSERT(args[1].IsImmediate());
|
||||
const u8 index = args[1].GetImmediateU8();
|
||||
ASSERT(index < 16);
|
||||
|
||||
if (index > 0) {
|
||||
code.psrldq(a, index);
|
||||
}
|
||||
|
||||
if (code.HasHostFeature(HostFeature::AVX2)) {
|
||||
code.vpbroadcastb(a, a);
|
||||
} else if (code.HasHostFeature(HostFeature::SSSE3)) {
|
||||
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
|
||||
|
||||
code.pxor(tmp, tmp);
|
||||
code.pshufb(a, tmp);
|
||||
} else {
|
||||
code.punpcklbw(a, a);
|
||||
code.pshuflw(a, a, 0);
|
||||
code.punpcklqdq(a, a);
|
||||
}
|
||||
ctx.reg_alloc.DefineValue(inst, a);
|
||||
}
|
||||
|
||||
void EmitX64::EmitVectorBroadcastElement16(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
|
||||
ASSERT(args[1].IsImmediate());
|
||||
const u8 index = args[1].GetImmediateU8();
|
||||
ASSERT(index < 8);
|
||||
|
||||
if (index == 0 && code.HasHostFeature(HostFeature::AVX2)) {
|
||||
code.vpbroadcastw(a, a);
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, a);
|
||||
return;
|
||||
}
|
||||
|
||||
if (index < 4) {
|
||||
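// Replicate<u8>(index, 2) repeats the 2-bit lane index into all four fields of the shuffle
// immediate (e.g. index 3 -> 0b11'11'11'11), so every low 16-bit lane receives the selected element.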
code.pshuflw(a, a, Common::Replicate<u8>(index, 2));
|
||||
code.punpcklqdq(a, a);
|
||||
} else {
|
||||
code.pshufhw(a, a, Common::Replicate<u8>(u8(index - 4), 2));
|
||||
code.punpckhqdq(a, a);
|
||||
}
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, a);
|
||||
}
|
||||
|
||||
void EmitX64::EmitVectorBroadcastElement32(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
|
||||
ASSERT(args[1].IsImmediate());
|
||||
const u8 index = args[1].GetImmediateU8();
|
||||
ASSERT(index < 4);
|
||||
|
||||
code.pshufd(a, a, Common::Replicate<u8>(index, 2));
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, a);
|
||||
}
|
||||
|
||||
void EmitX64::EmitVectorBroadcastElement64(EmitContext& ctx, IR::Inst* inst) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
const Xbyak::Xmm a = ctx.reg_alloc.UseScratchXmm(args[0]);
|
||||
ASSERT(args[1].IsImmediate());
|
||||
const u8 index = args[1].GetImmediateU8();
|
||||
ASSERT(index < 2);
|
||||
|
||||
if (code.HasHostFeature(HostFeature::AVX)) {
|
||||
code.vpermilpd(a, a, Common::Replicate<u8>(index, 1));
|
||||
} else {
|
||||
if (index == 0) {
|
||||
code.punpcklqdq(a, a);
|
||||
} else {
|
||||
code.punpckhqdq(a, a);
|
||||
}
|
||||
}
|
||||
ctx.reg_alloc.DefineValue(inst, a);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
static void EmitVectorCountLeadingZeros(VectorArray<T>& result, const VectorArray<T>& data) {
|
||||
for (size_t i = 0; i < result.size(); i++) {
|
||||
|
|
|
@ -1288,12 +1288,16 @@ static void EmitRecipEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* ins
|
|||
const Xbyak::Xmm operand = ctx.reg_alloc.UseXmm(args[0]);
|
||||
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
|
||||
|
||||
if constexpr (fsize == 32) {
|
||||
code.rcpps(result, operand);
|
||||
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
|
||||
FCODE(vrcp14p)(result, operand);
|
||||
} else {
|
||||
code.cvtpd2ps(result, operand);
|
||||
code.rcpps(result, result);
|
||||
code.cvtps2pd(result, result);
|
||||
if constexpr (fsize == 32) {
|
||||
code.rcpps(result, operand);
|
||||
} else {
|
||||
code.cvtpd2ps(result, operand);
|
||||
code.rcpps(result, result);
|
||||
code.cvtps2pd(result, result);
|
||||
}
|
||||
}
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
|
@ -1502,12 +1506,16 @@ static void EmitRSqrtEstimate(BlockOfCode& code, EmitContext& ctx, IR::Inst* ins
|
|||
const Xbyak::Xmm operand = ctx.reg_alloc.UseXmm(args[0]);
|
||||
const Xbyak::Xmm result = ctx.reg_alloc.ScratchXmm();
|
||||
|
||||
if constexpr (fsize == 32) {
|
||||
code.rsqrtps(result, operand);
|
||||
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
|
||||
FCODE(vrsqrt14p)(result, operand);
|
||||
} else {
|
||||
code.cvtpd2ps(result, operand);
|
||||
code.rsqrtps(result, result);
|
||||
code.cvtps2pd(result, result);
|
||||
if constexpr (fsize == 32) {
|
||||
code.rsqrtps(result, operand);
|
||||
} else {
|
||||
code.cvtpd2ps(result, operand);
|
||||
code.rsqrtps(result, result);
|
||||
code.cvtps2pd(result, result);
|
||||
}
|
||||
}
|
||||
|
||||
ctx.reg_alloc.DefineValue(inst, result);
|
||||
|
@ -1707,8 +1715,6 @@ void EmitFPVectorToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
|||
const auto rounding = static_cast<FP::RoundingMode>(inst->GetArg(2).GetU8());
|
||||
[[maybe_unused]] const bool fpcr_controlled = inst->GetArg(3).GetU1();
|
||||
|
||||
// TODO: AVX512 implementation
|
||||
|
||||
if constexpr (fsize != 16) {
|
||||
if (code.HasHostFeature(HostFeature::SSE41) && rounding != FP::RoundingMode::ToNearest_TieAwayFromZero) {
|
||||
auto args = ctx.reg_alloc.GetArgumentInfo(inst);
|
||||
|
@ -1737,17 +1743,21 @@ void EmitFPVectorToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
|||
if constexpr (fsize == 32) {
|
||||
code.cvttps2dq(src, src);
|
||||
} else {
|
||||
const Xbyak::Reg64 hi = ctx.reg_alloc.ScratchGpr();
|
||||
const Xbyak::Reg64 lo = ctx.reg_alloc.ScratchGpr();
|
||||
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
|
||||
code.vcvttpd2qq(src, src);
|
||||
} else {
|
||||
const Xbyak::Reg64 hi = ctx.reg_alloc.ScratchGpr();
|
||||
const Xbyak::Reg64 lo = ctx.reg_alloc.ScratchGpr();
|
||||
|
||||
code.cvttsd2si(lo, src);
|
||||
code.punpckhqdq(src, src);
|
||||
code.cvttsd2si(hi, src);
|
||||
code.movq(src, lo);
|
||||
code.pinsrq(src, hi, 1);
|
||||
code.cvttsd2si(lo, src);
|
||||
code.punpckhqdq(src, src);
|
||||
code.cvttsd2si(hi, src);
|
||||
code.movq(src, lo);
|
||||
code.pinsrq(src, hi, 1);
|
||||
|
||||
ctx.reg_alloc.Release(hi);
|
||||
ctx.reg_alloc.Release(lo);
|
||||
ctx.reg_alloc.Release(hi);
|
||||
ctx.reg_alloc.Release(lo);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -1765,29 +1775,43 @@ void EmitFPVectorToFixed(BlockOfCode& code, EmitContext& ctx, IR::Inst* inst) {
|
|||
[[maybe_unused]] constexpr u64 float_upper_limit_unsigned = fsize == 32 ? 0x4f800000 : 0x43f0000000000000;
|
||||
|
||||
if constexpr (unsigned_) {
|
||||
// Zero is minimum
|
||||
code.xorps(xmm0, xmm0);
|
||||
FCODE(cmplep)(xmm0, src);
|
||||
FCODE(andp)(src, xmm0);
|
||||
if (code.HasHostFeature(HostFeature::AVX512_OrthoFloat)) {
|
||||
// Mask positive values
|
||||
code.xorps(xmm0, xmm0);
|
||||
FCODE(vcmpp)(k1, src, xmm0, Cmp::GreaterEqual_OQ);
|
||||
|
||||
// Will we exceed unsigned range?
|
||||
const Xbyak::Xmm exceed_unsigned = ctx.reg_alloc.ScratchXmm();
|
||||
code.movaps(exceed_unsigned, GetVectorOf<fsize, float_upper_limit_unsigned>(code));
|
||||
FCODE(cmplep)(exceed_unsigned, src);
|
||||
// Convert positive values to unsigned integers, write 0 anywhere else
|
||||
// vcvttp*2u*q already saturates out-of-range values to (0xFFFF...)
|
||||
if constexpr (fsize == 32) {
|
||||
code.vcvttps2udq(src | k1 | T_z, src);
|
||||
} else {
|
||||
code.vcvttpd2uqq(src | k1 | T_z, src);
|
||||
}
|
||||
} else {
|
||||
// Zero is minimum
|
||||
code.xorps(xmm0, xmm0);
|
||||
FCODE(cmplep)(xmm0, src);
|
||||
FCODE(andp)(src, xmm0);
|
||||
|
||||
// Will we exceed signed range?
|
||||
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
|
||||
code.movaps(tmp, GetVectorOf<fsize, float_upper_limit_signed>(code));
|
||||
code.movaps(xmm0, tmp);
|
||||
FCODE(cmplep)(xmm0, src);
|
||||
FCODE(andp)(tmp, xmm0);
|
||||
FCODE(subp)(src, tmp);
|
||||
perform_conversion(src);
|
||||
ICODE(psll)(xmm0, static_cast<u8>(fsize - 1));
|
||||
FCODE(orp)(src, xmm0);
|
||||
// Will we exceed unsigned range?
|
||||
const Xbyak::Xmm exceed_unsigned = ctx.reg_alloc.ScratchXmm();
|
||||
code.movaps(exceed_unsigned, GetVectorOf<fsize, float_upper_limit_unsigned>(code));
|
||||
FCODE(cmplep)(exceed_unsigned, src);
|
||||
|
||||
// Saturate to max
|
||||
FCODE(orp)(src, exceed_unsigned);
|
||||
// Will we exceed signed range?
|
||||
const Xbyak::Xmm tmp = ctx.reg_alloc.ScratchXmm();
|
||||
code.movaps(tmp, GetVectorOf<fsize, float_upper_limit_signed>(code));
|
||||
code.movaps(xmm0, tmp);
|
||||
FCODE(cmplep)(xmm0, src);
|
||||
FCODE(andp)(tmp, xmm0);
|
||||
FCODE(subp)(src, tmp);
|
||||
perform_conversion(src);
|
||||
ICODE(psll)(xmm0, static_cast<u8>(fsize - 1));
|
||||
FCODE(orp)(src, xmm0);
|
||||
|
||||
// Saturate to max
|
||||
FCODE(orp)(src, exceed_unsigned);
|
||||
}
|
||||
} else {
|
||||
constexpr u64 integer_max = static_cast<FPT>(std::numeric_limits<std::conditional_t<unsigned_, FPT, std::make_signed_t<FPT>>>::max());
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
|
||||
#include "dynarmic/backend/x64/hostloc.h"
|
||||
|
||||
#include <xbyak.h>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/abi.h"
|
||||
#include "dynarmic/backend/x64/stack_layout.h"
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
*/
|
||||
#pragma once
|
||||
|
||||
#include <xbyak.h>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <xbyak.h>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/common/assert.h"
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
#include <utility>
|
||||
|
||||
#include <fmt/ostream.h>
|
||||
#include <xbyak.h>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/abi.h"
|
||||
#include "dynarmic/backend/x64/stack_layout.h"
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include <xbyak.h>
|
||||
#include <xbyak/xbyak.h>
|
||||
|
||||
#include "dynarmic/backend/x64/block_of_code.h"
|
||||
#include "dynarmic/backend/x64/hostloc.h"
|
||||
|
|
|
@@ -203,7 +203,7 @@ constexpr T Replicate(T value, size_t element_size) {
    ASSERT_MSG(BitSize<T>() % element_size == 0, "bitsize of T not divisible by element_size");
    if (element_size == BitSize<T>())
        return value;
    return Replicate(value | (value << element_size), element_size * 2);
    return Replicate<T>(T(value | value << element_size), element_size * 2);
}

template<typename T>
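The change pins the recursion to the original `T`: in the old form, `value | (value << element_size)` undergoes integer promotion for narrow types, so the recursive call deduced a wider `T`. A standalone sketch of the behaviour (simplified copy for illustration; it uses `sizeof(T) * 8` in place of `BitSize<T>()` and drops the assert):

```cpp
#include <cstddef>
#include <cstdint>

// Simplified copy of Common::Replicate for illustration only.
template<typename T>
constexpr T Replicate(T value, std::size_t element_size) {
    if (element_size == sizeof(T) * 8)
        return value;
    // Forcing <T> keeps the recursion at the original width even though
    // `value | value << element_size` is promoted to int for narrow types.
    return Replicate<T>(T(value | value << element_size), element_size * 2);
}

// An 8-bit pattern replicated across a 32-bit value:
static_assert(Replicate<std::uint32_t>(0xABu, 8) == 0xABABABABu);
// A 2-bit element index replicated across a byte (as used for the pshufd/pshuflw immediates above):
static_assert(Replicate<std::uint8_t>(3, 2) == 0xFF);
```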
|
|
@ -13,6 +13,7 @@
|
|||
#endif
|
||||
|
||||
#include "dynarmic/common/assert.h"
|
||||
#include "dynarmic/common/cast_util.h"
|
||||
#include "dynarmic/common/common_types.h"
|
||||
#include "dynarmic/common/llvm_disassemble.h"
|
||||
|
||||
|
@ -48,7 +49,7 @@ std::string DisassembleX64(const void* begin, const void* end) {
|
|||
LLVMDisasmDispose(llvm_ctx);
|
||||
#else
|
||||
result += fmt::format("(recompile with DYNARMIC_USE_LLVM=ON to disassemble the generated x86_64 code)\n");
|
||||
result += fmt::format("start: {:016x}, end: {:016x}\n", begin, end);
|
||||
result += fmt::format("start: {:016x}, end: {:016x}\n", BitCast<u64>(begin), BitCast<u64>(end));
|
||||
#endif
|
||||
|
||||
return result;
|
||||
|
|
|
@ -1364,12 +1364,12 @@ public:
|
|||
|
||||
std::string vfp_VMOV_from_i16(Cond cond, Imm<1> i1, size_t Vd, Reg t, bool D, Imm<1> i2) {
|
||||
const size_t index = concatenate(i1, i2).ZeroExtend();
|
||||
return fmt::format("vmov{}.{}16 {}[{}], {}", CondToString(cond), FPRegStr(true, Vd, D), index, t);
|
||||
return fmt::format("vmov{}.16 {}[{}], {}", CondToString(cond), FPRegStr(true, Vd, D), index, t);
|
||||
}
|
||||
|
||||
std::string vfp_VMOV_from_i8(Cond cond, Imm<1> i1, size_t Vd, Reg t, bool D, Imm<2> i2) {
|
||||
const size_t index = concatenate(i1, i2).ZeroExtend();
|
||||
return fmt::format("vmov{}.{}8 {}[{}], {}", CondToString(cond), FPRegStr(true, Vd, D), index, t);
|
||||
return fmt::format("vmov{}.8 {}[{}], {}", CondToString(cond), FPRegStr(true, Vd, D), index, t);
|
||||
}
|
||||
|
||||
std::string vfp_VMOV_to_i32(Cond cond, Imm<1> i, size_t Vn, Reg t, bool N) {
|
||||
|
|
|
@ -32,6 +32,7 @@ bool IsConditionPassed(TranslatorVisitor& v, IR::Cond cond) {
|
|||
|
||||
if (cond == IR::Cond::NV) {
|
||||
// NV conditional is obsolete
|
||||
v.cond_state = ConditionalState::Break;
|
||||
v.RaiseException(Exception::UnpredictableInstruction);
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -80,8 +80,7 @@ bool TranslatorVisitor::asimd_VDUP_scalar(bool D, Imm<4> imm4, size_t Vd, bool Q
|
|||
const auto m = ToVector(false, Vm, M);
|
||||
|
||||
const auto reg_m = ir.GetVector(m);
|
||||
const auto scalar = ir.VectorGetElement(esize, reg_m, index);
|
||||
const auto result = ir.VectorBroadcast(esize, scalar);
|
||||
const auto result = ir.VectorBroadcastElement(esize, reg_m, index);
|
||||
|
||||
ir.SetVector(d, result);
|
||||
return true;
|
||||
|
|
|
@ -318,7 +318,7 @@ bool TranslatorVisitor::asimd_VAND_reg(bool D, size_t Vn, size_t Vd, bool N, boo
|
|||
|
||||
bool TranslatorVisitor::asimd_VBIC_reg(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
|
||||
return BitwiseInstruction<false>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_n, const auto& reg_m) {
|
||||
return ir.VectorAnd(reg_n, ir.VectorNot(reg_m));
|
||||
return ir.VectorAndNot(reg_n, reg_m);
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -342,19 +342,19 @@ bool TranslatorVisitor::asimd_VEOR_reg(bool D, size_t Vn, size_t Vd, bool N, boo
|
|||
|
||||
bool TranslatorVisitor::asimd_VBSL(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
|
||||
return BitwiseInstruction<true>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_d, const auto& reg_n, const auto& reg_m) {
|
||||
return ir.VectorOr(ir.VectorAnd(reg_n, reg_d), ir.VectorAnd(reg_m, ir.VectorNot(reg_d)));
|
||||
return ir.VectorOr(ir.VectorAnd(reg_n, reg_d), ir.VectorAndNot(reg_m, reg_d));
|
||||
});
|
||||
}
|
||||
|
||||
bool TranslatorVisitor::asimd_VBIT(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
|
||||
return BitwiseInstruction<true>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_d, const auto& reg_n, const auto& reg_m) {
|
||||
return ir.VectorOr(ir.VectorAnd(reg_n, reg_m), ir.VectorAnd(reg_d, ir.VectorNot(reg_m)));
|
||||
return ir.VectorOr(ir.VectorAnd(reg_n, reg_m), ir.VectorAndNot(reg_d, reg_m));
|
||||
});
|
||||
}
|
||||
|
||||
bool TranslatorVisitor::asimd_VBIF(bool D, size_t Vn, size_t Vd, bool N, bool Q, bool M, size_t Vm) {
|
||||
return BitwiseInstruction<true>(*this, D, Vn, Vd, N, Q, M, Vm, [this](const auto& reg_d, const auto& reg_n, const auto& reg_m) {
|
||||
return ir.VectorOr(ir.VectorAnd(reg_d, reg_m), ir.VectorAnd(reg_n, ir.VectorNot(reg_m)));
|
||||
return ir.VectorOr(ir.VectorAnd(reg_d, reg_m), ir.VectorAndNot(reg_n, reg_m));
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -46,9 +46,8 @@ bool ScalarMultiply(TranslatorVisitor& v, bool Q, bool D, size_t sz, size_t Vn,
|
|||
const auto n = ToVector(Q, Vn, N);
|
||||
const auto [m, index] = GetScalarLocation(esize, M, Vm);
|
||||
|
||||
const auto scalar = v.ir.VectorGetElement(esize, v.ir.GetVector(m), index);
|
||||
const auto reg_n = v.ir.GetVector(n);
|
||||
const auto reg_m = v.ir.VectorBroadcast(esize, scalar);
|
||||
const auto reg_m = v.ir.VectorBroadcastElement(esize, v.ir.GetVector(m), index);
|
||||
const auto addend = F ? v.ir.FPVectorMul(esize, reg_n, reg_m, false)
|
||||
: v.ir.VectorMultiply(esize, reg_n, reg_m);
|
||||
const auto result = [&] {
|
||||
|
@ -125,9 +124,8 @@ bool ScalarMultiplyReturnHigh(TranslatorVisitor& v, bool Q, bool D, size_t sz, s
|
|||
const auto n = ToVector(Q, Vn, N);
|
||||
const auto [m, index] = GetScalarLocation(esize, M, Vm);
|
||||
|
||||
const auto scalar = v.ir.VectorGetElement(esize, v.ir.GetVector(m), index);
|
||||
const auto reg_n = v.ir.GetVector(n);
|
||||
const auto reg_m = v.ir.VectorBroadcast(esize, scalar);
|
||||
const auto reg_m = v.ir.VectorBroadcastElement(esize, v.ir.GetVector(m), index);
|
||||
const auto result = [&] {
|
||||
const auto tmp = v.ir.VectorSignedSaturatedDoublingMultiply(esize, reg_n, reg_m);
|
||||
|
||||
|
@ -177,9 +175,8 @@ bool TranslatorVisitor::asimd_VQDMULL_scalar(bool D, size_t sz, size_t Vn, size_
|
|||
const auto n = ToVector(false, Vn, N);
|
||||
const auto [m, index] = GetScalarLocation(esize, M, Vm);
|
||||
|
||||
const auto scalar = ir.VectorGetElement(esize, ir.GetVector(m), index);
|
||||
const auto reg_n = ir.GetVector(n);
|
||||
const auto reg_m = ir.VectorBroadcast(esize, scalar);
|
||||
const auto reg_m = ir.VectorBroadcastElement(esize, ir.GetVector(m), index);
|
||||
const auto result = ir.VectorSignedSaturatedDoublingMultiplyLong(esize, reg_n, reg_m);
|
||||
|
||||
ir.SetVector(d, result);
|
||||
|
|
|
@ -177,7 +177,7 @@ bool TranslatorVisitor::asimd_VSRI(bool D, size_t imm6, size_t Vd, bool L, bool
|
|||
|
||||
const auto shifted = ir.VectorLogicalShiftRight(esize, reg_m, static_cast<u8>(shift_amount));
|
||||
const auto mask_vec = ir.VectorBroadcast(esize, I(esize, mask));
|
||||
const auto result = ir.VectorOr(ir.VectorAnd(reg_d, ir.VectorNot(mask_vec)), shifted);
|
||||
const auto result = ir.VectorOr(ir.VectorAndNot(reg_d, mask_vec), shifted);
|
||||
|
||||
ir.SetVector(d, result);
|
||||
return true;
|
||||
|
@ -203,7 +203,7 @@ bool TranslatorVisitor::asimd_VSLI(bool D, size_t imm6, size_t Vd, bool L, bool
|
|||
|
||||
const auto shifted = ir.VectorLogicalShiftLeft(esize, reg_m, static_cast<u8>(shift_amount));
|
||||
const auto mask_vec = ir.VectorBroadcast(esize, I(esize, mask));
|
||||
const auto result = ir.VectorOr(ir.VectorAnd(reg_d, ir.VectorNot(mask_vec)), shifted);
|
||||
const auto result = ir.VectorOr(ir.VectorAndNot(reg_d, mask_vec), shifted);
|
||||
|
||||
ir.SetVector(d, result);
|
||||
return true;
|
||||
|
|
|
@ -250,7 +250,7 @@ bool TranslatorVisitor::arm_BIC_imm(Cond cond, bool S, Reg n, Reg d, int rotate,
|
|||
}
|
||||
|
||||
const auto imm_carry = ArmExpandImm_C(rotate, imm8, ir.GetCFlag());
|
||||
const auto result = ir.And(ir.GetRegister(n), ir.Not(ir.Imm32(imm_carry.imm32)));
|
||||
const auto result = ir.AndNot(ir.GetRegister(n), ir.Imm32(imm_carry.imm32));
|
||||
if (d == Reg::PC) {
|
||||
if (S) {
|
||||
// This is UNPREDICTABLE when in user-mode.
|
||||
|
@ -280,7 +280,7 @@ bool TranslatorVisitor::arm_BIC_reg(Cond cond, bool S, Reg n, Reg d, Imm<5> imm5
|
|||
|
||||
const auto carry_in = ir.GetCFlag();
|
||||
const auto shifted = EmitImmShift(ir.GetRegister(m), shift, imm5, carry_in);
|
||||
const auto result = ir.And(ir.GetRegister(n), ir.Not(shifted.result));
|
||||
const auto result = ir.AndNot(ir.GetRegister(n), shifted.result);
|
||||
if (d == Reg::PC) {
|
||||
if (S) {
|
||||
// This is UNPREDICTABLE when in user-mode.
|
||||
|
@ -315,7 +315,7 @@ bool TranslatorVisitor::arm_BIC_rsr(Cond cond, bool S, Reg n, Reg d, Reg s, Shif
|
|||
const auto shift_n = ir.LeastSignificantByte(ir.GetRegister(s));
|
||||
const auto carry_in = ir.GetCFlag();
|
||||
const auto shifted = EmitRegShift(ir.GetRegister(m), shift, shift_n, carry_in);
|
||||
const auto result = ir.And(ir.GetRegister(n), ir.Not(shifted.result));
|
||||
const auto result = ir.AndNot(ir.GetRegister(n), shifted.result);
|
||||
|
||||
ir.SetRegister(d, result);
|
||||
if (S) {
|
||||
|
|
|
@ -356,7 +356,7 @@ bool TranslatorVisitor::thumb16_MUL_reg(Reg n, Reg d_m) {
|
|||
bool TranslatorVisitor::thumb16_BIC_reg(Reg m, Reg d_n) {
|
||||
const Reg d = d_n;
|
||||
const Reg n = d_n;
|
||||
const auto result = ir.And(ir.GetRegister(n), ir.Not(ir.GetRegister(m)));
|
||||
const auto result = ir.AndNot(ir.GetRegister(n), ir.GetRegister(m));
|
||||
|
||||
ir.SetRegister(d, result);
|
||||
if (!ir.current_location.IT().IsInITBlock()) {
|
||||
|
|
|
@ -45,7 +45,7 @@ bool TranslatorVisitor::thumb32_BIC_imm(Imm<1> i, bool S, Reg n, Imm<3> imm3, Re
|
|||
}
|
||||
|
||||
const auto imm_carry = ThumbExpandImm_C(i, imm3, imm8, ir.GetCFlag());
|
||||
const auto result = ir.And(ir.GetRegister(n), ir.Not(ir.Imm32(imm_carry.imm32)));
|
||||
const auto result = ir.AndNot(ir.GetRegister(n), ir.Imm32(imm_carry.imm32));
|
||||
|
||||
ir.SetRegister(d, result);
|
||||
if (S) {
|
||||
|
|
|
@ -45,7 +45,7 @@ bool TranslatorVisitor::thumb32_BIC_reg(bool S, Reg n, Imm<3> imm3, Reg d, Imm<2
|
|||
}
|
||||
|
||||
const auto shifted = EmitImmShift(ir.GetRegister(m), type, imm3, imm2, ir.GetCFlag());
|
||||
const auto result = ir.And(ir.GetRegister(n), ir.Not(shifted.result));
|
||||
const auto result = ir.AndNot(ir.GetRegister(n), shifted.result);
|
||||
ir.SetRegister(d, result);
|
||||
if (S) {
|
||||
ir.SetNFlag(ir.MostSignificantBit(result));
|
||||
|
|
|
@ -128,8 +128,8 @@ bool TranslatorVisitor::BIC_shift(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Re
|
|||
const u8 shift_amount = imm6.ZeroExtend<u8>();
|
||||
|
||||
const auto operand1 = X(datasize, Rn);
|
||||
const auto operand2 = ir.Not(ShiftReg(datasize, Rm, shift, ir.Imm8(shift_amount)));
|
||||
const auto result = ir.And(operand1, operand2);
|
||||
const auto operand2 = ShiftReg(datasize, Rm, shift, ir.Imm8(shift_amount));
|
||||
const auto result = ir.AndNot(operand1, operand2);
|
||||
|
||||
X(datasize, Rd, result);
|
||||
return true;
|
||||
|
@ -225,8 +225,8 @@ bool TranslatorVisitor::BICS(bool sf, Imm<2> shift, Reg Rm, Imm<6> imm6, Reg Rn,
|
|||
const u8 shift_amount = imm6.ZeroExtend<u8>();
|
||||
|
||||
const auto operand1 = X(datasize, Rn);
|
||||
const auto operand2 = ir.Not(ShiftReg(datasize, Rm, shift, ir.Imm8(shift_amount)));
|
||||
const auto result = ir.And(operand1, operand2);
|
||||
const auto operand2 = ShiftReg(datasize, Rm, shift, ir.Imm8(shift_amount));
|
||||
const auto result = ir.AndNot(operand1, operand2);
|
||||
|
||||
ir.SetNZCV(ir.NZCVFrom(result));
|
||||
X(datasize, Rd, result);
|
||||
|
|
|
@ -41,8 +41,7 @@ bool TranslatorVisitor::DUP_elt_2(bool Q, Imm<5> imm5, Vec Vn, Vec Vd) {
|
|||
const size_t datasize = Q ? 128 : 64;
|
||||
|
||||
const IR::U128 operand = V(idxdsize, Vn);
|
||||
const IR::UAny element = ir.VectorGetElement(esize, operand, index);
|
||||
const IR::U128 result = Q ? ir.VectorBroadcast(esize, element) : ir.VectorBroadcastLower(esize, element);
|
||||
const IR::U128 result = Q ? ir.VectorBroadcastElement(esize, operand, index) : ir.VectorBroadcastElementLower(esize, operand, index);
|
||||
V(datasize, Vd, result);
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@ bool TranslatorVisitor::BCAX(Vec Vm, Vec Va, Vec Vn, Vec Vd) {
|
|||
const IR::U128 m = ir.GetQ(Vm);
|
||||
const IR::U128 n = ir.GetQ(Vn);
|
||||
|
||||
const IR::U128 result = ir.VectorEor(n, ir.VectorAnd(m, ir.VectorNot(a)));
|
||||
const IR::U128 result = ir.VectorEor(n, ir.VectorAndNot(m, a));
|
||||
|
||||
ir.SetQ(Vd, result);
|
||||
return true;
|
||||
|
|
|
@ -65,7 +65,7 @@ bool SM3TT2(TranslatorVisitor& v, Vec Vm, Imm<2> imm2, Vec Vn, Vec Vd, SM3TTVari
|
|||
return v.ir.Eor(after_low_d, v.ir.Eor(top_d, before_top_d));
|
||||
}
|
||||
const IR::U32 tmp1 = v.ir.And(top_d, before_top_d);
|
||||
const IR::U32 tmp2 = v.ir.And(v.ir.Not(top_d), after_low_d);
|
||||
const IR::U32 tmp2 = v.ir.AndNot(after_low_d, top_d);
|
||||
return v.ir.Or(tmp1, tmp2);
|
||||
}();
|
||||
const IR::U32 final_tt2 = v.ir.Add(tt2, v.ir.Add(low_d, v.ir.Add(top_n, wj)));
|
||||
|
|
|
@ -156,7 +156,7 @@ bool ShiftAndInsert(TranslatorVisitor& v, Imm<4> immh, Imm<3> immb, Vec Vn, Vec
|
|||
return v.ir.LogicalShiftLeft(operand1, v.ir.Imm8(shift_amount));
|
||||
}();
|
||||
|
||||
const IR::U64 result = v.ir.Or(v.ir.And(operand2, v.ir.Not(v.ir.Imm64(mask))), shifted);
|
||||
const IR::U64 result = v.ir.Or(v.ir.AndNot(operand2, v.ir.Imm64(mask)), shifted);
|
||||
v.V_scalar(esize, Vd, result);
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -143,8 +143,8 @@ bool TranslatorVisitor::SQRDMULH_elt_1(Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> V
|
|||
const auto [index, Vm] = Combine(size, H, L, M, Vmlo);
|
||||
|
||||
const IR::U128 operand1 = ir.ZeroExtendToQuad(ir.VectorGetElement(esize, V(128, Vn), 0));
|
||||
const IR::UAny operand2 = ir.VectorGetElement(esize, V(128, Vm), index);
|
||||
const IR::U128 broadcast = ir.VectorBroadcast(esize, operand2);
|
||||
const IR::U128 operand2 = V(128, Vm);
|
||||
const IR::U128 broadcast = ir.VectorBroadcastElement(esize, operand2, index);
|
||||
const IR::UpperAndLower multiply = ir.VectorSignedSaturatedDoublingMultiply(esize, operand1, broadcast);
|
||||
const IR::U128 result = ir.VectorAdd(esize, multiply.upper, ir.VectorLogicalShiftRight(esize, multiply.lower, static_cast<u8>(esize - 1)));
|
||||
|
||||
|
@ -161,8 +161,8 @@ bool TranslatorVisitor::SQDMULL_elt_1(Imm<2> size, Imm<1> L, Imm<1> M, Imm<4> Vm
|
|||
const auto [index, Vm] = Combine(size, H, L, M, Vmlo);
|
||||
|
||||
const IR::U128 operand1 = ir.ZeroExtendToQuad(ir.VectorGetElement(esize, V(128, Vn), 0));
|
||||
const IR::UAny operand2 = ir.VectorGetElement(esize, V(128, Vm), index);
|
||||
const IR::U128 broadcast = ir.VectorBroadcast(esize, operand2);
|
||||
const IR::U128 operand2 = V(128, Vm);
|
||||
const IR::U128 broadcast = ir.VectorBroadcastElement(esize, operand2, index);
|
||||
const IR::U128 result = ir.VectorSignedSaturatedDoublingMultiplyLong(esize, operand1, broadcast);
|
||||
|
||||
V(128, Vd, result);
|
||||
|
|
|
@ -50,7 +50,7 @@ IR::U128 SHA512Hash(IREmitter& ir, Vec Vm, Vec Vn, Vec Vd, SHA512HashPart part)
|
|||
const IR::U64 tmp1 = ir.And(a, b);
|
||||
|
||||
if (part == SHA512HashPart::Part1) {
|
||||
const IR::U64 tmp2 = ir.And(ir.Not(a), c);
|
||||
const IR::U64 tmp2 = ir.AndNot(c, a);
|
||||
return ir.Eor(tmp1, tmp2);
|
||||
}
|
||||
|
||||
|
|
|
@ -350,7 +350,7 @@ bool TranslatorVisitor::SRI_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd)
|
|||
|
||||
const IR::U128 shifted = ir.VectorLogicalShiftRight(esize, operand1, shift_amount);
|
||||
const IR::U128 mask_vec = ir.VectorBroadcast(esize, I(esize, mask));
|
||||
const IR::U128 result = ir.VectorOr(ir.VectorAnd(operand2, ir.VectorNot(mask_vec)), shifted);
|
||||
const IR::U128 result = ir.VectorOr(ir.VectorAndNot(operand2, mask_vec), shifted);
|
||||
|
||||
V(datasize, Vd, result);
|
||||
return true;
|
||||
|
@ -376,7 +376,7 @@ bool TranslatorVisitor::SLI_2(bool Q, Imm<4> immh, Imm<3> immb, Vec Vn, Vec Vd)
|
|||
|
||||
const IR::U128 shifted = ir.VectorLogicalShiftLeft(esize, operand1, shift_amount);
|
||||
const IR::U128 mask_vec = ir.VectorBroadcast(esize, I(esize, mask));
|
||||
const IR::U128 result = ir.VectorOr(ir.VectorAnd(operand2, ir.VectorNot(mask_vec)), shifted);
|
||||
const IR::U128 result = ir.VectorOr(ir.VectorAndNot(operand2, mask_vec), shifted);
|
||||
|
||||
V(datasize, Vd, result);
|
||||
return true;
|
||||
|
|
|
@ -773,7 +773,7 @@ bool TranslatorVisitor::BIC_asimd_reg(bool Q, Vec Vm, Vec Vn, Vec Vd) {
|
|||
const IR::U128 operand1 = V(datasize, Vn);
|
||||
const IR::U128 operand2 = V(datasize, Vm);
|
||||
|
||||
IR::U128 result = ir.VectorAnd(operand1, ir.VectorNot(operand2));
|
||||
IR::U128 result = ir.VectorAndNot(operand1, operand2);
|
||||
if (datasize == 64) {
|
||||
result = ir.VectorZeroUpper(result);
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ bool MultiplyByElement(TranslatorVisitor& v, bool Q, Imm<2> size, Imm<1> L, Imm<
|
|||
const size_t datasize = Q ? 128 : 64;
|
||||
|
||||
const IR::U128 operand1 = v.V(datasize, Vn);
|
||||
const IR::U128 operand2 = v.ir.VectorBroadcast(esize, v.ir.VectorGetElement(esize, v.V(idxdsize, Vm), index));
|
||||
const IR::U128 operand2 = v.ir.VectorBroadcastElement(esize, v.V(idxdsize, Vm), index);
|
||||
const IR::U128 operand3 = v.V(datasize, Vd);
|
||||
|
||||
IR::U128 result = v.ir.VectorMultiply(esize, operand1, operand2);
|
||||
|
@ -64,9 +64,8 @@ bool FPMultiplyByElement(TranslatorVisitor& v, bool Q, bool sz, Imm<1> L, Imm<1>
|
|||
const size_t esize = sz ? 64 : 32;
|
||||
const size_t datasize = Q ? 128 : 64;
|
||||
|
||||
-    const IR::UAny element2 = v.ir.VectorGetElement(esize, v.V(idxdsize, Vm), index);
     const IR::U128 operand1 = v.V(datasize, Vn);
-    const IR::U128 operand2 = Q ? v.ir.VectorBroadcast(esize, element2) : v.ir.VectorBroadcastLower(esize, element2);
+    const IR::U128 operand2 = Q ? v.ir.VectorBroadcastElement(esize, v.V(idxdsize, Vm), index) : v.ir.VectorBroadcastElementLower(esize, v.V(idxdsize, Vm), index);
     const IR::U128 operand3 = v.V(datasize, Vd);
 
     const IR::U128 result = [&] {

@@ -93,9 +92,8 @@ bool FPMultiplyByElementHalfPrecision(TranslatorVisitor& v, bool Q, Imm<1> L, Im
     const size_t esize = 16;
     const size_t datasize = Q ? 128 : 64;
 
-    const IR::UAny element2 = v.ir.VectorGetElement(esize, v.V(idxdsize, Vm), index);
     const IR::U128 operand1 = v.V(datasize, Vn);
-    const IR::U128 operand2 = Q ? v.ir.VectorBroadcast(esize, element2) : v.ir.VectorBroadcastLower(esize, element2);
+    const IR::U128 operand2 = Q ? v.ir.VectorBroadcastElement(esize, v.V(idxdsize, Vm), index) : v.ir.VectorBroadcastElementLower(esize, v.V(idxdsize, Vm), index);
     const IR::U128 operand3 = v.V(datasize, Vd);
 
     // TODO: We currently don't implement half-precision paths for

@@ -179,7 +177,7 @@ bool MultiplyLong(TranslatorVisitor& v, bool Q, Imm<2> size, Imm<1> L, Imm<1> M,
 
     const IR::U128 operand1 = v.Vpart(datasize, Vn, Q);
     const IR::U128 operand2 = v.V(idxsize, Vm);
-    const IR::U128 index_vector = v.ir.VectorBroadcast(esize, v.ir.VectorGetElement(esize, operand2, index));
+    const IR::U128 index_vector = v.ir.VectorBroadcastElement(esize, operand2, index);
 
     const IR::U128 result = [&] {
         const auto [extended_op1, extended_index] = extend_operands(operand1, index_vector);

@@ -349,7 +347,7 @@ bool TranslatorVisitor::SQDMULL_elt_2(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, I
 
     const IR::U128 operand1 = Vpart(datasize, Vn, part);
     const IR::U128 operand2 = V(idxsize, Vm);
-    const IR::U128 index_vector = ir.VectorBroadcast(esize, ir.VectorGetElement(esize, operand2, index));
+    const IR::U128 index_vector = ir.VectorBroadcastElement(esize, operand2, index);
     const IR::U128 result = ir.VectorSignedSaturatedDoublingMultiplyLong(esize, operand1, index_vector);
 
     V(128, Vd, result);

@@ -368,7 +366,7 @@ bool TranslatorVisitor::SQDMULH_elt_2(bool Q, Imm<2> size, Imm<1> L, Imm<1> M, I
 
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(idxsize, Vm);
-    const IR::U128 index_vector = ir.VectorBroadcast(esize, ir.VectorGetElement(esize, operand2, index));
+    const IR::U128 index_vector = ir.VectorBroadcastElement(esize, operand2, index);
     const IR::U128 result = ir.VectorSignedSaturatedDoublingMultiply(esize, operand1, index_vector).upper;
 
     V(datasize, Vd, result);

@@ -387,7 +385,7 @@ bool TranslatorVisitor::SQRDMULH_elt_2(bool Q, Imm<2> size, Imm<1> L, Imm<1> M,
 
     const IR::U128 operand1 = V(datasize, Vn);
     const IR::U128 operand2 = V(idxsize, Vm);
-    const IR::U128 index_vector = ir.VectorBroadcast(esize, ir.VectorGetElement(esize, operand2, index));
+    const IR::U128 index_vector = ir.VectorBroadcastElement(esize, operand2, index);
     const IR::UpperAndLower multiply = ir.VectorSignedSaturatedDoublingMultiply(esize, operand1, index_vector);
     const IR::U128 result = ir.VectorAdd(esize, multiply.upper, ir.VectorLogicalShiftRight(esize, multiply.lower, static_cast<u8>(esize - 1)));
@@ -15,7 +15,7 @@ bool TranslatorVisitor::AXFlag() {
     const IR::U32 v = ir.And(nzcv, ir.Imm32(0x10000000));
 
     const IR::U32 new_z = ir.Or(ir.LogicalShiftLeft(v, ir.Imm8(2)), z);
-    const IR::U32 new_c = ir.And(ir.And(c, ir.Not(ir.LogicalShiftLeft(v, ir.Imm8(1)))), ir.Imm32(0x20000000));
+    const IR::U32 new_c = ir.And(ir.AndNot(c, ir.LogicalShiftLeft(v, ir.Imm8(1))), ir.Imm32(0x20000000));
 
     ir.SetNZCVRaw(ir.Or(new_z, new_c));
     return true;

@@ -27,8 +27,8 @@ bool TranslatorVisitor::XAFlag() {
     const IR::U32 z = ir.And(nzcv, ir.Imm32(0x40000000));
     const IR::U32 c = ir.And(nzcv, ir.Imm32(0x20000000));
 
-    const IR::U32 not_z = ir.And(ir.Not(z), ir.Imm32(0x40000000));
-    const IR::U32 not_c = ir.And(ir.Not(c), ir.Imm32(0x20000000));
+    const IR::U32 not_z = ir.AndNot(ir.Imm32(0x40000000), z);
+    const IR::U32 not_c = ir.AndNot(ir.Imm32(0x20000000), c);
 
     const IR::U32 new_n = ir.And(ir.LogicalShiftLeft(not_c, ir.Imm8(2)),
                                  ir.LogicalShiftLeft(not_z, ir.Imm8(1)));
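Editorial note: the two hunks above replace the pattern ir.And(x, ir.Not(y)) with the new ir.AndNot(x, y) helper, which, judging from both call sites, computes x & ~y (keep the first operand, clear whatever bits are set in the second). A minimal, self-contained sketch of that scalar semantics, for reference only and not part of the diff:

    #include <cstdint>

    // Assumed semantics of the new AndNot IR op, inferred from the AXFlag/XAFlag hunks above.
    constexpr std::uint32_t and_not(std::uint32_t a, std::uint32_t b) {
        return a & ~b; // keep the bits of `a` that are clear in `b`
    }

    static_assert(and_not(0x30000000u, 0x10000000u) == 0x20000000u);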
@@ -104,7 +104,10 @@ struct detail {
             }
         }
 
+#ifndef DYNARMIC_IGNORE_ASSERTS
+        // Avoids a MSVC ICE.
         ASSERT(std::all_of(masks.begin(), masks.end(), [](auto m) { return m != 0; }));
+#endif
 
         return std::make_tuple(masks, shifts);
     }
@@ -88,11 +88,8 @@ public:
         return is_executing;
     }
 
-    /**
-     * Debugging: Disassemble all of compiled code.
-     * @return A string containing disassembly of all host machine code produced.
-     */
-    std::string Disassemble() const;
+    /// Debugging: Dump a disassembly all compiled code to the console.
+    void DumpDisassembly() const;
 
 private:
     bool is_executing = false;
@@ -114,11 +114,8 @@ public:
      */
     bool IsExecuting() const;
 
-    /**
-     * Debugging: Disassemble all of compiled code.
-     * @return A string containing disassembly of all host machine code produced.
-     */
-    std::string Disassemble() const;
+    /// Debugging: Dump a disassembly all of compiled code to the console.
+    void DumpDisassembly() const;
 
 private:
     struct Impl;
@@ -317,6 +317,15 @@ U32U64 IREmitter::And(const U32U64& a, const U32U64& b) {
     }
 }
 
+U32U64 IREmitter::AndNot(const U32U64& a, const U32U64& b) {
+    ASSERT(a.GetType() == b.GetType());
+    if (a.GetType() == Type::U32) {
+        return Inst<U32>(Opcode::AndNot32, a, b);
+    } else {
+        return Inst<U64>(Opcode::AndNot64, a, b);
+    }
+}
+
 U32U64 IREmitter::Eor(const U32U64& a, const U32U64& b) {
     ASSERT(a.GetType() == b.GetType());
     if (a.GetType() == Type::U32) {

@@ -958,6 +967,10 @@ U128 IREmitter::VectorAnd(const U128& a, const U128& b) {
     return Inst<U128>(Opcode::VectorAnd, a, b);
 }
 
+U128 IREmitter::VectorAndNot(const U128& a, const U128& b) {
+    return Inst<U128>(Opcode::VectorAndNot, a, b);
+}
+
 U128 IREmitter::VectorArithmeticShiftRight(size_t esize, const U128& a, u8 shift_amount) {
     switch (esize) {
     case 8:

@@ -1012,6 +1025,34 @@ U128 IREmitter::VectorBroadcast(size_t esize, const UAny& a) {
     UNREACHABLE();
 }
 
+U128 IREmitter::VectorBroadcastElementLower(size_t esize, const U128& a, size_t index) {
+    ASSERT_MSG(esize * index < 128, "Invalid index");
+    switch (esize) {
+    case 8:
+        return Inst<U128>(Opcode::VectorBroadcastElementLower8, a, u8(index));
+    case 16:
+        return Inst<U128>(Opcode::VectorBroadcastElementLower16, a, u8(index));
+    case 32:
+        return Inst<U128>(Opcode::VectorBroadcastElementLower32, a, u8(index));
+    }
+    UNREACHABLE();
+}
+
+U128 IREmitter::VectorBroadcastElement(size_t esize, const U128& a, size_t index) {
+    ASSERT_MSG(esize * index < 128, "Invalid index");
+    switch (esize) {
+    case 8:
+        return Inst<U128>(Opcode::VectorBroadcastElement8, a, u8(index));
+    case 16:
+        return Inst<U128>(Opcode::VectorBroadcastElement16, a, u8(index));
+    case 32:
+        return Inst<U128>(Opcode::VectorBroadcastElement32, a, u8(index));
+    case 64:
+        return Inst<U128>(Opcode::VectorBroadcastElement64, a, u8(index));
+    }
+    UNREACHABLE();
+}
+
 U128 IREmitter::VectorCountLeadingZeros(size_t esize, const U128& a) {
     switch (esize) {
     case 8:
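Editorial note: the new VectorBroadcastElement helpers above fold what the A64 translator previously emitted as VectorGetElement followed by VectorBroadcast into a single opcode (see the translator hunks earlier in this diff). As a self-contained illustration of the intended element-broadcast semantics only, not of the dynarmic API:

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // What a 32-bit "broadcast element" does to a 128-bit vector, modelled here as four u32 lanes.
    using Vec128 = std::array<std::uint32_t, 4>;

    constexpr Vec128 broadcast_element_32(const Vec128& v, std::size_t index) {
        return {v[index], v[index], v[index], v[index]};
    }

    static_assert(broadcast_element_32({1, 2, 3, 4}, 2)[0] == 3);
    static_assert(broadcast_element_32({1, 2, 3, 4}, 2)[3] == 3);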
@@ -144,6 +144,7 @@ public:
     U32U64 UnsignedDiv(const U32U64& a, const U32U64& b);
     U32U64 SignedDiv(const U32U64& a, const U32U64& b);
     U32U64 And(const U32U64& a, const U32U64& b);
+    U32U64 AndNot(const U32U64& a, const U32U64& b);
     U32U64 Eor(const U32U64& a, const U32U64& b);
     U32U64 Or(const U32U64& a, const U32U64& b);
     U32U64 Not(const U32U64& a);

@@ -240,10 +241,13 @@ public:
     U128 VectorAbs(size_t esize, const U128& a);
     U128 VectorAdd(size_t esize, const U128& a, const U128& b);
     U128 VectorAnd(const U128& a, const U128& b);
+    U128 VectorAndNot(const U128& a, const U128& b);
     U128 VectorArithmeticShiftRight(size_t esize, const U128& a, u8 shift_amount);
     U128 VectorArithmeticVShift(size_t esize, const U128& a, const U128& b);
     U128 VectorBroadcast(size_t esize, const UAny& a);
     U128 VectorBroadcastLower(size_t esize, const UAny& a);
+    U128 VectorBroadcastElement(size_t esize, const U128& a, size_t index);
+    U128 VectorBroadcastElementLower(size_t esize, const U128& a, size_t index);
     U128 VectorCountLeadingZeros(size_t esize, const U128& a);
     U128 VectorEor(const U128& a, const U128& b);
     U128 VectorDeinterleaveEven(size_t esize, const U128& a, const U128& b);
@@ -155,10 +155,7 @@ bool Inst::IsMemoryReadOrWrite() const {
 bool Inst::ReadsFromCPSR() const {
     switch (op) {
     case Opcode::A32GetCpsr:
-    case Opcode::A32GetNFlag:
-    case Opcode::A32GetZFlag:
     case Opcode::A32GetCFlag:
-    case Opcode::A32GetVFlag:
     case Opcode::A32GetGEFlags:
     case Opcode::A32UpdateUpperLocationDescriptor:
     case Opcode::A64GetCFlag:

@@ -566,6 +563,8 @@ bool Inst::MayGetNZCVFromOp() const {
     case Opcode::Sub64:
     case Opcode::And32:
     case Opcode::And64:
+    case Opcode::AndNot32:
+    case Opcode::AndNot64:
     case Opcode::Eor32:
     case Opcode::Eor64:
     case Opcode::Or32:
externals/dynarmic/src/dynarmic/ir/opcodes.inc (17 changes, vendored)

@@ -1,3 +1,5 @@
+// clang-format off
+
 // opcode name, return type, arg1 type, arg2 type, arg3 type, arg4 type, ...
 
 OPCODE(Void, Void, )

@@ -20,13 +22,10 @@ A32OPC(SetCpsr, Void, U32
 A32OPC(SetCpsrNZCV, Void, NZCV )
 A32OPC(SetCpsrNZCVRaw, Void, U32 )
 A32OPC(SetCpsrNZCVQ, Void, U32 )
-A32OPC(GetNFlag, U1, )
 A32OPC(SetNFlag, Void, U1 )
-A32OPC(GetZFlag, U1, )
 A32OPC(SetZFlag, Void, U1 )
 A32OPC(GetCFlag, U1, )
 A32OPC(SetCFlag, Void, U1 )
-A32OPC(GetVFlag, U1, )
 A32OPC(SetVFlag, Void, U1 )
 A32OPC(OrQFlag, Void, U1 )
 A32OPC(GetGEFlags, U32, )

@@ -141,6 +140,8 @@ OPCODE(SignedDiv32, U32, U32,
 OPCODE(SignedDiv64, U64, U64, U64 )
 OPCODE(And32, U32, U32, U32 )
 OPCODE(And64, U64, U64, U64 )
+OPCODE(AndNot32, U32, U32, U32 )
+OPCODE(AndNot64, U64, U64, U64 )
 OPCODE(Eor32, U32, U32, U32 )
 OPCODE(Eor64, U64, U64, U64 )
 OPCODE(Or32, U32, U32, U32 )

@@ -289,6 +290,7 @@ OPCODE(VectorAdd16, U128, U128
 OPCODE(VectorAdd32, U128, U128, U128 )
 OPCODE(VectorAdd64, U128, U128, U128 )
 OPCODE(VectorAnd, U128, U128, U128 )
+OPCODE(VectorAndNot, U128, U128, U128 )
 OPCODE(VectorArithmeticShiftRight8, U128, U128, U8 )
 OPCODE(VectorArithmeticShiftRight16, U128, U128, U8 )
 OPCODE(VectorArithmeticShiftRight32, U128, U128, U8 )

@@ -304,6 +306,13 @@ OPCODE(VectorBroadcast8, U128, U8
 OPCODE(VectorBroadcast16, U128, U16 )
 OPCODE(VectorBroadcast32, U128, U32 )
 OPCODE(VectorBroadcast64, U128, U64 )
+OPCODE(VectorBroadcastElementLower8, U128, U128, U8 )
+OPCODE(VectorBroadcastElementLower16, U128, U128, U8 )
+OPCODE(VectorBroadcastElementLower32, U128, U128, U8 )
+OPCODE(VectorBroadcastElement8, U128, U128, U8 )
+OPCODE(VectorBroadcastElement16, U128, U128, U8 )
+OPCODE(VectorBroadcastElement32, U128, U128, U8 )
+OPCODE(VectorBroadcastElement64, U128, U128, U8 )
 OPCODE(VectorCountLeadingZeros8, U128, U128 )
 OPCODE(VectorCountLeadingZeros16, U128, U128 )
 OPCODE(VectorCountLeadingZeros32, U128, U128 )

@@ -718,3 +727,5 @@ A32OPC(CoprocGetOneWord, U32, Copr
 A32OPC(CoprocGetTwoWords, U64, CoprocInfo )
 A32OPC(CoprocLoadWords, Void, CoprocInfo, U32 )
 A32OPC(CoprocStoreWords, Void, CoprocInfo, U32 )
+
+// clang-format on
@@ -170,18 +170,10 @@ void A32GetSetElimination(IR::Block& block) {
             do_set(cpsr_info.n, inst->GetArg(0), inst);
             break;
         }
-        case IR::Opcode::A32GetNFlag: {
-            do_get(cpsr_info.n, inst);
-            break;
-        }
         case IR::Opcode::A32SetZFlag: {
             do_set(cpsr_info.z, inst->GetArg(0), inst);
             break;
         }
-        case IR::Opcode::A32GetZFlag: {
-            do_get(cpsr_info.z, inst);
-            break;
-        }
         case IR::Opcode::A32SetCFlag: {
             do_set(cpsr_info.c, inst->GetArg(0), inst);
             break;

@@ -194,10 +186,6 @@ void A32GetSetElimination(IR::Block& block) {
             do_set(cpsr_info.v, inst->GetArg(0), inst);
             break;
         }
-        case IR::Opcode::A32GetVFlag: {
-            do_get(cpsr_info.v, inst);
-            break;
-        }
         case IR::Opcode::A32SetGEFlags: {
             do_set(cpsr_info.ge, inst->GetArg(0), inst);
             break;
externals/dynarmic/tests/A32/fuzz_arm.cpp (4 changes, vendored)

@@ -10,7 +10,7 @@
 #include <tuple>
 #include <vector>
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "../fuzz_util.h"
 #include "../rand_int.h"

@@ -372,7 +372,7 @@ static void RunTestInstance(Dynarmic::A32::Jit& jit,
     fmt::print("\n");
 
     fmt::print("x86_64:\n");
-    fmt::print("{}\n", jit.Disassemble());
+    jit.DumpDisassembly();
 
     fmt::print("Interrupts:\n");
     for (const auto& i : uni_env.interrupts) {
externals/dynarmic/tests/A32/fuzz_thumb.cpp (5 changes, vendored)

@@ -12,7 +12,7 @@
 #include <string_view>
 #include <tuple>
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "../rand_int.h"
 #include "../unicorn_emu/a32_unicorn.h"

@@ -183,7 +183,8 @@ static void RunInstance(size_t run_number, ThumbTestEnv& test_env, A32Unicorn<Th
         Optimization::DeadCodeElimination(ir_block);
         Optimization::VerificationPass(ir_block);
         printf("\n\nIR:\n%s", IR::DumpBlock(ir_block).c_str());
-        printf("\n\nx86_64:\n%s", jit.Disassemble().c_str());
+        printf("\n\nx86_64:\n");
+        jit.DumpDisassembly();
         num_insts += ir_block.CycleCount();
     }
 
@@ -3,7 +3,7 @@
  * SPDX-License-Identifier: 0BSD
  */
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "dynarmic/frontend/A32/disassembler/disassembler.h"
 

@@ -3,7 +3,7 @@
  * SPDX-License-Identifier: 0BSD
  */
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "./testenv.h"
 #include "dynarmic/frontend/A32/location_descriptor.h"

@@ -3,7 +3,7 @@
  * SPDX-License-Identifier: 0BSD
  */
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "./testenv.h"
 #include "dynarmic/common/common_types.h"
externals/dynarmic/tests/A64/a64.cpp (53 changes, vendored)

@@ -3,7 +3,7 @@
  * SPDX-License-Identifier: 0BSD
  */
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "./testenv.h"
 #include "dynarmic/common/fp/fpsr.h"

@@ -675,6 +675,57 @@ TEST_CASE("A64: FMADD", "[a64]") {
     REQUIRE(jit.GetVector(10) == Vector{0x3f059921bf0dbfff, 0x0000000000000000});
 }
 
+TEST_CASE("A64: FMLA.4S(lane)", "[a64]") {
+    A64TestEnv env;
+    A64::Jit jit{A64::UserConfig{&env}};
+
+    env.code_mem.emplace_back(0x4f8f11c0); // FMLA.4S V0, V14, V15[0]
+    env.code_mem.emplace_back(0x4faf11c1); // FMLA.4S V1, V14, V15[1]
+    env.code_mem.emplace_back(0x4f8f19c2); // FMLA.4S V2, V14, V15[2]
+    env.code_mem.emplace_back(0x4faf19c3); // FMLA.4S V3, V14, V15[3]
+    env.code_mem.emplace_back(0x14000000); // B .
+
+    jit.SetPC(0);
+    jit.SetVector(0, {0x3ff00000'3ff00000, 0x00000000'00000000});
+    jit.SetVector(1, {0x3ff00000'3ff00000, 0x00000000'00000000});
+    jit.SetVector(2, {0x3ff00000'3ff00000, 0x00000000'00000000});
+    jit.SetVector(3, {0x3ff00000'3ff00000, 0x00000000'00000000});
+
+    jit.SetVector(14, {0x3ff00000'3ff00000, 0x3ff00000'3ff00000});
+    jit.SetVector(15, {0x3ff00000'40000000, 0x40400000'40800000});
+
+    env.ticks_left = 5;
+    jit.Run();
+
+    REQUIRE(jit.GetVector(0) == Vector{0x40b4000040b40000, 0x4070000040700000});
+    REQUIRE(jit.GetVector(1) == Vector{0x40ac800040ac8000, 0x4061000040610000});
+    REQUIRE(jit.GetVector(2) == Vector{0x4116000041160000, 0x40f0000040f00000});
+    REQUIRE(jit.GetVector(3) == Vector{0x40f0000040f00000, 0x40b4000040b40000});
+}
+
+TEST_CASE("A64: FMUL.4S(lane)", "[a64]") {
+    A64TestEnv env;
+    A64::Jit jit{A64::UserConfig{&env}};
+
+    env.code_mem.emplace_back(0x4f8f91c0); // FMUL.4S V0, V14, V15[0]
+    env.code_mem.emplace_back(0x4faf91c1); // FMUL.4S V1, V14, V15[1]
+    env.code_mem.emplace_back(0x4f8f99c2); // FMUL.4S V2, V14, V15[2]
+    env.code_mem.emplace_back(0x4faf99c3); // FMUL.4S V3, V14, V15[3]
+    env.code_mem.emplace_back(0x14000000); // B .
+
+    jit.SetPC(0);
+    jit.SetVector(14, {0x3ff00000'3ff00000, 0x3ff00000'3ff00000});
+    jit.SetVector(15, {0x3ff00000'40000000, 0x40400000'40800000});
+
+    env.ticks_left = 5;
+    jit.Run();
+
+    REQUIRE(jit.GetVector(0) == Vector{0x4070000040700000, 0x4070000040700000});
+    REQUIRE(jit.GetVector(1) == Vector{0x4061000040610000, 0x4061000040610000});
+    REQUIRE(jit.GetVector(2) == Vector{0x40f0000040f00000, 0x40f0000040f00000});
+    REQUIRE(jit.GetVector(3) == Vector{0x40b4000040b40000, 0x40b4000040b40000});
+}
+
 TEST_CASE("A64: FMLA.4S (denormal)", "[a64]") {
     A64TestEnv env;
     A64::Jit jit{A64::UserConfig{&env}};
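Editorial note: the 64-bit constants packed into the vectors above are pairs of IEEE-754 single-precision values (0x3ff00000 is 1.875f, 0x40000000 is 2.0f, 0x40400000 is 3.0f, 0x40800000 is 4.0f), so the expected results can be checked by hand. A self-contained sanity check of that reading (assumes a C++20 compiler for std::bit_cast; not part of the diff):

    #include <bit>
    #include <cstdint>

    static_assert(std::bit_cast<float>(std::uint32_t{0x40800000}) == 4.0f);
    static_assert(std::bit_cast<float>(std::uint32_t{0x3ff00000}) == 1.875f);
    // One of the FMUL products: 1.875f * 4.0f == 7.5f, whose bit pattern 0x40f00000
    // appears in the expected vectors above.
    static_assert(std::bit_cast<float>(std::uint32_t{0x3ff00000}) *
                      std::bit_cast<float>(std::uint32_t{0x40800000}) ==
                  std::bit_cast<float>(std::uint32_t{0x40f00000}));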
@@ -8,7 +8,7 @@
 #include <string>
 #include <vector>
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "../fuzz_util.h"
 #include "../rand_int.h"

@@ -232,7 +232,7 @@ static void RunTestInstance(Dynarmic::A64::Jit& jit, A64Unicorn& uni, A64TestEnv
         }
         const auto uni_vecs = uni.GetVectors();
         for (size_t i = 0; i < vecs.size(); ++i) {
-            fmt::print("{:3s}: {}{} {}{} {}\n", A64::VecToString(static_cast<A64::Vec>(i)),
+            fmt::print("{:3s}: {:016x}{:016x} {:016x}{:016x} {}\n", A64::VecToString(static_cast<A64::Vec>(i)),
                        uni_vecs[i][1], uni_vecs[i][0],
                        jit.GetVectors()[i][1], jit.GetVectors()[i][0],
                        uni_vecs[i] != jit.GetVectors()[i] ? "*" : "");

@@ -276,7 +276,7 @@ static void RunTestInstance(Dynarmic::A64::Jit& jit, A64Unicorn& uni, A64TestEnv
     fmt::print("{}\n", IR::DumpBlock(ir_block));
 
     fmt::print("x86_64:\n");
-    fmt::print("{}\n", jit.Disassemble());
+    jit.DumpDisassembly();
 
     fmt::print("Interrupts:\n");
     for (auto& i : uni_env.interrupts) {
@@ -3,7 +3,7 @@
  * SPDX-License-Identifier: 0BSD
  */
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "./testenv.h"
 #include "dynarmic/interface/A64/a64.h"

@@ -5,7 +5,7 @@
 
 #include <array>
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "../rand_int.h"
 #include "../unicorn_emu/a64_unicorn.h"
externals/dynarmic/tests/cpu_info.cpp (4 changes, vendored)

@@ -6,8 +6,8 @@
 #include <array>
 #include <utility>
 
-#include <catch.hpp>
-#include <xbyak_util.h>
+#include <catch2/catch.hpp>
+#include <xbyak/xbyak_util.h>
 
 TEST_CASE("Host CPU supports", "[a64]") {
     Xbyak::util::Cpu cpu_info;
externals/dynarmic/tests/decoder_tests.cpp (2 changes, vendored)

@@ -7,7 +7,7 @@
 #include <iomanip>
 #include <iostream>
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "dynarmic/common/assert.h"
 #include "dynarmic/frontend/A32/decoder/asimd.h"
externals/dynarmic/tests/fp/FPToFixed.cpp (2 changes, vendored)

@@ -6,7 +6,7 @@
 #include <tuple>
 #include <vector>
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 
 #include "../rand_int.h"
 #include "dynarmic/common/common_types.h"
|
@ -6,7 +6,7 @@
|
|||
#include <tuple>
|
||||
#include <vector>
|
||||
|
||||
#include <catch.hpp>
|
||||
#include <catch2/catch.hpp>
|
||||
|
||||
#include "../rand_int.h"
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
#include <tuple>
|
||||
#include <vector>
|
||||
|
||||
#include <catch.hpp>
|
||||
#include <catch2/catch.hpp>
|
||||
|
||||
#include "../rand_int.h"
|
||||
#include "dynarmic/common/common_types.h"
|
||||
|
|
externals/dynarmic/tests/main.cpp (2 changes, vendored)

@@ -4,4 +4,4 @@
  */
 
 #define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file
-#include <catch.hpp>
+#include <catch2/catch.hpp>
externals/dynarmic/tests/rsqrt_test.cpp (2 changes, vendored)

@@ -3,7 +3,7 @@
  * SPDX-License-Identifier: 0BSD
  */
 
-#include <catch.hpp>
+#include <catch2/catch.hpp>
 #include <fmt/printf.h>
 
 #include "dynarmic/common/common_types.h"
@ -2,13 +2,9 @@
|
|||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <climits>
|
||||
#include <condition_variable>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
|
@ -16,28 +12,173 @@
|
|||
#include <windows.h> // For OutputDebugStringW
|
||||
#endif
|
||||
|
||||
#include "common/assert.h"
|
||||
#include "common/fs/file.h"
|
||||
#include "common/fs/fs.h"
|
||||
#include "common/fs/fs_paths.h"
|
||||
#include "common/fs/path_util.h"
|
||||
#include "common/literals.h"
|
||||
|
||||
#include "common/logging/backend.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/logging/text_formatter.h"
|
||||
#include "common/settings.h"
|
||||
#ifdef _WIN32
|
||||
#include "common/string_util.h"
|
||||
#endif
|
||||
#include "common/threadsafe_queue.h"
|
||||
|
||||
namespace Common::Log {
|
||||
|
||||
namespace {
|
||||
|
||||
/**
|
||||
* Interface for logging backends.
|
||||
*/
|
||||
class Backend {
|
||||
public:
|
||||
virtual ~Backend() = default;
|
||||
|
||||
virtual void Write(const Entry& entry) = 0;
|
||||
|
||||
virtual void EnableForStacktrace() = 0;
|
||||
|
||||
virtual void Flush() = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* Backend that writes to stderr and with color
|
||||
*/
|
||||
class ColorConsoleBackend final : public Backend {
|
||||
public:
|
||||
explicit ColorConsoleBackend() = default;
|
||||
|
||||
~ColorConsoleBackend() override = default;
|
||||
|
||||
void Write(const Entry& entry) override {
|
||||
if (enabled.load(std::memory_order_relaxed)) {
|
||||
PrintColoredMessage(entry);
|
||||
}
|
||||
}
|
||||
|
||||
void Flush() override {
|
||||
// stderr shouldn't be buffered
|
||||
}
|
||||
|
||||
void EnableForStacktrace() override {
|
||||
enabled = true;
|
||||
}
|
||||
|
||||
void SetEnabled(bool enabled_) {
|
||||
enabled = enabled_;
|
||||
}
|
||||
|
||||
private:
|
||||
std::atomic_bool enabled{false};
|
||||
};
|
||||
|
||||
/**
|
||||
* Backend that writes to a file passed into the constructor
|
||||
*/
|
||||
class FileBackend final : public Backend {
|
||||
public:
|
||||
explicit FileBackend(const std::filesystem::path& filename) {
|
||||
auto old_filename = filename;
|
||||
old_filename += ".old.txt";
|
||||
|
||||
// Existence checks are done within the functions themselves.
|
||||
// We don't particularly care if these succeed or not.
|
||||
static_cast<void>(FS::RemoveFile(old_filename));
|
||||
static_cast<void>(FS::RenameFile(filename, old_filename));
|
||||
|
||||
file = std::make_unique<FS::IOFile>(filename, FS::FileAccessMode::Write,
|
||||
FS::FileType::TextFile);
|
||||
}
|
||||
|
||||
~FileBackend() override = default;
|
||||
|
||||
void Write(const Entry& entry) override {
|
||||
if (!enabled) {
|
||||
return;
|
||||
}
|
||||
|
||||
bytes_written += file->WriteString(FormatLogMessage(entry).append(1, '\n'));
|
||||
|
||||
using namespace Common::Literals;
|
||||
// Prevent logs from exceeding a set maximum size in the event that log entries are spammed.
|
||||
const auto write_limit = Settings::values.extended_logging ? 1_GiB : 100_MiB;
|
||||
const bool write_limit_exceeded = bytes_written > write_limit;
|
||||
if (entry.log_level >= Level::Error || write_limit_exceeded) {
|
||||
if (write_limit_exceeded) {
|
||||
// Stop writing after the write limit is exceeded.
|
||||
// Don't close the file so we can print a stacktrace if necessary
|
||||
enabled = false;
|
||||
}
|
||||
file->Flush();
|
||||
}
|
||||
}
|
||||
|
||||
void Flush() override {
|
||||
file->Flush();
|
||||
}
|
||||
|
||||
void EnableForStacktrace() override {
|
||||
enabled = true;
|
||||
bytes_written = 0;
|
||||
}
|
||||
|
||||
private:
|
||||
std::unique_ptr<FS::IOFile> file;
|
||||
bool enabled = true;
|
||||
std::size_t bytes_written = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* Backend that writes to Visual Studio's output window
|
||||
*/
|
||||
class DebuggerBackend final : public Backend {
|
||||
public:
|
||||
explicit DebuggerBackend() = default;
|
||||
|
||||
~DebuggerBackend() override = default;
|
||||
|
||||
void Write(const Entry& entry) override {
|
||||
#ifdef _WIN32
|
||||
::OutputDebugStringW(UTF8ToUTF16W(FormatLogMessage(entry).append(1, '\n')).c_str());
|
||||
#endif
|
||||
}
|
||||
|
||||
void Flush() override {}
|
||||
|
||||
void EnableForStacktrace() override {}
|
||||
};
|
||||
|
||||
bool initialization_in_progress_suppress_logging = false;
|
||||
|
||||
/**
|
||||
* Static state as a singleton.
|
||||
*/
|
||||
class Impl {
|
||||
public:
|
||||
static Impl& Instance() {
|
||||
static Impl backend;
|
||||
return backend;
|
||||
if (!instance) {
|
||||
abort();
|
||||
}
|
||||
return *instance;
|
||||
}
|
||||
|
||||
static void Initialize() {
|
||||
if (instance) {
|
||||
abort();
|
||||
}
|
||||
using namespace Common::FS;
|
||||
initialization_in_progress_suppress_logging = true;
|
||||
const auto& log_dir = GetYuzuPath(YuzuPath::LogDir);
|
||||
void(CreateDir(log_dir));
|
||||
Filter filter;
|
||||
filter.ParseFilterString(Settings::values.log_filter.GetValue());
|
||||
instance = std::unique_ptr<Impl, decltype(&Deleter)>(new Impl(log_dir / LOG_FILE, filter),
|
||||
Deleter);
|
||||
initialization_in_progress_suppress_logging = false;
|
||||
}
|
||||
|
||||
Impl(const Impl&) = delete;
|
||||
|
@ -46,74 +187,54 @@ public:
|
|||
Impl(Impl&&) = delete;
|
||||
Impl& operator=(Impl&&) = delete;
|
||||
|
||||
void PushEntry(Class log_class, Level log_level, const char* filename, unsigned int line_num,
|
||||
const char* function, std::string message) {
|
||||
message_queue.Push(
|
||||
CreateEntry(log_class, log_level, filename, line_num, function, std::move(message)));
|
||||
}
|
||||
|
||||
void AddBackend(std::unique_ptr<Backend> backend) {
|
||||
std::lock_guard lock{writing_mutex};
|
||||
backends.push_back(std::move(backend));
|
||||
}
|
||||
|
||||
void RemoveBackend(std::string_view backend_name) {
|
||||
std::lock_guard lock{writing_mutex};
|
||||
|
||||
std::erase_if(backends, [&backend_name](const auto& backend) {
|
||||
return backend_name == backend->GetName();
|
||||
});
|
||||
}
|
||||
|
||||
const Filter& GetGlobalFilter() const {
|
||||
return filter;
|
||||
}
|
||||
|
||||
void SetGlobalFilter(const Filter& f) {
|
||||
filter = f;
|
||||
}
|
||||
|
||||
Backend* GetBackend(std::string_view backend_name) {
|
||||
const auto it =
|
||||
std::find_if(backends.begin(), backends.end(),
|
||||
[&backend_name](const auto& i) { return backend_name == i->GetName(); });
|
||||
if (it == backends.end())
|
||||
return nullptr;
|
||||
return it->get();
|
||||
void SetColorConsoleBackendEnabled(bool enabled) {
|
||||
color_console_backend.SetEnabled(enabled);
|
||||
}
|
||||
|
||||
void PushEntry(Class log_class, Level log_level, const char* filename, unsigned int line_num,
|
||||
const char* function, std::string message) {
|
||||
if (!filter.CheckMessage(log_class, log_level))
|
||||
return;
|
||||
const Entry& entry =
|
||||
CreateEntry(log_class, log_level, filename, line_num, function, std::move(message));
|
||||
message_queue.Push(entry);
|
||||
}
|
||||
|
||||
private:
|
||||
Impl() {
|
||||
backend_thread = std::thread([&] {
|
||||
Entry entry;
|
||||
auto write_logs = [&](Entry& e) {
|
||||
std::lock_guard lock{writing_mutex};
|
||||
for (const auto& backend : backends) {
|
||||
backend->Write(e);
|
||||
}
|
||||
};
|
||||
while (true) {
|
||||
entry = message_queue.PopWait();
|
||||
if (entry.final_entry) {
|
||||
break;
|
||||
}
|
||||
write_logs(entry);
|
||||
}
|
||||
|
||||
// Drain the logging queue. Only writes out up to MAX_LOGS_TO_WRITE to prevent a
|
||||
// case where a system is repeatedly spamming logs even on close.
|
||||
const int MAX_LOGS_TO_WRITE = filter.IsDebug() ? INT_MAX : 100;
|
||||
int logs_written = 0;
|
||||
while (logs_written++ < MAX_LOGS_TO_WRITE && message_queue.Pop(entry)) {
|
||||
write_logs(entry);
|
||||
}
|
||||
});
|
||||
}
|
||||
Impl(const std::filesystem::path& file_backend_filename, const Filter& filter_)
|
||||
: filter{filter_}, file_backend{file_backend_filename}, backend_thread{std::thread([this] {
|
||||
Common::SetCurrentThreadName("yuzu:Log");
|
||||
Entry entry;
|
||||
const auto write_logs = [this, &entry]() {
|
||||
ForEachBackend([&entry](Backend& backend) { backend.Write(entry); });
|
||||
};
|
||||
while (true) {
|
||||
entry = message_queue.PopWait();
|
||||
if (entry.final_entry) {
|
||||
break;
|
||||
}
|
||||
write_logs();
|
||||
}
|
||||
// Drain the logging queue. Only writes out up to MAX_LOGS_TO_WRITE to prevent a
|
||||
// case where a system is repeatedly spamming logs even on close.
|
||||
int max_logs_to_write = filter.IsDebug() ? INT_MAX : 100;
|
||||
while (max_logs_to_write-- && message_queue.Pop(entry)) {
|
||||
write_logs();
|
||||
}
|
||||
})} {}
|
||||
|
||||
~Impl() {
|
||||
Entry entry;
|
||||
entry.final_entry = true;
|
||||
message_queue.Push(entry);
|
||||
StopBackendThread();
|
||||
}
|
||||
|
||||
void StopBackendThread() {
|
||||
Entry stop_entry{};
|
||||
stop_entry.final_entry = true;
|
||||
message_queue.Push(stop_entry);
|
||||
backend_thread.join();
|
||||
}
|
||||
|
||||
|
@ -135,100 +256,51 @@ private:
|
|||
};
|
||||
}
|
||||
|
||||
std::mutex writing_mutex;
|
||||
std::thread backend_thread;
|
||||
std::vector<std::unique_ptr<Backend>> backends;
|
||||
MPSCQueue<Entry> message_queue;
|
||||
void ForEachBackend(auto lambda) {
|
||||
lambda(static_cast<Backend&>(debugger_backend));
|
||||
lambda(static_cast<Backend&>(color_console_backend));
|
||||
lambda(static_cast<Backend&>(file_backend));
|
||||
}
|
||||
|
||||
static void Deleter(Impl* ptr) {
|
||||
delete ptr;
|
||||
}
|
||||
|
||||
static inline std::unique_ptr<Impl, decltype(&Deleter)> instance{nullptr, Deleter};
|
||||
|
||||
Filter filter;
|
||||
DebuggerBackend debugger_backend{};
|
||||
ColorConsoleBackend color_console_backend{};
|
||||
FileBackend file_backend;
|
||||
|
||||
std::thread backend_thread;
|
||||
MPSCQueue<Entry> message_queue{};
|
||||
std::chrono::steady_clock::time_point time_origin{std::chrono::steady_clock::now()};
|
||||
};
|
||||
} // namespace
|
||||
|
||||
ConsoleBackend::~ConsoleBackend() = default;
|
||||
|
||||
void ConsoleBackend::Write(const Entry& entry) {
|
||||
PrintMessage(entry);
|
||||
void Initialize() {
|
||||
Impl::Initialize();
|
||||
}
|
||||
|
||||
ColorConsoleBackend::~ColorConsoleBackend() = default;
|
||||
|
||||
void ColorConsoleBackend::Write(const Entry& entry) {
|
||||
PrintColoredMessage(entry);
|
||||
}
|
||||
|
||||
FileBackend::FileBackend(const std::filesystem::path& filename) {
|
||||
auto old_filename = filename;
|
||||
old_filename += ".old.txt";
|
||||
|
||||
// Existence checks are done within the functions themselves.
|
||||
// We don't particularly care if these succeed or not.
|
||||
FS::RemoveFile(old_filename);
|
||||
void(FS::RenameFile(filename, old_filename));
|
||||
|
||||
file =
|
||||
std::make_unique<FS::IOFile>(filename, FS::FileAccessMode::Write, FS::FileType::TextFile);
|
||||
}
|
||||
|
||||
FileBackend::~FileBackend() = default;
|
||||
|
||||
void FileBackend::Write(const Entry& entry) {
|
||||
if (!file->IsOpen()) {
|
||||
return;
|
||||
}
|
||||
|
||||
using namespace Common::Literals;
|
||||
// Prevent logs from exceeding a set maximum size in the event that log entries are spammed.
|
||||
constexpr std::size_t MAX_BYTES_WRITTEN = 100_MiB;
|
||||
constexpr std::size_t MAX_BYTES_WRITTEN_EXTENDED = 1_GiB;
|
||||
|
||||
const bool write_limit_exceeded =
|
||||
bytes_written > MAX_BYTES_WRITTEN_EXTENDED ||
|
||||
(bytes_written > MAX_BYTES_WRITTEN && !Settings::values.extended_logging);
|
||||
|
||||
// Close the file after the write limit is exceeded.
|
||||
if (write_limit_exceeded) {
|
||||
file->Close();
|
||||
return;
|
||||
}
|
||||
|
||||
bytes_written += file->WriteString(FormatLogMessage(entry).append(1, '\n'));
|
||||
if (entry.log_level >= Level::Error) {
|
||||
file->Flush();
|
||||
}
|
||||
}
|
||||
|
||||
DebuggerBackend::~DebuggerBackend() = default;
|
||||
|
||||
void DebuggerBackend::Write(const Entry& entry) {
|
||||
#ifdef _WIN32
|
||||
::OutputDebugStringW(UTF8ToUTF16W(FormatLogMessage(entry).append(1, '\n')).c_str());
|
||||
#endif
|
||||
void DisableLoggingInTests() {
|
||||
initialization_in_progress_suppress_logging = true;
|
||||
}
|
||||
|
||||
void SetGlobalFilter(const Filter& filter) {
|
||||
Impl::Instance().SetGlobalFilter(filter);
|
||||
}
|
||||
|
||||
void AddBackend(std::unique_ptr<Backend> backend) {
|
||||
Impl::Instance().AddBackend(std::move(backend));
|
||||
}
|
||||
|
||||
void RemoveBackend(std::string_view backend_name) {
|
||||
Impl::Instance().RemoveBackend(backend_name);
|
||||
}
|
||||
|
||||
Backend* GetBackend(std::string_view backend_name) {
|
||||
return Impl::Instance().GetBackend(backend_name);
|
||||
void SetColorConsoleBackendEnabled(bool enabled) {
|
||||
Impl::Instance().SetColorConsoleBackendEnabled(enabled);
|
||||
}
|
||||
|
||||
void FmtLogMessageImpl(Class log_class, Level log_level, const char* filename,
|
||||
unsigned int line_num, const char* function, const char* format,
|
||||
const fmt::format_args& args) {
|
||||
auto& instance = Impl::Instance();
|
||||
const auto& filter = instance.GetGlobalFilter();
|
||||
if (!filter.CheckMessage(log_class, log_level))
|
||||
return;
|
||||
|
||||
instance.PushEntry(log_class, log_level, filename, line_num, function,
|
||||
fmt::vformat(format, args));
|
||||
if (!initialization_in_progress_suppress_logging) {
|
||||
Impl::Instance().PushEntry(log_class, log_level, filename, line_num, function,
|
||||
fmt::vformat(format, args));
|
||||
}
|
||||
}
|
||||
} // namespace Common::Log
|
||||
|
|
|
@ -5,120 +5,21 @@
|
|||
#pragma once
|
||||
|
||||
#include <filesystem>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include "common/logging/filter.h"
|
||||
#include "common/logging/log.h"
|
||||
|
||||
namespace Common::FS {
|
||||
class IOFile;
|
||||
}
|
||||
|
||||
namespace Common::Log {
|
||||
|
||||
class Filter;
|
||||
|
||||
/**
|
||||
* Interface for logging backends. As loggers can be created and removed at runtime, this can be
|
||||
* used by a frontend for adding a custom logging backend as needed
|
||||
*/
|
||||
class Backend {
|
||||
public:
|
||||
virtual ~Backend() = default;
|
||||
/// Initializes the logging system. This should be the first thing called in main.
|
||||
void Initialize();
|
||||
|
||||
virtual void SetFilter(const Filter& new_filter) {
|
||||
filter = new_filter;
|
||||
}
|
||||
virtual const char* GetName() const = 0;
|
||||
virtual void Write(const Entry& entry) = 0;
|
||||
|
||||
private:
|
||||
Filter filter;
|
||||
};
|
||||
void DisableLoggingInTests();
|
||||
|
||||
/**
|
||||
* Backend that writes to stderr without any color commands
|
||||
*/
|
||||
class ConsoleBackend : public Backend {
|
||||
public:
|
||||
~ConsoleBackend() override;
|
||||
|
||||
static const char* Name() {
|
||||
return "console";
|
||||
}
|
||||
const char* GetName() const override {
|
||||
return Name();
|
||||
}
|
||||
void Write(const Entry& entry) override;
|
||||
};
|
||||
|
||||
/**
|
||||
* Backend that writes to stderr and with color
|
||||
*/
|
||||
class ColorConsoleBackend : public Backend {
|
||||
public:
|
||||
~ColorConsoleBackend() override;
|
||||
|
||||
static const char* Name() {
|
||||
return "color_console";
|
||||
}
|
||||
|
||||
const char* GetName() const override {
|
||||
return Name();
|
||||
}
|
||||
void Write(const Entry& entry) override;
|
||||
};
|
||||
|
||||
/**
|
||||
* Backend that writes to a file passed into the constructor
|
||||
*/
|
||||
class FileBackend : public Backend {
|
||||
public:
|
||||
explicit FileBackend(const std::filesystem::path& filename);
|
||||
~FileBackend() override;
|
||||
|
||||
static const char* Name() {
|
||||
return "file";
|
||||
}
|
||||
|
||||
const char* GetName() const override {
|
||||
return Name();
|
||||
}
|
||||
|
||||
void Write(const Entry& entry) override;
|
||||
|
||||
private:
|
||||
std::unique_ptr<FS::IOFile> file;
|
||||
std::size_t bytes_written = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* Backend that writes to Visual Studio's output window
|
||||
*/
|
||||
class DebuggerBackend : public Backend {
|
||||
public:
|
||||
~DebuggerBackend() override;
|
||||
|
||||
static const char* Name() {
|
||||
return "debugger";
|
||||
}
|
||||
const char* GetName() const override {
|
||||
return Name();
|
||||
}
|
||||
void Write(const Entry& entry) override;
|
||||
};
|
||||
|
||||
void AddBackend(std::unique_ptr<Backend> backend);
|
||||
|
||||
void RemoveBackend(std::string_view backend_name);
|
||||
|
||||
Backend* GetBackend(std::string_view backend_name);
|
||||
|
||||
/**
|
||||
* The global filter will prevent any messages from even being processed if they are filtered. Each
|
||||
* backend can have a filter, but if the level is lower than the global filter, the backend will
|
||||
* never get the message
|
||||
* The global filter will prevent any messages from even being processed if they are filtered.
|
||||
*/
|
||||
void SetGlobalFilter(const Filter& filter);
|
||||
|
||||
void SetColorConsoleBackendEnabled(bool enabled);
|
||||
} // namespace Common::Log
|
|
@ -4,175 +4,242 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
// a simple lockless thread-safe,
|
||||
// single reader, single writer queue
|
||||
|
||||
#include <atomic>
|
||||
#include <condition_variable>
|
||||
#include <cstddef>
|
||||
#include <iostream>
|
||||
#include <mutex>
|
||||
#include <utility>
|
||||
#include <optional>
|
||||
|
||||
namespace Common {
|
||||
|
||||
/// a more foolproof multiple reader, multiple writer queue
|
||||
template <typename T>
|
||||
class SPSCQueue {
|
||||
class MPMCQueue {
|
||||
#define ABORT() \
|
||||
do { \
|
||||
std::cerr << __FILE__ " ERR " << __LINE__ << std::endl; \
|
||||
abort(); \
|
||||
} while (0)
|
||||
public:
|
||||
SPSCQueue() {
|
||||
write_ptr = read_ptr = new ElementPtr();
|
||||
}
|
||||
~SPSCQueue() {
|
||||
// this will empty out the whole queue
|
||||
delete read_ptr;
|
||||
}
|
||||
|
||||
[[nodiscard]] std::size_t Size() const {
|
||||
return size.load();
|
||||
}
|
||||
|
||||
[[nodiscard]] bool Empty() const {
|
||||
return Size() == 0;
|
||||
}
|
||||
|
||||
[[nodiscard]] T& Front() const {
|
||||
return read_ptr->current;
|
||||
~MPMCQueue() {
|
||||
Clear();
|
||||
if (waiting || head || tail) {
|
||||
// Remove all the ABORT() after 1 month merged without problems
|
||||
ABORT();
|
||||
}
|
||||
}
|
||||
|
||||
template <typename Arg>
|
||||
void Push(Arg&& t) {
|
||||
// create the element, add it to the queue
|
||||
write_ptr->current = std::forward<Arg>(t);
|
||||
// set the next pointer to a new element ptr
|
||||
// then advance the write pointer
|
||||
ElementPtr* new_ptr = new ElementPtr();
|
||||
write_ptr->next.store(new_ptr, std::memory_order_release);
|
||||
write_ptr = new_ptr;
|
||||
++size;
|
||||
|
||||
// cv_mutex must be held or else there will be a missed wakeup if the other thread is in the
|
||||
// line before cv.wait
|
||||
// TODO(bunnei): This can be replaced with C++20 waitable atomics when properly supported.
|
||||
// See discussion on https://github.com/yuzu-emu/yuzu/pull/3173 for details.
|
||||
std::lock_guard lock{cv_mutex};
|
||||
cv.notify_one();
|
||||
}
|
||||
|
||||
void Pop() {
|
||||
--size;
|
||||
|
||||
ElementPtr* tmpptr = read_ptr;
|
||||
// advance the read pointer
|
||||
read_ptr = tmpptr->next.load();
|
||||
// set the next element to nullptr to stop the recursive deletion
|
||||
tmpptr->next.store(nullptr);
|
||||
delete tmpptr; // this also deletes the element
|
||||
}
|
||||
|
||||
bool Pop(T& t) {
|
||||
if (Empty())
|
||||
return false;
|
||||
|
||||
--size;
|
||||
|
||||
ElementPtr* tmpptr = read_ptr;
|
||||
read_ptr = tmpptr->next.load(std::memory_order_acquire);
|
||||
t = std::move(tmpptr->current);
|
||||
tmpptr->next.store(nullptr);
|
||||
delete tmpptr;
|
||||
return true;
|
||||
}
|
||||
|
||||
void Wait() {
|
||||
if (Empty()) {
|
||||
std::unique_lock lock{cv_mutex};
|
||||
cv.wait(lock, [this]() { return !Empty(); });
|
||||
Node* const node = new Node(std::forward<Arg>(t));
|
||||
if (!node || node == PLACEHOLDER) {
|
||||
ABORT();
|
||||
}
|
||||
while (true) {
|
||||
if (Node* const previous = tail.load(ACQUIRE)) {
|
||||
if (Node* exchange = nullptr;
|
||||
!previous->next.compare_exchange_weak(exchange, node, ACQ_REL)) {
|
||||
continue;
|
||||
}
|
||||
if (tail.exchange(node, ACQ_REL) != previous) {
|
||||
ABORT();
|
||||
}
|
||||
} else {
|
||||
if (Node* exchange = nullptr;
|
||||
!tail.compare_exchange_weak(exchange, node, ACQ_REL)) {
|
||||
continue;
|
||||
}
|
||||
for (Node* exchange = nullptr;
|
||||
!head.compare_exchange_weak(exchange, node, ACQ_REL);)
|
||||
;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (waiting.load(ACQUIRE)) {
|
||||
std::lock_guard lock{mutex};
|
||||
condition.notify_one();
|
||||
}
|
||||
}
|
||||
|
||||
bool Pop(T& t) {
|
||||
return PopImpl<false>(t);
|
||||
}
|
||||
|
||||
T PopWait() {
|
||||
Wait();
|
||||
T t;
|
||||
Pop(t);
|
||||
if (!PopImpl<true>(t)) {
|
||||
ABORT();
|
||||
}
|
||||
return t;
|
||||
}
|
||||
|
||||
// not thread-safe
|
||||
void Wait() {
|
||||
if (head.load(ACQUIRE)) {
|
||||
return;
|
||||
}
|
||||
static_cast<void>(waiting.fetch_add(1, ACQ_REL));
|
||||
std::unique_lock lock{mutex};
|
||||
while (true) {
|
||||
if (head.load(ACQUIRE)) {
|
||||
break;
|
||||
}
|
||||
condition.wait(lock);
|
||||
}
|
||||
if (!waiting.fetch_sub(1, ACQ_REL)) {
|
||||
ABORT();
|
||||
}
|
||||
}
|
||||
|
||||
void Clear() {
|
||||
size.store(0);
|
||||
delete read_ptr;
|
||||
write_ptr = read_ptr = new ElementPtr();
|
||||
while (true) {
|
||||
Node* const last = tail.load(ACQUIRE);
|
||||
if (!last) {
|
||||
return;
|
||||
}
|
||||
if (Node* exchange = nullptr;
|
||||
!last->next.compare_exchange_weak(exchange, PLACEHOLDER, ACQ_REL)) {
|
||||
continue;
|
||||
}
|
||||
if (tail.exchange(nullptr, ACQ_REL) != last) {
|
||||
ABORT();
|
||||
}
|
||||
Node* node = head.exchange(nullptr, ACQ_REL);
|
||||
while (node && node != PLACEHOLDER) {
|
||||
Node* next = node->next.load(ACQUIRE);
|
||||
delete node;
|
||||
node = next;
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
// stores a pointer to element
|
||||
// and a pointer to the next ElementPtr
|
||||
class ElementPtr {
|
||||
public:
|
||||
ElementPtr() {}
|
||||
~ElementPtr() {
|
||||
ElementPtr* next_ptr = next.load();
|
||||
|
||||
if (next_ptr)
|
||||
delete next_ptr;
|
||||
template <bool WAIT>
|
||||
bool PopImpl(T& t) {
|
||||
std::optional<std::unique_lock<std::mutex>> lock{std::nullopt};
|
||||
while (true) {
|
||||
Node* const node = head.load(ACQUIRE);
|
||||
if (!node) {
|
||||
if constexpr (!WAIT) {
|
||||
return false;
|
||||
}
|
||||
if (!lock) {
|
||||
static_cast<void>(waiting.fetch_add(1, ACQ_REL));
|
||||
lock = std::unique_lock{mutex};
|
||||
continue;
|
||||
}
|
||||
condition.wait(*lock);
|
||||
continue;
|
||||
}
|
||||
Node* const next = node->next.load(ACQUIRE);
|
||||
if (next) {
|
||||
if (next == PLACEHOLDER) {
|
||||
continue;
|
||||
}
|
||||
if (Node* exchange = node; !head.compare_exchange_weak(exchange, next, ACQ_REL)) {
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
if (Node* exchange = nullptr;
|
||||
!node->next.compare_exchange_weak(exchange, PLACEHOLDER, ACQ_REL)) {
|
||||
continue;
|
||||
}
|
||||
if (tail.exchange(nullptr, ACQ_REL) != node) {
|
||||
ABORT();
|
||||
}
|
||||
if (head.exchange(nullptr, ACQ_REL) != node) {
|
||||
ABORT();
|
||||
}
|
||||
}
|
||||
t = std::move(node->value);
|
||||
delete node;
|
||||
if (lock) {
|
||||
if (!waiting.fetch_sub(1, ACQ_REL)) {
|
||||
ABORT();
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
T current;
|
||||
std::atomic<ElementPtr*> next{nullptr};
|
||||
struct Node {
|
||||
template <typename Arg>
|
||||
explicit Node(Arg&& t) : value{std::forward<Arg>(t)} {}
|
||||
|
||||
Node(const Node&) = delete;
|
||||
Node& operator=(const Node&) = delete;
|
||||
|
||||
Node(Node&&) = delete;
|
||||
Node& operator=(Node&&) = delete;
|
||||
|
||||
const T value;
|
||||
std::atomic<Node*> next{nullptr};
|
||||
};
|
||||
|
||||
ElementPtr* write_ptr;
|
||||
ElementPtr* read_ptr;
|
||||
std::atomic_size_t size{0};
|
||||
std::mutex cv_mutex;
|
||||
std::condition_variable cv;
|
||||
// We only need to avoid SEQ_CST on X86
|
||||
// We can add RELAXED later if we port to ARM and it's too slow
|
||||
static constexpr auto ACQUIRE = std::memory_order_acquire;
|
||||
static constexpr auto ACQ_REL = std::memory_order_acq_rel;
|
||||
static inline const auto PLACEHOLDER = reinterpret_cast<Node*>(1);
|
||||
|
||||
std::atomic<Node*> head{nullptr};
|
||||
std::atomic<Node*> tail{nullptr};
|
||||
|
||||
std::atomic_size_t waiting{0};
|
||||
std::condition_variable condition{};
|
||||
std::mutex mutex{};
|
||||
#undef ABORT
|
||||
};
|
||||
|
||||
// a simple thread-safe,
|
||||
// single reader, multiple writer queue
|
||||
|
||||
/// a simple lockless thread-safe,
|
||||
/// single reader, single writer queue
|
||||
template <typename T>
|
||||
class MPSCQueue {
|
||||
class /*[[deprecated("Transition to MPMCQueue")]]*/ SPSCQueue {
|
||||
public:
|
||||
[[nodiscard]] std::size_t Size() const {
|
||||
return spsc_queue.Size();
|
||||
}
|
||||
|
||||
[[nodiscard]] bool Empty() const {
|
||||
return spsc_queue.Empty();
|
||||
}
|
||||
|
||||
[[nodiscard]] T& Front() const {
|
||||
return spsc_queue.Front();
|
||||
}
|
||||
|
||||
template <typename Arg>
|
||||
void Push(Arg&& t) {
|
||||
std::lock_guard lock{write_lock};
|
||||
spsc_queue.Push(t);
|
||||
}
|
||||
|
||||
void Pop() {
|
||||
return spsc_queue.Pop();
|
||||
queue.Push(std::forward<Arg>(t));
|
||||
}
|
||||
|
||||
bool Pop(T& t) {
|
||||
return spsc_queue.Pop(t);
|
||||
return queue.Pop(t);
|
||||
}
|
||||
|
||||
void Wait() {
|
||||
spsc_queue.Wait();
|
||||
queue.Wait();
|
||||
}
|
||||
|
||||
T PopWait() {
|
||||
return spsc_queue.PopWait();
|
||||
return queue.PopWait();
|
||||
}
|
||||
|
||||
// not thread-safe
|
||||
void Clear() {
|
||||
spsc_queue.Clear();
|
||||
queue.Clear();
|
||||
}
|
||||
|
||||
private:
|
||||
SPSCQueue<T> spsc_queue;
|
||||
std::mutex write_lock;
|
||||
MPMCQueue<T> queue{};
|
||||
};
|
||||
|
||||
/// a simple thread-safe,
|
||||
/// single reader, multiple writer queue
|
||||
template <typename T>
|
||||
class /*[[deprecated("Transition to MPMCQueue")]]*/ MPSCQueue {
|
||||
public:
|
||||
template <typename Arg>
|
||||
void Push(Arg&& t) {
|
||||
queue.Push(std::forward<Arg>(t));
|
||||
}
|
||||
|
||||
bool Pop(T& t) {
|
||||
return queue.Pop(t);
|
||||
}
|
||||
|
||||
T PopWait() {
|
||||
return queue.PopWait();
|
||||
}
|
||||
|
||||
private:
|
||||
MPMCQueue<T> queue{};
|
||||
};
|
||||
} // namespace Common
|
||||
|
|
|
@@ -6,7 +6,7 @@
 
 #include <bitset>
 #include <initializer_list>
-#include <xbyak.h>
+#include <xbyak/xbyak.h>
 #include "common/assert.h"
 
 namespace Common::X64 {

@@ -5,7 +5,7 @@
 #pragma once
 
 #include <type_traits>
-#include <xbyak.h>
+#include <xbyak/xbyak.h>
 #include "common/x64/xbyak_abi.h"
 
 namespace Common::X64 {
|
||||
|
|
|
@@ -84,8 +84,6 @@ FileSys::StorageId GetStorageIdForFrontendSlot(
 
 } // Anonymous namespace
 
-/*static*/ System System::s_instance;
-
 FileSys::VirtualFile GetGameFileFromPath(const FileSys::VirtualFilesystem& vfs,
                                          const std::string& path) {
     // To account for split 00+01+etc files.

@@ -425,6 +423,13 @@ struct System::Impl {
 System::System() : impl{std::make_unique<Impl>(*this)} {}
 System::~System() = default;
 
+void System::InitializeGlobalInstance() {
+    if (s_instance) {
+        abort();
+    }
+    s_instance = std::unique_ptr<System>(new System);
+}
+
 CpuManager& System::GetCpuManager() {
     return impl->cpu_manager;
 }
|
||||
|
|
|
@@ -121,9 +121,14 @@ public:
      * @returns Reference to the instance of the System singleton class.
      */
     [[deprecated("Use of the global system instance is deprecated")]] static System& GetInstance() {
-        return s_instance;
+        if (!s_instance) {
+            abort();
+        }
+        return *s_instance;
     }
 
+    static void InitializeGlobalInstance();
+
     /// Enumeration representing the return values of the System Initialize and Load process.
     enum class ResultStatus : u32 {
         Success, ///< Succeeded

@@ -393,7 +398,7 @@ private:
     struct Impl;
     std::unique_ptr<Impl> impl;
 
-    static System s_instance;
+    inline static std::unique_ptr<System> s_instance{};
 };
 
 } // namespace Core
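Editorial note: the two hunks above move the global System object from a statically constructed instance to a lazily created std::unique_ptr that must be set up explicitly before use. A minimal, self-contained sketch of the same pattern under invented names (illustration only, not yuzu code):

    #include <cstdlib>
    #include <memory>

    class Service {
    public:
        static Service& GetInstance() {
            if (!s_instance) {
                std::abort(); // InitializeGlobalInstance() was never called
            }
            return *s_instance;
        }

        static void InitializeGlobalInstance() {
            if (s_instance) {
                std::abort(); // double initialization is a programming error
            }
            s_instance = std::unique_ptr<Service>(new Service);
        }

    private:
        Service() = default;
        inline static std::unique_ptr<Service> s_instance{};
    };

    int main() {
        Service::InitializeGlobalInstance();
        Service::GetInstance();
        return 0;
    }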
|
||||
|
|
|
@ -261,20 +261,23 @@ struct KernelCore::Impl {
|
|||
current_process = process;
|
||||
}
|
||||
|
||||
/// Creates a new host thread ID, should only be called by GetHostThreadId
|
||||
u32 AllocateHostThreadId(std::optional<std::size_t> core_id) {
|
||||
if (core_id) {
|
||||
static inline thread_local u32 host_thread_id = UINT32_MAX;
|
||||
|
||||
/// Gets the host thread ID for the caller, allocating a new one if this is the first time
|
||||
u32 GetHostThreadId(std::size_t core_id) {
|
||||
if (static_cast<s32>(host_thread_id) < 0) {
|
||||
// The first for slots are reserved for CPU core threads
|
||||
ASSERT(*core_id < Core::Hardware::NUM_CPU_CORES);
|
||||
return static_cast<u32>(*core_id);
|
||||
} else {
|
||||
return next_host_thread_id++;
|
||||
ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
|
||||
host_thread_id = static_cast<u32>(core_id);
|
||||
}
|
||||
return host_thread_id;
|
||||
}
|
||||
|
||||
/// Gets the host thread ID for the caller, allocating a new one if this is the first time
|
||||
u32 GetHostThreadId(std::optional<std::size_t> core_id = std::nullopt) {
|
||||
const thread_local auto host_thread_id{AllocateHostThreadId(core_id)};
|
||||
u32 GetHostThreadId() {
|
||||
if (static_cast<s32>(host_thread_id) < 0) {
|
||||
host_thread_id = next_host_thread_id++;
|
||||
}
|
||||
return host_thread_id;
|
||||
}
|
||||
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
#include "core/hle/service/nifm/nifm.h"
|
||||
#include "core/hle/service/service.h"
|
||||
#include "core/network/network.h"
|
||||
#include "core/network/network_interface.h"
|
||||
|
||||
namespace Service::NIFM {
|
||||
|
||||
|
@ -357,16 +358,10 @@ private:
|
|||
static_assert(sizeof(IpConfigInfo) == sizeof(IpAddressSetting) + sizeof(DnsSetting),
|
||||
"IpConfigInfo has incorrect size.");
|
||||
|
||||
auto ipv4 = Network::GetHostIPv4Address();
|
||||
if (!ipv4) {
|
||||
LOG_ERROR(Service_NIFM, "Couldn't get host IPv4 address, defaulting to 0.0.0.0");
|
||||
ipv4.emplace(Network::IPv4Address{0, 0, 0, 0});
|
||||
}
|
||||
|
||||
const IpConfigInfo ip_config_info{
|
||||
IpConfigInfo ip_config_info{
|
||||
.ip_address_setting{
|
||||
.is_automatic{true},
|
||||
.current_address{*ipv4},
|
||||
.current_address{0, 0, 0, 0},
|
||||
.subnet_mask{255, 255, 255, 0},
|
||||
.gateway{192, 168, 1, 1},
|
||||
},
|
||||
|
@ -377,6 +372,19 @@ private:
|
|||
},
|
||||
};
|
||||
|
||||
const auto iface = Network::GetSelectedNetworkInterface();
|
||||
if (iface) {
|
||||
ip_config_info.ip_address_setting =
|
||||
IpAddressSetting{.is_automatic{true},
|
||||
.current_address{Network::TranslateIPv4(iface->ip_address)},
|
||||
.subnet_mask{Network::TranslateIPv4(iface->subnet_mask)},
|
||||
.gateway{Network::TranslateIPv4(iface->gateway)}};
|
||||
|
||||
} else {
|
||||
LOG_ERROR(Service_NIFM,
|
||||
"Couldn't get host network configuration info, using default values");
|
||||
}
|
||||
|
||||
IPC::ResponseBuilder rb{ctx, 2 + (sizeof(IpConfigInfo) + 3) / sizeof(u32)};
|
||||
rb.Push(ResultSuccess);
|
||||
rb.PushRaw<IpConfigInfo>(ip_config_info);
|
||||
|
|
|
@ -50,11 +50,6 @@ void Finalize() {
|
|||
WSACleanup();
|
||||
}
|
||||
|
||||
constexpr IPv4Address TranslateIPv4(in_addr addr) {
|
||||
auto& bytes = addr.S_un.S_un_b;
|
||||
return IPv4Address{bytes.s_b1, bytes.s_b2, bytes.s_b3, bytes.s_b4};
|
||||
}
|
||||
|
||||
sockaddr TranslateFromSockAddrIn(SockAddrIn input) {
|
||||
sockaddr_in result;
|
||||
|
||||
|
@ -141,12 +136,6 @@ void Initialize() {}
|
|||
|
||||
void Finalize() {}
|
||||
|
||||
constexpr IPv4Address TranslateIPv4(in_addr addr) {
|
||||
const u32 bytes = addr.s_addr;
|
||||
return IPv4Address{static_cast<u8>(bytes), static_cast<u8>(bytes >> 8),
|
||||
static_cast<u8>(bytes >> 16), static_cast<u8>(bytes >> 24)};
|
||||
}
|
||||
|
||||
sockaddr TranslateFromSockAddrIn(SockAddrIn input) {
|
||||
sockaddr_in result;
|
||||
|
||||
|
|
|
@@ -11,6 +11,12 @@
 #include "common/common_funcs.h"
 #include "common/common_types.h"
 
+#ifdef _WIN32
+#include <winsock2.h>
+#elif YUZU_UNIX
+#include <netinet/in.h>
+#endif
+
 namespace Network {
 
 class Socket;

@@ -93,6 +99,19 @@ public:
     ~NetworkInstance();
 };
 
+#ifdef _WIN32
+constexpr IPv4Address TranslateIPv4(in_addr addr) {
+    auto& bytes = addr.S_un.S_un_b;
+    return IPv4Address{bytes.s_b1, bytes.s_b2, bytes.s_b3, bytes.s_b4};
+}
+#elif YUZU_UNIX
+constexpr IPv4Address TranslateIPv4(in_addr addr) {
+    const u32 bytes = addr.s_addr;
+    return IPv4Address{static_cast<u8>(bytes), static_cast<u8>(bytes >> 8),
+                       static_cast<u8>(bytes >> 16), static_cast<u8>(bytes >> 24)};
+}
+#endif
+
 /// @brief Returns host's IPv4 address
 /// @return human ordered IPv4 address (e.g. 192.168.0.1) as an array
 std::optional<IPv4Address> GetHostIPv4Address();
||||
|
|
|
@ -2,11 +2,15 @@
|
|||
// Licensed under GPLv2 or any later version
|
||||
// Refer to the license.txt file included.
|
||||
|
||||
#include <algorithm>
|
||||
#include <fstream>
|
||||
#include <sstream>
|
||||
#include <vector>
|
||||
|
||||
#include "common/bit_cast.h"
|
||||
#include "common/common_types.h"
|
||||
#include "common/logging/log.h"
|
||||
#include "common/settings.h"
|
||||
#include "common/string_util.h"
|
||||
#include "core/network/network_interface.h"
|
||||
|
||||
|
@ -29,8 +33,9 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
|
|||
|
||||
// retry up to 5 times
|
||||
for (int i = 0; i < 5 && ret == ERROR_BUFFER_OVERFLOW; i++) {
|
||||
ret = GetAdaptersAddresses(AF_INET, GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER,
|
||||
nullptr, adapter_addresses.data(), &buf_size);
|
||||
ret = GetAdaptersAddresses(
|
||||
AF_INET, GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_GATEWAYS,
|
||||
nullptr, adapter_addresses.data(), &buf_size);
|
||||
|
||||
if (ret == ERROR_BUFFER_OVERFLOW) {
|
||||
adapter_addresses.resize((buf_size / sizeof(IP_ADAPTER_ADDRESSES)) + 1);
|
||||
|
@ -57,9 +62,26 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
|
|||
*current_address->FirstUnicastAddress->Address.lpSockaddr)
|
||||
.sin_addr;
|
||||
|
||||
ULONG mask = 0;
|
||||
if (ConvertLengthToIpv4Mask(current_address->FirstUnicastAddress->OnLinkPrefixLength,
|
||||
&mask) != NO_ERROR) {
|
||||
LOG_ERROR(Network, "Failed to convert IPv4 prefix length to subnet mask");
|
||||
continue;
|
||||
}
|
||||
|
||||
struct in_addr gateway = {.S_un{.S_addr{0}}};
|
||||
if (current_address->FirstGatewayAddress != nullptr &&
|
||||
current_address->FirstGatewayAddress->Address.lpSockaddr != nullptr) {
|
||||
gateway = Common::BitCast<struct sockaddr_in>(
|
||||
*current_address->FirstGatewayAddress->Address.lpSockaddr)
|
||||
.sin_addr;
|
||||
}
|
||||
|
||||
result.push_back(NetworkInterface{
|
||||
.name{Common::UTF16ToUTF8(std::wstring{current_address->FriendlyName})},
|
||||
.ip_address{ip_addr}});
|
||||
.ip_address{ip_addr},
|
||||
.subnet_mask = in_addr{.S_un{.S_addr{mask}}},
|
||||
.gateway = gateway});
|
||||
}
|
||||
|
||||
return result;
|
||||
|
@ -83,7 +105,7 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
|
|||
}
|
||||
|
||||
for (auto ifa = ifaddr; ifa != nullptr; ifa = ifa->ifa_next) {
|
||||
if (ifa->ifa_addr == nullptr) {
|
||||
if (ifa->ifa_addr == nullptr || ifa->ifa_netmask == nullptr) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@@ -95,9 +117,59 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
            continue;
        }

        std::uint32_t gateway{0};
        std::ifstream file{"/proc/net/route"};
        if (file.is_open()) {

            // ignore header
            file.ignore(std::numeric_limits<std::streamsize>::max(), '\n');

            bool gateway_found = false;

            for (std::string line; std::getline(file, line);) {
                std::istringstream iss{line};

                std::string iface_name{};
                iss >> iface_name;
                if (iface_name != ifa->ifa_name) {
                    continue;
                }

                iss >> std::hex;

                std::uint32_t dest{0};
                iss >> dest;
                if (dest != 0) {
                    // not the default route
                    continue;
                }

                iss >> gateway;

                std::uint16_t flags{0};
                iss >> flags;

                // flag RTF_GATEWAY (defined in <linux/route.h>)
                if ((flags & 0x2) == 0) {
                    continue;
                }

                gateway_found = true;
                break;
            }

            if (!gateway_found) {
                gateway = 0;
            }
        } else {
            LOG_ERROR(Network, "Failed to open \"/proc/net/route\"");
        }

        result.push_back(NetworkInterface{
            .name{ifa->ifa_name},
            .ip_address{Common::BitCast<struct sockaddr_in>(*ifa->ifa_addr).sin_addr}});
            .ip_address{Common::BitCast<struct sockaddr_in>(*ifa->ifa_addr).sin_addr},
            .subnet_mask{Common::BitCast<struct sockaddr_in>(*ifa->ifa_netmask).sin_addr},
            .gateway{in_addr{.s_addr = gateway}}});
    }

    freeifaddrs(ifaddr);
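On Linux the default gateway comes from /proc/net/route, a whitespace-separated table whose Destination, Gateway and Flags columns are printed in hexadecimal; that is why the loop above switches the stream to std::hex and keeps only the row with a zero destination and the RTF_GATEWAY bit set. A standalone sketch of the same parse, as a hypothetical helper rather than anything in this commit:

    #include <cstdint>
    #include <fstream>
    #include <limits>
    #include <optional>
    #include <sstream>
    #include <string>

    // Hypothetical helper: default gateway for the given interface, as the raw
    // value /proc/net/route prints (directly assignable to in_addr::s_addr).
    std::optional<std::uint32_t> DefaultGatewayFor(const std::string& wanted_iface) {
        std::ifstream file{"/proc/net/route"};
        if (!file.is_open()) {
            return std::nullopt;
        }
        file.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); // skip header row

        for (std::string line; std::getline(file, line);) {
            std::istringstream iss{line};
            std::string iface;
            std::uint32_t dest{}, gateway{};
            std::uint16_t flags{};
            iss >> iface >> std::hex >> dest >> gateway >> flags;
            // Default route: destination 0.0.0.0 with the RTF_GATEWAY (0x2) flag set.
            if (iface == wanted_iface && dest == 0 && (flags & 0x2) != 0) {
                return gateway;
            }
        }
        return std::nullopt;
    }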
@@ -107,4 +179,25 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {

#endif

std::optional<NetworkInterface> GetSelectedNetworkInterface() {
    const std::string& selected_network_interface = Settings::values.network_interface.GetValue();
    const auto network_interfaces = Network::GetAvailableNetworkInterfaces();
    if (network_interfaces.size() == 0) {
        LOG_ERROR(Network, "GetAvailableNetworkInterfaces returned no interfaces");
        return {};
    }

    const auto res =
        std::ranges::find_if(network_interfaces, [&selected_network_interface](const auto& iface) {
            return iface.name == selected_network_interface;
        });

    if (res != network_interfaces.end()) {
        return *res;
    } else {
        LOG_ERROR(Network, "Couldn't find selected interface \"{}\"", selected_network_interface);
        return {};
    }
}

} // namespace Network
@@ -4,6 +4,7 @@

#pragma once

#include <optional>
#include <string>
#include <vector>

@@ -18,8 +19,11 @@ namespace Network {
struct NetworkInterface {
    std::string name;
    struct in_addr ip_address;
    struct in_addr subnet_mask;
    struct in_addr gateway;
};

std::vector<NetworkInterface> GetAvailableNetworkInterfaces();
std::optional<NetworkInterface> GetSelectedNetworkInterface();

} // namespace Network
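With subnet_mask and gateway now part of NetworkInterface, a caller can pull all three addresses out of the selected interface. A minimal consumption sketch (hypothetical, using POSIX inet_ntop for the string conversion rather than anything the emulator itself provides):

    #include <arpa/inet.h> // inet_ntop; on Windows use <ws2tcpip.h> instead
    #include <cstdio>

    #include "core/network/network_interface.h"

    // Sketch only: print the interface selected in the emulator settings, if any.
    void PrintSelectedInterface() {
        const auto iface = Network::GetSelectedNetworkInterface();
        if (!iface) {
            return; // no interfaces available, or the configured name was not found
        }
        char ip[INET_ADDRSTRLEN], mask[INET_ADDRSTRLEN], gw[INET_ADDRSTRLEN];
        inet_ntop(AF_INET, &iface->ip_address, ip, sizeof(ip));
        inet_ntop(AF_INET, &iface->subnet_mask, mask, sizeof(mask));
        inet_ntop(AF_INET, &iface->gateway, gw, sizeof(gw));
        std::printf("%s: ip=%s mask=%s gateway=%s\n", iface->name.c_str(), ip, mask, gw);
    }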
@@ -4,11 +4,13 @@

#include <catch2/catch.hpp>
#include <math.h>
#include "common/logging/backend.h"
#include "common/param_package.h"

namespace Common {

TEST_CASE("ParamPackage", "[common]") {
    Common::Log::DisableLoggingInTests();
    ParamPackage original{
        {"abc", "xyz"},
        {"def", "42"},
@@ -6,7 +6,7 @@

#include <array>
#include <bitset>
#include <xbyak.h>
#include <xbyak/xbyak.h>
#include "common/bit_field.h"
#include "common/common_types.h"
#include "common/x64/xbyak_abi.h"
@@ -21,6 +21,7 @@ void ToggleConsole() {
        console_shown = UISettings::values.show_console.GetValue();
    }

    using namespace Common::Log;
#if defined(_WIN32) && !defined(_DEBUG)
    FILE* temp;
    if (UISettings::values.show_console) {

@@ -29,24 +30,20 @@ void ToggleConsole() {
            freopen_s(&temp, "CONIN$", "r", stdin);
            freopen_s(&temp, "CONOUT$", "w", stdout);
            freopen_s(&temp, "CONOUT$", "w", stderr);
            Common::Log::AddBackend(std::make_unique<Common::Log::ColorConsoleBackend>());
            SetColorConsoleBackendEnabled(true);
        }
    } else {
        if (FreeConsole()) {
            // In order to close the console, we have to also detach the streams on it.
            // Just redirect them to NUL if there is no console window
            Common::Log::RemoveBackend(Common::Log::ColorConsoleBackend::Name());
            SetColorConsoleBackendEnabled(false);
            freopen_s(&temp, "NUL", "r", stdin);
            freopen_s(&temp, "NUL", "w", stdout);
            freopen_s(&temp, "NUL", "w", stderr);
        }
    }
#else
    if (UISettings::values.show_console) {
        Common::Log::AddBackend(std::make_unique<Common::Log::ColorConsoleBackend>());
    } else {
        Common::Log::RemoveBackend(Common::Log::ColorConsoleBackend::Name());
    }
    SetColorConsoleBackendEnabled(UISettings::values.show_console.GetValue());
#endif
}
} // namespace Debugger
@@ -177,21 +177,6 @@ void GMainWindow::ShowTelemetryCallout() {

const int GMainWindow::max_recent_files_item;

static void InitializeLogging() {
    using namespace Common;

    Log::Filter log_filter;
    log_filter.ParseFilterString(Settings::values.log_filter.GetValue());
    Log::SetGlobalFilter(log_filter);

    const auto log_dir = FS::GetYuzuPath(FS::YuzuPath::LogDir);
    void(FS::CreateDir(log_dir));
    Log::AddBackend(std::make_unique<Log::FileBackend>(log_dir / LOG_FILE));
#ifdef _WIN32
    Log::AddBackend(std::make_unique<Log::DebuggerBackend>());
#endif
}

static void RemoveCachedContents() {
    const auto cache_dir = Common::FS::GetYuzuPath(Common::FS::YuzuPath::CacheDir);
    const auto offline_fonts = cache_dir / "fonts";
@@ -209,8 +194,6 @@ GMainWindow::GMainWindow()
    : input_subsystem{std::make_shared<InputCommon::InputSubsystem>()},
      config{std::make_unique<Config>()}, vfs{std::make_shared<FileSys::RealVfsFilesystem>()},
      provider{std::make_unique<FileSys::ManualContentProvider>()} {
    InitializeLogging();

    LoadTranslation();

    setAcceptDrops(true);
@@ -3463,6 +3446,7 @@ void GMainWindow::SetDiscordEnabled([[maybe_unused]] bool state) {
#endif

int main(int argc, char* argv[]) {
    Common::Log::Initialize();
    Common::DetachedTasks detached_tasks;
    MicroProfileOnThreadCreate("Frontend");
    SCOPE_EXIT({ MicroProfileShutdown(); });
@@ -3502,6 +3486,7 @@ int main(int argc, char* argv[]) {
    // generating shaders
    setlocale(LC_ALL, "C");

    Core::System::InitializeGlobalInstance();
    GMainWindow main_window;
    // After settings have been loaded by GMainWindow, apply the filter
    main_window.show();
@@ -74,31 +74,14 @@ static void PrintVersion() {
    std::cout << "yuzu " << Common::g_scm_branch << " " << Common::g_scm_desc << std::endl;
}

static void InitializeLogging() {
    using namespace Common;

    Log::Filter log_filter(Log::Level::Debug);
    log_filter.ParseFilterString(static_cast<std::string>(Settings::values.log_filter));
    Log::SetGlobalFilter(log_filter);

    Log::AddBackend(std::make_unique<Log::ColorConsoleBackend>());

    const auto& log_dir = FS::GetYuzuPath(FS::YuzuPath::LogDir);
    void(FS::CreateDir(log_dir));
    Log::AddBackend(std::make_unique<Log::FileBackend>(log_dir / LOG_FILE));
#ifdef _WIN32
    Log::AddBackend(std::make_unique<Log::DebuggerBackend>());
#endif
}

/// Application entry point
int main(int argc, char** argv) {
    Common::Log::Initialize();
    Common::Log::SetColorConsoleBackendEnabled(true);
    Common::DetachedTasks detached_tasks;
    Config config;

    int option_index = 0;

    InitializeLogging();
#ifdef _WIN32
    int argc_w;
    auto argv_w = CommandLineToArgvW(GetCommandLineW(), &argc_w);
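The frontend hunks above replace the per-frontend InitializeLogging helpers with the new centralized logging API. Condensed, the bootstrap they imply looks roughly like the sketch below; the internal behaviour of Common::Log::Initialize (default backends being registered there) and the later filter application are assumptions rather than something this diff states:

    #include "common/logging/backend.h"
    #include "common/logging/filter.h"
    #include "common/settings.h"

    // Sketch of the new logging bootstrap, as inferred from the frontend changes.
    void BootstrapLogging(bool with_console_colors) {
        Common::Log::Initialize(); // assumed to register the default backends
        Common::Log::SetColorConsoleBackendEnabled(with_console_colors);

        // Assumed to happen once the frontend has loaded its settings:
        Common::Log::Filter filter;
        filter.ParseFilterString(Settings::values.log_filter.GetValue());
        Common::Log::SetGlobalFilter(filter);
    }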
@@ -163,6 +146,7 @@ int main(int argc, char** argv) {
        return -1;
    }

    Core::System::InitializeGlobalInstance();
    auto& system{Core::System::GetInstance()};
    InputCommon::InputSubsystem input_subsystem;