early-access version 2647
parent 016426eebf
commit 614f3e6a00
11 changed files with 61 additions and 71 deletions
@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
 
-This is the source code for early-access 2645.
+This is the source code for early-access 2647.
 
 ## Legal Notice
 
@@ -103,12 +103,6 @@ else()
             -Wno-unused-parameter
         )
 
-        # TODO: Remove when we update to a GCC compiler that enables this
-        # by default (i.e. GCC 10 or newer).
-        if (CMAKE_CXX_COMPILER_ID STREQUAL GNU)
-            add_compile_options(-fconcepts)
-        endif()
-
         if (ARCHITECTURE_x86_64)
             add_compile_options("-mcx16")
         endif()
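
Note: the deleted CMake block did what its own TODO promised to undo. -fconcepts is only required on GCC releases older than 10, so its removal suggests the build now assumes GCC 10 or newer, where concepts are available by default in C++20 mode.
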
@@ -73,11 +73,13 @@ public:
     }
 
     void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
+        parent.LogBacktrace();
         UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc,
                           MemoryReadCode(pc));
     }
 
     void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
+        parent.LogBacktrace();
         LOG_CRITICAL(Core_ARM,
                      "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
                      exception, pc, MemoryReadCode(pc), parent.IsInThumbMode());
@@ -84,6 +84,7 @@ public:
     }
 
     void InterpreterFallback(u64 pc, std::size_t num_instructions) override {
+        parent.LogBacktrace();
         LOG_ERROR(Core_ARM,
                   "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
                   num_instructions, MemoryReadCode(pc));
@@ -121,6 +122,7 @@ public:
             return;
         case Dynarmic::A64::Exception::Breakpoint:
         default:
+            parent.LogBacktrace();
             ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
                        static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
         }
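
Note: the three hunks above add a parent.LogBacktrace() call so the guest call stack is reported before each fatal or critical message, while the CPU context is still intact. LogBacktrace itself is not part of this diff; as a purely hypothetical illustration of what such a logger can do, a frame-pointer walk over AAPCS64 frame records might look like the following (all names and the memory-read callback are stand-ins, not yuzu's API):

    #include <cstdint>
    #include <cstdio>
    #include <functional>

    // Hypothetical sketch only: walk the saved frame-pointer chain of AArch64
    // guest code. AAPCS64 frame record: [fp] = previous fp, [fp + 8] = saved LR.
    void LogBacktraceSketch(std::uint64_t fp, std::uint64_t lr,
                            const std::function<std::uint64_t(std::uint64_t)>& read_u64) {
        std::printf("backtrace: lr = %016llx\n", static_cast<unsigned long long>(lr));
        for (int depth = 0; depth < 32 && fp != 0; ++depth) {
            const std::uint64_t ret = read_u64(fp + 8); // saved return address
            std::printf("  #%d return address = %016llx\n", depth,
                        static_cast<unsigned long long>(ret));
            fp = read_u64(fp); // follow the frame-pointer chain
        }
    }
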
@@ -200,7 +200,7 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
         });
     }
     const void* const descriptor_data{update_descriptor_queue.UpdateData()};
-    const bool is_rescaling = info.uses_rescaling_uniform;
+    const bool is_rescaling = !info.texture_descriptors.empty() || !info.image_descriptors.empty();
     scheduler.Record([this, descriptor_data, is_rescaling,
                       rescaling_data = rescaling.Data()](vk::CommandBuffer cmdbuf) {
         cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
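
Note: with the per-pipeline uses_rescaling_uniform flag gone, the compute path derives is_rescaling from the shader's bindings instead: any texture or image descriptor means sampled resources may be rescaled. A minimal restatement of the predicate, with stand-in types (the struct and helper names are ours, not the codebase's):

    #include <vector>

    // Sketch: rescaling information matters whenever the shader binds any
    // texture or image. Stand-in for the Shader::Info vectors in the hunk.
    struct InfoSketch {
        std::vector<int> texture_descriptors;
        std::vector<int> image_descriptors;
    };

    bool MayNeedRescaling(const InfoSketch& info) {
        return !info.texture_descriptors.empty() || !info.image_descriptors.empty();
    }
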
@@ -238,7 +238,6 @@ GraphicsPipeline::GraphicsPipeline(
         enabled_uniform_buffer_masks[stage] = info->constant_buffer_mask;
         std::ranges::copy(info->constant_buffer_used_sizes, uniform_buffer_sizes[stage].begin());
         num_textures += Shader::NumDescriptors(info->texture_descriptors);
-        uses_rescale_unfiorm |= info->uses_rescaling_uniform;
     }
     auto func{[this, shader_notify, &render_pass_cache, &descriptor_pool, pipeline_statistics] {
         DescriptorLayoutBuilder builder{MakeBuilder(device, stage_infos)};
@@ -472,8 +471,7 @@ void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling) {
         });
     }
     const bool is_rescaling{texture_cache.IsRescaling()};
-    const bool update_rescaling{uses_rescale_unfiorm ? scheduler.UpdateRescaling(is_rescaling)
-                                                     : false};
+    const bool update_rescaling{scheduler.UpdateRescaling(is_rescaling)};
     const bool bind_pipeline{scheduler.UpdateGraphicsPipeline(this)};
     const void* const descriptor_data{update_descriptor_queue.UpdateData()};
     scheduler.Record([this, descriptor_data, bind_pipeline, rescaling_data = rescaling.Data(),
@@ -481,12 +479,10 @@ void GraphicsPipeline::ConfigureDraw(const RescalingPushConstant& rescaling) {
         if (bind_pipeline) {
             cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
         }
-        if (uses_rescale_unfiorm) {
         cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_ALL_GRAPHICS,
                              RESCALING_LAYOUT_WORDS_OFFSET, sizeof(rescaling_data),
                              rescaling_data.data());
-        }
-        if (uses_rescale_unfiorm && update_rescaling) {
+        if (update_rescaling) {
            const f32 config_down_factor{Settings::values.resolution_info.down_factor};
            const f32 scale_down_factor{is_rescaling ? config_down_factor : 1.0f};
            cmdbuf.PushConstants(*pipeline_layout, VK_SHADER_STAGE_ALL_GRAPHICS,
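
Note: the graphics path mirrors the compute change. The rescaling push constants are now written on every recorded draw configuration, and the down-factor words are refreshed only when scheduler.UpdateRescaling(is_rescaling) reports a state change, instead of being gated on the removed uses_rescale_unfiorm flag (identifier spelled as in the source). UpdateRescaling's body is not in this diff; a sketch of the change-detection idiom it presumably implements:

    #include <optional>

    // Sketch (assumption, not yuzu's code): remember the last state pushed and
    // report whether the push constants need refreshing.
    class RescalingTrackerSketch {
    public:
        bool UpdateRescaling(bool is_rescaling) {
            const bool changed = last_rescaling != is_rescaling;
            last_rescaling = is_rescaling;
            return changed;
        }

    private:
        std::optional<bool> last_rescaling; // empty until the first draw
    };
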
@@ -151,7 +151,6 @@ private:
     std::mutex build_mutex;
     std::atomic_bool is_built{false};
     bool uses_push_descriptor{false};
-    bool uses_rescale_unfiorm{false};
 };
 
 } // namespace Vulkan
@@ -234,9 +234,12 @@ void RasterizerVulkan::Clear() {
     const VkExtent2D render_area = framebuffer->RenderArea();
     scheduler.RequestRenderpass(framebuffer);
 
-    const bool is_rescaling = texture_cache.IsRescaling();
-    const u32 up_scale = is_rescaling ? Settings::values.resolution_info.up_scale : 1U;
-    const u32 down_shift = is_rescaling ? Settings::values.resolution_info.down_shift : 0U;
+    u32 up_scale = 1;
+    u32 down_shift = 0;
+    if (texture_cache.IsRescaling()) {
+        up_scale = Settings::values.resolution_info.up_scale;
+        down_shift = Settings::values.resolution_info.down_shift;
+    }
     UpdateViewportsState(regs);
 
     VkClearRect clear_rect{
@@ -692,9 +695,12 @@ void RasterizerVulkan::UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs
     if (!state_tracker.TouchScissors()) {
         return;
     }
-    const bool is_rescaling = texture_cache.IsRescaling();
-    const u32 up_scale = is_rescaling ? Settings::values.resolution_info.up_scale : 1U;
-    const u32 down_shift = is_rescaling ? Settings::values.resolution_info.down_shift : 0U;
+    u32 up_scale = 1;
+    u32 down_shift = 0;
+    if (texture_cache.IsRescaling()) {
+        up_scale = Settings::values.resolution_info.up_scale;
+        down_shift = Settings::values.resolution_info.down_shift;
+    }
     const std::array scissors{
         GetScissorState(regs, 0, up_scale, down_shift),
         GetScissorState(regs, 1, up_scale, down_shift),
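
Note: both rasterizer hunks replace ternary initializers with a mutable-then-branch form; the selected values are unchanged. The factors feed integer scaling of the form (value * up_scale) >> down_shift, e.g. up_scale = 3 with down_shift = 1 encodes a 1.5x factor. A standalone restatement with compile-time checks (the helper name is ours; the codebase inlines this arithmetic at each use site):

    #include <cstdint>

    // Sketch: the integer rescaling applied by the branches above.
    constexpr std::uint32_t Scale(std::uint32_t value, std::uint32_t up_scale,
                                  std::uint32_t down_shift) {
        return (value * up_scale) >> down_shift;
    }

    static_assert(Scale(1280, 3, 1) == 1920); // 1280x720 maps to 1920x1080
    static_assert(Scale(720, 3, 1) == 1080);
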
@@ -29,10 +29,7 @@ constexpr VkDeviceSize MAX_ALIGNMENT = 256;
 constexpr VkDeviceSize MAX_STREAM_BUFFER_REQUEST_SIZE = 8_MiB;
 // Stream buffer size in bytes
 constexpr VkDeviceSize STREAM_BUFFER_SIZE = 128_MiB;
-constexpr VkDeviceSize REGION_SIZE = STREAM_BUFFER_SIZE / StagingBufferPool::NUM_STREAM_REGIONS;
-static_assert(Common::IsAligned(REGION_SIZE, MAX_ALIGNMENT),
-              "Stream buffer region size must be VK buffer aligned");
+constexpr VkDeviceSize REGION_SIZE = STREAM_BUFFER_SIZE / StagingBufferPool::NUM_SYNCS;
 
 constexpr VkMemoryPropertyFlags HOST_FLAGS =
     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
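
Note: the revert ties the region size back to the sync count. With STREAM_BUFFER_SIZE = 128_MiB and NUM_SYNCS = 16, REGION_SIZE works out to 8 MiB, matching MAX_STREAM_BUFFER_REQUEST_SIZE, so a single stream request touches at most two regions' ticks. The dropped static_assert holds trivially at this size, checked here at compile time:

    // The arithmetic behind the new REGION_SIZE: 128 MiB over 16 regions is
    // 8 MiB per region, which remains 256-byte (MAX_ALIGNMENT) aligned.
    static_assert(128ULL * 1024 * 1024 / 16 == 8ULL * 1024 * 1024);
    static_assert((128ULL * 1024 * 1024 / 16) % 256 == 0);
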
@@ -86,17 +83,6 @@ u32 FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_
 size_t Region(size_t iterator) noexcept {
     return iterator / REGION_SIZE;
 }
 
-constexpr std::array<VkDeviceSize, StagingBufferPool::NUM_STREAM_REGIONS> MakeStreamBufferOffset() {
-    std::array<VkDeviceSize, StagingBufferPool::NUM_STREAM_REGIONS> offsets{};
-    for (size_t i = 0; i < StagingBufferPool::NUM_STREAM_REGIONS; ++i) {
-        offsets[i] = static_cast<VkDeviceSize>(i * REGION_SIZE);
-    }
-    return offsets;
-}
-
-constexpr auto STREAM_BUFFER_OFFSETS_LUT = MakeStreamBufferOffset();
-
 } // Anonymous namespace
 
 StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_,
@@ -173,19 +159,31 @@ void StagingBufferPool::TickFrame() {
 }
 
 StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
-    const auto num_requested_regions = Region(size) + 1;
-    const auto available_index = NextAvailableStreamIndex(num_requested_regions);
-    if (!available_index) {
+    if (AreRegionsActive(Region(free_iterator) + 1,
+                         std::min(Region(iterator + size) + 1, NUM_SYNCS))) {
         // Avoid waiting for the previous usages to be free
         return GetStagingBuffer(size, MemoryUsage::Upload);
     }
     const u64 current_tick = scheduler.CurrentTick();
-    const auto begin_itr = sync_ticks.begin() + *available_index;
-    std::fill(begin_itr, begin_itr + num_requested_regions, current_tick);
+    std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + Region(iterator),
+              current_tick);
+    used_iterator = iterator;
+    free_iterator = std::max(free_iterator, iterator + size);
 
-    const VkDeviceSize offset = STREAM_BUFFER_OFFSETS_LUT[*available_index];
-    ASSERT(offset + size <= STREAM_BUFFER_SIZE);
-    next_index = (*available_index + num_requested_regions) % NUM_STREAM_REGIONS;
+    if (iterator + size >= STREAM_BUFFER_SIZE) {
+        std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS,
+                  current_tick);
+        used_iterator = 0;
+        iterator = 0;
+        free_iterator = size;
+
+        if (AreRegionsActive(0, Region(size) + 1)) {
+            // Avoid waiting for the previous usages to be free
+            return GetStagingBuffer(size, MemoryUsage::Upload);
+        }
+    }
+    const size_t offset = iterator;
+    iterator = Common::AlignUp(iterator + size, MAX_ALIGNMENT);
     return StagingBufferRef{
         .buffer = *stream_buffer,
         .offset = static_cast<VkDeviceSize>(offset),
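
Note: this hunk restores the upstream bump-allocator ring in place of the fixed-offset LUT. iterator is the byte write cursor, used_iterator marks where fence ticks were last stamped, and free_iterator is the high-water mark that must have drained before the ring may wrap; when the regions an allocation would touch are still in flight, the pool falls back to a dedicated staging buffer rather than stall. A simplified, self-contained sketch of the technique (it collapses the used/free iterator bookkeeping into per-allocation stamping; all names are ours):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <optional>

    // Simplified sketch: a byte ring split into fixed regions, each stamped
    // with the fence tick of its last use. An allocation succeeds only if
    // every region it touches is already signaled; otherwise the caller is
    // expected to fall back to a dedicated buffer.
    class StreamRingSketch {
    public:
        static constexpr std::size_t kRegions = 16;
        static constexpr std::size_t kSize = std::size_t{128} << 20; // 128 MiB
        static constexpr std::size_t kRegionSize = kSize / kRegions; // 8 MiB

        // gpu_tick: newest fence value known signaled. current_tick: the value
        // the pending submission will signal. Returns a byte offset, if free.
        std::optional<std::size_t> Allocate(std::size_t size, std::uint64_t gpu_tick,
                                            std::uint64_t current_tick) {
            if (size >= kSize) {
                return std::nullopt; // request larger than the whole ring
            }
            if (cursor + size >= kSize) {
                cursor = 0; // wrap to the front of the ring
            }
            const std::size_t first = cursor / kRegionSize;
            const std::size_t last = (cursor + size) / kRegionSize;
            for (std::size_t r = first; r <= last && r < kRegions; ++r) {
                if (ticks[r] > gpu_tick) {
                    return std::nullopt; // region still in flight: do not stall
                }
            }
            for (std::size_t r = first; r <= last && r < kRegions; ++r) {
                ticks[r] = current_tick; // stamp every region this use touches
            }
            const std::size_t offset = cursor;
            cursor = (cursor + size + 255) & ~std::size_t{255}; // keep 256-byte alignment
            return offset;
        }

    private:
        std::size_t cursor = 0;
        std::array<std::uint64_t, kRegions> ticks{};
    };
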
@@ -193,22 +191,10 @@ StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
     };
 }
 
-std::optional<size_t> StagingBufferPool::NextAvailableStreamIndex(size_t num_regions) const {
-    const auto is_index_available = [this, num_regions](size_t begin_offset) {
+bool StagingBufferPool::AreRegionsActive(size_t region_begin, size_t region_end) const {
     const u64 gpu_tick = scheduler.GetMasterSemaphore().KnownGpuTick();
-    const auto tick_check = [gpu_tick](u64 sync_tick) { return gpu_tick >= sync_tick; };
-    const auto begin_itr = sync_ticks.begin() + begin_offset;
-    const bool is_available = std::all_of(begin_itr, begin_itr + num_regions, tick_check);
-    return is_available ? std::optional(begin_offset) : std::nullopt;
-    };
-    // Avoid overflow
-    if (next_index + num_regions <= NUM_STREAM_REGIONS) {
-        return is_index_available(next_index);
-    }
-    // Not enough contiguous regions at the next_index,
-    // Check if the contiguous range in the front is available
-    return is_index_available(0);
+    return std::any_of(sync_ticks.begin() + region_begin, sync_ticks.begin() + region_end,
+                       [gpu_tick](u64 sync_tick) { return gpu_tick < sync_tick; });
 };
 
 StagingBufferRef StagingBufferPool::GetStagingBuffer(size_t size, MemoryUsage usage) {
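
Note: NextAvailableStreamIndex, which hunted for a contiguous run of free regions and returned its offset, is replaced by the simpler AreRegionsActive: a pure query over [region_begin, region_end) that reports whether any region's recorded tick is still ahead of the newest GPU tick known signaled. The new std::any_of with gpu_tick < sync_tick is exactly the negation of the old std::all_of with gpu_tick >= sync_tick (any_of(r, not p) == not all_of(r, p)), which is why the callers above treat a true result as the cue to fall back to a plain staging buffer.
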
@@ -25,7 +25,7 @@ struct StagingBufferRef {
 
 class StagingBufferPool {
 public:
-    static constexpr size_t NUM_STREAM_REGIONS = 0x8000;
+    static constexpr size_t NUM_SYNCS = 16;
 
     explicit StagingBufferPool(const Device& device, MemoryAllocator& memory_allocator,
                                VKScheduler& scheduler);
@@ -67,7 +67,7 @@ private:
 
     StagingBufferRef GetStreamBuffer(size_t size);
 
-    std::optional<size_t> NextAvailableStreamIndex(size_t num_regions) const;
+    bool AreRegionsActive(size_t region_begin, size_t region_end) const;
 
     StagingBufferRef GetStagingBuffer(size_t size, MemoryUsage usage);
 
@@ -89,8 +89,10 @@ private:
     vk::DeviceMemory stream_memory;
     u8* stream_pointer = nullptr;
 
-    size_t next_index = 0;
-    std::array<u64, NUM_STREAM_REGIONS> sync_ticks{};
+    size_t iterator = 0;
+    size_t used_iterator = 0;
+    size_t free_iterator = 0;
+    std::array<u64, NUM_SYNCS> sync_ticks{};
 
     StagingBuffersCache device_local_cache;
     StagingBuffersCache upload_cache;
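
Note: the header trades the experimental region bookkeeping for the three-cursor ring state: iterator is the byte write cursor, used_iterator marks the span whose ticks are still unstamped, and free_iterator is the high-water mark that must be clear before the ring wraps. Shrinking the tick array from NUM_STREAM_REGIONS = 0x8000 (32768) entries to NUM_SYNCS = 16 also cuts sync_ticks from 256 KiB of u64s to 128 bytes.
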
@@ -328,8 +328,7 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
     }
 
     const bool rescaled = RescaleRenderTargets(is_clear);
-    const auto& resolution_info = Settings::values.resolution_info;
-    if (resolution_info.active && is_rescaling != rescaled) {
+    if (is_rescaling != rescaled) {
         flags[Dirty::RescaleViewports] = true;
         flags[Dirty::RescaleScissors] = true;
         is_rescaling = rescaled;
@@ -346,8 +345,12 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
     for (size_t index = 0; index < NUM_RT; ++index) {
         render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d.regs.rt_control.Map(index));
     }
-    const u32 up_scale = is_rescaling ? resolution_info.up_scale : 1U;
-    const u32 down_shift = is_rescaling ? resolution_info.down_shift : 0U;
+    u32 up_scale = 1;
+    u32 down_shift = 0;
+    if (is_rescaling) {
+        up_scale = Settings::values.resolution_info.up_scale;
+        down_shift = Settings::values.resolution_info.down_shift;
+    }
     render_targets.size = Extent2D{
         (maxwell3d.regs.render_area.width * up_scale) >> down_shift,
         (maxwell3d.regs.render_area.height * up_scale) >> down_shift,
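
Note: render-target dirtying no longer consults resolution_info.active; whenever the rescale state flips, the viewport and scissor dirty flags are raised unconditionally. The second hunk rewrites the up_scale/down_shift selection into the same branch form used in the rasterizer hunks, presumably with identical behavior, since is_rescaling can only be true while a resolution configuration is active.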