Mirror of https://github.com/shadps4-emu/shadPS4.git
synced 2024-11-23 11:20:07 +00:00
resources binding moved into vk_rasterizer
Some checks failed
Build and Release / reuse (push) Has been cancelled
Build and Release / clang-format (push) Has been cancelled
Build and Release / get-info (push) Has been cancelled
Build and Release / windows-sdl (push) Has been cancelled
Build and Release / windows-qt (push) Has been cancelled
Build and Release / macos-sdl (push) Has been cancelled
Build and Release / macos-qt (push) Has been cancelled
Build and Release / linux-sdl (push) Has been cancelled
Build and Release / linux-qt (push) Has been cancelled
Build and Release / pre-release (push) Has been cancelled
This commit is contained in:
parent 43a5876ee4
commit abca0ab3e6
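
In short, per-draw resource binding moves out of GraphicsPipeline/ComputePipeline and into the rasterizer: pipelines keep a shared stage-info array and a single Pipeline::BindResources() that only submits push constants and descriptor writes, while Rasterizer gathers the writes and tracks bound images itself. A condensed sketch of the resulting dispatch path (simplified from the hunks below, not a literal excerpt):

void Rasterizer::DispatchDirect() {
    // ...
    if (!BindResources(pipeline)) { // walks pipeline->GetStages() and fills set_writes
        return;                     // e.g. a metadata-only dispatch is skipped
    }
    scheduler.EndRendering();
    cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->Handle());
    cmdbuf.dispatch(cs_program.dim_x, cs_program.dim_y, cs_program.dim_z);
    ResetBindings(); // clear the image binding state tracked in bound_images
}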
@@ -328,7 +328,7 @@ void* Linker::AllocateTlsForThread(bool is_primary) {
     void* addr_out{reinterpret_cast<void*>(KernelAllocBase)};
     if (is_primary) {
         const size_t tls_aligned = Common::AlignUp(total_tls_size, 16_KB);
-        const int ret = ::Libraries::Kernel::sceKernelMapNamedFlexibleMemory(
+        const int ret = Libraries::Kernel::sceKernelMapNamedFlexibleMemory(
             &addr_out, tls_aligned, 3, 0, "SceKernelPrimaryTcbTls");
         ASSERT_MSG(ret == 0, "Unable to allocate TLS+TCB for the primary thread");
     } else {
@@ -20,7 +20,7 @@ enum class Stage : u32 {
     Local,
     Compute,
 };
-constexpr u32 MaxStageTypes = 6;
+constexpr u32 MaxStageTypes = 7;
 
 [[nodiscard]] constexpr Stage StageFromIndex(size_t index) noexcept {
     return static_cast<Stage>(index);
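
MaxStageTypes grows from 6 to 7 because, after this change, a pipeline keeps a single stage-info array indexed by Shader::Stage and the compute stage now occupies its own slot (see the vk_pipeline_common.h hunk further down). A minimal sketch with assumed variable names:

std::array<const Shader::Info*, Shader::MaxStageTypes> stages{}; // shared by graphics and compute
stages[static_cast<size_t>(Shader::Stage::Compute)] = &cs_info;  // requires MaxStageTypes to cover the Compute slot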
@@ -15,8 +15,10 @@ ComputePipeline::ComputePipeline(const Instance& instance_, Scheduler& scheduler
                                  DescriptorHeap& desc_heap_, vk::PipelineCache pipeline_cache,
                                  u64 compute_key_, const Shader::Info& info_,
                                  vk::ShaderModule module)
-    : Pipeline{instance_, scheduler_, desc_heap_, pipeline_cache}, compute_key{compute_key_},
-      info{&info_} {
+    : Pipeline{instance_, scheduler_, desc_heap_, pipeline_cache, true}, compute_key{compute_key_} {
+    auto& info = stages[int(Shader::Stage::Compute)];
+    info = &info_;
+
     const vk::PipelineShaderStageCreateInfo shader_ci = {
         .stage = vk::ShaderStageFlagBits::eCompute,
         .module = module,
@@ -118,90 +120,4 @@ ComputePipeline::ComputePipeline(const Instance& instance_, Scheduler& scheduler
 
 ComputePipeline::~ComputePipeline() = default;
 
-bool ComputePipeline::BindResources(VideoCore::BufferCache& buffer_cache,
-                                    VideoCore::TextureCache& texture_cache) const {
-    // Bind resource buffers and textures.
-    boost::container::small_vector<vk::WriteDescriptorSet, 16> set_writes;
-    BufferBarriers buffer_barriers;
-    Shader::PushData push_data{};
-    Shader::Backend::Bindings binding{};
-
-    info->PushUd(binding, push_data);
-
-    buffer_infos.clear();
-    buffer_views.clear();
-    image_infos.clear();
-
-    // Most of the time when a metadata is updated with a shader it gets cleared. It means
-    // we can skip the whole dispatch and update the tracked state instead. Also, it is not
-    // intended to be consumed and in such rare cases (e.g. HTile introspection, CRAA) we
-    // will need its full emulation anyways. For cases of metadata read a warning will be logged.
-    const auto IsMetaUpdate = [&](const auto& desc) {
-        const VAddr address = desc.GetSharp(*info).base_address;
-        if (desc.is_written) {
-            if (texture_cache.TouchMeta(address, true)) {
-                LOG_TRACE(Render_Vulkan, "Metadata update skipped");
-                return true;
-            }
-        } else {
-            if (texture_cache.IsMeta(address)) {
-                LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a CS shader (buffer)");
-            }
-        }
-        return false;
-    };
-
-    for (const auto& desc : info->buffers) {
-        if (desc.is_gds_buffer) {
-            continue;
-        }
-        if (IsMetaUpdate(desc)) {
-            return false;
-        }
-    }
-    for (const auto& desc : info->texture_buffers) {
-        if (IsMetaUpdate(desc)) {
-            return false;
-        }
-    }
-
-    BindBuffers(buffer_cache, texture_cache, *info, binding, push_data, set_writes,
-                buffer_barriers);
-
-    BindTextures(texture_cache, *info, binding, set_writes);
-
-    if (set_writes.empty()) {
-        return false;
-    }
-
-    const auto cmdbuf = scheduler.CommandBuffer();
-    if (!buffer_barriers.empty()) {
-        const auto dependencies = vk::DependencyInfo{
-            .dependencyFlags = vk::DependencyFlagBits::eByRegion,
-            .bufferMemoryBarrierCount = u32(buffer_barriers.size()),
-            .pBufferMemoryBarriers = buffer_barriers.data(),
-        };
-        scheduler.EndRendering();
-        cmdbuf.pipelineBarrier2(dependencies);
-    }
-
-    cmdbuf.pushConstants(*pipeline_layout, vk::ShaderStageFlagBits::eCompute, 0u, sizeof(push_data),
-                         &push_data);
-
-    // Bind descriptor set.
-    if (uses_push_descriptors) {
-        cmdbuf.pushDescriptorSetKHR(vk::PipelineBindPoint::eCompute, *pipeline_layout, 0,
-                                    set_writes);
-        return true;
-    }
-    const auto desc_set = desc_heap.Commit(*desc_layout);
-    for (auto& set_write : set_writes) {
-        set_write.dstSet = desc_set;
-    }
-    instance.GetDevice().updateDescriptorSets(set_writes, {});
-    cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, *pipeline_layout, 0, desc_set, {});
-
-    return true;
-}
-
 } // namespace Vulkan
@@ -24,13 +24,8 @@ public:
                              vk::ShaderModule module);
     ~ComputePipeline();
 
-    bool BindResources(VideoCore::BufferCache& buffer_cache,
-                       VideoCore::TextureCache& texture_cache) const;
-
 private:
     u64 compute_key;
-    const Shader::Info* info;
-    bool uses_push_descriptors{};
 };
 
 } // namespace Vulkan
@@ -16,10 +16,6 @@
 
 namespace Vulkan {
 
-static constexpr auto gp_stage_flags = vk::ShaderStageFlagBits::eVertex |
-                                       vk::ShaderStageFlagBits::eGeometry |
-                                       vk::ShaderStageFlagBits::eFragment;
-
 GraphicsPipeline::GraphicsPipeline(const Instance& instance_, Scheduler& scheduler_,
                                    DescriptorHeap& desc_heap_, const GraphicsPipelineKey& key_,
                                    vk::PipelineCache pipeline_cache,
@@ -389,67 +385,4 @@ void GraphicsPipeline::BuildDescSetLayout() {
     desc_layout = std::move(layout);
 }
 
-void GraphicsPipeline::BindResources(const Liverpool::Regs& regs,
-                                     VideoCore::BufferCache& buffer_cache,
-                                     VideoCore::TextureCache& texture_cache) const {
-    // Bind resource buffers and textures.
-    boost::container::small_vector<vk::WriteDescriptorSet, 16> set_writes;
-    BufferBarriers buffer_barriers;
-    Shader::PushData push_data{};
-    Shader::Backend::Bindings binding{};
-
-    buffer_infos.clear();
-    buffer_views.clear();
-    image_infos.clear();
-
-    for (const auto* stage : stages) {
-        if (!stage) {
-            continue;
-        }
-        if (stage->uses_step_rates) {
-            push_data.step0 = regs.vgt_instance_step_rate_0;
-            push_data.step1 = regs.vgt_instance_step_rate_1;
-        }
-        stage->PushUd(binding, push_data);
-
-        BindBuffers(buffer_cache, texture_cache, *stage, binding, push_data, set_writes,
-                    buffer_barriers);
-
-        BindTextures(texture_cache, *stage, binding, set_writes);
-    }
-
-    const auto cmdbuf = scheduler.CommandBuffer();
-    SCOPE_EXIT {
-        cmdbuf.pushConstants(*pipeline_layout, gp_stage_flags, 0U, sizeof(push_data), &push_data);
-        cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, Handle());
-    };
-
-    if (set_writes.empty()) {
-        return;
-    }
-
-    if (!buffer_barriers.empty()) {
-        const auto dependencies = vk::DependencyInfo{
-            .dependencyFlags = vk::DependencyFlagBits::eByRegion,
-            .bufferMemoryBarrierCount = u32(buffer_barriers.size()),
-            .pBufferMemoryBarriers = buffer_barriers.data(),
-        };
-        scheduler.EndRendering();
-        cmdbuf.pipelineBarrier2(dependencies);
-    }
-
-    // Bind descriptor set.
-    if (uses_push_descriptors) {
-        cmdbuf.pushDescriptorSetKHR(vk::PipelineBindPoint::eGraphics, *pipeline_layout, 0,
-                                    set_writes);
-        return;
-    }
-    const auto desc_set = desc_heap.Commit(*desc_layout);
-    for (auto& set_write : set_writes) {
-        set_write.dstSet = desc_set;
-    }
-    instance.GetDevice().updateDescriptorSets(set_writes, {});
-    cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, *pipeline_layout, 0, desc_set, {});
-}
-
 } // namespace Vulkan
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include <xxhash.h>
+
 #include "common/types.h"
 #include "video_core/renderer_vulkan/liverpool_to_vk.h"
 #include "video_core/renderer_vulkan/vk_common.h"
@@ -14,8 +15,8 @@ class TextureCache;
 
 namespace Vulkan {
 
-static constexpr u32 MaxVertexBufferCount = 32;
 static constexpr u32 MaxShaderStages = 5;
+static constexpr u32 MaxVertexBufferCount = 32;
 
 class Instance;
 class Scheduler;
@@ -61,13 +62,6 @@ public:
                               std::span<const vk::ShaderModule> modules);
     ~GraphicsPipeline();
 
-    void BindResources(const Liverpool::Regs& regs, VideoCore::BufferCache& buffer_cache,
-                       VideoCore::TextureCache& texture_cache) const;
-
-    const Shader::Info& GetStage(Shader::Stage stage) const noexcept {
-        return *stages[u32(stage)];
-    }
-
     bool IsEmbeddedVs() const noexcept {
         static constexpr size_t EmbeddedVsHash = 0x9b2da5cf47f8c29f;
         return key.stage_hashes[u32(Shader::Stage::Vertex)] == EmbeddedVsHash;
@@ -99,9 +93,7 @@ private:
     void BuildDescSetLayout();
 
 private:
-    std::array<const Shader::Info*, MaxShaderStages> stages{};
     GraphicsPipelineKey key;
-    bool uses_push_descriptors{};
 };
 
 } // namespace Vulkan
@@ -38,8 +38,6 @@ struct Program {
 };
 
 class PipelineCache {
-    static constexpr size_t MaxShaderStages = 5;
-
 public:
     explicit PipelineCache(const Instance& instance, Scheduler& scheduler,
                            AmdGpu::Liverpool* liverpool);
@@ -12,270 +12,47 @@
 namespace Vulkan {
 
-boost::container::static_vector<vk::DescriptorImageInfo, 32> Pipeline::image_infos;
-boost::container::static_vector<vk::BufferView, 8> Pipeline::buffer_views;
-boost::container::static_vector<vk::DescriptorBufferInfo, 32> Pipeline::buffer_infos;
-boost::container::static_vector<VideoCore::ImageId, 32> Pipeline::bound_images;
-
 Pipeline::Pipeline(const Instance& instance_, Scheduler& scheduler_, DescriptorHeap& desc_heap_,
-                   vk::PipelineCache pipeline_cache)
-    : instance{instance_}, scheduler{scheduler_}, desc_heap{desc_heap_} {}
+                   vk::PipelineCache pipeline_cache, bool is_compute_ /*= false*/)
+    : instance{instance_}, scheduler{scheduler_}, desc_heap{desc_heap_}, is_compute{is_compute_} {}
 
 Pipeline::~Pipeline() = default;
 
-void Pipeline::BindBuffers(VideoCore::BufferCache& buffer_cache,
-                           VideoCore::TextureCache& texture_cache, const Shader::Info& stage,
-                           Shader::Backend::Bindings& binding, Shader::PushData& push_data,
-                           DescriptorWrites& set_writes, BufferBarriers& buffer_barriers) const {
-    using BufferBindingInfo = std::pair<VideoCore::BufferId, AmdGpu::Buffer>;
-    static boost::container::static_vector<BufferBindingInfo, 32> buffer_bindings;
-
-    buffer_bindings.clear();
-
-    for (const auto& desc : stage.buffers) {
-        const auto vsharp = desc.GetSharp(stage);
-        if (!desc.is_gds_buffer && vsharp.base_address != 0 && vsharp.GetSize() > 0) {
-            const auto buffer_id = buffer_cache.FindBuffer(vsharp.base_address, vsharp.GetSize());
-            buffer_bindings.emplace_back(buffer_id, vsharp);
-        } else {
-            buffer_bindings.emplace_back(VideoCore::BufferId{}, vsharp);
-        }
-    }
-
-    using TexBufferBindingInfo = std::pair<VideoCore::BufferId, AmdGpu::Buffer>;
-    static boost::container::static_vector<TexBufferBindingInfo, 32> texbuffer_bindings;
-
-    texbuffer_bindings.clear();
-
-    for (const auto& desc : stage.texture_buffers) {
-        const auto vsharp = desc.GetSharp(stage);
-        if (vsharp.base_address != 0 && vsharp.GetSize() > 0 &&
-            vsharp.GetDataFmt() != AmdGpu::DataFormat::FormatInvalid) {
-            const auto buffer_id = buffer_cache.FindBuffer(vsharp.base_address, vsharp.GetSize());
-            texbuffer_bindings.emplace_back(buffer_id, vsharp);
-        } else {
-            texbuffer_bindings.emplace_back(VideoCore::BufferId{}, vsharp);
-        }
-    }
-
-    // Bind the flattened user data buffer as a UBO so it's accessible to the shader
-    if (stage.has_readconst) {
-        const auto [vk_buffer, offset] = buffer_cache.ObtainHostUBO(stage.flattened_ud_buf);
-        buffer_infos.emplace_back(vk_buffer->Handle(), offset,
-                                  stage.flattened_ud_buf.size() * sizeof(u32));
-        set_writes.push_back({
-            .dstSet = VK_NULL_HANDLE,
-            .dstBinding = binding.unified++,
-            .dstArrayElement = 0,
-            .descriptorCount = 1,
-            .descriptorType = vk::DescriptorType::eUniformBuffer,
-            .pBufferInfo = &buffer_infos.back(),
-        });
-        ++binding.buffer;
-    }
-
-    // Second pass to re-bind buffers that were updated after binding
-    for (u32 i = 0; i < buffer_bindings.size(); i++) {
-        const auto& [buffer_id, vsharp] = buffer_bindings[i];
-        const auto& desc = stage.buffers[i];
-        const bool is_storage = desc.IsStorage(vsharp);
-        if (!buffer_id) {
-            if (desc.is_gds_buffer) {
-                const auto* gds_buf = buffer_cache.GetGdsBuffer();
-                buffer_infos.emplace_back(gds_buf->Handle(), 0, gds_buf->SizeBytes());
-            } else if (instance.IsNullDescriptorSupported()) {
-                buffer_infos.emplace_back(VK_NULL_HANDLE, 0, VK_WHOLE_SIZE);
-            } else {
-                auto& null_buffer = buffer_cache.GetBuffer(VideoCore::NULL_BUFFER_ID);
-                buffer_infos.emplace_back(null_buffer.Handle(), 0, VK_WHOLE_SIZE);
-            }
-        } else {
-            const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer(
-                vsharp.base_address, vsharp.GetSize(), desc.is_written, false, buffer_id);
-            const u32 alignment =
-                is_storage ? instance.StorageMinAlignment() : instance.UniformMinAlignment();
-            const u32 offset_aligned = Common::AlignDown(offset, alignment);
-            const u32 adjust = offset - offset_aligned;
-            ASSERT(adjust % 4 == 0);
-            push_data.AddOffset(binding.buffer, adjust);
-            buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned,
-                                      vsharp.GetSize() + adjust);
-        }
-
-        set_writes.push_back({
-            .dstSet = VK_NULL_HANDLE,
-            .dstBinding = binding.unified++,
-            .dstArrayElement = 0,
-            .descriptorCount = 1,
-            .descriptorType = is_storage ? vk::DescriptorType::eStorageBuffer
-                                         : vk::DescriptorType::eUniformBuffer,
-            .pBufferInfo = &buffer_infos.back(),
-        });
-        ++binding.buffer;
-    }
-
-    const auto null_buffer_view =
-        instance.IsNullDescriptorSupported() ? VK_NULL_HANDLE : buffer_cache.NullBufferView();
-    for (u32 i = 0; i < texbuffer_bindings.size(); i++) {
-        const auto& [buffer_id, vsharp] = texbuffer_bindings[i];
-        const auto& desc = stage.texture_buffers[i];
-        vk::BufferView& buffer_view = buffer_views.emplace_back(null_buffer_view);
-        if (buffer_id) {
-            const u32 alignment = instance.TexelBufferMinAlignment();
-            const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer(
-                vsharp.base_address, vsharp.GetSize(), desc.is_written, true, buffer_id);
-            const u32 fmt_stride = AmdGpu::NumBits(vsharp.GetDataFmt()) >> 3;
-            ASSERT_MSG(fmt_stride == vsharp.GetStride(),
-                       "Texel buffer stride must match format stride");
-            const u32 offset_aligned = Common::AlignDown(offset, alignment);
-            const u32 adjust = offset - offset_aligned;
-            ASSERT(adjust % fmt_stride == 0);
-            push_data.AddOffset(binding.buffer, adjust / fmt_stride);
-            buffer_view =
-                vk_buffer->View(offset_aligned, vsharp.GetSize() + adjust, desc.is_written,
-                                vsharp.GetDataFmt(), vsharp.GetNumberFmt());
-            if (auto barrier =
-                    vk_buffer->GetBarrier(desc.is_written ? vk::AccessFlagBits2::eShaderWrite
-                                                          : vk::AccessFlagBits2::eShaderRead,
-                                          vk::PipelineStageFlagBits2::eComputeShader)) {
-                buffer_barriers.emplace_back(*barrier);
-            }
-            if (desc.is_written) {
-                texture_cache.InvalidateMemoryFromGPU(vsharp.base_address, vsharp.GetSize());
-            }
-        }
-
-        set_writes.push_back({
-            .dstSet = VK_NULL_HANDLE,
-            .dstBinding = binding.unified++,
-            .dstArrayElement = 0,
-            .descriptorCount = 1,
-            .descriptorType = desc.is_written ? vk::DescriptorType::eStorageTexelBuffer
-                                              : vk::DescriptorType::eUniformTexelBuffer,
-            .pTexelBufferView = &buffer_view,
-        });
-        ++binding.buffer;
-    }
-}
-
-void Pipeline::BindTextures(VideoCore::TextureCache& texture_cache, const Shader::Info& stage,
-                            Shader::Backend::Bindings& binding,
-                            DescriptorWrites& set_writes) const {
-    using ImageBindingInfo = std::pair<VideoCore::ImageId, VideoCore::TextureCache::TextureDesc>;
-    static boost::container::static_vector<ImageBindingInfo, 32> image_bindings;
-
-    image_bindings.clear();
-
-    for (const auto& image_desc : stage.images) {
-        const auto tsharp = image_desc.GetSharp(stage);
-        if (texture_cache.IsMeta(tsharp.Address())) {
-            LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a shader (texture)");
-        }
-
-        if (tsharp.GetDataFmt() == AmdGpu::DataFormat::FormatInvalid) {
-            image_bindings.emplace_back(std::piecewise_construct, std::tuple{}, std::tuple{});
-            continue;
-        }
-
-        auto& [image_id, desc] = image_bindings.emplace_back(std::piecewise_construct, std::tuple{},
-                                                             std::tuple{tsharp, image_desc});
-        image_id = texture_cache.FindImage(desc);
-        auto& image = texture_cache.GetImage(image_id);
-        ASSERT(False(image.flags & VideoCore::ImageFlagBits::Virtual));
-        if (image.binding.is_bound) {
-            // The image is already bound. In case if it is about to be used as storage we need
-            // to force general layout on it.
-            image.binding.force_general |= image_desc.is_storage;
-        }
-        if (image.binding.is_target) {
-            // The image is already bound as target. Since we read and output to it need to force
-            // general layout too.
-            image.binding.force_general = 1u;
-        }
-        image.binding.is_bound = 1u;
-    }
-
-    // Second pass to re-bind images that were updated after binding
-    for (auto& [image_id, desc] : image_bindings) {
-        bool is_storage = desc.type == VideoCore::TextureCache::BindingType::Storage;
-        if (!image_id) {
-            if (instance.IsNullDescriptorSupported()) {
-                image_infos.emplace_back(VK_NULL_HANDLE, VK_NULL_HANDLE, vk::ImageLayout::eGeneral);
-            } else {
-                auto& null_image = texture_cache.GetImageView(VideoCore::NULL_IMAGE_VIEW_ID);
-                image_infos.emplace_back(VK_NULL_HANDLE, *null_image.image_view,
-                                         vk::ImageLayout::eGeneral);
-            }
-        } else {
-            if (auto& old_image = texture_cache.GetImage(image_id);
-                old_image.binding.needs_rebind) {
-                old_image.binding.Reset(); // clean up previous image binding state
-                image_id = texture_cache.FindImage(desc);
-            }
-
-            bound_images.emplace_back(image_id);
-
-            auto& image = texture_cache.GetImage(image_id);
-            auto& image_view = texture_cache.FindTexture(image_id, desc.view_info);
-
-            if (image.binding.force_general || image.binding.is_target) {
-                image.Transit(vk::ImageLayout::eGeneral,
-                              vk::AccessFlagBits2::eShaderRead |
-                                  (image.info.IsDepthStencil()
-                                       ? vk::AccessFlagBits2::eDepthStencilAttachmentWrite
-                                       : vk::AccessFlagBits2::eColorAttachmentWrite),
-                              {});
-            } else {
-                if (is_storage) {
-                    image.Transit(vk::ImageLayout::eGeneral,
-                                  vk::AccessFlagBits2::eShaderRead |
-                                      vk::AccessFlagBits2::eShaderWrite,
-                                  desc.view_info.range);
-                } else {
-                    const auto new_layout = image.info.IsDepthStencil()
-                                                ? vk::ImageLayout::eDepthStencilReadOnlyOptimal
-                                                : vk::ImageLayout::eShaderReadOnlyOptimal;
-                    image.Transit(new_layout, vk::AccessFlagBits2::eShaderRead,
-                                  desc.view_info.range);
-                }
-            }
-            image.usage.storage |= is_storage;
-            image.usage.texture |= !is_storage;
-
-            image_infos.emplace_back(VK_NULL_HANDLE, *image_view.image_view,
-                                     image.last_state.layout);
-        }
-
-        set_writes.push_back({
-            .dstSet = VK_NULL_HANDLE,
-            .dstBinding = binding.unified++,
-            .dstArrayElement = 0,
-            .descriptorCount = 1,
-            .descriptorType =
-                is_storage ? vk::DescriptorType::eStorageImage : vk::DescriptorType::eSampledImage,
-            .pImageInfo = &image_infos.back(),
-        });
-    }
-
-    for (const auto& sampler : stage.samplers) {
-        auto ssharp = sampler.GetSharp(stage);
-        if (sampler.disable_aniso) {
-            const auto& tsharp = stage.images[sampler.associated_image].GetSharp(stage);
-            if (tsharp.base_level == 0 && tsharp.last_level == 0) {
-                ssharp.max_aniso.Assign(AmdGpu::AnisoRatio::One);
-            }
-        }
-        const auto vk_sampler = texture_cache.GetSampler(ssharp);
-        image_infos.emplace_back(vk_sampler, VK_NULL_HANDLE, vk::ImageLayout::eGeneral);
-        set_writes.push_back({
-            .dstSet = VK_NULL_HANDLE,
-            .dstBinding = binding.unified++,
-            .dstArrayElement = 0,
-            .descriptorCount = 1,
-            .descriptorType = vk::DescriptorType::eSampler,
-            .pImageInfo = &image_infos.back(),
-        });
-    }
-}
+void Pipeline::BindResources(DescriptorWrites& set_writes, const BufferBarriers& buffer_barriers,
+                             const Shader::PushData& push_data) const {
+    const auto cmdbuf = scheduler.CommandBuffer();
+    const auto bind_point =
+        IsCompute() ? vk::PipelineBindPoint::eCompute : vk::PipelineBindPoint::eGraphics;
+
+    if (!buffer_barriers.empty()) {
+        const auto dependencies = vk::DependencyInfo{
+            .dependencyFlags = vk::DependencyFlagBits::eByRegion,
+            .bufferMemoryBarrierCount = u32(buffer_barriers.size()),
+            .pBufferMemoryBarriers = buffer_barriers.data(),
+        };
+        scheduler.EndRendering();
+        cmdbuf.pipelineBarrier2(dependencies);
+    }
+
+    const auto stage_flags = IsCompute() ? vk::ShaderStageFlagBits::eCompute : gp_stage_flags;
+    cmdbuf.pushConstants(*pipeline_layout, stage_flags, 0u, sizeof(push_data), &push_data);
+
+    // Bind descriptor set.
+    if (set_writes.empty()) {
+        return;
+    }
+
+    if (uses_push_descriptors) {
+        cmdbuf.pushDescriptorSetKHR(bind_point, *pipeline_layout, 0, set_writes);
+        return;
+    }
+
+    const auto desc_set = desc_heap.Commit(*desc_layout);
+    for (auto& set_write : set_writes) {
+        set_write.dstSet = desc_set;
+    }
+    instance.GetDevice().updateDescriptorSets(set_writes, {});
+    cmdbuf.bindDescriptorSets(bind_point, *pipeline_layout, 0, desc_set, {});
+}
 
 } // namespace Vulkan
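
Usage note, a condensed caller-side sketch drawn from the vk_rasterizer.cpp hunks below (not a literal excerpt): the rasterizer now owns the descriptor-write and barrier vectors and hands them to the pipeline only for submission.

// Sketch: driving the new Pipeline::BindResources(); names follow the Rasterizer code added later in this commit.
Shader::PushData push_data{};
Shader::Backend::Bindings binding{};
for (const auto* stage : pipeline->GetStages()) {
    if (!stage) {
        continue;
    }
    stage->PushUd(binding, push_data);
    BindBuffers(*stage, binding, push_data, set_writes, buffer_barriers); // fills set_writes
    BindTextures(*stage, binding, set_writes);
}
pipeline->BindResources(set_writes, buffer_barriers, push_data); // push constants + descriptor set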
@@ -14,6 +14,10 @@ class BufferCache;
 
 namespace Vulkan {
 
+static constexpr auto gp_stage_flags = vk::ShaderStageFlagBits::eVertex |
+                                       vk::ShaderStageFlagBits::eGeometry |
+                                       vk::ShaderStageFlagBits::eFragment;
+
 class Instance;
 class Scheduler;
 class DescriptorHeap;
@@ -21,7 +25,7 @@ class DescriptorHeap;
 class Pipeline {
 public:
     Pipeline(const Instance& instance, Scheduler& scheduler, DescriptorHeap& desc_heap,
-             vk::PipelineCache pipeline_cache);
+             vk::PipelineCache pipeline_cache, bool is_compute = false);
     virtual ~Pipeline();
 
     vk::Pipeline Handle() const noexcept {
@@ -32,22 +36,27 @@ public:
         return *pipeline_layout;
     }
 
+    auto GetStages() const {
+        if (is_compute) {
+            return std::span{stages.cend() - 1, stages.cend()};
+        } else {
+            return std::span{stages.cbegin(), stages.cend() - 1};
+        }
+    }
+
+    const Shader::Info& GetStage(Shader::Stage stage) const noexcept {
+        return *stages[u32(stage)];
+    }
+
+    bool IsCompute() const {
+        return is_compute;
+    }
+
     using DescriptorWrites = boost::container::small_vector<vk::WriteDescriptorSet, 16>;
     using BufferBarriers = boost::container::small_vector<vk::BufferMemoryBarrier2, 16>;
 
-    void BindBuffers(VideoCore::BufferCache& buffer_cache, VideoCore::TextureCache& texture_cache,
-                     const Shader::Info& stage, Shader::Backend::Bindings& binding,
-                     Shader::PushData& push_data, DescriptorWrites& set_writes,
-                     BufferBarriers& buffer_barriers) const;
-
-    void BindTextures(VideoCore::TextureCache& texture_cache, const Shader::Info& stage,
-                      Shader::Backend::Bindings& binding, DescriptorWrites& set_writes) const;
-    void ResetBindings(VideoCore::TextureCache& texture_cache) const {
-        for (auto& image_id : bound_images) {
-            texture_cache.GetImage(image_id).binding.Reset();
-        }
-        bound_images.clear();
-    }
+    void BindResources(DescriptorWrites& set_writes, const BufferBarriers& buffer_barriers,
+                       const Shader::PushData& push_data) const;
 
 protected:
     const Instance& instance;
@@ -56,10 +65,9 @@ protected:
     vk::UniquePipeline pipeline;
     vk::UniquePipelineLayout pipeline_layout;
     vk::UniqueDescriptorSetLayout desc_layout;
-    static boost::container::static_vector<vk::DescriptorImageInfo, 32> image_infos;
-    static boost::container::static_vector<vk::BufferView, 8> buffer_views;
-    static boost::container::static_vector<vk::DescriptorBufferInfo, 32> buffer_infos;
-    static boost::container::static_vector<VideoCore::ImageId, 32> bound_images;
+    std::array<const Shader::Info*, Shader::MaxStageTypes> stages{};
+    bool uses_push_descriptors{};
+    const bool is_compute;
 };
 
 } // namespace Vulkan
@@ -116,7 +116,7 @@ RenderState Rasterizer::PrepareRenderState(u32 mrt_mask) {
         auto& [image_id, desc] = cb_descs.emplace_back(std::piecewise_construct, std::tuple{},
                                                        std::tuple{col_buf, hint});
         const auto& image_view = texture_cache.FindRenderTarget(desc);
-        image_id = image_view.image_id;
+        image_id = bound_images.emplace_back(image_view.image_id);
        auto& image = texture_cache.GetImage(image_id);
         image.binding.is_target = 1u;
 
@@ -149,7 +149,7 @@ RenderState Rasterizer::PrepareRenderState(u32 mrt_mask) {
                           std::tuple{regs.depth_buffer, regs.depth_view, regs.depth_control,
                                      htile_address, hint});
         const auto& image_view = texture_cache.FindDepthTarget(desc);
-        image_id = image_view.image_id;
+        image_id = bound_images.emplace_back(image_view.image_id);
         auto& image = texture_cache.GetImage(image_id);
         image.binding.is_target = 1u;
 
@@ -181,7 +181,6 @@ void Rasterizer::Draw(bool is_indexed, u32 index_offset) {
         return;
     }
 
-    const auto cmdbuf = scheduler.CommandBuffer();
     const auto& regs = liverpool->regs;
     const GraphicsPipeline* pipeline = pipeline_cache.GetGraphicsPipeline();
     if (!pipeline) {
@@ -190,10 +189,8 @@ void Rasterizer::Draw(bool is_indexed, u32 index_offset) {
 
     auto state = PrepareRenderState(pipeline->GetMrtMask());
 
-    try {
-        pipeline->BindResources(regs, buffer_cache, texture_cache);
-    } catch (...) {
-        UNREACHABLE();
+    if (!BindResources(pipeline)) {
+        return;
     }
 
     const auto& vs_info = pipeline->GetStage(Shader::Stage::Vertex);
@@ -205,6 +202,9 @@ void Rasterizer::Draw(bool is_indexed, u32 index_offset) {
 
     const auto [vertex_offset, instance_offset] = vs_info.GetDrawOffsets();
 
+    const auto cmdbuf = scheduler.CommandBuffer();
+    cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline->Handle());
+
     if (is_indexed) {
         cmdbuf.drawIndexed(num_indices, regs.num_instances.NumInstances(), 0, s32(vertex_offset),
                            instance_offset);
@@ -215,7 +215,7 @@ void Rasterizer::Draw(bool is_indexed, u32 index_offset) {
                     instance_offset);
     }
 
-    pipeline->ResetBindings(texture_cache);
+    ResetBindings();
 }
 
 void Rasterizer::DrawIndirect(bool is_indexed, VAddr arg_address, u32 offset, u32 size,
@@ -237,10 +237,8 @@ void Rasterizer::DrawIndirect(bool is_indexed, VAddr arg_address, u32 offset, u3
     ASSERT_MSG(regs.primitive_type != AmdGpu::PrimitiveType::RectList,
                "Unsupported primitive type for indirect draw");
 
-    try {
-        pipeline->BindResources(regs, buffer_cache, texture_cache);
-    } catch (...) {
-        UNREACHABLE();
+    if (!BindResources(pipeline)) {
+        return;
     }
 
     const auto& vs_info = pipeline->GetStage(Shader::Stage::Vertex);
@@ -262,6 +260,8 @@ void Rasterizer::DrawIndirect(bool is_indexed, VAddr arg_address, u32 offset, u3
     // instance offsets will be automatically applied by Vulkan from indirect args buffer.
 
     const auto cmdbuf = scheduler.CommandBuffer();
+    cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline->Handle());
+
     if (is_indexed) {
         static_assert(sizeof(VkDrawIndexedIndirectCommand) ==
                       AmdGpu::Liverpool::DrawIndexedIndirectArgsSize);
@@ -286,7 +286,7 @@ void Rasterizer::DrawIndirect(bool is_indexed, VAddr arg_address, u32 offset, u3
         }
     }
 
-    pipeline->ResetBindings(texture_cache);
+    ResetBindings();
 }
 
 void Rasterizer::DispatchDirect() {
@@ -299,20 +299,15 @@ void Rasterizer::DispatchDirect() {
         return;
     }
 
-    try {
-        const auto has_resources = pipeline->BindResources(buffer_cache, texture_cache);
-        if (!has_resources) {
+    if (!BindResources(pipeline)) {
         return;
     }
-    } catch (...) {
-        UNREACHABLE();
-    }
 
     scheduler.EndRendering();
     cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->Handle());
     cmdbuf.dispatch(cs_program.dim_x, cs_program.dim_y, cs_program.dim_z);
 
-    pipeline->ResetBindings(texture_cache);
+    ResetBindings();
 }
 
 void Rasterizer::DispatchIndirect(VAddr address, u32 offset, u32 size) {
@@ -325,21 +320,16 @@ void Rasterizer::DispatchIndirect(VAddr address, u32 offset, u32 size) {
         return;
     }
 
-    try {
-        const auto has_resources = pipeline->BindResources(buffer_cache, texture_cache);
-        if (!has_resources) {
+    if (!BindResources(pipeline)) {
         return;
     }
-    } catch (...) {
-        UNREACHABLE();
-    }
 
     scheduler.EndRendering();
     cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline->Handle());
     const auto [buffer, base] = buffer_cache.ObtainBuffer(address + offset, size, false);
     cmdbuf.dispatchIndirect(buffer->Handle(), base);
 
-    pipeline->ResetBindings(texture_cache);
+    ResetBindings();
 }
 
 u64 Rasterizer::Flush() {
@@ -353,13 +343,328 @@ void Rasterizer::Finish() {
     scheduler.Finish();
 }
 
+bool Rasterizer::BindResources(const Pipeline* pipeline) {
+    buffer_infos.clear();
+    buffer_views.clear();
+    image_infos.clear();
+
+    const auto& regs = liverpool->regs;
+
+    if (pipeline->IsCompute()) {
+        const auto& info = pipeline->GetStage(Shader::Stage::Compute);
+
+        // Most of the time when a metadata is updated with a shader it gets cleared. It means
+        // we can skip the whole dispatch and update the tracked state instead. Also, it is not
+        // intended to be consumed and in such rare cases (e.g. HTile introspection, CRAA) we
+        // will need its full emulation anyways. For cases of metadata read a warning will be
+        // logged.
+        const auto IsMetaUpdate = [&](const auto& desc) {
+            const VAddr address = desc.GetSharp(info).base_address;
+            if (desc.is_written) {
+                if (texture_cache.TouchMeta(address, true)) {
+                    LOG_TRACE(Render_Vulkan, "Metadata update skipped");
+                    return true;
+                }
+            } else {
+                if (texture_cache.IsMeta(address)) {
+                    LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a CS shader (buffer)");
+                }
+            }
+            return false;
+        };
+
+        for (const auto& desc : info.buffers) {
+            if (desc.is_gds_buffer) {
+                continue;
+            }
+            if (IsMetaUpdate(desc)) {
+                return false;
+            }
+        }
+        for (const auto& desc : info.texture_buffers) {
+            if (IsMetaUpdate(desc)) {
+                return false;
+            }
+        }
+    }
+
+    set_writes.clear();
+    buffer_barriers.clear();
+
+    // Bind resource buffers and textures.
+    Shader::PushData push_data{};
+    Shader::Backend::Bindings binding{};
+
+    for (const auto* stage : pipeline->GetStages()) {
+        if (!stage) {
+            continue;
+        }
+        if (stage->uses_step_rates) {
+            push_data.step0 = regs.vgt_instance_step_rate_0;
+            push_data.step1 = regs.vgt_instance_step_rate_1;
+        }
+        stage->PushUd(binding, push_data);
+
+        BindBuffers(*stage, binding, push_data, set_writes, buffer_barriers);
+        BindTextures(*stage, binding, set_writes);
+    }
+
+    pipeline->BindResources(set_writes, buffer_barriers, push_data);
+
+    return true;
+}
+
+void Rasterizer::BindBuffers(const Shader::Info& stage, Shader::Backend::Bindings& binding,
+                             Shader::PushData& push_data, Pipeline::DescriptorWrites& set_writes,
+                             Pipeline::BufferBarriers& buffer_barriers) {
+    buffer_bindings.clear();
+
+    for (const auto& desc : stage.buffers) {
+        const auto vsharp = desc.GetSharp(stage);
+        if (!desc.is_gds_buffer && vsharp.base_address != 0 && vsharp.GetSize() > 0) {
+            const auto buffer_id = buffer_cache.FindBuffer(vsharp.base_address, vsharp.GetSize());
+            buffer_bindings.emplace_back(buffer_id, vsharp);
+        } else {
+            buffer_bindings.emplace_back(VideoCore::BufferId{}, vsharp);
+        }
+    }
+
+    texbuffer_bindings.clear();
+
+    for (const auto& desc : stage.texture_buffers) {
+        const auto vsharp = desc.GetSharp(stage);
+        if (vsharp.base_address != 0 && vsharp.GetSize() > 0 &&
+            vsharp.GetDataFmt() != AmdGpu::DataFormat::FormatInvalid) {
+            const auto buffer_id = buffer_cache.FindBuffer(vsharp.base_address, vsharp.GetSize());
+            texbuffer_bindings.emplace_back(buffer_id, vsharp);
+        } else {
+            texbuffer_bindings.emplace_back(VideoCore::BufferId{}, vsharp);
+        }
+    }
+
+    // Bind the flattened user data buffer as a UBO so it's accessible to the shader
+    if (stage.has_readconst) {
+        const auto [vk_buffer, offset] = buffer_cache.ObtainHostUBO(stage.flattened_ud_buf);
+        buffer_infos.emplace_back(vk_buffer->Handle(), offset,
+                                  stage.flattened_ud_buf.size() * sizeof(u32));
+        set_writes.push_back({
+            .dstSet = VK_NULL_HANDLE,
+            .dstBinding = binding.unified++,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = vk::DescriptorType::eUniformBuffer,
+            .pBufferInfo = &buffer_infos.back(),
+        });
+        ++binding.buffer;
+    }
+
+    // Second pass to re-bind buffers that were updated after binding
+    for (u32 i = 0; i < buffer_bindings.size(); i++) {
+        const auto& [buffer_id, vsharp] = buffer_bindings[i];
+        const auto& desc = stage.buffers[i];
+        const bool is_storage = desc.IsStorage(vsharp);
+        if (!buffer_id) {
+            if (desc.is_gds_buffer) {
+                const auto* gds_buf = buffer_cache.GetGdsBuffer();
+                buffer_infos.emplace_back(gds_buf->Handle(), 0, gds_buf->SizeBytes());
+            } else if (instance.IsNullDescriptorSupported()) {
+                buffer_infos.emplace_back(VK_NULL_HANDLE, 0, VK_WHOLE_SIZE);
+            } else {
+                auto& null_buffer = buffer_cache.GetBuffer(VideoCore::NULL_BUFFER_ID);
+                buffer_infos.emplace_back(null_buffer.Handle(), 0, VK_WHOLE_SIZE);
+            }
+        } else {
+            const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer(
+                vsharp.base_address, vsharp.GetSize(), desc.is_written, false, buffer_id);
+            const u32 alignment =
+                is_storage ? instance.StorageMinAlignment() : instance.UniformMinAlignment();
+            const u32 offset_aligned = Common::AlignDown(offset, alignment);
+            const u32 adjust = offset - offset_aligned;
+            ASSERT(adjust % 4 == 0);
+            push_data.AddOffset(binding.buffer, adjust);
+            buffer_infos.emplace_back(vk_buffer->Handle(), offset_aligned,
+                                      vsharp.GetSize() + adjust);
+        }
+
+        set_writes.push_back({
+            .dstSet = VK_NULL_HANDLE,
+            .dstBinding = binding.unified++,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = is_storage ? vk::DescriptorType::eStorageBuffer
+                                         : vk::DescriptorType::eUniformBuffer,
+            .pBufferInfo = &buffer_infos.back(),
+        });
+        ++binding.buffer;
+    }
+
+    const auto null_buffer_view =
+        instance.IsNullDescriptorSupported() ? VK_NULL_HANDLE : buffer_cache.NullBufferView();
+    for (u32 i = 0; i < texbuffer_bindings.size(); i++) {
+        const auto& [buffer_id, vsharp] = texbuffer_bindings[i];
+        const auto& desc = stage.texture_buffers[i];
+        vk::BufferView& buffer_view = buffer_views.emplace_back(null_buffer_view);
+        if (buffer_id) {
+            const u32 alignment = instance.TexelBufferMinAlignment();
+            const auto [vk_buffer, offset] = buffer_cache.ObtainBuffer(
+                vsharp.base_address, vsharp.GetSize(), desc.is_written, true, buffer_id);
+            const u32 fmt_stride = AmdGpu::NumBits(vsharp.GetDataFmt()) >> 3;
+            ASSERT_MSG(fmt_stride == vsharp.GetStride(),
+                       "Texel buffer stride must match format stride");
+            const u32 offset_aligned = Common::AlignDown(offset, alignment);
+            const u32 adjust = offset - offset_aligned;
+            ASSERT(adjust % fmt_stride == 0);
+            push_data.AddOffset(binding.buffer, adjust / fmt_stride);
+            buffer_view =
+                vk_buffer->View(offset_aligned, vsharp.GetSize() + adjust, desc.is_written,
+                                vsharp.GetDataFmt(), vsharp.GetNumberFmt());
+            if (auto barrier =
+                    vk_buffer->GetBarrier(desc.is_written ? vk::AccessFlagBits2::eShaderWrite
+                                                          : vk::AccessFlagBits2::eShaderRead,
+                                          vk::PipelineStageFlagBits2::eComputeShader)) {
+                buffer_barriers.emplace_back(*barrier);
+            }
+            if (desc.is_written) {
+                texture_cache.InvalidateMemoryFromGPU(vsharp.base_address, vsharp.GetSize());
+            }
+        }
+
+        set_writes.push_back({
+            .dstSet = VK_NULL_HANDLE,
+            .dstBinding = binding.unified++,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = desc.is_written ? vk::DescriptorType::eStorageTexelBuffer
+                                              : vk::DescriptorType::eUniformTexelBuffer,
+            .pTexelBufferView = &buffer_view,
+        });
+        ++binding.buffer;
+    }
+}
+
+void Rasterizer::BindTextures(const Shader::Info& stage, Shader::Backend::Bindings& binding,
+                              Pipeline::DescriptorWrites& set_writes) {
+    image_bindings.clear();
+
+    for (const auto& image_desc : stage.images) {
+        const auto tsharp = image_desc.GetSharp(stage);
+        if (texture_cache.IsMeta(tsharp.Address())) {
+            LOG_WARNING(Render_Vulkan, "Unexpected metadata read by a shader (texture)");
+        }
+
+        if (tsharp.GetDataFmt() == AmdGpu::DataFormat::FormatInvalid) {
+            image_bindings.emplace_back(std::piecewise_construct, std::tuple{}, std::tuple{});
+            continue;
+        }
+
+        auto& [image_id, desc] = image_bindings.emplace_back(std::piecewise_construct, std::tuple{},
+                                                             std::tuple{tsharp, image_desc});
+        image_id = texture_cache.FindImage(desc);
+        auto& image = texture_cache.GetImage(image_id);
+        ASSERT(False(image.flags & VideoCore::ImageFlagBits::Virtual));
+        if (image.binding.is_bound) {
+            // The image is already bound. In case if it is about to be used as storage we need
+            // to force general layout on it.
+            image.binding.force_general |= image_desc.is_storage;
+        }
+        if (image.binding.is_target) {
+            // The image is already bound as target. Since we read and output to it need to force
+            // general layout too.
+            image.binding.force_general = 1u;
+        }
+        image.binding.is_bound = 1u;
+    }
+
+    // Second pass to re-bind images that were updated after binding
+    for (auto& [image_id, desc] : image_bindings) {
+        bool is_storage = desc.type == VideoCore::TextureCache::BindingType::Storage;
+        if (!image_id) {
+            if (instance.IsNullDescriptorSupported()) {
+                image_infos.emplace_back(VK_NULL_HANDLE, VK_NULL_HANDLE, vk::ImageLayout::eGeneral);
+            } else {
+                auto& null_image = texture_cache.GetImageView(VideoCore::NULL_IMAGE_VIEW_ID);
+                image_infos.emplace_back(VK_NULL_HANDLE, *null_image.image_view,
+                                         vk::ImageLayout::eGeneral);
+            }
+        } else {
+            if (auto& old_image = texture_cache.GetImage(image_id);
+                old_image.binding.needs_rebind) {
+                old_image.binding.Reset(); // clean up previous image binding state
+                image_id = texture_cache.FindImage(desc);
+            }
+
+            bound_images.emplace_back(image_id);
+
+            auto& image = texture_cache.GetImage(image_id);
+            auto& image_view = texture_cache.FindTexture(image_id, desc.view_info);
+
+            if (image.binding.force_general || image.binding.is_target) {
+                image.Transit(vk::ImageLayout::eGeneral,
+                              vk::AccessFlagBits2::eShaderRead |
+                                  (image.info.IsDepthStencil()
+                                       ? vk::AccessFlagBits2::eDepthStencilAttachmentWrite
+                                       : vk::AccessFlagBits2::eColorAttachmentWrite),
+                              {});
+            } else {
+                if (is_storage) {
+                    image.Transit(vk::ImageLayout::eGeneral,
+                                  vk::AccessFlagBits2::eShaderRead |
+                                      vk::AccessFlagBits2::eShaderWrite,
+                                  desc.view_info.range);
+                } else {
+                    const auto new_layout = image.info.IsDepthStencil()
+                                                ? vk::ImageLayout::eDepthStencilReadOnlyOptimal
+                                                : vk::ImageLayout::eShaderReadOnlyOptimal;
+                    image.Transit(new_layout, vk::AccessFlagBits2::eShaderRead,
+                                  desc.view_info.range);
+                }
+            }
+            image.usage.storage |= is_storage;
+            image.usage.texture |= !is_storage;
+
+            image_infos.emplace_back(VK_NULL_HANDLE, *image_view.image_view,
+                                     image.last_state.layout);
+        }
+
+        set_writes.push_back({
+            .dstSet = VK_NULL_HANDLE,
+            .dstBinding = binding.unified++,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType =
+                is_storage ? vk::DescriptorType::eStorageImage : vk::DescriptorType::eSampledImage,
+            .pImageInfo = &image_infos.back(),
+        });
+    }
+
+    for (const auto& sampler : stage.samplers) {
+        auto ssharp = sampler.GetSharp(stage);
+        if (sampler.disable_aniso) {
+            const auto& tsharp = stage.images[sampler.associated_image].GetSharp(stage);
+            if (tsharp.base_level == 0 && tsharp.last_level == 0) {
+                ssharp.max_aniso.Assign(AmdGpu::AnisoRatio::One);
+            }
+        }
+        const auto vk_sampler = texture_cache.GetSampler(ssharp);
+        image_infos.emplace_back(vk_sampler, VK_NULL_HANDLE, vk::ImageLayout::eGeneral);
+        set_writes.push_back({
+            .dstSet = VK_NULL_HANDLE,
+            .dstBinding = binding.unified++,
+            .dstArrayElement = 0,
+            .descriptorCount = 1,
+            .descriptorType = vk::DescriptorType::eSampler,
+            .pImageInfo = &image_infos.back(),
+        });
+    }
+}
+
 void Rasterizer::BeginRendering(const GraphicsPipeline& pipeline, RenderState& state) {
     int cb_index = 0;
     for (auto& [image_id, desc] : cb_descs) {
         if (auto& old_img = texture_cache.GetImage(image_id); old_img.binding.needs_rebind) {
             auto& view = texture_cache.FindRenderTarget(desc);
             ASSERT(view.image_id != image_id);
-            image_id = view.image_id;
+            image_id = bound_images.emplace_back(view.image_id);
             auto& image = texture_cache.GetImage(view.image_id);
             state.color_attachments[cb_index].imageView = *view.image_view;
             state.color_attachments[cb_index].imageLayout = image.last_state.layout;
@@ -370,7 +675,6 @@ void Rasterizer::BeginRendering(const GraphicsPipeline& pipeline, RenderState& s
             state.height = std::min<u32>(state.height, std::max(image.info.size.height >> mip, 1u));
             ASSERT(old_img.info.size.width == state.width);
             ASSERT(old_img.info.size.height == state.height);
-            old_img.binding.Reset();
         }
         auto& image = texture_cache.GetImage(image_id);
         if (image.binding.force_general) {
@@ -386,7 +690,6 @@ void Rasterizer::BeginRendering(const GraphicsPipeline& pipeline, RenderState& s
         }
         image.usage.render_target = 1u;
         state.color_attachments[cb_index].imageLayout = image.last_state.layout;
-        image.binding.Reset();
         ++cb_index;
     }
 
@@ -419,7 +722,6 @@ void Rasterizer::BeginRendering(const GraphicsPipeline& pipeline, RenderState& s
         state.depth_attachment.imageLayout = image.last_state.layout;
         image.usage.depth_target = true;
         image.usage.stencil = has_stencil;
-        image.binding.Reset();
     }
 
     scheduler.BeginRendering(state);
@@ -65,6 +65,21 @@ private:
 
     bool FilterDraw();
 
+    void BindBuffers(const Shader::Info& stage, Shader::Backend::Bindings& binding,
+                     Shader::PushData& push_data, Pipeline::DescriptorWrites& set_writes,
+                     Pipeline::BufferBarriers& buffer_barriers);
+
+    void BindTextures(const Shader::Info& stage, Shader::Backend::Bindings& binding,
+                      Pipeline::DescriptorWrites& set_writes);
+
+    bool BindResources(const Pipeline* pipeline);
+    void ResetBindings() {
+        for (auto& image_id : bound_images) {
+            texture_cache.GetImage(image_id).binding.Reset();
+        }
+        bound_images.clear();
+    }
+
 private:
     const Instance& instance;
     Scheduler& scheduler;
@@ -79,6 +94,20 @@ private:
              std::pair<VideoCore::ImageId, VideoCore::TextureCache::RenderTargetDesc>, 8>
         cb_descs;
     std::optional<std::pair<VideoCore::ImageId, VideoCore::TextureCache::DepthTargetDesc>> db_desc;
+    boost::container::static_vector<vk::DescriptorImageInfo, 32> image_infos;
+    boost::container::static_vector<vk::BufferView, 8> buffer_views;
+    boost::container::static_vector<vk::DescriptorBufferInfo, 32> buffer_infos;
+    boost::container::static_vector<VideoCore::ImageId, 64> bound_images;
+
+    Pipeline::DescriptorWrites set_writes;
+    Pipeline::BufferBarriers buffer_barriers;
+
+    using BufferBindingInfo = std::pair<VideoCore::BufferId, AmdGpu::Buffer>;
+    boost::container::static_vector<BufferBindingInfo, 32> buffer_bindings;
+    using TexBufferBindingInfo = std::pair<VideoCore::BufferId, AmdGpu::Buffer>;
+    boost::container::static_vector<TexBufferBindingInfo, 32> texbuffer_bindings;
+    using ImageBindingInfo = std::pair<VideoCore::ImageId, VideoCore::TextureCache::TextureDesc>;
+    boost::container::static_vector<ImageBindingInfo, 32> image_bindings;
 };
 
 } // namespace Vulkan
@@ -71,8 +71,7 @@ public:
     struct RenderTargetDesc : public BaseDesc {
         RenderTargetDesc(const AmdGpu::Liverpool::ColorBuffer& buffer,
                          const AmdGpu::Liverpool::CbDbExtent& hint = {})
-            : BaseDesc{BindingType::RenderTarget, ImageInfo{buffer, hint},
-                       ImageViewInfo{buffer, false}} {}
+            : BaseDesc{BindingType::RenderTarget, ImageInfo{buffer, hint}, ImageViewInfo{buffer}} {}
     };
 
     struct DepthTargetDesc : public BaseDesc {